xref: /onnv-gate/usr/src/uts/intel/io/agpgart/agpgart.c (revision 3446:5903aece022d)
1*3446Smrj /*
2*3446Smrj  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
3*3446Smrj  * Use is subject to license terms.
4*3446Smrj  */
5*3446Smrj /*
6*3446Smrj  * Portions Philip Brown phil@bolthole.com Dec 2001
7*3446Smrj  */
8*3446Smrj 
9*3446Smrj #pragma ident	"%Z%%M%	%I%	%E% SMI"
10*3446Smrj 
11*3446Smrj /*
12*3446Smrj  * agpgart driver
13*3446Smrj  *
14*3446Smrj  * This driver is primary targeted at providing memory support for INTEL
15*3446Smrj  * AGP device, INTEL memory less video card, and AMD64 cpu GART devices.
 * So there are five main architectures, ARC_IGD810, ARC_IGD830, ARC_INTELAGP,
 * ARC_AMD64AGP and ARC_AMD64NOAGP, in the agpgart driver. However, the memory
18*3446Smrj  * interfaces are the same for these architectures. The difference is how to
19*3446Smrj  * manage the hardware GART table for them.
20*3446Smrj  *
 * For large memory allocations, this driver uses direct mapping to the
 * userland application interface to save kernel virtual memory.
23*3446Smrj  */
24*3446Smrj 
25*3446Smrj #include <sys/types.h>
26*3446Smrj #include <sys/pci.h>
27*3446Smrj #include <sys/systm.h>
28*3446Smrj #include <sys/conf.h>
29*3446Smrj #include <sys/file.h>
30*3446Smrj #include <sys/kstat.h>
31*3446Smrj #include <sys/stat.h>
32*3446Smrj #include <sys/modctl.h>
33*3446Smrj #include <sys/ddi.h>
34*3446Smrj #include <sys/sunddi.h>
35*3446Smrj #include <sys/sunldi.h>
36*3446Smrj #include <sys/policy.h>
37*3446Smrj #include <sys/ddidevmap.h>
38*3446Smrj #include <vm/seg_dev.h>
39*3446Smrj #include <sys/pmem.h>
40*3446Smrj #include <sys/agpgart.h>
41*3446Smrj #include <sys/agp/agpdefs.h>
42*3446Smrj #include <sys/agp/agpgart_impl.h>
43*3446Smrj #include <sys/agp/agpamd64gart_io.h>
44*3446Smrj #include <sys/agp/agpmaster_io.h>
45*3446Smrj #include <sys/agp/agptarget_io.h>
46*3446Smrj 
/* Dynamic debug support */
int agp_debug_var = 0;
/* Log only when debug level is exactly 1 (verbose notices) */
#define	AGPDB_PRINT1(fmt)	if (agp_debug_var == 1) cmn_err fmt
/* Log at any non-zero debug level (warnings and errors) */
#define	AGPDB_PRINT2(fmt)	if (agp_debug_var >= 1) cmn_err fmt

/* Driver global softstate handle */
static void *agpgart_glob_soft_handle;

/* Maximum driver instance number probed for master/target/gart nodes */
#define	MAX_INSTNUM			16

/* The instance number is encoded in the minor number, shifted by 4 bits */
#define	AGP_DEV2INST(devt)	(getminor((devt)) >> 4)
#define	AGP_INST2MINOR(instance)	((instance) << 4)
#define	IS_INTEL_830(type)	((type) == ARC_IGD830)
/* "True" AGP means a discrete AGP master, Intel or AMD64 chipset */
#define	IS_TRUE_AGP(type)	(((type) == ARC_INTELAGP) || \
	((type) == ARC_AMD64AGP))
62*3446Smrj 
/*
 * Copy a native agp info struct (v) into its 32-bit ioctl counterpart
 * (v32), field by field, for ILP32 callers of the info ioctl.
 */
#define	agpinfo_default_to_32(v, v32)	\
	{				\
		(v32).agpi32_version = (v).agpi_version;	\
		(v32).agpi32_devid = (v).agpi_devid;	\
		(v32).agpi32_mode = (v).agpi_mode;	\
		(v32).agpi32_aperbase = (v).agpi_aperbase;	\
		(v32).agpi32_apersize = (v).agpi_apersize;	\
		(v32).agpi32_pgtotal = (v).agpi_pgtotal;	\
		(v32).agpi32_pgsystem = (v).agpi_pgsystem;	\
		(v32).agpi32_pgused = (v).agpi_pgused;	\
	}
74*3446Smrj 
/*
 * DMA attributes for AGP memory allocations: 32-bit addressable and
 * aligned to the AGP page size.
 */
static ddi_dma_attr_t agpgart_dma_attr = {
	DMA_ATTR_V0,
	0U,				/* dma_attr_addr_lo */
	0xffffffffU,			/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	(uint64_t)AGP_PAGE_SIZE,	/* dma_attr_align */
	1,				/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffU,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen, variable */
	4,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};
89*3446Smrj 
/*
 * AMD64 supports gart table above 4G. See alloc_gart_table.
 * NOTE(review): dma_attr_addr_hi below is 4G-1, so the >4G support is
 * presumably arranged in alloc_gart_table (not visible here) — confirm.
 */
static ddi_dma_attr_t garttable_dma_attr = {
	DMA_ATTR_V0,
	0U,				/* dma_attr_addr_lo */
	0xffffffffU,			/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	(uint64_t)AGP_PAGE_SIZE,	/* dma_attr_align */
	1,				/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffU,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen, variable */
	4,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};
107*3446Smrj 
/*
 * AGPGART table need a physical contiguous memory. To assure that
 * each access to gart table is strongly ordered and uncachable,
 * we use DDI_STRICTORDER_ACC.
 */
static ddi_device_acc_attr_t gart_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC	/* must be DDI_STRICTORDER_ACC */
};
118*3446Smrj 
/*
 * AGP memory is usually used as texture memory or for a framebuffer, so we
 * can set the memory attribute to write combining. Video drivers will
 * determine the frame buffer attributes, for example the memory is write
 * combining or non-cachable. However, the interface between Xorg and the
 * agpgart driver to support attribute selection doesn't exist yet. So we
 * set agp memory to non-cachable by default now. This attribute might be
 * overridden by MTRR in X86.
 */
static ddi_device_acc_attr_t mem_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC	/* Can be DDI_MERGING_OK_ACC */
};
133*3446Smrj 
134*3446Smrj static keytable_ent_t *
135*3446Smrj agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset);
136*3446Smrj static void
137*3446Smrj amd64_gart_unregister(amd64_garts_dev_t *cpu_garts);
138*3446Smrj 
139*3446Smrj 
140*3446Smrj static void
141*3446Smrj agp_devmap_unmap(devmap_cookie_t handle, void *devprivate,
142*3446Smrj     offset_t off, size_t len, devmap_cookie_t new_handle1,
143*3446Smrj     void **new_devprivate1, devmap_cookie_t new_handle2,
144*3446Smrj     void **new_devprivate2)
145*3446Smrj {
146*3446Smrj 
147*3446Smrj 	struct keytable_ent *mementry;
148*3446Smrj 	agpgart_softstate_t *softstate;
149*3446Smrj 	agpgart_ctx_t *ctxp, *newctxp1, *newctxp2;
150*3446Smrj 
151*3446Smrj 	ASSERT(AGP_ALIGNED(len) && AGP_ALIGNED(off));
152*3446Smrj 	ASSERT(devprivate);
153*3446Smrj 	ASSERT(handle);
154*3446Smrj 
155*3446Smrj 	ctxp = (agpgart_ctx_t *)devprivate;
156*3446Smrj 	softstate = ctxp->actx_sc;
157*3446Smrj 	ASSERT(softstate);
158*3446Smrj 
159*3446Smrj 	if (new_handle1 != NULL) {
160*3446Smrj 		newctxp1 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
161*3446Smrj 		newctxp1->actx_sc = softstate;
162*3446Smrj 		newctxp1->actx_off = ctxp->actx_off;
163*3446Smrj 		*new_devprivate1 = newctxp1;
164*3446Smrj 	}
165*3446Smrj 
166*3446Smrj 	if (new_handle2 != NULL) {
167*3446Smrj 		newctxp2 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
168*3446Smrj 		newctxp2->actx_sc = softstate;
169*3446Smrj 		newctxp2->actx_off = off + len;
170*3446Smrj 		*new_devprivate2 = newctxp2;
171*3446Smrj 	}
172*3446Smrj 
173*3446Smrj 	mutex_enter(&softstate->asoft_instmutex);
174*3446Smrj 	if ((new_handle1 == NULL) && (new_handle2 == NULL)) {
175*3446Smrj 		mementry =
176*3446Smrj 		    agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off));
177*3446Smrj 		ASSERT(mementry);
178*3446Smrj 		mementry->kte_refcnt--;
179*3446Smrj 	} else if ((new_handle1 != NULL) && (new_handle2 != NULL)) {
180*3446Smrj 		mementry =
181*3446Smrj 		    agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off));
182*3446Smrj 		ASSERT(mementry);
183*3446Smrj 		mementry->kte_refcnt++;
184*3446Smrj 	}
185*3446Smrj 	ASSERT(mementry->kte_refcnt >= 0);
186*3446Smrj 	mutex_exit(&softstate->asoft_instmutex);
187*3446Smrj 	kmem_free(ctxp, sizeof (struct agpgart_ctx));
188*3446Smrj }
189*3446Smrj 
190*3446Smrj /*ARGSUSED*/
191*3446Smrj static int
192*3446Smrj agp_devmap_map(devmap_cookie_t handle, dev_t dev,
193*3446Smrj     uint_t flags, offset_t offset, size_t len, void **new_devprivate)
194*3446Smrj {
195*3446Smrj 	agpgart_softstate_t *softstate;
196*3446Smrj 	int instance;
197*3446Smrj 	struct keytable_ent *mementry;
198*3446Smrj 	agpgart_ctx_t *newctxp;
199*3446Smrj 
200*3446Smrj 	ASSERT(handle);
201*3446Smrj 	instance = AGP_DEV2INST(dev);
202*3446Smrj 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
203*3446Smrj 	if (softstate == NULL) {
204*3446Smrj 		AGPDB_PRINT2((CE_WARN, "agp_devmap_map: get soft state err"));
205*3446Smrj 		return (ENXIO);
206*3446Smrj 	}
207*3446Smrj 
208*3446Smrj 	ASSERT(softstate);
209*3446Smrj 	ASSERT(mutex_owned(&softstate->asoft_instmutex));
210*3446Smrj 	ASSERT(len);
211*3446Smrj 	ASSERT(AGP_ALIGNED(offset) && AGP_ALIGNED(len));
212*3446Smrj 
213*3446Smrj 	mementry =
214*3446Smrj 	    agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset));
215*3446Smrj 	ASSERT(mementry);
216*3446Smrj 	mementry->kte_refcnt++;
217*3446Smrj 	ASSERT(mementry->kte_refcnt >= 0);
218*3446Smrj 	newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
219*3446Smrj 	newctxp->actx_off = offset;
220*3446Smrj 	newctxp->actx_sc = softstate;
221*3446Smrj 	*new_devprivate = newctxp;
222*3446Smrj 
223*3446Smrj 	return (0);
224*3446Smrj }
225*3446Smrj 
226*3446Smrj /*ARGSUSED*/
227*3446Smrj static int agp_devmap_dup(devmap_cookie_t handle, void *devprivate,
228*3446Smrj     devmap_cookie_t new_handle, void **new_devprivate)
229*3446Smrj {
230*3446Smrj 	struct keytable_ent *mementry;
231*3446Smrj 	agpgart_ctx_t *newctxp, *ctxp;
232*3446Smrj 	agpgart_softstate_t *softstate;
233*3446Smrj 
234*3446Smrj 	ASSERT(devprivate);
235*3446Smrj 	ASSERT(handle && new_handle);
236*3446Smrj 
237*3446Smrj 	ctxp = (agpgart_ctx_t *)devprivate;
238*3446Smrj 	ASSERT(AGP_ALIGNED(ctxp->actx_off));
239*3446Smrj 
240*3446Smrj 	newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
241*3446Smrj 	newctxp->actx_off = ctxp->actx_off;
242*3446Smrj 	newctxp->actx_sc = ctxp->actx_sc;
243*3446Smrj 	softstate = (agpgart_softstate_t *)newctxp->actx_sc;
244*3446Smrj 
245*3446Smrj 	mutex_enter(&softstate->asoft_instmutex);
246*3446Smrj 	mementry = agp_find_bound_keyent(softstate,
247*3446Smrj 	    AGP_BYTES2PAGES(newctxp->actx_off));
248*3446Smrj 	mementry->kte_refcnt++;
249*3446Smrj 	ASSERT(mementry->kte_refcnt >= 0);
250*3446Smrj 	mutex_exit(&softstate->asoft_instmutex);
251*3446Smrj 	*new_devprivate = newctxp;
252*3446Smrj 
253*3446Smrj 	return (0);
254*3446Smrj }
255*3446Smrj 
/*
 * Devmap callback vector for AGP mappings; the map/dup/unmap hooks
 * above keep kte_refcnt on the bound key table entries up to date.
 */
struct devmap_callback_ctl agp_devmap_cb = {
	DEVMAP_OPS_REV,		/* rev */
	agp_devmap_map,		/* map */
	NULL,			/* access */
	agp_devmap_dup,		/* dup */
	agp_devmap_unmap,	/* unmap */
};
263*3446Smrj 
264*3446Smrj /*
265*3446Smrj  * agp_master_regis_byname()
266*3446Smrj  *
267*3446Smrj  * Description:
268*3446Smrj  * 	Open the AGP master device node by device path name and
269*3446Smrj  * 	register the device handle for later operations.
270*3446Smrj  * 	We check all possible driver instance from 0
271*3446Smrj  * 	to MAX_INSTNUM because the master device could be
272*3446Smrj  * 	at any instance number. Only one AGP master is supported.
273*3446Smrj  *
274*3446Smrj  * Arguments:
275*3446Smrj  * 	master_hdlp		AGP master device LDI handle pointer
276*3446Smrj  *	agpgart_l		AGPGART driver LDI identifier
277*3446Smrj  *
278*3446Smrj  * Returns:
279*3446Smrj  * 	-1			failed
280*3446Smrj  * 	0			success
281*3446Smrj  */
282*3446Smrj static int
283*3446Smrj agp_master_regis_byname(ldi_handle_t *master_hdlp, ldi_ident_t agpgart_li)
284*3446Smrj {
285*3446Smrj 	int	i;
286*3446Smrj 	char	buf[MAXPATHLEN];
287*3446Smrj 
288*3446Smrj 	ASSERT(master_hdlp);
289*3446Smrj 	ASSERT(agpgart_li);
290*3446Smrj 
291*3446Smrj 	/*
292*3446Smrj 	 * Search all possible instance numbers for the agp master device.
293*3446Smrj 	 * Only one master device is supported now, so the search ends
294*3446Smrj 	 * when one master device is found.
295*3446Smrj 	 */
296*3446Smrj 	for (i = 0; i < MAX_INSTNUM; i++) {
297*3446Smrj 		(void) snprintf(buf, MAXPATHLEN, "%s%d", AGPMASTER_DEVLINK, i);
298*3446Smrj 		if ((ldi_open_by_name(buf, 0, kcred,
299*3446Smrj 		    master_hdlp, agpgart_li)))
300*3446Smrj 			continue;
301*3446Smrj 		AGPDB_PRINT1((CE_NOTE,
302*3446Smrj 		    "master device found: instance number=%d", i));
303*3446Smrj 		break;
304*3446Smrj 
305*3446Smrj 	}
306*3446Smrj 
307*3446Smrj 	/* AGP master device not found */
308*3446Smrj 	if (i == MAX_INSTNUM)
309*3446Smrj 		return (-1);
310*3446Smrj 
311*3446Smrj 	return (0);
312*3446Smrj }
313*3446Smrj 
314*3446Smrj /*
315*3446Smrj  * agp_target_regis_byname()
316*3446Smrj  *
317*3446Smrj  * Description:
318*3446Smrj  * 	This function opens agp bridge device node by
319*3446Smrj  * 	device path name and registers the device handle
320*3446Smrj  * 	for later operations.
321*3446Smrj  * 	We check driver instance from 0 to MAX_INSTNUM
322*3446Smrj  * 	because the master device could be at any instance
323*3446Smrj  * 	number. Only one agp target is supported.
324*3446Smrj  *
325*3446Smrj  *
326*3446Smrj  * Arguments:
327*3446Smrj  *	target_hdlp		AGP target device LDI handle pointer
328*3446Smrj  *	agpgart_l		AGPGART driver LDI identifier
329*3446Smrj  *
330*3446Smrj  * Returns:
331*3446Smrj  * 	-1			failed
332*3446Smrj  * 	0			success
333*3446Smrj  */
334*3446Smrj static int
335*3446Smrj agp_target_regis_byname(ldi_handle_t *target_hdlp, ldi_ident_t agpgart_li)
336*3446Smrj {
337*3446Smrj 	int	i;
338*3446Smrj 	char	buf[MAXPATHLEN];
339*3446Smrj 
340*3446Smrj 	ASSERT(target_hdlp);
341*3446Smrj 	ASSERT(agpgart_li);
342*3446Smrj 
343*3446Smrj 	for (i = 0; i < MAX_INSTNUM; i++) {
344*3446Smrj 		(void) snprintf(buf, MAXPATHLEN, "%s%d", AGPTARGET_DEVLINK, i);
345*3446Smrj 		if ((ldi_open_by_name(buf, 0, kcred,
346*3446Smrj 		    target_hdlp, agpgart_li)))
347*3446Smrj 			continue;
348*3446Smrj 
349*3446Smrj 		AGPDB_PRINT1((CE_NOTE,
350*3446Smrj 		    "bridge device found: instance number=%d", i));
351*3446Smrj 		break;
352*3446Smrj 
353*3446Smrj 	}
354*3446Smrj 
355*3446Smrj 	/* AGP bridge device not found */
356*3446Smrj 	if (i == MAX_INSTNUM) {
357*3446Smrj 		AGPDB_PRINT2((CE_WARN, "bridge device not found"));
358*3446Smrj 		return (-1);
359*3446Smrj 	}
360*3446Smrj 
361*3446Smrj 	return (0);
362*3446Smrj }
363*3446Smrj 
364*3446Smrj /*
365*3446Smrj  * amd64_gart_regis_byname()
366*3446Smrj  *
367*3446Smrj  * Description:
 * 	Open all amd64 gart device nodes by device path name and
369*3446Smrj  * 	register the device handles for later operations. Each cpu
370*3446Smrj  * 	has its own amd64 gart device.
371*3446Smrj  *
372*3446Smrj  * Arguments:
373*3446Smrj  * 	cpu_garts		cpu garts device list header
374*3446Smrj  *	agpgart_l		AGPGART driver LDI identifier
375*3446Smrj  *
 * Returns:
 * 	0			success
 * 	ENODEV			no amd64 gart device was found
 * 	other errno		ldi_open_by_name() failed on a device
379*3446Smrj  */
static int
amd64_gart_regis_byname(amd64_garts_dev_t *cpu_garts, ldi_ident_t agpgart_li)
{
	amd64_gart_dev_list_t	*gart_list;
	int			i;
	char			buf[MAXPATHLEN];
	ldi_handle_t		gart_hdl;
	int			ret;

	ASSERT(cpu_garts);
	ASSERT(agpgart_li);

	/*
	 * Search all possible instance numbers for the gart devices.
	 * There can be multiple on-cpu gart devices for Opteron server.
	 */
	for (i = 0; i < MAX_INSTNUM; i++) {
		(void) snprintf(buf, MAXPATHLEN, "%s%d", CPUGART_DEVLINK, i);
		ret = ldi_open_by_name(buf, 0, kcred,
		    &gart_hdl, agpgart_li);

		/* ENODEV just means this instance doesn't exist; keep going */
		if (ret == ENODEV)
			continue;
		else if (ret != 0) { /* There was an error opening the device */
			/* Close everything opened so far, propagate the errno */
			amd64_gart_unregister(cpu_garts);
			return (ret);
		}

		AGPDB_PRINT1((CE_NOTE,
		    "amd64 gart device found: instance number=%d", i));

		gart_list = (amd64_gart_dev_list_t *)
		    kmem_zalloc(sizeof (amd64_gart_dev_list_t), KM_SLEEP);

		/* Add new item to the head of the gart device list */
		gart_list->gart_devhdl = gart_hdl;
		gart_list->next = cpu_garts->gart_dev_list_head;
		cpu_garts->gart_dev_list_head = gart_list;
		cpu_garts->gart_device_num++;
	}

	/* No gart device opened at all: not an amd64-gart system */
	if (cpu_garts->gart_device_num == 0)
		return (ENODEV);
	return (0);
}
425*3446Smrj 
426*3446Smrj /*
427*3446Smrj  * Unregister agp master device handle
428*3446Smrj  */
429*3446Smrj static void
430*3446Smrj agp_master_unregister(ldi_handle_t *master_hdlp)
431*3446Smrj {
432*3446Smrj 	ASSERT(master_hdlp);
433*3446Smrj 
434*3446Smrj 	if (master_hdlp) {
435*3446Smrj 		(void) ldi_close(*master_hdlp, 0, kcred);
436*3446Smrj 		*master_hdlp = NULL;
437*3446Smrj 	}
438*3446Smrj }
439*3446Smrj 
440*3446Smrj /*
441*3446Smrj  * Unregister agp bridge device handle
442*3446Smrj  */
443*3446Smrj static void
444*3446Smrj agp_target_unregister(ldi_handle_t *target_hdlp)
445*3446Smrj {
446*3446Smrj 	if (target_hdlp) {
447*3446Smrj 		(void) ldi_close(*target_hdlp, 0, kcred);
448*3446Smrj 		*target_hdlp = NULL;
449*3446Smrj 	}
450*3446Smrj }
451*3446Smrj 
452*3446Smrj /*
453*3446Smrj  * Unregister all amd64 gart device handles
454*3446Smrj  */
455*3446Smrj static void
456*3446Smrj amd64_gart_unregister(amd64_garts_dev_t *cpu_garts)
457*3446Smrj {
458*3446Smrj 	amd64_gart_dev_list_t	*gart_list;
459*3446Smrj 	amd64_gart_dev_list_t	*next;
460*3446Smrj 
461*3446Smrj 	ASSERT(cpu_garts);
462*3446Smrj 
463*3446Smrj 	for (gart_list = cpu_garts->gart_dev_list_head;
464*3446Smrj 	    gart_list; gart_list = next) {
465*3446Smrj 
466*3446Smrj 		ASSERT(gart_list->gart_devhdl);
467*3446Smrj 		(void) ldi_close(gart_list->gart_devhdl, 0, kcred);
468*3446Smrj 		next = gart_list->next;
469*3446Smrj 		/* Free allocated memory */
470*3446Smrj 		kmem_free(gart_list, sizeof (amd64_gart_dev_list_t));
471*3446Smrj 	}
472*3446Smrj 	cpu_garts->gart_dev_list_head = NULL;
473*3446Smrj 	cpu_garts->gart_device_num = 0;
474*3446Smrj }
475*3446Smrj 
476*3446Smrj /*
477*3446Smrj  * lyr_detect_master_type()
478*3446Smrj  *
479*3446Smrj  * Description:
480*3446Smrj  * 	This function gets agp master type by querying agp master device.
481*3446Smrj  *
482*3446Smrj  * Arguments:
483*3446Smrj  * 	master_hdlp		agp master device ldi handle pointer
484*3446Smrj  *
485*3446Smrj  * Returns:
486*3446Smrj  * 	-1			unsupported device
 * 	DEVICE_IS_I810		i810 series
 * 	DEVICE_IS_I830		i830 series
489*3446Smrj  * 	DEVICE_IS_AGP		true agp master
490*3446Smrj  */
491*3446Smrj static int
492*3446Smrj lyr_detect_master_type(ldi_handle_t *master_hdlp)
493*3446Smrj {
494*3446Smrj 	int vtype;
495*3446Smrj 	int err;
496*3446Smrj 
497*3446Smrj 	ASSERT(master_hdlp);
498*3446Smrj 
499*3446Smrj 	/* ldi_ioctl(agpmaster) */
500*3446Smrj 	err = ldi_ioctl(*master_hdlp, DEVICE_DETECT,
501*3446Smrj 	    (intptr_t)&vtype, FKIOCTL, kcred, 0);
502*3446Smrj 	if (err) /* Unsupported graphics device */
503*3446Smrj 		return (-1);
504*3446Smrj 	return (vtype);
505*3446Smrj }
506*3446Smrj 
507*3446Smrj /*
 * lyr_detect_target_type()
509*3446Smrj  *
510*3446Smrj  * Description:
511*3446Smrj  * 	This function gets the host bridge chipset type by querying the agp
512*3446Smrj  *	target device.
513*3446Smrj  *
514*3446Smrj  * Arguments:
515*3446Smrj  * 	target_hdlp		agp target device LDI handle pointer
516*3446Smrj  *
517*3446Smrj  * Returns:
518*3446Smrj  * 	CHIP_IS_INTEL		Intel agp chipsets
519*3446Smrj  * 	CHIP_IS_AMD		AMD agp chipset
520*3446Smrj  * 	-1			unsupported chipset
521*3446Smrj  */
522*3446Smrj static int
523*3446Smrj lyr_detect_target_type(ldi_handle_t *target_hdlp)
524*3446Smrj {
525*3446Smrj 	int btype;
526*3446Smrj 	int err;
527*3446Smrj 
528*3446Smrj 	ASSERT(target_hdlp);
529*3446Smrj 
530*3446Smrj 	err = ldi_ioctl(*target_hdlp, CHIP_DETECT, (intptr_t)&btype,
531*3446Smrj 	    FKIOCTL, kcred, 0);
532*3446Smrj 	if (err)	/* Unsupported bridge device */
533*3446Smrj 		return (-1);
534*3446Smrj 	return (btype);
535*3446Smrj }
536*3446Smrj 
537*3446Smrj /*
538*3446Smrj  * lyr_init()
539*3446Smrj  *
540*3446Smrj  * Description:
541*3446Smrj  * 	This function detects the  graphics system architecture and
542*3446Smrj  * 	registers all relative device handles in a global structure
543*3446Smrj  * 	"agp_regdev". Then it stores the system arc type in driver
544*3446Smrj  * 	soft state.
545*3446Smrj  *
546*3446Smrj  * Arguments:
547*3446Smrj  *	agp_regdev		AGP devices registration struct pointer
548*3446Smrj  *	agpgart_l		AGPGART driver LDI identifier
549*3446Smrj  *
550*3446Smrj  * Returns:
551*3446Smrj  * 	0	System arc supported and agp devices registration successed.
552*3446Smrj  * 	-1	System arc not supported or device registration failed.
553*3446Smrj  */
int
lyr_init(agp_registered_dev_t *agp_regdev, ldi_ident_t agpgart_li)
{
	ldi_handle_t *master_hdlp;
	ldi_handle_t *target_hdlp;
	amd64_garts_dev_t *garts_dev;
	int card_type, chip_type;
	int ret;

	ASSERT(agp_regdev);

	bzero(agp_regdev, sizeof (agp_registered_dev_t));
	agp_regdev->agprd_arctype = ARC_UNKNOWN;
	/*
	 * Register agp devices, assuming all instances attached, and
	 * detect which agp architecture this server belongs to. This
	 * must be done before the agpgart driver starts to use layered
	 * driver interfaces.
	 */
	master_hdlp = &agp_regdev->agprd_masterhdl;
	target_hdlp = &agp_regdev->agprd_targethdl;
	garts_dev = &agp_regdev->agprd_cpugarts;

	/* Check whether the system is amd64 arc */
	if ((ret = amd64_gart_regis_byname(garts_dev, agpgart_li)) == ENODEV) {
		/*
		 * No amd64 gart devices: this is an Intel path. Both the
		 * master and target must register, or detection fails.
		 */
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: this is not an amd64 system"));
		if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: register master device unsuccessful"));
			goto err1;
		}
		if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: register target device unsuccessful"));
			goto err2;
		}
		card_type = lyr_detect_master_type(master_hdlp);
		/*
		 * Detect system arc by master device. If it is an intel
		 * integrated device, finish the detection successfully.
		 */
		switch (card_type) {
		case DEVICE_IS_I810:	/* I810 likewise graphics */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: the system is Intel 810 arch"));
			agp_regdev->agprd_arctype = ARC_IGD810;
			return (0);
		case DEVICE_IS_I830:	/* I830 likewise graphics */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: the system is Intel 830 arch"));
			agp_regdev->agprd_arctype = ARC_IGD830;
			return (0);
		case DEVICE_IS_AGP:	/* AGP graphics */
			/* Discrete AGP master: arc depends on the chipset */
			break;
		default:		/* Non IGD/AGP graphics */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: non-supported master device"));
			goto err3;
		}

		chip_type = lyr_detect_target_type(target_hdlp);

		/* Continue to detect AGP arc by target device */
		switch (chip_type) {
		case CHIP_IS_INTEL:	/* Intel chipset */
			AGPDB_PRINT1((CE_NOTE,
			    "lyr_init: Intel AGP arch detected"));
			agp_regdev->agprd_arctype = ARC_INTELAGP;
			return (0);
		case CHIP_IS_AMD:	/* AMD chipset */
			/* An AMD64 chipset without cpu garts is unsupported */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: no cpu gart, but have AMD64 chipsets"));
			goto err3;
		default:		/* Non supported chipset */
			AGPDB_PRINT2((CE_WARN,
			    "lyr_init: detection can not continue"));
			goto err3;
		}

	}

	if (ret)
		return (-1); /* Errors in open amd64 cpu gart devices */

	/*
	 * AMD64 cpu gart device exists, continue detection.
	 * Missing master or target is not fatal on amd64: fall back to
	 * IOMMU-only operation (ARC_AMD64NOAGP).
	 */

	if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: register master device unsuccessful"));

		agp_regdev->agprd_arctype = ARC_AMD64NOAGP;
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: no AGP master, but supports IOMMU in amd64"));
		return (0); /* Finished successfully */
	}

	if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: register target device unsuccessful"));

		agp_regdev->agprd_arctype = ARC_AMD64NOAGP;

		AGPDB_PRINT1((CE_NOTE,
		    "lyr_init: no AGP bridge, but supports IOMMU in amd64"));

		/* Master was opened above but won't be used; close it */
		agp_master_unregister(&agp_regdev->agprd_masterhdl);
		return (0); /* Finished successfully */

	}


	AGPDB_PRINT1((CE_NOTE,
	    "lyr_init: the system is AMD64 AGP architecture"));

	agp_regdev->agprd_arctype = ARC_AMD64AGP;

	return (0); /* Finished successfully */

err3:
	agp_target_unregister(&agp_regdev->agprd_targethdl);
err2:
	agp_master_unregister(&agp_regdev->agprd_masterhdl);
err1:
	agp_regdev->agprd_arctype = ARC_UNKNOWN;
	return (-1);
}
684*3446Smrj 
685*3446Smrj void
686*3446Smrj lyr_end(agp_registered_dev_t *agp_regdev)
687*3446Smrj {
688*3446Smrj 	ASSERT(agp_regdev);
689*3446Smrj 
690*3446Smrj 	switch (agp_regdev->agprd_arctype) {
691*3446Smrj 	case ARC_IGD810:
692*3446Smrj 	case ARC_IGD830:
693*3446Smrj 	case ARC_INTELAGP:
694*3446Smrj 		agp_master_unregister(&agp_regdev->agprd_masterhdl);
695*3446Smrj 		agp_target_unregister(&agp_regdev->agprd_targethdl);
696*3446Smrj 
697*3446Smrj 		return;
698*3446Smrj 	case ARC_AMD64AGP:
699*3446Smrj 		agp_master_unregister(&agp_regdev->agprd_masterhdl);
700*3446Smrj 		agp_target_unregister(&agp_regdev->agprd_targethdl);
701*3446Smrj 		amd64_gart_unregister(&agp_regdev->agprd_cpugarts);
702*3446Smrj 
703*3446Smrj 		return;
704*3446Smrj 	case ARC_AMD64NOAGP:
705*3446Smrj 		amd64_gart_unregister(&agp_regdev->agprd_cpugarts);
706*3446Smrj 
707*3446Smrj 		return;
708*3446Smrj 	default:
709*3446Smrj 		ASSERT(0);
710*3446Smrj 		return;
711*3446Smrj 	}
712*3446Smrj }
713*3446Smrj 
714*3446Smrj int
715*3446Smrj lyr_get_info(agp_kern_info_t *info, agp_registered_dev_t *agp_regdev)
716*3446Smrj {
717*3446Smrj 	ldi_handle_t hdl;
718*3446Smrj 	igd_info_t value1;
719*3446Smrj 	i_agp_info_t value2;
720*3446Smrj 	amdgart_info_t value3;
721*3446Smrj 	size_t prealloc_size;
722*3446Smrj 	int err;
723*3446Smrj 	amd64_gart_dev_list_t	*gart_head;
724*3446Smrj 
725*3446Smrj 	ASSERT(info);
726*3446Smrj 	ASSERT(agp_regdev);
727*3446Smrj 
728*3446Smrj 	switch (agp_regdev->agprd_arctype) {
729*3446Smrj 	case ARC_IGD810:
730*3446Smrj 		hdl = agp_regdev->agprd_masterhdl;
731*3446Smrj 		err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
732*3446Smrj 		    FKIOCTL, kcred, 0);
733*3446Smrj 		if (err)
734*3446Smrj 			return (-1);
735*3446Smrj 		info->agpki_mdevid = value1.igd_devid;
736*3446Smrj 		info->agpki_aperbase = value1.igd_aperbase;
737*3446Smrj 		info->agpki_apersize = value1.igd_apersize;
738*3446Smrj 
739*3446Smrj 		hdl = agp_regdev->agprd_targethdl;
740*3446Smrj 		err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
741*3446Smrj 		    (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
742*3446Smrj 		if (err)
743*3446Smrj 			return (-1);
744*3446Smrj 		info->agpki_presize = prealloc_size;
745*3446Smrj 
746*3446Smrj 		break;
747*3446Smrj 
748*3446Smrj 	case ARC_IGD830:
749*3446Smrj 		hdl = agp_regdev->agprd_masterhdl;
750*3446Smrj 		err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
751*3446Smrj 		    FKIOCTL, kcred, 0);
752*3446Smrj 		if (err)
753*3446Smrj 			return (-1);
754*3446Smrj 		info->agpki_mdevid = value1.igd_devid;
755*3446Smrj 		info->agpki_aperbase = value1.igd_aperbase;
756*3446Smrj 		info->agpki_apersize = value1.igd_apersize;
757*3446Smrj 
758*3446Smrj 		hdl = agp_regdev->agprd_targethdl;
759*3446Smrj 		err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
760*3446Smrj 		    (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
761*3446Smrj 		if (err)
762*3446Smrj 			return (-1);
763*3446Smrj 
764*3446Smrj 		/*
765*3446Smrj 		 * Assume all units are kilobytes unless explicitly
766*3446Smrj 		 * stated below:
767*3446Smrj 		 * preallocated GTT memory = preallocated memory - GTT size
768*3446Smrj 		 * 	- scratch page size
769*3446Smrj 		 *
770*3446Smrj 		 * scratch page size = 4
771*3446Smrj 		 * GTT size = aperture size (in MBs)
772*3446Smrj 		 * this algorithm came from Xorg source code
773*3446Smrj 		 */
774*3446Smrj 		prealloc_size = prealloc_size - info->agpki_apersize - 4;
775*3446Smrj 		info->agpki_presize = prealloc_size;
776*3446Smrj 		break;
777*3446Smrj 	case ARC_INTELAGP:
778*3446Smrj 	case ARC_AMD64AGP:
779*3446Smrj 		/* AGP devices */
780*3446Smrj 		hdl = agp_regdev->agprd_masterhdl;
781*3446Smrj 		err = ldi_ioctl(hdl, AGP_MASTER_GETINFO,
782*3446Smrj 		    (intptr_t)&value2, FKIOCTL, kcred, 0);
783*3446Smrj 		if (err)
784*3446Smrj 			return (-1);
785*3446Smrj 		info->agpki_mdevid = value2.iagp_devid;
786*3446Smrj 		info->agpki_mver = value2.iagp_ver;
787*3446Smrj 		info->agpki_mstatus = value2.iagp_mode;
788*3446Smrj 		hdl = agp_regdev->agprd_targethdl;
789*3446Smrj 		err = ldi_ioctl(hdl, AGP_TARGET_GETINFO,
790*3446Smrj 		    (intptr_t)&value2, FKIOCTL, kcred, 0);
791*3446Smrj 		if (err)
792*3446Smrj 			return (-1);
793*3446Smrj 		info->agpki_tdevid = value2.iagp_devid;
794*3446Smrj 		info->agpki_tver = value2.iagp_ver;
795*3446Smrj 		info->agpki_tstatus = value2.iagp_mode;
796*3446Smrj 		info->agpki_aperbase = value2.iagp_aperbase;
797*3446Smrj 		info->agpki_apersize = value2.iagp_apersize;
798*3446Smrj 		break;
799*3446Smrj 	case ARC_AMD64NOAGP:
800*3446Smrj 		/* Meaningful for IOMMU support only */
801*3446Smrj 		gart_head = agp_regdev->agprd_cpugarts.gart_dev_list_head;
802*3446Smrj 		err = ldi_ioctl(gart_head->gart_devhdl, AMD64_GET_INFO,
803*3446Smrj 		    (intptr_t)&value3, FKIOCTL, kcred, 0);
804*3446Smrj 		if (err)
805*3446Smrj 			return (-1);
806*3446Smrj 		info->agpki_aperbase = value3.cgart_aperbase;
807*3446Smrj 		info->agpki_apersize = value3.cgart_apersize;
808*3446Smrj 		break;
809*3446Smrj 	default:
810*3446Smrj 		AGPDB_PRINT2((CE_WARN,
811*3446Smrj 		    "lyr_get_info: function doesn't work for unknown arc"));
812*3446Smrj 		return (-1);
813*3446Smrj 	}
814*3446Smrj 	if ((info->agpki_apersize >= MAXAPERMEGAS) ||
815*3446Smrj 	    (info->agpki_apersize == 0) ||
816*3446Smrj 	    (info->agpki_aperbase == 0)) {
817*3446Smrj 		AGPDB_PRINT2((CE_WARN,
818*3446Smrj 		    "lyr_get_info: aperture is not programmed correctly!"));
819*3446Smrj 		return (-1);
820*3446Smrj 	}
821*3446Smrj 
822*3446Smrj 	return (0);
823*3446Smrj }
824*3446Smrj 
825*3446Smrj /*
826*3446Smrj  * lyr_i8xx_add_to_gtt()
827*3446Smrj  *
828*3446Smrj  * Description:
829*3446Smrj  * 	This function sets up the integrated video device gtt table
830*3446Smrj  * 	via an ioclt to the AGP master driver.
831*3446Smrj  *
832*3446Smrj  * Arguments:
833*3446Smrj  * 	pg_offset	The start entry to be setup
834*3446Smrj  * 	keyent		Keytable entity pointer
835*3446Smrj  *	agp_regdev	AGP devices registration struct pointer
836*3446Smrj  *
837*3446Smrj  * Returns:
838*3446Smrj  * 	0		success
839*3446Smrj  * 	-1		invalid operations
840*3446Smrj  */
841*3446Smrj int
842*3446Smrj lyr_i8xx_add_to_gtt(uint32_t pg_offset, keytable_ent_t *keyent,
843*3446Smrj     agp_registered_dev_t *agp_regdev)
844*3446Smrj {
845*3446Smrj 	int err = 0;
846*3446Smrj 	int rval;
847*3446Smrj 	ldi_handle_t hdl;
848*3446Smrj 	igd_gtt_seg_t gttseg;
849*3446Smrj 	uint32_t *addrp, i;
850*3446Smrj 	uint32_t npages;
851*3446Smrj 
852*3446Smrj 	ASSERT(keyent);
853*3446Smrj 	ASSERT(agp_regdev);
854*3446Smrj 	gttseg.igs_pgstart =  pg_offset;
855*3446Smrj 	npages = keyent->kte_pages;
856*3446Smrj 	gttseg.igs_npage = npages;
857*3446Smrj 	gttseg.igs_type = keyent->kte_type;
858*3446Smrj 	gttseg.igs_phyaddr = (uint32_t *)kmem_zalloc
859*3446Smrj 		    (sizeof (uint32_t) * gttseg.igs_npage, KM_SLEEP);
860*3446Smrj 
861*3446Smrj 	addrp = gttseg.igs_phyaddr;
862*3446Smrj 	for (i = 0; i < npages; i++, addrp++) {
863*3446Smrj 		*addrp =
864*3446Smrj 		    (uint32_t)((keyent->kte_pfnarray[i]) << GTT_PAGE_SHIFT);
865*3446Smrj 	}
866*3446Smrj 
867*3446Smrj 	hdl = agp_regdev->agprd_masterhdl;
868*3446Smrj 	if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)&gttseg, FKIOCTL,
869*3446Smrj 	    kcred, &rval)) {
870*3446Smrj 		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: ldi_ioctl error"));
871*3446Smrj 		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pg_start=0x%x",
872*3446Smrj 		    gttseg.igs_pgstart));
873*3446Smrj 		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pages=0x%x",
874*3446Smrj 		    gttseg.igs_npage));
875*3446Smrj 		AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: type=0x%x",
876*3446Smrj 		    gttseg.igs_type));
877*3446Smrj 		err = -1;
878*3446Smrj 	}
879*3446Smrj 	kmem_free(gttseg.igs_phyaddr, sizeof (uint32_t) * gttseg.igs_npage);
880*3446Smrj 	return (err);
881*3446Smrj }
882*3446Smrj 
883*3446Smrj /*
884*3446Smrj  * lyr_i8xx_remove_from_gtt()
885*3446Smrj  *
886*3446Smrj  * Description:
887*3446Smrj  * 	This function clears the integrated video device gtt table via
888*3446Smrj  * 	an ioctl to the agp master device.
889*3446Smrj  *
890*3446Smrj  * Arguments:
891*3446Smrj  * 	pg_offset	The starting entry to be cleared
892*3446Smrj  * 	npage		The number of entries to be cleared
893*3446Smrj  *	agp_regdev	AGP devices struct pointer
894*3446Smrj  *
895*3446Smrj  * Returns:
896*3446Smrj  * 	0		success
897*3446Smrj  * 	-1		invalid operations
898*3446Smrj  */
899*3446Smrj int
900*3446Smrj lyr_i8xx_remove_from_gtt(uint32_t pg_offset, uint32_t npage,
901*3446Smrj     agp_registered_dev_t *agp_regdev)
902*3446Smrj {
903*3446Smrj 	int			rval;
904*3446Smrj 	ldi_handle_t		hdl;
905*3446Smrj 	igd_gtt_seg_t		gttseg;
906*3446Smrj 
907*3446Smrj 	gttseg.igs_pgstart =  pg_offset;
908*3446Smrj 	gttseg.igs_npage = npage;
909*3446Smrj 
910*3446Smrj 	hdl = agp_regdev->agprd_masterhdl;
911*3446Smrj 	if (ldi_ioctl(hdl, I8XX_REM_GTT, (intptr_t)&gttseg, FKIOCTL,
912*3446Smrj 	    kcred, &rval))
913*3446Smrj 		return (-1);
914*3446Smrj 
915*3446Smrj 	return (0);
916*3446Smrj }
917*3446Smrj 
918*3446Smrj /*
919*3446Smrj  * lyr_set_gart_addr()
920*3446Smrj  *
921*3446Smrj  * Description:
922*3446Smrj  *	This function puts the gart table physical address in the
923*3446Smrj  * 	gart base register.
924*3446Smrj  *	Please refer to gart and gtt table base register format for
925*3446Smrj  *	gart base register format in agpdefs.h.
926*3446Smrj  *
927*3446Smrj  * Arguments:
928*3446Smrj  * 	phy_base	The base physical address of gart table
929*3446Smrj  *	agp_regdev	AGP devices registration struct pointer
930*3446Smrj  *
931*3446Smrj  * Returns:
932*3446Smrj  * 	0		success
933*3446Smrj  * 	-1		failed
934*3446Smrj  *
935*3446Smrj  */
936*3446Smrj 
937*3446Smrj int
938*3446Smrj lyr_set_gart_addr(uint64_t phy_base, agp_registered_dev_t *agp_regdev)
939*3446Smrj {
940*3446Smrj 	amd64_gart_dev_list_t	*gart_list;
941*3446Smrj 	ldi_handle_t		hdl;
942*3446Smrj 	int			err = 0;
943*3446Smrj 
944*3446Smrj 	ASSERT(agp_regdev);
945*3446Smrj 	switch (agp_regdev->agprd_arctype) {
946*3446Smrj 	case ARC_IGD810:
947*3446Smrj 	{
948*3446Smrj 		uint32_t base;
949*3446Smrj 
950*3446Smrj 		ASSERT((phy_base & ~I810_POINTER_MASK) == 0);
951*3446Smrj 		base = (uint32_t)phy_base;
952*3446Smrj 
953*3446Smrj 		hdl = agp_regdev->agprd_masterhdl;
954*3446Smrj 		err = ldi_ioctl(hdl, I810_SET_GTT_BASE,
955*3446Smrj 		    (intptr_t)&base, FKIOCTL, kcred, 0);
956*3446Smrj 		break;
957*3446Smrj 	}
958*3446Smrj 	case ARC_INTELAGP:
959*3446Smrj 	{
960*3446Smrj 		uint32_t addr;
961*3446Smrj 		addr = (uint32_t)phy_base;
962*3446Smrj 
963*3446Smrj 		ASSERT((phy_base & ~GTT_POINTER_MASK) == 0);
964*3446Smrj 		hdl = agp_regdev->agprd_targethdl;
965*3446Smrj 		err = ldi_ioctl(hdl, AGP_TARGET_SET_GATTADDR,
966*3446Smrj 		    (intptr_t)&addr, FKIOCTL, kcred, 0);
967*3446Smrj 		break;
968*3446Smrj 	}
969*3446Smrj 	case ARC_AMD64NOAGP:
970*3446Smrj 	case ARC_AMD64AGP:
971*3446Smrj 	{
972*3446Smrj 		uint32_t addr;
973*3446Smrj 
974*3446Smrj 		ASSERT((phy_base & ~AMD64_POINTER_MASK) == 0);
975*3446Smrj 		addr = (uint32_t)((phy_base >> AMD64_GARTBASE_SHIFT)
976*3446Smrj 		    & AMD64_GARTBASE_MASK);
977*3446Smrj 
978*3446Smrj 		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
979*3446Smrj 		    gart_list;
980*3446Smrj 		    gart_list = gart_list->next) {
981*3446Smrj 			hdl = gart_list->gart_devhdl;
982*3446Smrj 			if (ldi_ioctl(hdl, AMD64_SET_GART_ADDR,
983*3446Smrj 			    (intptr_t)&addr, FKIOCTL, kcred, 0)) {
984*3446Smrj 				err = -1;
985*3446Smrj 				break;
986*3446Smrj 			}
987*3446Smrj 		}
988*3446Smrj 		break;
989*3446Smrj 	}
990*3446Smrj 	default:
991*3446Smrj 		err = -1;
992*3446Smrj 	}
993*3446Smrj 
994*3446Smrj 	if (err)
995*3446Smrj 		return (-1);
996*3446Smrj 
997*3446Smrj 	return (0);
998*3446Smrj }
999*3446Smrj 
1000*3446Smrj int
1001*3446Smrj lyr_set_agp_cmd(uint32_t cmd, agp_registered_dev_t *agp_regdev)
1002*3446Smrj {
1003*3446Smrj 	ldi_handle_t hdl;
1004*3446Smrj 	uint32_t command;
1005*3446Smrj 
1006*3446Smrj 	ASSERT(agp_regdev);
1007*3446Smrj 	command = cmd;
1008*3446Smrj 	hdl = agp_regdev->agprd_targethdl;
1009*3446Smrj 	if (ldi_ioctl(hdl, AGP_TARGET_SETCMD,
1010*3446Smrj 	    (intptr_t)&command, FKIOCTL, kcred, 0))
1011*3446Smrj 		return (-1);
1012*3446Smrj 	hdl = agp_regdev->agprd_masterhdl;
1013*3446Smrj 	if (ldi_ioctl(hdl, AGP_MASTER_SETCMD,
1014*3446Smrj 	    (intptr_t)&command, FKIOCTL, kcred, 0))
1015*3446Smrj 		return (-1);
1016*3446Smrj 
1017*3446Smrj 	return (0);
1018*3446Smrj }
1019*3446Smrj 
1020*3446Smrj int
1021*3446Smrj lyr_config_devices(agp_registered_dev_t *agp_regdev)
1022*3446Smrj {
1023*3446Smrj 	amd64_gart_dev_list_t	*gart_list;
1024*3446Smrj 	ldi_handle_t		hdl;
1025*3446Smrj 	int			rc = 0;
1026*3446Smrj 
1027*3446Smrj 	ASSERT(agp_regdev);
1028*3446Smrj 	switch (agp_regdev->agprd_arctype) {
1029*3446Smrj 	case ARC_IGD830:
1030*3446Smrj 	case ARC_IGD810:
1031*3446Smrj 		break;
1032*3446Smrj 	case ARC_INTELAGP:
1033*3446Smrj 	{
1034*3446Smrj 		hdl = agp_regdev->agprd_targethdl;
1035*3446Smrj 		rc = ldi_ioctl(hdl, AGP_TARGET_CONFIGURE,
1036*3446Smrj 		    0, FKIOCTL, kcred, 0);
1037*3446Smrj 		break;
1038*3446Smrj 	}
1039*3446Smrj 	case ARC_AMD64NOAGP:
1040*3446Smrj 	case ARC_AMD64AGP:
1041*3446Smrj 	{
1042*3446Smrj 		/*
1043*3446Smrj 		 * BIOS always shadow registers such like Aperture Base
1044*3446Smrj 		 * register, Aperture Size Register from the AGP bridge
1045*3446Smrj 		 * to the AMD64 CPU host bridge. If future BIOSes are broken
1046*3446Smrj 		 * in this regard, we may need to shadow these registers
1047*3446Smrj 		 * in driver.
1048*3446Smrj 		 */
1049*3446Smrj 
1050*3446Smrj 		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1051*3446Smrj 		    gart_list;
1052*3446Smrj 		    gart_list = gart_list->next) {
1053*3446Smrj 			hdl = gart_list->gart_devhdl;
1054*3446Smrj 			if (ldi_ioctl(hdl, AMD64_CONFIGURE,
1055*3446Smrj 			    0, FKIOCTL, kcred, 0)) {
1056*3446Smrj 				rc = -1;
1057*3446Smrj 				break;
1058*3446Smrj 			}
1059*3446Smrj 		}
1060*3446Smrj 		break;
1061*3446Smrj 	}
1062*3446Smrj 	default:
1063*3446Smrj 		rc = -1;
1064*3446Smrj 	}
1065*3446Smrj 
1066*3446Smrj 	if (rc)
1067*3446Smrj 		return (-1);
1068*3446Smrj 
1069*3446Smrj 	return (0);
1070*3446Smrj }
1071*3446Smrj 
1072*3446Smrj int
1073*3446Smrj lyr_unconfig_devices(agp_registered_dev_t *agp_regdev)
1074*3446Smrj {
1075*3446Smrj 	amd64_gart_dev_list_t	*gart_list;
1076*3446Smrj 	ldi_handle_t		hdl;
1077*3446Smrj 	int			rc = 0;
1078*3446Smrj 
1079*3446Smrj 	ASSERT(agp_regdev);
1080*3446Smrj 	switch (agp_regdev->agprd_arctype) {
1081*3446Smrj 	case ARC_IGD830:
1082*3446Smrj 	case ARC_IGD810:
1083*3446Smrj 	{
1084*3446Smrj 		hdl = agp_regdev->agprd_masterhdl;
1085*3446Smrj 		rc = ldi_ioctl(hdl, I8XX_UNCONFIG, 0, FKIOCTL, kcred, 0);
1086*3446Smrj 		break;
1087*3446Smrj 	}
1088*3446Smrj 	case ARC_INTELAGP:
1089*3446Smrj 	{
1090*3446Smrj 		hdl = agp_regdev->agprd_targethdl;
1091*3446Smrj 		rc = ldi_ioctl(hdl, AGP_TARGET_UNCONFIG,
1092*3446Smrj 		    0, FKIOCTL, kcred, 0);
1093*3446Smrj 		break;
1094*3446Smrj 	}
1095*3446Smrj 	case ARC_AMD64NOAGP:
1096*3446Smrj 	case ARC_AMD64AGP:
1097*3446Smrj 	{
1098*3446Smrj 		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1099*3446Smrj 		    gart_list; gart_list = gart_list->next) {
1100*3446Smrj 			hdl = gart_list->gart_devhdl;
1101*3446Smrj 			if (ldi_ioctl(hdl, AMD64_UNCONFIG,
1102*3446Smrj 			    0, FKIOCTL, kcred, 0)) {
1103*3446Smrj 				rc = -1;
1104*3446Smrj 				break;
1105*3446Smrj 			}
1106*3446Smrj 		}
1107*3446Smrj 		break;
1108*3446Smrj 	}
1109*3446Smrj 	default:
1110*3446Smrj 		rc = -1;
1111*3446Smrj 	}
1112*3446Smrj 
1113*3446Smrj 	if (rc)
1114*3446Smrj 		return (-1);
1115*3446Smrj 
1116*3446Smrj 	return (0);
1117*3446Smrj }
1118*3446Smrj 
1119*3446Smrj /*
1120*3446Smrj  * lyr_flush_gart_cache()
1121*3446Smrj  *
1122*3446Smrj  * Description:
1123*3446Smrj  * 	This function flushes the GART translation look-aside buffer. All
1124*3446Smrj  * 	GART translation caches will be flushed after this operation.
1125*3446Smrj  *
1126*3446Smrj  * Arguments:
1127*3446Smrj  *	agp_regdev	AGP devices struct pointer
1128*3446Smrj  */
1129*3446Smrj void
1130*3446Smrj lyr_flush_gart_cache(agp_registered_dev_t *agp_regdev)
1131*3446Smrj {
1132*3446Smrj 	amd64_gart_dev_list_t	*gart_list;
1133*3446Smrj 	ldi_handle_t		hdl;
1134*3446Smrj 
1135*3446Smrj 	ASSERT(agp_regdev);
1136*3446Smrj 	if ((agp_regdev->agprd_arctype == ARC_AMD64AGP) ||
1137*3446Smrj 	    (agp_regdev->agprd_arctype == ARC_AMD64NOAGP)) {
1138*3446Smrj 		for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1139*3446Smrj 		    gart_list; gart_list = gart_list->next) {
1140*3446Smrj 			hdl = gart_list->gart_devhdl;
1141*3446Smrj 			(void) ldi_ioctl(hdl, AMD64_FLUSH_GTLB,
1142*3446Smrj 			    0, FKIOCTL, kcred, 0);
1143*3446Smrj 		}
1144*3446Smrj 	} else if (agp_regdev->agprd_arctype == ARC_INTELAGP) {
1145*3446Smrj 		hdl = agp_regdev->agprd_targethdl;
1146*3446Smrj 		(void) ldi_ioctl(hdl, AGP_TARGET_FLUSH_GTLB, 0,
1147*3446Smrj 		    FKIOCTL, kcred, 0);
1148*3446Smrj 	}
1149*3446Smrj }
1150*3446Smrj 
1151*3446Smrj /*
1152*3446Smrj  * get_max_pages()
1153*3446Smrj  *
1154*3446Smrj  * Description:
1155*3446Smrj  * 	This function compute the total pages allowed for agp aperture
1156*3446Smrj  *	based on the ammount of physical pages.
1157*3446Smrj  * 	The algorithm is: compare the aperture size with 1/4 of total
1158*3446Smrj  *	physical pages, and use the smaller one to for the max available
1159*3446Smrj  * 	pages.
1160*3446Smrj  *
1161*3446Smrj  * Arguments:
1162*3446Smrj  * 	aper_size	system agp aperture size (in MB)
1163*3446Smrj  *
1164*3446Smrj  * Returns:
1165*3446Smrj  * 	The max possible number of agp memory pages available to users
1166*3446Smrj  */
1167*3446Smrj static uint32_t
1168*3446Smrj get_max_pages(uint32_t aper_size)
1169*3446Smrj {
1170*3446Smrj 	uint32_t i, j;
1171*3446Smrj 
1172*3446Smrj 	ASSERT(aper_size <= MAXAPERMEGAS);
1173*3446Smrj 
1174*3446Smrj 	i = AGP_MB2PAGES(aper_size);
1175*3446Smrj 	j = (physmem >> 2);
1176*3446Smrj 
1177*3446Smrj 	return ((i < j) ? i : j);
1178*3446Smrj }
1179*3446Smrj 
1180*3446Smrj /*
1181*3446Smrj  * agp_fill_empty_keyent()
1182*3446Smrj  *
1183*3446Smrj  * Description:
1184*3446Smrj  * 	This function finds a empty key table slot and
1185*3446Smrj  * 	fills it with a new entity.
1186*3446Smrj  *
1187*3446Smrj  * Arguments:
1188*3446Smrj  * 	softsate	driver soft state pointer
1189*3446Smrj  * 	entryp		new entity data pointer
1190*3446Smrj  *
1191*3446Smrj  * Returns:
1192*3446Smrj  * 	NULL	no key table slot available
1193*3446Smrj  * 	entryp	the new entity slot pointer
1194*3446Smrj  */
1195*3446Smrj static keytable_ent_t *
1196*3446Smrj agp_fill_empty_keyent(agpgart_softstate_t *softstate, keytable_ent_t *entryp)
1197*3446Smrj {
1198*3446Smrj 	int key;
1199*3446Smrj 	keytable_ent_t *newentryp;
1200*3446Smrj 
1201*3446Smrj 	ASSERT(softstate);
1202*3446Smrj 	ASSERT(entryp);
1203*3446Smrj 	ASSERT(entryp->kte_memhdl);
1204*3446Smrj 	ASSERT(entryp->kte_pfnarray);
1205*3446Smrj 	ASSERT(mutex_owned(&softstate->asoft_instmutex));
1206*3446Smrj 
1207*3446Smrj 	for (key = 0; key < AGP_MAXKEYS; key++) {
1208*3446Smrj 		newentryp = &softstate->asoft_table[key];
1209*3446Smrj 		if (newentryp->kte_memhdl == NULL) {
1210*3446Smrj 			break;
1211*3446Smrj 		}
1212*3446Smrj 	}
1213*3446Smrj 
1214*3446Smrj 	if (key >= AGP_MAXKEYS) {
1215*3446Smrj 		AGPDB_PRINT2((CE_WARN,
1216*3446Smrj 		    "agp_fill_empty_keyent: key table exhausted"));
1217*3446Smrj 		return (NULL);
1218*3446Smrj 	}
1219*3446Smrj 
1220*3446Smrj 	ASSERT(newentryp->kte_pfnarray == NULL);
1221*3446Smrj 	bcopy(entryp, newentryp, sizeof (keytable_ent_t));
1222*3446Smrj 	newentryp->kte_key = key;
1223*3446Smrj 
1224*3446Smrj 	return (newentryp);
1225*3446Smrj }
1226*3446Smrj 
1227*3446Smrj /*
1228*3446Smrj  * agp_find_bound_keyent()
1229*3446Smrj  *
1230*3446Smrj  * Description:
1231*3446Smrj  * 	This function finds the key table entity by agp aperture page offset.
1232*3446Smrj  * 	Every keytable entity will have an agp aperture range after the binding
1233*3446Smrj  *	operation.
1234*3446Smrj  *
1235*3446Smrj  * Arguments:
1236*3446Smrj  * 	softsate	driver soft state pointer
1237*3446Smrj  * 	pg_offset	agp aperture page offset
1238*3446Smrj  *
1239*3446Smrj  * Returns:
1240*3446Smrj  * 	NULL		no such keytable entity
1241*3446Smrj  * 	pointer		key table entity pointer found
1242*3446Smrj  */
1243*3446Smrj static keytable_ent_t *
1244*3446Smrj agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset)
1245*3446Smrj {
1246*3446Smrj 	int keycount;
1247*3446Smrj 	keytable_ent_t *entryp;
1248*3446Smrj 
1249*3446Smrj 	ASSERT(softstate);
1250*3446Smrj 	ASSERT(mutex_owned(&softstate->asoft_instmutex));
1251*3446Smrj 
1252*3446Smrj 	for (keycount = 0; keycount < AGP_MAXKEYS; keycount++) {
1253*3446Smrj 		entryp = &softstate->asoft_table[keycount];
1254*3446Smrj 		if (entryp->kte_bound == 0) {
1255*3446Smrj 			continue;
1256*3446Smrj 		}
1257*3446Smrj 
1258*3446Smrj 		if (pg_offset < entryp->kte_pgoff)
1259*3446Smrj 			continue;
1260*3446Smrj 		if (pg_offset >= (entryp->kte_pgoff + entryp->kte_pages))
1261*3446Smrj 			continue;
1262*3446Smrj 
1263*3446Smrj 		ASSERT(entryp->kte_memhdl);
1264*3446Smrj 		ASSERT(entryp->kte_pfnarray);
1265*3446Smrj 
1266*3446Smrj 		return (entryp);
1267*3446Smrj 	}
1268*3446Smrj 
1269*3446Smrj 	return (NULL);
1270*3446Smrj }
1271*3446Smrj 
1272*3446Smrj /*
1273*3446Smrj  * agp_check_off()
1274*3446Smrj  *
1275*3446Smrj  * Description:
1276*3446Smrj  * 	This function checks whether an AGP aperture range to be bound
1277*3446Smrj  *	overlaps with AGP offset already bound.
1278*3446Smrj  *
1279*3446Smrj  * Arguments:
1280*3446Smrj  *	entryp		key table start entry pointer
1281*3446Smrj  * 	pg_start	AGP range start page offset
1282*3446Smrj  *	pg_num		pages number to be bound
1283*3446Smrj  *
1284*3446Smrj  * Returns:
1285*3446Smrj  *	0		Does not overlap
1286*3446Smrj  *	-1		Overlaps
1287*3446Smrj  */
1288*3446Smrj 
1289*3446Smrj static int
1290*3446Smrj agp_check_off(keytable_ent_t *entryp, uint32_t pg_start, uint32_t pg_num)
1291*3446Smrj {
1292*3446Smrj 	int key;
1293*3446Smrj 	uint64_t pg_end;
1294*3446Smrj 	uint64_t kpg_end;
1295*3446Smrj 
1296*3446Smrj 	ASSERT(entryp);
1297*3446Smrj 
1298*3446Smrj 	pg_end = pg_start + pg_num;
1299*3446Smrj 	for (key = 0; key < AGP_MAXKEYS; key++) {
1300*3446Smrj 		if (!entryp[key].kte_bound)
1301*3446Smrj 			continue;
1302*3446Smrj 
1303*3446Smrj 		kpg_end = entryp[key].kte_pgoff + entryp[key].kte_pages;
1304*3446Smrj 		if (!((pg_end <= entryp[key].kte_pgoff) ||
1305*3446Smrj 		    (pg_start >= kpg_end)))
1306*3446Smrj 			break;
1307*3446Smrj 	}
1308*3446Smrj 
1309*3446Smrj 	if (key == AGP_MAXKEYS)
1310*3446Smrj 		return (0);
1311*3446Smrj 	else
1312*3446Smrj 		return (-1);
1313*3446Smrj }
1314*3446Smrj 
1315*3446Smrj static int
1316*3446Smrj is_controlling_proc(agpgart_softstate_t *st)
1317*3446Smrj {
1318*3446Smrj 	ASSERT(st);
1319*3446Smrj 
1320*3446Smrj 	if (!st->asoft_acquired) {
1321*3446Smrj 		AGPDB_PRINT2((CE_WARN,
1322*3446Smrj 		    "ioctl_agpgart_setup: gart not acquired"));
1323*3446Smrj 		return (-1);
1324*3446Smrj 	}
1325*3446Smrj 	if (st->asoft_curpid != ddi_get_pid()) {
1326*3446Smrj 		AGPDB_PRINT2((CE_WARN,
1327*3446Smrj 		    "ioctl_agpgart_release: not  controlling process"));
1328*3446Smrj 		return (-1);
1329*3446Smrj 	}
1330*3446Smrj 
1331*3446Smrj 	return (0);
1332*3446Smrj }
1333*3446Smrj 
1334*3446Smrj static void release_control(agpgart_softstate_t *st)
1335*3446Smrj {
1336*3446Smrj 	st->asoft_curpid = 0;
1337*3446Smrj 	st->asoft_acquired = 0;
1338*3446Smrj }
1339*3446Smrj 
1340*3446Smrj static void acquire_control(agpgart_softstate_t *st)
1341*3446Smrj {
1342*3446Smrj 	st->asoft_curpid = ddi_get_pid();
1343*3446Smrj 	st->asoft_acquired = 1;
1344*3446Smrj }
1345*3446Smrj 
1346*3446Smrj /*
1347*3446Smrj  * agp_remove_from_gart()
1348*3446Smrj  *
1349*3446Smrj  * Description:
1350*3446Smrj  * 	This function fills the gart table entries by a given page
1351*3446Smrj  * 	frame number array and setup the agp aperture page to physical
1352*3446Smrj  * 	memory page translation.
1353*3446Smrj  * Arguments:
1354*3446Smrj  * 	pg_offset	Starting aperture page to be bound
1355*3446Smrj  * 	entries		the number of pages to be bound
1356*3446Smrj  * 	acc_hdl		GART table dma memory acc handle
1357*3446Smrj  * 	tablep		GART table kernel virtual address
1358*3446Smrj  */
1359*3446Smrj static void
1360*3446Smrj agp_remove_from_gart(
1361*3446Smrj     uint32_t pg_offset,
1362*3446Smrj     uint32_t entries,
1363*3446Smrj     ddi_dma_handle_t dma_hdl,
1364*3446Smrj     uint32_t *tablep)
1365*3446Smrj {
1366*3446Smrj 	uint32_t items = 0;
1367*3446Smrj 	uint32_t *entryp;
1368*3446Smrj 
1369*3446Smrj 	entryp = tablep + pg_offset;
1370*3446Smrj 	while (items < entries) {
1371*3446Smrj 		*(entryp + items) = 0;
1372*3446Smrj 		items++;
1373*3446Smrj 	}
1374*3446Smrj 	(void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
1375*3446Smrj 	    entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);
1376*3446Smrj }
1377*3446Smrj 
1378*3446Smrj /*
1379*3446Smrj  * agp_unbind_key()
1380*3446Smrj  *
1381*3446Smrj  * Description:
1382*3446Smrj  * 	This function unbinds AGP memory from the gart table. It will clear
1383*3446Smrj  * 	all the gart entries related to this agp memory.
1384*3446Smrj  *
1385*3446Smrj  * Arguments:
1386*3446Smrj  * 	softstate		driver soft state pointer
1387*3446Smrj  * 	entryp			key table entity pointer
1388*3446Smrj  *
1389*3446Smrj  * Returns:
1390*3446Smrj  * 	EINVAL		invalid key table entity pointer
1391*3446Smrj  * 	0		success
1392*3446Smrj  *
1393*3446Smrj  */
1394*3446Smrj static int
1395*3446Smrj agp_unbind_key(agpgart_softstate_t *softstate, keytable_ent_t *entryp)
1396*3446Smrj {
1397*3446Smrj 	int retval = 0;
1398*3446Smrj 
1399*3446Smrj 	ASSERT(entryp);
1400*3446Smrj 	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
1401*3446Smrj 
1402*3446Smrj 	if (!entryp->kte_bound) {
1403*3446Smrj 		AGPDB_PRINT2((CE_WARN,
1404*3446Smrj 		    "agp_unbind_key: key = 0x%x, not bound",
1405*3446Smrj 		    entryp->kte_key));
1406*3446Smrj 		return (EINVAL);
1407*3446Smrj 	}
1408*3446Smrj 	if (entryp->kte_refcnt) {
1409*3446Smrj 		AGPDB_PRINT2((CE_WARN,
1410*3446Smrj 		    "agp_unbind_key: memory is exported to users"));
1411*3446Smrj 		return (EINVAL);
1412*3446Smrj 	}
1413*3446Smrj 
1414*3446Smrj 	ASSERT((entryp->kte_pgoff + entryp->kte_pages) <=
1415*3446Smrj 	    AGP_MB2PAGES(softstate->asoft_info.agpki_apersize));
1416*3446Smrj 	ASSERT((softstate->asoft_devreg.agprd_arctype != ARC_UNKNOWN));
1417*3446Smrj 
1418*3446Smrj 	switch (softstate->asoft_devreg.agprd_arctype) {
1419*3446Smrj 	case ARC_IGD810:
1420*3446Smrj 	case ARC_IGD830:
1421*3446Smrj 		retval = lyr_i8xx_remove_from_gtt(
1422*3446Smrj 		    entryp->kte_pgoff, entryp->kte_pages,
1423*3446Smrj 		    &softstate->asoft_devreg);
1424*3446Smrj 		if (retval) {
1425*3446Smrj 			AGPDB_PRINT2((CE_WARN,
1426*3446Smrj 			    "agp_unbind_key: Key = 0x%x, clear table error",
1427*3446Smrj 			    entryp->kte_key));
1428*3446Smrj 			return (EIO);
1429*3446Smrj 		}
1430*3446Smrj 		break;
1431*3446Smrj 	case ARC_INTELAGP:
1432*3446Smrj 	case ARC_AMD64NOAGP:
1433*3446Smrj 	case ARC_AMD64AGP:
1434*3446Smrj 		agp_remove_from_gart(entryp->kte_pgoff,
1435*3446Smrj 		    entryp->kte_pages,
1436*3446Smrj 		    softstate->gart_dma_handle,
1437*3446Smrj 		    (uint32_t *)softstate->gart_vbase);
1438*3446Smrj 		/* Flush GTLB table */
1439*3446Smrj 		lyr_flush_gart_cache(&softstate->asoft_devreg);
1440*3446Smrj 
1441*3446Smrj 		break;
1442*3446Smrj 	}
1443*3446Smrj 
1444*3446Smrj 	entryp->kte_bound = 0;
1445*3446Smrj 
1446*3446Smrj 	return (0);
1447*3446Smrj }
1448*3446Smrj 
1449*3446Smrj /*
1450*3446Smrj  * agp_dealloc_kmem()
1451*3446Smrj  *
1452*3446Smrj  * Description:
1453*3446Smrj  * 	This function deallocates dma memory resources for userland
1454*3446Smrj  * 	applications.
1455*3446Smrj  *
1456*3446Smrj  * Arguments:
1457*3446Smrj  * 	entryp		keytable entity pointer
1458*3446Smrj  */
1459*3446Smrj static void
1460*3446Smrj agp_dealloc_kmem(keytable_ent_t *entryp)
1461*3446Smrj {
1462*3446Smrj 	kmem_free(entryp->kte_pfnarray, sizeof (pfn_t) * entryp->kte_pages);
1463*3446Smrj 	entryp->kte_pfnarray = NULL;
1464*3446Smrj 
1465*3446Smrj 	(void) ddi_dma_unbind_handle(KMEMP(entryp->kte_memhdl)->kmem_handle);
1466*3446Smrj 	KMEMP(entryp->kte_memhdl)->kmem_cookies_num = 0;
1467*3446Smrj 	ddi_dma_mem_free(&KMEMP(entryp->kte_memhdl)->kmem_acchdl);
1468*3446Smrj 	KMEMP(entryp->kte_memhdl)->kmem_acchdl = NULL;
1469*3446Smrj 	KMEMP(entryp->kte_memhdl)->kmem_reallen = 0;
1470*3446Smrj 	KMEMP(entryp->kte_memhdl)->kmem_kvaddr = NULL;
1471*3446Smrj 
1472*3446Smrj 	ddi_dma_free_handle(&(KMEMP(entryp->kte_memhdl)->kmem_handle));
1473*3446Smrj 	KMEMP(entryp->kte_memhdl)->kmem_handle = NULL;
1474*3446Smrj 
1475*3446Smrj 	kmem_free(entryp->kte_memhdl, sizeof (agp_kmem_handle_t));
1476*3446Smrj 	entryp->kte_memhdl = NULL;
1477*3446Smrj }
1478*3446Smrj 
1479*3446Smrj /*
1480*3446Smrj  * agp_dealloc_pmem()
1481*3446Smrj  *
1482*3446Smrj  * Description:
1483*3446Smrj  * 	This function deallocates memory resource for direct mapping to
1484*3446Smrj  * 	userland applications.
1485*3446Smrj  *
1486*3446Smrj  * Arguments:
1487*3446Smrj  * 	entryp		key table entity pointer
1488*3446Smrj  *
1489*3446Smrj  */
1490*3446Smrj static void
1491*3446Smrj agp_dealloc_pmem(keytable_ent_t *entryp)
1492*3446Smrj {
1493*3446Smrj 	devmap_pmem_free(PMEMP(entryp->kte_memhdl)->pmem_cookie);
1494*3446Smrj 	PMEMP(entryp->kte_memhdl)->pmem_cookie = NULL;
1495*3446Smrj 	kmem_free(entryp->kte_memhdl, sizeof (agp_pmem_handle_t));
1496*3446Smrj 	entryp->kte_memhdl = NULL;
1497*3446Smrj 
1498*3446Smrj 	/* free the page frame number array */
1499*3446Smrj 	kmem_free(entryp->kte_pfnarray, sizeof (pfn_t) * entryp->kte_pages);
1500*3446Smrj 	entryp->kte_pfnarray = NULL;
1501*3446Smrj }
1502*3446Smrj 
1503*3446Smrj /*
1504*3446Smrj  * agp_dealloc_mem()
1505*3446Smrj  *
1506*3446Smrj  * Description:
1507*3446Smrj  * 	This function deallocates physical memory resources allocated for
1508*3446Smrj  *	userland applications.
1509*3446Smrj  *
1510*3446Smrj  * Arguments:
1511*3446Smrj  * 	st		driver soft state pointer
1512*3446Smrj  * 	entryp		key table entity pointer
1513*3446Smrj  *
1514*3446Smrj  * Returns:
1515*3446Smrj  * 	-1		not a valid memory type or the memory is mapped by
1516*3446Smrj  * 			user area applications
1517*3446Smrj  * 	0		success
1518*3446Smrj  */
1519*3446Smrj static int
1520*3446Smrj agp_dealloc_mem(agpgart_softstate_t *st, keytable_ent_t	*entryp)
1521*3446Smrj {
1522*3446Smrj 
1523*3446Smrj 	ASSERT(entryp);
1524*3446Smrj 	ASSERT(st);
1525*3446Smrj 	ASSERT(entryp->kte_memhdl);
1526*3446Smrj 	ASSERT(mutex_owned(&st->asoft_instmutex));
1527*3446Smrj 
1528*3446Smrj 	/* auto unbind here */
1529*3446Smrj 	if (entryp->kte_bound && !entryp->kte_refcnt) {
1530*3446Smrj 		AGPDB_PRINT2((CE_WARN,
1531*3446Smrj 		    "agp_dealloc_mem: key=0x%x, auto unbind",
1532*3446Smrj 		    entryp->kte_key));
1533*3446Smrj 
1534*3446Smrj 		/*
1535*3446Smrj 		 * agp_dealloc_mem may be called indirectly by agp_detach.
1536*3446Smrj 		 * In the agp_detach function, agpgart_close is already
1537*3446Smrj 		 * called which will free the gart table. agp_unbind_key
1538*3446Smrj 		 * will panic if no valid gart table exists. So test if
1539*3446Smrj 		 * gart table exsits here.
1540*3446Smrj 		 */
1541*3446Smrj 		if (st->asoft_opened)
1542*3446Smrj 		    (void) agp_unbind_key(st, entryp);
1543*3446Smrj 	}
1544*3446Smrj 	if (entryp->kte_refcnt) {
1545*3446Smrj 		AGPDB_PRINT2((CE_WARN,
1546*3446Smrj 		    "agp_dealloc_pmem: memory is exported to users"));
1547*3446Smrj 		return (-1);
1548*3446Smrj 	}
1549*3446Smrj 
1550*3446Smrj 	switch (entryp->kte_type) {
1551*3446Smrj 	case AGP_NORMAL:
1552*3446Smrj 		agp_dealloc_pmem(entryp);
1553*3446Smrj 		break;
1554*3446Smrj 	case AGP_PHYSICAL:
1555*3446Smrj 		agp_dealloc_kmem(entryp);
1556*3446Smrj 		break;
1557*3446Smrj 	default:
1558*3446Smrj 		return (-1);
1559*3446Smrj 	}
1560*3446Smrj 
1561*3446Smrj 	return (0);
1562*3446Smrj }
1563*3446Smrj 
1564*3446Smrj /*
1565*3446Smrj  * agp_del_allkeys()
1566*3446Smrj  *
1567*3446Smrj  * Description:
1568*3446Smrj  * 	This function calls agp_dealloc_mem to release all the agp memory
1569*3446Smrj  *	resource allocated.
1570*3446Smrj  *
1571*3446Smrj  * Arguments:
1572*3446Smrj  * 	softsate	driver soft state pointer
1573*3446Smrj  * Returns:
1574*3446Smrj  * 	-1		can not free all agp memory
1575*3446Smrj  * 	0		success
1576*3446Smrj  *
1577*3446Smrj  */
1578*3446Smrj static int
1579*3446Smrj agp_del_allkeys(agpgart_softstate_t *softstate)
1580*3446Smrj {
1581*3446Smrj 	int key;
1582*3446Smrj 	int ret = 0;
1583*3446Smrj 
1584*3446Smrj 	ASSERT(softstate);
1585*3446Smrj 	for (key = 0; key < AGP_MAXKEYS; key++) {
1586*3446Smrj 		if (softstate->asoft_table[key].kte_memhdl != NULL) {
1587*3446Smrj 			/*
1588*3446Smrj 			 * Check if we can free agp memory now.
1589*3446Smrj 			 * If agp memory is exported to user
1590*3446Smrj 			 * applications, agp_dealloc_mem will fail.
1591*3446Smrj 			 */
1592*3446Smrj 			if (agp_dealloc_mem(softstate,
1593*3446Smrj 			    &softstate->asoft_table[key]))
1594*3446Smrj 				ret = -1;
1595*3446Smrj 		}
1596*3446Smrj 	}
1597*3446Smrj 
1598*3446Smrj 	return (ret);
1599*3446Smrj }
1600*3446Smrj 
/*
 * pfn2gartentry()
 *
 * Description:
 *	This function converts a page frame number to a GART/GTT entry.
 *	For AMD64, hardware only support addresses below 40bits,
 *	about 1024G physical address, so the largest pfn
 *	number is below 28 bits. Please refer to GART and GTT entry
 *	format table in agpdefs.h for entry format. Intel IGD only
 * 	only supports GTT entry below 1G. Intel AGP only supports
 * 	GART entry below 4G.
 *
 *	For the IGD architectures (ARC_IGD810/ARC_IGD830) the function
 *	only range-checks the physical address; *itemv is left untouched
 *	because the master driver programs the GTT itself (see
 *	lyr_i8xx_add_to_gtt).
 *
 * Arguments:
 * 	arc_type		system agp arc type
 * 	pfn			page frame number
 * 	itemv			the entry item to be returned
 * Returns:
 * 	-1			invalid page frame (out of range for
 *				this architecture) or unknown arc type
 * 	0			conversion success
 */
static int
pfn2gartentry(agp_arc_type_t arc_type, pfn_t pfn, uint32_t *itemv)
{
	uint64_t paddr;

	/*
	 * NOTE(review): the shift is done in pfn_t width; assumes pfn_t
	 * is wide enough to hold the full physical address — confirm on
	 * 32-bit kernels.
	 */
	paddr = pfn<<AGP_PAGE_SHIFT;

	switch (arc_type) {
	case ARC_INTELAGP:
	{
		/* Only support 32-bit hardware address */
		if ((paddr & ~AGP_INTEL_POINTER_MASK) != 0) {
			AGPDB_PRINT2((CE_WARN,
			    "INTEL AGP Hardware only support 32 bits"));
			return (-1);
		}
		*itemv =  (pfn << AGP_PAGE_SHIFT) | AGP_ENTRY_VALID;

		break;
	}
	case ARC_AMD64NOAGP:
	case ARC_AMD64AGP:
	{
		uint32_t value1, value2;
		/* Physaddr should not exceed 40-bit */
		if ((paddr & ~AMD64_POINTER_MASK) != 0) {
			AGPDB_PRINT2((CE_WARN,
			    "AMD64 GART hardware only supoort 40 bits"));
			return (-1);
		}
		/*
		 * Split the (at most 28-bit) pfn across the AMD64 GART
		 * entry fields: value1 carries the high pfn bits (physical
		 * address bits 39:32) shifted into entry bits 11:4, value2
		 * carries the low pfn bits (physical address bits 31:12)
		 * in entry bits 31:12 — see the entry format in agpdefs.h.
		 */
		value1 = (uint32_t)pfn >> 20;
		value1 <<= 4;
		value2 = (uint32_t)pfn << 12;

		*itemv = value1 | value2 | AMD64_ENTRY_VALID;
		break;
	}
	case ARC_IGD810:
		/* Range check only; the master driver builds the entry */
		if ((paddr & ~I810_POINTER_MASK) != 0) {
			AGPDB_PRINT2((CE_WARN,
			    "Intel i810 only support 30 bits"));
			return (-1);
		}
		break;

	case ARC_IGD830:
		/* Range check only; the master driver builds the entry */
		if ((paddr & ~GTT_POINTER_MASK) != 0) {
			AGPDB_PRINT2((CE_WARN,
			    "Intel IGD only support 32 bits"));
			return (-1);
		}
		break;
	default:
		AGPDB_PRINT2((CE_WARN,
		    "pfn2gartentry: arc type = %d, not support", arc_type));
		return (-1);
	}
	return (0);
}
1680*3446Smrj 
1681*3446Smrj /*
1682*3446Smrj  * Check allocated physical pages validity, only called in DEBUG
1683*3446Smrj  * mode.
1684*3446Smrj  */
1685*3446Smrj static int
1686*3446Smrj agp_check_pfns(agp_arc_type_t arc_type, pfn_t *pfnarray, int items)
1687*3446Smrj {
1688*3446Smrj 	int count;
1689*3446Smrj 	uint32_t ret;
1690*3446Smrj 
1691*3446Smrj 	for (count = 0; count < items; count++) {
1692*3446Smrj 		if (pfn2gartentry(arc_type, pfnarray[count], &ret))
1693*3446Smrj 			break;
1694*3446Smrj 	}
1695*3446Smrj 	if (count < items)
1696*3446Smrj 		return (-1);
1697*3446Smrj 	else
1698*3446Smrj 		return (0);
1699*3446Smrj }
1700*3446Smrj 
1701*3446Smrj /*
1702*3446Smrj  * kmem_getpfns()
1703*3446Smrj  *
1704*3446Smrj  * Description:
1705*3446Smrj  * 	This function gets page frame numbers from dma handle.
1706*3446Smrj  *
1707*3446Smrj  * Arguments:
1708*3446Smrj  * 	dma_handle		dma hanle allocated by ddi_dma_alloc_handle
1709*3446Smrj  * 	dma_cookip		dma cookie pointer
1710*3446Smrj  * 	cookies_num		cookies number
1711*3446Smrj  * 	pfnarray		array to store page frames
1712*3446Smrj  *
1713*3446Smrj  * Returns:
1714*3446Smrj  *	0		success
1715*3446Smrj  */
1716*3446Smrj static int
1717*3446Smrj kmem_getpfns(
1718*3446Smrj     ddi_dma_handle_t dma_handle,
1719*3446Smrj     ddi_dma_cookie_t *dma_cookiep,
1720*3446Smrj     int cookies_num,
1721*3446Smrj     pfn_t *pfnarray)
1722*3446Smrj {
1723*3446Smrj 	int	num_cookies;
1724*3446Smrj 	int	index = 0;
1725*3446Smrj 
1726*3446Smrj 	num_cookies = cookies_num;
1727*3446Smrj 
1728*3446Smrj 	while (num_cookies > 0) {
1729*3446Smrj 		uint64_t ck_startaddr, ck_length, ck_end;
1730*3446Smrj 		ck_startaddr = dma_cookiep->dmac_address;
1731*3446Smrj 		ck_length = dma_cookiep->dmac_size;
1732*3446Smrj 
1733*3446Smrj 		ck_end = ck_startaddr + ck_length;
1734*3446Smrj 		while (ck_startaddr < ck_end) {
1735*3446Smrj 			pfnarray[index] = (pfn_t)ck_startaddr >> AGP_PAGE_SHIFT;
1736*3446Smrj 			ck_startaddr += AGP_PAGE_SIZE;
1737*3446Smrj 			index++;
1738*3446Smrj 		}
1739*3446Smrj 
1740*3446Smrj 		num_cookies--;
1741*3446Smrj 		if (num_cookies > 0) {
1742*3446Smrj 			ddi_dma_nextcookie(dma_handle, dma_cookiep);
1743*3446Smrj 		}
1744*3446Smrj 	}
1745*3446Smrj 
1746*3446Smrj 	return (0);
1747*3446Smrj }
1748*3446Smrj 
1749*3446Smrj static int
1750*3446Smrj copyinfo(agpgart_softstate_t *softstate, agp_info_t *info)
1751*3446Smrj {
1752*3446Smrj 	switch (softstate->asoft_devreg.agprd_arctype) {
1753*3446Smrj 	case ARC_IGD810:
1754*3446Smrj 	case ARC_IGD830:
1755*3446Smrj 		info->agpi_version.agpv_major = 0;
1756*3446Smrj 		info->agpi_version.agpv_minor = 0;
1757*3446Smrj 		info->agpi_devid = softstate->asoft_info.agpki_mdevid;
1758*3446Smrj 		info->agpi_mode = 0;
1759*3446Smrj 		break;
1760*3446Smrj 	case ARC_INTELAGP:
1761*3446Smrj 	case ARC_AMD64AGP:
1762*3446Smrj 		info->agpi_version = softstate->asoft_info.agpki_tver;
1763*3446Smrj 		info->agpi_devid = softstate->asoft_info.agpki_tdevid;
1764*3446Smrj 		info->agpi_mode = softstate->asoft_info.agpki_tstatus;
1765*3446Smrj 		break;
1766*3446Smrj 	case ARC_AMD64NOAGP:
1767*3446Smrj 		break;
1768*3446Smrj 	default:
1769*3446Smrj 		AGPDB_PRINT2((CE_WARN, "copyinfo: UNKNOW ARC"));
1770*3446Smrj 		return (-1);
1771*3446Smrj 	}
1772*3446Smrj 	/*
1773*3446Smrj 	 * 64bit->32bit conversion possible
1774*3446Smrj 	 */
1775*3446Smrj 	info->agpi_aperbase = softstate->asoft_info.agpki_aperbase;
1776*3446Smrj 	info->agpi_apersize = softstate->asoft_info.agpki_apersize;
1777*3446Smrj 	info->agpi_pgtotal = softstate->asoft_pgtotal;
1778*3446Smrj 	info->agpi_pgsystem = info->agpi_pgtotal;
1779*3446Smrj 	info->agpi_pgused = softstate->asoft_pgused;
1780*3446Smrj 
1781*3446Smrj 	return (0);
1782*3446Smrj }
1783*3446Smrj 
1784*3446Smrj static uint32_t
1785*3446Smrj agp_v2_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode)
1786*3446Smrj {
1787*3446Smrj 	uint32_t cmd;
1788*3446Smrj 	int rq, sba, over4g, fw, rate;
1789*3446Smrj 
1790*3446Smrj 	/*
1791*3446Smrj 	 * tstatus: target device status
1792*3446Smrj 	 * mstatus: master device status
1793*3446Smrj 	 * mode: the agp mode to be sent
1794*3446Smrj 	 */
1795*3446Smrj 
1796*3446Smrj 	/*
1797*3446Smrj 	 * RQ - Request Queue size
1798*3446Smrj 	 * set RQ to the min of mode and tstatus
1799*3446Smrj 	 * if mode set a RQ larger than hardware can support,
1800*3446Smrj 	 * use the max RQ which hardware can support.
1801*3446Smrj 	 * tstatus & AGPSTAT_RQ_MASK is the max RQ hardware can support
1802*3446Smrj 	 * Corelogic will enqueue agp transaction
1803*3446Smrj 	 */
1804*3446Smrj 	rq = mode & AGPSTAT_RQ_MASK;
1805*3446Smrj 	if ((tstatus & AGPSTAT_RQ_MASK) < rq)
1806*3446Smrj 		rq = tstatus & AGPSTAT_RQ_MASK;
1807*3446Smrj 
1808*3446Smrj 	/*
1809*3446Smrj 	 * SBA - Sideband Addressing
1810*3446Smrj 	 *
1811*3446Smrj 	 * Sideband Addressing provides an additional bus to pass requests
1812*3446Smrj 	 * (address and command) to the target from the master.
1813*3446Smrj 	 *
1814*3446Smrj 	 * set SBA if all three support it
1815*3446Smrj 	 */
1816*3446Smrj 	sba = (tstatus & AGPSTAT_SBA) & (mstatus & AGPSTAT_SBA)
1817*3446Smrj 		& (mode & AGPSTAT_SBA);
1818*3446Smrj 
1819*3446Smrj 	/* set OVER4G  if all three support it */
1820*3446Smrj 	over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G)
1821*3446Smrj 		& (mode & AGPSTAT_OVER4G);
1822*3446Smrj 
1823*3446Smrj 	/*
1824*3446Smrj 	 * FW - fast write
1825*3446Smrj 	 *
1826*3446Smrj 	 * acceleration of memory write transactions from the corelogic to the
1827*3446Smrj 	 * A.G.P. master device acting like a PCI target.
1828*3446Smrj 	 *
1829*3446Smrj 	 * set FW if all three support it
1830*3446Smrj 	 */
1831*3446Smrj 	fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW)
1832*3446Smrj 		& (mode & AGPSTAT_FW);
1833*3446Smrj 
1834*3446Smrj 	/*
1835*3446Smrj 	 * figure out the max rate
1836*3446Smrj 	 * AGP v2 support: 4X, 2X, 1X speed
1837*3446Smrj 	 * status bit		meaning
1838*3446Smrj 	 * ---------------------------------------------
1839*3446Smrj 	 * 7:3			others
1840*3446Smrj 	 * 3			0 stand for V2 support
1841*3446Smrj 	 * 0:2			001:1X, 010:2X, 100:4X
1842*3446Smrj 	 * ----------------------------------------------
1843*3446Smrj 	 */
1844*3446Smrj 	rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK)
1845*3446Smrj 		& (mode & AGPSTAT_RATE_MASK);
1846*3446Smrj 	if (rate & AGP2_RATE_4X)
1847*3446Smrj 		rate = AGP2_RATE_4X;
1848*3446Smrj 	else if (rate & AGP2_RATE_2X)
1849*3446Smrj 		rate = AGP2_RATE_2X;
1850*3446Smrj 	else
1851*3446Smrj 		rate = AGP2_RATE_1X;
1852*3446Smrj 
1853*3446Smrj 	cmd = rq | sba | over4g | fw | rate;
1854*3446Smrj 	/* enable agp mode */
1855*3446Smrj 	cmd |= AGPCMD_AGPEN;
1856*3446Smrj 
1857*3446Smrj 	return (cmd);
1858*3446Smrj }
1859*3446Smrj 
1860*3446Smrj static uint32_t
1861*3446Smrj agp_v3_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode)
1862*3446Smrj {
1863*3446Smrj 	uint32_t cmd = 0;
1864*3446Smrj 	uint32_t rq, arqsz, cal, sba, over4g, fw, rate;
1865*3446Smrj 
1866*3446Smrj 	/*
1867*3446Smrj 	 * tstatus: target device status
1868*3446Smrj 	 * mstatus: master device status
1869*3446Smrj 	 * mode: the agp mode to be set
1870*3446Smrj 	 */
1871*3446Smrj 
1872*3446Smrj 	/*
1873*3446Smrj 	 * RQ - Request Queue size
1874*3446Smrj 	 * Set RQ to the min of mode and tstatus
1875*3446Smrj 	 * If mode set a RQ larger than hardware can support,
1876*3446Smrj 	 * use the max RQ which hardware can support.
1877*3446Smrj 	 * tstatus & AGPSTAT_RQ_MASK is the max RQ hardware can support
1878*3446Smrj 	 * Corelogic will enqueue agp transaction;
1879*3446Smrj 	 */
1880*3446Smrj 	rq = mode & AGPSTAT_RQ_MASK;
1881*3446Smrj 	if ((tstatus & AGPSTAT_RQ_MASK) < rq)
1882*3446Smrj 		rq = tstatus & AGPSTAT_RQ_MASK;
1883*3446Smrj 
1884*3446Smrj 	/*
1885*3446Smrj 	 * ARQSZ - Asynchronous Request Queue size
1886*3446Smrj 	 * Set the value equal to tstatus.
1887*3446Smrj 	 * Don't allow the mode register to override values
1888*3446Smrj 	 */
1889*3446Smrj 	arqsz = tstatus & AGPSTAT_ARQSZ_MASK;
1890*3446Smrj 
1891*3446Smrj 	/*
1892*3446Smrj 	 * CAL - Calibration cycle
1893*3446Smrj 	 * Set to the min of tstatus and mstatus
1894*3446Smrj 	 * Don't allow override by mode register
1895*3446Smrj 	 */
1896*3446Smrj 	cal = tstatus & AGPSTAT_CAL_MASK;
1897*3446Smrj 	if ((mstatus & AGPSTAT_CAL_MASK) < cal)
1898*3446Smrj 		cal = mstatus & AGPSTAT_CAL_MASK;
1899*3446Smrj 
1900*3446Smrj 	/*
1901*3446Smrj 	 * SBA - Sideband Addressing
1902*3446Smrj 	 *
1903*3446Smrj 	 * Sideband Addressing provides an additional bus to pass requests
1904*3446Smrj 	 * (address and command) to the target from the master.
1905*3446Smrj 	 *
1906*3446Smrj 	 * SBA in agp v3.0 must be set
1907*3446Smrj 	 */
1908*3446Smrj 	sba = AGPCMD_SBAEN;
1909*3446Smrj 
1910*3446Smrj 	/* GART64B is not set since no hardware supports it now */
1911*3446Smrj 
1912*3446Smrj 	/* Set OVER4G if all three support it */
1913*3446Smrj 	over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G)
1914*3446Smrj 		& (mode & AGPSTAT_OVER4G);
1915*3446Smrj 
1916*3446Smrj 	/*
1917*3446Smrj 	 * FW - fast write
1918*3446Smrj 	 *
1919*3446Smrj 	 * Acceleration of memory write transactions from the corelogic to the
1920*3446Smrj 	 * A.G.P. master device acting like a PCI target.
1921*3446Smrj 	 *
1922*3446Smrj 	 * Always set FW in AGP 3.0
1923*3446Smrj 	 */
1924*3446Smrj 	fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW)
1925*3446Smrj 		& (mode & AGPSTAT_FW);
1926*3446Smrj 
1927*3446Smrj 	/*
1928*3446Smrj 	 * Figure out the max rate
1929*3446Smrj 	 *
1930*3446Smrj 	 * AGP v3 support: 8X, 4X speed
1931*3446Smrj 	 *
1932*3446Smrj 	 * status bit		meaning
1933*3446Smrj 	 * ---------------------------------------------
1934*3446Smrj 	 * 7:3			others
1935*3446Smrj 	 * 3			1 stand for V3 support
1936*3446Smrj 	 * 0:2			001:4X, 010:8X, 011:4X,8X
1937*3446Smrj 	 * ----------------------------------------------
1938*3446Smrj 	 */
1939*3446Smrj 	rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK)
1940*3446Smrj 		& (mode & AGPSTAT_RATE_MASK);
1941*3446Smrj 	if (rate & AGP3_RATE_8X)
1942*3446Smrj 		rate = AGP3_RATE_8X;
1943*3446Smrj 	else
1944*3446Smrj 		rate = AGP3_RATE_4X;
1945*3446Smrj 
1946*3446Smrj 	cmd = rq | arqsz | cal | sba | over4g | fw | rate;
1947*3446Smrj 	/* Enable AGP mode */
1948*3446Smrj 	cmd |= AGPCMD_AGPEN;
1949*3446Smrj 
1950*3446Smrj 	return (cmd);
1951*3446Smrj }
1952*3446Smrj 
1953*3446Smrj static int
1954*3446Smrj agp_setup(agpgart_softstate_t *softstate, uint32_t mode)
1955*3446Smrj {
1956*3446Smrj 	uint32_t tstatus, mstatus;
1957*3446Smrj 	uint32_t agp_mode;
1958*3446Smrj 
1959*3446Smrj 	tstatus = softstate->asoft_info.agpki_tstatus;
1960*3446Smrj 	mstatus = softstate->asoft_info.agpki_mstatus;
1961*3446Smrj 
1962*3446Smrj 	/*
1963*3446Smrj 	 * There are three kinds of AGP mode. AGP mode 1.0, 2.0, 3.0
1964*3446Smrj 	 * AGP mode 2.0 is fully compatible with AGP mode 1.0, so we
1965*3446Smrj 	 * only check 2.0 and 3.0 mode. AGP 3.0 device can work in
1966*3446Smrj 	 * two AGP 2.0 or AGP 3.0 mode. By checking AGP status register,
1967*3446Smrj 	 * we can get which mode it is working at. The working mode of
1968*3446Smrj 	 * AGP master and AGP target must be consistent. That is, both
1969*3446Smrj 	 * of them must work on AGP 3.0 mode or AGP 2.0 mode.
1970*3446Smrj 	 */
1971*3446Smrj 	if ((softstate->asoft_info.agpki_tver.agpv_major == 3) &&
1972*3446Smrj 	    (tstatus & AGPSTAT_MODE3)) {
1973*3446Smrj 		/* Master device should be 3.0 mode, too */
1974*3446Smrj 		if ((softstate->asoft_info.agpki_mver.agpv_major != 3) ||
1975*3446Smrj 		    ((mstatus & AGPSTAT_MODE3) == 0))
1976*3446Smrj 			return (EIO);
1977*3446Smrj 
1978*3446Smrj 		agp_mode = agp_v3_setup(tstatus, mstatus, mode);
1979*3446Smrj 		/* Write to the AGPCMD register of target and master devices */
1980*3446Smrj 		if (lyr_set_agp_cmd(agp_mode,
1981*3446Smrj 		    &softstate->asoft_devreg))
1982*3446Smrj 			return (EIO);
1983*3446Smrj 
1984*3446Smrj 		softstate->asoft_mode = agp_mode;
1985*3446Smrj 
1986*3446Smrj 		return (0);
1987*3446Smrj 	}
1988*3446Smrj 
1989*3446Smrj 	/*
1990*3446Smrj 	 * If agp taget device doesn't work in AGP 3.0 mode,
1991*3446Smrj 	 * it must work in AGP 2.0 mode. And make sure
1992*3446Smrj 	 * master device work in AGP 2.0 mode too
1993*3446Smrj 	 */
1994*3446Smrj 	if ((softstate->asoft_info.agpki_mver.agpv_major == 3) &&
1995*3446Smrj 	    (mstatus & AGPSTAT_MODE3))
1996*3446Smrj 		return (EIO);
1997*3446Smrj 
1998*3446Smrj 	agp_mode = agp_v2_setup(tstatus, mstatus, mode);
1999*3446Smrj 	if (lyr_set_agp_cmd(agp_mode, &softstate->asoft_devreg))
2000*3446Smrj 		return (EIO);
2001*3446Smrj 	softstate->asoft_mode = agp_mode;
2002*3446Smrj 
2003*3446Smrj 	return (0);
2004*3446Smrj }
2005*3446Smrj 
2006*3446Smrj /*
2007*3446Smrj  * agp_alloc_pmem()
2008*3446Smrj  *
2009*3446Smrj  * Description:
2010*3446Smrj  * 	This function allocates physical memory for direct mapping to userland
2011*3446Smrj  * 	applications.
2012*3446Smrj  *
2013*3446Smrj  * Arguments:
2014*3446Smrj  * 	softsate	driver soft state pointer
2015*3446Smrj  * 	length		memory size
2016*3446Smrj  * 	type		AGP_NORMAL: normal agp memory, AGP_PHISYCAL: specical
2017*3446Smrj  *			memory type for intel i810 IGD
2018*3446Smrj  *
2019*3446Smrj  * Returns:
2020*3446Smrj  * 	entryp		new key table entity pointer
2021*3446Smrj  * 	NULL		no key table slot available
2022*3446Smrj  */
2023*3446Smrj static keytable_ent_t *
2024*3446Smrj agp_alloc_pmem(agpgart_softstate_t *softstate, size_t length, int type)
2025*3446Smrj {
2026*3446Smrj 	keytable_ent_t	keyentry;
2027*3446Smrj 	keytable_ent_t	*entryp;
2028*3446Smrj 
2029*3446Smrj 	ASSERT(AGP_ALIGNED(length));
2030*3446Smrj 	bzero(&keyentry, sizeof (keytable_ent_t));
2031*3446Smrj 
2032*3446Smrj 	keyentry.kte_pages = AGP_BYTES2PAGES(length);
2033*3446Smrj 	keyentry.kte_type = type;
2034*3446Smrj 
2035*3446Smrj 	keyentry.kte_memhdl =
2036*3446Smrj 	    (agp_pmem_handle_t *)kmem_zalloc(sizeof (agp_pmem_handle_t),
2037*3446Smrj 	    KM_SLEEP);
2038*3446Smrj 
2039*3446Smrj 	if (devmap_pmem_alloc(length,
2040*3446Smrj 	    PMEM_SLEEP,
2041*3446Smrj 	    &PMEMP(keyentry.kte_memhdl)->pmem_cookie) != DDI_SUCCESS)
2042*3446Smrj 		goto err1;
2043*3446Smrj 
2044*3446Smrj 	keyentry.kte_pfnarray = (pfn_t *)kmem_zalloc(sizeof (pfn_t) *
2045*3446Smrj 	    keyentry.kte_pages, KM_SLEEP);
2046*3446Smrj 
2047*3446Smrj 	if (devmap_pmem_getpfns(
2048*3446Smrj 	    PMEMP(keyentry.kte_memhdl)->pmem_cookie,
2049*3446Smrj 	    0, keyentry.kte_pages, keyentry.kte_pfnarray) != DDI_SUCCESS) {
2050*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2051*3446Smrj 		    "agp_alloc_pmem: devmap_map_getpfns failed"));
2052*3446Smrj 		goto err2;
2053*3446Smrj 	}
2054*3446Smrj 	ASSERT(!agp_check_pfns(softstate->asoft_devreg.agprd_arctype,
2055*3446Smrj 	    keyentry.kte_pfnarray, keyentry.kte_pages));
2056*3446Smrj 	entryp = agp_fill_empty_keyent(softstate, &keyentry);
2057*3446Smrj 
2058*3446Smrj 	if (!entryp) {
2059*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2060*3446Smrj 		    "agp_alloc_pmem: agp_fill_empty_keyent error"));
2061*3446Smrj 		goto err2;
2062*3446Smrj 	}
2063*3446Smrj 	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
2064*3446Smrj 
2065*3446Smrj 	return (entryp);
2066*3446Smrj 
2067*3446Smrj err2:
2068*3446Smrj 	kmem_free(keyentry.kte_pfnarray, sizeof (pfn_t) * keyentry.kte_pages);
2069*3446Smrj 	keyentry.kte_pfnarray = NULL;
2070*3446Smrj 	devmap_pmem_free(PMEMP(keyentry.kte_memhdl)->pmem_cookie);
2071*3446Smrj 	PMEMP(keyentry.kte_memhdl)->pmem_cookie = NULL;
2072*3446Smrj err1:
2073*3446Smrj 	kmem_free(keyentry.kte_memhdl, sizeof (agp_pmem_handle_t));
2074*3446Smrj 	keyentry.kte_memhdl = NULL;
2075*3446Smrj 
2076*3446Smrj 	return (NULL);
2077*3446Smrj 
2078*3446Smrj }
2079*3446Smrj 
2080*3446Smrj /*
2081*3446Smrj  * agp_alloc_kmem()
2082*3446Smrj  *
2083*3446Smrj  * Description:
2084*3446Smrj  * 	This function allocates physical memory for userland applications
2085*3446Smrj  * 	by ddi interfaces. This function can only be called to allocate
2086*3446Smrj  *	small phsyical contiguous pages, usually tens of kilobytes.
2087*3446Smrj  *
2088*3446Smrj  * Arguments:
2089*3446Smrj  * 	softsate	driver soft state pointer
2090*3446Smrj  * 	length		memory size
2091*3446Smrj  *
2092*3446Smrj  * Returns:
2093*3446Smrj  * 	entryp		new keytable entity pointer
2094*3446Smrj  * 	NULL		no keytable slot available or no physical
2095*3446Smrj  *			memory available
2096*3446Smrj  */
2097*3446Smrj static keytable_ent_t *
2098*3446Smrj agp_alloc_kmem(agpgart_softstate_t *softstate, size_t length)
2099*3446Smrj {
2100*3446Smrj 	keytable_ent_t	keyentry;
2101*3446Smrj 	keytable_ent_t	*entryp;
2102*3446Smrj 	int		ret;
2103*3446Smrj 
2104*3446Smrj 	ASSERT(AGP_ALIGNED(length));
2105*3446Smrj 
2106*3446Smrj 	bzero(&keyentry, sizeof (keytable_ent_t));
2107*3446Smrj 
2108*3446Smrj 	keyentry.kte_pages = AGP_BYTES2PAGES(length);
2109*3446Smrj 	keyentry.kte_type = AGP_PHYSICAL;
2110*3446Smrj 
2111*3446Smrj 	/*
2112*3446Smrj 	 * Set dma_attr_sgllen to assure contiguous physical pages
2113*3446Smrj 	 */
2114*3446Smrj 	agpgart_dma_attr.dma_attr_sgllen = 1;
2115*3446Smrj 
2116*3446Smrj 	/* 4k size pages */
2117*3446Smrj 	keyentry.kte_memhdl = kmem_zalloc(sizeof (agp_kmem_handle_t), KM_SLEEP);
2118*3446Smrj 
2119*3446Smrj 	if (ddi_dma_alloc_handle(softstate->asoft_dip,
2120*3446Smrj 	    &agpgart_dma_attr,
2121*3446Smrj 	    DDI_DMA_SLEEP, NULL,
2122*3446Smrj 	    &(KMEMP(keyentry.kte_memhdl)->kmem_handle))) {
2123*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2124*3446Smrj 		    "agp_alloc_kmem: ddi_dma_allco_hanlde error"));
2125*3446Smrj 		goto err4;
2126*3446Smrj 	}
2127*3446Smrj 
2128*3446Smrj 	if ((ret = ddi_dma_mem_alloc(
2129*3446Smrj 	    KMEMP(keyentry.kte_memhdl)->kmem_handle,
2130*3446Smrj 	    length,
2131*3446Smrj 	    &gart_dev_acc_attr,
2132*3446Smrj 	    DDI_DMA_CONSISTENT,
2133*3446Smrj 	    DDI_DMA_SLEEP, NULL,
2134*3446Smrj 	    &KMEMP(keyentry.kte_memhdl)->kmem_kvaddr,
2135*3446Smrj 	    &KMEMP(keyentry.kte_memhdl)->kmem_reallen,
2136*3446Smrj 	    &KMEMP(keyentry.kte_memhdl)->kmem_acchdl)) != 0) {
2137*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2138*3446Smrj 		    "agp_alloc_kmem: ddi_dma_mem_alloc error"));
2139*3446Smrj 
2140*3446Smrj 		goto err3;
2141*3446Smrj 	}
2142*3446Smrj 
2143*3446Smrj 	ret = ddi_dma_addr_bind_handle(
2144*3446Smrj 	    KMEMP(keyentry.kte_memhdl)->kmem_handle,
2145*3446Smrj 	    NULL,
2146*3446Smrj 	    KMEMP(keyentry.kte_memhdl)->kmem_kvaddr,
2147*3446Smrj 	    length,
2148*3446Smrj 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2149*3446Smrj 	    DDI_DMA_SLEEP,
2150*3446Smrj 	    NULL,
2151*3446Smrj 	    &KMEMP(keyentry.kte_memhdl)->kmem_dcookie,
2152*3446Smrj 	    &KMEMP(keyentry.kte_memhdl)->kmem_cookies_num);
2153*3446Smrj 
2154*3446Smrj 	/*
2155*3446Smrj 	 * Even dma_attr_sgllen = 1, ddi_dma_addr_bind_handle may return more
2156*3446Smrj 	 * than one cookie, we check this in the if statement.
2157*3446Smrj 	 */
2158*3446Smrj 
2159*3446Smrj 	if ((ret != DDI_DMA_MAPPED) ||
2160*3446Smrj 	    (KMEMP(keyentry.kte_memhdl)->kmem_cookies_num != 1)) {
2161*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2162*3446Smrj 		    "agp_alloc_kmem: can not alloc physical memory properly"));
2163*3446Smrj 		goto err2;
2164*3446Smrj 	}
2165*3446Smrj 
2166*3446Smrj 	keyentry.kte_pfnarray = (pfn_t *)kmem_zalloc(sizeof (pfn_t) *
2167*3446Smrj 	    keyentry.kte_pages, KM_SLEEP);
2168*3446Smrj 
2169*3446Smrj 	if (kmem_getpfns(
2170*3446Smrj 	    KMEMP(keyentry.kte_memhdl)->kmem_handle,
2171*3446Smrj 	    &KMEMP(keyentry.kte_memhdl)->kmem_dcookie,
2172*3446Smrj 	    KMEMP(keyentry.kte_memhdl)->kmem_cookies_num,
2173*3446Smrj 	    keyentry.kte_pfnarray)) {
2174*3446Smrj 		AGPDB_PRINT2((CE_WARN, "agp_alloc_kmem: get pfn array error"));
2175*3446Smrj 		goto err1;
2176*3446Smrj 	}
2177*3446Smrj 
2178*3446Smrj 	ASSERT(!agp_check_pfns(softstate->asoft_devreg.agprd_arctype,
2179*3446Smrj 	    keyentry.kte_pfnarray, keyentry.kte_pages));
2180*3446Smrj 	entryp = agp_fill_empty_keyent(softstate, &keyentry);
2181*3446Smrj 	if (!entryp) {
2182*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2183*3446Smrj 		    "agp_alloc_kmem: agp_fill_empty_keyent error"));
2184*3446Smrj 
2185*3446Smrj 		goto err1;
2186*3446Smrj 	}
2187*3446Smrj 	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
2188*3446Smrj 
2189*3446Smrj 	return (entryp);
2190*3446Smrj 
2191*3446Smrj err1:
2192*3446Smrj 	kmem_free(keyentry.kte_pfnarray, sizeof (pfn_t) * keyentry.kte_pages);
2193*3446Smrj 	keyentry.kte_pfnarray = NULL;
2194*3446Smrj 	(void) ddi_dma_unbind_handle(KMEMP(keyentry.kte_memhdl)->kmem_handle);
2195*3446Smrj 	KMEMP(keyentry.kte_memhdl)->kmem_cookies_num = 0;
2196*3446Smrj err2:
2197*3446Smrj 	ddi_dma_mem_free(&KMEMP(keyentry.kte_memhdl)->kmem_acchdl);
2198*3446Smrj 	KMEMP(keyentry.kte_memhdl)->kmem_acchdl = NULL;
2199*3446Smrj 	KMEMP(keyentry.kte_memhdl)->kmem_reallen = 0;
2200*3446Smrj 	KMEMP(keyentry.kte_memhdl)->kmem_kvaddr = NULL;
2201*3446Smrj err3:
2202*3446Smrj 	ddi_dma_free_handle(&(KMEMP(keyentry.kte_memhdl)->kmem_handle));
2203*3446Smrj 	KMEMP(keyentry.kte_memhdl)->kmem_handle = NULL;
2204*3446Smrj err4:
2205*3446Smrj 	kmem_free(keyentry.kte_memhdl, sizeof (agp_kmem_handle_t));
2206*3446Smrj 	keyentry.kte_memhdl = NULL;
2207*3446Smrj 	return (NULL);
2208*3446Smrj 
2209*3446Smrj }
2210*3446Smrj 
2211*3446Smrj /*
2212*3446Smrj  * agp_alloc_mem()
2213*3446Smrj  *
2214*3446Smrj  * Description:
2215*3446Smrj  * 	This function allocate physical memory for userland applications,
2216*3446Smrj  * 	in order to save kernel virtual space, we use the direct mapping
2217*3446Smrj  * 	memory interface if it is available.
2218*3446Smrj  *
2219*3446Smrj  * Arguments:
2220*3446Smrj  * 	st		driver soft state pointer
2221*3446Smrj  * 	length		memory size
2222*3446Smrj  * 	type		AGP_NORMAL: normal agp memory, AGP_PHISYCAL: specical
2223*3446Smrj  *			memory type for intel i810 IGD
2224*3446Smrj  *
2225*3446Smrj  * Returns:
2226*3446Smrj  * 	NULL 	Invalid memory type or can not allocate memory
2227*3446Smrj  * 	Keytable entry pointer returned by agp_alloc_kmem or agp_alloc_pmem
2228*3446Smrj  */
2229*3446Smrj static keytable_ent_t *
2230*3446Smrj agp_alloc_mem(agpgart_softstate_t *st, size_t length, int type)
2231*3446Smrj {
2232*3446Smrj 
2233*3446Smrj 	/*
2234*3446Smrj 	 * AGP_PHYSICAL type require contiguous physical pages exported
2235*3446Smrj 	 * to X drivers, like i810 HW cursor, ARGB cursor. the number of
2236*3446Smrj 	 * pages needed is usuallysmall and contiguous, 4K, 16K. So we
2237*3446Smrj 	 * use DDI interface to allocated such memory. And X use xsvc
2238*3446Smrj 	 * drivers to map this memory into its own address space.
2239*3446Smrj 	 */
2240*3446Smrj 	ASSERT(st);
2241*3446Smrj 
2242*3446Smrj 	switch (type) {
2243*3446Smrj 	case AGP_NORMAL:
2244*3446Smrj 		return (agp_alloc_pmem(st, length, type));
2245*3446Smrj 	case AGP_PHYSICAL:
2246*3446Smrj 		return (agp_alloc_kmem(st, length));
2247*3446Smrj 	default:
2248*3446Smrj 		return (NULL);
2249*3446Smrj 	}
2250*3446Smrj }
2251*3446Smrj 
2252*3446Smrj /*
2253*3446Smrj  * free_gart_table()
2254*3446Smrj  *
2255*3446Smrj  * Description:
2256*3446Smrj  * 	This function frees the gart table memory allocated by driver.
2257*3446Smrj  * 	Must disable gart table before calling this function.
2258*3446Smrj  *
2259*3446Smrj  * Arguments:
2260*3446Smrj  * 	softstate		driver soft state pointer
2261*3446Smrj  *
2262*3446Smrj  */
2263*3446Smrj static void
2264*3446Smrj free_gart_table(agpgart_softstate_t *st)
2265*3446Smrj {
2266*3446Smrj 
2267*3446Smrj 	if (st->gart_dma_handle == NULL)
2268*3446Smrj 		return;
2269*3446Smrj 
2270*3446Smrj 	(void) ddi_dma_unbind_handle(st->gart_dma_handle);
2271*3446Smrj 	ddi_dma_mem_free(&st->gart_dma_acc_handle);
2272*3446Smrj 	st->gart_dma_acc_handle = NULL;
2273*3446Smrj 	ddi_dma_free_handle(&st->gart_dma_handle);
2274*3446Smrj 	st->gart_dma_handle = NULL;
2275*3446Smrj 	st->gart_vbase = 0;
2276*3446Smrj 	st->gart_size = 0;
2277*3446Smrj }
2278*3446Smrj 
2279*3446Smrj /*
2280*3446Smrj  * alloc_gart_table()
2281*3446Smrj  *
2282*3446Smrj  * Description:
2283*3446Smrj  * 	This function allocates one physical continuous gart table.
2284*3446Smrj  * 	INTEL integrated video device except i810 have their special
2285*3446Smrj  * 	video bios; No need to allocate gart table for them.
2286*3446Smrj  *
2287*3446Smrj  * Arguments:
2288*3446Smrj  * 	st		driver soft state pointer
2289*3446Smrj  *
2290*3446Smrj  * Returns:
2291*3446Smrj  * 	0		success
2292*3446Smrj  * 	-1		can not allocate gart tabl
2293*3446Smrj  */
2294*3446Smrj static int
2295*3446Smrj alloc_gart_table(agpgart_softstate_t *st)
2296*3446Smrj {
2297*3446Smrj 	int			num_pages;
2298*3446Smrj 	size_t			table_size;
2299*3446Smrj 	int			ret = DDI_SUCCESS;
2300*3446Smrj 	ddi_dma_cookie_t	cookie;
2301*3446Smrj 	uint32_t		num_cookies;
2302*3446Smrj 
2303*3446Smrj 	num_pages = AGP_MB2PAGES(st->asoft_info.agpki_apersize);
2304*3446Smrj 
2305*3446Smrj 	/*
2306*3446Smrj 	 * Only 40-bit maximum physical memory is supported by today's
2307*3446Smrj 	 * AGP hardware (32-bit gart tables can hold 40-bit memory addresses).
2308*3446Smrj 	 * No one supports 64-bit gart entries now, so the size of gart
2309*3446Smrj 	 * entries defaults to 32-bit though AGP3.0 specifies the possibility
2310*3446Smrj 	 * of 64-bit gart entries.
2311*3446Smrj 	 */
2312*3446Smrj 
2313*3446Smrj 	table_size = num_pages * (sizeof (uint32_t));
2314*3446Smrj 
2315*3446Smrj 	/*
2316*3446Smrj 	 * Only AMD64 can put gart table above 4G, 40 bits at maximum
2317*3446Smrj 	 */
2318*3446Smrj 	if ((st->asoft_devreg.agprd_arctype == ARC_AMD64AGP) ||
2319*3446Smrj 	    (st->asoft_devreg.agprd_arctype == ARC_AMD64NOAGP))
2320*3446Smrj 		garttable_dma_attr.dma_attr_addr_hi = 0xffffffffffLL;
2321*3446Smrj 	else
2322*3446Smrj 		garttable_dma_attr.dma_attr_addr_hi = 0xffffffffU;
2323*3446Smrj 	/* Allocate physical continuous page frame for gart table */
2324*3446Smrj 	if (ret = ddi_dma_alloc_handle(st->asoft_dip,
2325*3446Smrj 	    &garttable_dma_attr,
2326*3446Smrj 	    DDI_DMA_SLEEP,
2327*3446Smrj 	    NULL, &st->gart_dma_handle)) {
2328*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2329*3446Smrj 		    "alloc_gart_table: ddi_dma_alloc_handle failed"));
2330*3446Smrj 		goto err3;
2331*3446Smrj 	}
2332*3446Smrj 
2333*3446Smrj 	if (ret = ddi_dma_mem_alloc(st->gart_dma_handle,
2334*3446Smrj 		    table_size,
2335*3446Smrj 		    &gart_dev_acc_attr,
2336*3446Smrj 		    DDI_DMA_CONSISTENT,
2337*3446Smrj 		    DDI_DMA_SLEEP, NULL,
2338*3446Smrj 		    &st->gart_vbase,
2339*3446Smrj 		    &st->gart_size,
2340*3446Smrj 		    &st->gart_dma_acc_handle)) {
2341*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2342*3446Smrj 		    "alloc_gart_table: ddi_dma_mem_alloc failed"));
2343*3446Smrj 		goto err2;
2344*3446Smrj 
2345*3446Smrj 	}
2346*3446Smrj 
2347*3446Smrj 	ret = ddi_dma_addr_bind_handle(st->gart_dma_handle,
2348*3446Smrj 		    NULL, st->gart_vbase,
2349*3446Smrj 		    table_size,
2350*3446Smrj 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2351*3446Smrj 		    DDI_DMA_SLEEP, NULL,
2352*3446Smrj 		    &cookie,  &num_cookies);
2353*3446Smrj 
2354*3446Smrj 	st->gart_pbase = cookie.dmac_address;
2355*3446Smrj 
2356*3446Smrj 	if ((ret != DDI_DMA_MAPPED) || (num_cookies != 1)) {
2357*3446Smrj 		if (num_cookies > 1)
2358*3446Smrj 			(void) ddi_dma_unbind_handle(st->gart_dma_handle);
2359*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2360*3446Smrj 		    "alloc_gart_table: alloc contiguous phys memory failed"));
2361*3446Smrj 		goto err1;
2362*3446Smrj 	}
2363*3446Smrj 
2364*3446Smrj 	return (0);
2365*3446Smrj err1:
2366*3446Smrj 	ddi_dma_mem_free(&st->gart_dma_acc_handle);
2367*3446Smrj 	st->gart_dma_acc_handle = NULL;
2368*3446Smrj err2:
2369*3446Smrj 	ddi_dma_free_handle(&st->gart_dma_handle);
2370*3446Smrj 	st->gart_dma_handle = NULL;
2371*3446Smrj err3:
2372*3446Smrj 	st->gart_pbase = 0;
2373*3446Smrj 	st->gart_size = 0;
2374*3446Smrj 	st->gart_vbase = 0;
2375*3446Smrj 
2376*3446Smrj 	return (-1);
2377*3446Smrj }
2378*3446Smrj 
2379*3446Smrj /*
2380*3446Smrj  * agp_add_to_gart()
2381*3446Smrj  *
2382*3446Smrj  * Description:
2383*3446Smrj  * 	This function fills the gart table entries by a given page frame number
2384*3446Smrj  * 	array and set up the agp aperture page to physical memory page
2385*3446Smrj  * 	translation.
2386*3446Smrj  * Arguments:
2387*3446Smrj  * 	type		valid sytem arc types ARC_AMD64AGP, ARC_INTELAGP,
2388*3446Smrj  * 			ARC_AMD64AGP
2389*3446Smrj  * 	pfnarray	allocated physical page frame number array
2390*3446Smrj  * 	pg_offset	agp aperture start page to be bound
2391*3446Smrj  * 	entries		the number of pages to be bound
2392*3446Smrj  * 	dma_hdl		gart table dma memory handle
2393*3446Smrj  * 	tablep		gart table kernel virtual address
2394*3446Smrj  * Returns:
2395*3446Smrj  * 	-1		failed
2396*3446Smrj  * 	0		success
2397*3446Smrj  */
2398*3446Smrj static int
2399*3446Smrj agp_add_to_gart(
2400*3446Smrj     agp_arc_type_t type,
2401*3446Smrj     pfn_t *pfnarray,
2402*3446Smrj     uint32_t pg_offset,
2403*3446Smrj     uint32_t entries,
2404*3446Smrj     ddi_dma_handle_t dma_hdl,
2405*3446Smrj     uint32_t *tablep)
2406*3446Smrj {
2407*3446Smrj 	int items = 0;
2408*3446Smrj 	uint32_t *entryp;
2409*3446Smrj 	uint32_t itemv;
2410*3446Smrj 
2411*3446Smrj 	entryp = tablep + pg_offset;
2412*3446Smrj 	while (items < entries) {
2413*3446Smrj 		if (pfn2gartentry(type, pfnarray[items], &itemv))
2414*3446Smrj 			break;
2415*3446Smrj 		*(entryp + items) = itemv;
2416*3446Smrj 		items++;
2417*3446Smrj 	}
2418*3446Smrj 	if (items < entries)
2419*3446Smrj 		return (-1);
2420*3446Smrj 
2421*3446Smrj 	(void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
2422*3446Smrj 	    entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);
2423*3446Smrj 
2424*3446Smrj 	return (0);
2425*3446Smrj }
2426*3446Smrj 
2427*3446Smrj /*
2428*3446Smrj  * agp_bind_key()
2429*3446Smrj  *
2430*3446Smrj  * Description:
2431*3446Smrj  * 	This function will call low level gart table access functions to
2432*3446Smrj  * 	set up gart table translation. Also it will do some sanity
2433*3446Smrj  * 	checking on key table entry.
2434*3446Smrj  *
2435*3446Smrj  * Arguments:
2436*3446Smrj  * 	softstate		driver soft state pointer
2437*3446Smrj  * 	keyent			key table entity pointer to be bound
2438*3446Smrj  * 	pg_offset		aperture start page to be bound
2439*3446Smrj  * Returns:
2440*3446Smrj  * 	EINVAL			not a valid operation
2441*3446Smrj  */
2442*3446Smrj static int
2443*3446Smrj agp_bind_key(agpgart_softstate_t *softstate,
2444*3446Smrj     keytable_ent_t  *keyent, uint32_t  pg_offset)
2445*3446Smrj {
2446*3446Smrj 	uint64_t pg_end;
2447*3446Smrj 	int ret = 0;
2448*3446Smrj 
2449*3446Smrj 	ASSERT(keyent);
2450*3446Smrj 	ASSERT((keyent->kte_key >= 0) && (keyent->kte_key < AGP_MAXKEYS));
2451*3446Smrj 	ASSERT(mutex_owned(&softstate->asoft_instmutex));
2452*3446Smrj 
2453*3446Smrj 	pg_end = pg_offset + keyent->kte_pages;
2454*3446Smrj 
2455*3446Smrj 	if (pg_end > AGP_MB2PAGES(softstate->asoft_info.agpki_apersize)) {
2456*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2457*3446Smrj 		    "agp_bind_key: key=0x%x,exceed aper range",
2458*3446Smrj 		    keyent->kte_key));
2459*3446Smrj 
2460*3446Smrj 		return (EINVAL);
2461*3446Smrj 	}
2462*3446Smrj 
2463*3446Smrj 	if (agp_check_off(softstate->asoft_table,
2464*3446Smrj 	    pg_offset, keyent->kte_pages)) {
2465*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2466*3446Smrj 		    "agp_bind_key: pg_offset=0x%x, pages=0x%lx overlaped",
2467*3446Smrj 		    pg_offset, keyent->kte_pages));
2468*3446Smrj 		return (EINVAL);
2469*3446Smrj 	}
2470*3446Smrj 
2471*3446Smrj 	ASSERT(keyent->kte_pfnarray != NULL);
2472*3446Smrj 
2473*3446Smrj 	switch (softstate->asoft_devreg.agprd_arctype) {
2474*3446Smrj 	case ARC_IGD810:
2475*3446Smrj 	case ARC_IGD830:
2476*3446Smrj 		ret = lyr_i8xx_add_to_gtt(pg_offset, keyent,
2477*3446Smrj 		    &softstate->asoft_devreg);
2478*3446Smrj 		if (ret)
2479*3446Smrj 			return (EIO);
2480*3446Smrj 		break;
2481*3446Smrj 	case ARC_INTELAGP:
2482*3446Smrj 	case ARC_AMD64NOAGP:
2483*3446Smrj 	case ARC_AMD64AGP:
2484*3446Smrj 		ret =  agp_add_to_gart(
2485*3446Smrj 		    softstate->asoft_devreg.agprd_arctype,
2486*3446Smrj 		    keyent->kte_pfnarray,
2487*3446Smrj 		    pg_offset,
2488*3446Smrj 		    keyent->kte_pages,
2489*3446Smrj 		    softstate->gart_dma_handle,
2490*3446Smrj 		    (uint32_t *)softstate->gart_vbase);
2491*3446Smrj 		if (ret)
2492*3446Smrj 			return (EINVAL);
2493*3446Smrj 		/* Flush GTLB table */
2494*3446Smrj 		lyr_flush_gart_cache(&softstate->asoft_devreg);
2495*3446Smrj 		break;
2496*3446Smrj 	default:
2497*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2498*3446Smrj 		    "agp_bind_key: arc type = 0x%x unsupported",
2499*3446Smrj 		    softstate->asoft_devreg.agprd_arctype));
2500*3446Smrj 		return (EINVAL);
2501*3446Smrj 	}
2502*3446Smrj 	return (0);
2503*3446Smrj }
2504*3446Smrj 
2505*3446Smrj static int
2506*3446Smrj agpgart_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2507*3446Smrj {
2508*3446Smrj 	int instance;
2509*3446Smrj 	agpgart_softstate_t *softstate;
2510*3446Smrj 
2511*3446Smrj 	if (cmd != DDI_ATTACH) {
2512*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2513*3446Smrj 		    "agpgart_attach: only attach op supported"));
2514*3446Smrj 		return (DDI_FAILURE);
2515*3446Smrj 	}
2516*3446Smrj 	instance = ddi_get_instance(dip);
2517*3446Smrj 
2518*3446Smrj 	if (ddi_soft_state_zalloc(agpgart_glob_soft_handle, instance)
2519*3446Smrj 		    != DDI_SUCCESS) {
2520*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2521*3446Smrj 		    "agpgart_attach: soft state zalloc failed"));
2522*3446Smrj 		goto err1;
2523*3446Smrj 
2524*3446Smrj 	}
2525*3446Smrj 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2526*3446Smrj 	mutex_init(&softstate->asoft_instmutex, NULL, MUTEX_DRIVER, NULL);
2527*3446Smrj 	softstate->asoft_dip = dip;
2528*3446Smrj 	/*
2529*3446Smrj 	 * Allocate LDI identifier for agpgart driver
2530*3446Smrj 	 * Agpgart driver is the kernel consumer
2531*3446Smrj 	 */
2532*3446Smrj 	if (ldi_ident_from_dip(dip, &softstate->asoft_li)) {
2533*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2534*3446Smrj 		    "agpgart_attach: LDI indentifier allcation failed"));
2535*3446Smrj 		goto err2;
2536*3446Smrj 	}
2537*3446Smrj 
2538*3446Smrj 	softstate->asoft_devreg.agprd_arctype = ARC_UNKNOWN;
2539*3446Smrj 	/* Install agp kstat */
2540*3446Smrj 	if (agp_init_kstats(softstate)) {
2541*3446Smrj 		AGPDB_PRINT2((CE_WARN, "agpgart_attach: init kstats error"));
2542*3446Smrj 		goto err3;
2543*3446Smrj 	}
2544*3446Smrj 	/*
2545*3446Smrj 	 * devfs will create /dev/agpgart
2546*3446Smrj 	 * and  /devices/agpgart:agpgart
2547*3446Smrj 	 */
2548*3446Smrj 
2549*3446Smrj 	if (ddi_create_minor_node(dip, AGPGART_DEVNODE, S_IFCHR,
2550*3446Smrj 	    AGP_INST2MINOR(instance),
2551*3446Smrj 	    DDI_NT_AGP_PSEUDO, 0)) {
2552*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2553*3446Smrj 		    "agpgart_attach: Can not create minor node"));
2554*3446Smrj 		goto err4;
2555*3446Smrj 	}
2556*3446Smrj 
2557*3446Smrj 	softstate->asoft_table = kmem_zalloc(
2558*3446Smrj 			AGP_MAXKEYS * (sizeof (keytable_ent_t)),
2559*3446Smrj 			KM_SLEEP);
2560*3446Smrj 
2561*3446Smrj 	return (DDI_SUCCESS);
2562*3446Smrj err4:
2563*3446Smrj 	agp_fini_kstats(softstate);
2564*3446Smrj err3:
2565*3446Smrj 	ldi_ident_release(softstate->asoft_li);
2566*3446Smrj err2:
2567*3446Smrj 	ddi_soft_state_free(agpgart_glob_soft_handle, instance);
2568*3446Smrj err1:
2569*3446Smrj 	return (DDI_FAILURE);
2570*3446Smrj }
2571*3446Smrj 
2572*3446Smrj static int
2573*3446Smrj agpgart_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2574*3446Smrj {
2575*3446Smrj 	int instance;
2576*3446Smrj 	agpgart_softstate_t *st;
2577*3446Smrj 
2578*3446Smrj 	instance = ddi_get_instance(dip);
2579*3446Smrj 
2580*3446Smrj 	st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2581*3446Smrj 
2582*3446Smrj 	if (cmd != DDI_DETACH)
2583*3446Smrj 		return (DDI_FAILURE);
2584*3446Smrj 
2585*3446Smrj 	/*
2586*3446Smrj 	 * Caller should free all the memory allocated explicitly.
2587*3446Smrj 	 * We release the memory allocated by caller which is not
2588*3446Smrj 	 * properly freed. mutex_enter here make sure assertion on
2589*3446Smrj 	 * softstate mutex success in agp_dealloc_mem.
2590*3446Smrj 	 */
2591*3446Smrj 	mutex_enter(&st->asoft_instmutex);
2592*3446Smrj 	if (agp_del_allkeys(st)) {
2593*3446Smrj 		AGPDB_PRINT2((CE_WARN, "agpgart_detach: agp_del_allkeys err"));
2594*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2595*3446Smrj 		    "you might free agp memory exported to your applications"));
2596*3446Smrj 
2597*3446Smrj 		mutex_exit(&st->asoft_instmutex);
2598*3446Smrj 		return (DDI_FAILURE);
2599*3446Smrj 	}
2600*3446Smrj 	mutex_exit(&st->asoft_instmutex);
2601*3446Smrj 	if (st->asoft_table) {
2602*3446Smrj 		kmem_free(st->asoft_table,
2603*3446Smrj 		    AGP_MAXKEYS * (sizeof (keytable_ent_t)));
2604*3446Smrj 		st->asoft_table = 0;
2605*3446Smrj 	}
2606*3446Smrj 
2607*3446Smrj 	ddi_remove_minor_node(dip, AGPGART_DEVNODE);
2608*3446Smrj 	agp_fini_kstats(st);
2609*3446Smrj 	ldi_ident_release(st->asoft_li);
2610*3446Smrj 	mutex_destroy(&st->asoft_instmutex);
2611*3446Smrj 	ddi_soft_state_free(agpgart_glob_soft_handle, instance);
2612*3446Smrj 
2613*3446Smrj 	return (DDI_SUCCESS);
2614*3446Smrj }
2615*3446Smrj 
2616*3446Smrj /*ARGSUSED*/
2617*3446Smrj static int
2618*3446Smrj agpgart_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
2619*3446Smrj     void **resultp)
2620*3446Smrj {
2621*3446Smrj 	agpgart_softstate_t *st;
2622*3446Smrj 	int instance, rval = DDI_FAILURE;
2623*3446Smrj 	dev_t dev;
2624*3446Smrj 
2625*3446Smrj 	switch (cmd) {
2626*3446Smrj 	case DDI_INFO_DEVT2DEVINFO:
2627*3446Smrj 		dev = (dev_t)arg;
2628*3446Smrj 		instance = AGP_DEV2INST(dev);
2629*3446Smrj 		st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2630*3446Smrj 		if (st != NULL) {
2631*3446Smrj 			mutex_enter(&st->asoft_instmutex);
2632*3446Smrj 			*resultp = st->asoft_dip;
2633*3446Smrj 			mutex_exit(&st->asoft_instmutex);
2634*3446Smrj 			rval = DDI_SUCCESS;
2635*3446Smrj 		} else
2636*3446Smrj 			*resultp = NULL;
2637*3446Smrj 
2638*3446Smrj 		break;
2639*3446Smrj 	case DDI_INFO_DEVT2INSTANCE:
2640*3446Smrj 		dev = (dev_t)arg;
2641*3446Smrj 		instance = AGP_DEV2INST(dev);
2642*3446Smrj 		*resultp = (void *)(uintptr_t)instance;
2643*3446Smrj 		rval = DDI_SUCCESS;
2644*3446Smrj 
2645*3446Smrj 		break;
2646*3446Smrj 	default:
2647*3446Smrj 		break;
2648*3446Smrj 	}
2649*3446Smrj 
2650*3446Smrj 	return (rval);
2651*3446Smrj }
2652*3446Smrj 
2653*3446Smrj /*
2654*3446Smrj  * agpgart_open()
2655*3446Smrj  *
2656*3446Smrj  * Description:
2657*3446Smrj  * 	This function is the driver open entry point. If it is the
2658*3446Smrj  * 	first time the agpgart driver is opened, the driver will
2659*3446Smrj  * 	open other agp related layered drivers and set up the agpgart
2660*3446Smrj  * 	table properly.
2661*3446Smrj  *
2662*3446Smrj  * Arguments:
2663*3446Smrj  * 	dev			device number pointer
2664*3446Smrj  * 	openflags		open flags
2665*3446Smrj  *	otyp			OTYP_BLK, OTYP_CHR
2666*3446Smrj  * 	credp			user's credential's struct pointer
2667*3446Smrj  *
2668*3446Smrj  * Returns:
2669*3446Smrj  * 	ENXIO			operation error
2670*3446Smrj  * 	EAGAIN			resoure temporarily unvailable
2671*3446Smrj  * 	0			success
2672*3446Smrj  */
2673*3446Smrj /*ARGSUSED*/
2674*3446Smrj static int
2675*3446Smrj agpgart_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
2676*3446Smrj {
2677*3446Smrj 	int instance = AGP_DEV2INST(*dev);
2678*3446Smrj 	agpgart_softstate_t *softstate;
2679*3446Smrj 	int rc = 0;
2680*3446Smrj 
2681*3446Smrj 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2682*3446Smrj 	if (softstate == NULL) {
2683*3446Smrj 		AGPDB_PRINT2((CE_WARN, "agpgart_open: get soft state err"));
2684*3446Smrj 		return (ENXIO);
2685*3446Smrj 	}
2686*3446Smrj 	mutex_enter(&softstate->asoft_instmutex);
2687*3446Smrj 
2688*3446Smrj 	if (softstate->asoft_opened) {
2689*3446Smrj 		softstate->asoft_opened++;
2690*3446Smrj 		mutex_exit(&softstate->asoft_instmutex);
2691*3446Smrj 		return (0);
2692*3446Smrj 	}
2693*3446Smrj 
2694*3446Smrj 	/*
2695*3446Smrj 	 * The driver is opened first time, so we initialize layered
2696*3446Smrj 	 * driver interface and softstate member here.
2697*3446Smrj 	 */
2698*3446Smrj 	softstate->asoft_pgused = 0;
2699*3446Smrj 	if (lyr_init(&softstate->asoft_devreg, softstate->asoft_li)) {
2700*3446Smrj 		AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_init failed"));
2701*3446Smrj 		mutex_exit(&softstate->asoft_instmutex);
2702*3446Smrj 		return (EAGAIN);
2703*3446Smrj 	}
2704*3446Smrj 
2705*3446Smrj 	/* Call into layered driver */
2706*3446Smrj 	if (lyr_get_info(&softstate->asoft_info, &softstate->asoft_devreg)) {
2707*3446Smrj 		AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_get_info error"));
2708*3446Smrj 		lyr_end(&softstate->asoft_devreg);
2709*3446Smrj 		mutex_exit(&softstate->asoft_instmutex);
2710*3446Smrj 		return (EIO);
2711*3446Smrj 	}
2712*3446Smrj 
2713*3446Smrj 	/*
2714*3446Smrj 	 * BIOS already set up gtt table for ARC_IGD830
2715*3446Smrj 	 */
2716*3446Smrj 	if (IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
2717*3446Smrj 		softstate->asoft_opened++;
2718*3446Smrj 
2719*3446Smrj 		softstate->asoft_pgtotal =
2720*3446Smrj 		    get_max_pages(softstate->asoft_info.agpki_apersize);
2721*3446Smrj 
2722*3446Smrj 		if (lyr_config_devices(&softstate->asoft_devreg)) {
2723*3446Smrj 			AGPDB_PRINT2((CE_WARN,
2724*3446Smrj 			    "agpgart_open: lyr_config_devices error"));
2725*3446Smrj 			lyr_end(&softstate->asoft_devreg);
2726*3446Smrj 			mutex_exit(&softstate->asoft_instmutex);
2727*3446Smrj 
2728*3446Smrj 			return (EIO);
2729*3446Smrj 		}
2730*3446Smrj 		mutex_exit(&softstate->asoft_instmutex);
2731*3446Smrj 		return (0);
2732*3446Smrj 	}
2733*3446Smrj 
2734*3446Smrj 	rc = alloc_gart_table(softstate);
2735*3446Smrj 
2736*3446Smrj 	/*
2737*3446Smrj 	 * Allocate physically contiguous pages for AGP arc or
2738*3446Smrj 	 * i810 arc. If failed, divide aper_size by 2 to
2739*3446Smrj 	 * reduce gart table size until 4 megabytes. This
2740*3446Smrj 	 * is just a workaround for systems with very few
2741*3446Smrj 	 * physically contiguous memory.
2742*3446Smrj 	 */
2743*3446Smrj 	if (rc) {
2744*3446Smrj 		while ((softstate->asoft_info.agpki_apersize >= 4) &&
2745*3446Smrj 		    (alloc_gart_table(softstate))) {
2746*3446Smrj 			softstate->asoft_info.agpki_apersize >>= 1;
2747*3446Smrj 		}
2748*3446Smrj 		if (softstate->asoft_info.agpki_apersize >= 4)
2749*3446Smrj 			rc = 0;
2750*3446Smrj 	}
2751*3446Smrj 
2752*3446Smrj 	if (rc != 0) {
2753*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2754*3446Smrj 		    "agpgart_open: alloc gart table failed"));
2755*3446Smrj 		lyr_end(&softstate->asoft_devreg);
2756*3446Smrj 		mutex_exit(&softstate->asoft_instmutex);
2757*3446Smrj 		return (EAGAIN);
2758*3446Smrj 	}
2759*3446Smrj 
2760*3446Smrj 	softstate->asoft_pgtotal =
2761*3446Smrj 	    get_max_pages(softstate->asoft_info.agpki_apersize);
2762*3446Smrj 	/*
2763*3446Smrj 	 * BIOS doesn't initialize GTT for i810,
2764*3446Smrj 	 * So i810 GTT must be created by driver.
2765*3446Smrj 	 *
2766*3446Smrj 	 * Set up gart table and enable it.
2767*3446Smrj 	 */
2768*3446Smrj 	if (lyr_set_gart_addr(softstate->gart_pbase,
2769*3446Smrj 	    &softstate->asoft_devreg)) {
2770*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2771*3446Smrj 		    "agpgart_open: set gart table addr failed"));
2772*3446Smrj 		free_gart_table(softstate);
2773*3446Smrj 		lyr_end(&softstate->asoft_devreg);
2774*3446Smrj 		mutex_exit(&softstate->asoft_instmutex);
2775*3446Smrj 		return (EIO);
2776*3446Smrj 	}
2777*3446Smrj 	if (lyr_config_devices(&softstate->asoft_devreg)) {
2778*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2779*3446Smrj 		    "agpgart_open: lyr_config_devices failed"));
2780*3446Smrj 		free_gart_table(softstate);
2781*3446Smrj 		lyr_end(&softstate->asoft_devreg);
2782*3446Smrj 		mutex_exit(&softstate->asoft_instmutex);
2783*3446Smrj 		return (EIO);
2784*3446Smrj 	}
2785*3446Smrj 
2786*3446Smrj 	softstate->asoft_opened++;
2787*3446Smrj 	mutex_exit(&softstate->asoft_instmutex);
2788*3446Smrj 
2789*3446Smrj 	return (0);
2790*3446Smrj }
2791*3446Smrj 
2792*3446Smrj /*
2793*3446Smrj  * agpgart_close()
2794*3446Smrj  *
2795*3446Smrj  * Description:
2796*3446Smrj  * 	agpgart_close will release resources allocated in the first open
2797*3446Smrj  * 	and close other open layered drivers. Also it frees the memory
2798*3446Smrj  *	allocated by ioctls.
2799*3446Smrj  *
2800*3446Smrj  * Arguments:
2801*3446Smrj  * 	dev			device number
2802*3446Smrj  * 	flag			file status flag
2803*3446Smrj  *	otyp			OTYP_BLK, OTYP_CHR
2804*3446Smrj  * 	credp			user's credential's struct pointer
2805*3446Smrj  *
2806*3446Smrj  * Returns:
2807*3446Smrj  * 	ENXIO			not an error, to support "deferred attach"
2808*3446Smrj  * 	0			success
2809*3446Smrj  */
2810*3446Smrj /*ARGSUSED*/
2811*3446Smrj static int
2812*3446Smrj agpgart_close(dev_t dev, int flag, int otyp, cred_t *credp)
2813*3446Smrj {
2814*3446Smrj 	int instance = AGP_DEV2INST(dev);
2815*3446Smrj 	agpgart_softstate_t *softstate;
2816*3446Smrj 
2817*3446Smrj 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2818*3446Smrj 	if (softstate == NULL) {
2819*3446Smrj 		AGPDB_PRINT2((CE_WARN, "agpgart_close: get soft state err"));
2820*3446Smrj 		return (ENXIO);
2821*3446Smrj 	}
2822*3446Smrj 
2823*3446Smrj 	mutex_enter(&softstate->asoft_instmutex);
2824*3446Smrj 	ASSERT(softstate->asoft_opened);
2825*3446Smrj 
2826*3446Smrj 
2827*3446Smrj 	/*
2828*3446Smrj 	 * If the last process close this device is not the controlling
2829*3446Smrj 	 * process, also release the control over agpgart driver here if the
2830*3446Smrj 	 * the controlling process fails to release the control before it
2831*3446Smrj 	 * close the driver.
2832*3446Smrj 	 */
2833*3446Smrj 	if (softstate->asoft_acquired == 1) {
2834*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2835*3446Smrj 		    "agpgart_close: auto release control over driver"));
2836*3446Smrj 		release_control(softstate);
2837*3446Smrj 	}
2838*3446Smrj 
2839*3446Smrj 	if (lyr_unconfig_devices(&softstate->asoft_devreg)) {
2840*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2841*3446Smrj 		    "agpgart_close: lyr_unconfig_device error"));
2842*3446Smrj 		mutex_exit(&softstate->asoft_instmutex);
2843*3446Smrj 		return (EIO);
2844*3446Smrj 	}
2845*3446Smrj 	softstate->asoft_agpen = 0;
2846*3446Smrj 
2847*3446Smrj 	if (!IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
2848*3446Smrj 		free_gart_table(softstate);
2849*3446Smrj 	}
2850*3446Smrj 
2851*3446Smrj 	lyr_end(&softstate->asoft_devreg);
2852*3446Smrj 
2853*3446Smrj 	/*
2854*3446Smrj 	 * This statement must be positioned before agp_del_allkeys
2855*3446Smrj 	 * agp_dealloc_mem indirectly called by agp_del_allkeys
2856*3446Smrj 	 * will test this variable.
2857*3446Smrj 	 */
2858*3446Smrj 	softstate->asoft_opened = 0;
2859*3446Smrj 
2860*3446Smrj 	/*
2861*3446Smrj 	 * Free the memory allocated by user applications which
2862*3446Smrj 	 * was never deallocated.
2863*3446Smrj 	 */
2864*3446Smrj 	(void) agp_del_allkeys(softstate);
2865*3446Smrj 
2866*3446Smrj 	mutex_exit(&softstate->asoft_instmutex);
2867*3446Smrj 
2868*3446Smrj 	return (0);
2869*3446Smrj }
2870*3446Smrj 
2871*3446Smrj static int
2872*3446Smrj ioctl_agpgart_info(agpgart_softstate_t  *softstate, void  *arg, int flags)
2873*3446Smrj {
2874*3446Smrj 	agp_info_t infostruct;
2875*3446Smrj #ifdef _MULTI_DATAMODEL
2876*3446Smrj 	agp_info32_t infostruct32;
2877*3446Smrj #endif
2878*3446Smrj 
2879*3446Smrj 	bzero(&infostruct, sizeof (agp_info_t));
2880*3446Smrj 
2881*3446Smrj #ifdef _MULTI_DATAMODEL
2882*3446Smrj 	bzero(&infostruct32, sizeof (agp_info32_t));
2883*3446Smrj 	if (ddi_model_convert_from(flags & FMODELS) == DDI_MODEL_ILP32) {
2884*3446Smrj 		if (copyinfo(softstate, &infostruct))
2885*3446Smrj 			return (EINVAL);
2886*3446Smrj 
2887*3446Smrj 		agpinfo_default_to_32(infostruct, infostruct32);
2888*3446Smrj 		if (ddi_copyout(&infostruct32, arg,
2889*3446Smrj 		    sizeof (agp_info32_t), flags) != 0)
2890*3446Smrj 			return (EFAULT);
2891*3446Smrj 
2892*3446Smrj 		return (0);
2893*3446Smrj 	}
2894*3446Smrj #endif /* _MULTI_DATAMODEL */
2895*3446Smrj 	if (copyinfo(softstate, &infostruct))
2896*3446Smrj 		return (EINVAL);
2897*3446Smrj 
2898*3446Smrj 	if (ddi_copyout(&infostruct, arg, sizeof (agp_info_t), flags) != 0) {
2899*3446Smrj 		return (EFAULT);
2900*3446Smrj 	}
2901*3446Smrj 
2902*3446Smrj 	return (0);
2903*3446Smrj }
2904*3446Smrj 
2905*3446Smrj static int
2906*3446Smrj ioctl_agpgart_acquire(agpgart_softstate_t  *st)
2907*3446Smrj {
2908*3446Smrj 	if (st->asoft_acquired) {
2909*3446Smrj 		AGPDB_PRINT2((CE_WARN, "ioctl_acquire: already acquired"));
2910*3446Smrj 		return (EBUSY);
2911*3446Smrj 	}
2912*3446Smrj 	acquire_control(st);
2913*3446Smrj 	return (0);
2914*3446Smrj }
2915*3446Smrj 
2916*3446Smrj static int
2917*3446Smrj ioctl_agpgart_release(agpgart_softstate_t  *st)
2918*3446Smrj {
2919*3446Smrj 	if (is_controlling_proc(st) < 0) {
2920*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2921*3446Smrj 		    "ioctl_agpgart_release: not a controlling process"));
2922*3446Smrj 		return (EPERM);
2923*3446Smrj 	}
2924*3446Smrj 	release_control(st);
2925*3446Smrj 	return (0);
2926*3446Smrj }
2927*3446Smrj 
2928*3446Smrj static int
2929*3446Smrj ioctl_agpgart_setup(agpgart_softstate_t  *st, void  *arg, int flags)
2930*3446Smrj {
2931*3446Smrj 	agp_setup_t data;
2932*3446Smrj 	int rc = 0;
2933*3446Smrj 
2934*3446Smrj 	if (is_controlling_proc(st) < 0) {
2935*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2936*3446Smrj 		    "ioctl_agpgart_setup: not a controlling process"));
2937*3446Smrj 		return (EPERM);
2938*3446Smrj 	}
2939*3446Smrj 
2940*3446Smrj 	if (!IS_TRUE_AGP(st->asoft_devreg.agprd_arctype)) {
2941*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2942*3446Smrj 		    "ioctl_agpgart_setup: no true agp bridge"));
2943*3446Smrj 		return (EINVAL);
2944*3446Smrj 	}
2945*3446Smrj 
2946*3446Smrj 	if (ddi_copyin(arg, &data, sizeof (agp_setup_t), flags) != 0)
2947*3446Smrj 		return (EFAULT);
2948*3446Smrj 
2949*3446Smrj 	if (rc = agp_setup(st, data.agps_mode))
2950*3446Smrj 		return (rc);
2951*3446Smrj 	/* Store agp mode status for kstat */
2952*3446Smrj 	st->asoft_agpen = 1;
2953*3446Smrj 	return (0);
2954*3446Smrj }
2955*3446Smrj 
2956*3446Smrj static int
2957*3446Smrj ioctl_agpgart_alloc(agpgart_softstate_t  *st, void  *arg, int flags)
2958*3446Smrj {
2959*3446Smrj 	agp_allocate_t	alloc_info;
2960*3446Smrj 	keytable_ent_t	*entryp;
2961*3446Smrj 	size_t		length;
2962*3446Smrj 	uint64_t	pg_num;
2963*3446Smrj 
2964*3446Smrj 	if (is_controlling_proc(st) < 0) {
2965*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2966*3446Smrj 		    "ioctl_agpgart_alloc: not a controlling process"));
2967*3446Smrj 		return (EPERM);
2968*3446Smrj 	}
2969*3446Smrj 
2970*3446Smrj 	if (ddi_copyin(arg, &alloc_info,
2971*3446Smrj 	    sizeof (agp_allocate_t), flags) != 0) {
2972*3446Smrj 		return (EFAULT);
2973*3446Smrj 	}
2974*3446Smrj 	pg_num = st->asoft_pgused + alloc_info.agpa_pgcount;
2975*3446Smrj 	if (pg_num > st->asoft_pgtotal) {
2976*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2977*3446Smrj 		    "ioctl_agpgart_alloc: exceeding the memory pages limit"));
2978*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2979*3446Smrj 		    "ioctl_agpgart_alloc: request %x pages failed",
2980*3446Smrj 		    alloc_info.agpa_pgcount));
2981*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2982*3446Smrj 		    "ioctl_agpgart_alloc: pages used %x total is %x",
2983*3446Smrj 		    st->asoft_pgused, st->asoft_pgtotal));
2984*3446Smrj 
2985*3446Smrj 		return (EINVAL);
2986*3446Smrj 	}
2987*3446Smrj 
2988*3446Smrj 	length = AGP_PAGES2BYTES(alloc_info.agpa_pgcount);
2989*3446Smrj 	entryp = agp_alloc_mem(st, length, alloc_info.agpa_type);
2990*3446Smrj 	if (!entryp) {
2991*3446Smrj 		AGPDB_PRINT2((CE_WARN,
2992*3446Smrj 		    "ioctl_agpgart_alloc: allocate 0x%lx bytes failed",
2993*3446Smrj 		    length));
2994*3446Smrj 		return (ENOMEM);
2995*3446Smrj 	}
2996*3446Smrj 	ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
2997*3446Smrj 	alloc_info.agpa_key = entryp->kte_key;
2998*3446Smrj 	if (alloc_info.agpa_type == AGP_PHYSICAL) {
2999*3446Smrj 		alloc_info.agpa_physical =
3000*3446Smrj 		    (uint32_t)(entryp->kte_pfnarray[0] << AGP_PAGE_SHIFT);
3001*3446Smrj 	}
3002*3446Smrj 	/* Update the memory pagse used */
3003*3446Smrj 	st->asoft_pgused += alloc_info.agpa_pgcount;
3004*3446Smrj 
3005*3446Smrj 	if (ddi_copyout(&alloc_info, arg,
3006*3446Smrj 	    sizeof (agp_allocate_t), flags) != 0) {
3007*3446Smrj 
3008*3446Smrj 		return (EFAULT);
3009*3446Smrj 	}
3010*3446Smrj 
3011*3446Smrj 	return (0);
3012*3446Smrj }
3013*3446Smrj 
3014*3446Smrj static int
3015*3446Smrj ioctl_agpgart_dealloc(agpgart_softstate_t  *st, intptr_t arg)
3016*3446Smrj {
3017*3446Smrj 	int key;
3018*3446Smrj 	keytable_ent_t  *keyent;
3019*3446Smrj 
3020*3446Smrj 	if (is_controlling_proc(st) < 0) {
3021*3446Smrj 		AGPDB_PRINT2((CE_WARN,
3022*3446Smrj 		    "ioctl_agpgart_dealloc: not a controlling process"));
3023*3446Smrj 		return (EPERM);
3024*3446Smrj 	}
3025*3446Smrj 	key = (int)arg;
3026*3446Smrj 	if ((key >= AGP_MAXKEYS) || key < 0) {
3027*3446Smrj 		return (EINVAL);
3028*3446Smrj 	}
3029*3446Smrj 	keyent = &st->asoft_table[key];
3030*3446Smrj 	if (!keyent->kte_memhdl) {
3031*3446Smrj 		return (EINVAL);
3032*3446Smrj 	}
3033*3446Smrj 
3034*3446Smrj 	if (agp_dealloc_mem(st, keyent))
3035*3446Smrj 		return (EINVAL);
3036*3446Smrj 
3037*3446Smrj 	/* Update the memory pages used */
3038*3446Smrj 	st->asoft_pgused -= keyent->kte_pages;
3039*3446Smrj 	bzero(keyent, sizeof (keytable_ent_t));
3040*3446Smrj 
3041*3446Smrj 	return (0);
3042*3446Smrj }
3043*3446Smrj 
3044*3446Smrj static int
3045*3446Smrj ioctl_agpgart_bind(agpgart_softstate_t  *st, void  *arg, int flags)
3046*3446Smrj {
3047*3446Smrj 	agp_bind_t 	bind_info;
3048*3446Smrj 	keytable_ent_t	*keyent;
3049*3446Smrj 	int		key;
3050*3446Smrj 	uint32_t	pg_offset;
3051*3446Smrj 	int		retval = 0;
3052*3446Smrj 
3053*3446Smrj 	if (is_controlling_proc(st) < 0) {
3054*3446Smrj 		AGPDB_PRINT2((CE_WARN,
3055*3446Smrj 		    "ioctl_agpgart_bind: not a controlling process"));
3056*3446Smrj 		return (EPERM);
3057*3446Smrj 	}
3058*3446Smrj 
3059*3446Smrj 	if (ddi_copyin(arg, &bind_info, sizeof (agp_bind_t), flags) != 0) {
3060*3446Smrj 		return (EFAULT);
3061*3446Smrj 	}
3062*3446Smrj 
3063*3446Smrj 	key = bind_info.agpb_key;
3064*3446Smrj 	if ((key >= AGP_MAXKEYS) || key < 0) {
3065*3446Smrj 		AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_bind: invalid key"));
3066*3446Smrj 		return (EINVAL);
3067*3446Smrj 	}
3068*3446Smrj 
3069*3446Smrj 	if (IS_INTEL_830(st->asoft_devreg.agprd_arctype)) {
3070*3446Smrj 		if (AGP_PAGES2KB(bind_info.agpb_pgstart) <
3071*3446Smrj 		    st->asoft_info.agpki_presize) {
3072*3446Smrj 			AGPDB_PRINT2((CE_WARN,
3073*3446Smrj 			    "ioctl_agpgart_bind: bind to prealloc area"));
3074*3446Smrj 			return (EINVAL);
3075*3446Smrj 		}
3076*3446Smrj 	}
3077*3446Smrj 
3078*3446Smrj 	pg_offset = bind_info.agpb_pgstart;
3079*3446Smrj 	keyent = &st->asoft_table[key];
3080*3446Smrj 	if (!keyent->kte_memhdl) {
3081*3446Smrj 		AGPDB_PRINT2((CE_WARN,
3082*3446Smrj 		    "ioctl_agpgart_bind: Key = 0x%x can't get keyenty",
3083*3446Smrj 		    key));
3084*3446Smrj 		return (EINVAL);
3085*3446Smrj 	}
3086*3446Smrj 
3087*3446Smrj 	if (keyent->kte_bound != 0) {
3088*3446Smrj 		AGPDB_PRINT2((CE_WARN,
3089*3446Smrj 		    "ioctl_agpgart_bind: Key = 0x%x already bound",
3090*3446Smrj 		    key));
3091*3446Smrj 		return (EINVAL);
3092*3446Smrj 	}
3093*3446Smrj 	retval = agp_bind_key(st, keyent, pg_offset);
3094*3446Smrj 
3095*3446Smrj 	if (retval == 0) {
3096*3446Smrj 		keyent->kte_pgoff = pg_offset;
3097*3446Smrj 		keyent->kte_bound = 1;
3098*3446Smrj 	}
3099*3446Smrj 
3100*3446Smrj 	return (retval);
3101*3446Smrj }
3102*3446Smrj 
3103*3446Smrj static int
3104*3446Smrj ioctl_agpgart_unbind(agpgart_softstate_t  *st, void  *arg, int flags)
3105*3446Smrj {
3106*3446Smrj 	int key, retval = 0;
3107*3446Smrj 	agp_unbind_t unbindinfo;
3108*3446Smrj 	keytable_ent_t *keyent;
3109*3446Smrj 
3110*3446Smrj 	if (is_controlling_proc(st) < 0) {
3111*3446Smrj 		AGPDB_PRINT2((CE_WARN,
3112*3446Smrj 		    "ioctl_agpgart_bind: not a controlling process"));
3113*3446Smrj 		return (EPERM);
3114*3446Smrj 	}
3115*3446Smrj 
3116*3446Smrj 	if (ddi_copyin(arg, &unbindinfo, sizeof (unbindinfo), flags) != 0) {
3117*3446Smrj 		return (EFAULT);
3118*3446Smrj 	}
3119*3446Smrj 	key = unbindinfo.agpu_key;
3120*3446Smrj 	if ((key >= AGP_MAXKEYS) || key < 0) {
3121*3446Smrj 		AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_unbind: invalid key"));
3122*3446Smrj 		return (EINVAL);
3123*3446Smrj 	}
3124*3446Smrj 	keyent = &st->asoft_table[key];
3125*3446Smrj 	if (!keyent->kte_bound) {
3126*3446Smrj 		return (EINVAL);
3127*3446Smrj 	}
3128*3446Smrj 
3129*3446Smrj 	if ((retval = agp_unbind_key(st, keyent)) != 0)
3130*3446Smrj 		return (retval);
3131*3446Smrj 
3132*3446Smrj 	return (0);
3133*3446Smrj }
3134*3446Smrj 
3135*3446Smrj /*ARGSUSED*/
3136*3446Smrj static int
3137*3446Smrj agpgart_ioctl(dev_t dev, int cmd, intptr_t intarg, int flags,
3138*3446Smrj     cred_t *credp, int *rvalp)
3139*3446Smrj {
3140*3446Smrj 	int instance;
3141*3446Smrj 	int retval = 0;
3142*3446Smrj 	void *arg = (void*)intarg;
3143*3446Smrj 
3144*3446Smrj 	agpgart_softstate_t *softstate;
3145*3446Smrj 
3146*3446Smrj 	instance = AGP_DEV2INST(dev);
3147*3446Smrj 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
3148*3446Smrj 	if (softstate == NULL) {
3149*3446Smrj 		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: get soft state err"));
3150*3446Smrj 		return (ENXIO);
3151*3446Smrj 	}
3152*3446Smrj 
3153*3446Smrj 	if ((cmd != AGPIOC_INFO) && secpolicy_gart_access(credp)) {
3154*3446Smrj 		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: permission denied"));
3155*3446Smrj 		return (EPERM);
3156*3446Smrj 	}
3157*3446Smrj 
3158*3446Smrj 	mutex_enter(&softstate->asoft_instmutex);
3159*3446Smrj 
3160*3446Smrj 	switch (cmd) {
3161*3446Smrj 	case AGPIOC_INFO:
3162*3446Smrj 		retval = ioctl_agpgart_info(softstate, arg, flags);
3163*3446Smrj 		break;
3164*3446Smrj 	case AGPIOC_ACQUIRE:
3165*3446Smrj 		retval = ioctl_agpgart_acquire(softstate);
3166*3446Smrj 		break;
3167*3446Smrj 	case AGPIOC_RELEASE:
3168*3446Smrj 		retval = ioctl_agpgart_release(softstate);
3169*3446Smrj 		break;
3170*3446Smrj 	case AGPIOC_SETUP:
3171*3446Smrj 		retval = ioctl_agpgart_setup(softstate, arg, flags);
3172*3446Smrj 		break;
3173*3446Smrj 	case AGPIOC_ALLOCATE:
3174*3446Smrj 		retval = ioctl_agpgart_alloc(softstate, arg, flags);
3175*3446Smrj 		break;
3176*3446Smrj 	case AGPIOC_DEALLOCATE:
3177*3446Smrj 		retval = ioctl_agpgart_dealloc(softstate, intarg);
3178*3446Smrj 		break;
3179*3446Smrj 	case AGPIOC_BIND:
3180*3446Smrj 		retval = ioctl_agpgart_bind(softstate, arg, flags);
3181*3446Smrj 		break;
3182*3446Smrj 	case AGPIOC_UNBIND:
3183*3446Smrj 		retval = ioctl_agpgart_unbind(softstate, arg, flags);
3184*3446Smrj 		break;
3185*3446Smrj 	default:
3186*3446Smrj 		AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: wrong argument"));
3187*3446Smrj 		retval = ENXIO;
3188*3446Smrj 		break;
3189*3446Smrj 	}
3190*3446Smrj 
3191*3446Smrj 	mutex_exit(&softstate->asoft_instmutex);
3192*3446Smrj 	return (retval);
3193*3446Smrj }
3194*3446Smrj 
3195*3446Smrj static int
3196*3446Smrj agpgart_segmap(dev_t dev, off_t off, struct as *asp,
3197*3446Smrj     caddr_t *addrp, off_t len, unsigned int prot,
3198*3446Smrj     unsigned int maxprot, unsigned int flags, cred_t *credp)
3199*3446Smrj {
3200*3446Smrj 
3201*3446Smrj 	struct agpgart_softstate *softstate;
3202*3446Smrj 	int instance;
3203*3446Smrj 	int rc = 0;
3204*3446Smrj 
3205*3446Smrj 	instance = AGP_DEV2INST(dev);
3206*3446Smrj 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
3207*3446Smrj 	if (softstate == NULL) {
3208*3446Smrj 		AGPDB_PRINT2((CE_WARN, "agpgart_segmap: get soft state err"));
3209*3446Smrj 		return (ENXIO);
3210*3446Smrj 	}
3211*3446Smrj 	if (!AGP_ALIGNED(len))
3212*3446Smrj 		return (EINVAL);
3213*3446Smrj 
3214*3446Smrj 	mutex_enter(&softstate->asoft_instmutex);
3215*3446Smrj 
3216*3446Smrj 	/*
3217*3446Smrj 	 * Process must have gart map privilege or gart access privilege
3218*3446Smrj 	 * to map agp memory.
3219*3446Smrj 	 */
3220*3446Smrj 	if (secpolicy_gart_map(credp)) {
3221*3446Smrj 		mutex_exit(&softstate->asoft_instmutex);
3222*3446Smrj 		AGPDB_PRINT2((CE_WARN, "agpgart_segmap: permission denied"));
3223*3446Smrj 		return (EPERM);
3224*3446Smrj 	}
3225*3446Smrj 
3226*3446Smrj 	rc = devmap_setup(dev, (offset_t)off, asp, addrp,
3227*3446Smrj 	    (size_t)len, prot, maxprot, flags, credp);
3228*3446Smrj 
3229*3446Smrj 	mutex_exit(&softstate->asoft_instmutex);
3230*3446Smrj 	return (rc);
3231*3446Smrj }
3232*3446Smrj 
3233*3446Smrj /*ARGSUSED*/
3234*3446Smrj static int
3235*3446Smrj agpgart_devmap(dev_t dev, devmap_cookie_t cookie, offset_t offset, size_t len,
3236*3446Smrj     size_t *mappedlen, uint_t model)
3237*3446Smrj {
3238*3446Smrj 	struct agpgart_softstate *softstate;
3239*3446Smrj 	int instance, status;
3240*3446Smrj 	struct keytable_ent *mementry;
3241*3446Smrj 	offset_t local_offset;
3242*3446Smrj 
3243*3446Smrj 	instance = AGP_DEV2INST(dev);
3244*3446Smrj 	softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
3245*3446Smrj 	if (softstate == NULL) {
3246*3446Smrj 		AGPDB_PRINT2((CE_WARN, "agpgart_devmap: get soft state err"));
3247*3446Smrj 		return (ENXIO);
3248*3446Smrj 	}
3249*3446Smrj 
3250*3446Smrj 
3251*3446Smrj 	if (offset > MB2BYTES(softstate->asoft_info.agpki_apersize)) {
3252*3446Smrj 		AGPDB_PRINT2((CE_WARN, "agpgart_devmap: offset is too large"));
3253*3446Smrj 		return (EINVAL);
3254*3446Smrj 	}
3255*3446Smrj 
3256*3446Smrj 	/*
3257*3446Smrj 	 * Can not find any memory now, so fail.
3258*3446Smrj 	 */
3259*3446Smrj 
3260*3446Smrj 	mementry = agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset));
3261*3446Smrj 
3262*3446Smrj 	if (mementry == NULL) {
3263*3446Smrj 		AGPDB_PRINT2((CE_WARN,
3264*3446Smrj 		    "agpgart_devmap: can not find the proper keyent"));
3265*3446Smrj 		return (EINVAL);
3266*3446Smrj 	}
3267*3446Smrj 
3268*3446Smrj 	local_offset = offset - AGP_PAGES2BYTES(mementry->kte_pgoff);
3269*3446Smrj 
3270*3446Smrj 	if (len > (AGP_PAGES2BYTES(mementry->kte_pages) - local_offset)) {
3271*3446Smrj 		len = AGP_PAGES2BYTES(mementry->kte_pages) - local_offset;
3272*3446Smrj 	}
3273*3446Smrj 
3274*3446Smrj 	switch (mementry->kte_type) {
3275*3446Smrj 	case AGP_NORMAL:
3276*3446Smrj 		status = devmap_pmem_setup(cookie, softstate->asoft_dip,
3277*3446Smrj 		    &agp_devmap_cb,
3278*3446Smrj 		    PMEMP(mementry->kte_memhdl)->pmem_cookie, local_offset,
3279*3446Smrj 		    len, PROT_ALL, (DEVMAP_DEFAULTS|IOMEM_DATA_UC_WR_COMBINE),
3280*3446Smrj 		    &mem_dev_acc_attr);
3281*3446Smrj 		break;
3282*3446Smrj 	default:
3283*3446Smrj 		AGPDB_PRINT2((CE_WARN,
3284*3446Smrj 		    "agpgart_devmap: not a valid memory type"));
3285*3446Smrj 		return (EINVAL);
3286*3446Smrj 	}
3287*3446Smrj 
3288*3446Smrj 
3289*3446Smrj 	if (status == 0) {
3290*3446Smrj 		*mappedlen = len;
3291*3446Smrj 	} else {
3292*3446Smrj 		*mappedlen = 0;
3293*3446Smrj 		AGPDB_PRINT2((CE_WARN,
3294*3446Smrj 		    "agpgart_devmap: devmap interface failed"));
3295*3446Smrj 		return (EINVAL);
3296*3446Smrj 	}
3297*3446Smrj 
3298*3446Smrj 	return (0);
3299*3446Smrj }
3300*3446Smrj 
3301*3446Smrj static struct cb_ops	agpgart_cb_ops = {
3302*3446Smrj 	agpgart_open,		/* open() */
3303*3446Smrj 	agpgart_close,		/* close() */
3304*3446Smrj 	nodev,			/* strategy() */
3305*3446Smrj 	nodev,			/* print routine */
3306*3446Smrj 	nodev,			/* no dump routine */
3307*3446Smrj 	nodev,			/* read() */
3308*3446Smrj 	nodev,			/* write() */
3309*3446Smrj 	agpgart_ioctl,		/* agpgart_ioctl */
3310*3446Smrj 	agpgart_devmap,		/* devmap routine */
3311*3446Smrj 	nodev,			/* no longer use mmap routine */
3312*3446Smrj 	agpgart_segmap,		/* system segmap routine */
3313*3446Smrj 	nochpoll,		/* no chpoll routine */
3314*3446Smrj 	ddi_prop_op,		/* system prop operations */
3315*3446Smrj 	0,			/* not a STREAMS driver */
3316*3446Smrj 	D_DEVMAP | D_MP,	/* safe for multi-thread/multi-processor */
3317*3446Smrj 	CB_REV,			/* cb_ops version? */
3318*3446Smrj 	nodev,			/* cb_aread() */
3319*3446Smrj 	nodev,			/* cb_awrite() */
3320*3446Smrj };
3321*3446Smrj 
3322*3446Smrj static struct dev_ops agpgart_ops = {
3323*3446Smrj 	DEVO_REV,		/* devo_rev */
3324*3446Smrj 	0,			/* devo_refcnt */
3325*3446Smrj 	agpgart_getinfo,	/* devo_getinfo */
3326*3446Smrj 	nulldev,		/* devo_identify */
3327*3446Smrj 	nulldev,		/* devo_probe */
3328*3446Smrj 	agpgart_attach,		/* devo_attach */
3329*3446Smrj 	agpgart_detach,		/* devo_detach */
3330*3446Smrj 	nodev,			/* devo_reset */
3331*3446Smrj 	&agpgart_cb_ops,	/* devo_cb_ops */
3332*3446Smrj 	(struct bus_ops *)0,	/* devo_bus_ops */
3333*3446Smrj 	NULL,			/* devo_power */
3334*3446Smrj };
3335*3446Smrj 
3336*3446Smrj static	struct modldrv modldrv = {
3337*3446Smrj 	&mod_driverops,
3338*3446Smrj 	"AGP driver v%I%",
3339*3446Smrj 	&agpgart_ops,
3340*3446Smrj };
3341*3446Smrj 
3342*3446Smrj static struct modlinkage modlinkage = {
3343*3446Smrj 	MODREV_1,		/* MODREV_1 is indicated by manual */
3344*3446Smrj 	{&modldrv, NULL, NULL, NULL}
3345*3446Smrj };
3346*3446Smrj 
3347*3446Smrj static void *agpgart_glob_soft_handle;
3348*3446Smrj 
3349*3446Smrj int
3350*3446Smrj _init(void)
3351*3446Smrj {
3352*3446Smrj 	int ret = DDI_SUCCESS;
3353*3446Smrj 
3354*3446Smrj 	ret = ddi_soft_state_init(&agpgart_glob_soft_handle,
3355*3446Smrj 		    sizeof (agpgart_softstate_t),
3356*3446Smrj 		    AGPGART_MAX_INSTANCES);
3357*3446Smrj 
3358*3446Smrj 	if (ret != 0) {
3359*3446Smrj 		AGPDB_PRINT2((CE_WARN,
3360*3446Smrj 		    "_init: soft state init error code=0x%x", ret));
3361*3446Smrj 		return (ret);
3362*3446Smrj 	}
3363*3446Smrj 
3364*3446Smrj 	if ((ret = mod_install(&modlinkage)) != 0) {
3365*3446Smrj 		AGPDB_PRINT2((CE_WARN,
3366*3446Smrj 		    "_init: mod install error code=0x%x", ret));
3367*3446Smrj 		ddi_soft_state_fini(&agpgart_glob_soft_handle);
3368*3446Smrj 		return (ret);
3369*3446Smrj 	}
3370*3446Smrj 
3371*3446Smrj 	return (DDI_SUCCESS);
3372*3446Smrj }
3373*3446Smrj 
3374*3446Smrj int
3375*3446Smrj _info(struct modinfo *modinfop)
3376*3446Smrj {
3377*3446Smrj 	return (mod_info(&modlinkage, modinfop));
3378*3446Smrj }
3379*3446Smrj 
3380*3446Smrj int
3381*3446Smrj _fini(void)
3382*3446Smrj {
3383*3446Smrj 	int ret;
3384*3446Smrj 
3385*3446Smrj 	if ((ret = mod_remove(&modlinkage)) == 0) {
3386*3446Smrj 		ddi_soft_state_fini(&agpgart_glob_soft_handle);
3387*3446Smrj 	}
3388*3446Smrj 
3389*3446Smrj 	return (ret);
3390*3446Smrj }
3391