xref: /csrg-svn/sys/vax/uba/uba.c (revision 30389)
123352Smckusick /*
229248Smckusick  * Copyright (c) 1982, 1986 Regents of the University of California.
323352Smckusick  * All rights reserved.  The Berkeley software License Agreement
423352Smckusick  * specifies the terms and conditions for redistribution.
523352Smckusick  *
6*30389Skarels  *	@(#)uba.c	7.3 (Berkeley) 01/14/87
723352Smckusick  */
840Sbill 
99780Ssam #include "../machine/pte.h"
109780Ssam 
1117081Sbloom #include "param.h"
1217081Sbloom #include "systm.h"
1317081Sbloom #include "map.h"
1417081Sbloom #include "buf.h"
1517081Sbloom #include "vm.h"
1617081Sbloom #include "dir.h"
1717081Sbloom #include "user.h"
1817081Sbloom #include "proc.h"
1917081Sbloom #include "conf.h"
20*30389Skarels #include "dkstat.h"
2117081Sbloom #include "kernel.h"
2240Sbill 
238481Sroot #include "../vax/cpu.h"
248481Sroot #include "../vax/mtpr.h"
258481Sroot #include "../vax/nexus.h"
2617081Sbloom #include "ubareg.h"
2717081Sbloom #include "ubavar.h"
288481Sroot 
2929737Skarels #ifdef DW780
302929Swnj char	ubasr_bits[] = UBASR_BITS;
312929Swnj #endif
322929Swnj 
3326371Skarels #define	spluba	spl7		/* IPL 17 */
3426371Skarels 
3540Sbill /*
362570Swnj  * Do transfer on device argument.  The controller
372570Swnj  * and uba involved are implied by the device.
382570Swnj  * We queue for resource wait in the uba code if necessary.
392570Swnj  * We return 1 if the transfer was started, 0 if it was not.
402570Swnj  * If you call this routine with the head of the queue for a
412570Swnj  * UBA, it will automatically remove the device from the UBA
422570Swnj  * queue before it returns.  If some other device is given
432570Swnj  * as argument, it will be added to the request queue if the
442570Swnj  * request cannot be started immediately.  This means that
452570Swnj  * passing a device which is on the queue but not at the head
462570Swnj  * of the request queue is likely to be a disaster.
472570Swnj  */
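/*
 * Illustrative caller's view (a sketch, not code from this file):
 *
 *	if (ubago(ui) == 0)
 *		return;		(no resources; ui is queued on uh_actf and
 *				 ubarelse() will retry it when they free up)
 *	(started: the driver's ud_dgo routine has already been called)
 */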
482570Swnj ubago(ui)
492958Swnj 	register struct uba_device *ui;
502570Swnj {
512958Swnj 	register struct uba_ctlr *um = ui->ui_mi;
522570Swnj 	register struct uba_hd *uh;
532570Swnj 	register int s, unit;
542570Swnj 
552570Swnj 	uh = &uba_hd[um->um_ubanum];
5626371Skarels 	s = spluba();
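	/*
	 * Can't start if this driver wants the UBA exclusively while others
	 * are using it, or if some other driver already has exclusive use.
	 */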
572628Swnj 	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
582616Swnj 		goto rwait;
592570Swnj 	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
602570Swnj 	    UBA_NEEDBDP|UBA_CANTWAIT);
612616Swnj 	if (um->um_ubinfo == 0)
622616Swnj 		goto rwait;
632616Swnj 	uh->uh_users++;
642628Swnj 	if (um->um_driver->ud_xclu)
652616Swnj 		uh->uh_xclu = 1;
662570Swnj 	splx(s);
672570Swnj 	if (ui->ui_dk >= 0) {
682570Swnj 		unit = ui->ui_dk;
692570Swnj 		dk_busy |= 1<<unit;
706348Swnj 		dk_xfer[unit]++;
716348Swnj 		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
722570Swnj 	}
732570Swnj 	if (uh->uh_actf == ui)
742570Swnj 		uh->uh_actf = ui->ui_forw;
752570Swnj 	(*um->um_driver->ud_dgo)(um);
762570Swnj 	return (1);
772616Swnj rwait:
782616Swnj 	if (uh->uh_actf != ui) {
792616Swnj 		ui->ui_forw = NULL;
802616Swnj 		if (uh->uh_actf == NULL)
812616Swnj 			uh->uh_actf = ui;
822616Swnj 		else
832616Swnj 			uh->uh_actl->ui_forw = ui;
842616Swnj 		uh->uh_actl = ui;
852616Swnj 	}
862616Swnj 	splx(s);
872616Swnj 	return (0);
882570Swnj }
892570Swnj 
902616Swnj ubadone(um)
912958Swnj 	register struct uba_ctlr *um;
922616Swnj {
932616Swnj 	register struct uba_hd *uh = &uba_hd[um->um_ubanum];
942616Swnj 
952628Swnj 	if (um->um_driver->ud_xclu)
962616Swnj 		uh->uh_xclu = 0;
972616Swnj 	uh->uh_users--;
982616Swnj 	ubarelse(um->um_ubanum, &um->um_ubinfo);
992616Swnj }
1002616Swnj 
1012570Swnj /*
1022395Swnj  * Allocate and set up UBA map registers, and a bdp if needed.
1032395Swnj  * Flags say whether a bdp is needed and whether the caller can't
1042395Swnj  * wait (e.g. if the caller is at interrupt level).
10540Sbill  *
1062570Swnj  * Return value:
10740Sbill  *	Bits 0-8	Byte offset
10840Sbill  *	Bits 9-17	Start map reg. no.
10940Sbill  *	Bits 18-27	No. mapping reg's
11040Sbill  *	Bits 28-31	BDP no.
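 *
 *	For illustration, the fields unpack just as ubarelse() decodes them:
 *		bdp = (ubinfo >> 28) & 0xf;
 *		npf = (ubinfo >> 18) & 0x3ff;
 *		reg = (ubinfo >> 9) & 0x1ff;	(0-origin map register;
 *						 ubarelse() adds 1 back for
 *						 the resource map)
 *		off = ubinfo & 0x1ff;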
11140Sbill  */
1122395Swnj ubasetup(uban, bp, flags)
1132395Swnj 	struct buf *bp;
11440Sbill {
1152395Swnj 	register struct uba_hd *uh = &uba_hd[uban];
11618417Smckusick 	int pfnum, temp;
11740Sbill 	int npf, reg, bdp;
11840Sbill 	unsigned v;
11940Sbill 	register struct pte *pte, *io;
12040Sbill 	struct proc *rp;
12140Sbill 	int a, o, ubinfo;
12240Sbill 
12329737Skarels #ifdef DW730
12429737Skarels 	if (uh->uh_type == DW730)
1253332Swnj 		flags &= ~UBA_NEEDBDP;
1263332Swnj #endif
12729737Skarels #ifdef QBA
12829737Skarels 	if (uh->uh_type == QBA)
12929737Skarels 		flags &= ~UBA_NEEDBDP;
13029737Skarels #endif
13140Sbill 	v = btop(bp->b_un.b_addr);
13240Sbill 	o = (int)bp->b_un.b_addr & PGOFSET;
13340Sbill 	npf = btoc(bp->b_bcount + o) + 1;
13426371Skarels 	a = spluba();
1358811Sroot 	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
1363913Swnj 		if (flags & UBA_CANTWAIT) {
1373913Swnj 			splx(a);
1382395Swnj 			return (0);
1393913Swnj 		}
1402395Swnj 		uh->uh_mrwant++;
1419353Ssam 		sleep((caddr_t)&uh->uh_mrwant, PSWP);
14240Sbill 	}
14317731Skarels 	if ((flags & UBA_NEED16) && reg + npf > 128) {
14417731Skarels 		/*
14517731Skarels 		 * Could hang around and try again (if we can ever succeed).
14617731Skarels 		 * Won't help any current device...
14717731Skarels 		 */
14817731Skarels 		rmfree(uh->uh_map, (long)npf, (long)reg);
14917731Skarels 		splx(a);
15017731Skarels 		return (0);
15117731Skarels 	}
15240Sbill 	bdp = 0;
1532395Swnj 	if (flags & UBA_NEEDBDP) {
15426371Skarels 		while ((bdp = ffs((long)uh->uh_bdpfree)) == 0) {
1552395Swnj 			if (flags & UBA_CANTWAIT) {
1568811Sroot 				rmfree(uh->uh_map, (long)npf, (long)reg);
1573913Swnj 				splx(a);
1582395Swnj 				return (0);
1592395Swnj 			}
1602395Swnj 			uh->uh_bdpwant++;
1619353Ssam 			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
16240Sbill 		}
1632463Swnj 		uh->uh_bdpfree &= ~(1 << (bdp-1));
1644758Swnj 	} else if (flags & UBA_HAVEBDP)
1654758Swnj 		bdp = (flags >> 28) & 0xf;
16640Sbill 	splx(a);
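	/* rmalloc() returns a 1-origin index; the hardware map registers are 0-origin */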
1672463Swnj 	reg--;
16840Sbill 	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
1692958Swnj 	temp = (bdp << 21) | UBAMR_MRV;
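	/* a buffered data path transfer starting at an odd byte needs the byte-offset bit */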
17040Sbill 	if (bdp && (o & 01))
1712958Swnj 		temp |= UBAMR_BO;
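	/* pages being pushed by the pageout daemon (B_DIRTY) are mapped via proc[2]'s page tables */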
1726382Swnj 	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
1736382Swnj 	if ((bp->b_flags & B_PHYS) == 0)
174728Sbill 		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
1756382Swnj 	else if (bp->b_flags & B_UAREA)
1766382Swnj 		pte = &rp->p_addr[v];
1776382Swnj 	else if (bp->b_flags & B_PAGET)
1786382Swnj 		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
1796382Swnj 	else
1806382Swnj 		pte = vtopte(rp, v);
18129737Skarels 	io = &uh->uh_mr[reg];
1826382Swnj 	while (--npf != 0) {
18318417Smckusick 		pfnum = pte->pg_pfnum;
18418417Smckusick 		if (pfnum == 0)
1856382Swnj 			panic("uba zero uentry");
18618417Smckusick 		pte++;
18718417Smckusick 		*(int *)io++ = pfnum | temp;
18840Sbill 	}
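	/* leave an invalid map register behind the transfer to fence off runaway DMA */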
18940Sbill 	*(int *)io++ = 0;
19040Sbill 	return (ubinfo);
19140Sbill }
19240Sbill 
19340Sbill /*
1952570Swnj  * Non-buffer setup interface... set up a dummy buffer and call ubasetup.
19540Sbill  */
1962395Swnj uballoc(uban, addr, bcnt, flags)
1973107Swnj 	int uban;
19840Sbill 	caddr_t addr;
1993107Swnj 	int bcnt, flags;
20040Sbill {
201883Sbill 	struct buf ubabuf;
20240Sbill 
20340Sbill 	ubabuf.b_un.b_addr = addr;
20440Sbill 	ubabuf.b_flags = B_BUSY;
20540Sbill 	ubabuf.b_bcount = bcnt;
206883Sbill 	/* that's all the fields ubasetup() needs */
2072395Swnj 	return (ubasetup(uban, &ubabuf, flags));
20840Sbill }
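/*
 * Illustrative use (hypothetical driver code, not from this file):
 *
 *	int info;
 *
 *	info = uballoc(ui->ui_ubanum, (caddr_t)cbuf, cnt, UBA_CANTWAIT);
 *	if (info == 0)
 *		...				(no UBA resources available)
 *	(bits 0-17 of info form the UNIBUS address to hand to the device)
 *	...
 *	ubarelse(ui->ui_ubanum, &info);
 */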
20940Sbill 
2102053Swnj /*
2112570Swnj  * Release resources on uba uban, and then unblock resource waiters.
2122570Swnj  * The map register info is picked up (by value) at high ipl since
2132570Swnj  * a uba reset on an 11/780 may release it asynchronously.
2142053Swnj  */
2152395Swnj ubarelse(uban, amr)
2162053Swnj 	int *amr;
21740Sbill {
2182395Swnj 	register struct uba_hd *uh = &uba_hd[uban];
2192570Swnj 	register int bdp, reg, npf, s;
2202053Swnj 	int mr;
22140Sbill 
2222570Swnj 	/*
2232570Swnj 	 * Carefully see if we should release the space, since
2242570Swnj 	 * it may be released asynchronously at uba reset time.
2252570Swnj 	 */
22626371Skarels 	s = spluba();
2272053Swnj 	mr = *amr;
2282053Swnj 	if (mr == 0) {
2292570Swnj 		/*
2302570Swnj 		 * A ubareset() occurred before we got around
2312570Swnj 		 * to releasing the space... no need to bother.
2322570Swnj 		 */
2332570Swnj 		splx(s);
2342053Swnj 		return;
2352053Swnj 	}
2362067Swnj 	*amr = 0;
23740Sbill 	bdp = (mr >> 28) & 0x0f;
23840Sbill 	if (bdp) {
23929737Skarels 		switch (uh->uh_type) {
24029737Skarels #ifdef DW780
24129737Skarels 		case DW780:
2422958Swnj 			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
2432423Skre 			break;
2442423Skre #endif
24529737Skarels #ifdef DW750
24629737Skarels 		case DW750:
2472958Swnj 			uh->uh_uba->uba_dpr[bdp] |=
2482958Swnj 			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
2492423Skre 			break;
2502423Skre #endif
25129737Skarels 		default:
25229737Skarels 			break;
2532423Skre 		}
2542570Swnj 		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
2552395Swnj 		if (uh->uh_bdpwant) {
2562395Swnj 			uh->uh_bdpwant = 0;
2579353Ssam 			wakeup((caddr_t)&uh->uh_bdpwant);
25840Sbill 		}
25940Sbill 	}
2602570Swnj 	/*
2612570Swnj 	 * Put back the registers in the resource map.
26217731Skarels 	 * The map code must not be reentered,
26317731Skarels 	 * nor can the registers be freed twice.
26417731Skarels 	 * Unblock interrupts once this is done.
2652570Swnj 	 */
26640Sbill 	npf = (mr >> 18) & 0x3ff;
26740Sbill 	reg = ((mr >> 9) & 0x1ff) + 1;
2688811Sroot 	rmfree(uh->uh_map, (long)npf, (long)reg);
2692570Swnj 	splx(s);
2702570Swnj 
2712570Swnj 	/*
2722570Swnj 	 * Wakeup sleepers for map registers,
2732570Swnj 	 * and also, if there are transfers queued by ubago(),
2742570Swnj 	 * give them a chance at the UNIBUS.
2752570Swnj 	 */
2762395Swnj 	if (uh->uh_mrwant) {
2772395Swnj 		uh->uh_mrwant = 0;
2789353Ssam 		wakeup((caddr_t)&uh->uh_mrwant);
27940Sbill 	}
2802570Swnj 	while (uh->uh_actf && ubago(uh->uh_actf))
2812570Swnj 		;
28240Sbill }
28340Sbill 
2842729Swnj ubapurge(um)
2852958Swnj 	register struct uba_ctlr *um;
2862729Swnj {
2872729Swnj 	register struct uba_hd *uh = um->um_hd;
2882729Swnj 	register int bdp = (um->um_ubinfo >> 28) & 0x0f;
2892729Swnj 
29029737Skarels 	switch (uh->uh_type) {
29129737Skarels #ifdef DW780
29229737Skarels 	case DW780:
2932958Swnj 		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
2942729Swnj 		break;
2952729Swnj #endif
29629737Skarels #ifdef DW750
29729737Skarels 	case DW750:
2982958Swnj 		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
2992729Swnj 		break;
3002729Swnj #endif
30129737Skarels 	default:
30229737Skarels 		break;
3032729Swnj 	}
3042729Swnj }
3052729Swnj 
3066863Swnj ubainitmaps(uhp)
3076863Swnj 	register struct uba_hd *uhp;
3086863Swnj {
3096863Swnj 
31029737Skarels 	rminit(uhp->uh_map, (long)uhp->uh_memsize, (long)1, "uba", UAMSIZ);
31129737Skarels 	switch (uhp->uh_type) {
31229737Skarels #ifdef DW780
31329737Skarels 	case DW780:
3146863Swnj 		uhp->uh_bdpfree = (1<<NBDP780) - 1;
3156863Swnj 		break;
3166863Swnj #endif
31729737Skarels #ifdef DW750
31829737Skarels 	case DW750:
3196863Swnj 		uhp->uh_bdpfree = (1<<NBDP750) - 1;
3206863Swnj 		break;
3216863Swnj #endif
32229737Skarels 	default:
3236863Swnj 		break;
3246863Swnj 	}
3256863Swnj }
3266863Swnj 
3272570Swnj /*
3282570Swnj  * Generate a reset on uba number uban.  Then
3292570Swnj  * call each driver in the character device table,
3302570Swnj  * giving it a chance to clean up so as to be able to continue.
3312570Swnj  */
3322395Swnj ubareset(uban)
3332570Swnj 	int uban;
334284Sbill {
335284Sbill 	register struct cdevsw *cdp;
3362646Swnj 	register struct uba_hd *uh = &uba_hd[uban];
3371781Sbill 	int s;
338284Sbill 
33926371Skarels 	s = spluba();
3402646Swnj 	uh->uh_users = 0;
3412646Swnj 	uh->uh_zvcnt = 0;
3422646Swnj 	uh->uh_xclu = 0;
3432646Swnj 	uh->uh_actf = uh->uh_actl = 0;
3442646Swnj 	uh->uh_bdpwant = 0;
3452646Swnj 	uh->uh_mrwant = 0;
3466863Swnj 	ubainitmaps(uh);
3472646Swnj 	wakeup((caddr_t)&uh->uh_bdpwant);
3482646Swnj 	wakeup((caddr_t)&uh->uh_mrwant);
3492958Swnj 	printf("uba%d: reset", uban);
3502958Swnj 	ubainit(uh->uh_uba);
35117731Skarels 	ubameminit(uban);
35211722Ssam 	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
3532395Swnj 		(*cdp->d_reset)(uban);
3545221Swnj 	ifubareset(uban);
355284Sbill 	printf("\n");
356302Sbill 	splx(s);
357284Sbill }
3582395Swnj 
3592570Swnj /*
3602570Swnj  * Init a uba.  This is called with a pointer
3612570Swnj  * rather than a virtual address since it is called
3622570Swnj  * by code which runs with memory mapping disabled.
3632570Swnj  * In these cases we really don't need the interrupts
3642570Swnj  * enabled, but since we run with ipl high, we don't care
3652570Swnj  * if they are; they will never happen anyway.
36629737Skarels  * SHOULD GET POINTER TO UBA_HD INSTEAD OF UBA.
3672570Swnj  */
3682423Skre ubainit(uba)
3692423Skre 	register struct uba_regs *uba;
3702395Swnj {
37129737Skarels 	register struct uba_hd *uhp;
37229737Skarels 	int isphys = 0;
3732395Swnj 
37429737Skarels 	for (uhp = uba_hd; uhp < uba_hd + numuba; uhp++) {
37529737Skarels 		if (uhp->uh_uba == uba)
37629737Skarels 			break;
37729737Skarels 		if (uhp->uh_physuba == uba) {
37829737Skarels 			isphys++;
37929737Skarels 			break;
38029737Skarels 		}
38129737Skarels 	}
38229737Skarels 	if (uhp >= uba_hd + numuba) {
38329737Skarels 		printf("init unknown uba\n");
38429737Skarels 		return;
38529737Skarels 	}
38629737Skarels 
38729737Skarels 	switch (uhp->uh_type) {
38829737Skarels #ifdef DW780
38929737Skarels 	case DW780:
3902958Swnj 		uba->uba_cr = UBACR_ADINIT;
3912958Swnj 		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
3922958Swnj 		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
3932958Swnj 			;
3942958Swnj 		break;
3952958Swnj #endif
39629737Skarels #ifdef DW750
39729737Skarels 	case DW750:
3983352Swnj #endif
39929737Skarels #ifdef DW730
40029737Skarels 	case DW730:
4013352Swnj #endif
40229737Skarels #ifdef QBA
40329737Skarels 	case QBA:
40427255Skridle #endif
40529737Skarels #if DW750 || DW730 || QBA
4063352Swnj 		mtpr(IUR, 0);
4072958Swnj 		/* give devices time to recover from power fail */
4083332Swnj /* THIS IS PROBABLY UNNECESSARY */
4093352Swnj 		DELAY(500000);
4103332Swnj /* END PROBABLY UNNECESSARY */
41129737Skarels #ifdef QBA
41229737Skarels 		/*
41329737Skarels 		 * Re-enable local memory access
41429737Skarels 		 * from the Q-bus.
41529737Skarels 		 */
41629737Skarels 		if (uhp->uh_type == QBA) {
41729737Skarels 			if (isphys)
41829737Skarels 				*((char *)QIOPAGE630 + QIPCR) = Q_LMEAE;
41929737Skarels 			else
42029737Skarels 				*(uhp->uh_iopage + QIPCR) = Q_LMEAE;
42129737Skarels 		}
42229737Skarels #endif /* QBA */
4232958Swnj 		break;
42429737Skarels #endif /* DW750 || DW730 || QBA */
4252958Swnj 	}
4262395Swnj }
4272395Swnj 
42829737Skarels #ifdef DW780
4294024Swnj int	ubawedgecnt = 10;
4304024Swnj int	ubacrazy = 500;
43117731Skarels int	zvcnt_max = 5000;	/* in 8 sec */
4322570Swnj /*
43324500Sbloom  * This routine is called by the locore code to process a UBA
43424500Sbloom  * error on an 11/780 or 8600.  The arguments are passed
4352570Swnj  * on the stack and are value-result (through some trickery).
4362570Swnj  * In particular, the uvec argument is used for further
4372570Swnj  * uba processing so the result aspect of it is very important.
4382570Swnj  * It must not be declared register.
4392570Swnj  */
4402423Skre /*ARGSUSED*/
44117731Skarels ubaerror(uban, uh, ipl, uvec, uba)
4422395Swnj 	register int uban;
4432395Swnj 	register struct uba_hd *uh;
44417731Skarels 	int ipl, uvec;
4452395Swnj 	register struct uba_regs *uba;
4462395Swnj {
4472395Swnj 	register sr, s;
4482395Swnj 
4492395Swnj 	if (uvec == 0) {
45026215Skarels 		/*
45126215Skarels 		 * Declare dt as unsigned so that negative values
45226215Skarels 		 * are handled as >8 below, in case time was set back.
45326215Skarels 		 */
45426215Skarels 		u_long	dt = time.tv_sec - uh->uh_zvtime;
45526215Skarels 
45626215Skarels 		uh->uh_zvtotal++;
45717731Skarels 		if (dt > 8) {
45826215Skarels 			uh->uh_zvtime = time.tv_sec;
45917731Skarels 			uh->uh_zvcnt = 0;
46017731Skarels 		}
46117731Skarels 		if (++uh->uh_zvcnt > zvcnt_max) {
46217731Skarels 			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
46317731Skarels 				uban, uh->uh_zvcnt, dt + 1);
46417731Skarels 			printf("\tIPL 0x%x\n\tcnfgr: %b  Adapter Code: 0x%x\n",
46517731Skarels 				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
46617731Skarels 				uba->uba_cnfgr&0xff);
46717731Skarels 			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
46817731Skarels 				uba->uba_sr, ubasr_bits, uba->uba_dcr,
46917731Skarels 				(uba->uba_dcr&0x8000000)?"":"NOT ");
4702395Swnj 			ubareset(uban);
4712395Swnj 		}
4722395Swnj 		return;
4732395Swnj 	}
4742395Swnj 	if (uba->uba_cnfgr & NEX_CFGFLT) {
4752929Swnj 		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
4762929Swnj 		    uban, uba->uba_sr, ubasr_bits,
4773248Swnj 		    uba->uba_cnfgr, NEXFLT_BITS);
4782395Swnj 		ubareset(uban);
4792395Swnj 		uvec = 0;
4802395Swnj 		return;
4812395Swnj 	}
4822395Swnj 	sr = uba->uba_sr;
48326371Skarels 	s = spluba();
4843473Swnj 	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
4853473Swnj 	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
4862395Swnj 	splx(s);
4872395Swnj 	uba->uba_sr = sr;
4882958Swnj 	uvec &= UBABRRVR_DIV;
4894024Swnj 	if (++uh->uh_errcnt % ubawedgecnt == 0) {
4904024Swnj 		if (uh->uh_errcnt > ubacrazy)
4914024Swnj 			panic("uba crazy");
4924024Swnj 		printf("ERROR LIMIT ");
4934024Swnj 		ubareset(uban);
4944024Swnj 		uvec = 0;
4954024Swnj 		return;
4964024Swnj 	}
4972395Swnj 	return;
4982395Swnj }
4992395Swnj #endif
5003745Sroot 
5013745Sroot /*
50217731Skarels  * Look for devices with unibus memory, allow them to configure, then disable
50317731Skarels  * map registers as necessary.  Called during autoconfiguration and ubareset.
50417731Skarels  * The device ubamem routine returns 0 on success, 1 on success if it is fully
50517731Skarels  * configured (has no csr or interrupt, so doesn't need to be probed),
50617731Skarels  * and -1 on failure.
50717731Skarels  */
50817731Skarels ubameminit(uban)
50917731Skarels {
51017731Skarels 	register struct uba_device *ui;
51117731Skarels 	register struct uba_hd *uh = &uba_hd[uban];
51217731Skarels 	caddr_t umembase = umem[uban] + 0x3e000, addr;
51317731Skarels #define	ubaoff(off)	((int)(off) & 0x1fff)
51417731Skarels 
51517731Skarels 	uh->uh_lastmem = 0;
51617731Skarels 	for (ui = ubdinit; ui->ui_driver; ui++) {
51717731Skarels 		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
51817731Skarels 			continue;
51917731Skarels 		if (ui->ui_driver->ud_ubamem) {
52017731Skarels 			/*
52117731Skarels 			 * During autoconfiguration, need to fudge ui_addr.
52217731Skarels 			 */
52317731Skarels 			addr = ui->ui_addr;
52417731Skarels 			ui->ui_addr = umembase + ubaoff(addr);
52517731Skarels 			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
52617731Skarels 			case 1:
52717731Skarels 				ui->ui_alive = 1;
52817731Skarels 				/* FALLTHROUGH */
52917731Skarels 			case 0:
53017731Skarels 				ui->ui_ubanum = uban;
53117731Skarels 				break;
53217731Skarels 			}
53317731Skarels 			ui->ui_addr = addr;
53417731Skarels 		}
53517731Skarels 	}
53629737Skarels #ifdef DW780
53717731Skarels 	/*
53829737Skarels 	 * On a DW780, throw away any map registers disabled by rounding
53917731Skarels 	 * the map disable in the configuration register
54017731Skarels 	 * up to the next 8K boundary, or below the last unibus memory.
54117731Skarels 	 */
54229737Skarels 	if (uh->uh_type == DW780) {
54317731Skarels 		register i;
54417731Skarels 
54517731Skarels 		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
54617731Skarels 		while (i)
54717731Skarels 			(void) rmget(uh->uh_map, 1, i--);
54817731Skarels 	}
54917731Skarels #endif
55017731Skarels }
55117731Skarels 
55217731Skarels /*
55314790Ssam  * Allocate UNIBUS memory.  Allocates and initializes
55414790Ssam  * sufficient mapping registers for access.  On a 780,
55514790Ssam  * the configuration register is set up to disable UBA
55614790Ssam  * response on DMA transfers to addresses controlled
55714790Ssam  * by the disabled mapping registers.
55829737Skarels  * On a DW780, should only be called from ubameminit, or in ascending order
55917731Skarels  * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
56017731Skarels  * the last unibus memory would free unusable map registers.
56117731Skarels  * Doalloc is 1 to allocate, 0 to deallocate.
5626518Sfeldman  */
56314790Ssam ubamem(uban, addr, npg, doalloc)
56414790Ssam 	int uban, addr, npg, doalloc;
5656518Sfeldman {
5666518Sfeldman 	register struct uba_hd *uh = &uba_hd[uban];
56714790Ssam 	register int a;
56817731Skarels 	int s;
5696518Sfeldman 
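	/* convert the UNIBUS address to a 1-origin map register index (512-byte pages) */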
57017731Skarels 	a = (addr >> 9) + 1;
57126371Skarels 	s = spluba();
57217731Skarels 	if (doalloc)
57317731Skarels 		a = rmget(uh->uh_map, npg, a);
57417731Skarels 	else
57517731Skarels 		rmfree(uh->uh_map, (long)npg, (long)a);
57617731Skarels 	splx(s);
5776518Sfeldman 	if (a) {
57814790Ssam 		register int i, *m;
57914790Ssam 
58029737Skarels 		m = (int *)&uh->uh_mr[a - 1];
58114790Ssam 		for (i = 0; i < npg; i++)
5826518Sfeldman 			*m++ = 0;	/* All off, especially 'valid' */
58317731Skarels 		i = addr + npg * 512;
58417731Skarels 		if (doalloc && i > uh->uh_lastmem)
58517731Skarels 			uh->uh_lastmem = i;
58617731Skarels 		else if (doalloc == 0 && i == uh->uh_lastmem)
58717731Skarels 			uh->uh_lastmem = addr;
58829737Skarels #ifdef DW780
58914790Ssam 		/*
59014790Ssam 		 * On a 780, set up the map register disable
59114790Ssam 		 * field in the configuration register.  Beware
59217731Skarels 		 * of callers that request memory ``out of order''
59317731Skarels 		 * or in sections other than 8K multiples.
59417731Skarels 		 * Ubameminit handles such requests properly, however.
59514790Ssam 		 */
59629737Skarels 		if (uh->uh_type == DW780) {
59717731Skarels 			i = uh->uh_uba->uba_cr &~ 0x7c000000;
59817731Skarels 			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
59917731Skarels 			uh->uh_uba->uba_cr = i;
6007473Sfeldman 		}
6017473Sfeldman #endif
6026518Sfeldman 	}
60314790Ssam 	return (a);
6046518Sfeldman }
6057304Ssam 
6069875Ssam #include "ik.h"
60724501Sjg #include "vs.h"
60824501Sjg #if NIK > 0 || NVS > 0
6097304Ssam /*
6107304Ssam  * Map a virtual address into the user's address space. Actually all we
6117304Ssam  * do is turn on user-mode write access in the protection bits for the
6127304Ssam  * particular page of memory involved.
6137304Ssam  */
6147304Ssam maptouser(vaddress)
6157304Ssam 	caddr_t vaddress;
6167304Ssam {
6177304Ssam 
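	/* system virtual addresses begin at 0x80000000; >> 9 gives the Sysmap index (512-byte pages) */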
6187304Ssam 	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
6197304Ssam }
6207304Ssam 
6217304Ssam unmaptouser(vaddress)
6227304Ssam 	caddr_t vaddress;
6237304Ssam {
6247304Ssam 
6257304Ssam 	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
6267304Ssam }
6279174Ssam #endif
628