/*
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)uba.c	7.10 (Berkeley) 12/16/90
 */

#include "sys/param.h"
#include "sys/systm.h"
#include "sys/map.h"
#include "sys/buf.h"
#include "sys/vm.h"
#include "sys/user.h"
#include "sys/proc.h"
#include "sys/conf.h"
#include "sys/dkstat.h"
#include "sys/kernel.h"

#include "../include/pte.h"
#include "../include/cpu.h"
#include "../include/mtpr.h"
#include "../vax/nexus.h"
#include "ubareg.h"
#include "ubavar.h"

#ifdef DW780
char	ubasr_bits[] = UBASR_BITS;
#endif

#define	spluba	spl7		/* IPL 17 */

/*
 * Do transfer on device argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 *
 * The onq argument must be zero iff the device is not on the
 * queue for this UBA.  If onq is set, the device must be at the
 * head of the queue.  In any case, if the transfer is started,
 * the device will be off the queue, and if not, it will be on.
 *
 * Drivers that allocate one BDP and hold it for some time should
 * set ud_keepbdp.  In this case um_bdp tells which BDP is allocated
 * to the controller, unless it is zero, indicating that the controller
 * does not now have a BDP.
 */
ubaqueue(ui, onq)
	register struct uba_device *ui;
	int onq;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register struct uba_driver *ud;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	ud = um->um_driver;
	s = spluba();
	/*
	 * Honor exclusive BDP use requests.
	 */
	if (ud->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	if (ud->ud_keepbdp) {
		/*
		 * First get just a BDP (though in fact it comes with
		 * one map register too).
		 */
		if (um->um_bdp == 0) {
			um->um_bdp = uballoc(um->um_ubanum,
				(caddr_t)0, 0, UBA_NEEDBDP|UBA_CANTWAIT);
			if (um->um_bdp == 0)
				goto rwait;
		}
		/* now share it with this transfer */
		um->um_ubinfo = ubasetup(um->um_ubanum,
			um->um_tab.b_actf->b_actf,
			um->um_bdp|UBA_HAVEBDP|UBA_CANTWAIT);
	} else
		um->um_ubinfo = ubasetup(um->um_ubanum,
			um->um_tab.b_actf->b_actf, UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (ud->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	if (onq)
		uh->uh_actf = ui->ui_forw;
	(*ud->ud_dgo)(um);
	return (1);
rwait:
	if (!onq) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}

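/*
 * Illustrative sketch only (not part of the original source; the ``xx''
 * names are hypothetical): a typical UNIBUS block driver's unit-start
 * routine hands a queued transfer to ubaqueue(), which either starts it
 * through the driver's ud_dgo routine or leaves the device queued for
 * resources, and the driver calls ubadone() from its interrupt routine
 * when the transfer completes.
 */
#ifdef notdef
xxustart(ui)
	register struct uba_device *ui;
{

	if (ubaqueue(ui, 0) == 0)
		return;		/* resources busy; we are queued, ubarelse() will restart us */
	/* ubaqueue() started the transfer via ud_dgo; nothing more to do here */
}

xxintr(um)
	register struct uba_ctlr *um;
{

	ubadone(um);		/* release map registers and (shared) BDP */
	/* ... finish up the buffer and start the next transfer ... */
}
#endif
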
ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	if (um->um_driver->ud_keepbdp)
		um->um_ubinfo &= ~BDPMASK;	/* keep BDP for misers */
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and set up UBA map registers and a BDP for a transfer.
 * Flags says whether a BDP is needed and whether the caller can't
 * wait (e.g. if the caller is at interrupt level).
 * The return value encodes the map register plus page offset,
 * the BDP number and the number of map registers.
 */
ubasetup(uban, bp, flags)
	int uban;
	register struct buf *bp;
	register int flags;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register struct pte *pte, *io;
	register int npf;
	int pfnum, temp;
	int reg, bdp;
	unsigned v;
	struct proc *rp;
	int a, o, ubinfo;

#ifdef DW730
	if (uh->uh_type == DW730)
		flags &= ~UBA_NEEDBDP;
#endif
#ifdef QBA
	if (uh->uh_type == QBA)
		flags &= ~UBA_NEEDBDP;
#endif
	o = (int)bp->b_un.b_addr & PGOFSET;
	npf = btoc(bp->b_bcount + o) + 1;
	if (npf > UBA_MAXNMR)
		panic("uba xfer too big");
	a = spluba();
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)&uh->uh_mrwant, PSWP);
	}
	if ((flags & UBA_NEED16) && reg + npf > 128) {
		/*
		 * Could hang around and try again (if we can ever succeed).
		 * Won't help any current device...
		 */
		rmfree(uh->uh_map, (long)npf, (long)reg);
		splx(a);
		return (0);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		while ((bdp = ffs((long)uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;
	splx(a);
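	/*
	 * rmalloc() returns a 1-origin map register index, hence the
	 * decrement here (and the corresponding ``+ 1'' in ubarelse()).
	 * UBAI_INFO() packs the byte offset, starting map register,
	 * register count and BDP number into one word; ubarelse() and
	 * ubapurge() recover them with UBAI_MR(), UBAI_NMR() and
	 * UBAI_BDP().
	 */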
	reg--;
	ubinfo = UBAI_INFO(o, reg, npf, bdp);
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;
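	/*
	 * Locate the page table entries describing the buffer.  Kernel
	 * addresses, page-table pages (B_PAGET), u. areas (B_UAREA) and
	 * ordinary user pages are each mapped differently; pages being
	 * cleaned by the pageout daemon (B_DIRTY) are charged to proc[2].
	 */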
	if ((bp->b_flags & B_PHYS) == 0)
		pte = kvtopte(bp->b_un.b_addr);
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else {
		rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
		v = btop(bp->b_un.b_addr);
		if (bp->b_flags & B_UAREA)
			pte = &rp->p_addr[v];
		else
			pte = vtopte(rp, v);
	}
	io = &uh->uh_mr[reg];
	while (--npf > 0) {
		pfnum = pte->pg_pfnum;
		if (pfnum == 0)
			panic("uba zero uentry");
		pte++;
		*(int *)io++ = pfnum | temp;
	}
	*(int *)io = 0;
	return (ubinfo);
}

/*
 * Non buffer setup interface... set up a buffer and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}

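/*
 * Illustrative sketch only (not part of the original source; the ``xx''
 * names are hypothetical): a driver that does DMA to a private kernel
 * buffer can map it once with uballoc() and unmap it later with
 * ubarelse().  The returned info word encodes the mapping; the UBAI_*
 * macros used elsewhere in this file extract its fields.
 */
#ifdef notdef
char	xxbuf[512];		/* hypothetical private DMA buffer */
int	xxinfo;			/* UNIBUS mapping info for xxbuf */

xxmap(um)
	register struct uba_ctlr *um;
{

	xxinfo = uballoc(um->um_ubanum, (caddr_t)xxbuf, sizeof (xxbuf),
	    UBA_NEEDBDP|UBA_CANTWAIT);
	return (xxinfo != 0);	/* 0 if no map registers or BDP were free */
}

xxunmap(um)
	register struct uba_ctlr *um;
{

	ubarelse(um->um_ubanum, &xxinfo);
}
#endif
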
/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The caller passes a pointer to the map register info; we copy and
 * clear it at high ipl so that an asynchronous release at uba reset
 * time (on 11/780's) cannot free the same resources twice.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spluba();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;
	bdp = UBAI_BDP(mr);
	if (bdp) {
		switch (uh->uh_type) {
#ifdef DWBUA
		case DWBUA:
			BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
			break;
#endif
#ifdef DW780
		case DW780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#ifdef DW750
		case DW750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		default:
			break;
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)&uh->uh_bdpwant);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered,
	 * nor can the registers be freed twice.
	 * Unblock interrupts once this is done.
	 */
	npf = UBAI_NMR(mr);
	reg = UBAI_MR(mr) + 1;
	rmfree(uh->uh_map, (long)npf, (long)reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)&uh->uh_mrwant);
	}
	while (uh->uh_actf && ubaqueue(uh->uh_actf, 1))
		;
}

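/*
 * Purge (flush) the buffered data path assigned to a transfer so that
 * any data still buffered in the BDP reaches memory before the caller
 * examines it.
 */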
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = UBAI_BDP(um->um_ubinfo);

	switch (uh->uh_type) {
#ifdef DWBUA
	case DWBUA:
		BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
		break;
#endif
#ifdef DW780
	case DW780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#ifdef DW750
	case DW750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	default:
		break;
	}
}

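/*
 * Initialize the map register resource map and the free BDP mask for
 * a uba; called at autoconfiguration time and from ubareset().
 */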
ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	if (uhp->uh_memsize > UBA_MAXMR)
		uhp->uh_memsize = UBA_MAXMR;
	rminit(uhp->uh_map, (long)uhp->uh_memsize, (long)1, "uba", UAMSIZ);
	switch (uhp->uh_type) {
#ifdef DWBUA
	case DWBUA:
		uhp->uh_bdpfree = (1<<NBDPBUA) - 1;
		break;
#endif
#ifdef DW780
	case DW780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#ifdef DW750
	case DW750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
	default:
		break;
	}
}

/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spluba();
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	ubameminit(uban);
	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
		(*cdp->d_reset)(uban);
	ifubareset(uban);
	printf("\n");
	splx(s);
}

/*
 * Init a uba.  This is called with a pointer to the uba registers
 * rather than a uba number, because it may be called by code which
 * runs with memory mapping disabled, in which case the pointer is a
 * physical address.  In that case we don't really need interrupts
 * enabled, but since we run at high ipl we don't care if they are;
 * they will never happen anyway.
 * SHOULD GET POINTER TO UBA_HD INSTEAD OF UBA.
 */
ubainit(uba)
	register struct uba_regs *uba;
{
	register struct uba_hd *uhp;
#ifdef QBA
	int isphys = 0;
#endif

	for (uhp = uba_hd; uhp < uba_hd + numuba; uhp++) {
		if (uhp->uh_uba == uba)
			break;
		if (uhp->uh_physuba == uba) {
#ifdef QBA
			isphys++;
#endif
			break;
		}
	}
	if (uhp >= uba_hd + numuba) {
		printf("init unknown uba\n");
		return;
	}

	switch (uhp->uh_type) {
#ifdef DWBUA
	case DWBUA:
		BUA(uba)->bua_csr |= BUACSR_UPI;
		/* give devices time to recover from power fail */
		DELAY(500000);
		break;
#endif
#ifdef DW780
	case DW780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#ifdef DW750
	case DW750:
#endif
#ifdef DW730
	case DW730:
#endif
#ifdef QBA
	case QBA:
#endif
#if DW750 || DW730 || QBA
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
#ifdef QBA
		/*
		 * Re-enable local memory access
		 * from the Q-bus.
		 */
		if (uhp->uh_type == QBA) {
			if (isphys)
				*((char *)QIOPAGE630 + QIPCR) = Q_LMEAE;
			else
				*(uhp->uh_iopage + QIPCR) = Q_LMEAE;
		}
#endif /* QBA */
		break;
#endif /* DW750 || DW730 || QBA */
	}
}

#ifdef QBA
/*
 * Determine the interrupt priority of a Q-bus
 * peripheral.  The device probe routine must spl6(),
 * attempt to make the device request an interrupt,
 * delaying as necessary, then call this routine
 * before resetting the device.
 */
qbgetpri()
{
	int pri;
	extern int cvec;

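	/*
	 * Lower the IPL one step at a time; when the device's pending
	 * interrupt gets through, the interrupt code records its vector
	 * in cvec (0x200 apparently marks a spurious/unexpected vector)
	 * and the current pri is the device's request level.
	 */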
	for (pri = 0x17; pri > 0x14; ) {
		if (cvec && cvec != 0x200)	/* interrupted at pri */
			break;
		pri--;
		splx(pri - 1);
	}
	(void) spl0();
	return (pri);
}
#endif

#ifdef DW780
int	ubawedgecnt = 10;
int	ubacrazy = 500;
int	zvcnt_max = 5000;	/* in 8 sec */
/*
 * This routine is called by the locore code to process a UBA
 * error on an 11/780 or 8600.  The arguments are passed
 * on the stack, and value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, ipl, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int ipl, uvec;
	register struct uba_regs *uba;
{
	register sr, s;

	if (uvec == 0) {
		/*
		 * Declare dt as unsigned so that negative values
		 * are handled as >8 below, in case time was set back.
		 */
		u_long	dt = time.tv_sec - uh->uh_zvtime;

		uh->uh_zvtotal++;
		if (dt > 8) {
			uh->uh_zvtime = time.tv_sec;
			uh->uh_zvcnt = 0;
		}
		if (++uh->uh_zvcnt > zvcnt_max) {
			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
				uban, uh->uh_zvcnt, dt + 1);
			printf("\tIPL 0x%x\n\tcnfgr: %b  Adapter Code: 0x%x\n",
				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
				uba->uba_cnfgr&0xff);
			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
				uba->uba_sr, ubasr_bits, uba->uba_dcr,
				(uba->uba_dcr&0x8000000)?"":"NOT ");
			ubareset(uban);
		}
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	sr = uba->uba_sr;
	s = spluba();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif

/*
 * Look for devices with unibus memory, allow them to configure, then disable
 * map registers as necessary.  Called during autoconfiguration and ubareset.
 * The device ubamem routine returns 0 on success, 1 on success if it is fully
 * configured (has no csr or interrupt, so doesn't need to be probed),
 * and -1 on failure.
 */
ubameminit(uban)
{
	register struct uba_device *ui;
	register struct uba_hd *uh = &uba_hd[uban];
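	/* 0x3e000 (0760000 octal) is the offset of the 8K UNIBUS I/O page */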
	caddr_t umembase = umem[uban] + 0x3e000, addr;
#define	ubaoff(off)	((int)(off) & 0x1fff)

	uh->uh_lastmem = 0;
	for (ui = ubdinit; ui->ui_driver; ui++) {
		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
			continue;
		if (ui->ui_driver->ud_ubamem) {
			/*
			 * During autoconfiguration, need to fudge ui_addr.
			 */
			addr = ui->ui_addr;
			ui->ui_addr = umembase + ubaoff(addr);
			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
			case 1:
				ui->ui_alive = 1;
				/* FALLTHROUGH */
			case 0:
				ui->ui_ubanum = uban;
				break;
			}
			ui->ui_addr = addr;
		}
	}
#ifdef DW780
	/*
	 * On a DW780, throw away any map registers disabled by rounding
	 * the map disable in the configuration register
	 * up to the next 8K boundary, or below the last unibus memory.
	 */
	if (uh->uh_type == DW780) {
		register i;

		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
		while (i)
			(void) rmget(uh->uh_map, 1, i--);
	}
#endif
}

/*
 * Allocate UNIBUS memory.  Allocates and initializes
 * sufficient mapping registers for access.  On a 780,
 * the configuration register is set up to disable UBA
 * response on DMA transfers to addresses controlled
 * by the disabled mapping registers.
 * On a DW780, should only be called from ubameminit, or in ascending order
 * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
 * the last unibus memory would free unusable map registers.
 * Doalloc is 1 to allocate, 0 to deallocate.
 */
ubamem(uban, addr, npg, doalloc)
	int uban, addr, npg, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int a;
	int s;

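	/*
	 * Each map register maps one 512-byte UNIBUS page; resource map
	 * indices are 1-origin, hence the ``+ 1'' (cf. ubasetup()).
	 */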
	a = (addr >> 9) + 1;
	s = spluba();
	if (doalloc)
		a = rmget(uh->uh_map, npg, a);
	else
		rmfree(uh->uh_map, (long)npg, (long)a);
	splx(s);
	if (a) {
		register int i, *m;

		m = (int *)&uh->uh_mr[a - 1];
		for (i = 0; i < npg; i++)
			*m++ = 0;	/* All off, especially 'valid' */
		i = addr + npg * 512;
		if (doalloc && i > uh->uh_lastmem)
			uh->uh_lastmem = i;
		else if (doalloc == 0 && i == uh->uh_lastmem)
			uh->uh_lastmem = addr;
#ifdef DW780
		/*
		 * On a 780, set up the map register disable
		 * field in the configuration register.  Beware
		 * of callers that request memory ``out of order''
		 * or in sections other than 8K multiples.
		 * Ubameminit handles such requests properly, however.
		 */
		if (uh->uh_type == DW780) {
			i = uh->uh_uba->uba_cr &~ 0x7c000000;
			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
			uh->uh_uba->uba_cr = i;
		}
#endif
	}
	return (a);
}

#include "ik.h"
#include "vs.h"
#if NIK > 0 || NVS > 0
/*
 * Map a virtual address into the user's address space.  Actually all
 * we do is turn on user-mode write permission in the protection bits
 * of the particular page of memory involved.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	kvtopte(vaddress)->pg_prot = (PG_UW >> 27);
}

unmaptouser(vaddress)
	caddr_t vaddress;
{

	kvtopte(vaddress)->pg_prot = (PG_KW >> 27);
}
#endif