xref: /csrg-svn/sys/vax/uba/uba.c (revision 34284)
123352Smckusick /*
229248Smckusick  * Copyright (c) 1982, 1986 Regents of the University of California.
323352Smckusick  * All rights reserved.  The Berkeley software License Agreement
423352Smckusick  * specifies the terms and conditions for redistribution.
523352Smckusick  *
6*34284Skarels  *	@(#)uba.c	7.5 (Berkeley) 05/14/88
723352Smckusick  */
840Sbill 
99780Ssam #include "../machine/pte.h"
109780Ssam 
1117081Sbloom #include "param.h"
1217081Sbloom #include "systm.h"
1317081Sbloom #include "map.h"
1417081Sbloom #include "buf.h"
1517081Sbloom #include "vm.h"
1617081Sbloom #include "dir.h"
1717081Sbloom #include "user.h"
1817081Sbloom #include "proc.h"
1917081Sbloom #include "conf.h"
2030389Skarels #include "dkstat.h"
2117081Sbloom #include "kernel.h"
2240Sbill 
238481Sroot #include "../vax/cpu.h"
248481Sroot #include "../vax/mtpr.h"
258481Sroot #include "../vax/nexus.h"
2617081Sbloom #include "ubareg.h"
2717081Sbloom #include "ubavar.h"
288481Sroot 
2929737Skarels #ifdef DW780
302929Swnj char	ubasr_bits[] = UBASR_BITS;
312929Swnj #endif
322929Swnj 
3326371Skarels #define	spluba	spl7		/* IPL 17 */
3426371Skarels 
3532521Sbostic #define	BDPMASK	0xf0000000	/* see ubavar.h */
3632521Sbostic 
3740Sbill /*
382570Swnj  * Do transfer on device argument.  The controller
392570Swnj  * and uba involved are implied by the device.
402570Swnj  * We queue for resource wait in the uba code if necessary.
412570Swnj  * We return 1 if the transfer was started, 0 if it was not.
4232521Sbostic  *
4332521Sbostic  * The onq argument must be zero iff the device is not on the
4432521Sbostic  * queue for this UBA.  If onq is set, the device must be at the
4532521Sbostic  * head of the queue.  In any case, if the transfer is started,
4632521Sbostic  * the device will be off the queue, and if not, it will be on.
4732521Sbostic  *
4832521Sbostic  * Drivers that allocate one BDP and hold it for some time should
4932521Sbostic  * set ud_keepbdp.  In this case um_bdp tells which BDP is allocated
5032521Sbostic  * to the controller, unless it is zero, indicating that the controller
5132521Sbostic  * does not now have a BDP.
522570Swnj  */
ubaqueue(ui, onq)
	register struct uba_device *ui;
	int onq;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register struct uba_driver *ud;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	ud = um->um_driver;
	s = spluba();		/* block UNIBUS interrupts (IPL 17) */
	/*
	 * Honor exclusive BDP use requests.
	 */
	if (ud->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	if (ud->ud_keepbdp) {
		/*
		 * First get just a BDP (though in fact it comes with
		 * one map register too).
		 */
		if (um->um_bdp == 0) {
			um->um_bdp = uballoc(um->um_ubanum,
				(caddr_t)0, 0, UBA_NEEDBDP|UBA_CANTWAIT);
			if (um->um_bdp == 0)
				goto rwait;
		}
		/* now share it with this transfer */
		um->um_ubinfo = ubasetup(um->um_ubanum,
			um->um_tab.b_actf->b_actf,
			um->um_bdp|UBA_HAVEBDP|UBA_CANTWAIT);
	} else
		um->um_ubinfo = ubasetup(um->um_ubanum,
			um->um_tab.b_actf->b_actf, UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (ud->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	/* charge the transfer to the device's disk statistics slot, if any */
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	/* resources in hand: dequeue the device (it was at the head) and go */
	if (onq)
		uh->uh_actf = ui->ui_forw;
	(*ud->ud_dgo)(um);
	return (1);
rwait:
	/* resources unavailable: put the device at the tail of the wait queue */
	if (!onq) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}
1162570Swnj 
1172616Swnj ubadone(um)
1182958Swnj 	register struct uba_ctlr *um;
1192616Swnj {
1202616Swnj 	register struct uba_hd *uh = &uba_hd[um->um_ubanum];
1212616Swnj 
1222628Swnj 	if (um->um_driver->ud_xclu)
1232616Swnj 		uh->uh_xclu = 0;
1242616Swnj 	uh->uh_users--;
12532521Sbostic 	if (um->um_driver->ud_keepbdp)
12632521Sbostic 		um->um_ubinfo &= ~BDPMASK;	/* keep BDP for misers */
1272616Swnj 	ubarelse(um->um_ubanum, &um->um_ubinfo);
1282616Swnj }
1292616Swnj 
1302570Swnj /*
1312395Swnj  * Allocate and setup UBA map registers, and bdp's
1322395Swnj  * Flags says whether bdp is needed, whether the caller can't
1332395Swnj  * wait (e.g. if the caller is at interrupt level).
13440Sbill  *
1352570Swnj  * Return value:
13640Sbill  *	Bits 0-8	Byte offset
13740Sbill  *	Bits 9-17	Start map reg. no.
13840Sbill  *	Bits 18-27	No. mapping reg's
13940Sbill  *	Bits 28-31	BDP no.
14040Sbill  */
ubasetup(uban, bp, flags)
	int uban;
	register struct buf *bp;
	register int flags;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register struct pte *pte, *io;
	register int npf;
	int pfnum, temp;
	int reg, bdp;
	unsigned v;
	struct proc *rp;
	int a, o, ubinfo;

#ifdef DW730
	/* the DW730 takes no buffered data paths */
	if (uh->uh_type == DW730)
		flags &= ~UBA_NEEDBDP;
#endif
#ifdef QBA
	/* nor does the Q-bus adapter */
	if (uh->uh_type == QBA)
		flags &= ~UBA_NEEDBDP;
#endif
	o = (int)bp->b_un.b_addr & PGOFSET;	/* byte offset within first page */
	/* one extra map register is allocated and left invalid as a fence */
	npf = btoc(bp->b_bcount + o) + 1;
	a = spluba();
	/* get a contiguous run of map registers; sleep unless CANTWAIT */
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)&uh->uh_mrwant, PSWP);
	}
	if ((flags & UBA_NEED16) && reg + npf > 128) {
		/*
		 * Could hang around and try again (if we can ever succeed).
		 * Won't help any current device...
		 */
		rmfree(uh->uh_map, (long)npf, (long)reg);
		splx(a);
		return (0);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		/* allocate a buffered data path from the free bitmask */
		while ((bdp = ffs((long)uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;	/* caller already holds a BDP */
	splx(a);
	reg--;
	/* pack BDP, register count, start register, offset (see header comment) */
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;	/* odd byte offset needs byte-offset bit */
	/*
	 * Find the page tables describing the buffer: kernel virtual
	 * addresses, page-table pages, the u. area, or process pages.
	 */
	if ((bp->b_flags & B_PHYS) == 0)
		pte = kvtopte(bp->b_un.b_addr);
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else {
		/* dirty buffers are charged to proc[2] (presumably the
		   pageout daemon -- TODO confirm) */
		rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
		v = btop(bp->b_un.b_addr);
		if (bp->b_flags & B_UAREA)
			pte = &rp->p_addr[v];
		else
			pte = vtopte(rp, v);
	}
	/* load the adapter map registers; final register is zeroed (invalid) */
	io = &uh->uh_mr[reg];
	while (--npf > 0) {
		pfnum = pte->pg_pfnum;
		if (pfnum == 0)
			panic("uba zero uentry");
		pte++;
		*(int *)io++ = pfnum | temp;
	}
	*(int *)io = 0;
	return (ubinfo);
}
22640Sbill 
22740Sbill /*
2282570Swnj  * Non buffer setup interface... set up a buffer and call ubasetup.
22940Sbill  */
2302395Swnj uballoc(uban, addr, bcnt, flags)
2313107Swnj 	int uban;
23240Sbill 	caddr_t addr;
2333107Swnj 	int bcnt, flags;
23440Sbill {
235883Sbill 	struct buf ubabuf;
23640Sbill 
23740Sbill 	ubabuf.b_un.b_addr = addr;
23840Sbill 	ubabuf.b_flags = B_BUSY;
23940Sbill 	ubabuf.b_bcount = bcnt;
240883Sbill 	/* that's all the fields ubasetup() needs */
2412395Swnj 	return (ubasetup(uban, &ubabuf, flags));
24240Sbill }
24340Sbill 
2442053Swnj /*
2452570Swnj  * Release resources on uba uban, and then unblock resource waiters.
2462570Swnj  * The map register parameter is by value since we need to block
2472570Swnj  * against uba resets on 11/780's.
2482053Swnj  */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spluba();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;	/* claim the resources before lowering ipl */
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		/* flush and free the buffered data path (adapter-specific) */
		switch (uh->uh_type) {
#ifdef DWBUA
		case DWBUA:
			BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
			break;
#endif
#ifdef DW780
		case DW780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#ifdef DW750
		case DW750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		default:
			break;
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)&uh->uh_bdpwant);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered,
	 * nor can the registers be freed twice.
	 * Unblock interrupts once this is done.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	rmfree(uh->uh_map, (long)npf, (long)reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)&uh->uh_mrwant);
	}
	while (uh->uh_actf && ubaqueue(uh->uh_actf, 1))
		;
}
32240Sbill 
/*
 * Purge the buffered data path in use by controller um, pushing any
 * bytes still buffered in the BDP out to memory.  Unlike ubarelse(),
 * the data path remains allocated to the controller.
 */
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	/* purge mechanism differs by adapter type */
	switch (uh->uh_type) {
#ifdef DWBUA
	case DWBUA:
		BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
		break;
#endif
#ifdef DW780
	case DW780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#ifdef DW750
	case DW750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	default:
		break;
	}
}
3492729Swnj 
/*
 * Initialize the map-register resource map and the free-BDP bitmask
 * for the UBA described by uhp.  Called from ubareset(); the number
 * of buffered data paths depends on the adapter type.
 */
ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, (long)uhp->uh_memsize, (long)1, "uba", UAMSIZ);
	switch (uhp->uh_type) {
#ifdef DWBUA
	case DWBUA:
		uhp->uh_bdpfree = (1<<NBDPBUA) - 1;
		break;
#endif
#ifdef DW780
	case DW780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#ifdef DW750
	case DW750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
	default:
		/* adapters without BDPs leave the free mask zero */
		break;
	}
}
3756863Swnj 
3762570Swnj /*
3772570Swnj  * Generate a reset on uba number uban.  Then
3782570Swnj  * call each device in the character device table,
3792570Swnj  * giving it a chance to clean up so as to be able to continue.
3802570Swnj  */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spluba();
	/* forcibly reclaim every UBA resource and wake any sleepers;
	   outstanding ubinfo handles are invalidated (see ubarelse()) */
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	ubameminit(uban);
	/* give every character device driver a chance to recover */
	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
		(*cdp->d_reset)(uban);
	ifubareset(uban);	/* presumably resets network interfaces on this uba */
	printf("\n");
	splx(s);
}
4072395Swnj 
4082570Swnj /*
4092570Swnj  * Init a uba.  This is called with a pointer
4102570Swnj  * rather than a virtual address since it is called
4112570Swnj  * by code which runs with memory mapping disabled.
4122570Swnj  * In these cases we really don't need the interrupts
4132570Swnj  * enabled, but since we run with ipl high, we don't care
4142570Swnj  * if they are, they will never happen anyways.
41529737Skarels  * SHOULD GET POINTER TO UBA_HD INSTEAD OF UBA.
4162570Swnj  */
ubainit(uba)
	register struct uba_regs *uba;
{
	register struct uba_hd *uhp;
#ifdef QBA
	int isphys = 0;		/* set if caller passed the physical address */
#endif

	/* find the uba_hd for this adapter; the pointer may be either
	   the mapped registers or their physical address */
	for (uhp = uba_hd; uhp < uba_hd + numuba; uhp++) {
		if (uhp->uh_uba == uba)
			break;
		if (uhp->uh_physuba == uba) {
#ifdef QBA
			isphys++;
#endif
			break;
		}
	}
	if (uhp >= uba_hd + numuba) {
		printf("init unknown uba\n");
		return;
	}

	switch (uhp->uh_type) {
#ifdef DWBUA
	case DWBUA:
		BUA(uba)->bua_csr |= BUACSR_UPI;
		/* give devices time to recover from power fail */
		DELAY(500000);
		break;
#endif
#ifdef DW780
	case DW780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		/* busy-wait for the adapter init-complete bit */
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#ifdef DW750
	case DW750:
#endif
#ifdef DW730
	case DW730:
#endif
#ifdef QBA
	case QBA:
#endif
#if DW750 || DW730 || QBA
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
#ifdef QBA
		/*
		 * Re-enable local memory access
		 * from the Q-bus.
		 */
		if (uhp->uh_type == QBA) {
			if (isphys)
				*((char *)QIOPAGE630 + QIPCR) = Q_LMEAE;
			else
				*(uhp->uh_iopage + QIPCR) = Q_LMEAE;
		}
#endif QBA
		break;
#endif DW750 || DW730 || QBA
	}
}
4872395Swnj 
#ifdef DW780
int	ubawedgecnt = 10;	/* reset the adapter every this-many errors */
int	ubacrazy = 500;		/* give up (panic) past this many errors */
int	zvcnt_max = 5000;	/* in 8 sec */
/*
 * This routine is called by the locore code to process a UBA
 * error on an 11/780 or 8600.  The arguments are passed
 * on the stack, and value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, ipl, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int ipl, uvec;
	register struct uba_regs *uba;
{
	register sr, s;

	if (uvec == 0) {
		/*
		 * Declare dt as unsigned so that negative values
		 * are handled as >8 below, in case time was set back.
		 */
		u_long	dt = time.tv_sec - uh->uh_zvtime;

		uh->uh_zvtotal++;
		if (dt > 8) {
			/* start a fresh 8-second counting window */
			uh->uh_zvtime = time.tv_sec;
			uh->uh_zvcnt = 0;
		}
		if (++uh->uh_zvcnt > zvcnt_max) {
			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
				uban, uh->uh_zvcnt, dt + 1);
			printf("\tIPL 0x%x\n\tcnfgr: %b  Adapter Code: 0x%x\n",
				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
				uba->uba_cnfgr&0xff);
			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
				uba->uba_sr, ubasr_bits, uba->uba_dcr,
				(uba->uba_dcr&0x8000000)?"":"NOT ");
			ubareset(uban);
		}
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;	/* value-result: suppress further vector processing */
		return;
	}
	sr = uba->uba_sr;
	s = spluba();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;	/* write back to clear the error bits */
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif
5603745Sroot 
5613745Sroot /*
56217731Skarels  * Look for devices with unibus memory, allow them to configure, then disable
56317731Skarels  * map registers as necessary.  Called during autoconfiguration and ubareset.
56417731Skarels  * The device ubamem routine returns 0 on success, 1 on success if it is fully
56517731Skarels  * configured (has no csr or interrupt, so doesn't need to be probed),
56617731Skarels  * and -1 on failure.
56717731Skarels  */
ubameminit(uban)
{
	register struct uba_device *ui;
	register struct uba_hd *uh = &uba_hd[uban];
	/* umembase is the region of unibus memory space this code probes;
	   the 0x3e000 offset presumably skips to the device range -- TODO confirm */
	caddr_t umembase = umem[uban] + 0x3e000, addr;
#define	ubaoff(off)	((int)(off) & 0x1fff)

	uh->uh_lastmem = 0;
	for (ui = ubdinit; ui->ui_driver; ui++) {
		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
			continue;
		if (ui->ui_driver->ud_ubamem) {
			/*
			 * During autoconfiguration, need to fudge ui_addr.
			 */
			addr = ui->ui_addr;
			ui->ui_addr = umembase + ubaoff(addr);
			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
			case 1:
				/* fully configured: no csr/interrupt probe needed */
				ui->ui_alive = 1;
				/* FALLTHROUGH */
			case 0:
				ui->ui_ubanum = uban;
				break;
			}
			ui->ui_addr = addr;
		}
	}
#ifdef DW780
	/*
	 * On a DW780, throw away any map registers disabled by rounding
	 * the map disable in the configuration register
	 * up to the next 8K boundary, or below the last unibus memory.
	 */
	if (uh->uh_type == DW780) {
		register i;

		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
		while (i)
			(void) rmget(uh->uh_map, 1, i--);
	}
#endif
}
61117731Skarels 
61217731Skarels /*
61314790Ssam  * Allocate UNIBUS memory.  Allocates and initializes
61414790Ssam  * sufficient mapping registers for access.  On a 780,
61514790Ssam  * the configuration register is setup to disable UBA
61614790Ssam  * response on DMA transfers to addresses controlled
61714790Ssam  * by the disabled mapping registers.
61829737Skarels  * On a DW780, should only be called from ubameminit, or in ascending order
61917731Skarels  * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
62017731Skarels  * the last unibus memory would free unusable map registers.
62117731Skarels  * Doalloc is 1 to allocate, 0 to deallocate.
6226518Sfeldman  */
62314790Ssam ubamem(uban, addr, npg, doalloc)
62414790Ssam 	int uban, addr, npg, doalloc;
6256518Sfeldman {
6266518Sfeldman 	register struct uba_hd *uh = &uba_hd[uban];
62714790Ssam 	register int a;
62817731Skarels 	int s;
6296518Sfeldman 
63017731Skarels 	a = (addr >> 9) + 1;
63126371Skarels 	s = spluba();
63217731Skarels 	if (doalloc)
63317731Skarels 		a = rmget(uh->uh_map, npg, a);
63417731Skarels 	else
63517731Skarels 		rmfree(uh->uh_map, (long)npg, (long)a);
63617731Skarels 	splx(s);
6376518Sfeldman 	if (a) {
63814790Ssam 		register int i, *m;
63914790Ssam 
64029737Skarels 		m = (int *)&uh->uh_mr[a - 1];
64114790Ssam 		for (i = 0; i < npg; i++)
6426518Sfeldman 			*m++ = 0;	/* All off, especially 'valid' */
64317731Skarels 		i = addr + npg * 512;
64417731Skarels 		if (doalloc && i > uh->uh_lastmem)
64517731Skarels 			uh->uh_lastmem = i;
64617731Skarels 		else if (doalloc == 0 && i == uh->uh_lastmem)
64717731Skarels 			uh->uh_lastmem = addr;
64829737Skarels #ifdef DW780
64914790Ssam 		/*
65014790Ssam 		 * On a 780, set up the map register disable
65114790Ssam 		 * field in the configuration register.  Beware
65217731Skarels 		 * of callers that request memory ``out of order''
65317731Skarels 		 * or in sections other than 8K multiples.
65417731Skarels 		 * Ubameminit handles such requests properly, however.
65514790Ssam 		 */
65629737Skarels 		if (uh->uh_type == DW780) {
65717731Skarels 			i = uh->uh_uba->uba_cr &~ 0x7c000000;
65817731Skarels 			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
65917731Skarels 			uh->uh_uba->uba_cr = i;
6607473Sfeldman 		}
6617473Sfeldman #endif
6626518Sfeldman 	}
66314790Ssam 	return (a);
6646518Sfeldman }
6657304Ssam 
6669875Ssam #include "ik.h"
66724501Sjg #include "vs.h"
66824501Sjg #if NIK > 0 || NVS > 0
6697304Ssam /*
6707304Ssam  * Map a virtual address into users address space. Actually all we
6717304Ssam  * do is turn on the user mode write protection bits for the particular
6727304Ssam  * page of memory involved.
6737304Ssam  */
maptouser(vaddress)
	caddr_t vaddress;
{

	/* set user-write protection on the page containing vaddress
	   (the >>27 shift positions the code for the pg_prot field) */
	kvtopte(vaddress)->pg_prot = (PG_UW >> 27);
}
6807304Ssam 
unmaptouser(vaddress)
	caddr_t vaddress;
{

	/* undo maptouser(): restore kernel-write-only protection */
	kvtopte(vaddress)->pg_prot = (PG_KW >> 27);
}
6879174Ssam #endif
688