xref: /csrg-svn/sys/vax/uba/uba.c (revision 36784)
123352Smckusick /*
2*36784Sbostic  * Copyright (c) 1982, 1986 The Regents of the University of California.
3*36784Sbostic  * All rights reserved.
423352Smckusick  *
5*36784Sbostic  * Redistribution and use in source and binary forms are permitted
6*36784Sbostic  * provided that the above copyright notice and this paragraph are
7*36784Sbostic  * duplicated in all such forms and that any documentation,
8*36784Sbostic  * advertising materials, and other materials related to such
9*36784Sbostic  * distribution and use acknowledge that the software was developed
10*36784Sbostic  * by the University of California, Berkeley.  The name of the
11*36784Sbostic  * University may not be used to endorse or promote products derived
12*36784Sbostic  * from this software without specific prior written permission.
13*36784Sbostic  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
14*36784Sbostic  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
15*36784Sbostic  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
16*36784Sbostic  *
17*36784Sbostic  *	@(#)uba.c	7.7 (Berkeley) 02/15/89
1823352Smckusick  */
1940Sbill 
2017081Sbloom #include "param.h"
2117081Sbloom #include "systm.h"
2217081Sbloom #include "map.h"
2317081Sbloom #include "buf.h"
2417081Sbloom #include "vm.h"
2517081Sbloom #include "dir.h"
2617081Sbloom #include "user.h"
2717081Sbloom #include "proc.h"
2817081Sbloom #include "conf.h"
2930389Skarels #include "dkstat.h"
3017081Sbloom #include "kernel.h"
3140Sbill 
3236034Skarels #include "../vax/pte.h"
338481Sroot #include "../vax/cpu.h"
348481Sroot #include "../vax/mtpr.h"
358481Sroot #include "../vax/nexus.h"
3617081Sbloom #include "ubareg.h"
3717081Sbloom #include "ubavar.h"
388481Sroot 
3929737Skarels #ifdef DW780
/* %b-format string for decoding the DW780 UBA status register in printfs */
402929Swnj char	ubasr_bits[] = UBASR_BITS;
412929Swnj #endif
422929Swnj 
/* all UNIBUS resource bookkeeping is done at IPL 17 (blocks UNIBUS devices) */
4326371Skarels #define	spluba	spl7		/* IPL 17 */
4426371Skarels 
4540Sbill /*
462570Swnj  * Do transfer on device argument.  The controller
472570Swnj  * and uba involved are implied by the device.
482570Swnj  * We queue for resource wait in the uba code if necessary.
492570Swnj  * We return 1 if the transfer was started, 0 if it was not.
5032521Sbostic  *
5132521Sbostic  * The onq argument must be zero iff the device is not on the
5232521Sbostic  * queue for this UBA.  If onq is set, the device must be at the
5332521Sbostic  * head of the queue.  In any case, if the transfer is started,
5432521Sbostic  * the device will be off the queue, and if not, it will be on.
5532521Sbostic  *
5632521Sbostic  * Drivers that allocate one BDP and hold it for some time should
5732521Sbostic  * set ud_keepbdp.  In this case um_bdp tells which BDP is allocated
5832521Sbostic  * to the controller, unless it is zero, indicating that the controller
5932521Sbostic  * does not now have a BDP.
602570Swnj  */
6132521Sbostic ubaqueue(ui, onq)
622958Swnj 	register struct uba_device *ui;
6332521Sbostic 	int onq;
642570Swnj {
652958Swnj 	register struct uba_ctlr *um = ui->ui_mi;
662570Swnj 	register struct uba_hd *uh;
6732521Sbostic 	register struct uba_driver *ud;
682570Swnj 	register int s, unit;
692570Swnj 
702570Swnj 	uh = &uba_hd[um->um_ubanum];
7132521Sbostic 	ud = um->um_driver;
7226371Skarels 	s = spluba();
7332521Sbostic 	/*
7432521Sbostic 	 * Honor exclusive BDP use requests.
7532521Sbostic 	 */
7632521Sbostic 	if (ud->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
772616Swnj 		goto rwait;
7832521Sbostic 	if (ud->ud_keepbdp) {
7932521Sbostic 		/*
8032521Sbostic 		 * First get just a BDP (though in fact it comes with
8132521Sbostic 		 * one map register too).
8232521Sbostic 		 */
8332521Sbostic 		if (um->um_bdp == 0) {
8432521Sbostic 			um->um_bdp = uballoc(um->um_ubanum,
8532521Sbostic 				(caddr_t)0, 0, UBA_NEEDBDP|UBA_CANTWAIT);
8632521Sbostic 			if (um->um_bdp == 0)
8732521Sbostic 				goto rwait;
8832521Sbostic 		}
8932521Sbostic 		/* now share it with this transfer */
9032521Sbostic 		um->um_ubinfo = ubasetup(um->um_ubanum,
9132521Sbostic 			um->um_tab.b_actf->b_actf,
9232521Sbostic 			um->um_bdp|UBA_HAVEBDP|UBA_CANTWAIT);
9332521Sbostic 	} else
9432521Sbostic 		um->um_ubinfo = ubasetup(um->um_ubanum,
9532521Sbostic 			um->um_tab.b_actf->b_actf, UBA_NEEDBDP|UBA_CANTWAIT);
962616Swnj 	if (um->um_ubinfo == 0)
972616Swnj 		goto rwait;
982616Swnj 	uh->uh_users++;
9932521Sbostic 	if (ud->ud_xclu)
1002616Swnj 		uh->uh_xclu = 1;
1012570Swnj 	splx(s);
	/* charge the transfer to the device's dk slot, if it has one */
1022570Swnj 	if (ui->ui_dk >= 0) {
1032570Swnj 		unit = ui->ui_dk;
1042570Swnj 		dk_busy |= 1<<unit;
1056348Swnj 		dk_xfer[unit]++;
1066348Swnj 		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
1072570Swnj 	}
	/* started: if we were at the head of the resource queue, remove us */
10832521Sbostic 	if (onq)
1092570Swnj 		uh->uh_actf = ui->ui_forw;
11032521Sbostic 	(*ud->ud_dgo)(um);
1112570Swnj 	return (1);
1122616Swnj rwait:
	/* resources unavailable: enqueue device at the tail, unless already queued */
11332521Sbostic 	if (!onq) {
1142616Swnj 		ui->ui_forw = NULL;
1152616Swnj 		if (uh->uh_actf == NULL)
1162616Swnj 			uh->uh_actf = ui;
1172616Swnj 		else
1182616Swnj 			uh->uh_actl->ui_forw = ui;
1192616Swnj 		uh->uh_actl = ui;
1202616Swnj 	}
1212616Swnj 	splx(s);
1222616Swnj 	return (0);
1232570Swnj }
1242570Swnj 
/*
 * Transfer on controller um is complete: undo the resource
 * acquisition done in ubaqueue() (exclusive use flag, user count,
 * map registers and, unless the driver keeps its BDP, the BDP).
 */
1252616Swnj ubadone(um)
1262958Swnj 	register struct uba_ctlr *um;
1272616Swnj {
1282616Swnj 	register struct uba_hd *uh = &uba_hd[um->um_ubanum];
1292616Swnj 
1302628Swnj 	if (um->um_driver->ud_xclu)
1312616Swnj 		uh->uh_xclu = 0;
1322616Swnj 	uh->uh_users--;
	/* clearing the BDP field keeps ubarelse() from freeing the BDP */
13332521Sbostic 	if (um->um_driver->ud_keepbdp)
13432521Sbostic 		um->um_ubinfo &= ~BDPMASK;	/* keep BDP for misers */
1352616Swnj 	ubarelse(um->um_ubanum, &um->um_ubinfo);
1362616Swnj }
1372616Swnj 
1382570Swnj /*
1392395Swnj  * Allocate and setup UBA map registers, and bdp's
1402395Swnj  * Flags says whether bdp is needed, whether the caller can't
1412395Swnj  * wait (e.g. if the caller is at interrupt level).
14236034Skarels  * Return value encodes map register plus page offset,
14336034Skarels  * bdp number and number of map registers.
14440Sbill  */
1452395Swnj ubasetup(uban, bp, flags)
14634284Skarels 	int uban;
14734284Skarels 	register struct buf *bp;
14834284Skarels 	register int flags;
14940Sbill {
1502395Swnj 	register struct uba_hd *uh = &uba_hd[uban];
15134284Skarels 	register struct pte *pte, *io;
15234284Skarels 	register int npf;
15318417Smckusick 	int pfnum, temp;
15434284Skarels 	int reg, bdp;
15540Sbill 	unsigned v;
15640Sbill 	struct proc *rp;
15740Sbill 	int a, o, ubinfo;
15840Sbill 
	/* adapters without buffered data paths simply ignore the BDP request */
15929737Skarels #ifdef DW730
16029737Skarels 	if (uh->uh_type == DW730)
1613332Swnj 		flags &= ~UBA_NEEDBDP;
1623332Swnj #endif
16329737Skarels #ifdef QBA
16429737Skarels 	if (uh->uh_type == QBA)
16529737Skarels 		flags &= ~UBA_NEEDBDP;
16629737Skarels #endif
16740Sbill 	o = (int)bp->b_un.b_addr & PGOFSET;
	/* +1: one extra map register, left invalid below as a fence */
16840Sbill 	npf = btoc(bp->b_bcount + o) + 1;
16936034Skarels 	if (npf > UBA_MAXNMR)
17036034Skarels 		panic("uba xfer too big");
17126371Skarels 	a = spluba();
1728811Sroot 	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
1733913Swnj 		if (flags & UBA_CANTWAIT) {
1743913Swnj 			splx(a);
1752395Swnj 			return (0);
1763913Swnj 		}
1772395Swnj 		uh->uh_mrwant++;
1789353Ssam 		sleep((caddr_t)&uh->uh_mrwant, PSWP);
17940Sbill 	}
	/* UBA_NEED16: device generates only 16-bit addresses, so its
	   transfer must map within the first 128 map registers (64Kb) */
18017731Skarels 	if ((flags & UBA_NEED16) && reg + npf > 128) {
18117731Skarels 		/*
18217731Skarels 		 * Could hang around and try again (if we can ever succeed).
18317731Skarels 		 * Won't help any current device...
18417731Skarels 		 */
18517731Skarels 		rmfree(uh->uh_map, (long)npf, (long)reg);
18617731Skarels 		splx(a);
18717731Skarels 		return (0);
18817731Skarels 	}
18940Sbill 	bdp = 0;
1902395Swnj 	if (flags & UBA_NEEDBDP) {
19126371Skarels 		while ((bdp = ffs((long)uh->uh_bdpfree)) == 0) {
1922395Swnj 			if (flags & UBA_CANTWAIT) {
1938811Sroot 				rmfree(uh->uh_map, (long)npf, (long)reg);
1943913Swnj 				splx(a);
1952395Swnj 				return (0);
1962395Swnj 			}
1972395Swnj 			uh->uh_bdpwant++;
1989353Ssam 			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
19940Sbill 		}
2002463Swnj 		uh->uh_bdpfree &= ~(1 << (bdp-1));
2014758Swnj 	} else if (flags & UBA_HAVEBDP)
	/* caller already owns a BDP; its number rides in the top flag bits */
2024758Swnj 		bdp = (flags >> 28) & 0xf;
20340Sbill 	splx(a);
2042463Swnj 	reg--;
20536034Skarels 	ubinfo = UBAI_INFO(o, reg, npf, bdp);
2062958Swnj 	temp = (bdp << 21) | UBAMR_MRV;
20740Sbill 	if (bdp && (o & 01))
2082958Swnj 		temp |= UBAMR_BO;
	/*
	 * Find the ptes describing the buffer: kernel virtual space,
	 * page-table pages, or user/process space (u. area or data).
	 */
2096382Swnj 	if ((bp->b_flags & B_PHYS) == 0)
21034284Skarels 		pte = kvtopte(bp->b_un.b_addr);
2116382Swnj 	else if (bp->b_flags & B_PAGET)
2126382Swnj 		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
21334284Skarels 	else {
21434284Skarels 		rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
21534284Skarels 		v = btop(bp->b_un.b_addr);
21634284Skarels 		if (bp->b_flags & B_UAREA)
21734284Skarels 			pte = &rp->p_addr[v];
21834284Skarels 		else
21934284Skarels 			pte = vtopte(rp, v);
22034284Skarels 	}
	/* load the map registers with page frames plus BDP/valid bits */
22129737Skarels 	io = &uh->uh_mr[reg];
22234284Skarels 	while (--npf > 0) {
22318417Smckusick 		pfnum = pte->pg_pfnum;
22418417Smckusick 		if (pfnum == 0)
2256382Swnj 			panic("uba zero uentry");
22618417Smckusick 		pte++;
22718417Smckusick 		*(int *)io++ = pfnum | temp;
22840Sbill 	}
	/* trailing map register left invalid (MRV clear), presumably to
	   fault a transfer that runs past its mapping — TODO confirm */
22934284Skarels 	*(int *)io = 0;
23040Sbill 	return (ubinfo);
23140Sbill }
23240Sbill 
23340Sbill /*
2342570Swnj  * Non buffer setup interface... set up a buffer and call ubasetup.
23540Sbill  */
2362395Swnj uballoc(uban, addr, bcnt, flags)
2373107Swnj 	int uban;
23840Sbill 	caddr_t addr;
2393107Swnj 	int bcnt, flags;
24040Sbill {
	/* fake up a buf on the stack so ubasetup() can do the real work */
241883Sbill 	struct buf ubabuf;
24240Sbill 
24340Sbill 	ubabuf.b_un.b_addr = addr;
24440Sbill 	ubabuf.b_flags = B_BUSY;
24540Sbill 	ubabuf.b_bcount = bcnt;
246883Sbill 	/* that's all the fields ubasetup() needs */
2472395Swnj 	return (ubasetup(uban, &ubabuf, flags));
24840Sbill }
24940Sbill 
2502053Swnj /*
2512570Swnj  * Release resources on uba uban, and then unblock resource waiters.
2522570Swnj  * The map register parameter is by value since we need to block
2532570Swnj  * against uba resets on 11/780's.
2542053Swnj  */
2552395Swnj ubarelse(uban, amr)
2562053Swnj 	int *amr;
25740Sbill {
2582395Swnj 	register struct uba_hd *uh = &uba_hd[uban];
2592570Swnj 	register int bdp, reg, npf, s;
2602053Swnj 	int mr;
26140Sbill 
2622570Swnj 	/*
2632570Swnj 	 * Carefully see if we should release the space, since
2642570Swnj 	 * it may be released asynchronously at uba reset time.
2652570Swnj 	 */
26626371Skarels 	s = spluba();
2672053Swnj 	mr = *amr;
2682053Swnj 	if (mr == 0) {
2692570Swnj 		/*
2702570Swnj 		 * A ubareset() occurred before we got around
2712570Swnj 		 * to releasing the space... no need to bother.
2722570Swnj 		 */
2732570Swnj 		splx(s);
2742053Swnj 		return;
2752053Swnj 	}
2762067Swnj 	*amr = 0;
27736034Skarels 	bdp = UBAI_BDP(mr);
	/*
	 * If a buffered data path was in use, tell the adapter to
	 * flush (purge) any data still buffered, then mark it free.
	 */
27840Sbill 	if (bdp) {
27929737Skarels 		switch (uh->uh_type) {
28034284Skarels #ifdef DWBUA
28134284Skarels 		case DWBUA:
28234284Skarels 			BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
28334284Skarels 			break;
28434284Skarels #endif
28529737Skarels #ifdef DW780
28629737Skarels 		case DW780:
2872958Swnj 			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
2882423Skre 			break;
2892423Skre #endif
29029737Skarels #ifdef DW750
29129737Skarels 		case DW750:
2922958Swnj 			uh->uh_uba->uba_dpr[bdp] |=
2932958Swnj 			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
2942423Skre 			break;
2952423Skre #endif
29629737Skarels 		default:
29729737Skarels 			break;
2982423Skre 		}
2992570Swnj 		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
3002395Swnj 		if (uh->uh_bdpwant) {
3012395Swnj 			uh->uh_bdpwant = 0;
3029353Ssam 			wakeup((caddr_t)&uh->uh_bdpwant);
30340Sbill 		}
30440Sbill 	}
3052570Swnj 	/*
3062570Swnj 	 * Put back the registers in the resource map.
30717731Skarels 	 * The map code must not be reentered,
30817731Skarels 	 * nor can the registers be freed twice.
30917731Skarels 	 * Unblock interrupts once this is done.
3102570Swnj 	 */
31136034Skarels 	npf = UBAI_NMR(mr);
31236034Skarels 	reg = UBAI_MR(mr) + 1;
3138811Sroot 	rmfree(uh->uh_map, (long)npf, (long)reg);
3142570Swnj 	splx(s);
3152570Swnj 
3162570Swnj 	/*
3172570Swnj 	 * Wakeup sleepers for map registers,
3182570Swnj 	 * and also, if there are processes blocked in dgo(),
3192570Swnj 	 * give them a chance at the UNIBUS.
3202570Swnj 	 */
3212395Swnj 	if (uh->uh_mrwant) {
3222395Swnj 		uh->uh_mrwant = 0;
3239353Ssam 		wakeup((caddr_t)&uh->uh_mrwant);
32440Sbill 	}
	/* restart queued devices until one fails to get resources */
32532521Sbostic 	while (uh->uh_actf && ubaqueue(uh->uh_actf, 1))
3262570Swnj 		;
32740Sbill }
32840Sbill 
/*
 * Purge the buffered data path held by controller um: poke the
 * adapter-specific data path register so buffered data is flushed
 * to memory.  Used by drivers that hold a BDP across transfers.
 */
3292729Swnj ubapurge(um)
3302958Swnj 	register struct uba_ctlr *um;
3312729Swnj {
3322729Swnj 	register struct uba_hd *uh = um->um_hd;
33336034Skarels 	register int bdp = UBAI_BDP(um->um_ubinfo);
3342729Swnj 
33529737Skarels 	switch (uh->uh_type) {
33634284Skarels #ifdef DWBUA
33734284Skarels 	case DWBUA:
33834284Skarels 		BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
33934284Skarels 		break;
34034284Skarels #endif
34129737Skarels #ifdef DW780
34229737Skarels 	case DW780:
3432958Swnj 		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
3442729Swnj 		break;
3452729Swnj #endif
34629737Skarels #ifdef DW750
34729737Skarels 	case DW750:
3482958Swnj 		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
3492729Swnj 		break;
3502729Swnj #endif
35129737Skarels 	default:
35229737Skarels 		break;
3532729Swnj 	}
3542729Swnj }
3552729Swnj 
/*
 * Initialize the resource map of UNIBUS map registers and the
 * free-BDP bitmask for the adapter described by uhp.  Called at
 * autoconfiguration time and again on each ubareset().
 */
3566863Swnj ubainitmaps(uhp)
3576863Swnj 	register struct uba_hd *uhp;
3586863Swnj {
3596863Swnj 
36036034Skarels 	if (uhp->uh_memsize > UBA_MAXMR)
36136034Skarels 		uhp->uh_memsize = UBA_MAXMR;
36229737Skarels 	rminit(uhp->uh_map, (long)uhp->uh_memsize, (long)1, "uba", UAMSIZ);
	/* each adapter type has a different number of buffered data paths */
36329737Skarels 	switch (uhp->uh_type) {
36434284Skarels #ifdef DWBUA
36534284Skarels 	case DWBUA:
36634284Skarels 		uhp->uh_bdpfree = (1<<NBDPBUA) - 1;
36734284Skarels 		break;
36834284Skarels #endif
36929737Skarels #ifdef DW780
37029737Skarels 	case DW780:
3716863Swnj 		uhp->uh_bdpfree = (1<<NBDP780) - 1;
3726863Swnj 		break;
3736863Swnj #endif
37429737Skarels #ifdef DW750
37529737Skarels 	case DW750:
3766863Swnj 		uhp->uh_bdpfree = (1<<NBDP750) - 1;
3776863Swnj 		break;
3786863Swnj #endif
37929737Skarels 	default:
3806863Swnj 		break;
3816863Swnj 	}
3826863Swnj }
3836863Swnj 
3842570Swnj /*
3852570Swnj  * Generate a reset on uba number uban.  Then
3862570Swnj  * call each device in the character device table,
3872570Swnj  * giving it a chance to clean up so as to be able to continue.
3882570Swnj  */
3892395Swnj ubareset(uban)
3902570Swnj 	int uban;
391284Sbill {
392284Sbill 	register struct cdevsw *cdp;
3932646Swnj 	register struct uba_hd *uh = &uba_hd[uban];
3941781Sbill 	int s;
395284Sbill 
	/* discard all outstanding resource state and wake any sleepers;
	   ubarelse() detects this via the zeroed ubinfo words */
39626371Skarels 	s = spluba();
3972646Swnj 	uh->uh_users = 0;
3982646Swnj 	uh->uh_zvcnt = 0;
3992646Swnj 	uh->uh_xclu = 0;
4002646Swnj 	uh->uh_actf = uh->uh_actl = 0;
4012646Swnj 	uh->uh_bdpwant = 0;
4022646Swnj 	uh->uh_mrwant = 0;
4036863Swnj 	ubainitmaps(uh);
4042646Swnj 	wakeup((caddr_t)&uh->uh_bdpwant);
4052646Swnj 	wakeup((caddr_t)&uh->uh_mrwant);
4062958Swnj 	printf("uba%d: reset", uban);
4072958Swnj 	ubainit(uh->uh_uba);
4082958Swnj 	ubameminit(uban);
	/* let every character device and the network drivers reattach */
40911722Ssam 	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
4102395Swnj 		(*cdp->d_reset)(uban);
4115221Swnj 	ifubareset(uban);
412284Sbill 	printf("\n");
413302Sbill 	splx(s);
414284Sbill }
4152395Swnj 
4162570Swnj /*
4172570Swnj  * Init a uba.  This is called with a pointer
4182570Swnj  * rather than a virtual address since it is called
4192570Swnj  * by code which runs with memory mapping disabled.
4202570Swnj  * In these cases we really don't need the interrupts
4212570Swnj  * enabled, but since we run with ipl high, we don't care
4222570Swnj  * if they are, they will never happen anyways.
42329737Skarels  * SHOULD GET POINTER TO UBA_HD INSTEAD OF UBA.
4242570Swnj  */
4252423Skre ubainit(uba)
4262423Skre 	register struct uba_regs *uba;
4272395Swnj {
42829737Skarels 	register struct uba_hd *uhp;
42934284Skarels #ifdef QBA
43029737Skarels 	int isphys = 0;
43134284Skarels #endif
4322395Swnj 
	/*
	 * Find our uba_hd entry: the caller may hand us either the
	 * mapped (virtual) or the physical address of the registers.
	 */
43329737Skarels 	for (uhp = uba_hd; uhp < uba_hd + numuba; uhp++) {
43429737Skarels 		if (uhp->uh_uba == uba)
43529737Skarels 			break;
43629737Skarels 		if (uhp->uh_physuba == uba) {
43734284Skarels #ifdef QBA
43829737Skarels 			isphys++;
43934284Skarels #endif
44029737Skarels 			break;
44129737Skarels 		}
44229737Skarels 	}
44329737Skarels 	if (uhp >= uba_hd + numuba) {
44429737Skarels 		printf("init unknown uba\n");
44529737Skarels 		return;
44629737Skarels 	}
44729737Skarels 
44829737Skarels 	switch (uhp->uh_type) {
44934284Skarels #ifdef DWBUA
45034284Skarels 	case DWBUA:
45134284Skarels 		BUA(uba)->bua_csr |= BUACSR_UPI;
45234284Skarels 		/* give devices time to recover from power fail */
45334284Skarels 		DELAY(500000);
45434284Skarels 		break;
45534284Skarels #endif
45629737Skarels #ifdef DW780
45729737Skarels 	case DW780:
	/* adapter init, then spin until the UNIBUS init is complete */
4582958Swnj 		uba->uba_cr = UBACR_ADINIT;
4592958Swnj 		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
4602958Swnj 		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
4612958Swnj 			;
4622958Swnj 		break;
4632958Swnj #endif
46429737Skarels #ifdef DW750
46529737Skarels 	case DW750:
4663352Swnj #endif
46729737Skarels #ifdef DW730
46829737Skarels 	case DW730:
4693352Swnj #endif
47029737Skarels #ifdef QBA
47129737Skarels 	case QBA:
47227255Skridle #endif
47329737Skarels #if DW750 || DW730 || QBA
4743352Swnj 		mtpr(IUR, 0);
4752958Swnj 		/* give devices time to recover from power fail */
4763332Swnj /* THIS IS PROBABLY UNNECESSARY */
4773352Swnj 		DELAY(500000);
4783332Swnj /* END PROBABLY UNNECESSARY */
47929737Skarels #ifdef QBA
48029737Skarels 		/*
48129737Skarels 		 * Re-enable local memory access
48229737Skarels 		 * from the Q-bus.
48329737Skarels 		 */
48429737Skarels 		if (uhp->uh_type == QBA) {
	/* use the physical I/O page address if mapping is still disabled */
48529737Skarels 			if (isphys)
48629737Skarels 				*((char *)QIOPAGE630 + QIPCR) = Q_LMEAE;
48729737Skarels 			else
48829737Skarels 				*(uhp->uh_iopage + QIPCR) = Q_LMEAE;
48929737Skarels 		}
49029737Skarels #endif QBA
4912958Swnj 		break;
49229737Skarels #endif DW750 || DW730 || QBA
4932958Swnj 	}
4942395Swnj }
4952395Swnj 
49636034Skarels #ifdef QBA
49736034Skarels /*
49836034Skarels  * Determine the interrupt priority of a Q-bus
49936034Skarels  * peripheral.  The device probe routine must spl6(),
50036034Skarels  * attempt to make the device request an interrupt,
50136034Skarels  * delaying as necessary, then call this routine
50236034Skarels  * before resetting the device.
50336034Skarels  */
50436034Skarels qbgetpri()
50536034Skarels {
50636034Skarels 	int pri;
50736034Skarels 	extern int cvec;
50836034Skarels 
	/*
	 * Step the processor priority down from 0x17 until the pending
	 * device interrupt gets through (cvec is set by the interrupt;
	 * 0x200 appears to mark a passive release — TODO confirm).
	 */
50936034Skarels 	for (pri = 0x17; pri > 0x14; ) {
51036034Skarels 		if (cvec && cvec != 0x200)	/* interrupted at pri */
51136034Skarels 			break;
51236034Skarels 		pri--;
51336034Skarels 		splx(pri - 1);
51436034Skarels 	}
51536034Skarels 	(void) spl0();
51636034Skarels 	return (pri);
51736034Skarels }
51836034Skarels #endif
51936034Skarels 
52029737Skarels #ifdef DW780
/* reset the uba after every ubawedgecnt errors; panic past ubacrazy */
5214024Swnj int	ubawedgecnt = 10;
5224024Swnj int	ubacrazy = 500;
52317731Skarels int	zvcnt_max = 5000;	/* in 8 sec */
5242570Swnj /*
52524500Sbloom  * This routine is called by the locore code to process a UBA
52624500Sbloom  * error on an 11/780 or 8600.  The arguments are passed
5272570Swnj  * on the stack, and value-result (through some trickery).
5282570Swnj  * In particular, the uvec argument is used for further
5292570Swnj  * uba processing so the result aspect of it is very important.
5302570Swnj  * It must not be declared register.
5312570Swnj  */
5322423Skre /*ARGSUSED*/
53317731Skarels ubaerror(uban, uh, ipl, uvec, uba)
5342395Swnj 	register int uban;
5352395Swnj 	register struct uba_hd *uh;
53617731Skarels 	int ipl, uvec;
5372395Swnj 	register struct uba_regs *uba;
5382395Swnj {
5392395Swnj 	register sr, s;
5402395Swnj 
	/*
	 * uvec == 0 is a "zero vector" (passive release); count them
	 * and reset the uba if too many arrive within an 8 sec window.
	 */
5412395Swnj 	if (uvec == 0) {
54226215Skarels 		/*
54326215Skarels 		 * Declare dt as unsigned so that negative values
54426215Skarels 		 * are handled as >8 below, in case time was set back.
54526215Skarels 		 */
54626215Skarels 		u_long	dt = time.tv_sec - uh->uh_zvtime;
54726215Skarels 
54826215Skarels 		uh->uh_zvtotal++;
54917731Skarels 		if (dt > 8) {
55026215Skarels 			uh->uh_zvtime = time.tv_sec;
55117731Skarels 			uh->uh_zvcnt = 0;
55217731Skarels 		}
55317731Skarels 		if (++uh->uh_zvcnt > zvcnt_max) {
55417731Skarels 			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
55517731Skarels 				uban, uh->uh_zvcnt, dt + 1);
55617731Skarels 			printf("\tIPL 0x%x\n\tcnfgr: %b  Adapter Code: 0x%x\n",
55717731Skarels 				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
55817731Skarels 				uba->uba_cnfgr&0xff);
55917731Skarels 			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
56017731Skarels 				uba->uba_sr, ubasr_bits, uba->uba_dcr,
56117731Skarels 				(uba->uba_dcr&0x8000000)?"":"NOT ");
5622395Swnj 			ubareset(uban);
5632395Swnj 		}
5642395Swnj 		return;
5652395Swnj 	}
	/* SBI-level configuration faults mean the whole adapter is sick */
5662395Swnj 	if (uba->uba_cnfgr & NEX_CFGFLT) {
5672929Swnj 		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
5682929Swnj 		    uban, uba->uba_sr, ubasr_bits,
5693248Swnj 		    uba->uba_cnfgr, NEXFLT_BITS);
5702395Swnj 		ubareset(uban);
5712395Swnj 		uvec = 0;
5722395Swnj 		return;
5732395Swnj 	}
5742395Swnj 	sr = uba->uba_sr;
57526371Skarels 	s = spluba();
5763473Swnj 	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
5773473Swnj 	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
5782395Swnj 	splx(s);
	/* writing the saved status bits back presumably clears them
	   (write-one-to-clear) — TODO confirm against DW780 handbook */
5792395Swnj 	uba->uba_sr = sr;
	/* pass only the interrupt vector field back to locore (value-result) */
5802958Swnj 	uvec &= UBABRRVR_DIV;
5814024Swnj 	if (++uh->uh_errcnt % ubawedgecnt == 0) {
5824024Swnj 		if (uh->uh_errcnt > ubacrazy)
5834024Swnj 			panic("uba crazy");
5844024Swnj 		printf("ERROR LIMIT ");
5854024Swnj 		ubareset(uban);
5864024Swnj 		uvec = 0;
5874024Swnj 		return;
5884024Swnj 	}
5892395Swnj 	return;
5902395Swnj }
5912395Swnj #endif
5923745Sroot 
5933745Sroot /*
59417731Skarels  * Look for devices with unibus memory, allow them to configure, then disable
59517731Skarels  * map registers as necessary.  Called during autoconfiguration and ubareset.
59617731Skarels  * The device ubamem routine returns 0 on success, 1 on success if it is fully
59717731Skarels  * configured (has no csr or interrupt, so doesn't need to be probed),
59817731Skarels  * and -1 on failure.
59917731Skarels  */
60017731Skarels ubameminit(uban)
60117731Skarels {
60217731Skarels 	register struct uba_device *ui;
60317731Skarels 	register struct uba_hd *uh = &uba_hd[uban];
	/* 0x3e000: start of the top 8Kb of unibus address space in umem */
60417731Skarels 	caddr_t umembase = umem[uban] + 0x3e000, addr;
60517731Skarels #define	ubaoff(off)	((int)(off) & 0x1fff)
60617731Skarels 
60717731Skarels 	uh->uh_lastmem = 0;
	/* offer every configured device with a ubamem routine a chance
	   to claim its unibus memory on this uba */
60817731Skarels 	for (ui = ubdinit; ui->ui_driver; ui++) {
60917731Skarels 		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
61017731Skarels 			continue;
61117731Skarels 		if (ui->ui_driver->ud_ubamem) {
61217731Skarels 			/*
61317731Skarels 			 * During autoconfiguration, need to fudge ui_addr.
61417731Skarels 			 */
61517731Skarels 			addr = ui->ui_addr;
61617731Skarels 			ui->ui_addr = umembase + ubaoff(addr);
61717731Skarels 			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
61817731Skarels 			case 1:
61917731Skarels 				ui->ui_alive = 1;
62017731Skarels 				/* FALLTHROUGH */
62117731Skarels 			case 0:
62217731Skarels 				ui->ui_ubanum = uban;
62317731Skarels 				break;
62417731Skarels 			}
62517731Skarels 			ui->ui_addr = addr;
62617731Skarels 		}
62717731Skarels 	}
62829737Skarels #ifdef DW780
62917731Skarels 	/*
63029737Skarels 	 * On a DW780, throw away any map registers disabled by rounding
63117731Skarels 	 * the map disable in the configuration register
63217731Skarels 	 * up to the next 8K boundary, or below the last unibus memory.
63317731Skarels 	 */
63429737Skarels 	if (uh->uh_type == DW780) {
63517731Skarels 		register i;
63617731Skarels 
63717731Skarels 		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
63817731Skarels 		while (i)
63917731Skarels 			(void) rmget(uh->uh_map, 1, i--);
64017731Skarels 	}
64117731Skarels #endif
64217731Skarels }
64317731Skarels 
64417731Skarels /*
64514790Ssam  * Allocate UNIBUS memory.  Allocates and initializes
64614790Ssam  * sufficient mapping registers for access.  On a 780,
64714790Ssam  * the configuration register is setup to disable UBA
64814790Ssam  * response on DMA transfers to addresses controlled
64914790Ssam  * by the disabled mapping registers.
65029737Skarels  * On a DW780, should only be called from ubameminit, or in ascending order
65117731Skarels  * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
65217731Skarels  * the last unibus memory would free unusable map registers.
65317731Skarels  * Doalloc is 1 to allocate, 0 to deallocate.
6546518Sfeldman  */
65514790Ssam ubamem(uban, addr, npg, doalloc)
65614790Ssam 	int uban, addr, npg, doalloc;
6576518Sfeldman {
6586518Sfeldman 	register struct uba_hd *uh = &uba_hd[uban];
65914790Ssam 	register int a;
66017731Skarels 	int s;
6616518Sfeldman 
	/* byte address -> 512-byte page number; resource map is 1-based */
66217731Skarels 	a = (addr >> 9) + 1;
66326371Skarels 	s = spluba();
66417731Skarels 	if (doalloc)
66517731Skarels 		a = rmget(uh->uh_map, npg, a);
66617731Skarels 	else
66717731Skarels 		rmfree(uh->uh_map, (long)npg, (long)a);
66817731Skarels 	splx(s);
6696518Sfeldman 	if (a) {
67014790Ssam 		register int i, *m;
67114790Ssam 
	/* invalidate the map registers covering the memory range */
67229737Skarels 		m = (int *)&uh->uh_mr[a - 1];
67314790Ssam 		for (i = 0; i < npg; i++)
6746518Sfeldman 			*m++ = 0;	/* All off, especially 'valid' */
	/* track the high-water mark of unibus memory for the DW780 below */
67517731Skarels 		i = addr + npg * 512;
67617731Skarels 		if (doalloc && i > uh->uh_lastmem)
67717731Skarels 			uh->uh_lastmem = i;
67817731Skarels 		else if (doalloc == 0 && i == uh->uh_lastmem)
67917731Skarels 			uh->uh_lastmem = addr;
68029737Skarels #ifdef DW780
68114790Ssam 		/*
68214790Ssam 		 * On a 780, set up the map register disable
68314790Ssam 		 * field in the configuration register.  Beware
68417731Skarels 		 * of callers that request memory ``out of order''
68517731Skarels 		 * or in sections other than 8K multiples.
68617731Skarels 		 * Ubameminit handles such requests properly, however.
68714790Ssam 		 */
68829737Skarels 		if (uh->uh_type == DW780) {
68917731Skarels 			i = uh->uh_uba->uba_cr &~ 0x7c000000;
69017731Skarels 			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
69117731Skarels 			uh->uh_uba->uba_cr = i;
6927473Sfeldman 		}
6937473Sfeldman #endif
6946518Sfeldman 	}
	/* returns the (1-based) map index on success, 0 on failure */
69514790Ssam 	return (a);
6966518Sfeldman }
6977304Ssam 
6989875Ssam #include "ik.h"
69924501Sjg #include "vs.h"
70024501Sjg #if NIK > 0 || NVS > 0
7017304Ssam /*
7027304Ssam  * Map a virtual address into users address space. Actually all we
7037304Ssam  * do is turn on the user mode write protection bits for the particular
7047304Ssam  * page of memory involved.
7057304Ssam  */
7067304Ssam maptouser(vaddress)
7077304Ssam 	caddr_t vaddress;
7087304Ssam {
7097304Ssam 
	/* set the page's protection to user-writable (PG_UW) */
71034284Skarels 	kvtopte(vaddress)->pg_prot = (PG_UW >> 27);
7117304Ssam }
7127304Ssam 
/*
 * Undo maptouser(): restore kernel-only write protection on the page.
 */
7137304Ssam unmaptouser(vaddress)
7147304Ssam 	caddr_t vaddress;
7157304Ssam {
7167304Ssam 
	/* set the page's protection back to kernel-writable (PG_KW) */
71734284Skarels 	kvtopte(vaddress)->pg_prot = (PG_KW >> 27);
7187304Ssam }
7199174Ssam #endif
720