xref: /csrg-svn/sys/vax/uba/uba.c (revision 26215)
123352Smckusick /*
223352Smckusick  * Copyright (c) 1982 Regents of the University of California.
323352Smckusick  * All rights reserved.  The Berkeley software License Agreement
423352Smckusick  * specifies the terms and conditions for redistribution.
523352Smckusick  *
6*26215Skarels  *	@(#)uba.c	6.9 (Berkeley) 02/17/86
723352Smckusick  */
840Sbill 
99780Ssam #include "../machine/pte.h"
109780Ssam 
1117081Sbloom #include "param.h"
1217081Sbloom #include "systm.h"
1317081Sbloom #include "map.h"
1417081Sbloom #include "buf.h"
1517081Sbloom #include "vm.h"
1617081Sbloom #include "dir.h"
1717081Sbloom #include "user.h"
1817081Sbloom #include "proc.h"
1917081Sbloom #include "conf.h"
2017081Sbloom #include "dk.h"
2117081Sbloom #include "kernel.h"
2240Sbill 
238481Sroot #include "../vax/cpu.h"
248481Sroot #include "../vax/mtpr.h"
258481Sroot #include "../vax/nexus.h"
2617081Sbloom #include "ubareg.h"
2717081Sbloom #include "ubavar.h"
288481Sroot 
/*
 * Printable names for the UBA status register bits; consumed by the
 * printf %b format in ubaerror() below.  Only the 780 and 8600 have
 * this style of UBA status register.
 */
2924182Sbloom #if defined(VAX780) || defined(VAX8600)
302929Swnj char	ubasr_bits[] = UBASR_BITS;
312929Swnj #endif
322929Swnj 
3340Sbill /*
342570Swnj  * Do transfer on device argument.  The controller
352570Swnj  * and uba involved are implied by the device.
362570Swnj  * We queue for resource wait in the uba code if necessary.
372570Swnj  * We return 1 if the transfer was started, 0 if it was not.
382570Swnj  * If you call this routine with the head of the queue for a
392570Swnj  * UBA, it will automatically remove the device from the UBA
402570Swnj  * queue before it returns.  If some other device is given
412570Swnj  * as argument, it will be added to the request queue if the
422570Swnj  * request cannot be started immediately.  This means that
432570Swnj  * passing a device which is on the queue but not at the head
442570Swnj  * of the request queue is likely to be a disaster.
452570Swnj  */
462570Swnj ubago(ui)
472958Swnj 	register struct uba_device *ui;
482570Swnj {
492958Swnj 	register struct uba_ctlr *um = ui->ui_mi;
502570Swnj 	register struct uba_hd *uh;
512570Swnj 	register int s, unit;
522570Swnj 
532570Swnj 	uh = &uba_hd[um->um_ubanum];
	/* Block device interrupts while we inspect and claim UBA resources. */
542570Swnj 	s = spl6();
	/*
	 * An exclusive-use transfer cannot start while anyone else is
	 * active, and nothing may start while an exclusive transfer
	 * holds the UBA.
	 */
552628Swnj 	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
562616Swnj 		goto rwait;
	/* Try to allocate map registers/BDP without sleeping. */
572570Swnj 	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
582570Swnj 	    UBA_NEEDBDP|UBA_CANTWAIT);
592616Swnj 	if (um->um_ubinfo == 0)
602616Swnj 		goto rwait;
612616Swnj 	uh->uh_users++;
622628Swnj 	if (um->um_driver->ud_xclu)
632616Swnj 		uh->uh_xclu = 1;
642570Swnj 	splx(s);
	/* Record iostat activity if this device has a dk slot assigned. */
652570Swnj 	if (ui->ui_dk >= 0) {
662570Swnj 		unit = ui->ui_dk;
672570Swnj 		dk_busy |= 1<<unit;
686348Swnj 		dk_xfer[unit]++;
		/* byte count >> 6 converts to the 32-word units dk_wds uses */
696348Swnj 		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
702570Swnj 	}
	/* If the caller passed the head of the UBA queue, dequeue it. */
712570Swnj 	if (uh->uh_actf == ui)
722570Swnj 		uh->uh_actf = ui->ui_forw;
	/* Fire the driver's "go" routine to start the transfer. */
732570Swnj 	(*um->um_driver->ud_dgo)(um);
742570Swnj 	return (1);
752616Swnj rwait:
	/* Resources unavailable: append to the UBA wait queue unless the
	 * device is already at its head (see the warning comment above). */
762616Swnj 	if (uh->uh_actf != ui) {
772616Swnj 		ui->ui_forw = NULL;
782616Swnj 		if (uh->uh_actf == NULL)
792616Swnj 			uh->uh_actf = ui;
802616Swnj 		else
812616Swnj 			uh->uh_actl->ui_forw = ui;
822616Swnj 		uh->uh_actl = ui;
832616Swnj 	}
842616Swnj 	splx(s);
852616Swnj 	return (0);
862570Swnj }
872570Swnj 
/*
 * Transfer complete on controller um: release its exclusive hold (if
 * any), drop the user count, and give back the map registers and BDP.
 * Order matters: uh_xclu and uh_users are cleared first because
 * ubarelse() may immediately restart queued transfers via ubago(),
 * which tests both fields.
 */
882616Swnj ubadone(um)
892958Swnj 	register struct uba_ctlr *um;
902616Swnj {
912616Swnj 	register struct uba_hd *uh = &uba_hd[um->um_ubanum];
922616Swnj 
932628Swnj 	if (um->um_driver->ud_xclu)
942616Swnj 		uh->uh_xclu = 0;
952616Swnj 	uh->uh_users--;
962616Swnj 	ubarelse(um->um_ubanum, &um->um_ubinfo);
972616Swnj }
982616Swnj 
992570Swnj /*
1002395Swnj  * Allocate and setup UBA map registers, and bdp's
1012395Swnj  * Flags says whether bdp is needed, whether the caller can't
1022395Swnj  * wait (e.g. if the caller is at interrupt level).
10340Sbill  *
1042570Swnj  * Return value:
10540Sbill  *	Bits 0-8	Byte offset
10640Sbill  *	Bits 9-17	Start map reg. no.
10740Sbill  *	Bits 18-27	No. mapping reg's
10840Sbill  *	Bits 28-31	BDP no.
10940Sbill  */
1102395Swnj ubasetup(uban, bp, flags)
1112395Swnj 	struct buf *bp;
11240Sbill {
1132395Swnj 	register struct uba_hd *uh = &uba_hd[uban];
11418417Smckusick 	int pfnum, temp;
11540Sbill 	int npf, reg, bdp;
11640Sbill 	unsigned v;
11740Sbill 	register struct pte *pte, *io;
11840Sbill 	struct proc *rp;
11940Sbill 	int a, o, ubinfo;
12040Sbill 
1216948Ssam #if VAX730
	/* The 730 has no buffered data paths; quietly ignore the request. */
1226948Ssam 	if (cpu == VAX_730)
1233332Swnj 		flags &= ~UBA_NEEDBDP;
1243332Swnj #endif
	/* v: starting virtual page; o: byte offset within the first page;
	 * npf: pages to map, plus one trailing invalid map register to
	 * stop runaway transfers. */
12540Sbill 	v = btop(bp->b_un.b_addr);
12640Sbill 	o = (int)bp->b_un.b_addr & PGOFSET;
12740Sbill 	npf = btoc(bp->b_bcount + o) + 1;
12840Sbill 	a = spl6();
	/* Allocate contiguous map registers, sleeping unless UBA_CANTWAIT. */
1298811Sroot 	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
1303913Swnj 		if (flags & UBA_CANTWAIT) {
1313913Swnj 			splx(a);
1322395Swnj 			return (0);
1333913Swnj 		}
1342395Swnj 		uh->uh_mrwant++;
1359353Ssam 		sleep((caddr_t)&uh->uh_mrwant, PSWP);
13640Sbill 	}
	/* Devices that can only address 16 bits need registers below 128
	 * (the first 64K of UNIBUS address space). */
13717731Skarels 	if ((flags & UBA_NEED16) && reg + npf > 128) {
13817731Skarels 		/*
13917731Skarels 		 * Could hang around and try again (if we can ever succeed).
14017731Skarels 		 * Won't help any current device...
14117731Skarels 		 */
14217731Skarels 		rmfree(uh->uh_map, (long)npf, (long)reg);
14317731Skarels 		splx(a);
14417731Skarels 		return (0);
14517731Skarels 	}
14640Sbill 	bdp = 0;
	/* Grab a buffered data path if requested, or reuse the caller's. */
1472395Swnj 	if (flags & UBA_NEEDBDP) {
1482395Swnj 		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
1492395Swnj 			if (flags & UBA_CANTWAIT) {
1508811Sroot 				rmfree(uh->uh_map, (long)npf, (long)reg);
1513913Swnj 				splx(a);
1522395Swnj 				return (0);
1532395Swnj 			}
1542395Swnj 			uh->uh_bdpwant++;
1559353Ssam 			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
15640Sbill 		}
1572463Swnj 		uh->uh_bdpfree &= ~(1 << (bdp-1));
1584758Swnj 	} else if (flags & UBA_HAVEBDP)
1594758Swnj 		bdp = (flags >> 28) & 0xf;
16040Sbill 	splx(a);
	/* rmalloc is 1-origin; convert to a 0-origin register index. */
1612463Swnj 	reg--;
	/* Pack the return value per the bit layout documented above. */
16240Sbill 	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	/* temp: constant part of each map register — BDP number, valid bit,
	 * and byte-offset bit for odd-address buffered transfers. */
1632958Swnj 	temp = (bdp << 21) | UBAMR_MRV;
16440Sbill 	if (bdp && (o & 01))
1652958Swnj 		temp |= UBAMR_BO;
	/* Locate the pte chain describing the buffer: kernel virtual,
	 * u-area, page table pages, or ordinary user pages. */
1666382Swnj 	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
1676382Swnj 	if ((bp->b_flags & B_PHYS) == 0)
168728Sbill 		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
1696382Swnj 	else if (bp->b_flags & B_UAREA)
1706382Swnj 		pte = &rp->p_addr[v];
1716382Swnj 	else if (bp->b_flags & B_PAGET)
1726382Swnj 		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
1736382Swnj 	else
1746382Swnj 		pte = vtopte(rp, v);
	/* Load the adapter map registers with the page frame numbers. */
1756382Swnj 	io = &uh->uh_uba->uba_map[reg];
1766382Swnj 	while (--npf != 0) {
17718417Smckusick 		pfnum = pte->pg_pfnum;
17818417Smckusick 		if (pfnum == 0)
1796382Swnj 			panic("uba zero uentry");
18018417Smckusick 		pte++;
18118417Smckusick 		*(int *)io++ = pfnum | temp;
18240Sbill 	}
	/* Trailing invalid map register fences off runaway transfers. */
18340Sbill 	*(int *)io++ = 0;
18440Sbill 	return (ubinfo);
18540Sbill }
18640Sbill 
18740Sbill /*
1882570Swnj  * Non buffer setup interface... set up a buffer and call ubasetup.
18940Sbill  */
1902395Swnj uballoc(uban, addr, bcnt, flags)
1913107Swnj 	int uban;
19240Sbill 	caddr_t addr;
1933107Swnj 	int bcnt, flags;
19440Sbill {
195883Sbill 	struct buf ubabuf;
19640Sbill 
19740Sbill 	ubabuf.b_un.b_addr = addr;
19840Sbill 	ubabuf.b_flags = B_BUSY;
19940Sbill 	ubabuf.b_bcount = bcnt;
200883Sbill 	/* that's all the fields ubasetup() needs */
2012395Swnj 	return (ubasetup(uban, &ubabuf, flags));
20240Sbill }
20340Sbill 
2042053Swnj /*
2052570Swnj  * Release resources on uba uban, and then unblock resource waiters.
2062570Swnj  * The map register parameter is by value since we need to block
2072570Swnj  * against uba resets on 11/780's.
2082053Swnj  */
2092395Swnj ubarelse(uban, amr)
2102053Swnj 	int *amr;
21140Sbill {
2122395Swnj 	register struct uba_hd *uh = &uba_hd[uban];
2132570Swnj 	register int bdp, reg, npf, s;
2142053Swnj 	int mr;
21540Sbill 
2162570Swnj 	/*
2172570Swnj 	 * Carefully see if we should release the space, since
2182570Swnj 	 * it may be released asynchronously at uba reset time.
2192570Swnj 	 */
2202570Swnj 	s = spl6();
2212053Swnj 	mr = *amr;
2222053Swnj 	if (mr == 0) {
2232570Swnj 		/*
2242570Swnj 		 * A ubareset() occurred before we got around
2252570Swnj 		 * to releasing the space... no need to bother.
2262570Swnj 		 */
2272570Swnj 		splx(s);
2282053Swnj 		return;
2292053Swnj 	}
	/* Zero the caller's copy first so a racing reset won't free twice. */
2302067Swnj 	*amr = 0;
	/* Extract the BDP number from bits 28-31 of the ubinfo cookie. */
23140Sbill 	bdp = (mr >> 28) & 0x0f;
23240Sbill 	if (bdp) {
		/* Purge the data path before returning it to the free mask;
		 * the register layout differs per CPU type. */
2332729Swnj 		switch (cpu) {
23424182Sbloom #if defined(VAX780) || defined(VAX8600)
23524182Sbloom 		case VAX_8600:
2362423Skre 		case VAX_780:
2372958Swnj 			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
2382423Skre 			break;
2392423Skre #endif
2402423Skre #if VAX750
2412423Skre 		case VAX_750:
2422958Swnj 			uh->uh_uba->uba_dpr[bdp] |=
2432958Swnj 			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
2442423Skre 			break;
2452423Skre #endif
2462423Skre 		}
2472570Swnj 		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
2482395Swnj 		if (uh->uh_bdpwant) {
2492395Swnj 			uh->uh_bdpwant = 0;
2509353Ssam 			wakeup((caddr_t)&uh->uh_bdpwant);
25140Sbill 		}
25240Sbill 	}
2532570Swnj 	/*
2542570Swnj 	 * Put back the registers in the resource map.
25517731Skarels 	 * The map code must not be reentered,
25617731Skarels 	 * nor can the registers be freed twice.
25717731Skarels 	 * Unblock interrupts once this is done.
2582570Swnj 	 */
25940Sbill 	npf = (mr >> 18) & 0x3ff;
	/* +1 converts back to the 1-origin index rmfree expects. */
26040Sbill 	reg = ((mr >> 9) & 0x1ff) + 1;
2618811Sroot 	rmfree(uh->uh_map, (long)npf, (long)reg);
2622570Swnj 	splx(s);
2632570Swnj 
2642570Swnj 	/*
2652570Swnj 	 * Wakeup sleepers for map registers,
2662570Swnj 	 * and also, if there are processes blocked in dgo(),
2672570Swnj 	 * give them a chance at the UNIBUS.
2682570Swnj 	 */
2692395Swnj 	if (uh->uh_mrwant) {
2702395Swnj 		uh->uh_mrwant = 0;
2719353Ssam 		wakeup((caddr_t)&uh->uh_mrwant);
27240Sbill 	}
	/* Restart queued devices until one cannot be started. */
2732570Swnj 	while (uh->uh_actf && ubago(uh->uh_actf))
2742570Swnj 		;
27540Sbill }
27640Sbill 
/*
 * Purge the buffered data path in use by controller um, forcing any
 * bytes buffered in the BDP out to memory.  The BDP number is taken
 * from bits 28-31 of the controller's ubinfo; the purge mechanism is
 * CPU dependent.
 */
2772729Swnj ubapurge(um)
2782958Swnj 	register struct uba_ctlr *um;
2792729Swnj {
2802729Swnj 	register struct uba_hd *uh = um->um_hd;
2812729Swnj 	register int bdp = (um->um_ubinfo >> 28) & 0x0f;
2822729Swnj 
2832729Swnj 	switch (cpu) {
28424182Sbloom #if defined(VAX780) || defined(VAX8600)
28524182Sbloom 	case VAX_8600:
2862729Swnj 	case VAX_780:
		/* setting BNE (buffer not empty) initiates the purge */
2872958Swnj 		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
2882729Swnj 		break;
2892729Swnj #endif
2902729Swnj #if VAX750
2912729Swnj 	case VAX_750:
		/* purge and clear any latched NXM/uncorrectable errors */
2922958Swnj 		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
2932729Swnj 		break;
2942729Swnj #endif
2952729Swnj 	}
2962729Swnj }
2972729Swnj 
/*
 * Initialize the resource maps for a UBA: the map-register allocation
 * map (all NUBMREG registers free) and the buffered-data-path free
 * mask (all BDPs free; the 730 has none).  Called at boot and from
 * ubareset().
 */
2986863Swnj ubainitmaps(uhp)
2996863Swnj 	register struct uba_hd *uhp;
3006863Swnj {
3016863Swnj 
3028811Sroot 	rminit(uhp->uh_map, (long)NUBMREG, (long)1, "uba", UAMSIZ);
3036863Swnj 	switch (cpu) {
30424182Sbloom #if defined(VAX780) || defined(VAX8600)
30524182Sbloom 	case VAX_8600:
3066863Swnj 	case VAX_780:
3076863Swnj 		uhp->uh_bdpfree = (1<<NBDP780) - 1;
3086863Swnj 		break;
3096863Swnj #endif
3106863Swnj #if VAX750
3116863Swnj 	case VAX_750:
3126863Swnj 		uhp->uh_bdpfree = (1<<NBDP750) - 1;
3136863Swnj 		break;
3146863Swnj #endif
3156948Ssam #if VAX730
3166948Ssam 	case VAX_730:
		/* no buffered data paths on a 730 */
3176863Swnj 		break;
3186863Swnj #endif
3196863Swnj 	}
3206863Swnj }
3216863Swnj 
3222570Swnj /*
3232570Swnj  * Generate a reset on uba number uban.  Then
3242570Swnj  * call each device in the character device table,
3252570Swnj  * giving it a chance to clean up so as to be able to continue.
3262570Swnj  */
3272395Swnj ubareset(uban)
3282570Swnj 	int uban;
3292570Swnj {
330284Sbill 	register struct cdevsw *cdp;
3312646Swnj 	register struct uba_hd *uh = &uba_hd[uban];
3321781Sbill 	int s;
333284Sbill 
	/* Block device interrupts for the whole reset sequence. */
334302Sbill 	s = spl6();
	/* Discard all software state: users, exclusivity, wait queue,
	 * sleepers, and the resource maps. */
3352646Swnj 	uh->uh_users = 0;
3362646Swnj 	uh->uh_zvcnt = 0;
3372646Swnj 	uh->uh_xclu = 0;
3382646Swnj 	uh->uh_actf = uh->uh_actl = 0;
3392646Swnj 	uh->uh_bdpwant = 0;
3402646Swnj 	uh->uh_mrwant = 0;
3416863Swnj 	ubainitmaps(uh);
	/* Wake anyone who was sleeping for resources that no longer exist. */
3422646Swnj 	wakeup((caddr_t)&uh->uh_bdpwant);
3432646Swnj 	wakeup((caddr_t)&uh->uh_mrwant);
3442958Swnj 	printf("uba%d: reset", uban);
	/* Reinitialize the adapter hardware and UNIBUS memory devices. */
3452958Swnj 	ubainit(uh->uh_uba);
34617731Skarels 	ubameminit(uban);
	/* Let every character device driver clean up and restart. */
34711722Ssam 	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
3482395Swnj 		(*cdp->d_reset)(uban);
	/* ... and the network interfaces as well. */
3495221Swnj 	ifubareset(uban);
350284Sbill 	printf("\n");
351302Sbill 	splx(s);
352284Sbill }
3532395Swnj 
3542570Swnj /*
3552570Swnj  * Init a uba.  This is called with a pointer
3562570Swnj  * rather than a virtual address since it is called
3572570Swnj  * by code which runs with memory mapping disabled.
3582570Swnj  * In these cases we really don't need the interrupts
3592570Swnj  * enabled, but since we run with ipl high, we don't care
3602570Swnj  * if they are, they will never happen anyways.
3612570Swnj  */
3622423Skre ubainit(uba)
3632423Skre 	register struct uba_regs *uba;
3642395Swnj {
3652395Swnj 
3662958Swnj 	switch (cpu) {
36724182Sbloom #if defined(VAX780) || defined(VAX8600)
36824182Sbloom 	case VAX_8600:
3693248Swnj 	case VAX_780:
		/* adapter init, then enable interrupt-fielding and error
		 * interrupts in the control register */
3702958Swnj 		uba->uba_cr = UBACR_ADINIT;
3712958Swnj 		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		/* busy-wait for UNIBUS initialization to complete */
3722958Swnj 		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
3732958Swnj 			;
3742958Swnj 		break;
3752958Swnj #endif
3762958Swnj #if VAX750
3773248Swnj 	case VAX_750:
3783352Swnj #endif
3796948Ssam #if VAX730
3806948Ssam 	case VAX_730:
3813352Swnj #endif
3826948Ssam #if defined(VAX750) || defined(VAX730)
		/* integrated UBAs: a write to the IUR internal register
		 * initializes the UNIBUS */
3833352Swnj 		mtpr(IUR, 0);
3842958Swnj 		/* give devices time to recover from power fail */
3853332Swnj /* THIS IS PROBABLY UNNECESSARY */
3863352Swnj 		DELAY(500000);
3873332Swnj /* END PROBABLY UNNECESSARY */
3882958Swnj 		break;
3892958Swnj #endif
3902958Swnj 	}
3912395Swnj }
3922395Swnj 
39324500Sbloom #if defined(VAX780) || defined(VAX8600)
/* reset the UBA after this many errors ... */
3944024Swnj int	ubawedgecnt = 10;
/* ... and give up entirely (panic) after this many */
3954024Swnj int	ubacrazy = 500;
39617731Skarels int	zvcnt_max = 5000;	/* in 8 sec */
3972570Swnj /*
39824500Sbloom  * This routine is called by the locore code to process a UBA
39924500Sbloom  * error on an 11/780 or 8600.  The arguments are passed
4002570Swnj  * on the stack, and value-result (through some trickery).
4012570Swnj  * In particular, the uvec argument is used for further
4022570Swnj  * uba processing so the result aspect of it is very important.
4032570Swnj  * It must not be declared register.
4042570Swnj  */
4052423Skre /*ARGSUSED*/
40617731Skarels ubaerror(uban, uh, ipl, uvec, uba)
4072395Swnj 	register int uban;
4082395Swnj 	register struct uba_hd *uh;
40917731Skarels 	int ipl, uvec;
4102395Swnj 	register struct uba_regs *uba;
4112395Swnj {
4122395Swnj 	register sr, s;
4132395Swnj 
	/* uvec == 0 means a zero (passive-release) vector was delivered. */
4142395Swnj 	if (uvec == 0) {
415*26215Skarels 		/*
416*26215Skarels 		 * Declare dt as unsigned so that negative values
417*26215Skarels 		 * are handled as >8 below, in case time was set back.
418*26215Skarels 		 */
419*26215Skarels 		u_long	dt = time.tv_sec - uh->uh_zvtime;
420*26215Skarels 
421*26215Skarels 		uh->uh_zvtotal++;
		/* restart the per-interval count every 8 seconds */
42217731Skarels 		if (dt > 8) {
423*26215Skarels 			uh->uh_zvtime = time.tv_sec;
42417731Skarels 			uh->uh_zvcnt = 0;
42517731Skarels 		}
		/* too many zero vectors in the interval: log state and reset */
42617731Skarels 		if (++uh->uh_zvcnt > zvcnt_max) {
42717731Skarels 			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
42817731Skarels 				uban, uh->uh_zvcnt, dt + 1);
42917731Skarels 			printf("\tIPL 0x%x\n\tcnfgr: %b  Adapter Code: 0x%x\n",
43017731Skarels 				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
43117731Skarels 				uba->uba_cnfgr&0xff);
43217731Skarels 			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
43317731Skarels 				uba->uba_sr, ubasr_bits, uba->uba_dcr,
43417731Skarels 				(uba->uba_dcr&0x8000000)?"":"NOT ");
4352395Swnj 			ubareset(uban);
4362395Swnj 		}
4372395Swnj 		return;
4382395Swnj 	}
	/* An SBI configuration fault is unrecoverable short of a reset. */
4392395Swnj 	if (uba->uba_cnfgr & NEX_CFGFLT) {
4402929Swnj 		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
4412929Swnj 		    uban, uba->uba_sr, ubasr_bits,
4423248Swnj 		    uba->uba_cnfgr, NEXFLT_BITS);
4432395Swnj 		ubareset(uban);
		/* value-result: tell locore not to dispatch a vector */
4442395Swnj 		uvec = 0;
4452395Swnj 		return;
4462395Swnj 	}
	/* Log the error, then write the status register back to clear
	 * the latched error bits (write-one-to-clear semantics assumed
	 * from the read/write-back pattern — per the UBA register spec). */
4472395Swnj 	sr = uba->uba_sr;
4482395Swnj 	s = spl7();
4493473Swnj 	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
4503473Swnj 	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
4512395Swnj 	splx(s);
4522395Swnj 	uba->uba_sr = sr;
	/* value-result: pass back only the vector's dispatch bits */
4532958Swnj 	uvec &= UBABRRVR_DIV;
	/* Every ubawedgecnt-th error triggers a reset; past ubacrazy
	 * errors the adapter is deemed hopeless. */
4544024Swnj 	if (++uh->uh_errcnt % ubawedgecnt == 0) {
4554024Swnj 		if (uh->uh_errcnt > ubacrazy)
4564024Swnj 			panic("uba crazy");
4574024Swnj 		printf("ERROR LIMIT ");
4584024Swnj 		ubareset(uban);
4594024Swnj 		uvec = 0;
4604024Swnj 		return;
4614024Swnj 	}
4622395Swnj 	return;
4632395Swnj }
4642395Swnj #endif
4653745Sroot 
4663745Sroot /*
46717731Skarels  * Look for devices with unibus memory, allow them to configure, then disable
46817731Skarels  * map registers as necessary.  Called during autoconfiguration and ubareset.
46917731Skarels  * The device ubamem routine returns 0 on success, 1 on success if it is fully
47017731Skarels  * configured (has no csr or interrupt, so doesn't need to be probed),
47117731Skarels  * and -1 on failure.
47217731Skarels  */
47317731Skarels ubameminit(uban)
47417731Skarels {
47517731Skarels 	register struct uba_device *ui;
47617731Skarels 	register struct uba_hd *uh = &uba_hd[uban];
	/* UNIBUS memory occupies the top 8K of the 18-bit address space */
47717731Skarels 	caddr_t umembase = umem[uban] + 0x3e000, addr;
47817731Skarels #define	ubaoff(off)	((int)(off) & 0x1fff)
47917731Skarels 
48017731Skarels 	uh->uh_lastmem = 0;
	/* Give each configured device with a ubamem routine a chance to
	 * claim its UNIBUS memory. */
48117731Skarels 	for (ui = ubdinit; ui->ui_driver; ui++) {
48217731Skarels 		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
48317731Skarels 			continue;
48417731Skarels 		if (ui->ui_driver->ud_ubamem) {
48517731Skarels 			/*
48617731Skarels 			 * During autoconfiguration, need to fudge ui_addr.
48717731Skarels 			 */
48817731Skarels 			addr = ui->ui_addr;
48917731Skarels 			ui->ui_addr = umembase + ubaoff(addr);
49017731Skarels 			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
			/* 1: fully configured, no csr/interrupt probe needed */
49117731Skarels 			case 1:
49217731Skarels 				ui->ui_alive = 1;
49317731Skarels 				/* FALLTHROUGH */
			/* 0: success; pin the device to this uba */
49417731Skarels 			case 0:
49517731Skarels 				ui->ui_ubanum = uban;
49617731Skarels 				break;
49717731Skarels 			}
49817731Skarels 			ui->ui_addr = addr;
49917731Skarels 		}
50017731Skarels 	}
50124182Sbloom #if defined(VAX780) || defined(VAX8600)
50217731Skarels 	/*
50317731Skarels 	 * On a 780, throw away any map registers disabled by rounding
50417731Skarels 	 * the map disable in the configuration register
50517731Skarels 	 * up to the next 8K boundary, or below the last unibus memory.
50617731Skarels 	 */
50724182Sbloom 	if ((cpu == VAX_780) || (cpu == VAX_8600)) {
50817731Skarels 		register i;
50917731Skarels 
51017731Skarels 		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
		/* permanently remove registers 1..i from the resource map */
51117731Skarels 		while (i)
51217731Skarels 			(void) rmget(uh->uh_map, 1, i--);
51317731Skarels 	}
51417731Skarels #endif
51517731Skarels }
51617731Skarels 
51717731Skarels /*
51814790Ssam  * Allocate UNIBUS memory.  Allocates and initializes
51914790Ssam  * sufficient mapping registers for access.  On a 780,
52014790Ssam  * the configuration register is setup to disable UBA
52114790Ssam  * response on DMA transfers to addresses controlled
52214790Ssam  * by the disabled mapping registers.
52317731Skarels  * On a 780, should only be called from ubameminit, or in ascending order
52417731Skarels  * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
52517731Skarels  * the last unibus memory would free unusable map registers.
52617731Skarels  * Doalloc is 1 to allocate, 0 to deallocate.
5276518Sfeldman  */
52814790Ssam ubamem(uban, addr, npg, doalloc)
52914790Ssam 	int uban, addr, npg, doalloc;
5306518Sfeldman {
5316518Sfeldman 	register struct uba_hd *uh = &uba_hd[uban];
53214790Ssam 	register int a;
53317731Skarels 	int s;
5346518Sfeldman 
	/* Convert the UNIBUS byte address to a 1-origin map register
	 * index (one register per 512-byte page). */
53517731Skarels 	a = (addr >> 9) + 1;
53617731Skarels 	s = spl6();
53717731Skarels 	if (doalloc)
		/* claim exactly these registers (rmget returns 0 on failure) */
53817731Skarels 		a = rmget(uh->uh_map, npg, a);
53917731Skarels 	else
54017731Skarels 		rmfree(uh->uh_map, (long)npg, (long)a);
54117731Skarels 	splx(s);
5426518Sfeldman 	if (a) {
54314790Ssam 		register int i, *m;
54414790Ssam 
		/* Invalidate the involved map registers so the adapter
		 * will not respond to DMA in this range. */
54514790Ssam 		m = (int *)&uh->uh_uba->uba_map[a - 1];
54614790Ssam 		for (i = 0; i < npg; i++)
5476518Sfeldman 			*m++ = 0;	/* All off, especially 'valid' */
		/* Track the highest UNIBUS memory address in use. */
54817731Skarels 		i = addr + npg * 512;
54917731Skarels 		if (doalloc && i > uh->uh_lastmem)
55017731Skarels 			uh->uh_lastmem = i;
55117731Skarels 		else if (doalloc == 0 && i == uh->uh_lastmem)
55217731Skarels 			uh->uh_lastmem = addr;
55324182Sbloom #if defined(VAX780) || defined(VAX8600)
55414790Ssam 		/*
55514790Ssam 		 * On a 780, set up the map register disable
55614790Ssam 		 * field in the configuration register.  Beware
55717731Skarels 		 * of callers that request memory ``out of order''
55817731Skarels 		 * or in sections other than 8K multiples.
55917731Skarels 		 * Ubameminit handles such requests properly, however.
56014790Ssam 		 */
56124182Sbloom 		if ((cpu == VAX_780) || (cpu == VAX_8600)) {
56217731Skarels 			i = uh->uh_uba->uba_cr &~ 0x7c000000;
56317731Skarels 			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
56417731Skarels 			uh->uh_uba->uba_cr = i;
5657473Sfeldman 		}
5667473Sfeldman #endif
5676518Sfeldman 	}
	/* Returns the 1-origin map register index, or 0 on failure. */
56814790Ssam 	return (a);
5696518Sfeldman }
5707304Ssam 
5719875Ssam #include "ik.h"
57224501Sjg #include "vs.h"
57324501Sjg #if NIK > 0 || NVS > 0
5747304Ssam /*
5757304Ssam  * Map a virtual address into users address space. Actually all we
5767304Ssam  * do is turn on the user mode write protection bits for the particular
5777304Ssam  * page of memory involved.
5787304Ssam  */
5797304Ssam maptouser(vaddress)
5807304Ssam 	caddr_t vaddress;
5817304Ssam {
5827304Ssam 
	/* System virtual addresses start at 0x80000000; pages are 512
	 * bytes, hence the >> 9 to index Sysmap.  PG_UW grants user
	 * read/write access to the page. */
5837304Ssam 	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
5847304Ssam }
5857304Ssam 
/*
 * Undo maptouser(): restore kernel-only (PG_KW) protection on the
 * system page containing vaddress.
 */
5867304Ssam unmaptouser(vaddress)
5877304Ssam 	caddr_t vaddress;
5887304Ssam {
5897304Ssam 
5907304Ssam 	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
5917304Ssam }
5929174Ssam #endif
593