xref: /csrg-svn/sys/vax/uba/uba.c (revision 6348)
/*	uba.c	4.41	82/03/29	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/cpu.h"
#include "../h/map.h"
#include "../h/pte.h"
#include "../h/buf.h"
#include "../h/vm.h"
#include "../h/ubareg.h"
#include "../h/ubavar.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/conf.h"
#include "../h/mtpr.h"
#include "../h/nexus.h"
#include "../h/dk.h"

#if VAX780
char	ubasr_bits[] = UBASR_BITS;
#endif

/*
 * Do transfer on device argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 * If you call this routine with the head of the queue for a
 * UBA, it will automatically remove the device from the UBA
 * queue before it returns.  If some other device is given
 * as argument, it will be added to the request queue if the
 * request cannot be started immediately.  This means that
 * passing a device which is on the queue but not at the head
 * of the request queue is likely to be a disaster.
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spl6();
	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
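	/*
	 * Charge the transfer to the drive's iostat counters;
	 * dk_wds is accumulated in 64-byte units (b_bcount>>6).
	 */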
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);
	return (1);
rwait:
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}

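/*
 * A transfer has completed on the controller: release the
 * exclusive-use flag and the UNIBUS resources acquired by ubago().
 */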
ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and set up UBA map registers and bdp's.
 * Flags say whether a bdp is needed and whether the caller can't
 * wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 */
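/*
 * A hypothetical example of the packing (not from any real transfer):
 * bdp 2, 3 map registers starting at register 5, and byte offset 0100
 * pack as (2<<28)|(3<<18)|(5<<9)|0100 = 0x200c0a40; the low 18 bits
 * form the UNIBUS address of the mapped transfer.
 */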
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int temp, i;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX7ZZ
	if (cpu == VAX_7ZZ)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	npf = btoc(bp->b_bcount + o) + 1;
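	/* one extra map register is claimed; it is left invalid below as a guard */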
	a = spl6();
	while ((reg = rmalloc(uh->uh_map, npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)uh->uh_map, PSWP);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, npf, reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)uh->uh_map, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	io = &uh->uh_uba->uba_map[reg];
	temp = (bdp << 21) | UBAMR_MRV;
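	/*
	 * Pages of a B_DIRTY (pageout) buffer belong to proc[2],
	 * the pageout daemon.
	 */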
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;
	if (bp->b_flags & B_UAREA) {
		for (i = UPAGES - bp->b_bcount / NBPG; i < UPAGES; i++) {
			if (rp->p_addr[i].pg_pfnum == 0)
				panic("uba: zero upage");
			*(int *)io++ = rp->p_addr[i].pg_pfnum | temp;
		}
	} else if ((bp->b_flags & B_PHYS) == 0) {
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
		while (--npf != 0)
			*(int *)io++ = pte++->pg_pfnum | temp;
	} else {
		if (bp->b_flags & B_PAGET)
			pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
		else
			pte = vtopte(rp, v);
		while (--npf != 0) {
			if (pte->pg_pfnum == 0)
				panic("uba zero uentry");
			*(int *)io++ = pte++->pg_pfnum | temp;
		}
	}
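	/*
	 * Leave an invalid map register at the end to guard
	 * against runaway UNIBUS transfers.
	 */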
	*(int *)io++ = 0;
	return (ubinfo);
}

/*
 * Non-buffer setup interface... set up a dummy buffer header and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}
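
/*
 * A minimal usage sketch (hypothetical, not from any real driver):
 *
 *	int info;
 *
 *	info = uballoc(0, (caddr_t)addr, count, UBA_NEEDBDP);
 *	... start the device; the low 18 bits of info are the
 *	... UNIBUS address of the mapped buffer ...
 *	ubarelse(0, &info);
 */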

/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register parameter is by value since we need to block
 * against uba resets on 11/780's.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spl6();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;
	splx(s);		/* let interrupts in, we're safe for a while */
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)uh->uh_map);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered, so we do this
	 * at high ipl.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	s = spl6();
	rmfree(uh->uh_map, npf, reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)uh->uh_map);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}

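/*
 * Purge the buffered data path in use by this controller's transfer,
 * forcing any bytes still buffered in the bdp out to memory.
 */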
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if VAX780
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}

/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spl6();
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_hangcnt = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	/* waiters in ubasetup() sleep on uh_map, so wake that channel */
	wakeup((caddr_t)uh->uh_map);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	for (cdp = cdevsw; cdp->d_open; cdp++)
		(*cdp->d_reset)(uban);
#ifdef INET
	ifubareset(uban);
#endif
	printf("\n");
	splx(s);
}

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are; they will never happen anyway.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if VAX780
	case VAX_780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
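		/* wait for UNIBUS initialization to complete */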
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX7ZZ
	case VAX_7ZZ:
#endif
#if defined(VAX750) || defined(VAX7ZZ)
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}

#if VAX780
/*
 * Check to make sure the UNIBUS adaptor is not hung,
 * i.e. holding an interrupt in the register to be presented
 * but not presenting it for an extended period (5 seconds).
 */
unhang()
{
	register int uban;

	for (uban = 0; uban < numuba; uban++) {
		register struct uba_hd *uh = &uba_hd[uban];
		register struct uba_regs *up = uh->uh_uba;

		if (up->uba_sr == 0)
			return;
		up->uba_sr = UBASR_CRD|UBASR_LEB;
		uh->uh_hangcnt++;
		if (uh->uh_hangcnt > 5*hz) {
			uh->uh_hangcnt = 0;
			printf("uba%d: hung\n", uban);
			ubareset(uban);
		}
	}
}

/*
 * This is a timeout routine which decrements the ``i forgot to
 * interrupt'' counts, on an 11/780.  This prevents slowly growing
 * counts from causing a UBA reset since we are interested only
 * in hang situations.
 */
ubawatch()
{
	register struct uba_hd *uh;
	register int uban;

	if (panicstr)
		return;
	for (uban = 0; uban < numuba; uban++) {
		uh = &uba_hd[uban];
		if (uh->uh_hangcnt)
			uh->uh_hangcnt--;
	}
}

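/*
 * Reset the adaptor after every ubawedgecnt errors;
 * give up (panic) once the error count passes ubacrazy.
 */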
int	ubawedgecnt = 10;
int	ubacrazy = 500;
/*
 * This routine is called by the locore code to
 * process a UBA error on an 11/780.  The arguments are passed
 * on the stack and are value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing, so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, xx, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int uvec;
	register struct uba_regs *uba;
{
	register sr, s;

	if (uvec == 0) {
		uh->uh_zvcnt++;
		if (uh->uh_zvcnt > 250000) {
			printf("uba%d: too many zero vectors\n", uban);
			ubareset(uban);
		}
		uvec = 0;
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	sr = uba->uba_sr;
	s = spl7();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;
	uvec &= UBABRRVR_DIV;
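	/* hand back only the device interrupt vector field for further dispatch */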
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif

#ifdef notdef
/*
 * This routine allows remapping of previously
 * allocated UNIBUS bdp and map resources
 * onto different memory addresses.
 * It should only be used by routines which need
 * small fixed length mappings for long periods of time
 * (like the ARPANET ACC IMP interface).
 * It only maps kernel addresses.
 */
ubaremap(uban, ubinfo, addr)
	int uban;
	register unsigned ubinfo;
	caddr_t addr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register struct pte *pte, *io;
	register int temp, bdp;
	int npf, o;

	o = (int)addr & PGOFSET;
	bdp = (ubinfo >> 28) & 0xf;
	npf = (ubinfo >> 18) & 0x3ff;
	io = &uh->uh_uba->uba_map[(ubinfo >> 9) & 0x1ff];
	temp = (bdp << 21) | UBAMR_MRV;

	/*
	 * If using buffered data path initiate purge
	 * of old data and set byte offset bit if next
	 * transfer will be from odd address.
	 */
	if (bdp) {
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		if (o & 1)
			temp |= UBAMR_BO;
	}

	/*
	 * Set up the map registers, leaving an invalid reg
	 * at the end to guard against wild unibus transfers.
	 */
	pte = &Sysmap[btop(((int)addr)&0x7fffffff)];
	while (--npf != 0)
		*(int *)io++ = pte++->pg_pfnum | temp;
	*(int *)io = 0;

	/*
	 * Return effective UNIBUS address.
	 */
	return (ubinfo | o);
}
#endif