/*	uba.c	4.61	83/03/25	*/

#include "../machine/pte.h"

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/map.h"
#include "../h/buf.h"
#include "../h/vm.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/conf.h"
#include "../h/dk.h"
#include "../h/kernel.h"

#include "../vax/cpu.h"
#include "../vax/mtpr.h"
#include "../vax/nexus.h"
#include "../vaxuba/ubareg.h"
#include "../vaxuba/ubavar.h"

#if VAX780
char	ubasr_bits[] = UBASR_BITS;
#endif

/*
 * Do transfer on device argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 * If you call this routine with the head of the queue for a
 * UBA, it will automatically remove the device from the UBA
 * queue before it returns.  If some other device is given
 * as argument, it will be added to the request queue if the
 * request cannot be started immediately.  This means that
 * passing a device which is on the queue but not at the head
 * of the request queue is likely to be a disaster.
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spl6();
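	/*
	 * An exclusive-use driver must wait for the UBA to be idle,
	 * and everyone must wait while an exclusive-use transfer
	 * (uh_xclu) is in progress.
	 */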
	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
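	/* start the transfer; the driver's dgo routine loads the device registers */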
	(*um->um_driver->ud_dgo)(um);
	return (1);
rwait:
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}

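/*
 * Transfer on controller um is complete: release the exclusive-use
 * flag and user count, and free the map registers and bdp it held.
 */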
ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and set up UBA map registers, and a bdp if needed.
 * Flags say whether a bdp is needed and whether the caller can't
 * wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 */
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int temp;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX730
	if (cpu == VAX_730)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
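	/*
	 * One extra map register is allocated and left invalid (zeroed
	 * at the end of the loop below) so that a transfer which runs
	 * past its buffer faults on the UNIBUS rather than scribbling
	 * over unrelated memory.
	 */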
	npf = btoc(bp->b_bcount + o) + 1;
	a = spl6();
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)&uh->uh_mrwant, PSWP);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
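	/*
	 * ubinfo packs the bdp, register count, starting register and
	 * byte offset as described above; temp is the template or'ed
	 * into each map register: the bdp number in the data path field
	 * plus the map register valid bit.
	 */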
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;
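	/*
	 * Find the page table entries describing the buffer: dirty pages
	 * being pushed out belong to proc[2] (the pageout daemon); the
	 * other cases are system space, the u. area, a page table page,
	 * or ordinary process space.
	 */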
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	io = &uh->uh_uba->uba_map[reg];
	while (--npf != 0) {
		if (pte->pg_pfnum == 0)
			panic("uba zero uentry");
		*(int *)io++ = pte++->pg_pfnum | temp;
	}
	*(int *)io++ = 0;
	return (ubinfo);
}

/*
 * Non-buffer setup interface... set up a fake buffer header and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}

/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register parameter is by value since we need to block
 * against uba resets on 11/780's.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spl6();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;
	splx(s);		/* let interrupts in, we're safe for a while */
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)&uh->uh_bdpwant);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered, so we do this
	 * at high ipl.
	 */
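	/*
	 * Resource map indices start at one, hence the +1 below,
	 * which undoes the reg-- done in ubasetup().
	 */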
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	s = spl6();
	rmfree(uh->uh_map, (long)npf, (long)reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)&uh->uh_mrwant);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}

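/*
 * Purge the buffered data path in use by controller um, forcing
 * any bytes still buffered in the bdp out to memory.
 */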
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if VAX780
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}

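/*
 * Initialize the map register resource map and mark all buffered
 * data paths free for the uba described by uhp.
 */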
ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, (long)NUBMREG, (long)1, "uba", UAMSIZ);
	switch (cpu) {
#if VAX780
	case VAX_780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#if VAX750
	case VAX_750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
#if VAX730
	case VAX_730:
		break;
#endif
	}
}

/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spl6();
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
		(*cdp->d_reset)(uban);
#ifdef INET
	ifubareset(uban);
#endif
	printf("\n");
	splx(s);
}

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high we don't care
 * whether they are; they will never happen anyway.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if VAX780
	case VAX_780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX730
	case VAX_730:
#endif
#if defined(VAX750) || defined(VAX730)
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}

#ifdef VAX780
int	ubawedgecnt = 10;
int	ubacrazy = 500;
/*
 * This routine is called by the locore code to
 * process a UBA error on an 11/780.  The arguments are passed
 * on the stack, and value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, xx, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int uvec;
	register struct uba_regs *uba;
{
	register sr, s;

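	/*
	 * A zero vector is a passive release: an interrupt for which no
	 * device supplied a vector.  A very large number of them suggests
	 * something is wrong, so the uba is reset.
	 */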
	if (uvec == 0) {
		uh->uh_zvcnt++;
		if (uh->uh_zvcnt > 250000) {
			printf("uba%d: too many zero vectors\n", uban);
			ubareset(uban);
		}
		uvec = 0;
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	sr = uba->uba_sr;
	s = spl7();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif

/*
 * This routine is called by a driver for a device with on-board Unibus
 * memory.  It removes the memory block from the Unibus resource map
 * and clears the map registers for the block.
 *
 * Arguments are the Unibus number, the Unibus address of the memory
 * block, its size in blocks of 512 bytes, and a flag indicating whether
 * to allocate the Unibus space from the resource map or whether it
 * already has been.
 *
 * Returns > 0 if successful, 0 if not.
 */
ubamem(uban, addr, size, doalloc)
	int uban, addr, size, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int *m;
	register int i, a, s;

	if (doalloc) {
		s = spl6();
		a = rmget(uh->uh_map, size, (addr>>9)+1); /* starts at ONE! */
		splx(s);
	} else
		a = (addr>>9)+1;
	if (a) {
		m = (int *) &uh->uh_uba->uba_map[a-1];
		for (i=0; i<size; i++)
			*m++ = 0;	/* All off, especially 'valid' */
#if VAX780
		if (cpu == VAX_780) {		/* map disable */
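			/*
			 * Presumably this computes the number of 8K byte
			 * chunks of Unibus address space covered by the
			 * on-board memory and loads it into the 780's map
			 * disable field of the control register.
			 */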
			i = (addr+size*512+8191)/8192;
			uh->uh_uba->uba_cr |= i<<26;
		}
#endif
	}
	return (a);
}

#include "ik.h"
#if NIK > 0
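/*
 * These helpers are compiled in only when the ik device (presumably
 * the Ikonas frame buffer driver) is configured; it uses them to make
 * a page of kernel memory writable from user mode and to revoke that
 * access again.
 */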
/*
 * Map a virtual address into the user's address space.  Actually all we
 * do is change the protection bits of the particular page of memory
 * involved to allow user-mode writes.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}

unmaptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}
#endif