/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)uba.c	7.1 (Berkeley) 06/05/86
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "map.h"
#include "buf.h"
#include "vm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "conf.h"
#include "dk.h"
#include "kernel.h"

#include "../vax/cpu.h"
#include "../vax/mtpr.h"
#include "../vax/nexus.h"
#include "ubareg.h"
#include "ubavar.h"

#if defined(VAX780) || defined(VAX8600)
char	ubasr_bits[] = UBASR_BITS;
#endif

#define	spluba	spl7		/* IPL 17 */

/*
 * Do transfer on device argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 * If you call this routine with the head of the queue for a
 * UBA, it will automatically remove the device from the UBA
 * queue before it returns.  If some other device is given
 * as argument, it will be added to the request queue if the
 * request cannot be started immediately.  This means that
 * passing a device which is on the queue but not at the head
 * of the request queue is likely to be a disaster.
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spluba();
	if ((um->um_driver->ud_xclu && uh->uh_users > 0) || uh->uh_xclu)
		goto rwait;
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;	/* 64-byte units */
	}
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);
	return (1);
rwait:
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}
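
/*
 * A minimal sketch of a caller (the driver and its start routine are
 * hypothetical, assuming the usual um_tab queueing conventions): hand
 * a device with queued work to ubago(); if the transfer cannot start,
 * ubago() has queued ui, and ubarelse() will retry it later.
 */
xxstart(ui)
	register struct uba_device *ui;
{

	if (ubago(ui) == 0)
		return;		/* queued; retried when resources free up */
	/* transfer started; the driver's ud_dgo routine has been called */
}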

/*
 * Release the resources a controller acquired in ubago()
 * when its transfer completes.
 */
ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and set up UBA map registers and a bdp.
 * The flags say whether a bdp is needed and whether the caller
 * can't wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 */
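
/*
 * For illustration, the fields of the value laid out above can be
 * pulled apart as follows (ubarelse() and ubapurge() below do the
 * same shifts and masks inline); these macro names are hypothetical.
 */
#define	UBINFO_OFF(i)	((i) & 0x1ff)		/* byte offset */
#define	UBINFO_MR(i)	(((i) >> 9) & 0x1ff)	/* first map reg (0-based) */
#define	UBINFO_NMR(i)	(((i) >> 18) & 0x3ff)	/* no. of map registers */
#define	UBINFO_BDP(i)	(((i) >> 28) & 0xf)	/* bdp number */
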
ubasetup(uban, bp, flags)
	int uban;
	struct buf *bp;
	int flags;
{
	register struct uba_hd *uh = &uba_hd[uban];
	int pfnum, temp;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if defined(VAX730) || defined(VAX630)
	if (cpu == VAX_730 || cpu == VAX_630)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	npf = btoc(bp->b_bcount + o) + 1;	/* one extra for a fence (below) */
	a = spluba();
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)&uh->uh_mrwant, PSWP);
	}
	if ((flags & UBA_NEED16) && reg + npf > 128) {
		/*
		 * Could hang around and try again (if we can ever succeed).
		 * Won't help any current device...
		 * (The first 128 map registers cover the 64K reachable
		 * with 16-bit addresses.)
		 */
		rmfree(uh->uh_map, (long)npf, (long)reg);
		splx(a);
		return (0);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		while ((bdp = ffs((long)uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;				/* rmalloc indices are 1-based */
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;	/* odd byte offset */
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;	/* proc[2] is the pagedaemon */
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	io = &uh->uh_uba->uba_map[reg];
	while (--npf != 0) {
		pfnum = pte->pg_pfnum;
		if (pfnum == 0)
			panic("uba zero uentry");
		pte++;
		*(int *)io++ = pfnum | temp;
	}
	*(int *)io++ = 0;	/* invalid map register as a fence */
	return (ubinfo);
}

/*
 * Non-buffer setup interface... set up a dummy buffer and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}
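
/*
 * A minimal sketch (hypothetical helper, not part of the original
 * interface): map a kernel buffer with uballoc() and derive the
 * 18-bit unibus address of its start from the low bits of the
 * returned ubinfo.
 */
ubaddr_of(uban, addr, bcnt)
	int uban;
	caddr_t addr;
	int bcnt;
{
	register int info;

	info = uballoc(uban, addr, bcnt, UBA_CANTWAIT);
	if (info == 0)
		return (-1);		/* no map registers available */
	return (info & 0x3ffff);	/* (map reg no. << 9) | byte offset */
}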

/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register info is sampled into a local (and the caller's
 * copy cleared) at spluba, since a uba reset on an 11/780 may
 * release it asynchronously.
 */
ubarelse(uban, amr)
	int uban;
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spluba();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
		case VAX_8600:
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)&uh->uh_bdpwant);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered,
	 * nor can the registers be freed twice.
	 * Unblock interrupts once this is done.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	rmfree(uh->uh_map, (long)npf, (long)reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)&uh->uh_mrwant);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}

/*
 * Purge the buffered data path used by a controller, forcing any
 * partially filled buffer out to memory.
 */
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
	case VAX_8600:
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}

/*
 * Initialize the map register allocation map and the free bdp mask
 * for a uba.
 */
ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, (long)NUBMREG, (long)1, "uba", UAMSIZ);
	switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
	case VAX_8600:
	case VAX_780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#if VAX750
	case VAX_750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
#if defined(VAX730) || defined(VAX630)
	case VAX_730:
	case VAX_630:
		break;			/* no buffered data paths */
#endif
	}
}

/*
 * Generate a reset on uba number uban.  Then
 * call the reset routine of each device in the character device
 * table, giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spluba();
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	ubameminit(uban);
	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
		(*cdp->d_reset)(uban);
	ifubareset(uban);
	printf("\n");
	splx(s);
}

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are, they will never happen anyway.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
	case VAX_8600:
	case VAX_780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX730
	case VAX_730:
#endif
#if VAX630
	case VAX_630:
#endif
#if defined(VAX750) || defined(VAX730) || defined(VAX630)
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}

#if defined(VAX780) || defined(VAX8600)
int	ubawedgecnt = 10;
int	ubacrazy = 500;
int	zvcnt_max = 5000;	/* in 8 sec */
/*
 * This routine is called by the locore code to process a UBA
 * error on an 11/780 or 8600.  The arguments are passed
 * on the stack, and are value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, ipl, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int ipl, uvec;
	register struct uba_regs *uba;
{
	register int sr, s;

	if (uvec == 0) {
		/*
		 * Declare dt as unsigned so that negative values
		 * are handled as >8 below, in case time was set back.
		 */
		u_long	dt = time.tv_sec - uh->uh_zvtime;

		uh->uh_zvtotal++;
		if (dt > 8) {
			uh->uh_zvtime = time.tv_sec;
			uh->uh_zvcnt = 0;
		}
		if (++uh->uh_zvcnt > zvcnt_max) {
			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
				uban, uh->uh_zvcnt, dt + 1);
			printf("\tIPL 0x%x\n\tcnfgr: %b  Adapter Code: 0x%x\n",
				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
				uba->uba_cnfgr&0xff);
			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
				uba->uba_sr, ubasr_bits, uba->uba_dcr,
				(uba->uba_dcr&0x8000000)?"":"NOT ");
			ubareset(uban);
		}
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	sr = uba->uba_sr;
	s = spluba();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif

/*
 * Look for devices with unibus memory, allow them to configure, then disable
 * map registers as necessary.  Called during autoconfiguration and ubareset.
 * The device ubamem routine returns 0 on success, 1 on success if it is fully
 * configured (has no csr or interrupt, so doesn't need to be probed),
 * and -1 on failure.
 */
ubameminit(uban)
	int uban;
{
	register struct uba_device *ui;
	register struct uba_hd *uh = &uba_hd[uban];
	caddr_t umembase = umem[uban] + 0x3e000, addr;
#define	ubaoff(off)	((int)(off) & 0x1fff)

	uh->uh_lastmem = 0;
	for (ui = ubdinit; ui->ui_driver; ui++) {
		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
			continue;
		if (ui->ui_driver->ud_ubamem) {
			/*
			 * During autoconfiguration, need to fudge ui_addr.
			 */
			addr = ui->ui_addr;
			ui->ui_addr = umembase + ubaoff(addr);
			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
			case 1:
				ui->ui_alive = 1;
				/* FALLTHROUGH */
			case 0:
				ui->ui_ubanum = uban;
				break;
			}
			ui->ui_addr = addr;
		}
	}
#if defined(VAX780) || defined(VAX8600)
	/*
	 * On a 780, throw away any map registers disabled by rounding
	 * the map disable count in the configuration register up to the
	 * next 8K boundary, i.e. any that lie below the last unibus memory.
	 */
	if ((cpu == VAX_780) || (cpu == VAX_8600)) {
		register int i;

		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
		while (i)
			(void) rmget(uh->uh_map, 1, i--);
	}
#endif
}
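
/*
 * A minimal sketch of a device ud_ubamem routine following the
 * protocol above (the device, its unibus address, and its size are
 * hypothetical, assuming a board with 8K of memory and no csr or
 * interrupt): reserve the map registers covering the board's memory
 * and report the device fully configured.
 */
/*ARGSUSED*/
xxmem(ui, uban)
	register struct uba_device *ui;
	int uban;
{

	if (ubamem(uban, 0x3c000, 16, 1) == 0)	/* 16 pages of 512 = 8K */
		return (-1);
	return (1);		/* fully configured; nothing to probe */
}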

/*
 * Allocate UNIBUS memory.  Allocates and initializes
 * sufficient mapping registers for access.  On a 780,
 * the configuration register is set up to disable UBA
 * response on DMA transfers to addresses controlled
 * by the disabled mapping registers.
 * On a 780, should only be called from ubameminit, or in ascending order
 * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
 * the last unibus memory would free unusable map registers.
 * Doalloc is 1 to allocate, 0 to deallocate.
 */
ubamem(uban, addr, npg, doalloc)
	int uban, addr, npg, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int a;
	int s;

	a = (addr >> 9) + 1;		/* map register index; rmget is 1-based */
	s = spluba();
	if (doalloc)
		a = rmget(uh->uh_map, npg, a);
	else
		rmfree(uh->uh_map, (long)npg, (long)a);
	splx(s);
	if (a) {
		register int i, *m;

		m = (int *)&uh->uh_uba->uba_map[a - 1];
		for (i = 0; i < npg; i++)
			*m++ = 0;	/* All off, especially 'valid' */
		i = addr + npg * 512;
		if (doalloc && i > uh->uh_lastmem)
			uh->uh_lastmem = i;
		else if (doalloc == 0 && i == uh->uh_lastmem)
			uh->uh_lastmem = addr;
#if defined(VAX780) || defined(VAX8600)
		/*
		 * On a 780, set up the map register disable
		 * field in the configuration register.  Beware
		 * of callers that request memory ``out of order''
		 * or in sections other than 8K multiples.
		 * Ubameminit handles such requests properly, however.
		 */
		if ((cpu == VAX_780) || (cpu == VAX_8600)) {
			i = uh->uh_uba->uba_cr &~ 0x7c000000;
			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
			uh->uh_uba->uba_cr = i;
		}
#endif
	}
	return (a);
}

#include "ik.h"
#include "vs.h"
#if NIK > 0 || NVS > 0
/*
 * Map a virtual address into the user's address space.  Actually all
 * we do is set the protection bits of the page of memory involved to
 * allow user-mode writes.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}

/*
 * Restore kernel-only write protection on the page.
 */
unmaptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}
#endif
599