xref: /csrg-svn/sys/vax/uba/uba.c (revision 8576)
1 /*	uba.c	4.51	82/10/17	*/
2 
3 #include "../h/param.h"
4 #include "../h/systm.h"
5 #include "../h/map.h"
6 #include "../h/pte.h"
7 #include "../h/buf.h"
8 #include "../h/vm.h"
9 #include "../h/dir.h"
10 #include "../h/user.h"
11 #include "../h/proc.h"
12 #include "../h/conf.h"
13 #include "../h/dk.h"
14 
15 #include "../vax/cpu.h"
16 #include "../vax/mtpr.h"
17 #include "../vax/nexus.h"
18 #include "../vaxuba/ubareg.h"
19 #include "../vaxuba/ubavar.h"
20 
21 #if VAX780
22 char	ubasr_bits[] = UBASR_BITS;
23 #endif
24 
25 /*
26  * Do transfer on device argument.  The controller
27  * and uba involved are implied by the device.
28  * We queue for resource wait in the uba code if necessary.
29  * We return 1 if the transfer was started, 0 if it was not.
30  * If you call this routine with the head of the queue for a
31  * UBA, it will automatically remove the device from the UBA
32  * queue before it returns.  If some other device is given
33  * as argument, it will be added to the request queue if the
34  * request cannot be started immediately.  This means that
35  * passing a device which is on the queue but not at the head
36  * of the request queue is likely to be a disaster.
37  */
/*
 * Start the transfer for device ui (see the protocol comment above).
 * Returns 1 if the transfer was started, 0 if the device was queued
 * on its UBA to wait for resources.
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spl6();
	/*
	 * An exclusive-use driver may not start while anyone else is
	 * active on the UBA, and nobody may start while an exclusive
	 * transfer is in progress.
	 */
	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	/* charge dk instrumentation if this device has an iostat slot */
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		/* bcount>>6: presumably 32-word units for dk_wds -- verify */
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	/* if we were at the head of the resource-wait queue, unlink us */
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);
	return (1);
rwait:
	/* append to the UBA resource-wait queue unless already queued at head */
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}
79 
80 ubadone(um)
81 	register struct uba_ctlr *um;
82 {
83 	register struct uba_hd *uh = &uba_hd[um->um_ubanum];
84 
85 	if (um->um_driver->ud_xclu)
86 		uh->uh_xclu = 0;
87 	uh->uh_users--;
88 	ubarelse(um->um_ubanum, &um->um_ubinfo);
89 }
90 
91 /*
92  * Allocate and setup UBA map registers, and bdp's
93  * Flags says whether bdp is needed, whether the caller can't
94  * wait (e.g. if the caller is at interrupt level).
95  *
96  * Return value:
97  *	Bits 0-8	Byte offset
98  *	Bits 9-17	Start map reg. no.
99  *	Bits 18-27	No. mapping reg's
100  *	Bits 28-31	BDP no.
101  */
ubasetup(uban, bp, flags)
	struct buf *bp;		/* uban and flags are implicit int (K&R) */
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int temp, i;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX730
	/* the 730 has no buffered data paths */
	if (cpu == VAX_730)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	/* +1 for the invalid guard map register stored at the end */
	npf = btoc(bp->b_bcount + o) + 1;
	a = spl6();
	/* get npf contiguous map registers; sleep for them if we may */
	while ((reg = rmalloc(uh->uh_map, npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)uh->uh_map, PSWP);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		/* take the lowest-numbered free buffered data path (1-origin) */
		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, npf, reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)uh->uh_map, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		/* caller pre-allocated a bdp, passed in the top 4 flag bits */
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	/* pack the return value; layout documented in the comment above */
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;	/* odd start address: byte-offset bit */
	/* locate the page tables describing the buffer */
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	/* load the map registers, leaving the final one invalid as a guard */
	io = &uh->uh_uba->uba_map[reg];
	while (--npf != 0) {
		if (pte->pg_pfnum == 0)
			panic("uba zero uentry");
		*(int *)io++ = pte++->pg_pfnum | temp;
	}
	*(int *)io++ = 0;
	return (ubinfo);
}
167 
168 /*
169  * Non buffer setup interface... set up a buffer and call ubasetup.
170  */
171 uballoc(uban, addr, bcnt, flags)
172 	int uban;
173 	caddr_t addr;
174 	int bcnt, flags;
175 {
176 	struct buf ubabuf;
177 
178 	ubabuf.b_un.b_addr = addr;
179 	ubabuf.b_flags = B_BUSY;
180 	ubabuf.b_bcount = bcnt;
181 	/* that's all the fields ubasetup() needs */
182 	return (ubasetup(uban, &ubabuf, flags));
183 }
184 
185 /*
186  * Release resources on uba uban, and then unblock resource waiters.
187  * The map register parameter is by value since we need to block
188  * against uba resets on 11/780's.
189  */
ubarelse(uban, amr)
	int *amr;		/* uban is implicit int (K&R) */
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spl6();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;	/* claim the info; a reset now won't free it twice */
	splx(s);		/* let interrupts in, we're safe for a while */
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		/* purge the buffered data path before giving it back */
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		/* bdp numbers are 1-origin: bit (bdp-1) tracks path bdp */
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)uh->uh_map);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered, so we do this
	 * at high ipl.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;	/* rmap register numbers are 1-origin */
	s = spl6();
	rmfree(uh->uh_map, npf, reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)uh->uh_map);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}
257 
258 ubapurge(um)
259 	register struct uba_ctlr *um;
260 {
261 	register struct uba_hd *uh = um->um_hd;
262 	register int bdp = (um->um_ubinfo >> 28) & 0x0f;
263 
264 	switch (cpu) {
265 #if VAX780
266 	case VAX_780:
267 		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
268 		break;
269 #endif
270 #if VAX750
271 	case VAX_750:
272 		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
273 		break;
274 #endif
275 	}
276 }
277 
278 ubainitmaps(uhp)
279 	register struct uba_hd *uhp;
280 {
281 
282 	rminit(uhp->uh_map, NUBMREG, 1, "uba", UAMSIZ);
283 	switch (cpu) {
284 #if VAX780
285 	case VAX_780:
286 		uhp->uh_bdpfree = (1<<NBDP780) - 1;
287 		break;
288 #endif
289 #if VAX750
290 	case VAX_750:
291 		uhp->uh_bdpfree = (1<<NBDP750) - 1;
292 		break;
293 #endif
294 #if VAX730
295 	case VAX_730:
296 		break;
297 #endif
298 	}
299 }
300 
301 /*
302  * Generate a reset on uba number uban.  Then
303  * call each device in the character device table,
304  * giving it a chance to clean up so as to be able to continue.
305  */
306 ubareset(uban)
307 	int uban;
308 {
309 	register struct cdevsw *cdp;
310 	register struct uba_hd *uh = &uba_hd[uban];
311 	int s;
312 
313 	s = spl6();
314 	uh->uh_users = 0;
315 	uh->uh_zvcnt = 0;
316 	uh->uh_xclu = 0;
317 	uh->uh_hangcnt = 0;
318 	uh->uh_actf = uh->uh_actl = 0;
319 	uh->uh_bdpwant = 0;
320 	uh->uh_mrwant = 0;
321 	ubainitmaps(uh);
322 	wakeup((caddr_t)&uh->uh_bdpwant);
323 	wakeup((caddr_t)&uh->uh_mrwant);
324 	printf("uba%d: reset", uban);
325 	ubainit(uh->uh_uba);
326 	for (cdp = cdevsw; cdp->d_open; cdp++)
327 		(*cdp->d_reset)(uban);
328 #ifdef INET
329 	ifubareset(uban);
330 #endif
331 	printf("\n");
332 	splx(s);
333 }
334 
335 /*
336  * Init a uba.  This is called with a pointer
337  * rather than a virtual address since it is called
338  * by code which runs with memory mapping disabled.
339  * In these cases we really don't need the interrupts
340  * enabled, but since we run with ipl high, we don't care
341  * if they are, they will never happen anyways.
342  */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if VAX780
	case VAX_780:
		/* adaptor init, then re-enable interrupt fielding and
		 * error interrupts */
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		/* busy-wait for UNIBUS initialization complete */
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX730
	case VAX_730:
#endif
#if defined(VAX750) || defined(VAX730)
		/* 750/730: UNIBUS init is done through a processor register */
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}
372 
373 #if VAX780
374 /*
375  * Check to make sure the UNIBUS adaptor is not hung,
376  * with an interrupt in the register to be presented,
377  * but not presenting it for an extended period (5 seconds).
378  */
unhang()
{
	register int uban;

	for (uban = 0; uban < numuba; uban++) {
		register struct uba_hd *uh = &uba_hd[uban];
		register struct uba_regs *up = uh->uh_uba;

		/*
		 * NOTE(review): a zero status register abandons the scan
		 * of ALL remaining ubas ("return", not "continue") --
		 * confirm this early-out is intended.
		 */
		if (up->uba_sr == 0)
			return;
		/* presumably write-one-to-clear of CRD/LEB -- verify */
		up->uba_sr = UBASR_CRD|UBASR_LEB;
		uh->uh_hangcnt++;
		/* uh_hangcnt is decayed by ubawatch(); only sustained
		 * growth (5*hz ticks) counts as a hang */
		if (uh->uh_hangcnt > 5*hz) {
			uh->uh_hangcnt = 0;
			printf("uba%d: hung\n", uban);
			ubareset(uban);
		}
	}
}
398 
399 /*
400  * This is a timeout routine which decrements the ``i forgot to
401  * interrupt'' counts, on an 11/780.  This prevents slowly growing
402  * counts from causing a UBA reset since we are interested only
403  * in hang situations.
404  */
405 ubawatch()
406 {
407 	register struct uba_hd *uh;
408 	register int uban;
409 
410 	if (panicstr)
411 		return;
412 	for (uban = 0; uban < numuba; uban++) {
413 		uh = &uba_hd[uban];
414 		if (uh->uh_hangcnt)
415 			uh->uh_hangcnt--;
416 	}
417 }
418 
int	ubawedgecnt = 10;	/* errors between ``ERROR LIMIT'' resets in ubaerror() */
int	ubacrazy = 500;		/* total uh_errcnt beyond which ubaerror() panics */
421 /*
422  * This routine is called by the locore code to
423  * process a UBA error on an 11/780.  The arguments are passed
424  * on the stack, and value-result (through some trickery).
425  * In particular, the uvec argument is used for further
426  * uba processing so the result aspect of it is very important.
427  * It must not be declared register.
428  */
429 /*ARGSUSED*/
430 ubaerror(uban, uh, xx, uvec, uba)
431 	register int uban;
432 	register struct uba_hd *uh;
433 	int uvec;
434 	register struct uba_regs *uba;
435 {
436 	register sr, s;
437 
438 	if (uvec == 0) {
439 		uh->uh_zvcnt++;
440 		if (uh->uh_zvcnt > 250000) {
441 			printf("uba%d: too many zero vectors\n");
442 			ubareset(uban);
443 		}
444 		uvec = 0;
445 		return;
446 	}
447 	if (uba->uba_cnfgr & NEX_CFGFLT) {
448 		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
449 		    uban, uba->uba_sr, ubasr_bits,
450 		    uba->uba_cnfgr, NEXFLT_BITS);
451 		ubareset(uban);
452 		uvec = 0;
453 		return;
454 	}
455 	sr = uba->uba_sr;
456 	s = spl7();
457 	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
458 	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
459 	splx(s);
460 	uba->uba_sr = sr;
461 	uvec &= UBABRRVR_DIV;
462 	if (++uh->uh_errcnt % ubawedgecnt == 0) {
463 		if (uh->uh_errcnt > ubacrazy)
464 			panic("uba crazy");
465 		printf("ERROR LIMIT ");
466 		ubareset(uban);
467 		uvec = 0;
468 		return;
469 	}
470 	return;
471 }
472 #endif
473 
474 #ifdef notdef
475 /*
476  * This routine allows remapping of previously
477  * allocated UNIBUS bdp and map resources
478  * onto different memory addresses.
479  * It should only be used by routines which need
480  * small fixed length mappings for long periods of time
481  * (like the ARPANET ACC IMP interface).
482  * It only maps kernel addresses.
483  */
ubaremap(uban, ubinfo, addr)
	int uban;
	register unsigned ubinfo;
	caddr_t addr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register struct pte *pte, *io;
	register int temp, bdp;
	int npf, o;

	/* decode the fields packed by ubasetup() */
	o = (int)addr & PGOFSET;
	bdp = (ubinfo >> 28) & 0xf;
	npf = (ubinfo >> 18) & 0x3ff;
	io = &uh->uh_uba->uba_map[(ubinfo >> 9) & 0x1ff];
	temp = (bdp << 21) | UBAMR_MRV;

	/*
	 * If using buffered data path initiate purge
	 * of old data and set byte offset bit if next
	 * transfer will be from odd address.
	 */
	if (bdp) {
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		if (o & 1)
			temp |= UBAMR_BO;
	}

	/*
	 * Set up the map registers, leaving an invalid reg
	 * at the end to guard against wild unibus transfers.
	 */
	pte = &Sysmap[btop(((int)addr)&0x7fffffff)];
	while (--npf != 0)
		*(int *)io++ = pte++->pg_pfnum | temp;
	*(int *)io = 0;

	/*
	 * Return effective UNIBUS address.
	 */
	return (ubinfo | o);
}
537 #endif
538 
539 /*
540  * This routine is called by a driver for a device with on-board Unibus
541  * memory.  It removes the memory block from the Unibus resource map
542  * and clears the map registers for the block.
543  *
544  * Arguments are the Unibus number, the Unibus address of the memory
545  * block, its size in blocks of 512 bytes, and a flag indicating whether
546  * to allocate the unibus space form the resource map or whether it already
547  * has been.
548  *
549  * Returns > 0 if successful, 0 if not.
550  */
551 
ubamem(uban, addr, size, alloc)
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int *m;
	register int i, a, s;

	if (alloc) {
		s = spl6();
		a = rmget(uh->uh_map, size, (addr>>9)+1); /* starts at ONE! */
		splx(s);
	} else
		a = (addr>>9)+1;
	if (a) {
		/* clear the map registers covering the on-board memory
		 * so no UNIBUS address in the block is mapped to memory */
		m = (int *) &uh->uh_uba->uba_map[a-1];
		for (i=0; i<size; i++)
			*m++ = 0;	/* All off, especially 'valid' */
#if VAX780
		if (cpu == VAX_780) {		/* map disable */
			/* NOTE(review): presumably disables mapping for
			 * the 8K chunks containing the block via the top
			 * bits of uba_cr -- verify against the DW780 spec */
			i = (addr+size*512+8191)/8192;
			uh->uh_uba->uba_cr |= i<<26;
		}
#endif
	}
	return(a);
}
577 
578 /*
579  * Map a virtual address into users address space. Actually all we
580  * do is turn on the user mode write protection bits for the particular
581  * page of memory involved.
582  */
583 maptouser(vaddress)
584 	caddr_t vaddress;
585 {
586 
587 	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
588 }
589 
590 unmaptouser(vaddress)
591 	caddr_t vaddress;
592 {
593 
594 	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
595 }
596