xref: /csrg-svn/sys/vax/uba/uba.c (revision 9174)
1 /*	uba.c	4.57	82/11/13	*/
2 
3 #include "../h/param.h"
4 #include "../h/systm.h"
5 #include "../h/map.h"
6 #include "../h/pte.h"
7 #include "../h/buf.h"
8 #include "../h/vm.h"
9 #include "../h/dir.h"
10 #include "../h/user.h"
11 #include "../h/proc.h"
12 #include "../h/conf.h"
13 #include "../h/dk.h"
14 #include "../h/kernel.h"
15 
16 #include "../vax/cpu.h"
17 #include "../vax/mtpr.h"
18 #include "../vax/nexus.h"
19 #include "../vaxuba/ubareg.h"
20 #include "../vaxuba/ubavar.h"
21 
22 #if VAX780
23 char	ubasr_bits[] = UBASR_BITS;
24 #endif
25 
26 /*
27  * Do transfer on device argument.  The controller
28  * and uba involved are implied by the device.
29  * We queue for resource wait in the uba code if necessary.
30  * We return 1 if the transfer was started, 0 if it was not.
31  * If you call this routine with the head of the queue for a
32  * UBA, it will automatically remove the device from the UBA
33  * queue before it returns.  If some other device is given
34  * as argument, it will be added to the request queue if the
35  * request cannot be started immediately.  This means that
36  * passing a device which is on the queue but not at the head
37  * of the request queue is likely to be a disaster.
38  */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spl6();
	/*
	 * Can't start now if an exclusive-use transfer is in progress,
	 * or if this controller wants exclusive use and the UBA is busy.
	 */
	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	/* map the buffer at the head of the controller's queue */
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	/* account the transfer in the iostat arrays, if device has a dk slot */
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;	/* in 32-word units */
	}
	/* if the caller gave us the head of the UBA wait queue, dequeue it */
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);		/* start the device */
	return (1);
rwait:
	/* couldn't start: queue the device for UBA resources (at the tail) */
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}
80 
/*
 * A transfer on controller um is complete: drop the exclusive-use
 * flag if this driver held it, decrement the UBA user count, and
 * release the map registers/bdp recorded in um_ubinfo.
 */
ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}
91 
92 /*
93  * Allocate and setup UBA map registers, and bdp's
94  * Flags says whether bdp is needed, whether the caller can't
95  * wait (e.g. if the caller is at interrupt level).
96  *
97  * Return value:
98  *	Bits 0-8	Byte offset
99  *	Bits 9-17	Start map reg. no.
100  *	Bits 18-27	No. mapping reg's
101  *	Bits 28-31	BDP no.
102  */
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int temp;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX730
	/* a 730 never has bdps free (see ubainitmaps), so don't ask for one */
	if (cpu == VAX_730)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);		/* first virtual page of transfer */
	o = (int)bp->b_un.b_addr & PGOFSET;	/* byte offset within that page */
	npf = btoc(bp->b_bcount + o) + 1;	/* +1 for the invalid terminator entry */
	a = spl6();
	/* grab npf contiguous map registers; optionally sleep until available */
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)uh->uh_map, PSWP);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		/* allocate a buffered data path from the free bitmask */
		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				/* give back the map registers before failing */
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)uh->uh_map, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		/* caller already owns a bdp, encoded in the top 4 flag bits */
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	/* pack return value: bdp(28-31), count(18-27), start reg(9-17), offset(0-8) */
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;	/* odd byte offset through a bdp */
	/* pages of a B_DIRTY buffer belong to proc[2] (the pageout daemon slot) */
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	/* locate the first pte describing the buffer, by address-space type */
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	/* copy npf-1 page frame numbers into the adapter map registers */
	io = &uh->uh_uba->uba_map[reg];
	while (--npf != 0) {
		if (pte->pg_pfnum == 0)
			panic("uba zero uentry");
		*(int *)io++ = pte++->pg_pfnum | temp;
	}
	*(int *)io++ = 0;	/* invalid entry terminates the map */
	return (ubinfo);
}
168 
169 /*
170  * Non buffer setup interface... set up a buffer and call ubasetup.
171  */
172 uballoc(uban, addr, bcnt, flags)
173 	int uban;
174 	caddr_t addr;
175 	int bcnt, flags;
176 {
177 	struct buf ubabuf;
178 
179 	ubabuf.b_un.b_addr = addr;
180 	ubabuf.b_flags = B_BUSY;
181 	ubabuf.b_bcount = bcnt;
182 	/* that's all the fields ubasetup() needs */
183 	return (ubasetup(uban, &ubabuf, flags));
184 }
185 
186 /*
187  * Release resources on uba uban, and then unblock resource waiters.
188  * The map register parameter is by value since we need to block
189  * against uba resets on 11/780's.
190  */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spl6();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;		/* claim the descriptor; only we release it now */
	splx(s);		/* let interrupts in, we're safe for a while */
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		/* purge the buffered data path before freeing it */
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)uh->uh_map);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered, so we do this
	 * at high ipl.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	s = spl6();
	rmfree(uh->uh_map, (long)npf, (long)reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)uh->uh_map);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}
258 
/*
 * Purge the buffered data path in use by controller um so pending
 * buffered data reaches its destination.  The bdp number is held in
 * bits 28-31 of um_ubinfo (the encoding produced by ubasetup()).
 */
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if VAX780
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}
278 
/*
 * Initialize the map-register resource map and the free
 * buffered-data-path bitmask for uba uhp, per cpu type.
 */
ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, (long)NUBMREG, (long)1, "uba", UAMSIZ);
	switch (cpu) {
#if VAX780
	case VAX_780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#if VAX750
	case VAX_750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
#if VAX730
	case VAX_730:
		/* no buffered data paths: leave the free mask empty */
		break;
#endif
	}
}
301 
302 /*
303  * Generate a reset on uba number uban.  Then
304  * call each device in the character device table,
305  * giving it a chance to clean up so as to be able to continue.
306  */
307 ubareset(uban)
308 	int uban;
309 {
310 	register struct cdevsw *cdp;
311 	register struct uba_hd *uh = &uba_hd[uban];
312 	int s;
313 
314 	s = spl6();
315 	uh->uh_users = 0;
316 	uh->uh_zvcnt = 0;
317 	uh->uh_xclu = 0;
318 	uh->uh_actf = uh->uh_actl = 0;
319 	uh->uh_bdpwant = 0;
320 	uh->uh_mrwant = 0;
321 	ubainitmaps(uh);
322 	wakeup((caddr_t)&uh->uh_bdpwant);
323 	wakeup((caddr_t)&uh->uh_mrwant);
324 	printf("uba%d: reset", uban);
325 	ubainit(uh->uh_uba);
326 	for (cdp = cdevsw; cdp->d_open; cdp++)
327 		(*cdp->d_reset)(uban);
328 #ifdef INET
329 	ifubareset(uban);
330 #endif
331 	printf("\n");
332 	splx(s);
333 }
334 
335 /*
336  * Init a uba.  This is called with a pointer
337  * rather than a virtual address since it is called
338  * by code which runs with memory mapping disabled.
339  * In these cases we really don't need the interrupts
340  * enabled, but since we run with ipl high, we don't care
341  * if they are, they will never happen anyways.
342  */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if VAX780
	case VAX_780:
		uba->uba_cr = UBACR_ADINIT;	/* pulse adapter init */
		/* re-enable interrupt fielding and error interrupts */
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		/* busy-wait until the adapter reports init complete */
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX730
	case VAX_730:
#endif
#if defined(VAX750) || defined(VAX730)
		mtpr(IUR, 0);	/* integral unibus: write the init register */
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}
372 
373 #ifdef VAX780
int	ubawedgecnt = 10;	/* every 10th error: print "ERROR LIMIT" and reset */
int	ubacrazy = 500;		/* past this many total errors, panic */
376 /*
377  * This routine is called by the locore code to
378  * process a UBA error on an 11/780.  The arguments are passed
379  * on the stack, and value-result (through some trickery).
380  * In particular, the uvec argument is used for further
381  * uba processing so the result aspect of it is very important.
382  * It must not be declared register.
383  */
384 /*ARGSUSED*/
385 ubaerror(uban, uh, xx, uvec, uba)
386 	register int uban;
387 	register struct uba_hd *uh;
388 	int uvec;
389 	register struct uba_regs *uba;
390 {
391 	register sr, s;
392 
393 	if (uvec == 0) {
394 		uh->uh_zvcnt++;
395 		if (uh->uh_zvcnt > 250000) {
396 			printf("uba%d: too many zero vectors\n");
397 			ubareset(uban);
398 		}
399 		uvec = 0;
400 		return;
401 	}
402 	if (uba->uba_cnfgr & NEX_CFGFLT) {
403 		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
404 		    uban, uba->uba_sr, ubasr_bits,
405 		    uba->uba_cnfgr, NEXFLT_BITS);
406 		ubareset(uban);
407 		uvec = 0;
408 		return;
409 	}
410 	sr = uba->uba_sr;
411 	s = spl7();
412 	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
413 	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
414 	splx(s);
415 	uba->uba_sr = sr;
416 	uvec &= UBABRRVR_DIV;
417 	if (++uh->uh_errcnt % ubawedgecnt == 0) {
418 		if (uh->uh_errcnt > ubacrazy)
419 			panic("uba crazy");
420 		printf("ERROR LIMIT ");
421 		ubareset(uban);
422 		uvec = 0;
423 		return;
424 	}
425 	return;
426 }
427 #endif
428 
429 /*
430  * This routine is called by a driver for a device with on-board Unibus
431  * memory.  It removes the memory block from the Unibus resource map
432  * and clears the map registers for the block.
433  *
434  * Arguments are the Unibus number, the Unibus address of the memory
435  * block, its size in blocks of 512 bytes, and a flag indicating whether
436  * to allocate the unibus space from the resource map or whether it already
437  * has been.
438  *
439  * Returns > 0 if successful, 0 if not.
440  */
ubamem(uban, addr, size, doalloc)
	int uban, addr, size, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int *m;
	register int i, a, s;

	if (doalloc) {
		/* reserve the specific register range covering addr */
		s = spl6();
		a = rmget(uh->uh_map, size, (addr>>9)+1); /* starts at ONE! */
		splx(s);
	} else
		a = (addr>>9)+1;	/* caller already owns the range */
	if (a) {
		/* clear the map registers for the block */
		m = (int *) &uh->uh_uba->uba_map[a-1];
		for (i=0; i<size; i++)
			*m++ = 0;	/* All off, especially 'valid' */
#if VAX780
		if (cpu == VAX_780) {		/* map disable */
			/* number of 8K chunks spanned, written to cr bits 26+ */
			i = (addr+size*512+8191)/8192;
			uh->uh_uba->uba_cr |= i<<26;
		}
#endif
	}
	return(a);	/* biased map-register index (>0), or 0 on failure */
}
467 
468 #ifdef notdef
469 /*
470  * Map a virtual address into users address space. Actually all we
471  * do is turn on the user mode write protection bits for the particular
472  * page of memory involved.
473  */
maptouser(vaddress)
	caddr_t vaddress;
{

	/* index Sysmap by system-space page number; make the page user-writable */
	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}
480 
/* Undo maptouser(): restore kernel-only write protection on the page. */
unmaptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}
487 #endif
488