/*
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)uba.c	7.7 (Berkeley) 02/15/89
 */

#include "param.h"
#include "systm.h"
#include "map.h"
#include "buf.h"
#include "vm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "conf.h"
#include "dkstat.h"
#include "kernel.h"

#include "../vax/pte.h"
#include "../vax/cpu.h"
#include "../vax/mtpr.h"
#include "../vax/nexus.h"
#include "ubareg.h"
#include "ubavar.h"

#ifdef DW780
char	ubasr_bits[] = UBASR_BITS;
#endif

#define	spluba	spl7		/* IPL 17 */

/*
 * Do transfer on device argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 *
 * The onq argument must be zero iff the device is not on the
 * queue for this UBA.  If onq is set, the device must be at the
 * head of the queue.  In any case, if the transfer is started,
 * the device will be off the queue, and if not, it will be on.
 *
 * Drivers that allocate one BDP and hold it for some time should
 * set ud_keepbdp.  In this case um_bdp tells which BDP is allocated
 * to the controller, unless it is zero, indicating that the controller
 * does not now have a BDP.
 */
ubaqueue(ui, onq)
	register struct uba_device *ui;
	int onq;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register struct uba_driver *ud;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	ud = um->um_driver;
	s = spluba();
	/*
	 * Honor exclusive BDP use requests.
	 */
	if ((ud->ud_xclu && uh->uh_users > 0) || uh->uh_xclu)
		goto rwait;
	if (ud->ud_keepbdp) {
		/*
		 * First get just a BDP (though in fact it comes with
		 * one map register too).
		 */
		if (um->um_bdp == 0) {
			um->um_bdp = uballoc(um->um_ubanum,
				(caddr_t)0, 0, UBA_NEEDBDP|UBA_CANTWAIT);
			if (um->um_bdp == 0)
				goto rwait;
		}
		/* now share it with this transfer */
		um->um_ubinfo = ubasetup(um->um_ubanum,
			um->um_tab.b_actf->b_actf,
			um->um_bdp|UBA_HAVEBDP|UBA_CANTWAIT);
	} else
		um->um_ubinfo = ubasetup(um->um_ubanum,
			um->um_tab.b_actf->b_actf, UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (ud->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	if (onq)
		uh->uh_actf = ui->ui_forw;
	(*ud->ud_dgo)(um);
	return (1);
rwait:
	if (!onq) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}

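/*
 * Illustrative sketch (an addition, not original text): a hypothetical
 * "xx" driver queues a transfer for its controller with ubaqueue() and,
 * once the device interrupts, releases the UBA resources with ubadone().
 * The transfer itself must already be at um->um_tab.b_actf->b_actf.
 *
 *	xxstart(ui)
 *		register struct uba_device *ui;
 *	{
 *		if (ubaqueue(ui, 0) == 0)
 *			return;		device queued; ubarelse()
 *					will retry it later
 *		the transfer was started via ud_dgo
 *	}
 *
 *	xxintr(ctlr)
 *	{
 *		...
 *		ubadone(xxminfo[ctlr]);	free map registers (and the
 *					BDP, unless ud_keepbdp is set)
 *	}
 */
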
ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	if (um->um_driver->ud_keepbdp)
		um->um_ubinfo &= ~BDPMASK;	/* keep BDP for misers */
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and set up UBA map registers, and a BDP if one is needed.
 * The flags say whether a BDP is needed and whether the caller cannot
 * wait (e.g. because it is at interrupt level).
 * The return value encodes the map register plus page offset, the
 * BDP number, and the number of map registers.
 */
ubasetup(uban, bp, flags)
	int uban;
	register struct buf *bp;
	register int flags;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register struct pte *pte, *io;
	register int npf;
	int pfnum, temp;
	int reg, bdp;
	unsigned v;
	struct proc *rp;
	int a, o, ubinfo;

#ifdef DW730
	if (uh->uh_type == DW730)
		flags &= ~UBA_NEEDBDP;
#endif
#ifdef QBA
	if (uh->uh_type == QBA)
		flags &= ~UBA_NEEDBDP;
#endif
	o = (int)bp->b_un.b_addr & PGOFSET;
	npf = btoc(bp->b_bcount + o) + 1;
	if (npf > UBA_MAXNMR)
		panic("uba xfer too big");
	a = spluba();
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)&uh->uh_mrwant, PSWP);
	}
	if ((flags & UBA_NEED16) && reg + npf > 128) {
		/*
		 * Could hang around and try again (if we can ever succeed).
		 * Won't help any current device...
		 */
		rmfree(uh->uh_map, (long)npf, (long)reg);
		splx(a);
		return (0);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		while ((bdp = ffs((long)uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	ubinfo = UBAI_INFO(o, reg, npf, bdp);
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = kvtopte(bp->b_un.b_addr);
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else {
		rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
		v = btop(bp->b_un.b_addr);
		if (bp->b_flags & B_UAREA)
			pte = &rp->p_addr[v];
		else
			pte = vtopte(rp, v);
	}
	io = &uh->uh_mr[reg];
	while (--npf > 0) {
		pfnum = pte->pg_pfnum;
		if (pfnum == 0)
			panic("uba zero uentry");
		pte++;
		*(int *)io++ = pfnum | temp;
	}
	*(int *)io = 0;
	return (ubinfo);
}

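/*
 * Sketch (an addition, not original text): since the offset and map
 * register number sit in the low bits of the encoded info word, the
 * low 18 bits form the UNIBUS address of the transfer.  A typical
 * block-device "go" routine loads them into device registers; the
 * "xx" register names below are hypothetical:
 *
 *	info = um->um_ubinfo;		as set up via ubaqueue()
 *	xxaddr->xx_ba = info;		low 16 address bits
 *	xxaddr->xx_csr = ((info >> 8) & 0x300) | XX_GO;
 *					address bits 17:16 in csr 9:8
 */
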
/*
 * Non-buffer setup interface: set up a dummy buffer and call ubasetup().
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}

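/*
 * Sketch (an addition): mapping a driver's private command area once,
 * e.g. at attach time, and keeping the mapping for the life of the
 * driver.  The "sc" names are hypothetical; without UBA_CANTWAIT the
 * call sleeps until map registers are available, so it cannot fail:
 *
 *	sc->sc_ubinfo = uballoc(um->um_ubanum,
 *	    (caddr_t)&sc->sc_cmd, sizeof (sc->sc_cmd), 0);
 */
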
/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The caller passes the address of its map register info so that the
 * value can be examined and cleared atomically here, since it may be
 * released asynchronously by a uba reset on 11/780's.
 */
ubarelse(uban, amr)
	int uban;
	int *amr;
258 	register struct uba_hd *uh = &uba_hd[uban];
259 	register int bdp, reg, npf, s;
260 	int mr;
261 
262 	/*
263 	 * Carefully see if we should release the space, since
264 	 * it may be released asynchronously at uba reset time.
265 	 */
266 	s = spluba();
267 	mr = *amr;
268 	if (mr == 0) {
269 		/*
270 		 * A ubareset() occurred before we got around
271 		 * to releasing the space... no need to bother.
272 		 */
273 		splx(s);
274 		return;
275 	}
276 	*amr = 0;
277 	bdp = UBAI_BDP(mr);
278 	if (bdp) {
279 		switch (uh->uh_type) {
280 #ifdef DWBUA
281 		case DWBUA:
282 			BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
283 			break;
284 #endif
285 #ifdef DW780
286 		case DW780:
287 			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
288 			break;
289 #endif
290 #ifdef DW750
291 		case DW750:
292 			uh->uh_uba->uba_dpr[bdp] |=
293 			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
294 			break;
295 #endif
296 		default:
297 			break;
298 		}
299 		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
300 		if (uh->uh_bdpwant) {
301 			uh->uh_bdpwant = 0;
302 			wakeup((caddr_t)&uh->uh_bdpwant);
303 		}
304 	}
305 	/*
306 	 * Put back the registers in the resource map.
307 	 * The map code must not be reentered,
308 	 * nor can the registers be freed twice.
309 	 * Unblock interrupts once this is done.
310 	 */
311 	npf = UBAI_NMR(mr);
312 	reg = UBAI_MR(mr) + 1;
313 	rmfree(uh->uh_map, (long)npf, (long)reg);
314 	splx(s);
315 
316 	/*
317 	 * Wakeup sleepers for map registers,
318 	 * and also, if there are processes blocked in dgo(),
319 	 * give them a chance at the UNIBUS.
320 	 */
321 	if (uh->uh_mrwant) {
322 		uh->uh_mrwant = 0;
323 		wakeup((caddr_t)&uh->uh_mrwant);
324 	}
325 	while (uh->uh_actf && ubaqueue(uh->uh_actf, 1))
326 		;
327 }
328 
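/*
 * Note (an addition): the value-result convention is why callers pass
 * the address of their saved info word, e.g. (hypothetical "sc" softc):
 *
 *	ubarelse(um->um_ubanum, &sc->sc_ubinfo);
 *
 * The word is examined and cleared at spluba() above, so the registers
 * cannot be freed twice when a uba reset releases them asynchronously.
 */
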
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = UBAI_BDP(um->um_ubinfo);

	switch (uh->uh_type) {
#ifdef DWBUA
	case DWBUA:
		BUA(uh->uh_uba)->bua_dpr[bdp] |= BUADPR_PURGE;
		break;
#endif
#ifdef DW780
	case DW780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#ifdef DW750
	case DW750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	default:
		break;
	}
}

ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	if (uhp->uh_memsize > UBA_MAXMR)
		uhp->uh_memsize = UBA_MAXMR;
	rminit(uhp->uh_map, (long)uhp->uh_memsize, (long)1, "uba", UAMSIZ);
	switch (uhp->uh_type) {
#ifdef DWBUA
	case DWBUA:
		uhp->uh_bdpfree = (1<<NBDPBUA) - 1;
		break;
#endif
#ifdef DW780
	case DW780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#ifdef DW750
	case DW750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
	default:
		break;
	}
}

/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spluba();
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	ubameminit(uban);
	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
		(*cdp->d_reset)(uban);
	ifubareset(uban);
	printf("\n");
	splx(s);
}

/*
 * Init a uba.  This is called with a pointer which may be a
 * physical address rather than a virtual one, since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need interrupts
 * enabled, but since we run at high ipl we don't care
 * if they are; they will never happen anyway.
 * SHOULD GET POINTER TO UBA_HD INSTEAD OF UBA.
 */
ubainit(uba)
	register struct uba_regs *uba;
{
	register struct uba_hd *uhp;
#ifdef QBA
	int isphys = 0;
#endif

	for (uhp = uba_hd; uhp < uba_hd + numuba; uhp++) {
		if (uhp->uh_uba == uba)
			break;
		if (uhp->uh_physuba == uba) {
#ifdef QBA
			isphys++;
#endif
			break;
		}
	}
	if (uhp >= uba_hd + numuba) {
		printf("init unknown uba\n");
		return;
	}

	switch (uhp->uh_type) {
#ifdef DWBUA
	case DWBUA:
		BUA(uba)->bua_csr |= BUACSR_UPI;
		/* give devices time to recover from power fail */
		DELAY(500000);
		break;
#endif
#ifdef DW780
	case DW780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#ifdef DW750
	case DW750:
#endif
#ifdef DW730
	case DW730:
#endif
#ifdef QBA
	case QBA:
#endif
#if DW750 || DW730 || QBA
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
/* END PROBABLY UNNECESSARY */
#ifdef QBA
		/*
		 * Re-enable local memory access
		 * from the Q-bus.
		 */
		if (uhp->uh_type == QBA) {
			if (isphys)
				*((char *)QIOPAGE630 + QIPCR) = Q_LMEAE;
			else
				*(uhp->uh_iopage + QIPCR) = Q_LMEAE;
		}
#endif /* QBA */
		break;
#endif /* DW750 || DW730 || QBA */
	}
}

#ifdef QBA
/*
 * Determine the interrupt priority of a Q-bus
 * peripheral.  The device probe routine must spl6(),
 * attempt to make the device request an interrupt,
 * delaying as necessary, then call this routine
 * before resetting the device.
 */
qbgetpri()
{
	int pri;
	extern int cvec;

	for (pri = 0x17; pri > 0x14; ) {
		if (cvec && cvec != 0x200)	/* interrupted at pri */
			break;
		pri--;
		splx(pri - 1);
	}
	(void) spl0();
	return (pri);
}
#endif
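
/*
 * Sketch (an addition) of the probe protocol described above; the
 * "xx" driver and register names are hypothetical:
 *
 *	xxprobe(reg)
 *		caddr_t reg;
 *	{
 *		register int br, cvec;	value-result, as in the
 *					VAX autoconf probe convention
 *		register struct xxdevice *addr = (struct xxdevice *)reg;
 *
 *		(void) spl6();
 *		addr->xx_csr = XX_IE;	coax an interrupt
 *		DELAY(10000);
 *		br = qbgetpri();
 *		addr->xx_csr = 0;	now reset the device
 *		...
 *	}
 */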

#ifdef DW780
int	ubawedgecnt = 10;
int	ubacrazy = 500;
int	zvcnt_max = 5000;	/* in 8 sec */
/*
 * This routine is called by the locore code to process a UBA
 * error on an 11/780 or 8600.  The arguments are passed
 * on the stack, and value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, ipl, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int ipl, uvec;
	register struct uba_regs *uba;
{
	register sr, s;

	if (uvec == 0) {
		/*
		 * Declare dt as unsigned so that negative values
		 * are handled as >8 below, in case time was set back.
		 */
		u_long	dt = time.tv_sec - uh->uh_zvtime;

		uh->uh_zvtotal++;
		if (dt > 8) {
			uh->uh_zvtime = time.tv_sec;
			uh->uh_zvcnt = 0;
		}
		if (++uh->uh_zvcnt > zvcnt_max) {
			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
				uban, uh->uh_zvcnt, dt + 1);
			printf("\tIPL 0x%x\n\tcnfgr: %b  Adapter Code: 0x%x\n",
				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
				uba->uba_cnfgr&0xff);
			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
				uba->uba_sr, ubasr_bits, uba->uba_dcr,
				(uba->uba_dcr&0x8000000)?"":"NOT ");
			ubareset(uban);
		}
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	sr = uba->uba_sr;
	s = spluba();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif

/*
 * Look for devices with unibus memory, allow them to configure, then disable
 * map registers as necessary.  Called during autoconfiguration and ubareset.
 * The device ubamem routine returns 0 on success, 1 on success if it is fully
 * configured (has no csr or interrupt, so doesn't need to be probed),
 * and -1 on failure.
 */
ubameminit(uban)
	int uban;
{
	register struct uba_device *ui;
	register struct uba_hd *uh = &uba_hd[uban];
	caddr_t umembase = umem[uban] + 0x3e000, addr;
#define	ubaoff(off)	((int)(off) & 0x1fff)

	uh->uh_lastmem = 0;
	for (ui = ubdinit; ui->ui_driver; ui++) {
		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
			continue;
		if (ui->ui_driver->ud_ubamem) {
			/*
			 * During autoconfiguration, need to fudge ui_addr.
			 */
			addr = ui->ui_addr;
			ui->ui_addr = umembase + ubaoff(addr);
			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
			case 1:
				ui->ui_alive = 1;
				/* FALLTHROUGH */
			case 0:
				ui->ui_ubanum = uban;
				break;
			}
			ui->ui_addr = addr;
		}
	}
#ifdef DW780
	/*
	 * On a DW780, throw away any map registers disabled by rounding
	 * the map disable in the configuration register
	 * up to the next 8K boundary, or below the last unibus memory.
	 */
	if (uh->uh_type == DW780) {
		register i;

		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
		while (i)
			(void) rmget(uh->uh_map, 1, i--);
	}
#endif
}

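/*
 * Sketch (an addition) of a device ubamem routine following the
 * convention above; the "xx" names are hypothetical.  It checks that
 * its unibus memory responds, maps it, and returns 1 (fully
 * configured), since it has no csr to probe:
 *
 *	xxubamem(ui, uban)
 *		register struct uba_device *ui;
 *	{
 *		if (badaddr(ui->ui_addr, 2))
 *			return (-1);	memory not present
 *		if (ubamem(uban, XXBASE, XXPAGES, 1) == 0)
 *			return (-1);	map registers unavailable
 *		return (1);		no csr; fully configured
 *	}
 */
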
/*
 * Allocate UNIBUS memory.  Allocates and initializes
 * sufficient mapping registers for access.  On a 780,
 * the configuration register is set up to disable UBA
 * response on DMA transfers to addresses controlled
 * by the disabled mapping registers.
 * On a DW780, this should only be called from ubameminit, or in
 * ascending order from 0 with 8K-sized and -aligned addresses;
 * freeing memory that isn't the last unibus memory would free
 * unusable map registers.  Doalloc is 1 to allocate, 0 to deallocate.
 */
ubamem(uban, addr, npg, doalloc)
	int uban, addr, npg, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int a;
	int s;

	a = (addr >> 9) + 1;
	s = spluba();
	if (doalloc)
		a = rmget(uh->uh_map, npg, a);
	else
		rmfree(uh->uh_map, (long)npg, (long)a);
	splx(s);
	if (a) {
		register int i, *m;

		m = (int *)&uh->uh_mr[a - 1];
		for (i = 0; i < npg; i++)
			*m++ = 0;	/* All off, especially 'valid' */
		i = addr + npg * 512;
		if (doalloc && i > uh->uh_lastmem)
			uh->uh_lastmem = i;
		else if (doalloc == 0 && i == uh->uh_lastmem)
			uh->uh_lastmem = addr;
#ifdef DW780
		/*
		 * On a 780, set up the map register disable
		 * field in the configuration register.  Beware
		 * of callers that request memory ``out of order''
		 * or in sections other than 8K multiples.
		 * Ubameminit handles such requests properly, however.
		 */
		if (uh->uh_type == DW780) {
			i = uh->uh_uba->uba_cr &~ 0x7c000000;
			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
			uh->uh_uba->uba_cr = i;
		}
#endif
	}
	return (a);
}

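/*
 * Worked example (an addition): mapping the first 8K of unibus memory
 * space would be ubamem(uban, 0, 16, 1) -- sixteen 512-byte pages.
 * The map register index is addr/512 + 1 because the resource map
 * numbers registers from 1; the corresponding uh_mr entries are then
 * cleared so the UBA does not map those unibus addresses to host
 * memory.
 */
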
#include "ik.h"
#include "vs.h"
#if NIK > 0 || NVS > 0
/*
 * Map a virtual address into the user's address space.  Actually all
 * we do is turn on the user-mode write protection bits for the
 * particular page of memory involved.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	kvtopte(vaddress)->pg_prot = (PG_UW >> 27);
}

unmaptouser(vaddress)
	caddr_t vaddress;
{

	kvtopte(vaddress)->pg_prot = (PG_KW >> 27);
}
#endif