/*	uba.c	6.4	85/03/19	*/

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "map.h"
#include "buf.h"
#include "vm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "conf.h"
#include "dk.h"
#include "kernel.h"

#include "../vax/cpu.h"
#include "../vax/mtpr.h"
#include "../vax/nexus.h"
#include "ubareg.h"
#include "ubavar.h"

#if VAX780
/* Bit names for the 780 UBA status register, for %b-style printfs below. */
char	ubasr_bits[] = UBASR_BITS;
#endif

/*
 * Do transfer on device argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 * If you call this routine with the head of the queue for a
 * UBA, it will automatically remove the device from the UBA
 * queue before it returns.  If some other device is given
 * as argument, it will be added to the request queue if the
 * request cannot be started immediately.  This means that
 * passing a device which is on the queue but not at the head
 * of the request queue is likely to be a disaster.
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spl6();
	/*
	 * Don't start if someone holds the uba exclusively, or if this
	 * driver needs exclusive use and other transfers are in progress.
	 */
	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	/* Charge the transfer to the iostat slot, if the device has one. */
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	/* Remove device from the head of the uba wait queue, per contract. */
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);
	return (1);
rwait:
	/* Resources unavailable: append device to the wait queue (if absent). */
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}

/*
 * Undo the accounting done in ubago() when a transfer completes:
 * drop exclusive use, decrement the user count, and release the
 * uba resources recorded in um_ubinfo.
 */
ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and setup UBA map registers, and bdp's
 * Flags says whether bdp is needed, whether the caller can't
 * wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No.
 *			mapping reg's
 *	Bits 28-31	BDP no.
 */
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	int pfnum, temp;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX730
	/* The 730 has no buffered data paths; never try to allocate one. */
	if (cpu == VAX_730)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	/* One extra map register for the terminating invalid entry below. */
	npf = btoc(bp->b_bcount + o) + 1;
	a = spl6();
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)&uh->uh_mrwant, PSWP);
	}
	if ((flags & UBA_NEED16) && reg + npf > 128) {
		/*
		 * Could hang around and try again (if we can ever succeed).
		 * Won't help any current device...
		 */
		rmfree(uh->uh_map, (long)npf, (long)reg);
		splx(a);
		return (0);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;
	/*
	 * Find the page tables for the buffer: kernel addresses use Sysmap,
	 * u-areas and page tables have their own maps, otherwise translate
	 * through the owning process (proc[2] for delayed-write pages).
	 */
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	/* Load the uba map registers from the page tables. */
	io = &uh->uh_uba->uba_map[reg];
	while (--npf != 0) {
		pfnum = pte->pg_pfnum;
		if (pfnum == 0)
			panic("uba zero uentry");
		pte++;
		*(int *)io++ = pfnum | temp;
	}
	/* Terminating invalid entry, to catch transfers that run off the end. */
	*(int *)io++ = 0;
	return (ubinfo);
}

/*
 * Non buffer setup interface... set up a buffer and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}

/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register parameter is by value since we need to block
 * against uba resets on 11/780's.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spl6();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		/* Purge the buffered data path before giving it back. */
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)&uh->uh_bdpwant);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered,
	 * nor can the registers be freed twice.
	 * Unblock interrupts once this is done.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	rmfree(uh->uh_map, (long)npf, (long)reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)&uh->uh_mrwant);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}

/*
 * Purge the buffered data path in use by controller um,
 * as recorded in um->um_ubinfo (bits 28-31).
 */
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if VAX780
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}

/*
 * Initialize the map register resource map and the free bdp mask
 * for a uba.  No bdp bits are set up for the 730 (which never
 * allocates one; see ubasetup, which strips UBA_NEEDBDP there).
 */
ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, (long)NUBMREG, (long)1, "uba", UAMSIZ);
	switch (cpu) {
#if VAX780
	case VAX_780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#if VAX750
	case VAX_750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
#if VAX730
	case VAX_730:
		break;
#endif
	}
}

/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spl6();
	/* Forget all outstanding resource state; waiters are awakened below. */
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	ubameminit(uban);
	/* Let every character device driver recover from the reset. */
	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
		(*cdp->d_reset)(uban);
#ifdef INET
	ifubareset(uban);
#endif
	printf("\n");
	splx(s);
}

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are, they will never happen anyways.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if VAX780
	case VAX_780:
		/* Hardware init, then re-enable interrupt sources and
		 * spin until the adapter reports init complete. */
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX730
	case VAX_730:
#endif
#if defined(VAX750) || defined(VAX730)
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
		/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
		/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}

#ifdef VAX780
/* Error thresholds: reset after every ubawedgecnt errors; give up entirely
 * past ubacrazy.  Zero-vector interrupts are rate-limited per 8 seconds. */
int	ubawedgecnt = 10;
int	ubacrazy = 500;
int	zvcnt_max = 5000;	/* in 8 sec */
int	zvcnt_total;
long	zvcnt_time;
/*
 * This routine is called by the locore code to
 * process a UBA error on an 11/780.  The arguments are passed
 * on the stack, and value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, ipl, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int ipl, uvec;
	register struct uba_regs *uba;
{
	register sr, s;

	if (uvec == 0) {
		/*
		 * Zero vector: count them, resetting the per-uba count
		 * every 8 seconds; reset the uba if they come too fast.
		 */
		long	dt = time.tv_sec - zvcnt_time;
		zvcnt_total++;
		if (dt > 8) {
			zvcnt_time = time.tv_sec;
			uh->uh_zvcnt = 0;
		}
		if (++uh->uh_zvcnt > zvcnt_max) {
			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
				uban, uh->uh_zvcnt, dt + 1);
			printf("\tIPL 0x%x\n\tcnfgr: %b Adapter Code: 0x%x\n",
				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
				uba->uba_cnfgr&0xff);
			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
				uba->uba_sr, ubasr_bits, uba->uba_dcr,
				(uba->uba_dcr&0x8000000)?"":"NOT ");
			ubareset(uban);
		}
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	sr = uba->uba_sr;
	s = spl7();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	/* Writing the status register back clears the error bits. */
	uba->uba_sr = sr;
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif

/*
 * Look for devices with unibus memory, allow them to configure, then disable
 * map registers as necessary.  Called during autoconfiguration and ubareset.
 * The device ubamem routine returns 0 on success, 1 on success if it is fully
 * configured (has no csr or interrupt, so doesn't need to be probed),
 * and -1 on failure.
 */
ubameminit(uban)
{
	register struct uba_device *ui;
	register struct uba_hd *uh = &uba_hd[uban];
	caddr_t umembase = umem[uban] + 0x3e000, addr;
#define	ubaoff(off)	((int)(off) & 0x1fff)

	uh->uh_lastmem = 0;
	for (ui = ubdinit; ui->ui_driver; ui++) {
		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
			continue;
		if (ui->ui_driver->ud_ubamem) {
			/*
			 * During autoconfiguration, need to fudge ui_addr.
			 */
			addr = ui->ui_addr;
			ui->ui_addr = umembase + ubaoff(addr);
			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
			case 1:
				ui->ui_alive = 1;
				/* FALLTHROUGH */
			case 0:
				ui->ui_ubanum = uban;
				break;
			}
			ui->ui_addr = addr;
		}
	}
#if VAX780
	/*
	 * On a 780, throw away any map registers disabled by rounding
	 * the map disable in the configuration register
	 * up to the next 8K boundary, or below the last unibus memory.
	 */
	if (cpu == VAX_780) {
		register i;

		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
		while (i)
			(void) rmget(uh->uh_map, 1, i--);
	}
#endif
}

/*
 * Allocate UNIBUS memory.  Allocates and initializes
 * sufficient mapping registers for access.  On a 780,
 * the configuration register is setup to disable UBA
 * response on DMA transfers to addresses controlled
 * by the disabled mapping registers.
 * On a 780, should only be called from ubameminit, or in ascending order
 * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
 * the last unibus memory would free unusable map registers.
 * Doalloc is 1 to allocate, 0 to deallocate.
 */
ubamem(uban, addr, npg, doalloc)
	int uban, addr, npg, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int a;
	int s;

	/* Convert the unibus byte address to a 1-origin map register index. */
	a = (addr >> 9) + 1;
	s = spl6();
	if (doalloc)
		a = rmget(uh->uh_map, npg, a);
	else
		rmfree(uh->uh_map, (long)npg, (long)a);
	splx(s);
	if (a) {
		register int i, *m;

		m = (int *)&uh->uh_uba->uba_map[a - 1];
		for (i = 0; i < npg; i++)
			*m++ = 0;	/* All off, especially 'valid' */
		/* Track the high-water mark of unibus memory. */
		i = addr + npg * 512;
		if (doalloc && i > uh->uh_lastmem)
			uh->uh_lastmem = i;
		else if (doalloc == 0 && i == uh->uh_lastmem)
			uh->uh_lastmem = addr;
#if VAX780
		/*
		 * On a 780, set up the map register disable
		 * field in the configuration register.  Beware
		 * of callers that request memory ``out of order''
		 * or in sections other than 8K multiples.
		 * Ubameminit handles such requests properly, however.
		 */
		if (cpu == VAX_780) {
			i = uh->uh_uba->uba_cr &~ 0x7c000000;
			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
			uh->uh_uba->uba_cr = i;
		}
#endif
	}
	return (a);
}

#include "ik.h"
#if NIK > 0
/*
 * Map a virtual address into users address space.  Actually all we
 * do is turn on the user mode write protection bits for the particular
 * page of memory involved.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}

/* Undo maptouser(): restore kernel-write-only protection on the page. */
unmaptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}
#endif