/*	uba.c	6.3	85/01/18	*/

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "map.h"
#include "buf.h"
#include "vm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "conf.h"
#include "dk.h"
#include "kernel.h"

#include "../vax/cpu.h"
#include "../vax/mtpr.h"
#include "../vax/nexus.h"
#include "ubareg.h"
#include "ubavar.h"

#if VAX780
char	ubasr_bits[] = UBASR_BITS;
#endif

/*
 * Do transfer on device argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 * If you call this routine with the head of the queue for a
 * UBA, it will automatically remove the device from the UBA
 * queue before it returns.  If some other device is given
 * as argument, it will be added to the request queue if the
 * request cannot be started immediately.  This means that
 * passing a device which is on the queue but not at the head
 * of the request queue is likely to be a disaster.
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spl6();
	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);
	return (1);
rwait:
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}

ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}
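
/*
 * Illustrative only: the shape of the ud_dgo routine that ubago() calls
 * once the mapping is in place.  The "xx" device and its register layout
 * are hypothetical; real drivers differ.  The low 18 bits of
 * um->um_ubinfo form the UNIBUS address of the transfer: bits 0-15 go
 * in the device's buffer address register, and the two extended-address
 * bits land in the command word.
 */
#ifdef notdef
xxdgo(um)
	register struct uba_ctlr *um;
{
	register struct xxdevice *addr = (struct xxdevice *)um->um_addr;

	addr->xxba = um->um_ubinfo;		/* low 16 bits of bus address */
	addr->xxcs = um->um_cmd | ((um->um_ubinfo >> 8) & 0x300) | XX_GO;
}
#endif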

/*
 * Allocate and set up UBA map registers, and a bdp if one is needed.
 * The flags say whether a bdp is needed and whether the caller can't
 * wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 */
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int temp;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX730
	if (cpu == VAX_730)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	npf = btoc(bp->b_bcount + o) + 1;
	a = spl6();
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)&uh->uh_mrwant, PSWP);
	}
	if ((flags & UBA_NEED16) && reg + npf > 128) {
		/*
		 * Could hang around and try again (if we can ever succeed).
		 * Won't help any current device...
		 */
		rmfree(uh->uh_map, (long)npf, (long)reg);
		splx(a);
		return (0);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	io = &uh->uh_uba->uba_map[reg];
	while (--npf != 0) {
		if (pte->pg_pfnum == 0)
			panic("uba zero uentry");
		*(int *)io++ = pte++->pg_pfnum | temp;
	}
	*(int *)io++ = 0;
	return (ubinfo);
}

/*
 * Non-buffer setup interface... set up a dummy buffer and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}
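
/*
 * For reference only: how the packed word from ubasetup() decomposes.
 * These macro names are illustrative, not part of the kernel; drivers
 * traditionally open-code the shifts and masks.
 */
#ifdef notdef
#define	UBAI_OFF(i)	((i) & 0x1ff)		/* byte offset in first page */
#define	UBAI_MR(i)	(((i) >> 9) & 0x1ff)	/* first map register */
#define	UBAI_NMR(i)	(((i) >> 18) & 0x3ff)	/* number of map registers */
#define	UBAI_BDP(i)	(((i) >> 28) & 0xf)	/* buffered data path, 0 if none */
#define	UBAI_ADDR(i)	((i) & 0x3ffff)		/* 18-bit UNIBUS address */
#endif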

/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register parameter is by value since we need to block
 * against uba resets on 11/780's.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spl6();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)&uh->uh_bdpwant);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered,
	 * nor can the registers be freed twice.
	 * Unblock interrupts once this is done.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	rmfree(uh->uh_map, (long)npf, (long)reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)&uh->uh_mrwant);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}

ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if VAX780
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}

ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, (long)NUBMREG, (long)1, "uba", UAMSIZ);
	switch (cpu) {
#if VAX780
	case VAX_780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#if VAX750
	case VAX_750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
#if VAX730
	case VAX_730:
		break;
#endif
	}
}
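
/*
 * Illustrative only: the allocate/release pattern as an interrupt-level
 * caller might use it.  "xxstart" is hypothetical.  At interrupt level
 * a caller must pass UBA_CANTWAIT, since ubasetup() would otherwise
 * sleep waiting for map registers or a buffered data path.
 */
#ifdef notdef
xxstart(uban, bp)
	int uban;
	register struct buf *bp;
{
	register int info;

	info = ubasetup(uban, bp, UBA_NEEDBDP|UBA_CANTWAIT);
	if (info == 0)
		return (0);		/* resources busy; caller requeues bp */
	/* ... start the DMA using the low 18 bits of info ... */
	ubarelse(uban, &info);		/* on completion: purge bdp, free maps */
	return (1);
}
#endif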

/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spl6();
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	ubameminit(uban);
	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
		(*cdp->d_reset)(uban);
#ifdef INET
	ifubareset(uban);
#endif
	printf("\n");
	splx(s);
}

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are; they will never happen anyway.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if VAX780
	case VAX_780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX730
	case VAX_730:
#endif
#if defined(VAX750) || defined(VAX730)
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
		/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
		/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}
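
/*
 * Illustrative only: the shape of a driver's d_reset entry, called from
 * ubareset() above through cdevsw.  "xx", xxminfo and xxstart are
 * hypothetical.  The usual job is to forget UNIBUS resources lost in
 * the reset (the maps were reclaimed by ubainitmaps(), so um_ubinfo is
 * cleared rather than released) and restart queued transfers.
 */
#ifdef notdef
xxreset(uban)
	int uban;
{
	register struct uba_ctlr *um = xxminfo[0];	/* hypothetical table */

	if (um == 0 || um->um_ubanum != uban || um->um_alive == 0)
		return;
	printf(" xx0");			/* continues ubareset's message line */
	um->um_tab.b_active = 0;
	if (um->um_ubinfo) {
		printf("<%d>", (um->um_ubinfo >> 28) & 0xf);
		um->um_ubinfo = 0;	/* mapping died with the reset */
	}
	(void) xxstart(um);		/* hypothetical: restart queued work */
}
#endif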

#ifdef VAX780
int	ubawedgecnt = 10;
int	ubacrazy = 500;
int	zvcnt_max = 5000;	/* in 8 sec */
int	zvcnt_total;
long	zvcnt_time;
/*
 * This routine is called by the locore code to
 * process a UBA error on an 11/780.  The arguments are passed
 * on the stack, and value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, ipl, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int ipl, uvec;
	register struct uba_regs *uba;
{
	register sr, s;

	if (uvec == 0) {
		long	dt = time.tv_sec - zvcnt_time;

		zvcnt_total++;
		if (dt > 8) {
			zvcnt_time = time.tv_sec;
			uh->uh_zvcnt = 0;
		}
		if (++uh->uh_zvcnt > zvcnt_max) {
			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
				uban, uh->uh_zvcnt, dt + 1);
			printf("\tIPL 0x%x\n\tcnfgr: %b Adapter Code: 0x%x\n",
				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
				uba->uba_cnfgr&0xff);
			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
				uba->uba_sr, ubasr_bits, uba->uba_dcr,
				(uba->uba_dcr&0x8000000)?"":"NOT ");
			ubareset(uban);
		}
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	sr = uba->uba_sr;
	s = spl7();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif
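
/*
 * Aside, illustrative only: the kernel printf's %b format used above
 * consumes a value and a bit-description string such as UBASR_BITS.
 * By convention the string's first byte gives the numeric output base
 * (e.g. \20, i.e. 16, for hex) and each following group is a bit
 * number followed by that bit's name.  With a hypothetical
 *
 *	#define	XXSR_BITS	"\20\10ERROR\2BUSY\1DONE"
 *
 * printf("%b", 0x81, XXSR_BITS) would print "81<ERROR,DONE>".
 */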

/*
 * Look for devices with unibus memory, allow them to configure, then disable
 * map registers as necessary.  Called during autoconfiguration and ubareset.
 * The device ubamem routine returns 0 on success, 1 on success if it is fully
 * configured (has no csr or interrupt, so doesn't need to be probed),
 * and -1 on failure.
 */
ubameminit(uban)
{
	register struct uba_device *ui;
	register struct uba_hd *uh = &uba_hd[uban];
	caddr_t umembase = umem[uban] + 0x3e000, addr;
#define	ubaoff(off)	((int)(off) & 0x1fff)

	uh->uh_lastmem = 0;
	for (ui = ubdinit; ui->ui_driver; ui++) {
		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
			continue;
		if (ui->ui_driver->ud_ubamem) {
			/*
			 * During autoconfiguration, need to fudge ui_addr.
			 */
			addr = ui->ui_addr;
			ui->ui_addr = umembase + ubaoff(addr);
			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
			case 1:
				ui->ui_alive = 1;
				/* FALLTHROUGH */
			case 0:
				ui->ui_ubanum = uban;
				break;
			}
			ui->ui_addr = addr;
		}
	}
#if VAX780
	/*
	 * On a 780, throw away any map registers disabled by rounding
	 * the map disable in the configuration register
	 * up to the next 8K boundary, or below the last unibus memory.
	 */
	if (cpu == VAX_780) {
		register i;

		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
		while (i)
			(void) rmget(uh->uh_map, 1, i--);
	}
#endif
}

/*
 * Allocate UNIBUS memory.  Allocates and initializes
 * sufficient mapping registers for access.  On a 780,
 * the configuration register is set up to disable UBA
 * response on DMA transfers to addresses controlled
 * by the disabled mapping registers.
 * On a 780, should only be called from ubameminit, or in ascending order
 * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
 * the last unibus memory would free unusable map registers.
 * Doalloc is 1 to allocate, 0 to deallocate.
 */
ubamem(uban, addr, npg, doalloc)
	int uban, addr, npg, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int a;
	int s;

	a = (addr >> 9) + 1;
	s = spl6();
	if (doalloc)
		a = rmget(uh->uh_map, npg, a);
	else
		rmfree(uh->uh_map, (long)npg, (long)a);
	splx(s);
	if (a) {
		register int i, *m;

		m = (int *)&uh->uh_uba->uba_map[a - 1];
		for (i = 0; i < npg; i++)
			*m++ = 0;	/* All off, especially 'valid' */
		i = addr + npg * 512;
		if (doalloc && i > uh->uh_lastmem)
			uh->uh_lastmem = i;
		else if (doalloc == 0 && i == uh->uh_lastmem)
			uh->uh_lastmem = addr;
#if VAX780
		/*
		 * On a 780, set up the map register disable
		 * field in the configuration register.  Beware
		 * of callers that request memory ``out of order''
		 * or in sections other than 8K multiples.
		 * Ubameminit handles such requests properly, however.
		 */
		if (cpu == VAX_780) {
			i = uh->uh_uba->uba_cr &~ 0x7c000000;
			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
			uh->uh_uba->uba_cr = i;
		}
#endif
	}
	return (a);
}
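
/*
 * Illustrative only: the address arithmetic above.  UNIBUS pages are
 * 512 bytes and the resource map is 1-origin, hence "(addr >> 9) + 1".
 * A hypothetical device wanting 8K bytes of unibus memory starting at
 * UNIBUS address 0 would call
 *
 *	ubamem(uban, 0, 8192/512, 1)
 *
 * and release it later with ubamem(uban, 0, 8192/512, 0); a zero
 * return from the allocating call means the registers were taken.
 */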

#include "ik.h"
#if NIK > 0
/*
 * Map a virtual address into the user's address space.  Actually all we
 * do is turn on the user-mode write protection bits for the particular
 * page of memory involved.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}

unmaptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}
#endif
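
/*
 * Illustrative only: these two routines are meant to bracket a window
 * during which a user process may write one page of kernel memory, as
 * the ik driver (the only configured user, per the NIK conditional)
 * does.  The address "kva" is hypothetical, and since a pte's
 * protection changed, a caller would presumably also purge the
 * translation buffer:
 *
 *	maptouser(kva);
 *	mtpr(TBIS, kva);
 *	... user mode reads and writes the page at kva ...
 *	unmaptouser(kva);
 *	mtpr(TBIS, kva);
 */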