/*  uba.c  4.47  82/07/21  */

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/cpu.h"
#include "../h/map.h"
#include "../h/pte.h"
#include "../h/buf.h"
#include "../h/vm.h"
#include "../h/ubareg.h"
#include "../h/ubavar.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/conf.h"
#include "../h/mtpr.h"
#include "../h/nexus.h"
#include "../h/dk.h"

#if VAX780
char ubasr_bits[] = UBASR_BITS;
#endif

/*
 * Do transfer on device argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 * If you call this routine with the head of the queue for a
 * UBA, it will automatically remove the device from the UBA
 * queue before it returns.  If some other device is given
 * as argument, it will be added to the request queue if the
 * request cannot be started immediately.  This means that
 * passing a device which is on the queue but not at the head
 * of the request queue is likely to be a disaster.
 */
ubago(ui)
        register struct uba_device *ui;
{
        register struct uba_ctlr *um = ui->ui_mi;
        register struct uba_hd *uh;
        register int s, unit;

        uh = &uba_hd[um->um_ubanum];
        s = spl6();
        if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
                goto rwait;
        um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
            UBA_NEEDBDP|UBA_CANTWAIT);
        if (um->um_ubinfo == 0)
                goto rwait;
        uh->uh_users++;
        if (um->um_driver->ud_xclu)
                uh->uh_xclu = 1;
        splx(s);
        if (ui->ui_dk >= 0) {
                unit = ui->ui_dk;
                dk_busy |= 1<<unit;
                dk_xfer[unit]++;
                dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
        }
        if (uh->uh_actf == ui)
                uh->uh_actf = ui->ui_forw;
        (*um->um_driver->ud_dgo)(um);
        return (1);
rwait:
        if (uh->uh_actf != ui) {
                ui->ui_forw = NULL;
                if (uh->uh_actf == NULL)
                        uh->uh_actf = ui;
                else
                        uh->uh_actl->ui_forw = ui;
                uh->uh_actl = ui;
        }
        splx(s);
        return (0);
}

ubadone(um)
        register struct uba_ctlr *um;
{
        register struct uba_hd *uh = &uba_hd[um->um_ubanum];

        if (um->um_driver->ud_xclu)
                uh->uh_xclu = 0;
        uh->uh_users--;
        ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and setup UBA map registers, and bdp's
 * Flags says whether bdp is needed, whether the caller can't
 * wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *      Bits 0-8        Byte offset
 *      Bits 9-17       Start map reg. no.
 *      Bits 18-27      No. mapping reg's
 *      Bits 28-31      BDP no.
 */
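/*
 * Decoding sketch (illustrative only; the code below does not use
 * these names): given a packed value "ubinfo" in the format above,
 *
 *      o   = ubinfo & 0x1ff;            byte offset
 *      reg = (ubinfo >> 9) & 0x1ff;     first map register
 *      npf = (ubinfo >> 18) & 0x3ff;    number of map registers
 *      bdp = (ubinfo >> 28) & 0xf;      buffered data path number
 *
 * and the low 18 bits, (reg << 9) | o, are the UNIBUS address at
 * which the transfer begins.  ubarelse() and ubapurge() below unpack
 * ubinfo the same way.
 */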
ubasetup(uban, bp, flags)
        struct buf *bp;
{
        register struct uba_hd *uh = &uba_hd[uban];
        register int temp, i;
        int npf, reg, bdp;
        unsigned v;
        register struct pte *pte, *io;
        struct proc *rp;
        int a, o, ubinfo;

#if VAX730
        if (cpu == VAX_730)
                flags &= ~UBA_NEEDBDP;
#endif
        v = btop(bp->b_un.b_addr);
        o = (int)bp->b_un.b_addr & PGOFSET;
        npf = btoc(bp->b_bcount + o) + 1;
        a = spl6();
        while ((reg = rmalloc(uh->uh_map, npf)) == 0) {
                if (flags & UBA_CANTWAIT) {
                        splx(a);
                        return (0);
                }
                uh->uh_mrwant++;
                sleep((caddr_t)uh->uh_map, PSWP);
        }
        bdp = 0;
        if (flags & UBA_NEEDBDP) {
                while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
                        if (flags & UBA_CANTWAIT) {
                                rmfree(uh->uh_map, npf, reg);
                                splx(a);
                                return (0);
                        }
                        uh->uh_bdpwant++;
                        sleep((caddr_t)uh->uh_map, PSWP);
                }
                uh->uh_bdpfree &= ~(1 << (bdp-1));
        } else if (flags & UBA_HAVEBDP)
                bdp = (flags >> 28) & 0xf;
        splx(a);
        reg--;
        ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
        temp = (bdp << 21) | UBAMR_MRV;
        if (bdp && (o & 01))
                temp |= UBAMR_BO;
        rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
        if ((bp->b_flags & B_PHYS) == 0)
                pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
        else if (bp->b_flags & B_UAREA)
                pte = &rp->p_addr[v];
        else if (bp->b_flags & B_PAGET)
                pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
        else
                pte = vtopte(rp, v);
        io = &uh->uh_uba->uba_map[reg];
        while (--npf != 0) {
                if (pte->pg_pfnum == 0)
                        panic("uba zero uentry");
                *(int *)io++ = pte++->pg_pfnum | temp;
        }
        *(int *)io++ = 0;
        return (ubinfo);
}

/*
 * Non buffer setup interface... set up a buffer and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
        int uban;
        caddr_t addr;
        int bcnt, flags;
{
        struct buf ubabuf;

        ubabuf.b_un.b_addr = addr;
        ubabuf.b_flags = B_BUSY;
        ubabuf.b_bcount = bcnt;
        /* that's all the fields ubasetup() needs */
        return (ubasetup(uban, &ubabuf, flags));
}

/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register parameter is by value since we need to block
 * against uba resets on 11/780's.
 */
ubarelse(uban, amr)
        int *amr;
{
        register struct uba_hd *uh = &uba_hd[uban];
        register int bdp, reg, npf, s;
        int mr;

        /*
         * Carefully see if we should release the space, since
         * it may be released asynchronously at uba reset time.
         */
        s = spl6();
        mr = *amr;
        if (mr == 0) {
                /*
                 * A ubareset() occurred before we got around
                 * to releasing the space... no need to bother.
                 */
                splx(s);
                return;
        }
        *amr = 0;
        splx(s);                /* let interrupts in, we're safe for a while */
        bdp = (mr >> 28) & 0x0f;
        if (bdp) {
                switch (cpu) {
#if VAX780
                case VAX_780:
                        uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
                        break;
#endif
#if VAX750
                case VAX_750:
                        uh->uh_uba->uba_dpr[bdp] |=
                            UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
                        break;
#endif
                }
                uh->uh_bdpfree |= 1 << (bdp-1);         /* atomic */
                if (uh->uh_bdpwant) {
                        uh->uh_bdpwant = 0;
                        wakeup((caddr_t)uh->uh_map);
                }
        }
        /*
         * Put back the registers in the resource map.
         * The map code must not be reentered, so we do this
         * at high ipl.
         */
        npf = (mr >> 18) & 0x3ff;
        reg = ((mr >> 9) & 0x1ff) + 1;
        s = spl6();
        rmfree(uh->uh_map, npf, reg);
        splx(s);

        /*
         * Wakeup sleepers for map registers,
         * and also, if there are processes blocked in dgo(),
         * give them a chance at the UNIBUS.
         */
        if (uh->uh_mrwant) {
                uh->uh_mrwant = 0;
                wakeup((caddr_t)uh->uh_map);
        }
        while (uh->uh_actf && ubago(uh->uh_actf))
                ;
}

/*
 * Purge the buffered data path used by the transfer in progress on
 * controller um, so that any data still buffered in the adaptor
 * reaches memory.
 */
ubapurge(um)
        register struct uba_ctlr *um;
{
        register struct uba_hd *uh = um->um_hd;
        register int bdp = (um->um_ubinfo >> 28) & 0x0f;

        switch (cpu) {
#if VAX780
        case VAX_780:
                uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
                break;
#endif
#if VAX750
        case VAX_750:
                uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
                break;
#endif
        }
}

/*
 * Initialize the map register and buffered data path allocation
 * state for a uba.
 */
ubainitmaps(uhp)
        register struct uba_hd *uhp;
{

        rminit(uhp->uh_map, NUBMREG, 1, "uba", UAMSIZ);
        switch (cpu) {
#if VAX780
        case VAX_780:
                uhp->uh_bdpfree = (1<<NBDP780) - 1;
                break;
#endif
#if VAX750
        case VAX_750:
                uhp->uh_bdpfree = (1<<NBDP750) - 1;
                break;
#endif
#if VAX730
        case VAX_730:
                break;
#endif
        }
}

/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
        int uban;
{
        register struct cdevsw *cdp;
        register struct uba_hd *uh = &uba_hd[uban];
        int s;

        s = spl6();
        uh->uh_users = 0;
        uh->uh_zvcnt = 0;
        uh->uh_xclu = 0;
        uh->uh_hangcnt = 0;
        uh->uh_actf = uh->uh_actl = 0;
        uh->uh_bdpwant = 0;
        uh->uh_mrwant = 0;
        ubainitmaps(uh);
        wakeup((caddr_t)&uh->uh_bdpwant);
        wakeup((caddr_t)&uh->uh_mrwant);
        printf("uba%d: reset", uban);
        ubainit(uh->uh_uba);
        for (cdp = cdevsw; cdp->d_open; cdp++)
                (*cdp->d_reset)(uban);
#ifdef INET
        ifubareset(uban);
#endif
        printf("\n");
        splx(s);
}

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are, they will never happen anyways.
 */
ubainit(uba)
        register struct uba_regs *uba;
{

        switch (cpu) {
#if VAX780
        case VAX_780:
                uba->uba_cr = UBACR_ADINIT;
                uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
                while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
                        ;
                break;
#endif
#if VAX750
        case VAX_750:
#endif
#if VAX730
        case VAX_730:
#endif
#if defined(VAX750) || defined(VAX730)
                mtpr(IUR, 0);
                /* give devices time to recover from power fail */
                /* THIS IS PROBABLY UNNECESSARY */
                DELAY(500000);
                /* END PROBABLY UNNECESSARY */
                break;
#endif
        }
}

#if VAX780
/*
 * Check to make sure the UNIBUS adaptor is not hung,
 * with an interrupt in the register to be presented,
 * but not presenting it for an extended period (5 seconds).
 */
unhang()
{
        register int uban;

        for (uban = 0; uban < numuba; uban++) {
                register struct uba_hd *uh = &uba_hd[uban];
                register struct uba_regs *up = uh->uh_uba;

                if (up->uba_sr == 0)
                        return;
                up->uba_sr = UBASR_CRD|UBASR_LEB;
                uh->uh_hangcnt++;
                if (uh->uh_hangcnt > 5*hz) {
                        uh->uh_hangcnt = 0;
                        printf("uba%d: hung\n", uban);
                        ubareset(uban);
                }
        }
}

/*
 * This is a timeout routine which decrements the ``i forgot to
 * interrupt'' counts, on an 11/780.  This prevents slowly growing
 * counts from causing a UBA reset since we are interested only
 * in hang situations.
 */
ubawatch()
{
        register struct uba_hd *uh;
        register int uban;

        if (panicstr)
                return;
        for (uban = 0; uban < numuba; uban++) {
                uh = &uba_hd[uban];
                if (uh->uh_hangcnt)
                        uh->uh_hangcnt--;
        }
}

int ubawedgecnt = 10;
int ubacrazy = 500;
/*
 * This routine is called by the locore code to
 * process a UBA error on an 11/780.  The arguments are passed
 * on the stack, and value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, xx, uvec, uba)
        register int uban;
        register struct uba_hd *uh;
        int uvec;
        register struct uba_regs *uba;
{
        register sr, s;

        if (uvec == 0) {
                uh->uh_zvcnt++;
                if (uh->uh_zvcnt > 250000) {
                        printf("uba%d: too many zero vectors\n", uban);
                        ubareset(uban);
                }
                uvec = 0;
                return;
        }
        if (uba->uba_cnfgr & NEX_CFGFLT) {
                printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
                    uban, uba->uba_sr, ubasr_bits,
                    uba->uba_cnfgr, NEXFLT_BITS);
                ubareset(uban);
                uvec = 0;
                return;
        }
        sr = uba->uba_sr;
        s = spl7();
        printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
            uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
        splx(s);
        uba->uba_sr = sr;
        uvec &= UBABRRVR_DIV;
        if (++uh->uh_errcnt % ubawedgecnt == 0) {
                if (uh->uh_errcnt > ubacrazy)
                        panic("uba crazy");
                printf("ERROR LIMIT ");
                ubareset(uban);
                uvec = 0;
                return;
        }
        return;
}
#endif

#ifdef notdef
/*
 * This routine allows remapping of previously
 * allocated UNIBUS bdp and map resources
 * onto different memory addresses.
 * It should only be used by routines which need
 * small fixed length mappings for long periods of time
 * (like the ARPANET ACC IMP interface).
 * It only maps kernel addresses.
 */
ubaremap(uban, ubinfo, addr)
        int uban;
        register unsigned ubinfo;
        caddr_t addr;
{
        register struct uba_hd *uh = &uba_hd[uban];
        register struct pte *pte, *io;
        register int temp, bdp;
        int npf, o;

        o = (int)addr & PGOFSET;
        bdp = (ubinfo >> 28) & 0xf;
        npf = (ubinfo >> 18) & 0x3ff;
        io = &uh->uh_uba->uba_map[(ubinfo >> 9) & 0x1ff];
        temp = (bdp << 21) | UBAMR_MRV;

        /*
         * If using buffered data path initiate purge
         * of old data and set byte offset bit if next
         * transfer will be from odd address.
         */
        if (bdp) {
                switch (cpu) {
#if VAX780
                case VAX_780:
                        uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
                        break;
#endif
#if VAX750
                case VAX_750:
                        uh->uh_uba->uba_dpr[bdp] |=
                            UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
                        break;
#endif
                }
                if (o & 1)
                        temp |= UBAMR_BO;
        }

        /*
         * Set up the map registers, leaving an invalid reg
         * at the end to guard against wild unibus transfers.
         */
        pte = &Sysmap[btop(((int)addr)&0x7fffffff)];
        while (--npf != 0)
                *(int *)io++ = pte++->pg_pfnum | temp;
        *(int *)io = 0;

        /*
         * Return effective UNIBUS address.
         */
        return (ubinfo | o);
}
#endif

/*
 * This routine is called by a driver for a device with on-board Unibus
 * memory.  It removes the memory block from the Unibus resource map
 * and clears the map registers for the block.
 *
 * Arguments are the Unibus number, the Unibus address of the memory
 * block, its size in blocks of 512 bytes, and a flag indicating whether
 * to allocate the Unibus space from the resource map or whether it already
 * has been.
 *
 * Returns > 0 if successful, 0 if not.
 */

ubamem(uban, addr, size, alloc)
{
        register struct uba_hd *uh = &uba_hd[uban];
        register int *m;
        register int i, a, s;

        if (alloc) {
                s = spl6();
                a = rmget(uh->uh_map, size, (addr>>9)+1); /* starts at ONE! */
                splx(s);
        } else
                a = (addr>>9)+1;
        if (a) {
                m = (int *) &uh->uh_uba->uba_map[a-1];
                for (i=0; i<size; i++)
                        *m++ = 0;       /* All off, especially 'valid' */
#if VAX780
                if (cpu == VAX_780) {   /* map disable */
                        i = (addr+size*512+8191)/8192;
                        uh->uh_uba->uba_cr |= i<<26;
                }
#endif
        }
        return (a);
}

/*
 * Map a virtual address into the user's address space.  Actually all we
 * do is turn on the user mode write protection bits for the particular
 * page of memory involved.
 */
maptouser(vaddress)
        caddr_t vaddress;
{

        Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}

unmaptouser(vaddress)
        caddr_t vaddress;
{

        Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}
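
#ifdef notdef
/*
 * Illustrative sketch only (nothing in the system calls this): the
 * calling pattern a driver would use with the routines above to map
 * a kernel buffer for a UNIBUS transfer.  The routine name and the
 * way the device is started are hypothetical.
 */
exampledma(uban, addr, bcnt)
        int uban;
        caddr_t addr;
        int bcnt;
{
        register int info;

        /*
         * Allocate map registers and a buffered data path,
         * sleeping until they are available.
         */
        info = uballoc(uban, addr, bcnt, UBA_NEEDBDP);
        /*
         * The low 18 bits of info are the UNIBUS address of the
         * buffer.  Load (info & 0x3ffff) into the device registers,
         * start the transfer, and wait for the completion interrupt
         * (not shown).
         */
        /*
         * Release the map registers and data path when the
         * transfer is done.
         */
        ubarelse(uban, &info);
}
#endif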