/*	uba.c	4.52	82/10/17	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/map.h"
#include "../h/pte.h"
#include "../h/buf.h"
#include "../h/vm.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/conf.h"
#include "../h/dk.h"

#include "../vax/cpu.h"
#include "../vax/mtpr.h"
#include "../vax/nexus.h"
#include "../vaxuba/ubareg.h"
#include "../vaxuba/ubavar.h"

#if VAX780
char	ubasr_bits[] = UBASR_BITS;
#endif

/*
 * Do transfer on device argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 * If you call this routine with the head of the queue for a
 * UBA, it will automatically remove the device from the UBA
 * queue before it returns.  If some other device is given
 * as argument, it will be added to the request queue if the
 * request cannot be started immediately.  This means that
 * passing a device which is on the queue but not at the head
 * of the request queue is likely to be a disaster.
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spl6();
	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);
	return (1);
rwait:
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}

/*
 * The transfer on controller um is done: release any exclusive
 * use of the uba, drop the active user count, and free the
 * map registers and buffered data path.
 */
ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}
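/*
 * Illustrative sketch only (not part of this file): a driver for a
 * hypothetical device ``xx'' would normally call ubago() from its
 * unit start routine and ubadone() from its interrupt routine once
 * the transfer completes.  The names xxustart, xxintr and xxdinfo
 * below are assumptions made purely for illustration.
 */
#ifdef notdef
extern struct uba_device *xxdinfo[];

xxustart(ui)
	register struct uba_device *ui;
{

	/* position the device, etc., then ask for uba resources */
	return (ubago(ui));
}

xxintr(unit)
	int unit;
{
	register struct uba_device *ui = xxdinfo[unit];
	register struct uba_ctlr *um = ui->ui_mi;

	/* ... check controller and drive status here ... */
	ubadone(um);		/* free map registers and bdp */
	/* ... iodone() the buffer and start the next request ... */
}
#endif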
/*
 * Allocate and setup UBA map registers, and bdp's
 * Flags says whether bdp is needed, whether the caller can't
 * wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 */
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int temp;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX730
	if (cpu == VAX_730)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	npf = btoc(bp->b_bcount + o) + 1;
	a = spl6();
	while ((reg = rmalloc(uh->uh_map, npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)uh->uh_map, PSWP);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, npf, reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)uh->uh_map, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	io = &uh->uh_uba->uba_map[reg];
	while (--npf != 0) {
		if (pte->pg_pfnum == 0)
			panic("uba zero uentry");
		*(int *)io++ = pte++->pg_pfnum | temp;
	}
	*(int *)io++ = 0;
	return (ubinfo);
}

/*
 * Non buffer setup interface... set up a buffer and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}
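/*
 * Illustrative sketch only: since the value returned by ubasetup()
 * and uballoc() packs the byte offset in bits 0-8 and the starting
 * map register number in bits 9-17, its low 18 bits give the UNIBUS
 * address at which the buffer has been mapped.  A hypothetical
 * caller might compute that address as (names assumed):
 *
 *	info = uballoc(uban, addr, bcnt, UBA_NEEDBDP);
 *	ubaddr = info & 0x3ffff;
 */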
/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register parameter is by value since we need to block
 * against uba resets on 11/780's.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spl6();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;
	splx(s);		/* let interrupts in, we're safe for a while */
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)uh->uh_map);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered, so we do this
	 * at high ipl.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	s = spl6();
	rmfree(uh->uh_map, npf, reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)uh->uh_map);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}

/*
 * Purge the buffered data path used for the transfer described
 * by um->um_ubinfo, forcing any bytes still held in the bdp out
 * to memory.
 */
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if VAX780
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}

/*
 * Initialize the map register and buffered data path
 * allocation information for the uba described by uhp.
 */
ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, NUBMREG, 1, "uba", UAMSIZ);
	switch (cpu) {
#if VAX780
	case VAX_780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#if VAX750
	case VAX_750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
#if VAX730
	case VAX_730:
		break;
#endif
	}
}
/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spl6();
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_hangcnt = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	for (cdp = cdevsw; cdp->d_open; cdp++)
		(*cdp->d_reset)(uban);
#ifdef INET
	ifubareset(uban);
#endif
	printf("\n");
	splx(s);
}

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are; they will never happen anyway.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if VAX780
	case VAX_780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX730
	case VAX_730:
#endif
#if defined(VAX750) || defined(VAX730)
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
		/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
		/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}

#if VAX780
/*
 * Check to make sure the UNIBUS adaptor is not hung,
 * with an interrupt in the register to be presented,
 * but not presenting it for an extended period (5 seconds).
 */
unhang()
{
	register int uban;

	for (uban = 0; uban < numuba; uban++) {
		register struct uba_hd *uh = &uba_hd[uban];
		register struct uba_regs *up = uh->uh_uba;

		if (up->uba_sr == 0)
			return;
		up->uba_sr = UBASR_CRD|UBASR_LEB;
		uh->uh_hangcnt++;
		if (uh->uh_hangcnt > 5*hz) {
			uh->uh_hangcnt = 0;
			printf("uba%d: hung\n", uban);
			ubareset(uban);
		}
	}
}
/*
 * This is a timeout routine which decrements the ``i forgot to
 * interrupt'' counts, on an 11/780.  This prevents slowly growing
 * counts from causing a UBA reset since we are interested only
 * in hang situations.
 */
ubawatch()
{
	register struct uba_hd *uh;
	register int uban;

	if (panicstr)
		return;
	for (uban = 0; uban < numuba; uban++) {
		uh = &uba_hd[uban];
		if (uh->uh_hangcnt)
			uh->uh_hangcnt--;
	}
}

int	ubawedgecnt = 10;
int	ubacrazy = 500;
/*
 * This routine is called by the locore code to
 * process a UBA error on an 11/780.  The arguments are passed
 * on the stack, and value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, xx, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int uvec;
	register struct uba_regs *uba;
{
	register sr, s;

	if (uvec == 0) {
		uh->uh_zvcnt++;
		if (uh->uh_zvcnt > 250000) {
			printf("uba%d: too many zero vectors\n", uban);
			ubareset(uban);
		}
		uvec = 0;
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	sr = uba->uba_sr;
	s = spl7();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif
#ifdef notdef
/*
 * This routine allows remapping of previously
 * allocated UNIBUS bdp and map resources
 * onto different memory addresses.
 * It should only be used by routines which need
 * small fixed length mappings for long periods of time
 * (like the ARPANET ACC IMP interface).
 * It only maps kernel addresses.
 */
ubaremap(uban, ubinfo, addr)
	int uban;
	register unsigned ubinfo;
	caddr_t addr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register struct pte *pte, *io;
	register int temp, bdp;
	int npf, o;

	o = (int)addr & PGOFSET;
	bdp = (ubinfo >> 28) & 0xf;
	npf = (ubinfo >> 18) & 0x3ff;
	io = &uh->uh_uba->uba_map[(ubinfo >> 9) & 0x1ff];
	temp = (bdp << 21) | UBAMR_MRV;

	/*
	 * If using buffered data path initiate purge
	 * of old data and set byte offset bit if next
	 * transfer will be from odd address.
	 */
	if (bdp) {
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		if (o & 1)
			temp |= UBAMR_BO;
	}

	/*
	 * Set up the map registers, leaving an invalid reg
	 * at the end to guard against wild unibus transfers.
	 */
	pte = &Sysmap[btop(((int)addr)&0x7fffffff)];
	while (--npf != 0)
		*(int *)io++ = pte++->pg_pfnum | temp;
	*(int *)io = 0;

	/*
	 * Return effective UNIBUS address.
	 */
	return (ubinfo | o);
}
#endif

/*
 * This routine is called by a driver for a device with on-board Unibus
 * memory.  It removes the memory block from the Unibus resource map
 * and clears the map registers for the block.
 *
 * Arguments are the Unibus number, the Unibus address of the memory
 * block, its size in blocks of 512 bytes, and a flag indicating whether
 * to allocate the unibus space from the resource map or whether it already
 * has been.
 *
 * Returns > 0 if successful, 0 if not.
 */
ubamem(uban, addr, size, doalloc)
	int uban, addr, size, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int *m;
	register int i, a, s;

	if (doalloc) {
		s = spl6();
		a = rmget(uh->uh_map, size, (addr>>9)+1);	/* starts at ONE! */
		splx(s);
	} else
		a = (addr>>9)+1;
	if (a) {
		m = (int *) &uh->uh_uba->uba_map[a-1];
		for (i = 0; i < size; i++)
			*m++ = 0;	/* All off, especially 'valid' */
#if VAX780
		if (cpu == VAX_780) {		/* map disable */
			i = (addr+size*512+8191)/8192;
			uh->uh_uba->uba_cr |= i<<26;
		}
#endif
	}
	return (a);
}

/*
 * Map a virtual address into the user's address space.  Actually all we
 * do is turn on user mode write permission in the protection bits for
 * the particular page of memory involved.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}

/*
 * Undo maptouser(): restore kernel-only access to the page.
 */
unmaptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}
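/*
 * Illustrative sketch only: a probe or attach routine for a
 * hypothetical device with 1kb of on-board memory at UNIBUS address
 * 0160000 (both figures are assumptions) might reserve the
 * corresponding map space with ubamem() above:
 *
 *	if (ubamem(ui->ui_ubanum, 0160000, 2, 1) == 0)
 *		return (0);
 *
 * Here the size argument is in 512 byte blocks, and the final
 * argument asks ubamem() to allocate the space from the resource
 * map as well as clearing the map registers.
 */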