123352Smckusick /* 223352Smckusick * Copyright (c) 1982 Regents of the University of California. 323352Smckusick * All rights reserved. The Berkeley software License Agreement 423352Smckusick * specifies the terms and conditions for redistribution. 523352Smckusick * 6*24182Sbloom * @(#)uba.c 6.6 (Berkeley) 08/05/85 723352Smckusick */ 840Sbill 99780Ssam #include "../machine/pte.h" 109780Ssam 1117081Sbloom #include "param.h" 1217081Sbloom #include "systm.h" 1317081Sbloom #include "map.h" 1417081Sbloom #include "buf.h" 1517081Sbloom #include "vm.h" 1617081Sbloom #include "dir.h" 1717081Sbloom #include "user.h" 1817081Sbloom #include "proc.h" 1917081Sbloom #include "conf.h" 2017081Sbloom #include "dk.h" 2117081Sbloom #include "kernel.h" 2240Sbill 238481Sroot #include "../vax/cpu.h" 248481Sroot #include "../vax/mtpr.h" 258481Sroot #include "../vax/nexus.h" 2617081Sbloom #include "ubareg.h" 2717081Sbloom #include "ubavar.h" 288481Sroot 29*24182Sbloom #if defined(VAX780) || defined(VAX8600) 302929Swnj char ubasr_bits[] = UBASR_BITS; 312929Swnj #endif 322929Swnj 3340Sbill /* 342570Swnj * Do transfer on device argument. The controller 352570Swnj * and uba involved are implied by the device. 362570Swnj * We queue for resource wait in the uba code if necessary. 372570Swnj * We return 1 if the transfer was started, 0 if it was not. 382570Swnj * If you call this routine with the head of the queue for a 392570Swnj * UBA, it will automatically remove the device from the UBA 402570Swnj * queue before it returns. If some other device is given 412570Swnj * as argument, it will be added to the request queue if the 422570Swnj * request cannot be started immediately. This means that 432570Swnj * passing a device which is on the queue but not at the head 442570Swnj * of the request queue is likely to be a disaster. 
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spl6();
	/*
	 * An exclusive-use driver may not start while other users are
	 * active, and nothing may start while an exclusive transfer
	 * is in progress.
	 */
	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;		/* no map registers or bdp available now */
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	/* Charge the transfer to the device's iostat slot, if it has one. */
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	/* If this device headed the UBA resource queue, remove it. */
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);
	return (1);
rwait:
	/*
	 * Could not start: append the device to the UBA resource wait
	 * queue, unless it is already at the head (see the warning in
	 * the comment above about passing a mid-queue device).
	 */
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}

/*
 * Release the UNIBUS resources held by controller um after a
 * transfer completes: drop exclusive use (if any), decrement the
 * adapter user count, and free the map registers/bdp via ubarelse().
 */
ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and setup UBA map registers, and bdp's
 * Flags says whether bdp is needed, whether the caller can't
 * wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 */
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	int pfnum, temp;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX730
	/* The 730 has no buffered data paths; never ask for one. */
	if (cpu == VAX_730)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	/* One extra map register is claimed for the invalid terminator
	 * entry written at the end of the loop below. */
	npf = btoc(bp->b_bcount + o) + 1;
	a = spl6();
	/* Allocate contiguous map registers, sleeping unless the caller
	 * cannot wait (e.g. is at interrupt level). */
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)&uh->uh_mrwant, PSWP);
	}
	/* UBA_NEED16: caller can only address the first 128 map
	 * registers (16 bit addressing); fail rather than wait. */
	if ((flags & UBA_NEED16) && reg + npf > 128) {
		/*
		 * Could hang around and try again (if we can ever succeed).
		 * Won't help any current device...
		 */
		rmfree(uh->uh_map, (long)npf, (long)reg);
		splx(a);
		return (0);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		/* Grab the lowest-numbered free buffered data path. */
		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		/* Caller supplied a bdp number in the top 4 flag bits. */
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	/* Pack the return value; layout documented in the header comment. */
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	/* temp holds the per-entry map register bits: bdp number, map
	 * register valid, and byte-offset mode for odd-offset bdp use. */
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;
	/* Locate the first pte for the buffer: kernel address, u. area,
	 * page table pages, or ordinary user/process virtual memory. */
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	/* Copy page frame numbers into the adapter map registers;
	 * the extra (npf'th) entry is zeroed as an invalid stopper. */
	io = &uh->uh_uba->uba_map[reg];
	while (--npf != 0) {
		pfnum = pte->pg_pfnum;
		if (pfnum == 0)
			panic("uba zero uentry");
		pte++;
		*(int *)io++ = pfnum | temp;
	}
	*(int *)io++ = 0;
	return (ubinfo);
}

/*
 * Non buffer setup interface... set up a buffer and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}

/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register parameter is by value since we need to block
 * against uba resets on 11/780's.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spl6();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;
	/* Unpack the bdp number (see ubasetup() for the encoding). */
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		/* Flush/purge the buffered data path before freeing it;
		 * the register bits required differ per CPU type. */
		switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
		case VAX_8600:
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)&uh->uh_bdpwant);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered,
	 * nor can the registers be freed twice.
	 * Unblock interrupts once this is done.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	rmfree(uh->uh_map, (long)npf, (long)reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)&uh->uh_mrwant);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}

/*
 * Flush the buffered data path in use by controller um
 * (bdp number taken from um_ubinfo) so that buffered data
 * reaches memory.  Register bits differ per CPU type.
 */
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
	case VAX_8600:
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}

/*
 * Initialize the map register resource map and the free
 * buffered-data-path mask for the uba described by uhp.
 * The 730 has no bdp's, so its free mask is left empty.
 */
ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, (long)NUBMREG, (long)1, "uba", UAMSIZ);
	switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
	case VAX_8600:
	case VAX_780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#if VAX750
	case VAX_750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
#if VAX730
	case VAX_730:
		break;
#endif
	}
}

/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spl6();
	/*
	 * Forget all outstanding resource state: user counts, zero
	 * vector counts, exclusive use, the resource wait queue, and
	 * the map register / bdp maps (reinitialized from scratch).
	 * Anyone sleeping on resources is awakened to retry.
	 */
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	/* Reinitialize the adapter hardware and unibus memory maps,
	 * then give every character device a chance to recover. */
	ubainit(uh->uh_uba);
	ubameminit(uban);
	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
		(*cdp->d_reset)(uban);
#ifdef INET
	ifubareset(uban);
#endif
	printf("\n");
	splx(s);
}

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are, they will never happen anyways.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
	case VAX_8600:
	case VAX_780:
		/* Adapter init, then re-enable interrupt fielding and
		 * error interrupts; spin until the adapter reports
		 * initialization complete. */
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX730
	case VAX_730:
#endif
#if defined(VAX750) || defined(VAX730)
		/* 750/730: unibus init is done via a processor register. */
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
		/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
		/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}

#ifdef VAX780
/* Error-limit tunables: reset the uba every ubawedgecnt errors,
 * panic after ubacrazy total errors. */
int	ubawedgecnt = 10;
int	ubacrazy = 500;
int	zvcnt_max = 5000;	/* in 8 sec */
int	zvcnt_total;		/* lifetime zero-vector count, all ubas */
long	zvcnt_time;		/* start of current zero-vector interval */
/*
 * This routine is called by the locore code to
 * process a UBA error on an 11/780.  The arguments are passed
 * on the stack, and value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, ipl, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int ipl, uvec;		/* uvec is value-result; must not be register */
	register struct uba_regs *uba;
{
	register sr, s;

	/*
	 * A zero vector: count them per 8-second window and reset
	 * the adapter if too many arrive, dumping diagnostic state.
	 */
	if (uvec == 0) {
		long	dt = time.tv_sec - zvcnt_time;
		zvcnt_total++;
		if (dt > 8) {
			zvcnt_time = time.tv_sec;
			uh->uh_zvcnt = 0;
		}
		if (++uh->uh_zvcnt > zvcnt_max) {
			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
				uban, uh->uh_zvcnt, dt + 1);
			printf("\tIPL 0x%x\n\tcnfgr: %b Adapter Code: 0x%x\n",
				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
				uba->uba_cnfgr&0xff);
			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
				uba->uba_sr, ubasr_bits, uba->uba_dcr,
				(uba->uba_dcr&0x8000000)?"":"NOT ");
			ubareset(uban);
		}
		return;
	}
	/* An SBI fault recorded in the configuration register: reset. */
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	/* Ordinary uba error: log it, write the status register back
	 * to clear the error bits, and strip the vector down to the
	 * interrupt vector field for further processing by locore. */
	sr = uba->uba_sr;
	s = spl7();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;
	uvec &= UBABRRVR_DIV;
	/* Too many errors: reset the adapter, or give up entirely. */
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif

/*
 * Look for devices with unibus memory, allow them to configure, then disable
 * map registers as necessary.  Called during autoconfiguration and ubareset.
 * The device ubamem routine returns 0 on success, 1 on success if it is fully
 * configured (has no csr or interrupt, so doesn't need to be probed),
 * and -1 on failure.
 */
ubameminit(uban)
{
	register struct uba_device *ui;
	register struct uba_hd *uh = &uba_hd[uban];
	caddr_t umembase = umem[uban] + 0x3e000, addr;
#define	ubaoff(off)	((int)(off) & 0x1fff)

	uh->uh_lastmem = 0;
	for (ui = ubdinit; ui->ui_driver; ui++) {
		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
			continue;
		if (ui->ui_driver->ud_ubamem) {
			/*
			 * During autoconfiguration, need to fudge ui_addr.
			 */
			addr = ui->ui_addr;
			ui->ui_addr = umembase + ubaoff(addr);
			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
			case 1:
				ui->ui_alive = 1;
				/* FALLTHROUGH */
			case 0:
				ui->ui_ubanum = uban;
				break;
			}
			ui->ui_addr = addr;
		}
	}
#if defined(VAX780) || defined(VAX8600)
	/*
	 * On a 780, throw away any map registers disabled by rounding
	 * the map disable in the configuration register
	 * up to the next 8K boundary, or below the last unibus memory.
	 */
	if ((cpu == VAX_780) || (cpu == VAX_8600)) {
		register i;

		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
		while (i)
			(void) rmget(uh->uh_map, 1, i--);
	}
#endif
}

/*
 * Allocate UNIBUS memory.  Allocates and initializes
 * sufficient mapping registers for access.  On a 780,
 * the configuration register is setup to disable UBA
 * response on DMA transfers to addresses controlled
 * by the disabled mapping registers.
 * On a 780, should only be called from ubameminit, or in ascending order
 * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
 * the last unibus memory would free unusable map registers.
 * Doalloc is 1 to allocate, 0 to deallocate.
 */
ubamem(uban, addr, npg, doalloc)
	int uban, addr, npg, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int a;
	int s;

	/* Map registers cover 512-byte pages; resource map is 1-origin. */
	a = (addr >> 9) + 1;
	s = spl6();
	if (doalloc)
		a = rmget(uh->uh_map, npg, a);
	else
		rmfree(uh->uh_map, (long)npg, (long)a);
	splx(s);
	if (a) {
		register int i, *m;

		m = (int *)&uh->uh_uba->uba_map[a - 1];
		for (i = 0; i < npg; i++)
			*m++ = 0;	/* All off, especially 'valid' */
		/* Track the high-water mark of unibus memory for the
		 * 780 map-disable calculation below. */
		i = addr + npg * 512;
		if (doalloc && i > uh->uh_lastmem)
			uh->uh_lastmem = i;
		else if (doalloc == 0 && i == uh->uh_lastmem)
			uh->uh_lastmem = addr;
#if defined(VAX780) || defined(VAX8600)
		/*
		 * On a 780, set up the map register disable
		 * field in the configuration register.  Beware
		 * of callers that request memory ``out of order''
		 * or in sections other than 8K multiples.
		 * Ubameminit handles such requests properly, however.
		 */
		if ((cpu == VAX_780) || (cpu == VAX_8600)) {
			i = uh->uh_uba->uba_cr &~ 0x7c000000;
			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
			uh->uh_uba->uba_cr = i;
		}
#endif
	}
	return (a);
}

#include "ik.h"
#if NIK > 0
/*
 * Map a virtual address into users address space.  Actually all we
 * do is turn on the user mode write protection bits for the particular
 * page of memory involved.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}

/*
 * Undo maptouser(): restore kernel-write-only protection on the
 * system page containing vaddress.
 */
unmaptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}
#endif