/*
 * Copyright (c) 1982 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)uba.c	6.11 (Berkeley) 04/22/86
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "map.h"
#include "buf.h"
#include "vm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "conf.h"
#include "dk.h"
#include "kernel.h"

#include "../vax/cpu.h"
#include "../vax/mtpr.h"
#include "../vax/nexus.h"
#include "ubareg.h"
#include "ubavar.h"

#if defined(VAX780) || defined(VAX8600)
char	ubasr_bits[] = UBASR_BITS;
#endif

#define	spluba	spl7		/* IPL 17 */

/*
 * Do transfer on device argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 * If you call this routine with the head of the queue for a
 * UBA, it will automatically remove the device from the UBA
 * queue before it returns.  If some other device is given
 * as argument, it will be added to the request queue if the
 * request cannot be started immediately.  This means that
 * passing a device which is on the queue but not at the head
 * of the request queue is likely to be a disaster.
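 *
 * A minimal sketch of the intended call, assuming a driver whose
 * start routine has just queued a buffer behind the controller
 * (illustrative only; not taken from any actual driver):
 *
 *	if (um->um_tab.b_actf->b_actf != NULL)
 *		(void) ubago(ui);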
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spluba();
	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);
	return (1);
rwait:
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}

ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and set up UBA map registers, and bdp's.
 * The flags say whether a bdp is needed and whether the caller
 * can't wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 */
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	int pfnum, temp;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if defined(VAX730) || defined(VAX630)
	if (cpu == VAX_730 || cpu == VAX_630)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	npf = btoc(bp->b_bcount + o) + 1;
	a = spluba();
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)&uh->uh_mrwant, PSWP);
	}
	if ((flags & UBA_NEED16) && reg + npf > 128) {
		/*
		 * Could hang around and try again (if we can ever succeed).
		 * Won't help any current device...
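		 * (The first 128 map registers cover 128 * 512 = 64Kb,
		 * all that a device generating only 16-bit addresses can
		 * reach; registers beyond them are useless to it.)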
		 */
		rmfree(uh->uh_map, (long)npf, (long)reg);
		splx(a);
		return (0);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		while ((bdp = ffs((long)uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	io = &uh->uh_uba->uba_map[reg];
	while (--npf != 0) {
		pfnum = pte->pg_pfnum;
		if (pfnum == 0)
			panic("uba zero uentry");
		pte++;
		*(int *)io++ = pfnum | temp;
	}
	*(int *)io++ = 0;
	return (ubinfo);
}

/*
 * Non-buffer setup interface... set up a buffer and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}

/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register parameter is by value since we need to block
 * against uba resets on 11/780's.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spluba();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
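		 * (The reset path releases the space and zeroes the
		 * caller's ubinfo word on its own, which is why *amr is
		 * sampled and cleared only while at spluba here.)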
		 */
		splx(s);
		return;
	}
	*amr = 0;
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
		case VAX_8600:
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)&uh->uh_bdpwant);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered,
	 * nor can the registers be freed twice.
	 * Unblock interrupts once this is done.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	rmfree(uh->uh_map, (long)npf, (long)reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)&uh->uh_mrwant);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}

ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
	case VAX_8600:
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}

ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, (long)NUBMREG, (long)1, "uba", UAMSIZ);
	switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
	case VAX_8600:
	case VAX_780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#if VAX750
	case VAX_750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
#if defined(VAX730) || defined(VAX630)
	case VAX_730:
	case VAX_630:
		break;
#endif
	}
}

/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
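 * Unibus network interfaces get the same chance, via ifubareset().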
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spluba();
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	ubameminit(uban);
	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
		(*cdp->d_reset)(uban);
	ifubareset(uban);
	printf("\n");
	splx(s);
}

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are; they will never happen anyway.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
	case VAX_8600:
	case VAX_780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX730
	case VAX_730:
#endif
#if VAX630
	case VAX_630:
#endif
#if defined(VAX750) || defined(VAX730) || defined(VAX630)
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
		/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
		/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}

#if defined(VAX780) || defined(VAX8600)
int	ubawedgecnt = 10;
int	ubacrazy = 500;
int	zvcnt_max = 5000;	/* in 8 sec */
/*
 * This routine is called by the locore code to process a UBA
 * error on an 11/780 or 8600.  The arguments are passed
 * on the stack, and value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, ipl, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int ipl, uvec;
	register struct uba_regs *uba;
{
	register sr, s;

	if (uvec == 0) {
		/*
		 * Declare dt as unsigned so that negative values
		 * are handled as >8 below, in case time was set back.
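		 * (If the clock was set back, the subtraction wraps to
		 * a huge unsigned value, dt > 8 holds, and the counting
		 * window simply restarts at the new time.)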
		 */
		u_long	dt = time.tv_sec - uh->uh_zvtime;

		uh->uh_zvtotal++;
		if (dt > 8) {
			uh->uh_zvtime = time.tv_sec;
			uh->uh_zvcnt = 0;
		}
		if (++uh->uh_zvcnt > zvcnt_max) {
			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
				uban, uh->uh_zvcnt, dt + 1);
			printf("\tIPL 0x%x\n\tcnfgr: %b  Adapter Code: 0x%x\n",
				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
				uba->uba_cnfgr&0xff);
			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
				uba->uba_sr, ubasr_bits, uba->uba_dcr,
				(uba->uba_dcr&0x8000000)?"":"NOT ");
			ubareset(uban);
		}
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	sr = uba->uba_sr;
	s = spluba();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif

/*
 * Look for devices with unibus memory, allow them to configure, then disable
 * map registers as necessary.  Called during autoconfiguration and ubareset.
 * The device ubamem routine returns 0 on success, 1 on success if it is fully
 * configured (has no csr or interrupt, so doesn't need to be probed),
 * and -1 on failure.
 */
ubameminit(uban)
{
	register struct uba_device *ui;
	register struct uba_hd *uh = &uba_hd[uban];
	caddr_t umembase = umem[uban] + 0x3e000, addr;
#define	ubaoff(off)	((int)(off) & 0x1fff)

	uh->uh_lastmem = 0;
	for (ui = ubdinit; ui->ui_driver; ui++) {
		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
			continue;
		if (ui->ui_driver->ud_ubamem) {
			/*
			 * During autoconfiguration, need to fudge ui_addr.
			 */
			addr = ui->ui_addr;
			ui->ui_addr = umembase + ubaoff(addr);
			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
			case 1:
				ui->ui_alive = 1;
				/* FALLTHROUGH */
			case 0:
				ui->ui_ubanum = uban;
				break;
			}
			ui->ui_addr = addr;
		}
	}
#if defined(VAX780) || defined(VAX8600)
	/*
	 * On a 780, throw away any map registers disabled by rounding
	 * the map disable in the configuration register
	 * up to the next 8K boundary, or below the last unibus memory.
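	 * (rmget() is applied once to each 512-byte map register below
	 * that boundary, so the resource map never hands them out again.)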
	 */
	if ((cpu == VAX_780) || (cpu == VAX_8600)) {
		register i;

		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
		while (i)
			(void) rmget(uh->uh_map, 1, i--);
	}
#endif
}

/*
 * Allocate UNIBUS memory.  Allocates and initializes
 * sufficient mapping registers for access.  On a 780,
 * the configuration register is set up to disable UBA
 * response on DMA transfers to addresses controlled
 * by the disabled mapping registers.
 * On a 780, should only be called from ubameminit, or in ascending order
 * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
 * the last unibus memory would free unusable map registers.
 * Doalloc is 1 to allocate, 0 to deallocate.
 */
ubamem(uban, addr, npg, doalloc)
	int uban, addr, npg, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int a;
	int s;

	a = (addr >> 9) + 1;
	s = spluba();
	if (doalloc)
		a = rmget(uh->uh_map, npg, a);
	else
		rmfree(uh->uh_map, (long)npg, (long)a);
	splx(s);
	if (a) {
		register int i, *m;

		m = (int *)&uh->uh_uba->uba_map[a - 1];
		for (i = 0; i < npg; i++)
			*m++ = 0;	/* All off, especially 'valid' */
		i = addr + npg * 512;
		if (doalloc && i > uh->uh_lastmem)
			uh->uh_lastmem = i;
		else if (doalloc == 0 && i == uh->uh_lastmem)
			uh->uh_lastmem = addr;
#if defined(VAX780) || defined(VAX8600)
		/*
		 * On a 780, set up the map register disable
		 * field in the configuration register.  Beware
		 * of callers that request memory ``out of order''
		 * or in sections other than 8K multiples.
		 * Ubameminit handles such requests properly, however.
		 */
		if ((cpu == VAX_780) || (cpu == VAX_8600)) {
			i = uh->uh_uba->uba_cr &~ 0x7c000000;
			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
			uh->uh_uba->uba_cr = i;
		}
#endif
	}
	return (a);
}

#include "ik.h"
#include "vs.h"
#if NIK > 0 || NVS > 0
/*
 * Map a virtual address into the user's address space.  Actually all we
 * do is turn on the user mode write protection bits for the particular
 * page of memory involved.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}

unmaptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}
#endif
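/*
 * For reference, the usual life cycle of the resources managed above,
 * as a hypothetical driver might use them (an illustrative sketch only,
 * not taken from any actual driver; uban, addr and bcnt stand for the
 * obvious arguments):
 *
 *	int info;
 *
 *	info = uballoc(uban, addr, bcnt, UBA_NEEDBDP);
 *	if (info) {
 *		... start DMA, using info & 0x3ffff (bits 0-17: start
 *		... map register plus byte offset) as the UNIBUS address
 *		ubarelse(uban, &info);
 *	}
 */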