123352Smckusick /* 229248Smckusick * Copyright (c) 1982, 1986 Regents of the University of California. 323352Smckusick * All rights reserved. The Berkeley software License Agreement 423352Smckusick * specifies the terms and conditions for redistribution. 523352Smckusick * 6*32521Sbostic * @(#)uba.c 7.4 (Berkeley) 10/23/87 723352Smckusick */ 840Sbill 99780Ssam #include "../machine/pte.h" 109780Ssam 1117081Sbloom #include "param.h" 1217081Sbloom #include "systm.h" 1317081Sbloom #include "map.h" 1417081Sbloom #include "buf.h" 1517081Sbloom #include "vm.h" 1617081Sbloom #include "dir.h" 1717081Sbloom #include "user.h" 1817081Sbloom #include "proc.h" 1917081Sbloom #include "conf.h" 2030389Skarels #include "dkstat.h" 2117081Sbloom #include "kernel.h" 2240Sbill 238481Sroot #include "../vax/cpu.h" 248481Sroot #include "../vax/mtpr.h" 258481Sroot #include "../vax/nexus.h" 2617081Sbloom #include "ubareg.h" 2717081Sbloom #include "ubavar.h" 288481Sroot 2929737Skarels #ifdef DW780 302929Swnj char ubasr_bits[] = UBASR_BITS; 312929Swnj #endif 322929Swnj 3326371Skarels #define spluba spl7 /* IPL 17 */ 3426371Skarels 35*32521Sbostic #define BDPMASK 0xf0000000 /* see ubavar.h */ 36*32521Sbostic 3740Sbill /* 382570Swnj * Do transfer on device argument. The controller 392570Swnj * and uba involved are implied by the device. 402570Swnj * We queue for resource wait in the uba code if necessary. 412570Swnj * We return 1 if the transfer was started, 0 if it was not. 42*32521Sbostic * 43*32521Sbostic * The onq argument must be zero iff the device is not on the 44*32521Sbostic * queue for this UBA. If onq is set, the device must be at the 45*32521Sbostic * head of the queue. In any case, if the transfer is started, 46*32521Sbostic * the device will be off the queue, and if not, it will be on. 47*32521Sbostic * 48*32521Sbostic * Drivers that allocate one BDP and hold it for some time should 49*32521Sbostic * set ud_keepbdp. 
 * In this case um_bdp tells which BDP is allocated
 * to the controller, unless it is zero, indicating that the controller
 * does not now have a BDP.
 */
ubaqueue(ui, onq)
	register struct uba_device *ui;
	int onq;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register struct uba_driver *ud;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	ud = um->um_driver;
	s = spluba();
	/*
	 * Honor exclusive BDP use requests.
	 */
	if (ud->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	if (ud->ud_keepbdp) {
		/*
		 * First get just a BDP (though in fact it comes with
		 * one map register too).
		 */
		if (um->um_bdp == 0) {
			um->um_bdp = uballoc(um->um_ubanum,
				(caddr_t)0, 0, UBA_NEEDBDP|UBA_CANTWAIT);
			if (um->um_bdp == 0)
				goto rwait;
		}
		/* now share it with this transfer */
		um->um_ubinfo = ubasetup(um->um_ubanum,
			um->um_tab.b_actf->b_actf,
			um->um_bdp|UBA_HAVEBDP|UBA_CANTWAIT);
	} else
		um->um_ubinfo = ubasetup(um->um_ubanum,
			um->um_tab.b_actf->b_actf, UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (ud->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	/*
	 * Charge the transfer to the device's iostat slot, if it has
	 * one (ui_dk < 0 means no statistics are kept for this device).
	 */
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	/* Resources in hand: take the device off the wait queue and go. */
	if (onq)
		uh->uh_actf = ui->ui_forw;
	(*ud->ud_dgo)(um);
	return (1);
rwait:
	/*
	 * Could not get resources.  If the device is not already on
	 * the UBA's resource wait queue, append it at the tail;
	 * ubarelse() will restart the queue as resources free up.
	 */
	if (!onq) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}

/*
 * Release the UNIBUS resources held for a completed transfer on
 * controller um: drop any exclusive-use claim, decrement the user
 * count, and free the map registers (and BDP, unless the driver
 * asked to keep its BDP via ud_keepbdp) through ubarelse().
 */
ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	if (um->um_driver->ud_keepbdp)
		um->um_ubinfo &= ~BDPMASK;	/* keep BDP for misers */
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and setup UBA map registers, and bdp's
 * Flags says whether bdp is needed, whether the caller can't
 * wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 */
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	int pfnum, temp;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

	/* DW730 and QBA adapters have no buffered data paths. */
#ifdef DW730
	if (uh->uh_type == DW730)
		flags &= ~UBA_NEEDBDP;
#endif
#ifdef QBA
	if (uh->uh_type == QBA)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	/*
	 * One extra map register is allocated; it is written as zero
	 * (invalid) at the end of the fill loop below so a transfer
	 * that runs past its mapping faults rather than corrupting
	 * someone else's memory.
	 */
	npf = btoc(bp->b_bcount + o) + 1;
	a = spluba();
	/* Wait (unless UBA_CANTWAIT) for npf contiguous map registers. */
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)&uh->uh_mrwant, PSWP);
	}
	/* UBA_NEED16: device can only address the first 16 pages. */
	if ((flags & UBA_NEED16) && reg + npf > 128) {
		/*
		 * Could hang around and try again (if we can ever succeed).
		 * Won't help any current device...
		 */
		rmfree(uh->uh_map, (long)npf, (long)reg);
		splx(a);
		return (0);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		/* Wait (unless UBA_CANTWAIT) for a free buffered data path. */
		while ((bdp = ffs((long)uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		/* Caller supplied a BDP number in the high nibble of flags. */
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	/* Pack the cookie described in the comment above. */
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	/* Template for each map register: BDP number + valid bit. */
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;	/* odd byte offset */
	/*
	 * Find the PTEs describing the buffer: kernel Sysmap for
	 * non-physical I/O, else the u-area, page tables, or the
	 * process page tables as the buf flags dictate.
	 */
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	/* Copy page frame numbers into the adapter's map registers. */
	io = &uh->uh_mr[reg];
	while (--npf != 0) {
		pfnum = pte->pg_pfnum;
		if (pfnum == 0)
			panic("uba zero uentry");
		pte++;
		*(int *)io++ = pfnum | temp;
	}
	*(int *)io++ = 0;	/* invalid guard register; see npf above */
	return (ubinfo);
}

/*
 * Non buffer setup interface... set up a buffer and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}

/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register parameter is by value since we need to block
 * against uba resets on 11/780's.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spluba();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;	/* claim the cookie before anyone else can */
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		/* Flush and free the buffered data path, if one was held. */
		switch (uh->uh_type) {
#ifdef DW780
		case DW780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#ifdef DW750
		case DW750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		default:
			break;
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)&uh->uh_bdpwant);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered,
	 * nor can the registers be freed twice.
	 * Unblock interrupts once this is done.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	rmfree(uh->uh_map, (long)npf, (long)reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)&uh->uh_mrwant);
	}
	while (uh->uh_actf && ubaqueue(uh->uh_actf, 1))
		;
}

/*
 * Force out ("purge") any data still sitting in the buffered data
 * path used by controller um, by poking the adapter-specific purge
 * bits in the data path register.  No-op on adapters without BDPs.
 */
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (uh->uh_type) {
#ifdef DW780
	case DW780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#ifdef DW750
	case DW750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	default:
		break;
	}
}

/*
 * Initialize the resource maps for a UBA: the map-register
 * resource map, and the free-BDP bit mask (all BDPs free;
 * adapters without BDPs leave the mask as is).
 */
ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, (long)uhp->uh_memsize, (long)1, "uba", UAMSIZ);
	switch (uhp->uh_type) {
#ifdef DW780
	case DW780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#ifdef DW750
	case DW750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
	default:
		break;
	}
}

/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spluba();
	/*
	 * Discard all resource state; sleepers are awakened below and
	 * must re-request their resources (ubarelse() sees *amr == 0).
	 */
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	ubameminit(uban);
	/* Let every character device driver re-establish its state. */
	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
		(*cdp->d_reset)(uban);
	ifubareset(uban);	/* network interfaces too */
	printf("\n");
	splx(s);
}

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are, they will never happen anyways.
 * SHOULD GET POINTER TO UBA_HD INSTEAD OF UBA.
 */
ubainit(uba)
	register struct uba_regs *uba;
{
	register struct uba_hd *uhp;
	int isphys = 0;

	/*
	 * Find the uba_hd for this adapter; the caller may have passed
	 * either the mapped or the physical register address.
	 */
	for (uhp = uba_hd; uhp < uba_hd + numuba; uhp++) {
		if (uhp->uh_uba == uba)
			break;
		if (uhp->uh_physuba == uba) {
			isphys++;
			break;
		}
	}
	if (uhp >= uba_hd + numuba) {
		printf("init unknown uba\n");
		return;
	}

	switch (uhp->uh_type) {
#ifdef DW780
	case DW780:
		/* Adapter init, then wait for it to come back. */
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#ifdef DW750
	case DW750:
#endif
#ifdef DW730
	case DW730:
#endif
#ifdef QBA
	case QBA:
#endif
#if DW750 || DW730 || QBA
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
		/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
		/* END PROBABLY UNNECESSARY */
#ifdef QBA
		/*
		 * Re-enable local memory access
		 * from the Q-bus.
		 */
		if (uhp->uh_type == QBA) {
			if (isphys)
				*((char *)QIOPAGE630 + QIPCR) = Q_LMEAE;
			else
				*(uhp->uh_iopage + QIPCR) = Q_LMEAE;
		}
#endif QBA
		break;
#endif DW750 || DW730 || QBA
	}
}

#ifdef DW780
int	ubawedgecnt = 10;	/* errors between "ERROR LIMIT" resets */
int	ubacrazy = 500;		/* total errors before panic */
int	zvcnt_max = 5000;	/* in 8 sec */
/*
 * This routine is called by the locore code to process a UBA
 * error on an 11/780 or 8600.  The arguments are passed
 * on the stack, and value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, ipl, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int ipl, uvec;
	register struct uba_regs *uba;
{
	register sr, s;

	if (uvec == 0) {
		/*
		 * Zero vector: count them, and reset the UBA if we get
		 * too many within an (8 second) window.
		 *
		 * Declare dt as unsigned so that negative values
		 * are handled as >8 below, in case time was set back.
		 */
		u_long	dt = time.tv_sec - uh->uh_zvtime;

		uh->uh_zvtotal++;
		if (dt > 8) {
			uh->uh_zvtime = time.tv_sec;
			uh->uh_zvcnt = 0;
		}
		if (++uh->uh_zvcnt > zvcnt_max) {
			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
				uban, uh->uh_zvcnt, dt + 1);
			printf("\tIPL 0x%x\n\tcnfgr: %b  Adapter Code: 0x%x\n",
				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
				uba->uba_cnfgr&0xff);
			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
				uba->uba_sr, ubasr_bits, uba->uba_dcr,
				(uba->uba_dcr&0x8000000)?"":"NOT ");
			ubareset(uban);
		}
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		/* SBI-level fault: reset and suppress further processing. */
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;	/* value-result: cancel vector dispatch */
		return;
	}
	sr = uba->uba_sr;
	s = spluba();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;	/* write ones to clear the error bits */
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;	/* value-result: cancel vector dispatch */
		return;
	}
	return;
}
#endif

/*
 * Look for devices with unibus memory, allow them to configure, then disable
 * map registers as necessary.  Called during autoconfiguration and ubareset.
 * The device ubamem routine returns 0 on success, 1 on success if it is fully
 * configured (has no csr or interrupt, so doesn't need to be probed),
 * and -1 on failure.
 */
ubameminit(uban)
{
	register struct uba_device *ui;
	register struct uba_hd *uh = &uba_hd[uban];
	caddr_t umembase = umem[uban] + 0x3e000, addr;
#define	ubaoff(off)	((int)(off) & 0x1fff)

	uh->uh_lastmem = 0;
	/* '?' in ui_ubanum is the "any uba" wildcard from the config file. */
	for (ui = ubdinit; ui->ui_driver; ui++) {
		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
			continue;
		if (ui->ui_driver->ud_ubamem) {
			/*
			 * During autoconfiguration, need to fudge ui_addr.
			 */
			addr = ui->ui_addr;
			ui->ui_addr = umembase + ubaoff(addr);
			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
			case 1:
				ui->ui_alive = 1;
				/* FALLTHROUGH */
			case 0:
				ui->ui_ubanum = uban;
				break;
			}
			ui->ui_addr = addr;
		}
	}
#ifdef DW780
	/*
	 * On a DW780, throw away any map registers disabled by rounding
	 * the map disable in the configuration register
	 * up to the next 8K boundary, or below the last unibus memory.
	 */
	if (uh->uh_type == DW780) {
		register i;

		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
		while (i)
			(void) rmget(uh->uh_map, 1, i--);
	}
#endif
}

/*
 * Allocate UNIBUS memory.  Allocates and initializes
 * sufficient mapping registers for access.  On a 780,
 * the configuration register is setup to disable UBA
 * response on DMA transfers to addresses controlled
 * by the disabled mapping registers.
 * On a DW780, should only be called from ubameminit, or in ascending order
 * from 0 with 8K-sized and -aligned addresses; freeing memory that isn't
 * the last unibus memory would free unusable map registers.
 * Doalloc is 1 to allocate, 0 to deallocate.
 */
ubamem(uban, addr, npg, doalloc)
	int uban, addr, npg, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int a;
	int s;

	/* Map register index for this unibus address (512-byte pages). */
	a = (addr >> 9) + 1;
	s = spluba();
	if (doalloc)
		a = rmget(uh->uh_map, npg, a);
	else
		rmfree(uh->uh_map, (long)npg, (long)a);
	splx(s);
	if (a) {
		register int i, *m;

		m = (int *)&uh->uh_mr[a - 1];
		for (i = 0; i < npg; i++)
			*m++ = 0;	/* All off, especially 'valid' */
		i = addr + npg * 512;
		/* Track the high-water mark of unibus memory. */
		if (doalloc && i > uh->uh_lastmem)
			uh->uh_lastmem = i;
		else if (doalloc == 0 && i == uh->uh_lastmem)
			uh->uh_lastmem = addr;
#ifdef DW780
		/*
		 * On a 780, set up the map register disable
		 * field in the configuration register.  Beware
		 * of callers that request memory ``out of order''
		 * or in sections other than 8K multiples.
		 * Ubameminit handles such requests properly, however.
		 */
		if (uh->uh_type == DW780) {
			i = uh->uh_uba->uba_cr &~ 0x7c000000;
			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
			uh->uh_uba->uba_cr = i;
		}
#endif
	}
	return (a);
}

#include "ik.h"
#include "vs.h"
#if NIK > 0 || NVS > 0
/*
 * Map a virtual address into users address space. Actually all we
 * do is turn on the user mode write protection bits for the particular
 * page of memory involved.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}

/* Undo maptouser(): restore kernel-write-only protection on the page. */
unmaptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}
#endif