/*
 * Copyright (c) 1982 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)uba.c	6.10 (Berkeley) 02/23/86
 */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "map.h"
#include "buf.h"
#include "vm.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "conf.h"
#include "dk.h"
#include "kernel.h"

#include "../vax/cpu.h"
#include "../vax/mtpr.h"
#include "../vax/nexus.h"
#include "ubareg.h"
#include "ubavar.h"

#if defined(VAX780) || defined(VAX8600)
char	ubasr_bits[] = UBASR_BITS;
#endif

#define	spluba	spl7		/* IPL 17 */

/*
 * Do a transfer on the device given as argument.  The controller
 * and uba involved are implied by the device.
 * We queue for resource wait in the uba code if necessary.
 * We return 1 if the transfer was started, 0 if it was not.
 *
 * If the argument device is at the head of the UBA's request
 * queue, it is removed from the queue before this returns.
 * If some other device is given, it is appended to the request
 * queue when the transfer cannot be started immediately.  This
 * means that passing a device which is on the queue but not at
 * its head is likely to be a disaster.
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spluba();
	if ((um->um_driver->ud_xclu && uh->uh_users > 0) || uh->uh_xclu)
		goto rwait;
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);
	return (1);
rwait:
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}

ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}
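
/*
 * Illustrative sketch only (kept under #ifdef notdef, never compiled):
 * the usual driver-side protocol around ubago()/ubadone().  The xx
 * names and the xxminfo table are hypothetical stand-ins for a real
 * driver's declarations.
 */
#ifdef notdef
xxustart(ui)
	register struct uba_device *ui;
{

	/*
	 * The driver has queued work on its controller's um_tab;
	 * ubago() either maps the head transfer and calls back
	 * through ud_dgo to start the hardware, or queues ui on
	 * the uba resource-wait list and returns 0.
	 */
	(void) ubago(ui);
}

xxintr(ctlr)
	int ctlr;
{
	register struct uba_ctlr *um = xxminfo[ctlr];

	/*
	 * On transfer completion, give back the map registers and
	 * bdp; ubadone() -> ubarelse() also restarts any devices
	 * waiting for uba resources.
	 */
	ubadone(um);
}
#endif /* notdef */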

/*
 * Allocate and set up UBA map registers, and a bdp if one is needed.
 * The flags say whether a bdp is needed and whether the caller can
 * wait (e.g. a caller at interrupt level cannot).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 */
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	int pfnum, temp;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX730
	if (cpu == VAX_730)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	npf = btoc(bp->b_bcount + o) + 1;	/* +1 for invalid fence entry */
	a = spluba();
	while ((reg = rmalloc(uh->uh_map, (long)npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)&uh->uh_mrwant, PSWP);
	}
	if ((flags & UBA_NEED16) && reg + npf > 128) {
		/*
		 * Could hang around and try again (if we can ever succeed).
		 * Won't help any current device...
		 */
		rmfree(uh->uh_map, (long)npf, (long)reg);
		splx(a);
		return (0);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		while ((bdp = ffs((long)uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				rmfree(uh->uh_map, (long)npf, (long)reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)&uh->uh_bdpwant, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	io = &uh->uh_uba->uba_map[reg];
	while (--npf != 0) {
		pfnum = pte->pg_pfnum;
		if (pfnum == 0)
			panic("uba zero uentry");
		pte++;
		*(int *)io++ = pfnum | temp;
	}
	*(int *)io++ = 0;	/* invalid entry fences runaway transfers */
	return (ubinfo);
}

/*
 * Non-buffer setup interface... set up a dummy buffer and call ubasetup().
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}
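
/*
 * For reference, the packing of the ubinfo word built by ubasetup()
 * above (ubavar.h may define equivalent unpacking macros):
 *
 *	bdp = (info >> 28) & 0xf;	buffered data path number
 *	npf = (info >> 18) & 0x3ff;	number of map registers
 *	reg = (info >> 9) & 0x1ff;	first map register (0-based)
 *	off = info & 0x1ff;		byte offset into first page
 *
 * The UNIBUS address of the mapped area is (reg << 9) | off, i.e.
 * the low 18 bits of info, which is what a driver loads into its
 * device address registers.
 */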

/*
 * Release resources on uba uban and then unblock resource waiters.
 * The caller passes a pointer to its saved map register info; we
 * copy and clear it at high ipl, since the space may also be
 * released asynchronously by a uba reset on 11/780's.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 */
	s = spluba();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
		case VAX_8600:
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);		/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)&uh->uh_bdpwant);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered,
	 * nor can the registers be freed twice.
	 * Unblock interrupts once this is done.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	rmfree(uh->uh_map, (long)npf, (long)reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
	 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)&uh->uh_mrwant);
	}
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}

ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
	case VAX_8600:
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}

ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, (long)NUBMREG, (long)1, "uba", UAMSIZ);
	switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
	case VAX_8600:
	case VAX_780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#if VAX750
	case VAX_750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
#if VAX730
	case VAX_730:
		break;
#endif
	}
}

/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spluba();
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	ubameminit(uban);
	for (cdp = cdevsw; cdp < cdevsw + nchrdev; cdp++)
		(*cdp->d_reset)(uban);
	ifubareset(uban);
	printf("\n");
	splx(s);
}
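
/*
 * A note on the reset protocol: ubainitmaps() above throws away all
 * outstanding map register and bdp allocations, so each d_reset entry
 * (and ifubareset() for network interfaces) is expected to clear any
 * saved ubinfo and set up fresh mappings before restarting transfers
 * that were in progress.  This asynchronous reclamation is what the
 * *amr check in ubarelse() guards against.
 */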

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are; they will never happen anyway.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if defined(VAX780) || defined(VAX8600)
	case VAX_8600:
	case VAX_780:
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX730
	case VAX_730:
#endif
#if defined(VAX750) || defined(VAX730)
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
		/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
		/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}

#if defined(VAX780) || defined(VAX8600)
int	ubawedgecnt = 10;
int	ubacrazy = 500;
int	zvcnt_max = 5000;	/* in 8 sec */
/*
 * This routine is called by the locore code to process a UBA
 * error on an 11/780 or 8600.  The arguments are passed
 * on the stack and are value-result (through some trickery).
 * In particular, the uvec argument is used for further
 * uba processing, so the result aspect of it is very important.
 * It must not be declared register.
 */
/*ARGSUSED*/
ubaerror(uban, uh, ipl, uvec, uba)
	register int uban;
	register struct uba_hd *uh;
	int ipl, uvec;
	register struct uba_regs *uba;
{
	register sr, s;

	if (uvec == 0) {
		/*
		 * Declare dt as unsigned so that negative values
		 * are handled as >8 below, in case time was set back.
		 */
		u_long	dt = time.tv_sec - uh->uh_zvtime;

		uh->uh_zvtotal++;
		if (dt > 8) {
			uh->uh_zvtime = time.tv_sec;
			uh->uh_zvcnt = 0;
		}
		if (++uh->uh_zvcnt > zvcnt_max) {
			printf("uba%d: too many zero vectors (%d in <%d sec)\n",
				uban, uh->uh_zvcnt, dt + 1);
			printf("\tIPL 0x%x\n\tcnfgr: %b  Adapter Code: 0x%x\n",
				ipl, uba->uba_cnfgr&(~0xff), UBACNFGR_BITS,
				uba->uba_cnfgr&0xff);
			printf("\tsr: %b\n\tdcr: %x (MIC %sOK)\n",
				uba->uba_sr, ubasr_bits, uba->uba_dcr,
				(uba->uba_dcr&0x8000000)?"":"NOT ");
			ubareset(uban);
		}
		return;
	}
	if (uba->uba_cnfgr & NEX_CFGFLT) {
		printf("uba%d: sbi fault sr=%b cnfgr=%b\n",
		    uban, uba->uba_sr, ubasr_bits,
		    uba->uba_cnfgr, NEXFLT_BITS);
		ubareset(uban);
		uvec = 0;
		return;
	}
	sr = uba->uba_sr;
	s = spluba();
	printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n",
	    uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar);
	splx(s);
	uba->uba_sr = sr;
	uvec &= UBABRRVR_DIV;
	if (++uh->uh_errcnt % ubawedgecnt == 0) {
		if (uh->uh_errcnt > ubacrazy)
			panic("uba crazy");
		printf("ERROR LIMIT ");
		ubareset(uban);
		uvec = 0;
		return;
	}
	return;
}
#endif
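
/*
 * A zero vector (the uvec == 0 case above) is a UNIBUS passive
 * release: the device withdrew its interrupt request before the
 * vector could be fetched.  Isolated occurrences are only counted;
 * a sustained storm usually means a confused device, so after
 * zvcnt_max of them inside an 8 second window the adapter is reset.
 */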

/*
 * Look for devices with unibus memory, allow them to configure, then disable
 * map registers as necessary.  Called during autoconfiguration and ubareset.
 * The device ubamem routine returns 0 on success, 1 on success if it is
 * fully configured (has no csr or interrupt, so doesn't need to be probed),
 * and -1 on failure.
 */
ubameminit(uban)
{
	register struct uba_device *ui;
	register struct uba_hd *uh = &uba_hd[uban];
	caddr_t umembase = umem[uban] + 0x3e000, addr;
#define	ubaoff(off)	((int)(off) & 0x1fff)

	uh->uh_lastmem = 0;
	for (ui = ubdinit; ui->ui_driver; ui++) {
		if (ui->ui_ubanum != uban && ui->ui_ubanum != '?')
			continue;
		if (ui->ui_driver->ud_ubamem) {
			/*
			 * During autoconfiguration, need to fudge ui_addr.
			 */
			addr = ui->ui_addr;
			ui->ui_addr = umembase + ubaoff(addr);
			switch ((*ui->ui_driver->ud_ubamem)(ui, uban)) {
			case 1:
				ui->ui_alive = 1;
				/* FALLTHROUGH */
			case 0:
				ui->ui_ubanum = uban;
				break;
			}
			ui->ui_addr = addr;
		}
	}
#if defined(VAX780) || defined(VAX8600)
	/*
	 * On a 780, throw away any map registers that fall below the
	 * last unibus memory, i.e. those disabled by rounding the map
	 * disable in the configuration register up to the next 8K
	 * boundary.
	 */
	if ((cpu == VAX_780) || (cpu == VAX_8600)) {
		register i;

		i = btop(((uh->uh_lastmem + 8191) / 8192) * 8192);
		while (i)
			(void) rmget(uh->uh_map, 1, i--);
	}
#endif
}

/*
 * Allocate UNIBUS memory.  Allocates and initializes
 * sufficient mapping registers for access.  On a 780,
 * the configuration register is set up to disable UBA
 * response on DMA transfers to addresses controlled
 * by the disabled mapping registers.
 * On a 780, this should only be called from ubameminit, or in ascending
 * order from 0 with 8K-sized and -aligned addresses; freeing memory that
 * isn't the last unibus memory would free unusable map registers.
 * Doalloc is 1 to allocate, 0 to deallocate.
 */
ubamem(uban, addr, npg, doalloc)
	int uban, addr, npg, doalloc;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int a;
	int s;

	a = (addr >> 9) + 1;
	s = spluba();
	if (doalloc)
		a = rmget(uh->uh_map, npg, a);
	else
		rmfree(uh->uh_map, (long)npg, (long)a);
	splx(s);
	if (a) {
		register int i, *m;

		m = (int *)&uh->uh_uba->uba_map[a - 1];
		for (i = 0; i < npg; i++)
			*m++ = 0;	/* All off, especially 'valid' */
		i = addr + npg * 512;
		if (doalloc && i > uh->uh_lastmem)
			uh->uh_lastmem = i;
		else if (doalloc == 0 && i == uh->uh_lastmem)
			uh->uh_lastmem = addr;
#if defined(VAX780) || defined(VAX8600)
		/*
		 * On a 780, set up the map register disable
		 * field in the configuration register.  Beware
		 * of callers that request memory ``out of order''
		 * or in sections other than 8K multiples.
		 * Ubameminit handles such requests properly, however.
		 */
		if ((cpu == VAX_780) || (cpu == VAX_8600)) {
			i = uh->uh_uba->uba_cr &~ 0x7c000000;
			i |= ((uh->uh_lastmem + 8191) / 8192) << 26;
			uh->uh_uba->uba_cr = i;
		}
#endif
	}
	return (a);
}

#include "ik.h"
#include "vs.h"
#if NIK > 0 || NVS > 0
/*
 * Map a virtual address into the user's address space.  Actually all
 * we do is set the protection bits of the particular page of memory
 * involved to allow user-mode writes.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}

unmaptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}
#endif
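
/*
 * Example (illustrative only): a driver exporting a page of device
 * memory to a user process might bracket the access with
 *
 *	maptouser(addr);
 *	... user process reads and writes the page ...
 *	unmaptouser(addr);
 *
 * where addr is a system virtual address (>= 0x80000000); only the
 * protection field of the page's Sysmap entry is changed.
 */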