1 /* uba.c 4.48 82/08/13 */ 2 3 #include "../h/param.h" 4 #include "../h/systm.h" 5 #include "../h/cpu.h" 6 #include "../h/map.h" 7 #include "../h/pte.h" 8 #include "../h/buf.h" 9 #include "../h/vm.h" 10 #include "../h/ubareg.h" 11 #include "../h/ubavar.h" 12 #include "../h/dir.h" 13 #include "../h/user.h" 14 #include "../h/proc.h" 15 #include "../h/conf.h" 16 #include "../h/mtpr.h" 17 #include "../h/nexus.h" 18 #include "../h/dk.h" 19 20 #if VAX780 21 char ubasr_bits[] = UBASR_BITS; 22 #endif 23 24 /* 25 * Do transfer on device argument. The controller 26 * and uba involved are implied by the device. 27 * We queue for resource wait in the uba code if necessary. 28 * We return 1 if the transfer was started, 0 if it was not. 29 * If you call this routine with the head of the queue for a 30 * UBA, it will automatically remove the device from the UBA 31 * queue before it returns. If some other device is given 32 * as argument, it will be added to the request queue if the 33 * request cannot be started immediately. This means that 34 * passing a device which is on the queue but not at the head 35 * of the request queue is likely to be a disaster. 
 */
ubago(ui)
	register struct uba_device *ui;
{
	register struct uba_ctlr *um = ui->ui_mi;
	register struct uba_hd *uh;
	register int s, unit;

	uh = &uba_hd[um->um_ubanum];
	s = spl6();
	/*
	 * An exclusive-use driver may not start while other transfers
	 * are active on the UBA, and nothing may start while an
	 * exclusive-use transfer holds the UBA.
	 */
	if (um->um_driver->ud_xclu && uh->uh_users > 0 || uh->uh_xclu)
		goto rwait;
	/*
	 * Map the first buffer on the controller's queue without
	 * sleeping; if map registers or a bdp are unavailable,
	 * queue the device to be retried from ubarelse().
	 */
	um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
	    UBA_NEEDBDP|UBA_CANTWAIT);
	if (um->um_ubinfo == 0)
		goto rwait;
	uh->uh_users++;
	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 1;
	splx(s);
	/* Charge the transfer to the iostat counters, if monitored. */
	if (ui->ui_dk >= 0) {
		unit = ui->ui_dk;
		dk_busy |= 1<<unit;
		dk_xfer[unit]++;
		/* byte count scaled by >>6, i.e. 64-byte units */
		dk_wds[unit] += um->um_tab.b_actf->b_actf->b_bcount>>6;
	}
	/*
	 * If the argument device was at the head of the UBA resource
	 * queue, remove it before starting the transfer (see the
	 * warning in the comment above).
	 */
	if (uh->uh_actf == ui)
		uh->uh_actf = ui->ui_forw;
	(*um->um_driver->ud_dgo)(um);
	return (1);
rwait:
	/* Append to the UBA resource wait queue, unless already at head. */
	if (uh->uh_actf != ui) {
		ui->ui_forw = NULL;
		if (uh->uh_actf == NULL)
			uh->uh_actf = ui;
		else
			uh->uh_actl->ui_forw = ui;
		uh->uh_actl = ui;
	}
	splx(s);
	return (0);
}

/*
 * Transfer on controller um is complete: release exclusive use,
 * drop the active-transfer count, and free the map resources
 * (ubarelse() will restart any queued devices).
 */
ubadone(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = &uba_hd[um->um_ubanum];

	if (um->um_driver->ud_xclu)
		uh->uh_xclu = 0;
	uh->uh_users--;
	ubarelse(um->um_ubanum, &um->um_ubinfo);
}

/*
 * Allocate and setup UBA map registers, and bdp's
 * Flags says whether bdp is needed, whether the caller can't
 * wait (e.g. if the caller is at interrupt level).
 *
 * Return value:
 *	Bits 0-8	Byte offset
 *	Bits 9-17	Start map reg. no.
 *	Bits 18-27	No. mapping reg's
 *	Bits 28-31	BDP no.
 */
ubasetup(uban, bp, flags)
	struct buf *bp;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int temp, i;
	int npf, reg, bdp;
	unsigned v;
	register struct pte *pte, *io;
	struct proc *rp;
	int a, o, ubinfo;

#if VAX730
	/* The 730 has no buffered data paths. */
	if (cpu == VAX_730)
		flags &= ~UBA_NEEDBDP;
#endif
	v = btop(bp->b_un.b_addr);
	o = (int)bp->b_un.b_addr & PGOFSET;
	/* One extra map register is taken for the trailing invalid entry. */
	npf = btoc(bp->b_bcount + o) + 1;
	a = spl6();
	/*
	 * Allocate a contiguous run of map registers, sleeping for
	 * them unless the caller can't wait.  Note that both the
	 * map-register and bdp waiters sleep on uh_map as the wait
	 * channel; ubarelse() wakes that channel.
	 */
	while ((reg = rmalloc(uh->uh_map, npf)) == 0) {
		if (flags & UBA_CANTWAIT) {
			splx(a);
			return (0);
		}
		uh->uh_mrwant++;
		sleep((caddr_t)uh->uh_map, PSWP);
	}
	bdp = 0;
	if (flags & UBA_NEEDBDP) {
		/* Take the lowest numbered free buffered data path. */
		while ((bdp = ffs(uh->uh_bdpfree)) == 0) {
			if (flags & UBA_CANTWAIT) {
				/* back out the map registers first */
				rmfree(uh->uh_map, npf, reg);
				splx(a);
				return (0);
			}
			uh->uh_bdpwant++;
			sleep((caddr_t)uh->uh_map, PSWP);
		}
		uh->uh_bdpfree &= ~(1 << (bdp-1));
	} else if (flags & UBA_HAVEBDP)
		/* Caller already owns a bdp, passed in the top flag bits. */
		bdp = (flags >> 28) & 0xf;
	splx(a);
	reg--;
	/* Pack bdp, register count, start register, offset (see above). */
	ubinfo = (bdp << 28) | (npf << 18) | (reg << 9) | o;
	temp = (bdp << 21) | UBAMR_MRV;
	if (bdp && (o & 01))
		temp |= UBAMR_BO;	/* transfer begins at an odd byte */
	/*
	 * Locate the page tables describing the buffer: kernel
	 * virtual memory, the u. area, the page tables themselves,
	 * or ordinary user virtual memory.
	 */
	rp = bp->b_flags&B_DIRTY ? &proc[2] : bp->b_proc;
	if ((bp->b_flags & B_PHYS) == 0)
		pte = &Sysmap[btop(((int)bp->b_un.b_addr)&0x7fffffff)];
	else if (bp->b_flags & B_UAREA)
		pte = &rp->p_addr[v];
	else if (bp->b_flags & B_PAGET)
		pte = &Usrptmap[btokmx((struct pte *)bp->b_un.b_addr)];
	else
		pte = vtopte(rp, v);
	/*
	 * Load the map registers, leaving an invalid entry at the
	 * end to guard against wild unibus transfers.
	 */
	io = &uh->uh_uba->uba_map[reg];
	while (--npf != 0) {
		if (pte->pg_pfnum == 0)
			panic("uba zero uentry");
		*(int *)io++ = pte++->pg_pfnum | temp;
	}
	*(int *)io++ = 0;
	return (ubinfo);
}

/*
 * Non buffer setup interface... set up a buffer and call ubasetup.
 */
uballoc(uban, addr, bcnt, flags)
	int uban;
	caddr_t addr;
	int bcnt, flags;
{
	struct buf ubabuf;

	ubabuf.b_un.b_addr = addr;
	ubabuf.b_flags = B_BUSY;
	ubabuf.b_bcount = bcnt;
	/* that's all the fields ubasetup() needs */
	return (ubasetup(uban, &ubabuf, flags));
}

/*
 * Release resources on uba uban, and then unblock resource waiters.
 * The map register parameter is by value since we need to block
 * against uba resets on 11/780's.
 */
ubarelse(uban, amr)
	int *amr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int bdp, reg, npf, s;
	int mr;

	/*
	 * Carefully see if we should release the space, since
	 * it may be released asynchronously at uba reset time.
	 * The fetch-and-clear of *amr must be atomic with respect
	 * to interrupts, hence the spl6 bracket.
	 */
	s = spl6();
	mr = *amr;
	if (mr == 0) {
		/*
		 * A ubareset() occurred before we got around
		 * to releasing the space... no need to bother.
		 */
		splx(s);
		return;
	}
	*amr = 0;
	splx(s);	/* let interrupts in, we're safe for a while */
	bdp = (mr >> 28) & 0x0f;
	if (bdp) {
		/* Purge the buffered data path before freeing it. */
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		uh->uh_bdpfree |= 1 << (bdp-1);	/* atomic */
		if (uh->uh_bdpwant) {
			uh->uh_bdpwant = 0;
			wakeup((caddr_t)uh->uh_map);
		}
	}
	/*
	 * Put back the registers in the resource map.
	 * The map code must not be reentered, so we do this
	 * at high ipl.
	 */
	npf = (mr >> 18) & 0x3ff;
	reg = ((mr >> 9) & 0x1ff) + 1;
	s = spl6();
	rmfree(uh->uh_map, npf, reg);
	splx(s);

	/*
	 * Wakeup sleepers for map registers,
	 * and also, if there are processes blocked in dgo(),
	 * give them a chance at the UNIBUS.
 */
	if (uh->uh_mrwant) {
		uh->uh_mrwant = 0;
		wakeup((caddr_t)uh->uh_map);
	}
	/* Restart queued devices until one fails to get resources. */
	while (uh->uh_actf && ubago(uh->uh_actf))
		;
}

/*
 * Initiate a purge of the buffered data path in use by
 * controller um, pushing any bytes still buffered in the
 * adapter out to memory.
 */
ubapurge(um)
	register struct uba_ctlr *um;
{
	register struct uba_hd *uh = um->um_hd;
	register int bdp = (um->um_ubinfo >> 28) & 0x0f;

	switch (cpu) {
#if VAX780
	case VAX_780:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
		break;
#endif
#if VAX750
	case VAX_750:
		uh->uh_uba->uba_dpr[bdp] |= UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
		break;
#endif
	}
}

/*
 * Initialize the UNIBUS resource maps for uba uhp: the map
 * register resource map and the mask of free buffered data
 * paths (the 730 has none).
 */
ubainitmaps(uhp)
	register struct uba_hd *uhp;
{

	rminit(uhp->uh_map, NUBMREG, 1, "uba", UAMSIZ);
	switch (cpu) {
#if VAX780
	case VAX_780:
		uhp->uh_bdpfree = (1<<NBDP780) - 1;
		break;
#endif
#if VAX750
	case VAX_750:
		uhp->uh_bdpfree = (1<<NBDP750) - 1;
		break;
#endif
#if VAX730
	case VAX_730:
		break;
#endif
	}
}

/*
 * Generate a reset on uba number uban.  Then
 * call each device in the character device table,
 * giving it a chance to clean up so as to be able to continue.
 */
ubareset(uban)
	int uban;
{
	register struct cdevsw *cdp;
	register struct uba_hd *uh = &uba_hd[uban];
	int s;

	s = spl6();
	/* Discard all soft state; any outstanding ubinfo becomes invalid. */
	uh->uh_users = 0;
	uh->uh_zvcnt = 0;
	uh->uh_xclu = 0;
	uh->uh_hangcnt = 0;
	uh->uh_actf = uh->uh_actl = 0;
	uh->uh_bdpwant = 0;
	uh->uh_mrwant = 0;
	ubainitmaps(uh);
	/*
	 * NOTE(review): ubasetup() sleeps on uh->uh_map as the wait
	 * channel, but these wakeups use the addresses of the want
	 * flags — verify that resource sleepers are actually
	 * awakened after a reset.
	 */
	wakeup((caddr_t)&uh->uh_bdpwant);
	wakeup((caddr_t)&uh->uh_mrwant);
	printf("uba%d: reset", uban);
	ubainit(uh->uh_uba);
	/* Give every character device driver a chance to recover. */
	for (cdp = cdevsw; cdp->d_open; cdp++)
		(*cdp->d_reset)(uban);
#ifdef INET
	ifubareset(uban);
#endif
	printf("\n");
	splx(s);
}

/*
 * Init a uba.  This is called with a pointer
 * rather than a virtual address since it is called
 * by code which runs with memory mapping disabled.
 * In these cases we really don't need the interrupts
 * enabled, but since we run with ipl high, we don't care
 * if they are, they will never happen anyways.
 */
ubainit(uba)
	register struct uba_regs *uba;
{

	switch (cpu) {
#if VAX780
	case VAX_780:
		/*
		 * Fire the adapter init, re-enable interrupt fielding
		 * and error interrupts, then busy-wait for the
		 * initialization-complete bit.
		 */
		uba->uba_cr = UBACR_ADINIT;
		uba->uba_cr = UBACR_IFS|UBACR_BRIE|UBACR_USEFIE|UBACR_SUEFIE;
		while ((uba->uba_cnfgr & UBACNFGR_UBIC) == 0)
			;
		break;
#endif
#if VAX750
	case VAX_750:
#endif
#if VAX730
	case VAX_730:
#endif
#if defined(VAX750) || defined(VAX730)
		/* Unibus init is done via a processor register here. */
		mtpr(IUR, 0);
		/* give devices time to recover from power fail */
		/* THIS IS PROBABLY UNNECESSARY */
		DELAY(500000);
		/* END PROBABLY UNNECESSARY */
		break;
#endif
	}
}

#if VAX780
/*
 * Check to make sure the UNIBUS adaptor is not hung,
 * with an interrupt in the register to be presented,
 * but not presenting it for an extended period (5 seconds).
 */
unhang()
{
	register int uban;

	for (uban = 0; uban < numuba; uban++) {
		register struct uba_hd *uh = &uba_hd[uban];
		register struct uba_regs *up = uh->uh_uba;

		/*
		 * NOTE(review): this returns rather than continues
		 * when a quiescent uba is found, so any later ubas
		 * are not checked on this pass — confirm intended.
		 */
		if (up->uba_sr == 0)
			return;
		up->uba_sr = UBASR_CRD|UBASR_LEB;
		uh->uh_hangcnt++;
		/* Pending for more than ~5 seconds worth of calls: reset. */
		if (uh->uh_hangcnt > 5*hz) {
			uh->uh_hangcnt = 0;
			printf("uba%d: hung\n", uban);
			ubareset(uban);
		}
	}
}

/*
 * This is a timeout routine which decrements the ``i forgot to
 * interrupt'' counts, on an 11/780.  This prevents slowly growing
 * counts from causing a UBA reset since we are interested only
 * in hang situations.
403 */ 404 ubawatch() 405 { 406 register struct uba_hd *uh; 407 register int uban; 408 409 if (panicstr) 410 return; 411 for (uban = 0; uban < numuba; uban++) { 412 uh = &uba_hd[uban]; 413 if (uh->uh_hangcnt) 414 uh->uh_hangcnt--; 415 } 416 } 417 418 int ubawedgecnt = 10; 419 int ubacrazy = 500; 420 /* 421 * This routine is called by the locore code to 422 * process a UBA error on an 11/780. The arguments are passed 423 * on the stack, and value-result (through some trickery). 424 * In particular, the uvec argument is used for further 425 * uba processing so the result aspect of it is very important. 426 * It must not be declared register. 427 */ 428 /*ARGSUSED*/ 429 ubaerror(uban, uh, xx, uvec, uba) 430 register int uban; 431 register struct uba_hd *uh; 432 int uvec; 433 register struct uba_regs *uba; 434 { 435 register sr, s; 436 437 if (uvec == 0) { 438 uh->uh_zvcnt++; 439 if (uh->uh_zvcnt > 250000) { 440 printf("uba%d: too many zero vectors\n"); 441 ubareset(uban); 442 } 443 uvec = 0; 444 return; 445 } 446 if (uba->uba_cnfgr & NEX_CFGFLT) { 447 printf("uba%d: sbi fault sr=%b cnfgr=%b\n", 448 uban, uba->uba_sr, ubasr_bits, 449 uba->uba_cnfgr, NEXFLT_BITS); 450 ubareset(uban); 451 uvec = 0; 452 return; 453 } 454 sr = uba->uba_sr; 455 s = spl7(); 456 printf("uba%d: uba error sr=%b fmer=%x fubar=%o\n", 457 uban, uba->uba_sr, ubasr_bits, uba->uba_fmer, 4*uba->uba_fubar); 458 splx(s); 459 uba->uba_sr = sr; 460 uvec &= UBABRRVR_DIV; 461 if (++uh->uh_errcnt % ubawedgecnt == 0) { 462 if (uh->uh_errcnt > ubacrazy) 463 panic("uba crazy"); 464 printf("ERROR LIMIT "); 465 ubareset(uban); 466 uvec = 0; 467 return; 468 } 469 return; 470 } 471 #endif 472 473 #ifdef notdef 474 /* 475 * This routine allows remapping of previously 476 * allocated UNIBUS bdp and map resources 477 * onto different memory addresses. 478 * It should only be used by routines which need 479 * small fixed length mappings for long periods of time 480 * (like the ARPANET ACC IMP interface). 
 * It only maps kernel addresses.
 */
ubaremap(uban, ubinfo, addr)
	int uban;
	register unsigned ubinfo;
	caddr_t addr;
{
	register struct uba_hd *uh = &uba_hd[uban];
	register struct pte *pte, *io;
	register int temp, bdp;
	int npf, o;

	/* Decode the fields packed into ubinfo by ubasetup(). */
	o = (int)addr & PGOFSET;
	bdp = (ubinfo >> 28) & 0xf;
	npf = (ubinfo >> 18) & 0x3ff;
	io = &uh->uh_uba->uba_map[(ubinfo >> 9) & 0x1ff];
	temp = (bdp << 21) | UBAMR_MRV;

	/*
	 * If using buffered data path initiate purge
	 * of old data and set byte offset bit if next
	 * transfer will be from odd address.
	 */
	if (bdp) {
		switch (cpu) {
#if VAX780
		case VAX_780:
			uh->uh_uba->uba_dpr[bdp] |= UBADPR_BNE;
			break;
#endif
#if VAX750
		case VAX_750:
			uh->uh_uba->uba_dpr[bdp] |=
			    UBADPR_PURGE|UBADPR_NXM|UBADPR_UCE;
			break;
#endif
		}
		if (o & 1)
			temp |= UBAMR_BO;
	}

	/*
	 * Set up the map registers, leaving an invalid reg
	 * at the end to guard against wild unibus transfers.
	 */
	pte = &Sysmap[btop(((int)addr)&0x7fffffff)];
	while (--npf != 0)
		*(int *)io++ = pte++->pg_pfnum | temp;
	*(int *)io = 0;

	/*
	 * Return effective UNIBUS address.
	 */
	return (ubinfo | o);
}
#endif

/*
 * This routine is called by a driver for a device with on-board Unibus
 * memory.  It removes the memory block from the Unibus resource map
 * and clears the map registers for the block.
 *
 * Arguments are the Unibus number, the Unibus address of the memory
 * block, its size in blocks of 512 bytes, and a flag indicating whether
 * to allocate the unibus space from the resource map or whether it already
 * has been.
 *
 * Returns > 0 if successful, 0 if not.
 */

ubamem(uban, addr, size, alloc)
{
	register struct uba_hd *uh = &uba_hd[uban];
	register int *m;
	register int i, a, s;

	if (alloc) {
		/* Reserve the map registers covering the address range. */
		s = spl6();
		a = rmget(uh->uh_map, size, (addr>>9)+1); /* starts at ONE! */
		splx(s);
	} else
		a = (addr>>9)+1;
	if (a) {
		/* Invalidate every map register for the block. */
		m = (int *) &uh->uh_uba->uba_map[a-1];
		for (i=0; i<size; i++)
			*m++ = 0;	/* All off, especially 'valid' */
#if VAX780
		if (cpu == VAX_780) {		/* map disable */
			/*
			 * NOTE(review): appears to disable adapter
			 * mapping for the 8K chunks covering the block
			 * via the high bits of uba_cr — confirm against
			 * the DW780 control register layout.
			 */
			i = (addr+size*512+8191)/8192;
			uh->uh_uba->uba_cr |= i<<26;
		}
#endif
	}
	return(a);
}

/*
 * Map a virtual address into users address space.  Actually all we
 * do is turn on the user mode write protection bits for the particular
 * page of memory involved.
 */
maptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_UW>>27);
}

/*
 * Undo a maptouser(): restore kernel-write-only protection on the
 * page containing vaddress.
 */
unmaptouser(vaddress)
	caddr_t vaddress;
{

	Sysmap[(((unsigned)(vaddress))-0x80000000) >> 9].pg_prot = (PG_KW>>27);
}