/*	tm.c	4.16	02/23/81	*/

#include "tm.h"
#if NTM > 0
/*
 * TM11/TE10 tape driver
 *
 * THIS DRIVER HAS NOT BEEN TESTED WITH MORE THAN ONE TRANSPORT.
 */
#define	DELAY(N)	{ register int d = N; while (--d > 0); }
#include "../h/param.h"
#include "../h/buf.h"
#include "../h/dir.h"
#include "../h/conf.h"
#include "../h/user.h"
#include "../h/file.h"
#include "../h/map.h"
#include "../h/pte.h"
#include "../h/vm.h"
#include "../h/uba.h"
#include "../h/mtio.h"
#include "../h/ioctl.h"
#include "../h/cmap.h"
#include "../h/cpu.h"

#include "../h/tmreg.h"

struct	buf	ctmbuf[NTE];
struct	buf	rtmbuf[NTE];

int	tmprobe(), tmslave(), tmattach(), tmdgo(), tmintr();
struct	uba_minfo *tmminfo[NTM];
struct	uba_dinfo *tmdinfo[NTE];
struct	buf tmutab[NTE];
#ifdef notyet
struct	uba_dinfo *tmip[NTM][4];
#endif
u_short	tmstd[] = { 0772520, 0 };
struct	uba_driver tmdriver =
	{ tmprobe, tmslave, tmattach, tmdgo, tmstd, "te", tmdinfo, "tm", tmminfo, 0 };

/* bits in minor device */
#define	TMUNIT(dev)	(minor(dev)&03)
#define	T_NOREWIND	04
#define	T_1600BPI	010

#define	INF	(daddr_t)1000000L
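
/*
 * Illustrative sketch (not part of the original driver) of how the
 * minor device bits above decode; the device names are hypothetical
 * examples only.
 *
 *	minor 00  (e.g. /dev/mt0):	unit 0, rewind on close,	800 bpi
 *	minor 05  (e.g. /dev/nmt1):	unit 1, no rewind on close,	800 bpi
 *	minor 013:			unit 3, no rewind on close,	1600 bpi
 */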

/*
 * Software state per tape transport.
 */
struct	tm_softc {
	char	sc_openf;	/* lock against multiple opens */
	char	sc_lastiow;	/* last op was a write */
	daddr_t	sc_blkno;	/* block number, for block device tape */
	daddr_t	sc_nxrec;	/* desired block position */
	u_short	sc_erreg;	/* copy of last erreg */
	u_short	sc_dsreg;	/* copy of last dsreg */
	short	sc_resid;	/* copy of last bc */
} tm_softc[NTE];

/*
 * States for um->um_tab.b_active, the
 * per controller state flag.
 */
#define	SSEEK	1		/* seeking */
#define	SIO	2		/* doing seq i/o */
#define	SCOM	3		/* sending control command */
#define	SREW	4		/* sending a drive rewind */

/* WE CURRENTLY HANDLE REWINDS PRIMITIVELY, BUSYING OUT THE CONTROLLER */
/* DURING THE REWIND... IF WE EVER GET TWO TRANSPORTS, WE CAN DEBUG MORE */
/* SOPHISTICATED LOGIC... THIS SIMPLE CODE AT LEAST MAY WORK. */

/*
 * Determine if there is a controller for
 * a tm at address reg.  Our goal is to make the
 * device interrupt.
 */
tmprobe(reg)
	caddr_t reg;
{
	register int br, cvec;

#ifdef lint
	br = 0; br = cvec; cvec = br;
#endif
	((struct device *)reg)->tmcs = TM_IE;
	/*
	 * If this is a tm11, it ought to have interrupted
	 * by now; if it hasn't (i.e. it is a ts04) then we just
	 * hope that it didn't interrupt, so autoconf will ignore it.
	 * Just in case, we will reference one
	 * of the more distant registers, and hope for a machine
	 * check, or similar disaster, if this is a ts.
	 *
	 * Note: on an 11/780, badaddr will just generate
	 * a uba error for a ts; but our caller will notice that
	 * so we won't check for it.
	 */
	if (badaddr(&((struct device *)reg)->tmrd, 2))
		return (0);
	return (1);
}

/*
 * Due to a design flaw, we cannot ascertain if the tape
 * exists or not unless it is on line - i.e. unless a tape is
 * mounted.  This is too severe a restriction to bear,
 * so all units are assumed to exist.
 */
/*ARGSUSED*/
tmslave(ui, reg)
	struct uba_dinfo *ui;
	caddr_t reg;
{

	return (1);
}

/*
 * Record attachment of the unit to the controller port.
 */
/*ARGSUSED*/
tmattach(ui)
	struct uba_dinfo *ui;
{

#ifdef notyet
	tmip[ui->ui_ctlr][ui->ui_slave] = ui;
#endif
}

/*
 * Open the device.  Tapes are unique open
 * devices, so we refuse if it is already open.
 * We also check that a tape is available, and
 * don't block waiting here.
 */
tmopen(dev, flag)
	dev_t dev;
	int flag;
{
	register int unit;
	register struct uba_dinfo *ui;
	register struct tm_softc *sc;

	unit = TMUNIT(dev);
	if (unit >= NTE || (sc = &tm_softc[unit])->sc_openf ||
	    (ui = tmdinfo[unit]) == 0 || ui->ui_alive == 0) {
		u.u_error = ENXIO;
		return;
	}
	tmcommand(dev, TM_SENSE, 1);
	if ((sc->sc_erreg&(TM_SELR|TM_TUR)) != (TM_SELR|TM_TUR)) {
		uprintf("tape not online\n");
		u.u_error = EIO;
		return;
	}
	if ((flag&(FREAD|FWRITE)) == FWRITE && sc->sc_erreg&TM_WRL) {
		uprintf("tape write protected\n");
		u.u_error = EIO;
		return;
	}
	sc->sc_blkno = (daddr_t)0;
	sc->sc_nxrec = INF;
	sc->sc_lastiow = 0;
	sc->sc_openf = 1;
	return;
}

/*
 * Close tape device.
 *
 * If tape was open for writing or the last operation was
 * a write, then write two EOF's and backspace over the last one.
 * Unless this is a non-rewinding special file, rewind the tape.
 * Make the tape available to others.
 */
tmclose(dev, flag)
	register dev_t dev;
	register flag;
{
	register struct tm_softc *sc = &tm_softc[TMUNIT(dev)];

	if (flag == FWRITE || ((flag&FWRITE) && sc->sc_lastiow)) {
		tmcommand(dev, TM_WEOF, 1);
		tmcommand(dev, TM_WEOF, 1);
		tmcommand(dev, TM_SREV, 1);
	}
	if ((minor(dev)&T_NOREWIND) == 0)
		tmcommand(dev, TM_REW, 1);
	sc->sc_openf = 0;
}

/*
 * Execute a command on the tape drive
 * a specified number of times.
 */
tmcommand(dev, com, count)
	dev_t dev;
	int com, count;
{
	register struct buf *bp;

	bp = &ctmbuf[TMUNIT(dev)];
	(void) spl5();
	while (bp->b_flags&B_BUSY) {
		bp->b_flags |= B_WANTED;
		sleep((caddr_t)bp, PRIBIO);
	}
	bp->b_flags = B_BUSY|B_READ;
	(void) spl0();
	bp->b_dev = dev;
	bp->b_repcnt = -count;
	bp->b_command = com;
	bp->b_blkno = 0;
	tmstrategy(bp);
	iowait(bp);
	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	bp->b_flags &= B_ERROR;
}
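
/*
 * Sketch of the repeat-count convention used above (illustrative only):
 * tmcommand() stores the negated count, so tmcommand(dev, TM_SFORW, 3)
 * leaves bp->b_repcnt == -3.  tmstart() loads that value into the tmbc
 * register for TM_SFORW/TM_SREV and the controller counts it up toward
 * zero, spacing 3 records.  For commands the hardware does not repeat,
 * tmintr() simply reissues the command while ++bp->b_repcnt is still
 * negative.
 */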

/*
 * Decipher a tape operation and do what is needed
 * to see that it happens.
 */
tmstrategy(bp)
	register struct buf *bp;
{
	int unit = TMUNIT(bp->b_dev);
	register struct uba_minfo *um;
	register struct buf *dp;

	/*
	 * Put transfer at end of unit queue
	 */
	dp = &tmutab[unit];
	bp->av_forw = NULL;
	(void) spl5();
	um = tmdinfo[unit]->ui_mi;
	if (dp->b_actf == NULL) {
		dp->b_actf = bp;
		/*
		 * Transport not already active...
		 * put at end of controller queue.
		 */
		dp->b_forw = NULL;
		if (um->um_tab.b_actf == NULL)
			um->um_tab.b_actf = dp;
		else
			um->um_tab.b_actl->b_forw = dp;
		um->um_tab.b_actl = dp;
	} else
		dp->b_actl->av_forw = bp;
	dp->b_actl = bp;
	/*
	 * If the controller is not busy, get
	 * it going.
	 */
	if (um->um_tab.b_active == 0)
		tmstart(um);
	(void) spl0();
}

/*
 * Start activity on a tm controller.
 */
tmstart(um)
	register struct uba_minfo *um;
{
	register struct buf *bp, *dp;
	register struct device *addr = (struct device *)um->um_addr;
	register struct tm_softc *sc;
	register struct uba_dinfo *ui;
	int unit, cmd;
	daddr_t blkno;

	/*
	 * Look for an idle transport on the controller.
	 */
loop:
	if ((dp = um->um_tab.b_actf) == NULL)
		return;
	if ((bp = dp->b_actf) == NULL) {
		um->um_tab.b_actf = dp->b_forw;
		goto loop;
	}
	unit = TMUNIT(bp->b_dev);
	ui = tmdinfo[unit];
	/*
	 * Record pre-transfer status (e.g. for TM_SENSE)
	 */
	sc = &tm_softc[unit];
	addr->tmcs = (ui->ui_slave << 8);
	sc->sc_dsreg = addr->tmcs;
	sc->sc_erreg = addr->tmer;
	sc->sc_resid = addr->tmbc;
	/*
	 * Default is that last command was NOT a write command;
	 * if we do a write command we will notice this in tmintr().
	 */
	sc->sc_lastiow = 0;
	if (sc->sc_openf < 0 || (addr->tmcs&TM_CUR) == 0) {
		/*
		 * Have had a hard error on this (non-raw) tape,
		 * or the tape unit is now unavailable (e.g. taken off
		 * line).
		 */
		bp->b_flags |= B_ERROR;
		goto next;
	}
	/*
	 * If operation is not a control operation,
	 * check for boundary conditions.
	 */
	if (bp != &ctmbuf[unit]) {
		if (dbtofsb(bp->b_blkno) > sc->sc_nxrec) {
			bp->b_flags |= B_ERROR;
			bp->b_error = ENXIO;		/* past EOF */
			goto next;
		}
		if (dbtofsb(bp->b_blkno) == sc->sc_nxrec &&
		    bp->b_flags&B_READ) {
			bp->b_resid = bp->b_bcount;
			clrbuf(bp);			/* at EOF */
			goto next;
		}
		if ((bp->b_flags&B_READ) == 0)
			/* write sets EOF */
			sc->sc_nxrec = dbtofsb(bp->b_blkno) + 1;
	}
	/*
	 * Set up the command, and then if this is a mt ioctl,
	 * do the operation using, for TM_SFORW and TM_SREV, the specified
	 * operation count.
	 */
	cmd = TM_IE | TM_GO | (ui->ui_slave << 8);
	if ((minor(bp->b_dev) & T_1600BPI) == 0)
		cmd |= TM_D800;
	if (bp == &ctmbuf[unit]) {
		if (bp->b_command == TM_SENSE)
			goto next;
		cmd |= bp->b_command;
		um->um_tab.b_active =
		    bp->b_command == TM_REW ? SREW : SCOM;
		if (bp->b_command == TM_SFORW || bp->b_command == TM_SREV)
			addr->tmbc = bp->b_repcnt;
		addr->tmcs = cmd;
		return;
	}
	/*
	 * If the tape is positioned at the block the data transfer
	 * wants, set up all the registers except the csr, and give
	 * control over to the UNIBUS adapter routines, to
	 * wait for resources to start the i/o.
	 */
	if ((blkno = sc->sc_blkno) == dbtofsb(bp->b_blkno)) {
		addr->tmbc = -bp->b_bcount;
		if ((bp->b_flags&B_READ) == 0) {
			if (um->um_tab.b_errcnt)
				cmd |= TM_WIRG;
			else
				cmd |= TM_WCOM;
		} else
			cmd |= TM_RCOM;
		um->um_tab.b_active = SIO;
		um->um_cmd = cmd;
		ubago(ui);
		return;
	}
	/*
	 * Block tape positioned incorrectly;
	 * seek forwards or backwards to the correct spot.
	 */
	um->um_tab.b_active = SSEEK;
	if (blkno < dbtofsb(bp->b_blkno)) {
		cmd |= TM_SFORW;
		addr->tmbc = blkno - dbtofsb(bp->b_blkno);
	} else {
		cmd |= TM_SREV;
		addr->tmbc = dbtofsb(bp->b_blkno) - blkno;
	}
	addr->tmcs = cmd;
	return;

next:
	/*
	 * Done with this operation due to error or
	 * the fact that it doesn't do anything.
	 * Release UBA resources (if any), dequeue
	 * the transfer and continue processing this slave.
	 */
	if (um->um_ubinfo)
		ubadone(um);
	um->um_tab.b_errcnt = 0;
	dp->b_actf = bp->av_forw;
	iodone(bp);
	goto loop;
}
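
/*
 * Worked example of the positioning code above (illustrative only):
 * suppose sc_blkno is 3 and the request is for block 7.  Since 3 < 7,
 * tmstart() issues TM_SFORW with tmbc = 3 - 7 = -4; the drive spaces
 * forward as the count rises to zero, and the SSEEK state in tmintr()
 * then sets sc_blkno to 7 and restarts the transfer.
 */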

/*
 * The UNIBUS resources we needed have been
 * allocated to us; start the device.
 */
tmdgo(um)
	register struct uba_minfo *um;
{
	register struct device *addr = (struct device *)um->um_addr;

	addr->tmba = um->um_ubinfo;
	addr->tmcs = um->um_cmd | ((um->um_ubinfo >> 12) & 0x30);
}

/*
 * Tm interrupt routine.
 */
/*ARGSUSED*/
tmintr(tm11)
	int tm11;
{
	struct buf *dp;
	register struct buf *bp;
	register struct uba_minfo *um = tmminfo[tm11];
	register struct device *addr = (struct device *)tmdinfo[tm11]->ui_addr;
	register struct tm_softc *sc;
	int unit;
	register state;

	/*
	 * If last command was a rewind, and tape is still
	 * rewinding, wait for the rewind complete interrupt.
	 */
	if (um->um_tab.b_active == SREW) {
		um->um_tab.b_active = SCOM;
		if (addr->tmer&TM_RWS)
			return;
	}
	/*
	 * An operation completed... record status
	 */
	if ((dp = um->um_tab.b_actf) == NULL)
		return;
	bp = dp->b_actf;
	unit = TMUNIT(bp->b_dev);
	sc = &tm_softc[unit];
	sc->sc_dsreg = addr->tmcs;
	sc->sc_erreg = addr->tmer;
	sc->sc_resid = addr->tmbc;
	if ((bp->b_flags & B_READ) == 0)
		sc->sc_lastiow = 1;
	state = um->um_tab.b_active;
	um->um_tab.b_active = 0;
	/*
	 * Check for errors.
	 */
	if (addr->tmcs&TM_ERR) {
		while (addr->tmer & TM_SDWN)
			;			/* await settle down */
		/*
		 * If we hit a tape mark update our position.
		 */
		if (addr->tmer&TM_EOF) {
			tmseteof(bp);		/* set blkno and nxrec */
			state = SCOM;		/* force completion */
			/*
			 * Stuff bc so it will be unstuffed correctly
			 * later to get resid.
			 */
			addr->tmbc = -bp->b_bcount;
			goto opdone;
		}
		/*
		 * If we were reading and the only error was that the
		 * record was too long, then we don't consider this an error.
		 */
		if ((bp->b_flags&B_READ) &&
		    (addr->tmer&(TM_HARD|TM_SOFT)) == TM_RLE)
			goto ignoreerr;
		/*
		 * If error is not hard, and this was an i/o operation,
		 * retry up to 8 times.
		 */
		if ((addr->tmer&TM_HARD) == 0 && state == SIO) {
			if (++um->um_tab.b_errcnt < 7) {
				/* SHOULD CHECK THAT RECOVERY WORKS IN THIS CASE */
				/* AND THEN ONLY PRINT IF errcnt==7 */
				if ((addr->tmer&TM_SOFT) == TM_NXM)
					printf("TM UBA late error\n");
				sc->sc_blkno++;
				ubadone(um);
				goto opcont;
			}
		} else
			/*
			 * Hard or non-i/o errors on non-raw tape
			 * cause it to close.
			 */
			if (sc->sc_openf > 0 && bp != &rtmbuf[unit])
				sc->sc_openf = -1;
		/*
		 * Couldn't recover error
		 */
		deverror(bp, sc->sc_erreg, sc->sc_dsreg);
		bp->b_flags |= B_ERROR;
		goto opdone;
	}
	/*
	 * Advance tape control FSM.
	 */
ignoreerr:
	switch (state) {

	case SIO:
		/*
		 * Read/write increments tape block number
		 */
		sc->sc_blkno++;
		goto opdone;

	case SCOM:
		/*
		 * Unless special operation, op completed.
		 */
		if (bp != &ctmbuf[unit])
			goto opdone;
		/*
		 * Operation on block device...
		 * iterate operations which don't repeat
		 * for themselves in the hardware; for forward/
		 * backward space record update the current position.
		 */
		switch (bp->b_command) {

		case TM_SFORW:
			sc->sc_blkno -= bp->b_repcnt;
			goto opdone;

		case TM_SREV:
			sc->sc_blkno += bp->b_repcnt;
			goto opdone;

		default:
			if (++bp->b_repcnt < 0)
				goto opcont;
			goto opdone;
		}

	case SSEEK:
		sc->sc_blkno = dbtofsb(bp->b_blkno);
		goto opcont;

	default:
		panic("tmintr");
	}
opdone:
	/*
	 * Reset error count and remove
	 * from device queue.
	 */
	um->um_tab.b_errcnt = 0;
	dp->b_actf = bp->av_forw;
	bp->b_resid = -addr->tmbc;
	ubadone(um);
	iodone(bp);
	/*
	 * Circulate slave to end of controller
	 * queue to give other slaves a chance.
	 */
	um->um_tab.b_actf = dp->b_forw;
	if (dp->b_actf) {
		dp->b_forw = NULL;
		if (um->um_tab.b_actf == NULL)
			um->um_tab.b_actf = dp;
		else
			um->um_tab.b_actl->b_forw = dp;
		um->um_tab.b_actl = dp;
	}
	if (um->um_tab.b_actf == 0)
		return;
opcont:
	tmstart(um);
}

/*
 * A tape mark was hit; update the recorded position
 * and end-of-file location for the transport.
 */
tmseteof(bp)
	register struct buf *bp;
{
	register int unit = TMUNIT(bp->b_dev);
	register struct device *addr =
	    (struct device *)tmdinfo[unit]->ui_addr;
	register struct tm_softc *sc = &tm_softc[unit];

	if (bp == &ctmbuf[unit]) {
		if (sc->sc_blkno > dbtofsb(bp->b_blkno)) {
			/* reversing */
			sc->sc_nxrec = dbtofsb(bp->b_blkno) - addr->tmbc;
			sc->sc_blkno = sc->sc_nxrec;
		} else {
			/* spacing forward */
			sc->sc_blkno = dbtofsb(bp->b_blkno) + addr->tmbc;
			sc->sc_nxrec = sc->sc_blkno - 1;
		}
		return;
	}
	/* eof on read */
	sc->sc_nxrec = dbtofsb(bp->b_blkno);
}

/*
 * Raw (character) device read and write, done through physio.
 */
tmread(dev)
	dev_t dev;
{

	tmphys(dev);
	physio(tmstrategy, &rtmbuf[TMUNIT(dev)], dev, B_READ, minphys);
}

tmwrite(dev)
	dev_t dev;
{

	tmphys(dev);
	physio(tmstrategy, &rtmbuf[TMUNIT(dev)], dev, B_WRITE, minphys);
}

/*
 * Set the tape position from the current file offset
 * before a raw transfer.
 */
tmphys(dev)
	dev_t dev;
{
	register daddr_t a;
	register struct tm_softc *sc = &tm_softc[TMUNIT(dev)];

	a = dbtofsb(u.u_offset >> 9);
	sc->sc_blkno = a;
	sc->sc_nxrec = a + 1;
}

/*
 * Reset controller and drive state after a UNIBUS reset:
 * release any UBA resources, clear the controller, put the
 * drive queues back on the controller queue and restart.
 */
tmreset(uban)
	int uban;
{
	int printed = 0;
	register struct uba_minfo *um;
	register tm11, unit;
	register struct uba_dinfo *ui;
	register struct buf *dp;

	for (tm11 = 0; tm11 < NTM; tm11++) {
		if ((um = tmminfo[tm11]) == 0 || um->um_alive == 0 ||
		    um->um_ubanum != uban)
			continue;
		if (printed == 0) {
			printf(" tm");
			DELAY(2000000);		/* time to self test */
			printed = 1;
		}
		um->um_tab.b_active = 0;
		um->um_tab.b_actf = um->um_tab.b_actl = 0;
		if (um->um_ubinfo) {
			printf("<%d>", (um->um_ubinfo>>28)&0xf);
			ubadone(um);
		}
		((struct device *)(um->um_addr))->tmcs = TM_DCLR;
		for (unit = 0; unit < NTE; unit++) {
			if ((ui = tmdinfo[unit]) == 0)
				continue;
			if (ui->ui_alive == 0)
				continue;
			dp = &tmutab[unit];
			dp->b_active = 0;
			dp->b_forw = 0;
			if (um->um_tab.b_actf == NULL)
				um->um_tab.b_actf = dp;
			else
				um->um_tab.b_actl->b_forw = dp;
			um->um_tab.b_actl = dp;
			tm_softc[unit].sc_openf = -1;
		}
		tmstart(um);
	}
}

/*
 * Ioctl interface: MTIOCTOP performs tape operations,
 * MTIOCGET returns the saved status registers.
 */
/*ARGSUSED*/
tmioctl(dev, cmd, addr, flag)
	caddr_t addr;
	dev_t dev;
{
	int unit = TMUNIT(dev);
	register struct tm_softc *sc = &tm_softc[unit];
	register struct buf *bp = &ctmbuf[unit];
	register callcount;
	int fcount;
	struct mtop mtop;
	struct mtget mtget;
	/* we depend on the values and order of the MT codes here */
	static tmops[] =
	   {TM_WEOF,TM_SFORW,TM_SREV,TM_SFORW,TM_SREV,TM_REW,TM_OFFL,TM_SENSE};

	switch (cmd) {

	case MTIOCTOP:	/* tape operation */
		if (copyin((caddr_t)addr, (caddr_t)&mtop, sizeof(mtop))) {
			u.u_error = EFAULT;
			return;
		}
		switch (mtop.mt_op) {

		case MTWEOF:
			callcount = mtop.mt_count;
			fcount = 1;
			break;

		case MTFSF: case MTBSF:
			callcount = mtop.mt_count;
			fcount = INF;
			break;

		case MTFSR: case MTBSR:
			callcount = 1;
			fcount = mtop.mt_count;
			break;

		case MTREW: case MTOFFL: case MTNOP:
			callcount = 1;
			fcount = 1;
			break;

		default:
			u.u_error = ENXIO;
			return;
		}
		if (callcount <= 0 || fcount <= 0) {
			u.u_error = ENXIO;
			return;
		}
		while (--callcount >= 0) {
			tmcommand(dev, tmops[mtop.mt_op], fcount);
			if ((mtop.mt_op == MTFSR || mtop.mt_op == MTBSR) &&
			    bp->b_resid) {
				u.u_error = EIO;
				break;
			}
			if ((bp->b_flags&B_ERROR) || sc->sc_erreg&TM_BOT)
				break;
		}
		geterror(bp);
		return;

	case MTIOCGET:
		mtget.mt_dsreg = sc->sc_dsreg;
		mtget.mt_erreg = sc->sc_erreg;
		mtget.mt_resid = sc->sc_resid;
		if (copyout((caddr_t)&mtget, addr, sizeof(mtget)))
			u.u_error = EFAULT;
		return;

	default:
		u.u_error = ENXIO;
	}
}
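
/*
 * Illustrative user-level sketch of the ioctl interface handled above;
 * not part of the driver, and the device path is a hypothetical example.
 *
 *	struct mtop op;
 *	struct mtget status;
 *	int fd = open("/dev/nmt0", 2);
 *
 *	op.mt_op = MTFSF;			(space forward one file)
 *	op.mt_count = 1;
 *	ioctl(fd, MTIOCTOP, (char *)&op);
 *	ioctl(fd, MTIOCGET, (char *)&status);	(copies dsreg, erreg, resid)
 */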

#define	DBSIZE	20

/*
 * Crash dump routine: write physical memory to tape, DBSIZE
 * pages at a time, then a double EOF and a rewind.  Called at
 * dump time with interrupts off, so tmwait() polls the controller.
 */
tmdump()
{
	register struct uba_dinfo *ui;
	register struct uba_regs *up;
	register struct device *addr;
	int blk, num;
	int start;

	start = 0;
	num = maxfree;
#define	phys(a,b)	((b)((int)(a)&0x7fffffff))
	if (tmdinfo[0] == 0) {
		printf("dna\n");
		return (-1);
	}
	ui = phys(tmdinfo[0], struct uba_dinfo *);
	up = phys(ui->ui_hd, struct uba_hd *)->uh_physuba;
#if VAX780
	if (cpu == VAX_780)
		ubainit(up);
#endif
	DELAY(1000000);
	addr = (struct device *)ui->ui_physaddr;
	tmwait(addr);
	addr->tmcs = TM_DCLR | TM_GO;
	while (num > 0) {
		blk = num > DBSIZE ? DBSIZE : num;
		tmdwrite(start, blk, addr, up);
		start += blk;
		num -= blk;
	}
	tmeof(addr);
	tmeof(addr);
	tmwait(addr);
	addr->tmcs = TM_REW | TM_GO;
	tmwait(addr);
	return (0);
}

/*
 * Write num pages, starting at page frame dbuf, to the tape,
 * mapping them through the UNIBUS adapter map.
 */
tmdwrite(dbuf, num, addr, up)
	register dbuf, num;
	register struct device *addr;
	struct uba_regs *up;
{
	register struct pte *io;
	register int npf;

	tmwait(addr);
	io = up->uba_map;
	npf = num + 1;
	while (--npf != 0)
		*(int *)io++ = (dbuf++ | (1<<UBA_DPSHIFT) | UBA_MRV);
	*(int *)io = 0;
	addr->tmbc = -(num*NBPG);
	addr->tmba = 0;
	addr->tmcs = TM_WCOM | TM_GO;
}

/*
 * Busy-wait for the controller to go ready.
 */
tmwait(addr)
	register struct device *addr;
{
	register s;

	do
		s = addr->tmcs;
	while ((s & TM_CUR) == 0);
}

/*
 * Write an end of file mark.
 */
tmeof(addr)
	struct device *addr;
{

	tmwait(addr);
	addr->tmcs = TM_WEOF | TM_GO;
}
#endif