/*	tm.c	4.12	02/17/81	*/

#include "tm.h"
#if NTM03 > 0
/*
 * TM tape driver
 *
 * THIS HANDLES ONLY ONE DRIVE ON ONE CONTROLLER, AS WE HAVE NO
 * WAY TO TEST MULTIPLE TRANSPORTS.
 */
#define DELAY(N)        { register int d = N; while (--d > 0); }
#include "../h/param.h"
#include "../h/buf.h"
#include "../h/dir.h"
#include "../h/conf.h"
#include "../h/user.h"
#include "../h/file.h"
#include "../h/map.h"
#include "../h/pte.h"
#include "../h/uba.h"
#include "../h/mtio.h"
#include "../h/ioctl.h"
#include "../h/vm.h"
#include "../h/cmap.h"
#include "../h/cpu.h"

#include "../h/tmreg.h"

struct buf ctmbuf;
struct buf rtmbuf;

int tmcntrlr(), tmslave(), tmdgo(), tmintr();
struct uba_minfo *tmminfo[NTM03];
struct uba_dinfo *tmdinfo[NTM11];
u_short tmstd[] = { 0772520, 0 };
struct uba_driver tmdriver =
        { tmcntrlr, tmslave, tmdgo, 0, tmstd, "tm", tmdinfo, tmminfo };

/* bits in minor device */
#define T_NOREWIND      04
#define T_1600BPI       010
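
/*
 * The minor device number is interpreted as follows (see tmopen,
 * tmclose and tmstart): the low two bits select the transport (only
 * unit 0 is supported), T_NOREWIND suppresses the rewind normally
 * done at close, and T_1600BPI presumably selects 1600 bpi recording:
 * when it is clear the D800 density bit is set in each command.
 */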

#define INF     (daddr_t)1000000L

struct tm_softc {
        char    sc_openf;
        char    sc_flags;
        daddr_t sc_blkno;
        daddr_t sc_nxrec;
        u_short sc_erreg;
        u_short sc_dsreg;
        short   sc_resid;
        int     sc_ubinfo;
} tm_softc[NTM03];

#define SSEEK   1               /* seeking */
#define SIO     2               /* doing seq i/o */
#define SCOM    3               /* sending control command */

#define LASTIOW 1               /* last op was a write */
#define WAITREW 2               /* someone is waiting for a rewind */

/*
 * Determine if there is a controller for
 * a tm at address reg.  Our goal is to make the
 * device interrupt.
 */
tmcntrlr(um, reg)
        struct uba_minfo *um;
        caddr_t reg;
{
        register int br, cvec;

        ((struct device *)reg)->tmcs = IENABLE;
        /*
         * If this is a tm03/tc11, it ought to have interrupted
         * by now; if it isn't (ie: it is a ts04) then we just
         * hope that it didn't interrupt, so autoconf will ignore it.
         * Just in case, we will reference one
         * of the more distant registers, and hope for a machine
         * check, or similar disaster if this is a ts.
         *
         * Note: on an 11/780, badaddr will just generate
         * a uba error for a ts; but our caller will notice that
         * so we won't check for it.
         */
        if (badaddr(&((struct device *)reg)->tmrd, 2))
                return (0);
        return (1);
}

tmslave(ui, reg, slaveno)
        struct uba_dinfo *ui;
        caddr_t reg;
{

        /*
         * Due to a design flaw, we cannot ascertain if the tape
         * exists or not unless it is on line - ie: unless a tape is
         * mounted.  This is too severe a restriction to bear.
         * As we can only handle one tape, we might just as well insist
         * that it be slave #0, and just assume that it exists.
         * Something better will have to be done if you have two
         * tapes on one controller, or two controllers.
         */
        if (slaveno != 0 || tmdinfo[0])
                return (0);
        return (1);
}

tmopen(dev, flag)
        dev_t dev;
        int flag;
{
        register ds, unit;
        register struct uba_dinfo *ui;
        register struct tm_softc *sc = &tm_softc[0];

        tmminfo[0]->um_tab.b_flags |= B_TAPE;
        unit = minor(dev)&03;
        if (unit >= NTM11 || sc->sc_openf ||
            (ui = tmdinfo[0]) == 0 || ui->ui_alive == 0) {
                u.u_error = ENXIO;              /* out of range or open */
                return;
        }
        tcommand(dev, NOP, 1);
        if ((sc->sc_erreg&SELR) == 0) {
                u.u_error = EIO;
                goto eio;
        }
        sc->sc_openf = 1;
        if (sc->sc_erreg&RWS)
                tmwaitrws(dev);                 /* wait for rewind complete */
        while (sc->sc_erreg&SDWN)
                tcommand(dev, NOP, 1);          /* await settle down */
        if ((sc->sc_erreg&TUR) == 0 ||
            ((flag&(FREAD|FWRITE)) == FWRITE && (sc->sc_erreg&WRL))) {
                ((struct device *)ui->ui_addr)->tmcs = DCLR|GO;
                u.u_error = EIO;                /* offline or write protect */
        }
        if (u.u_error != 0) {
                sc->sc_openf = 0;
                if (u.u_error == EIO)
eio:
                        uprintf("tape offline or protected\n");
                return;
        }
        sc->sc_blkno = (daddr_t)0;
        sc->sc_nxrec = INF;
        sc->sc_flags = 0;
        sc->sc_openf = 1;
}

tmwaitrws(dev)
        register dev;
{
        register struct device *addr =
            (struct device *)tmdinfo[0]->ui_addr;
        register struct tm_softc *sc = &tm_softc[0];

        spl5();
        for (;;) {
                if ((addr->tmer&RWS) == 0) {
                        spl0();                 /* rewind complete */
                        return;
                }
                sc->sc_flags |= WAITREW;
                sleep((caddr_t)&sc->sc_flags, PRIBIO);
        }
}

tmclose(dev, flag)
        register dev_t dev;
        register flag;
{
        register struct tm_softc *sc = &tm_softc[0];

        if (flag == FWRITE || ((flag&FWRITE) && (sc->sc_flags&LASTIOW))) {
                tcommand(dev, WEOF, 1);
                tcommand(dev, WEOF, 1);
                tcommand(dev, SREV, 1);
        }
        if ((minor(dev)&T_NOREWIND) == 0)
                tcommand(dev, REW, 1);
        sc->sc_openf = 0;
}

tcommand(dev, com, count)
        dev_t dev;
        int com, count;
{
        register struct buf *bp;

        bp = &ctmbuf;
        (void) spl5();
        while (bp->b_flags&B_BUSY) {
                bp->b_flags |= B_WANTED;
                sleep((caddr_t)bp, PRIBIO);
        }
        bp->b_flags = B_BUSY|B_READ;
        (void) spl0();
        bp->b_dev = dev;
        bp->b_repcnt = -count;
        bp->b_command = com;
        bp->b_blkno = 0;
        tmstrategy(bp);
        iowait(bp);
        if (bp->b_flags&B_WANTED)
                wakeup((caddr_t)bp);
        bp->b_flags &= B_ERROR;
}

tmstrategy(bp)
        register struct buf *bp;
{
        register daddr_t *p;
        register struct buf *tmi;

        tmwaitrws(bp->b_dev);
        if (bp != &ctmbuf) {
                p = &tm_softc[0].sc_nxrec;
                if (dbtofsb(bp->b_blkno) > *p) {
                        bp->b_flags |= B_ERROR;
                        bp->b_error = ENXIO;    /* past EOF */
                        iodone(bp);
                        return;
                } else if (dbtofsb(bp->b_blkno) == *p && bp->b_flags&B_READ) {
                        bp->b_resid = bp->b_bcount;
                        clrbuf(bp);             /* at EOF */
                        iodone(bp);
                        return;
                } else if ((bp->b_flags&B_READ) == 0)
                        *p = dbtofsb(bp->b_blkno) + 1;  /* write sets EOF */
        }
        bp->av_forw = NULL;
        (void) spl5();
        tmi = &tmminfo[0]->um_tab;
        if (tmi->b_actf == NULL)
                tmi->b_actf = bp;
        else
                tmi->b_actl->av_forw = bp;
        tmi->b_actl = bp;
        if (tmi->b_active == 0)
                tmstart();
        (void) spl0();
}
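
/*
 * Start the next transfer on the controller queue.  The state of the
 * operation in progress is recorded in um_tab.b_active: SCOM for a
 * control command taken from ctmbuf, SSEEK while spacing forward or
 * backward to the requested block, and SIO once the tape is positioned
 * and the data transfer proper has been started through the UNIBUS
 * map allocated by ubasetup.  tmintr finishes the job when the
 * operation completes.
 */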
tmstart()
{
        register struct buf *bp;
        register struct uba_minfo *um = tmminfo[0];
        register struct uba_dinfo *ui;
        register struct device *addr;
        register struct tm_softc *sc = &tm_softc[0];
        int cmd, s;
        daddr_t blkno;

loop:
        if ((bp = um->um_tab.b_actf) == 0)
                return;
        ui = tmdinfo[0];
        addr = (struct device *)ui->ui_addr;
        sc->sc_dsreg = addr->tmcs;
        sc->sc_erreg = addr->tmer;
        sc->sc_resid = addr->tmbc;
        sc->sc_flags &= ~LASTIOW;
        if (sc->sc_openf < 0 || (addr->tmcs&CUR) == 0) {
                /* sc->sc_openf = -1; ??? */
                bp->b_flags |= B_ERROR;         /* hard error'ed or !SELR */
                goto next;
        }
        cmd = IENABLE | GO;
        if ((minor(bp->b_dev) & T_1600BPI) == 0)
                cmd |= D800;
        if (bp == &ctmbuf) {
                if (bp->b_command == NOP)
                        goto next;              /* just get status */
                else {
                        cmd |= bp->b_command;
                        um->um_tab.b_active = SCOM;
                        if (bp->b_command == SFORW || bp->b_command == SREV)
                                addr->tmbc = bp->b_repcnt;
                        addr->tmcs = cmd;
                        return;
                }
        }
        if ((blkno = sc->sc_blkno) == dbtofsb(bp->b_blkno)) {
                addr->tmbc = -bp->b_bcount;
                s = spl6();
                if (sc->sc_ubinfo == 0)
                        sc->sc_ubinfo = ubasetup(ui->ui_ubanum, bp, 1);
                splx(s);
                if ((bp->b_flags&B_READ) == 0) {
                        if (um->um_tab.b_errcnt)
                                cmd |= WIRG;
                        else
                                cmd |= WCOM;
                } else
                        cmd |= RCOM;
                cmd |= (sc->sc_ubinfo >> 12) & 0x30;
                um->um_tab.b_active = SIO;
                addr->tmba = sc->sc_ubinfo;
                addr->tmcs = cmd;
                return;
        }
        um->um_tab.b_active = SSEEK;
        if (blkno < dbtofsb(bp->b_blkno)) {
                cmd |= SFORW;
                addr->tmbc = blkno - dbtofsb(bp->b_blkno);
        } else {
                cmd |= SREV;
                addr->tmbc = dbtofsb(bp->b_blkno) - blkno;
        }
        addr->tmcs = cmd;
        return;

next:
        ubarelse(ui->ui_ubanum, &sc->sc_ubinfo);
        um->um_tab.b_actf = bp->av_forw;
        iodone(bp);
        goto loop;
}

tmdgo()
{

}
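
/*
 * Tape interrupt routine.  Wake up anybody waiting on a rewind, save
 * the device registers for status, then dispose of the transfer at the
 * head of the queue according to the state recorded by tmstart.  A tape
 * mark is folded into the block position by tmseteof; soft errors on
 * sequential i/o are retried until b_errcnt reaches 7, with writes
 * retried using an extended interrecord gap (WIRG); hard errors (or
 * errors outside sequential i/o) shut the unit down by setting
 * sc_openf negative, except for raw i/o through rtmbuf.
 */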
/*ARGSUSED*/
tmintr(d)
        int d;
{
        register struct buf *bp;
        register struct uba_minfo *um = tmminfo[0];
        register struct device *addr = (struct device *)tmdinfo[0]->ui_addr;
        register struct tm_softc *sc = &tm_softc[0];
        register state;

        if (sc->sc_flags&WAITREW && (addr->tmer&RWS) == 0) {
                sc->sc_flags &= ~WAITREW;
                wakeup((caddr_t)&sc->sc_flags);
        }
        if ((bp = um->um_tab.b_actf) == NULL)
                return;
        sc->sc_dsreg = addr->tmcs;
        sc->sc_erreg = addr->tmer;
        sc->sc_resid = addr->tmbc;
        if ((bp->b_flags & B_READ) == 0)
                sc->sc_flags |= LASTIOW;
        state = um->um_tab.b_active;
        um->um_tab.b_active = 0;
        if (addr->tmcs&ERROR) {
                while (addr->tmer & SDWN)
                        ;                       /* await settle down */
                if (addr->tmer&EOF) {
                        tmseteof(bp);           /* set blkno and nxrec */
                        state = SCOM;
                        addr->tmbc = -bp->b_bcount;
                        goto errout;
                }
                if ((bp->b_flags&B_READ) && (addr->tmer&(HARD|SOFT)) == RLE)
                        goto out;
                if ((addr->tmer&HARD) == 0 && state == SIO) {
                        if (++um->um_tab.b_errcnt < 7) {
                                if ((addr->tmer&SOFT) == NXM)
                                        printf("TM UBA late error\n");
                                sc->sc_blkno++;
                                ubarelse(um->um_ubanum, &sc->sc_ubinfo);
                                tmstart();
                                return;
                        }
                } else if (sc->sc_openf > 0 && bp != &rtmbuf)
                        sc->sc_openf = -1;
                deverror(bp, sc->sc_erreg, sc->sc_dsreg);
                bp->b_flags |= B_ERROR;
                state = SIO;
        }
out:
        switch (state) {

        case SIO:
                sc->sc_blkno++;
                /* fall into ... */

        case SCOM:
                if (bp == &ctmbuf) {
                        switch (bp->b_command) {
                        case SFORW:
                                sc->sc_blkno -= bp->b_repcnt;
                                break;

                        case SREV:
                                sc->sc_blkno += bp->b_repcnt;
                                break;

                        default:
                                if (++bp->b_repcnt < 0) {
                                        tmstart();      /* continue */
                                        return;
                                }
                        }
                }
errout:
                um->um_tab.b_errcnt = 0;
                um->um_tab.b_actf = bp->av_forw;
                bp->b_resid = -addr->tmbc;
                ubarelse(um->um_ubanum, &sc->sc_ubinfo);
                iodone(bp);
                break;

        case SSEEK:
                sc->sc_blkno = dbtofsb(bp->b_blkno);
                break;

        default:
                return;
        }
        tmstart();
}

tmseteof(bp)
        register struct buf *bp;
{
        register struct device *addr =
            (struct device *)tmdinfo[0]->ui_addr;
        register struct tm_softc *sc = &tm_softc[0];

        if (bp == &ctmbuf) {
                if (sc->sc_blkno > dbtofsb(bp->b_blkno)) {
                        /* reversing */
                        sc->sc_nxrec = dbtofsb(bp->b_blkno) - addr->tmbc;
                        sc->sc_blkno = sc->sc_nxrec;
                } else {
                        /* spacing forward */
                        sc->sc_blkno = dbtofsb(bp->b_blkno) + addr->tmbc;
                        sc->sc_nxrec = sc->sc_blkno - 1;
                }
                return;
        }
        /* eof on read */
        sc->sc_nxrec = dbtofsb(bp->b_blkno);
}

tmread(dev)
{

        tmphys(dev);
        physio(tmstrategy, &rtmbuf, dev, B_READ, minphys);
}

tmwrite(dev)
{

        tmphys(dev);
        physio(tmstrategy, &rtmbuf, dev, B_WRITE, minphys);
}

tmphys(dev)
{
        register daddr_t a;
        register struct tm_softc *sc = &tm_softc[0];

        a = dbtofsb(u.u_offset >> 9);
        sc->sc_blkno = a;
        sc->sc_nxrec = a + 1;
}

/*ARGSUSED*/
tmioctl(dev, cmd, addr, flag)
        caddr_t addr;
        dev_t dev;
{
        register callcount;
        register struct tm_softc *sc = &tm_softc[0];
        int fcount;
        struct mtop mtop;
        struct mtget mtget;
        /* we depend on the values and order of the MT codes here */
        static tmops[] = {WEOF, SFORW, SREV, SFORW, SREV, REW, OFFL, NOP};

        switch (cmd) {
        case MTIOCTOP:  /* tape operation */
                if (copyin((caddr_t)addr, (caddr_t)&mtop, sizeof(mtop))) {
                        u.u_error = EFAULT;
                        return;
                }
                switch (mtop.mt_op) {
                case MTWEOF: case MTFSF: case MTBSF:
                        callcount = mtop.mt_count;
                        fcount = INF;
                        break;
                case MTFSR: case MTBSR:
                        callcount = 1;
                        fcount = mtop.mt_count;
                        break;
                case MTREW: case MTOFFL: case MTNOP:
                        callcount = 1;
                        fcount = 1;
                        break;
                default:
                        u.u_error = ENXIO;
                        return;
                }
                if (callcount <= 0 || fcount <= 0)
                        u.u_error = ENXIO;
                else while (--callcount >= 0) {
                        tcommand(dev, tmops[mtop.mt_op], fcount);
                        if ((mtop.mt_op == MTFSR || mtop.mt_op == MTBSR) &&
                            ctmbuf.b_resid) {
                                u.u_error = EIO;
                                break;
                        }
                        if ((ctmbuf.b_flags&B_ERROR) ||
                            sc->sc_erreg&BOT)
                                break;
                }
                geterror(&ctmbuf);
                return;
        case MTIOCGET:
                mtget.mt_dsreg = sc->sc_dsreg;
                mtget.mt_erreg = sc->sc_erreg;
                mtget.mt_resid = sc->sc_resid;
                if (copyout((caddr_t)&mtget, addr, sizeof(mtget)))
                        u.u_error = EFAULT;
                return;
        default:
                u.u_error = ENXIO;
        }
}
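
/*
 * For reference, a user process drives the MTIOCTOP entry above
 * roughly as follows (an illustrative sketch only, not part of this
 * driver; fd is assumed open on the raw tape device, the mtio.h
 * definitions are in scope, and mt_op may be any of the MT codes
 * handled above):
 *
 *      struct mtop mt;
 *
 *      mt.mt_op = MTREW;
 *      mt.mt_count = 1;
 *      if (ioctl(fd, MTIOCTOP, (char *)&mt) < 0)
 *              perror("mt");
 */

/*
 * The routines below dump physical memory onto the tape at crash time.
 * They run polled: tmwait busy-waits on CUR rather than taking
 * interrupts, and the dump is written in DBSIZE-block chunks through
 * UNIBUS map entries set up directly by tmdwrite.
 */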

#define DBSIZE  20

tmdump()
{
        register struct uba_dinfo *ui;
        register struct uba_regs *up;
        register struct device *addr;
        int blk, num;
        int start;

        start = 0;
        num = maxfree;
#define phys(a,b)       ((b)((int)(a)&0x7fffffff))
        if (tmdinfo[0] == 0) {
                printf("dna\n");
                return (-1);
        }
        ui = phys(tmdinfo[0], struct uba_dinfo *);
        up = phys(ui->ui_hd, struct uba_hd *)->uh_physuba;
#if VAX780
        if (cpu == VAX_780)
                ubainit(up);
#endif
        DELAY(1000000);
        addr = (struct device *)ui->ui_physaddr;
        tmwait(addr);
        addr->tmcs = DCLR | GO;
        while (num > 0) {
                blk = num > DBSIZE ? DBSIZE : num;
                tmdwrite(start, blk, addr, up);
                start += blk;
                num -= blk;
        }
        tmeof(addr);
        tmeof(addr);
        tmwait(addr);
        addr->tmcs = REW | GO;
        tmwait(addr);
        return (0);
}

tmdwrite(buf, num, addr, up)
        register buf, num;
        register struct device *addr;
        struct uba_regs *up;
{
        register struct pte *io;
        register int npf;

        tmwait(addr);
        io = up->uba_map;
        npf = num + 1;
        while (--npf != 0)
                *(int *)io++ = (buf++ | (1 << UBA_DPSHIFT) | UBA_MRV);
        *(int *)io = 0;
        addr->tmbc = -(num*NBPG);
        addr->tmba = 0;
        addr->tmcs = WCOM | GO;
}

tmwait(addr)
        register struct device *addr;
{
        register s;

        do
                s = addr->tmcs;
        while ((s & CUR) == 0);
}

tmeof(addr)
        struct device *addr;
{

        tmwait(addr);
        addr->tmcs = WEOF | GO;
}
#endif