/*	tm.c	4.9	02/10/81	*/

#include "tm.h"
#if NTM > 0
/*
 * TM tape driver
 */
#define	DELAY(N)	{ register int d; d = N; while (--d > 0); }
#include "../h/param.h"
#include "../h/buf.h"
#include "../h/dir.h"
#include "../h/conf.h"
#include "../h/user.h"
#include "../h/file.h"
#include "../h/map.h"
#include "../h/pte.h"
#include "../h/uba.h"
#include "../h/mtio.h"
#include "../h/ioctl.h"
#include "../h/vm.h"
#include "../h/cmap.h"
#include "../h/cpu.h"

#include "../h/tmreg.h"

struct	buf	tmtab;
struct	buf	ctmbuf;
struct	buf	rtmbuf;

int	tmcntrlr(), tmslave(), tmdgo(), tmintr();
struct	uba_dinfo *tminfo[NTM];
u_short	tmstd[] = { 0 };
int	(*tmivec[])() = { tmintr, 0 };
struct	uba_driver tmdriver =
	{ tmcntrlr, tmslave, tmdgo, 0, 0, tmstd, tminfo, tmivec };
int	tm_ubinfo;

/* bits in minor device */
#define	T_NOREWIND	04
#define	T_1600BPI	010

#define	INF	(daddr_t)1000000L

/*
 * Really only handle one tape drive... if you have more than one,
 * you can put all these (and some of the above) in a structure,
 * change the obvious things, and make tmslave smarter, but
 * it is not clear what happens when some drives are transferring while
 * others rewind, so we don't pretend that this driver handles multiple
 * tape drives.
 */
char	t_openf;
daddr_t	t_blkno;
char	t_flags;
daddr_t	t_nxrec;
u_short	t_erreg;
u_short	t_dsreg;
short	t_resid;

#define	SSEEK	1		/* seeking */
#define	SIO	2		/* doing seq i/o */
#define	SCOM	3		/* sending control command */

#define	LASTIOW	1		/* last op was a write */
#define	WAITREW	2		/* someone is waiting for a rewind */

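/*
 * Illustrative sketch only, in the spirit of the comment above: if the
 * driver were extended to handle several drives, the per-drive state
 * could be gathered into a structure indexed by unit, roughly as below.
 * The structure and member names are made up for illustration and are
 * not used anywhere in this driver.
 */
#ifdef notdef
struct	tm_softc {
	char	sc_openf;		/* open flag (t_openf) */
	daddr_t	sc_blkno;		/* current block number (t_blkno) */
	char	sc_flags;		/* LASTIOW, WAITREW (t_flags) */
	daddr_t	sc_nxrec;		/* next record past EOF (t_nxrec) */
	u_short	sc_erreg;		/* last error register (t_erreg) */
	u_short	sc_dsreg;		/* last status register (t_dsreg) */
	short	sc_resid;		/* last residual count (t_resid) */
} tm_softc[NTM];
#endif
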
tmcntrlr(ui, reg)
	struct uba_dinfo *ui;
	caddr_t reg;
{
	((struct device *)reg)->tmcs = IENABLE;
	/*
	 * If this is a tm03/tc11, it ought to have interrupted
	 * by now, if it isn't (ie: it is a ts04) then we just
	 * pray that it didn't interrupt, so autoconf will ignore it
	 * - just in case our prayers fail, we will reference one
	 * of the more distant registers, and hope for a machine
	 * check, or similar disaster
	 */
	if (badaddr(&((struct device *)reg)->tmrd, 2))
		return(0);
	return(1);
}

tmslave(ui, reg, slaveno)
	struct uba_dinfo *ui;
	caddr_t reg;
{
	/*
	 * Due to a design flaw, we cannot ascertain if the tape
	 * exists or not unless it is on line - ie: unless a tape is
	 * mounted. This is too severe a restriction to bear.
	 * As we can only handle one tape, we might just as well insist
	 * that it be slave #0, and just assume that it exists.
	 * Something better will have to be done if you have two
	 * tapes on one controller, or two controllers
	 */
	if (slaveno != 0 || tminfo[0])
		return(0);
	return(1);
}

tmopen(dev, flag)
	dev_t dev;
	int flag;
{
	register ds, unit;
	register struct uba_dinfo *ui;

	tmtab.b_flags |= B_TAPE;
	unit = minor(dev)&03;
	if (unit>=NTM || t_openf || !(ui = tminfo[minor(dev)>>3])->ui_alive) {
		u.u_error = ENXIO;		/* out of range or open */
		return;
	}
	tcommand(dev, NOP, 1);
	if ((t_erreg&SELR) == 0) {
		u.u_error = EIO;		/* offline */
		return;
	}
	t_openf = 1;
	if (t_erreg&RWS)
		tmwaitrws(dev);			/* wait for rewind complete */
	while (t_erreg&SDWN)
		tcommand(dev, NOP, 1);		/* await settle down */
	if ((t_erreg&TUR)==0 ||
	    ((flag&(FREAD|FWRITE)) == FWRITE && (t_erreg&WRL))) {
		((struct device *)ui->ui_addr)->tmcs = DCLR|GO;
		u.u_error = EIO;		/* offline or write protect */
	}
	if (u.u_error != 0) {
		t_openf = 0;
		return;
	}
	t_blkno = (daddr_t)0;
	t_nxrec = INF;
	t_flags = 0;
	t_openf = 1;
}

tmwaitrws(dev)
	register dev;
{
	register struct device *addr =
	    (struct device *)tminfo[minor(dev)>>3]->ui_addr;

	spl5();
	for (;;) {
		if ((addr->tmer&RWS) == 0) {
			spl0();			/* rewind complete */
			return;
		}
		t_flags |= WAITREW;
		sleep((caddr_t)&t_flags, PRIBIO);
	}
}

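/*
 * On close after writing, tmclose writes two tape marks and backspaces
 * over the second one, so the tape is left positioned just past the file
 * written with a double tape mark (the usual end-of-tape convention)
 * beyond it; unless the no-rewind minor device was used, the tape is
 * then rewound.
 */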
tmclose(dev, flag)
	register dev_t dev;
	register flag;
{

	if (flag == FWRITE || ((flag&FWRITE) && (t_flags&LASTIOW))) {
		tcommand(dev, WEOF, 1);
		tcommand(dev, WEOF, 1);
		tcommand(dev, SREV, 1);
	}
	if ((minor(dev)&T_NOREWIND) == 0)
		tcommand(dev, REW, 1);
	t_openf = 0;
}

tcommand(dev, com, count)
	dev_t dev;
	int com, count;
{
	register struct buf *bp;

	bp = &ctmbuf;
	(void) spl5();
	while (bp->b_flags&B_BUSY) {
		bp->b_flags |= B_WANTED;
		sleep((caddr_t)bp, PRIBIO);
	}
	bp->b_flags = B_BUSY|B_READ;
	(void) spl0();
	bp->b_dev = dev;
	bp->b_repcnt = -count;
	bp->b_command = com;
	bp->b_blkno = 0;
	tmstrategy(bp);
	iowait(bp);
	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	bp->b_flags &= B_ERROR;
}

tmstrategy(bp)
	register struct buf *bp;
{
	register daddr_t *p;

	tmwaitrws(bp->b_dev);
	if (bp != &ctmbuf) {
		p = &t_nxrec;
		if (dbtofsb(bp->b_blkno) > *p) {
			bp->b_flags |= B_ERROR;
			bp->b_error = ENXIO;		/* past EOF */
			iodone(bp);
			return;
		} else if (dbtofsb(bp->b_blkno) == *p && bp->b_flags&B_READ) {
			bp->b_resid = bp->b_bcount;
			clrbuf(bp);			/* at EOF */
			iodone(bp);
			return;
		} else if ((bp->b_flags&B_READ) == 0)
			*p = dbtofsb(bp->b_blkno) + 1;	/* write sets EOF */
	}
	bp->av_forw = NULL;
	(void) spl5();
	if (tmtab.b_actf == NULL)
		tmtab.b_actf = bp;
	else
		tmtab.b_actl->av_forw = bp;
	tmtab.b_actl = bp;
	if (tmtab.b_active == 0)
		tmstart();
	(void) spl0();
}

tmstart()
{
	register struct buf *bp;
	register struct uba_dinfo *ui;
	register struct device *addr;
	register cmd;
	register daddr_t blkno;
	int s;

loop:
	if ((bp = tmtab.b_actf) == 0)
		return;
	ui = tminfo[minor(bp->b_dev)>>3];
	addr = (struct device *)ui->ui_addr;
	t_dsreg = addr->tmcs;
	t_erreg = addr->tmer;
	t_resid = addr->tmbc;
	t_flags &= ~LASTIOW;
	if (t_openf < 0 || (addr->tmcs&CUR) == 0) {
		/* t_openf = -1; ??? */
		bp->b_flags |= B_ERROR;		/* hard error'ed or !SELR */
		goto next;
	}
	cmd = IENABLE | GO;
	if ((minor(bp->b_dev) & T_1600BPI) == 0)
		cmd |= D800;
	if (bp == &ctmbuf) {
		if (bp->b_command == NOP)
			goto next;		/* just get status */
		else {
			cmd |= bp->b_command;
			tmtab.b_active = SCOM;
			if (bp->b_command == SFORW || bp->b_command == SREV)
				addr->tmbc = bp->b_repcnt;
			addr->tmcs = cmd;
			return;
		}
	}
	if ((blkno = t_blkno) == dbtofsb(bp->b_blkno)) {
		addr->tmbc = -bp->b_bcount;
		s = spl6();
		if (tm_ubinfo == 0)
			tm_ubinfo = ubasetup(ui->ui_ubanum, bp, 1);
		splx(s);
		if ((bp->b_flags&B_READ) == 0) {
			if (tmtab.b_errcnt)
				cmd |= WIRG;
			else
				cmd |= WCOM;
		} else
			cmd |= RCOM;
		cmd |= (tm_ubinfo >> 12) & 0x30;
		tmtab.b_active = SIO;
		addr->tmba = tm_ubinfo;
		addr->tmcs = cmd;
		return;
	}
	tmtab.b_active = SSEEK;
	if (blkno < dbtofsb(bp->b_blkno)) {
		cmd |= SFORW;
		addr->tmbc = blkno - dbtofsb(bp->b_blkno);
	} else {
		cmd |= SREV;
		addr->tmbc = dbtofsb(bp->b_blkno) - blkno;
	}
	addr->tmcs = cmd;
	return;

next:
	ubarelse(ui->ui_ubanum, &tm_ubinfo);
	tmtab.b_actf = bp->av_forw;
	iodone(bp);
	goto loop;
}

tmdgo()
{
}

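/*
 * tmintr() finishes what tmstart() began: tmtab.b_active records which
 * kind of operation is outstanding (SSEEK, SIO or SCOM, defined above),
 * and the switch near the bottom of tmintr() uses it to update t_blkno
 * and to decide whether the current request is complete or must be
 * restarted with another call to tmstart().
 */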
tmintr(d)
{
	register struct buf *bp;
	register struct device *addr = (struct device *)tminfo[d]->ui_addr;
	register state;

	if (t_flags&WAITREW && (addr->tmer&RWS) == 0) {
		t_flags &= ~WAITREW;
		wakeup((caddr_t)&t_flags);
	}
	if ((bp = tmtab.b_actf) == NULL)
		return;
	t_dsreg = addr->tmcs;
	t_erreg = addr->tmer;
	t_resid = addr->tmbc;
	if ((bp->b_flags & B_READ) == 0)
		t_flags |= LASTIOW;
	state = tmtab.b_active;
	tmtab.b_active = 0;
	if (addr->tmcs&ERROR) {
		while (addr->tmer & SDWN)
			;			/* await settle down */
		if (addr->tmer&EOF) {
			tmseteof(bp);		/* set blkno and nxrec */
			state = SCOM;
			addr->tmbc = -bp->b_bcount;
			goto errout;
		}
		if ((bp->b_flags&B_READ) && (addr->tmer&(HARD|SOFT)) == RLE)
			goto out;
		if ((addr->tmer&HARD)==0 && state==SIO) {
			if (++tmtab.b_errcnt < 7) {
				if ((addr->tmer&SOFT) == NXM)
					printf("TM UBA late error\n");
				else
					t_blkno++;
				ubarelse(tminfo[d]->ui_ubanum, &tm_ubinfo);
				tmstart();
				return;
			}
		} else if (t_openf>0 && bp != &rtmbuf)
			t_openf = -1;
		deverror(bp, t_erreg, t_dsreg);
		bp->b_flags |= B_ERROR;
		state = SIO;
	}
out:
	switch (state) {

	case SIO:
		t_blkno++;
		/* fall into ... */

	case SCOM:
		if (bp == &ctmbuf) {
			switch (bp->b_command) {
			case SFORW:
				t_blkno -= bp->b_repcnt;
				break;

			case SREV:
				t_blkno += bp->b_repcnt;
				break;

			default:
				if (++bp->b_repcnt < 0) {
					tmstart();	/* continue */
					return;
				}
			}
		}
errout:
		tmtab.b_errcnt = 0;
		tmtab.b_actf = bp->av_forw;
		bp->b_resid = -addr->tmbc;
		ubarelse(tminfo[d]->ui_ubanum, &tm_ubinfo);
		iodone(bp);
		break;

	case SSEEK:
		t_blkno = dbtofsb(bp->b_blkno);
		break;

	default:
		return;
	}
	tmstart();
}

tmseteof(bp)
	register struct buf *bp;
{
	register struct device *addr =
	    (struct device *)tminfo[minor(bp->b_dev)>>3]->ui_addr;

	if (bp == &ctmbuf) {
		if (t_blkno > dbtofsb(bp->b_blkno)) {
			/* reversing */
			t_nxrec = dbtofsb(bp->b_blkno) - addr->tmbc;
			t_blkno = t_nxrec;
		} else {
			/* spacing forward */
			t_blkno = dbtofsb(bp->b_blkno) + addr->tmbc;
			t_nxrec = t_blkno - 1;
		}
		return;
	}
	/* eof on read */
	t_nxrec = dbtofsb(bp->b_blkno);
}

tmread(dev)
{

	tmphys(dev);
	physio(tmstrategy, &rtmbuf, dev, B_READ, minphys);
}

tmwrite(dev)
{

	tmphys(dev);
	physio(tmstrategy, &rtmbuf, dev, B_WRITE, minphys);
}

tmphys(dev)
{
	register daddr_t a;

	a = dbtofsb(u.u_offset >> 9);
	t_blkno = a;
	t_nxrec = a + 1;
}

/*ARGSUSED*/
tmioctl(dev, cmd, addr, flag)
	caddr_t addr;
	dev_t dev;
{
	register callcount;
	int fcount;
	struct mtop mtop;
	struct mtget mtget;
	/* we depend on the values and order of the MT codes here */
	static tmops[] = {WEOF, SFORW, SREV, SFORW, SREV, REW, OFFL, NOP};

	switch(cmd) {
	case MTIOCTOP:	/* tape operation */
		if (copyin((caddr_t)addr, (caddr_t)&mtop, sizeof(mtop))) {
			u.u_error = EFAULT;
			return;
		}
		switch(mtop.mt_op) {
		case MTWEOF: case MTFSF: case MTBSF:
			callcount = mtop.mt_count;
			fcount = INF;
			break;
		case MTFSR: case MTBSR:
			callcount = 1;
			fcount = mtop.mt_count;
			break;
		case MTREW: case MTOFFL: case MTNOP:
			callcount = 1;
			fcount = 1;
			break;
		default:
			u.u_error = ENXIO;
			return;
		}
		if (callcount <= 0 || fcount <= 0)
			u.u_error = ENXIO;
		else while (--callcount >= 0) {
			tcommand(dev, tmops[mtop.mt_op], fcount);
			if ((mtop.mt_op == MTFSR || mtop.mt_op == MTBSR) &&
			    ctmbuf.b_resid) {
				u.u_error = EIO;
				break;
			}
			if ((ctmbuf.b_flags&B_ERROR) || t_erreg&BOT)
				break;
		}
		geterror(&ctmbuf);
		return;
	case MTIOCGET:
		mtget.mt_dsreg = t_dsreg;
		mtget.mt_erreg = t_erreg;
		mtget.mt_resid = t_resid;
		if (copyout((caddr_t)&mtget, addr, sizeof(mtget)))
			u.u_error = EFAULT;
		return;
	default:
		u.u_error = ENXIO;
	}
}

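/*
 * The remaining routines implement the dump entry point, used to save
 * memory on tape after a crash: they run polled (no interrupt service),
 * address the controller through its physical address, and load the
 * UNIBUS map by hand, DBSIZE pages of memory at a time.
 */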
#define	DBSIZE	20

tmdump()
{

	tmwall((char *)0, maxfree);	/* write out memory */
	tmeof();
	tmeof();
	tmrewind();
	tmwait();
}

tmwall(start, num)
	int start, num;
{
	register struct uba_dinfo *ui;
	register struct uba_regs *up;
	register struct device *addr;
	int blk, bdp;

#define	phys1(a,b)	((b)((int)(a)&0x7fffffff))
#define	phys(a,b)	phys1(*phys1(&a, b*), b)
	if (tminfo[0] == 0) {
		printf("dna\n");
		return (-1);
	}
	ui = phys(tminfo[0], struct uba_dinfo *);
	up = phys(ui->ui_hd, struct uba_hd *)->uh_physuba;
#if VAX780
	if (cpu == VAX_780)
		ubainit(up);
#endif
	DELAY(1000000);
	addr = (struct device *)ui->ui_physaddr;
	tmwait(addr);
	addr->tmcs = DCLR | GO;
	while (num > 0) {
		blk = num > DBSIZE ? DBSIZE : num;
		tmdwrite(start, blk, addr, up);
		start += blk;
		num -= blk;
	}
	bdp = 1;			/* crud to fool c compiler */
	up->uba_dpr[bdp] |= UBA_BNE;
	return (0);
}

tmdwrite(buf, num, addr, up)
	register buf, num;
	register struct device *addr;
	struct uba_regs *up;
{
	register struct pte *io;
	register int npf;
	int bdp;

	tmwait(addr);
	bdp = 1;			/* more dastardly tricks on pcc */
	up->uba_dpr[bdp] |= UBA_BNE;
	io = up->uba_map;
	npf = num+1;
	while (--npf != 0)
		*(int *)io++ = (buf++ | (1<<UBA_DPSHIFT) | UBA_MRV);
	*(int *)io = 0;
	addr->tmbc = -(num*NBPG);
	addr->tmba = 0;
	addr->tmcs = WCOM | GO;
}

tmwait(addr)
	register struct device *addr;
{
	register s;

	do
		s = addr->tmcs;
	while ((s & CUR) == 0);
}

tmrewind(addr)
	struct device *addr;
{

	tmwait(addr);
	addr->tmcs = REW | GO;
}

tmeof(addr)
	struct device *addr;
{

	tmwait(addr);
	addr->tmcs = WEOF | GO;
}
#endif