/*	tm.c	4.10	02/15/81	*/

#include "tm.h"
#if NTM > 0
/*
 * TM tape driver
 */
#define	DELAY(N)	{ register int d; d = N; while (--d > 0); }
#include "../h/param.h"
#include "../h/buf.h"
#include "../h/dir.h"
#include "../h/conf.h"
#include "../h/user.h"
#include "../h/file.h"
#include "../h/map.h"
#include "../h/pte.h"
#include "../h/uba.h"
#include "../h/mtio.h"
#include "../h/ioctl.h"
#include "../h/vm.h"
#include "../h/cmap.h"
#include "../h/cpu.h"

#include "../h/tmreg.h"

struct	buf	tmtab;
struct	buf	ctmbuf;
struct	buf	rtmbuf;

int	tmcntrlr(), tmslave(), tmdgo(), tmintr();
struct	uba_dinfo *tminfo[NTM];
extern	u_short	tmstd[];
struct	uba_driver tmdriver =
	{ tmcntrlr, tmslave, tmdgo, 4, 0, tmstd, "tm", tminfo };
int	tm_ubinfo;

/* bits in minor device */
#define	T_NOREWIND	04
#define	T_1600BPI	010

#define	INF	(daddr_t)1000000L

/*
 * Really only handle one tape drive... if you have more than one,
 * you can put all these (and some of the above) in a structure,
 * change the obvious things, and make tmslave smarter, but
 * it is not clear what happens when some drives are transferring while
 * others rewind, so we don't pretend that this driver handles multiple
 * tape drives.
 */
char	t_openf;
daddr_t	t_blkno;
char	t_flags;
daddr_t	t_nxrec;
u_short	t_erreg;
u_short	t_dsreg;
short	t_resid;

#define	SSEEK	1		/* seeking */
#define	SIO	2		/* doing seq i/o */
#define	SCOM	3		/* sending control command */

#define	LASTIOW	1		/* last op was a write */
#define	WAITREW	2		/* someone is waiting for a rewind */

/*
 * Determine if there is a controller for
 * a tm at address reg.  Our goal is to make the
 * device interrupt.
 * THE ARGUMENT UI IS OBSOLETE
 */
tmcntrlr(ui, reg)
	struct uba_dinfo *ui;
	caddr_t reg;
{

	((struct device *)reg)->tmcs = IENABLE;
	/*
	 * If this is a tm03/tc11, it ought to have interrupted
	 * by now; if it hasn't (ie: it is a ts04) then we just
	 * pray that it didn't interrupt, so autoconf will ignore it
	 * - just in case our prayers fail, we will reference one
	 * of the more distant registers, and hope for a machine
	 * check, or similar disaster
	 */
	if (badaddr(&((struct device *)reg)->tmrd, 2))
		return(0);
	return(1);
}

tmslave(ui, reg, slaveno)
	struct uba_dinfo *ui;
	caddr_t reg;
{
	/*
	 * Due to a design flaw, we cannot ascertain if the tape
	 * exists or not unless it is on line - ie: unless a tape is
	 * mounted.  This is too severe a restriction to bear.
	 * As we can only handle one tape, we might just as well insist
	 * that it be slave #0, and just assume that it exists.
	 * Something better will have to be done if you have two
	 * tapes on one controller, or two controllers.
	 */
	printf("tm: sl %d - tmi %x\n", slaveno, tminfo[0]);
	if (slaveno != 0 || tminfo[0])
		return(0);
	return(1);
}
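
/*
 * Open the tape for reading or writing.  Only one open at a time is
 * allowed; the drive must be configured and must be selected (on line).
 * A NOP command is issued to fetch fresh status, any rewind or
 * settle-down in progress is waited out, and a write open on a
 * write-protected reel is refused.  Tape position bookkeeping
 * (t_blkno, t_nxrec) is reset for the new open.
 */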
tmopen(dev, flag)
	dev_t dev;
	int flag;
{
	register unit;
	register struct uba_dinfo *ui;

	tmtab.b_flags |= B_TAPE;
	unit = minor(dev)&03;
	if (unit >= NTM || t_openf || !(ui = tminfo[unit])->ui_alive) {
		u.u_error = ENXIO;		/* out of range or open */
		return;
	}
	tcommand(dev, NOP, 1);
	if ((t_erreg&SELR) == 0) {
		u.u_error = EIO;		/* offline */
		return;
	}
	t_openf = 1;
	if (t_erreg&RWS)
		tmwaitrws(dev);			/* wait for rewind complete */
	while (t_erreg&SDWN)
		tcommand(dev, NOP, 1);		/* await settle down */
	if ((t_erreg&TUR) == 0 ||
	    ((flag&(FREAD|FWRITE)) == FWRITE && (t_erreg&WRL))) {
		((struct device *)ui->ui_addr)->tmcs = DCLR|GO;
		u.u_error = EIO;		/* offline or write protect */
	}
	if (u.u_error != 0) {
		t_openf = 0;
		return;
	}
	t_blkno = (daddr_t)0;
	t_nxrec = INF;
	t_flags = 0;
	t_openf = 1;
}

tmwaitrws(dev)
	register dev;
{
	register struct device *addr =
	    (struct device *)tminfo[minor(dev)&03]->ui_addr;

	spl5();
	for (;;) {
		if ((addr->tmer&RWS) == 0) {
			spl0();			/* rewind complete */
			return;
		}
		t_flags |= WAITREW;
		sleep((caddr_t)&t_flags, PRIBIO);
	}
}

tmclose(dev, flag)
	register dev_t dev;
	register flag;
{

	if (flag == FWRITE || ((flag&FWRITE) && (t_flags&LASTIOW))) {
		tcommand(dev, WEOF, 1);
		tcommand(dev, WEOF, 1);
		tcommand(dev, SREV, 1);
	}
	if ((minor(dev)&T_NOREWIND) == 0)
		tcommand(dev, REW, 1);
	t_openf = 0;
}

tcommand(dev, com, count)
	dev_t dev;
	int com, count;
{
	register struct buf *bp;

	bp = &ctmbuf;
	(void) spl5();
	while (bp->b_flags&B_BUSY) {
		bp->b_flags |= B_WANTED;
		sleep((caddr_t)bp, PRIBIO);
	}
	bp->b_flags = B_BUSY|B_READ;
	(void) spl0();
	bp->b_dev = dev;
	bp->b_repcnt = -count;
	bp->b_command = com;
	bp->b_blkno = 0;
	tmstrategy(bp);
	iowait(bp);
	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	bp->b_flags &= B_ERROR;
}

tmstrategy(bp)
	register struct buf *bp;
{
	register daddr_t *p;

	tmwaitrws(bp->b_dev);
	if (bp != &ctmbuf) {
		p = &t_nxrec;
		if (dbtofsb(bp->b_blkno) > *p) {
			bp->b_flags |= B_ERROR;
			bp->b_error = ENXIO;		/* past EOF */
			iodone(bp);
			return;
		} else if (dbtofsb(bp->b_blkno) == *p && bp->b_flags&B_READ) {
			bp->b_resid = bp->b_bcount;
			clrbuf(bp);			/* at EOF */
			iodone(bp);
			return;
		} else if ((bp->b_flags&B_READ) == 0)
			*p = dbtofsb(bp->b_blkno) + 1;	/* write sets EOF */
	}
	bp->av_forw = NULL;
	(void) spl5();
	if (tmtab.b_actf == NULL)
		tmtab.b_actf = bp;
	else
		tmtab.b_actl->av_forw = bp;
	tmtab.b_actl = bp;
	if (tmtab.b_active == 0)
		tmstart();
	(void) spl0();
}
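
/*
 * Start the next transfer from the head of the queue.  A control
 * command (ctmbuf) is issued directly and the state set to SCOM; a data
 * transfer whose block number matches the current tape position is
 * mapped onto the UNIBUS and started (SIO); otherwise a record skip in
 * the appropriate direction is issued first (SSEEK), and tmintr will
 * restart the transfer once the tape is positioned.
 */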
tmstart()
{
	register struct buf *bp;
	register struct uba_dinfo *ui;
	register struct device *addr;
	register cmd;
	register daddr_t blkno;
	int s;

loop:
	if ((bp = tmtab.b_actf) == 0)
		return;
	ui = tminfo[minor(bp->b_dev)&03];
	addr = (struct device *)ui->ui_addr;
	t_dsreg = addr->tmcs;
	t_erreg = addr->tmer;
	t_resid = addr->tmbc;
	t_flags &= ~LASTIOW;
	if (t_openf < 0 || (addr->tmcs&CUR) == 0) {
		/* t_openf = -1; ??? */
		bp->b_flags |= B_ERROR;		/* hard error'ed or !SELR */
		goto next;
	}
	cmd = IENABLE | GO;
	if ((minor(bp->b_dev) & T_1600BPI) == 0)
		cmd |= D800;
	if (bp == &ctmbuf) {
		if (bp->b_command == NOP)
			goto next;		/* just get status */
		cmd |= bp->b_command;
		tmtab.b_active = SCOM;
		if (bp->b_command == SFORW || bp->b_command == SREV)
			addr->tmbc = bp->b_repcnt;
		addr->tmcs = cmd;
		return;
	}
	if ((blkno = t_blkno) == dbtofsb(bp->b_blkno)) {
		addr->tmbc = -bp->b_bcount;
		s = spl6();
		if (tm_ubinfo == 0)
			tm_ubinfo = ubasetup(ui->ui_ubanum, bp, 1);
		splx(s);
		if ((bp->b_flags&B_READ) == 0) {
			if (tmtab.b_errcnt)
				cmd |= WIRG;
			else
				cmd |= WCOM;
		} else
			cmd |= RCOM;
		cmd |= (tm_ubinfo >> 12) & 0x30;	/* unibus address bits 17-16 */
		tmtab.b_active = SIO;
		addr->tmba = tm_ubinfo;
		addr->tmcs = cmd;
		return;
	}
	tmtab.b_active = SSEEK;
	if (blkno < dbtofsb(bp->b_blkno)) {
		cmd |= SFORW;
		addr->tmbc = blkno - dbtofsb(bp->b_blkno);
	} else {
		cmd |= SREV;
		addr->tmbc = dbtofsb(bp->b_blkno) - blkno;
	}
	addr->tmcs = cmd;
	return;

next:
	ubarelse(ui->ui_ubanum, &tm_ubinfo);
	tmtab.b_actf = bp->av_forw;
	iodone(bp);
	goto loop;
}

tmdgo()
{
}
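
/*
 * Tape interrupt routine.  Wake up anyone waiting out a rewind, save
 * the device status, and sort out errors: a tape mark ends the file
 * (tmseteof), a record length error alone on a read is ignored, soft
 * errors during sequential i/o are retried until the error count
 * reaches 7, and hard errors shut the unit down.  The saved queue
 * state (SIO, SCOM, SSEEK) then determines how the block number
 * bookkeeping is updated before the next transfer is started.
 */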
tmintr(d)
{
	register struct buf *bp;
	register struct device *addr = (struct device *)tminfo[d]->ui_addr;
	register state;

	if (t_flags&WAITREW && (addr->tmer&RWS) == 0) {
		t_flags &= ~WAITREW;
		wakeup((caddr_t)&t_flags);
	}
	if ((bp = tmtab.b_actf) == NULL)
		return;
	t_dsreg = addr->tmcs;
	t_erreg = addr->tmer;
	t_resid = addr->tmbc;
	if ((bp->b_flags & B_READ) == 0)
		t_flags |= LASTIOW;
	state = tmtab.b_active;
	tmtab.b_active = 0;
	if (addr->tmcs&ERROR) {
		while (addr->tmer & SDWN)
			;			/* await settle down */
		if (addr->tmer&EOF) {
			tmseteof(bp);		/* set blkno and nxrec */
			state = SCOM;
			addr->tmbc = -bp->b_bcount;
			goto errout;
		}
		if ((bp->b_flags&B_READ) && (addr->tmer&(HARD|SOFT)) == RLE)
			goto out;
		if ((addr->tmer&HARD) == 0 && state == SIO) {
			if (++tmtab.b_errcnt < 7) {
				if ((addr->tmer&SOFT) == NXM)
					printf("TM UBA late error\n");
				t_blkno++;	/* past the record; tmstart backspaces and retries */
				ubarelse(tminfo[d]->ui_ubanum, &tm_ubinfo);
				tmstart();
				return;
			}
		} else if (t_openf > 0 && bp != &rtmbuf)
			t_openf = -1;
		deverror(bp, t_erreg, t_dsreg);
		bp->b_flags |= B_ERROR;
		state = SIO;
	}
out:
	switch (state) {

	case SIO:
		t_blkno++;
		/* fall into ... */

	case SCOM:
		if (bp == &ctmbuf) {
			switch (bp->b_command) {
			case SFORW:
				t_blkno -= bp->b_repcnt;
				break;

			case SREV:
				t_blkno += bp->b_repcnt;
				break;

			default:
				if (++bp->b_repcnt < 0) {
					tmstart();	/* continue */
					return;
				}
			}
		}
errout:
		tmtab.b_errcnt = 0;
		tmtab.b_actf = bp->av_forw;
		bp->b_resid = -addr->tmbc;
		ubarelse(tminfo[d]->ui_ubanum, &tm_ubinfo);
		iodone(bp);
		break;

	case SSEEK:
		t_blkno = dbtofsb(bp->b_blkno);
		break;

	default:
		return;
	}
	tmstart();
}

tmseteof(bp)
	register struct buf *bp;
{
	register struct device *addr =
	    (struct device *)tminfo[minor(bp->b_dev)&03]->ui_addr;

	if (bp == &ctmbuf) {
		if (t_blkno > dbtofsb(bp->b_blkno)) {
			/* reversing */
			t_nxrec = dbtofsb(bp->b_blkno) - addr->tmbc;
			t_blkno = t_nxrec;
		} else {
			/* spacing forward */
			t_blkno = dbtofsb(bp->b_blkno) + addr->tmbc;
			t_nxrec = t_blkno - 1;
		}
		return;
	}
	/* eof on read */
	t_nxrec = dbtofsb(bp->b_blkno);
}

tmread(dev)
{

	tmphys(dev);
	physio(tmstrategy, &rtmbuf, dev, B_READ, minphys);
}

tmwrite(dev)
{

	tmphys(dev);
	physio(tmstrategy, &rtmbuf, dev, B_WRITE, minphys);
}

tmphys(dev)
{
	register daddr_t a;

	a = dbtofsb(u.u_offset >> 9);
	t_blkno = a;
	t_nxrec = a + 1;
}

/*ARGSUSED*/
tmioctl(dev, cmd, addr, flag)
	caddr_t addr;
	dev_t dev;
{
	register callcount;
	int fcount;
	struct mtop mtop;
	struct mtget mtget;
	/* we depend on the values and order of the MT codes here */
	static tmops[] = {WEOF, SFORW, SREV, SFORW, SREV, REW, OFFL, NOP};

	switch (cmd) {
	case MTIOCTOP:	/* tape operation */
		if (copyin((caddr_t)addr, (caddr_t)&mtop, sizeof(mtop))) {
			u.u_error = EFAULT;
			return;
		}
		switch (mtop.mt_op) {
		case MTWEOF: case MTFSF: case MTBSF:
			callcount = mtop.mt_count;
			fcount = INF;
			break;
		case MTFSR: case MTBSR:
			callcount = 1;
			fcount = mtop.mt_count;
			break;
		case MTREW: case MTOFFL: case MTNOP:
			callcount = 1;
			fcount = 1;
			break;
		default:
			u.u_error = ENXIO;
			return;
		}
		if (callcount <= 0 || fcount <= 0)
			u.u_error = ENXIO;
		else while (--callcount >= 0) {
			tcommand(dev, tmops[mtop.mt_op], fcount);
			if ((mtop.mt_op == MTFSR || mtop.mt_op == MTBSR) &&
			    ctmbuf.b_resid) {
				u.u_error = EIO;
				break;
			}
			if ((ctmbuf.b_flags&B_ERROR) || t_erreg&BOT)
				break;
		}
		geterror(&ctmbuf);
		return;
	case MTIOCGET:
		mtget.mt_dsreg = t_dsreg;
		mtget.mt_erreg = t_erreg;
		mtget.mt_resid = t_resid;
		if (copyout((caddr_t)&mtget, addr, sizeof(mtget)))
			u.u_error = EFAULT;
		return;
	default:
		u.u_error = ENXIO;
	}
}

#define	DBSIZE	20
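
/*
 * Dump routine, called at system crash time to copy memory to the
 * tape.  The controller is addressed physically and driven by polling
 * (tmwait); memory is written DBSIZE pages at a time through the
 * UNIBUS map registers, and the dump is finished off with two tape
 * marks and a rewind.
 */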
tmdump()
{
	register struct uba_dinfo *ui;
	register struct uba_regs *up;
	register struct device *addr;
	int blk, num;
	int start;

	start = 0;
	num = maxfree;
#define	phys(a,b)	((b)((int)(a)&0x7fffffff))
	if (tminfo[0] == 0) {
		printf("dna\n");
		return (-1);
	}
	ui = phys(tminfo[0], struct uba_dinfo *);
	up = phys(ui->ui_hd, struct uba_hd *)->uh_physuba;
#if VAX780
	if (cpu == VAX_780)
		ubainit(up);
#endif
	DELAY(1000000);
	addr = (struct device *)ui->ui_physaddr;
	tmwait(addr);
	addr->tmcs = DCLR | GO;
	while (num > 0) {
		blk = num > DBSIZE ? DBSIZE : num;
		tmdwrite(start, blk, addr, up);
		start += blk;
		num -= blk;
	}
	tmwait(addr);
	tmeof(addr);
	tmeof(addr);
	tmrewind(addr);
	tmwait(addr);
	return (0);
}

tmdwrite(buf, num, addr, up)
	register buf, num;
	register struct device *addr;
	struct uba_regs *up;
{
	register struct pte *io;
	register int npf;

	tmwait(addr);
	io = up->uba_map;
	npf = num+1;
	while (--npf != 0)
		*(int *)io++ = (buf++ | (1<<UBA_DPSHIFT) | UBA_MRV);
	*(int *)io = 0;
	addr->tmbc = -(num*NBPG);
	addr->tmba = 0;
	addr->tmcs = WCOM | GO;
}

tmwait(addr)
	register struct device *addr;
{
	register s;

	do
		s = addr->tmcs;
	while ((s & CUR) == 0);
}

tmrewind(addr)
	struct device *addr;
{

	tmwait(addr);
	addr->tmcs = REW | GO;
}

tmeof(addr)
	struct device *addr;
{

	tmwait(addr);
	addr->tmcs = WEOF | GO;
}
#endif