/*	tm.c	4.13	02/19/81	*/

#include "tm.h"
#if NTM03 > 0
/*
 * TM tape driver
 *
 * THIS HANDLES ONLY ONE DRIVE ON ONE CONTROLLER, AS WE HAVE NO
 * WAY TO TEST MULTIPLE TRANSPORTS.
 */
#define	DELAY(N)		{ register int d = N; while (--d > 0); }
#include "../h/param.h"
#include "../h/buf.h"
#include "../h/dir.h"
#include "../h/conf.h"
#include "../h/user.h"
#include "../h/file.h"
#include "../h/map.h"
#include "../h/pte.h"
#include "../h/vm.h"
#include "../h/uba.h"
#include "../h/mtio.h"
#include "../h/ioctl.h"
#include "../h/cmap.h"
#include "../h/cpu.h"

#include "../h/tmreg.h"

struct	buf	ctmbuf;
struct	buf	rtmbuf;

int	tmcntrlr(), tmslave(), tmdgo(), tmintr();
struct	uba_minfo *tmminfo[NTM03];
struct	uba_dinfo *tmdinfo[NTM11];
u_short	tmstd[] = { 0772520, 0 };
struct	uba_driver tmdriver =
	{ tmcntrlr, tmslave, tmdgo, 0, tmstd, "mt", tmdinfo, "tm", tmminfo };

/* bits in minor device */
#define	T_NOREWIND	04
#define	T_1600BPI	010

#define	INF	(daddr_t)1000000L

struct	tm_softc {
	char	sc_openf;
	char	sc_flags;
	daddr_t	sc_blkno;
	daddr_t	sc_nxrec;
	u_short	sc_erreg;
	u_short	sc_dsreg;
	short	sc_resid;
} tm_softc[NTM03];

#define	SSEEK	1		/* seeking */
#define	SIO	2		/* doing seq i/o */
#define	SCOM	3		/* sending control command */

#define	LASTIOW 1		/* last op was a write */
#define	WAITREW	2		/* someone is waiting for a rewind */

/*
 * Determine if there is a controller for
 * a tm at address reg.  Our goal is to make the
 * device interrupt.
 */
tmcntrlr(um, reg)
	struct uba_minfo *um;
	caddr_t reg;
{
	register int br, cvec;

	((struct device *)reg)->tmcs = IENABLE;
	/*
	 * If this is a tm03/tc11, it ought to have interrupted
	 * by now; if it isn't one (ie: it is a ts04) then we just
	 * hope that it didn't interrupt, so autoconf will ignore it.
	 * Just in case, we will reference one
	 * of the more distant registers, and hope for a machine
	 * check, or similar disaster if this is a ts.
	 *
	 * Note: on an 11/780, badaddr will just generate
	 * a uba error for a ts; but our caller will notice that
	 * so we won't check for it.
	 */
	if (badaddr(&((struct device *)reg)->tmrd, 2))
		return (0);
	return (1);
}

tmslave(ui, reg)
	struct uba_dinfo *ui;
	caddr_t reg;
{

	/*
	 * Due to a design flaw, we cannot ascertain if the tape
	 * exists or not unless it is on line - ie: unless a tape is
	 * mounted. This is too severe a restriction to bear.
	 * As we can only handle one tape, we might just as well insist
	 * that it be slave #0, and just assume that it exists.
	 * Something better will have to be done if you have two
	 * tapes on one controller, or two controllers.
	 */
	if (ui->ui_slave != 0 || tmdinfo[0])
		return(0);
	return (1);
}

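/*
 * Open the tape; only one open per drive at a time.
 * A NOP command is issued to fetch current status, any rewind or
 * settle-down in progress is waited out, and the open fails if the
 * drive is off line or is write protected on a write-only open.
 */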
tmopen(dev, flag)
	dev_t dev;
	int flag;
{
	register ds, unit;
	register struct uba_dinfo *ui;
	register struct tm_softc *sc = &tm_softc[0];

	tmminfo[0]->um_tab.b_flags |= B_TAPE;
	unit = minor(dev)&03;
	if (unit>=NTM11 || sc->sc_openf || (ui = tmdinfo[0]) == 0 || ui->ui_alive==0) {
		u.u_error = ENXIO;		/* out of range or open */
		return;
	}
	tmcommand(dev, NOP, 1);
	if ((sc->sc_erreg&SELR) == 0) {
		u.u_error = EIO;
		goto eio;
	}
	sc->sc_openf = 1;
	if (sc->sc_erreg&RWS)
		tmwaitrws(dev);			/* wait for rewind complete */
	while (sc->sc_erreg&SDWN)
		tmcommand(dev, NOP, 1);		/* await settle down */
	if ((sc->sc_erreg&TUR)==0 ||
	    ((flag&(FREAD|FWRITE)) == FWRITE && (sc->sc_erreg&WRL))) {
		((struct device *)ui->ui_addr)->tmcs = DCLR|GO;
		u.u_error = EIO;		/* offline or write protect */
	}
	if (u.u_error != 0) {
		sc->sc_openf = 0;
		if (u.u_error == EIO)
eio:
			uprintf("tape offline or protected\n");
		return;
	}
	sc->sc_blkno = (daddr_t)0;
	sc->sc_nxrec = INF;
	sc->sc_flags = 0;
	sc->sc_openf = 1;
}

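/*
 * Wait for an in-progress rewind to complete,
 * sleeping on sc_flags; tmintr wakes us when RWS drops.
 */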
tmwaitrws(dev)
	register dev;
{
	register struct device *addr =
	    (struct device *)tmdinfo[0]->ui_addr;
	register struct tm_softc *sc = &tm_softc[0];

	spl5();
	for (;;) {
		if ((addr->tmer&RWS) == 0) {
			spl0();		/* rewind complete */
			return;
		}
		sc->sc_flags |= WAITREW;
		sleep((caddr_t)&sc->sc_flags, PRIBIO);
	}
}

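/*
 * Close the tape.  If it was open for writing (or the last operation
 * was a write), write two tape marks and backspace over the second.
 * Rewind unless the no-rewind minor device was used.
 */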
tmclose(dev, flag)
	register dev_t dev;
	register flag;
{
	register struct tm_softc *sc = &tm_softc[0];

	if (flag == FWRITE || ((flag&FWRITE) && (sc->sc_flags&LASTIOW))) {
		tmcommand(dev, WEOF, 1);
		tmcommand(dev, WEOF, 1);
		tmcommand(dev, SREV, 1);
	}
	if ((minor(dev)&T_NOREWIND) == 0)
		tmcommand(dev, REW, 1);
	sc->sc_openf = 0;
}

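/*
 * Execute a control operation on the drive, using the control
 * buffer ctmbuf: wait for the buffer, queue the command through
 * tmstrategy, and wait for it to complete.
 */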
tmcommand(dev, com, count)
	dev_t dev;
	int com, count;
{
	register struct buf *bp;

	bp = &ctmbuf;
	(void) spl5();
	while (bp->b_flags&B_BUSY) {
		bp->b_flags |= B_WANTED;
		sleep((caddr_t)bp, PRIBIO);
	}
	bp->b_flags = B_BUSY|B_READ;
	(void) spl0();
	bp->b_dev = dev;
	bp->b_repcnt = -count;
	bp->b_command = com;
	bp->b_blkno = 0;
	tmstrategy(bp);
	iowait(bp);
	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	bp->b_flags &= B_ERROR;
}

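/*
 * Queue a tape transfer.  For data transfers we enforce sc_nxrec,
 * the first record past the known end of data: requests beyond it
 * fail, a read at it returns nothing, and a write pushes it forward.
 * The buffer is then placed on the controller queue and the
 * controller started if it is idle.
 */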
tmstrategy(bp)
	register struct buf *bp;
{
	register daddr_t *p;
	register struct buf *tmi;

	tmwaitrws(bp->b_dev);
	if (bp != &ctmbuf) {
		p = &tm_softc[0].sc_nxrec;
		if (dbtofsb(bp->b_blkno) > *p) {
			bp->b_flags |= B_ERROR;
			bp->b_error = ENXIO;		/* past EOF */
			iodone(bp);
			return;
		} else if (dbtofsb(bp->b_blkno) == *p && bp->b_flags&B_READ) {
			bp->b_resid = bp->b_bcount;
			clrbuf(bp);			/* at EOF */
			iodone(bp);
			return;
		} else if ((bp->b_flags&B_READ) == 0)
			*p = dbtofsb(bp->b_blkno) + 1;	/* write sets EOF */
	}
	bp->av_forw = NULL;
	(void) spl5();
	tmi = &tmminfo[0]->um_tab;
	if (tmi->b_actf == NULL)
		tmi->b_actf = bp;
	else
		tmi->b_actl->av_forw = bp;
	tmi->b_actl = bp;
	if (tmi->b_active == 0)
		tmstart();
	(void) spl0();
}

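/*
 * Start the next operation on the controller queue.  Control
 * operations are handed straight to the drive; a data transfer is
 * started through the UNIBUS adapter if the tape is positioned at
 * the desired block, otherwise a space forward/reverse is issued
 * to get there first.
 */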
tmstart()
{
	register struct buf *bp;
	register struct uba_minfo *um = tmminfo[0];
	register struct uba_dinfo *ui;
	register struct device *addr;
	register struct tm_softc *sc = &tm_softc[0];
	int cmd;
	daddr_t blkno;

loop:
	if ((bp = um->um_tab.b_actf) == 0)
		return;
	ui = tmdinfo[0];
	addr = (struct device *)ui->ui_addr;
	sc->sc_dsreg = addr->tmcs;
	sc->sc_erreg = addr->tmer;
	sc->sc_resid = addr->tmbc;
	sc->sc_flags &= ~LASTIOW;
	if (sc->sc_openf < 0 || (addr->tmcs&CUR) == 0) {
		/* sc->sc_openf = -1; ??? */
		bp->b_flags |= B_ERROR;		/* hard error'ed or !SELR */
		goto next;
	}
	cmd = IENABLE | GO;
	if ((minor(bp->b_dev) & T_1600BPI) == 0)
		cmd |= D800;
	if (bp == &ctmbuf) {
		if (bp->b_command == NOP)
			goto next;		/* just get status */
		else {
			cmd |= bp->b_command;
			um->um_tab.b_active = SCOM;
			if (bp->b_command == SFORW || bp->b_command == SREV)
				addr->tmbc = bp->b_repcnt;
			addr->tmcs = cmd;
			return;
		}
	}
	if ((blkno = sc->sc_blkno) == dbtofsb(bp->b_blkno)) {
		addr->tmbc = -bp->b_bcount;
		if ((bp->b_flags&B_READ) == 0) {
			if (um->um_tab.b_errcnt)
				cmd |= WIRG;
			else
				cmd |= WCOM;
		} else
			cmd |= RCOM;
		um->um_tab.b_active = SIO;
		if (um->um_ubinfo)
			panic("tmstart");
		um->um_cmd = cmd;
		ubago(ui);
		return;
	}
	um->um_tab.b_active = SSEEK;
	if (blkno < dbtofsb(bp->b_blkno)) {
		cmd |= SFORW;
		addr->tmbc = blkno - dbtofsb(bp->b_blkno);
	} else {
		cmd |= SREV;
		addr->tmbc = dbtofsb(bp->b_blkno) - blkno;
	}
	addr->tmcs = cmd;
	return;

next:
	ubarelse(um->um_ubanum, &um->um_ubinfo);
	um->um_tab.b_actf = bp->av_forw;
	iodone(bp);
	goto loop;
}

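/*
 * Called from the UNIBUS adapter code once map registers for a data
 * transfer have been allocated: load the buffer address (including
 * the extended address bits) and the command into the device.
 */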
tmdgo(um)
	register struct uba_minfo *um;
{
	register struct device *addr = (struct device *)um->um_addr;

	printf("tmdgo %x %x\n", um->um_ubinfo, um->um_cmd);
	addr->tmba = um->um_ubinfo;
	addr->tmcs = um->um_cmd | ((um->um_ubinfo >> 12) & 0x30);
}

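/*
 * Tape interrupt.  Wake up anyone waiting on a rewind, save the
 * device status, and sort out errors: a tape mark updates the block
 * bookkeeping, soft errors on sequential i/o are retried a few
 * times, and hard errors take the unit down.  Successful sequential
 * i/o advances sc_blkno, and the next request is started.
 */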
/*ARGSUSED*/
tmintr(d)
	int d;
{
	register struct buf *bp;
	register struct uba_minfo *um = tmminfo[0];
	register struct device *addr = (struct device *)tmdinfo[0]->ui_addr;
	register struct tm_softc *sc = &tm_softc[0];
	register state;

	printf("tmintr %x %x\n", um->um_tab.b_actf, um->um_tab.b_active);
	if (sc->sc_flags&WAITREW && (addr->tmer&RWS) == 0) {
		sc->sc_flags &= ~WAITREW;
		wakeup((caddr_t)&sc->sc_flags);
	}
	if ((bp = um->um_tab.b_actf) == NULL)
		return;
	sc->sc_dsreg = addr->tmcs;
	sc->sc_erreg = addr->tmer;
	sc->sc_resid = addr->tmbc;
	if ((bp->b_flags & B_READ) == 0)
		sc->sc_flags |= LASTIOW;
	state = um->um_tab.b_active;
	um->um_tab.b_active = 0;
	if (addr->tmcs&ERROR) {
		while(addr->tmer & SDWN)
			;			/* await settle down */
		if (addr->tmer&EOF) {
			tmseteof(bp);	/* set blkno and nxrec */
			state = SCOM;
			addr->tmbc = -bp->b_bcount;
			goto errout;
		}
		if ((bp->b_flags&B_READ) && (addr->tmer&(HARD|SOFT)) == RLE)
			goto out;
		if ((addr->tmer&HARD)==0 && state==SIO) {
			if (++um->um_tab.b_errcnt < 7) {
				if((addr->tmer&SOFT) == NXM)
					printf("TM UBA late error\n");
				sc->sc_blkno++;
				ubarelse(um->um_ubanum, &um->um_ubinfo);
				tmstart();
				return;
			}
		} else if (sc->sc_openf>0 && bp != &rtmbuf)
			sc->sc_openf = -1;
		deverror(bp, sc->sc_erreg, sc->sc_dsreg);
		bp->b_flags |= B_ERROR;
		state = SIO;
	}
out:
	switch (state) {

	case SIO:
		sc->sc_blkno++;
		/* fall into ... */

	case SCOM:
		if (bp == &ctmbuf) {
			switch (bp->b_command) {
			case SFORW:
				sc->sc_blkno -= bp->b_repcnt;
				break;

			case SREV:
				sc->sc_blkno += bp->b_repcnt;
				break;

			default:
				if (++bp->b_repcnt < 0) {
					tmstart();	/* continue */
					return;
				}
			}
		}
errout:
		um->um_tab.b_errcnt = 0;
		um->um_tab.b_actf = bp->av_forw;
		bp->b_resid = -addr->tmbc;
		ubarelse(um->um_ubanum, &um->um_ubinfo);
		iodone(bp);
		break;

	case SSEEK:
		sc->sc_blkno = dbtofsb(bp->b_blkno);
		break;

	default:
		return;
	}
	tmstart();
}

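/*
 * A tape mark was encountered; update sc_blkno and sc_nxrec
 * to reflect its position, for both spacing operations and
 * ordinary reads.
 */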
tmseteof(bp)
	register struct buf *bp;
{
	register struct device *addr =
	    (struct device *)tmdinfo[0]->ui_addr;
	register struct tm_softc *sc = &tm_softc[0];

	if (bp == &ctmbuf) {
		if (sc->sc_blkno > dbtofsb(bp->b_blkno)) {
			/* reversing */
			sc->sc_nxrec = dbtofsb(bp->b_blkno) - addr->tmbc;
			sc->sc_blkno = sc->sc_nxrec;
		} else {
			/* spacing forward */
			sc->sc_blkno = dbtofsb(bp->b_blkno) + addr->tmbc;
			sc->sc_nxrec = sc->sc_blkno - 1;
		}
		return;
	}
	/* eof on read */
	sc->sc_nxrec = dbtofsb(bp->b_blkno);
}

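/*
 * Raw device read/write: set the position bookkeeping from the
 * current offset and do the transfer through physio.
 */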
tmread(dev)
{

	tmphys(dev);
	physio(tmstrategy, &rtmbuf, dev, B_READ, minphys);
}

tmwrite(dev)
{

	tmphys(dev);
	physio(tmstrategy, &rtmbuf, dev, B_WRITE, minphys);
}

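/*
 * Reset sc_blkno and sc_nxrec from the current file offset so that
 * a raw transfer is treated as being at the right place on the tape.
 */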
tmphys(dev)
{
	register daddr_t a;
	register struct tm_softc *sc = &tm_softc[0];

	a = dbtofsb(u.u_offset >> 9);
	sc->sc_blkno = a;
	sc->sc_nxrec = a + 1;
}

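/*
 * Tape ioctls.  MTIOCTOP maps the mt operation codes onto driver
 * commands and repeats them as requested; MTIOCGET returns the
 * status saved from the last operation.
 */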
/*ARGSUSED*/
tmioctl(dev, cmd, addr, flag)
	caddr_t addr;
	dev_t dev;
{
	register callcount;
	register struct tm_softc *sc = &tm_softc[0];
	int fcount;
	struct mtop mtop;
	struct mtget mtget;
	/* we depend on the values and order of the MT codes here */
	static tmops[] = {WEOF, SFORW, SREV, SFORW, SREV, REW, OFFL, NOP};

	switch(cmd) {
	case MTIOCTOP:	/* tape operation */
		if (copyin((caddr_t)addr, (caddr_t)&mtop, sizeof(mtop))) {
			u.u_error = EFAULT;
			return;
		}
		switch(mtop.mt_op) {
		case MTWEOF: case MTFSF: case MTBSF:
			callcount = mtop.mt_count;
			fcount = INF;
			break;
		case MTFSR: case MTBSR:
			callcount = 1;
			fcount = mtop.mt_count;
			break;
		case MTREW: case MTOFFL: case MTNOP:
			callcount = 1;
			fcount = 1;
			break;
		default:
			u.u_error = ENXIO;
			return;
		}
		if (callcount <= 0 || fcount <= 0)
			u.u_error = ENXIO;
		else while (--callcount >= 0) {
			tmcommand(dev, tmops[mtop.mt_op], fcount);
			if ((mtop.mt_op == MTFSR || mtop.mt_op == MTBSR) &&
			    ctmbuf.b_resid) {
				u.u_error = EIO;
				break;
			}
			if ((ctmbuf.b_flags&B_ERROR) ||
			    sc->sc_erreg&BOT)
				break;
		}
		geterror(&ctmbuf);
		return;
	case MTIOCGET:
		mtget.mt_dsreg = sc->sc_dsreg;
		mtget.mt_erreg = sc->sc_erreg;
		mtget.mt_resid = sc->sc_resid;
		if (copyout((caddr_t)&mtget, addr, sizeof(mtget)))
			u.u_error = EFAULT;
		return;
	default:
		u.u_error = ENXIO;
	}
}

#define	DBSIZE	20

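/*
 * Dump memory to tape (crash dump).  The device is polled rather
 * than interrupt driven: memory is written in DBSIZE-page chunks,
 * followed by two tape marks and a rewind.
 */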
tmdump()
{
	register struct uba_dinfo *ui;
	register struct uba_regs *up;
	register struct device *addr;
	int blk, num;
	int start;

	start = 0;
	num = maxfree;
#define	phys(a,b)	((b)((int)(a)&0x7fffffff))
	if (tmdinfo[0] == 0) {
		printf("dna\n");
		return (-1);
	}
	ui = phys(tmdinfo[0], struct uba_dinfo *);
	up = phys(ui->ui_hd, struct uba_hd *)->uh_physuba;
#if VAX780
	if (cpu == VAX_780)
		ubainit(up);
#endif
	DELAY(1000000);
	addr = (struct device *)ui->ui_physaddr;
	tmwait(addr);
	addr->tmcs = DCLR | GO;
	while (num > 0) {
		blk = num > DBSIZE ? DBSIZE : num;
		tmdwrite(start, blk, addr, up);
		start += blk;
		num -= blk;
	}
	tmeof(addr);
	tmeof(addr);
	tmwait(addr);
	addr->tmcs = REW | GO;
	tmwait(addr);
	return (0);
}

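/*
 * Write one chunk of the dump: map the physical pages through the
 * UBA map registers, then start a write of num pages.
 */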
tmdwrite(buf, num, addr, up)
	register buf, num;
	register struct device *addr;
	struct uba_regs *up;
{
	register struct pte *io;
	register int npf;

	tmwait(addr);
	io = up->uba_map;
	npf = num+1;
	while (--npf != 0)
		*(int *)io++ = (buf++ | (1<<UBA_DPSHIFT) | UBA_MRV);
	*(int *)io = 0;
	addr->tmbc = -(num*NBPG);
	addr->tmba = 0;
	addr->tmcs = WCOM | GO;
}

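/*
 * Busy-wait until the controller is ready to accept a command.
 */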
tmwait(addr)
	register struct device *addr;
{
	register s;

	do
		s = addr->tmcs;
	while ((s & CUR) == 0);
}

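/*
 * Write a tape mark (polled, for dumps).
 */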
tmeof(addr)
	struct device *addr;
{

	tmwait(addr);
	addr->tmcs = WEOF | GO;
}
#endif