xref: /csrg-svn/sys/vax/if/if_dmc.c (revision 23286)
1 /*
2  * Copyright (c) 1982 Regents of the University of California.
3  * All rights reserved.  The Berkeley software License Agreement
4  * specifies the terms and conditions for redistribution.
5  *
6  *	@(#)if_dmc.c	6.6 (Berkeley) 06/08/85
7  */
8 
9 #include "dmc.h"
10 #if NDMC > 0
11 
12 /*
13  * DMC11 device driver, internet version
14  *
15  *	Bill Nesheim
16  *	Cornell University
17  *
18  *	Lou Salkind
19  *	New York University
20  */
21 
22 /* #define DEBUG	/* for base table dump on fatal error */
23 
24 #include "../machine/pte.h"
25 
26 #include "param.h"
27 #include "systm.h"
28 #include "mbuf.h"
29 #include "buf.h"
30 #include "ioctl.h"		/* must precede tty.h */
31 #include "tty.h"
32 #include "protosw.h"
33 #include "socket.h"
34 #include "vmmac.h"
35 #include "errno.h"
36 
37 #include "../net/if.h"
38 #include "../net/netisr.h"
39 #include "../net/route.h"
40 #include "../netinet/in.h"
41 #include "../netinet/in_systm.h"
42 #include "../netinet/ip.h"
43 #include "../netinet/ip_var.h"
44 
45 #include "../vax/cpu.h"
46 #include "../vax/mtpr.h"
47 #include "if_uba.h"
48 #include "if_dmc.h"
49 #include "../vaxuba/ubareg.h"
50 #include "../vaxuba/ubavar.h"
51 
52 #include "../h/time.h"
53 #include "../h/kernel.h"
54 
55 int	dmctimer;			/* timer started? */
56 int	dmc_timeout = 8;		/* timeout value */
57 int	dmcwatch();
58 
59 /*
60  * Driver information for auto-configuration stuff.
61  */
62 int	dmcprobe(), dmcattach(), dmcinit(), dmcioctl();
63 int	dmcoutput(), dmcreset();
64 struct	uba_device *dmcinfo[NDMC];
65 u_short	dmcstd[] = { 0 };
66 struct	uba_driver dmcdriver =
67 	{ dmcprobe, 0, dmcattach, 0, dmcstd, "dmc", dmcinfo };
68 
69 #define NRCV 7
70 #define NXMT 3
71 #define NTOT (NRCV + NXMT)
72 #define NCMDS	(NTOT+4)	/* size of command queue */
73 
74 #define printd if(dmcdebug)printf
75 int dmcdebug = 0;
76 
77 /* error reporting intervals */
78 #define DMC_RPNBFS	50
79 #define DMC_RPDSC	1
80 #define DMC_RPTMO	10
81 #define DMC_RPDCK	10
82 
83 struct  dmc_command {
84 	char	qp_cmd;		/* command */
85 	short	qp_ubaddr;	/* buffer address */
86 	short	qp_cc;		/* character count || XMEM */
87 	struct	dmc_command *qp_next;	/* next command on queue */
88 };
89 
90 /*
91  * The dmcuba structures generalize the ifuba structure
92  * to an arbitrary number of receive and transmit buffers.
93  */
94 struct	ifxmt {
95 	struct	ifrw x_ifrw;		/* mapping info */
96 	struct	pte x_map[IF_MAXNUBAMR];	/* output base pages */
97 	short 	x_xswapd;		/* mask of clusters swapped */
98 	struct	mbuf *x_xtofree;	/* pages being dma'd out */
99 };
100 
101 struct	dmcuba {
102 	short	ifu_uban;		/* uba number */
103 	short	ifu_hlen;		/* local net header length */
104 	struct	uba_regs *ifu_uba;	/* uba regs, in vm */
105 	struct	ifrw ifu_r[NRCV];	/* receive information */
106 	struct	ifxmt ifu_w[NXMT];	/* transmit information */
107 				/* these should only be pointers */
108 	short	ifu_flags;		/* used during uballoc's */
109 };
110 
111 struct dmcbufs {
112 	int	ubinfo;		/* from uballoc */
113 	short	cc;		/* buffer size */
114 	short	flags;		/* access control */
115 };
116 #define	DBUF_OURS	0	/* buffer is available */
117 #define	DBUF_DMCS	1	/* buffer claimed by somebody */
118 #define	DBUF_XMIT	4	/* transmit buffer */
119 #define	DBUF_RCV	8	/* receive buffer */
120 
121 struct mbuf *dmc_get();
122 
123 /*
124  * DMC software status per interface.
125  *
126  * Each interface is referenced by a network interface structure,
127  * sc_if, which the routing code uses to locate the interface.
128  * This structure contains the output queue for the interface, its address, ...
 * We also have, for each interface, a set of 7 UBA interface
 * structures, which
 * contain information about the UNIBUS resources held by the interface:
132  * map registers, buffered data paths, etc.  Information is cached in this
133  * structure for use by the if_uba.c routines in running the interface
134  * efficiently.
135  */
136 struct dmc_softc {
137 	short	sc_oused;		/* output buffers currently in use */
138 	short	sc_iused;		/* input buffers given to DMC */
139 	short	sc_flag;		/* flags */
140 	int	sc_nticks;		/* seconds since last interrupt */
141 	struct	ifnet sc_if;		/* network-visible interface */
142 	struct	dmcbufs sc_rbufs[NRCV];	/* receive buffer info */
143 	struct	dmcbufs sc_xbufs[NXMT];	/* transmit buffer info */
144 	struct	dmcuba sc_ifuba;	/* UNIBUS resources */
145 	int	sc_ubinfo;		/* UBA mapping info for base table */
146 	int	sc_errors[4];		/* non-fatal error counters */
147 #define sc_datck sc_errors[0]
148 #define sc_timeo sc_errors[1]
149 #define sc_nobuf sc_errors[2]
150 #define sc_disc  sc_errors[3]
151 	/* command queue stuff */
152 	struct	dmc_command sc_cmdbuf[NCMDS];
153 	struct	dmc_command *sc_qhead;	/* head of command queue */
154 	struct	dmc_command *sc_qtail;	/* tail of command queue */
155 	struct	dmc_command *sc_qactive;	/* command in progress */
156 	struct	dmc_command *sc_qfreeh;	/* head of list of free cmd buffers */
157 	struct	dmc_command *sc_qfreet;	/* tail of list of free cmd buffers */
158 	/* end command queue stuff */
159 } dmc_softc[NDMC];
160 
/* values for sc_flag */
#define DMC_ALLOC	01		/* unibus resources allocated */
#define DMC_BMAPPED	02		/* base table mapped */
#define DMC_RESTART	04		/* software restart in progress */
#define DMC_ACTIVE	010		/* device active; NB: must be octal 010 (= 8) -- "08" is an invalid octal constant */
166 
167 struct dmc_base {
168 	short	d_base[128];		/* DMC base table */
169 } dmc_base[NDMC];
170 
171 /* queue manipulation macros */
172 #define	QUEUE_AT_HEAD(qp, head, tail) \
173 	(qp)->qp_next = (head); \
174 	(head) = (qp); \
175 	if ((tail) == (struct dmc_command *) 0) \
176 		(tail) = (head)
177 
178 #define QUEUE_AT_TAIL(qp, head, tail) \
179 	if ((tail)) \
180 		(tail)->qp_next = (qp); \
181 	else \
182 		(head) = (qp); \
183 	(qp)->qp_next = (struct dmc_command *) 0; \
184 	(tail) = (qp)
185 
186 #define DEQUEUE(head, tail) \
187 	(head) = (head)->qp_next;\
188 	if ((head) == (struct dmc_command *) 0)\
189 		(tail) = (head)
190 
/*
 * Probe for a DMC11: master-clear the device and spin (bounded) for
 * the microcode to raise DMC_RUN, then request an input interrupt so
 * autoconfiguration can latch the interrupt vector.
 * Returns 1 if the device came up, 0 otherwise.
 */
dmcprobe(reg)
	caddr_t reg;
{
	register int br, cvec;
	register struct dmcdevice *addr = (struct dmcdevice *)reg;
	register int i;

#ifdef lint
	br = 0; cvec = br; br = cvec;
	dmcrint(0); dmcxint(0);
#endif
	/* master clear, then wait for the microprocessor to signal RUN */
	addr->bsel1 = DMC_MCLR;
	for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
		;
	if ((addr->bsel1 & DMC_RUN) == 0) {
		printf("dmcprobe: can't start device\n" );
		return (0);
	}
	/* request an input transfer so the device interrupts (vector probe) */
	addr->bsel0 = DMC_RQI|DMC_IEI;
	/* let's be paranoid */
	addr->bsel0 |= DMC_RQI|DMC_IEI;
	DELAY(1000000);
	/* clear again; dmcinit will restart the device properly later */
	addr->bsel1 = DMC_MCLR;
	for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
		;
	return (1);
}
218 
219 /*
220  * Interface exists: make available by filling in network interface
221  * record.  System will initialize the interface when it is ready
222  * to accept packets.
223  */
224 dmcattach(ui)
225 	register struct uba_device *ui;
226 {
227 	register struct dmc_softc *sc = &dmc_softc[ui->ui_unit];
228 
229 	sc->sc_if.if_unit = ui->ui_unit;
230 	sc->sc_if.if_name = "dmc";
231 	sc->sc_if.if_mtu = DMCMTU;
232 	sc->sc_if.if_init = dmcinit;
233 	sc->sc_if.if_output = dmcoutput;
234 	sc->sc_if.if_ioctl = dmcioctl;
235 	sc->sc_if.if_reset = dmcreset;
236 	sc->sc_if.if_flags = IFF_POINTOPOINT;
237 	sc->sc_ifuba.ifu_flags = UBA_CANTWAIT;
238 
239 	if_attach(&sc->sc_if);
240 	if (dmctimer == 0) {
241 		dmctimer = 1;
242 		timeout(dmcwatch, (caddr_t) 0, hz);
243 	}
244 }
245 
246 /*
247  * Reset of interface after UNIBUS reset.
248  * If interface is on specified UBA, reset its state.
249  */
250 dmcreset(unit, uban)
251 	int unit, uban;
252 {
253 	register struct uba_device *ui;
254 	register struct dmc_softc *sc = &dmc_softc[unit];
255 
256 	if (unit >= NDMC || (ui = dmcinfo[unit]) == 0 || ui->ui_alive == 0 ||
257 	    ui->ui_ubanum != uban)
258 		return;
259 	printf(" dmc%d", unit);
260 	sc->sc_flag = 0;
261 	sc->sc_if.if_flags &= ~IFF_RUNNING;
262 	dmcinit(unit);
263 }
264 
/*
 * Initialization of interface; reinitialize UNIBUS usage.
 * Requires that both a local and a destination address have been
 * set and that the device microcode is running.  Maps the base
 * table, allocates UNIBUS resources on first call, primes the
 * buffer descriptors, loads the base table and line-control
 * parameters, and queues all receive buffers to the DMC.
 */
dmcinit(unit)
	int unit;
{
	register struct dmc_softc *sc = &dmc_softc[unit];
	register struct uba_device *ui = dmcinfo[unit];
	register struct dmcdevice *addr;
	register struct ifnet *ifp = &sc->sc_if;
	register struct ifrw *ifrw;
	register struct ifxmt *ifxp;
	register struct dmcbufs *rp;
	register struct dmc_command *qp;
	struct ifaddr *ifa;
	int base;
	int s;

	addr = (struct dmcdevice *)ui->ui_addr;

	/*
	 * Check to see that an address has been set
	 * (both local and destination for an address family).
	 */
	for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next)
		if (ifa->ifa_addr.sa_family && ifa->ifa_dstaddr.sa_family)
			break;
	if (ifa == (struct ifaddr *) 0)
		return;

	if ((addr->bsel1&DMC_RUN) == 0) {
		printf("dmcinit: DMC not running\n");
		ifp->if_flags &= ~IFF_UP;
		return;
	}
	/* map base table into UNIBUS space (done once; flag remembers it) */
	if ((sc->sc_flag & DMC_BMAPPED) == 0) {
		sc->sc_ubinfo = uballoc(ui->ui_ubanum,
			(caddr_t)&dmc_base[unit], sizeof (struct dmc_base), 0);
		sc->sc_flag |= DMC_BMAPPED;
	}
	/* initialize UNIBUS resources */
	sc->sc_iused = sc->sc_oused = 0;
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		if (dmc_ubainit(&sc->sc_ifuba, ui->ui_ubanum,
		    sizeof(struct dmc_header), (int)btoc(DMCMTU)) == 0) {
			printf("dmc%d: can't allocate uba resources\n", unit);
			ifp->if_flags &= ~IFF_UP;
			return;
		}
		ifp->if_flags |= IFF_RUNNING;
	}

	/* initialize buffer pool */
	/* receives: full-size buffers, ours until handed to the DMC */
	ifrw = &sc->sc_ifuba.ifu_r[0];
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		/* low 18 bits of ifrw_info are the UNIBUS buffer address */
		rp->ubinfo = ifrw->ifrw_info & 0x3ffff;
		rp->cc = DMCMTU + sizeof (struct dmc_header);
		rp->flags = DBUF_OURS|DBUF_RCV;
		ifrw++;
	}
	/* transmits: byte count is filled in when a packet is queued */
	ifxp = &sc->sc_ifuba.ifu_w[0];
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
		rp->ubinfo = ifxp->x_ifrw.ifrw_info & 0x3ffff;
		rp->cc = 0;
		rp->flags = DBUF_OURS|DBUF_XMIT;
		ifxp++;
	}

	/* set up command queues */
	sc->sc_qfreeh = sc->sc_qfreet
		 = sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
		(struct dmc_command *)0;
	/* set up free command buffer list */
	for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NCMDS]; qp++) {
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
	}

	/* base in: hand the device the UNIBUS address of the base table */
	base = sc->sc_ubinfo & 0x3ffff;
	dmcload(sc, DMC_BASEI, base, (base>>2) & DMC_XMEM);
	/* specify line mode; ui_flags tell if primary */
	/* or secondary station */
	if (ui->ui_flags == 0)
		/* use DDCMP mode in full duplex */
		dmcload(sc, DMC_CNTLI, 0, 0);
	else if (ui->ui_flags == 1)
		/* use MAINTENANCE mode */
		dmcload(sc, DMC_CNTLI, 0, DMC_MAINT );
	else if (ui->ui_flags == 2)
		/* use DDCMP half duplex as primary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
	else if (ui->ui_flags == 3)
		/* use DDCMP half duplex as secondary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);

	/* enable operation done interrupts */
	sc->sc_flag &= ~DMC_ACTIVE;
	while ((addr->bsel2 & DMC_IEO) == 0)
		addr->bsel2 |= DMC_IEO;
	s = spl5();
	/* queue first NRCV buffers for DMC to fill */
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->flags |= DBUF_DMCS;
		dmcload(sc, DMC_READ, rp->ubinfo,
			(((rp->ubinfo>>2)&DMC_XMEM) | rp->cc));
		sc->sc_iused++;
	}
	splx(s);
}
377 
/*
 * Start output on interface.  Get another datagram
 * to send from the interface queue and map it to
 * the interface before starting output.
 *
 * Must be called at spl 5.
 */
dmcstart(dev)
	dev_t dev;
{
	int unit = minor(dev);
	register struct dmc_softc *sc = &dmc_softc[unit];
	struct mbuf *m;
	register struct dmcbufs *rp;
	register int n;

	/*
	 * Dequeue up to NXMT requests and map them to the UNIBUS.
	 * If no more requests, or no dmc buffers available, just return.
	 */
	n = 0;
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++ ) {
		/* find an available buffer */
		if ((rp->flags & DBUF_DMCS) == 0) {
			IF_DEQUEUE(&sc->sc_if.if_snd, m);
			if (m == 0)
				return;
			/* mark it dmcs */
			rp->flags |= (DBUF_DMCS);
			/*
			 * Have request mapped to UNIBUS for transmission
			 * and start the output.
			 */
			rp->cc = dmcput(&sc->sc_ifuba, n, m);
			rp->cc &= DMC_CCOUNT;
			sc->sc_oused++;
			dmcload(sc, DMC_WRITE, rp->ubinfo,
				rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
		}
		n++;		/* n tracks rp's index into sc_xbufs/ifu_w */
	}
}
420 
/*
 * Utility routine to load the DMC device registers: take a command
 * buffer from the free list, fill it in, and either queue it behind
 * the command in progress or start it immediately via dmcrint.
 * READ (receive-buffer) commands jump the queue so the device is
 * never starved of receive buffers.
 */
dmcload(sc, type, w0, w1)
	register struct dmc_softc *sc;
	int type, w0, w1;
{
	register struct dmcdevice *addr;
	register int unit, sps;
	register struct dmc_command *qp;

	unit = sc - dmc_softc;
	addr = (struct dmcdevice *)dmcinfo[unit]->ui_addr;
	sps = spl5();

	/* grab a command buffer from the free list */
	if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0)
		panic("dmc command queue overflow");
	DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);

	/* fill in requested info */
	qp->qp_cmd = (type | DMC_RQI);
	qp->qp_ubaddr = w0;
	qp->qp_cc = w1;

	if (sc->sc_qactive) {	/* command in progress */
		if (type == DMC_READ) {
			QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail);
		} else {
			QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
		}
	} else {	/* command port free */
		sc->sc_qactive = qp;
		addr->bsel0 = qp->qp_cmd;
		dmcrint(unit);
	}
	splx(sps);
}
459 
/*
 * DMC interface receiver interrupt.
 * The device is ready to accept a command: feed it the operand
 * words of the active command, then pull the next one off the
 * command queue, if any.  Not reentrant (see inner loop).
 */
dmcrint(unit)
	int unit;
{
	register struct dmc_softc *sc;
	register struct dmcdevice *addr;
	register struct dmc_command *qp;
	register int n;

	addr = (struct dmcdevice *)dmcinfo[unit]->ui_addr;
	sc = &dmc_softc[unit];
	if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
		printf("dmc%d: dmcrint no command\n", unit);
		return;
	}
	while (addr->bsel0&DMC_RDYI) {
		/* hand the device the two operand words of the command */
		addr->sel4 = qp->qp_ubaddr;
		addr->sel6 = qp->qp_cc;
		addr->bsel0 &= ~(DMC_IEI|DMC_RQI);
		/* free command buffer */
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
		while (addr->bsel0 & DMC_RDYI) {
			/*
			 * Can't check for RDYO here 'cause
			 * this routine isn't reentrant!
			 */
			DELAY(5);
		}
		/* move on to next command */
		if ((sc->sc_qactive = sc->sc_qhead) == (struct dmc_command *)0)
			break;		/* all done */
		/* more commands to do, start the next one */
		qp = sc->sc_qactive;
		DEQUEUE(sc->sc_qhead, sc->sc_qtail);
		addr->bsel0 = qp->qp_cmd;
		/* give the device a short while to raise RDYI or RDYO */
		n = RDYSCAN;
		while (n-- > 0)
			if ((addr->bsel0&DMC_RDYI) || (addr->bsel2&DMC_RDYO))
				break;
	}
	/* still a command pending: re-enable input interrupts */
	if (sc->sc_qactive) {
		addr->bsel0 |= DMC_IEI|DMC_RQI;
		/* VMS does it twice !*$%@# */
		addr->bsel0 |= DMC_IEI|DMC_RQI;
	}

}
511 
/*
 * DMC interface transmitter interrupt.
 * A transfer may have completed, check for errors.
 * If it was a read, notify appropriate protocol.
 * If it was a write, pull the next one off the queue.
 * Control-out completions carry error/status reports; a fatal
 * report triggers a device restart.
 */
dmcxint(unit)
	int unit;
{
	register struct dmc_softc *sc;
	register struct ifnet *ifp;
	struct uba_device *ui = dmcinfo[unit];
	struct dmcdevice *addr;
	struct mbuf *m;
	struct ifqueue *inq;
	int arg, pkaddr, cmd, len;
	register struct ifrw *ifrw;
	register struct dmcbufs *rp;
	register struct ifxmt *ifxp;
	struct dmc_header *dh;
	int off, resid;

	addr = (struct dmcdevice *)ui->ui_addr;
	sc = &dmc_softc[unit];
	ifp = &sc->sc_if;

	/* process every completion the device has posted */
	while (addr->bsel2 & DMC_RDYO) {

		cmd = addr->bsel2 & 0xff;
		arg = addr->sel6 & 0xffff;
		/* reconstruct UNIBUS address of buffer returned to us */
		pkaddr = ((arg&DMC_XMEM)<<2) | (addr->sel4 & 0xffff);
		/* release port */
		addr->bsel2 &= ~DMC_RDYO;
		switch (cmd & 07) {

		case DMC_OUR:
			/*
			 * A read has completed.
			 * Pass packet to type specific
			 * higher-level input routine.
			 */
			ifp->if_ipackets++;
			/* find location in dmcuba struct */
			/* (sc_rbufs[i] corresponds to ifu_r[i]) */
			ifrw= &sc->sc_ifuba.ifu_r[0];
			for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
				if(rp->ubinfo == pkaddr)
					break;
				ifrw++;
			}
			if (rp >= &sc->sc_rbufs[NRCV])
				panic("dmc rcv");
			if ((rp->flags & DBUF_DMCS) == 0)
				printf("dmc%d: done unalloc rbuf\n", unit);

			/* low bits of arg are the received byte count */
			len = (arg & DMC_CCOUNT) - sizeof (struct dmc_header);
			if (len < 0 || len > DMCMTU) {
				ifp->if_ierrors++;
				printd("dmc%d: bad rcv pkt addr 0x%x len 0x%x\n",
				    unit, pkaddr, len);
				goto setup;
			}
			/*
			 * Deal with trailer protocol: if type is trailer
			 * get true type from first 16-bit word past data.
			 * Remember that type was trailer by setting off.
			 */
			dh = (struct dmc_header *)ifrw->ifrw_addr;
			dh->dmc_type = ntohs((u_short)dh->dmc_type);
#define dmcdataaddr(dh, off, type)	((type)(((caddr_t)((dh)+1)+(off))))
			if (dh->dmc_type >= DMC_TRAILER &&
			    dh->dmc_type < DMC_TRAILER+DMC_NTRAILER) {
				/* trailer type encodes the data offset in 512-byte units */
				off = (dh->dmc_type - DMC_TRAILER) * 512;
				if (off >= DMCMTU)
					goto setup;		/* sanity */
				dh->dmc_type = ntohs(*dmcdataaddr(dh, off, u_short *));
				resid = ntohs(*(dmcdataaddr(dh, off+2, u_short *)));
				if (off + resid > len)
					goto setup;		/* sanity */
				len = off + resid;
			} else
				off = 0;
			if (len == 0)
				goto setup;

			/*
			 * Pull packet off interface.  Off is nonzero if
			 * packet has trailing header; dmc_get will then
			 * force this header information to be at the front,
			 * but we still have to drop the type and length
			 * which are at the front of any trailer data.
			 */
			m = dmc_get(&sc->sc_ifuba, ifrw, len, off);
			if (m == 0)
				goto setup;
			if (off) {
				/* skip the type and length words of the trailer */
				m->m_off += 2 * sizeof (u_short);
				m->m_len -= 2 * sizeof (u_short);
			}
			switch (dh->dmc_type) {

#ifdef INET
			case DMC_IPTYPE:
				schednetisr(NETISR_IP);
				inq = &ipintrq;
				break;
#endif
			default:
				/* unknown packet type: drop it */
				m_freem(m);
				goto setup;
			}

			if (IF_QFULL(inq)) {
				IF_DROP(inq);
				m_freem(m);
			} else
				IF_ENQUEUE(inq, m);

	setup:
			/* rearm: hand the buffer back to the DMC for another read */
			/* is this needed? */
			rp->ubinfo = ifrw->ifrw_info & 0x3ffff;

			dmcload(sc, DMC_READ, rp->ubinfo,
			    ((rp->ubinfo >> 2) & DMC_XMEM) | rp->cc);
			break;

		case DMC_OUX:
			/*
			 * A write has completed, start another
			 * transfer if there is more data to send.
			 */
			ifp->if_opackets++;
			/* find associated dmcbuf structure */
			/* (sc_xbufs[i] corresponds to ifu_w[i]) */
			ifxp = &sc->sc_ifuba.ifu_w[0];
			for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
				if(rp->ubinfo == pkaddr)
					break;
				ifxp++;
			}
			if (rp >= &sc->sc_xbufs[NXMT]) {
				printf("dmc%d: bad packet address 0x%x\n",
				    unit, pkaddr);
				break;
			}
			if ((rp->flags & DBUF_DMCS) == 0)
				printf("dmc%d: unallocated packet 0x%x\n",
				    unit, pkaddr);
			/* mark buffer free */
			if (ifxp->x_xtofree) {
				(void)m_freem(ifxp->x_xtofree);
				ifxp->x_xtofree = 0;
			}
			rp->flags &= ~DBUF_DMCS;
			sc->sc_oused--;
			/* output completed: reset the watchdog */
			sc->sc_nticks = 0;
			sc->sc_flag |= DMC_ACTIVE;
			break;

		case DMC_CNTLO:
			/* control-out: error/status report from the device */
			arg &= DMC_CNTMASK;
			if (arg & DMC_FATAL) {
				printd("dmc%d: fatal error, flags=%b\n",
				    unit, arg, CNTLO_BITS);
				dmcrestart(unit);
				break;
			}
			/* ACCUMULATE STATISTICS */
			/* report only every Nth occurrence of each soft error */
			switch(arg) {
			case DMC_NOBUFS:
				ifp->if_ierrors++;
				if ((sc->sc_nobuf++ % DMC_RPNBFS) == 0)
					goto report;
				break;
			case DMC_DISCONN:
				if ((sc->sc_disc++ % DMC_RPDSC) == 0)
					goto report;
				break;
			case DMC_TIMEOUT:
				if ((sc->sc_timeo++ % DMC_RPTMO) == 0)
					goto report;
				break;
			case DMC_DATACK:
				ifp->if_oerrors++;
				if ((sc->sc_datck++ % DMC_RPDCK) == 0)
					goto report;
				break;
			default:
				goto report;
			}
			break;
		report:
			printd("dmc%d: soft error, flags=%b\n", unit,
			    arg, CNTLO_BITS);
			if ((sc->sc_flag & DMC_RESTART) == 0) {
				/*
				 * kill off the dmc to get things
				 * going again by generating a
				 * procedure error
				 */
				sc->sc_flag |= DMC_RESTART;
				arg = sc->sc_ubinfo & 0x3ffff;
				dmcload(sc, DMC_BASEI, arg, (arg>>2)&DMC_XMEM);
			}
			break;

		default:
			printf("dmc%d: bad control %o\n", unit, cmd);
			break;
		}
	}
	/* try to fill any transmit buffers freed above */
	dmcstart(unit);
	return;
}
725 
/*
 * DMC output routine.
 * Encapsulate a packet of type family for the dmc.
 * Use trailer local net encapsulation if enough data in first
 * packet leaves a multiple of 512 bytes of data in remainder.
 * Returns 0 on success or an errno; consumes the mbuf chain
 * in either case.
 */
dmcoutput(ifp, m0, dst)
	register struct ifnet *ifp;
	register struct mbuf *m0;
	struct sockaddr *dst;
{
	int type, error, s;
	register struct mbuf *m = m0;
	register struct dmc_header *dh;
	register int off;

	switch (dst->sa_family) {
#ifdef	INET
	case AF_INET:
		/* off = number of data bytes past the first mbuf */
		off = ntohs((u_short)mtod(m, struct ip *)->ip_len) - m->m_len;
		/* trailers only if enabled, data is 512-aligned, and there
		   is room before the data for the type/length words */
		if ((ifp->if_flags & IFF_NOTRAILERS) == 0)
		if (off > 0 && (off & 0x1ff) == 0 &&
		    m->m_off >= MMINOFF + 2 * sizeof (u_short)) {
			type = DMC_TRAILER + (off>>9);
			m->m_off -= 2 * sizeof (u_short);
			m->m_len += 2 * sizeof (u_short);
			*mtod(m, u_short *) = htons((u_short)DMC_IPTYPE);
			*(mtod(m, u_short *) + 1) = htons((u_short)m->m_len);
			goto gottrailertype;
		}
		type = DMC_IPTYPE;
		off = 0;
		goto gottype;
#endif

	case AF_UNSPEC:
		/* caller supplied a raw dmc_header in sa_data */
		dh = (struct dmc_header *)dst->sa_data;
		type = dh->dmc_type;
		goto gottype;

	default:
		printf("dmc%d: can't handle af%d\n", ifp->if_unit,
			dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

gottrailertype:
	/*
	 * Packet to be sent as a trailer; move first packet
	 * (control information) to end of chain.
	 */
	while (m->m_next)
		m = m->m_next;
	m->m_next = m0;
	m = m0->m_next;
	m0->m_next = 0;
	m0 = m;

gottype:
	/*
	 * Add local network header
	 * (there is space for a uba on a vax to step on)
	 */
	if (m->m_off > MMAXOFF ||
	    MMINOFF + sizeof(struct dmc_header) > m->m_off) {
		/* no room in first mbuf: prepend a header mbuf */
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == 0) {
			error = ENOBUFS;
			goto bad;
		}
		m->m_next = m0;
		m->m_off = MMINOFF;
		m->m_len = sizeof (struct dmc_header);
	} else {
		m->m_off -= sizeof (struct dmc_header);
		m->m_len += sizeof (struct dmc_header);
	}
	dh = mtod(m, struct dmc_header *);
	dh->dmc_type = htons((u_short)type);

	/*
	 * Queue message on interface, and start output if interface
	 * not yet active.
	 */
	s = splimp();
	if (IF_QFULL(&ifp->if_snd)) {
		IF_DROP(&ifp->if_snd);
		m_freem(m);
		splx(s);
		return (ENOBUFS);
	}
	IF_ENQUEUE(&ifp->if_snd, m);
	dmcstart(ifp->if_unit);
	splx(s);
	return (0);

bad:
	m_freem(m0);
	return (error);
}
827 
828 
829 /*
830  * Process an ioctl request.
831  */
832 dmcioctl(ifp, cmd, data)
833 	register struct ifnet *ifp;
834 	int cmd;
835 	caddr_t data;
836 {
837 	int s = splimp(), error = 0;
838 
839 	switch (cmd) {
840 
841 	case SIOCSIFADDR:
842 		ifp->if_flags |= IFF_UP;
843 		if ((ifp->if_flags & IFF_RUNNING) == 0)
844 			dmcinit(ifp->if_unit);
845 		break;
846 
847 	case SIOCSIFDSTADDR:
848 		if ((ifp->if_flags & IFF_RUNNING) == 0)
849 			dmcinit(ifp->if_unit);
850 		break;
851 
852 	default:
853 		error = EINVAL;
854 	}
855 	splx(s);
856 	return (error);
857 }
858 
859 
860 /*
861  * Routines supporting UNIBUS network interfaces.
862  */
863 
/*
 * Init UNIBUS for interface on uban whose headers of size hlen are to
 * end on a page boundary.  We allocate a UNIBUS map register for the page
 * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
 * doing this for each receive and transmit buffer.  We also
 * allocate page frames in the mbuffer pool for these pages.
 * Returns 1 on success, 0 on failure (all partial allocations are
 * rolled back).
 */
dmc_ubainit(ifu, uban, hlen, nmr)
	register struct dmcuba *ifu;
	int uban, hlen, nmr;
{
	register caddr_t cp, dp;
	register struct ifrw *ifrw;
	register struct ifxmt *ifxp;
	int i, ncl;

	/* clusters per buffer, rounded up */
	ncl = clrnd(nmr + CLSIZE) / CLSIZE;
	if (ifu->ifu_r[0].ifrw_addr)
		/*
		 * If the first read buffer has a non-zero
		 * address, it means we have already allocated core
		 */
		cp = ifu->ifu_r[0].ifrw_addr - (CLBYTES - hlen);
	else {
		/* one contiguous chunk for all receive + transmit buffers */
		cp = m_clalloc(NTOT * ncl, MPG_SPACE);
		if (cp == 0)
			return (0);
		ifu->ifu_hlen = hlen;
		ifu->ifu_uban = uban;
		ifu->ifu_uba = uba_hd[uban].uh_uba;
		/* position each buffer so the header ends on a page boundary */
		dp = cp + CLBYTES - hlen;
		for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++) {
			ifrw->ifrw_addr = dp;
			dp += ncl * CLBYTES;
		}
		for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) {
			ifxp->x_ifrw.ifrw_addr = dp;
			dp += ncl * CLBYTES;
		}
	}
	/* allocate for receive ring */
	for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++) {
		if (dmc_ubaalloc(ifu, ifrw, nmr) == 0) {
			struct ifrw *rw;

			/* roll back the receive mappings made so far */
			for (rw = ifu->ifu_r; rw < ifrw; rw++)
				ubarelse(ifu->ifu_uban, &rw->ifrw_info);
			goto bad;
		}
	}
	/* and now transmit ring */
	for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) {
		ifrw = &ifxp->x_ifrw;
		if (dmc_ubaalloc(ifu, ifrw, nmr) == 0) {
			struct ifxmt *xp;

			/* roll back everything mapped so far */
			for (xp = ifu->ifu_w; xp < ifxp; xp++)
				ubarelse(ifu->ifu_uban, &xp->x_ifrw.ifrw_info);
			for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++)
				ubarelse(ifu->ifu_uban, &ifrw->ifrw_info);
			goto bad;
		}
		/* save the original map entries; dmcput may swap clusters out */
		for (i = 0; i < nmr; i++)
			ifxp->x_map[i] = ifrw->ifrw_mr[i];
		ifxp->x_xswapd = 0;
	}
	return (1);
bad:
	m_pgfree(cp, NTOT * ncl);
	ifu->ifu_r[0].ifrw_addr = 0;
	return (0);
}
936 
937 /*
938  * Setup either a ifrw structure by allocating UNIBUS map registers,
939  * possibly a buffered data path, and initializing the fields of
940  * the ifrw structure to minimize run-time overhead.
941  */
942 static
943 dmc_ubaalloc(ifu, ifrw, nmr)
944 	struct dmcuba *ifu;
945 	register struct ifrw *ifrw;
946 	int nmr;
947 {
948 	register int info;
949 
950 	info =
951 	    uballoc(ifu->ifu_uban, ifrw->ifrw_addr, nmr*NBPG + ifu->ifu_hlen,
952 		ifu->ifu_flags);
953 	if (info == 0)
954 		return (0);
955 	ifrw->ifrw_info = info;
956 	ifrw->ifrw_bdp = UBAI_BDP(info);
957 	ifrw->ifrw_proto = UBAMR_MRV | (UBAI_BDP(info) << UBAMR_DPSHIFT);
958 	ifrw->ifrw_mr = &ifu->ifu_uba->uba_map[UBAI_MR(info) + 1];
959 	return (1);
960 }
961 
/*
 * Pull read data off a interface.
 * Len is length of data, with local net header stripped.
 * Off is non-zero if a trailer protocol was used, and
 * gives the offset of the trailer information.
 * We copy the trailer information and then all the normal
 * data into mbufs.  When full cluster sized units are present
 * on the interface on cluster boundaries we can get them more
 * easily by remapping, and take advantage of this here.
 * Returns the mbuf chain, or 0 if allocation failed.
 */
struct mbuf *
dmc_get(ifu, ifrw, totlen, off0)
	register struct dmcuba *ifu;
	register struct ifrw *ifrw;
	int totlen, off0;
{
	struct mbuf *top, **mp, *m;
	int off = off0, len;
	register caddr_t cp = ifrw->ifrw_addr + ifu->ifu_hlen;

	top = 0;
	mp = &top;
	while (totlen > 0) {
		MGET(m, M_DONTWAIT, MT_DATA);
		if (m == 0)
			goto bad;
		if (off) {
			/* trailer: copy the trailing header first */
			len = totlen - off;
			cp = ifrw->ifrw_addr + ifu->ifu_hlen + off;
		} else
			len = totlen;
		if (len >= CLBYTES) {
			struct mbuf *p;
			struct pte *cpte, *ppte;
			int x, *ip, i;

			MCLGET(p, 1);
			if (p == 0)
				goto nopage;
			len = m->m_len = CLBYTES;
			m->m_off = (int)p - (int)m;
			if (!claligned(cp))
				goto copy;

			/*
			 * Switch pages mapped to UNIBUS with new page p,
			 * as quick form of copy.  Remap UNIBUS and invalidate.
			 */
			cpte = &Mbmap[mtocl(cp)*CLSIZE];
			ppte = &Mbmap[mtocl(p)*CLSIZE];
			x = btop(cp - ifrw->ifrw_addr);
			ip = (int *)&ifrw->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++) {
				struct pte t;
				/* exchange the kernel PTEs of the two pages */
				t = *ppte; *ppte++ = *cpte; *cpte = t;
				/* point the UNIBUS map at the fresh page */
				*ip++ =
				    cpte++->pg_pfnum|ifrw->ifrw_proto;
				/* invalidate stale translations for both pages */
				mtpr(TBIS, cp);
				cp += NBPG;
				mtpr(TBIS, (caddr_t)p);
				p += NBPG / sizeof (*p);
			}
			goto nocopy;
		}
nopage:
		m->m_len = MIN(MLEN, len);
		m->m_off = MMINOFF;
copy:
		bcopy(cp, mtod(m, caddr_t), (unsigned)m->m_len);
		cp += m->m_len;
nocopy:
		*mp = m;
		mp = &m->m_next;
		if (off) {
			/* sort of an ALGOL-W style for statement... */
			off += m->m_len;
			if (off == totlen) {
				/* trailer consumed; wrap to the normal data */
				cp = ifrw->ifrw_addr + ifu->ifu_hlen;
				off = 0;
				totlen = off0;
			}
		} else
			totlen -= m->m_len;
	}
	return (top);
bad:
	m_freem(top);
	return (0);
}
1051 
/*
 * Map a chain of mbufs onto a network interface
 * in preparation for an i/o operation.
 * The argument chain of mbufs includes the local network
 * header which is copied to be in the mapped, aligned
 * i/o space.  Cluster mbufs are mapped out directly (no copy)
 * when alignment allows; the mbuf chain is consumed.
 * Returns the total byte count placed in the transmit buffer.
 */
dmcput(ifu, n, m)
	struct dmcuba *ifu;
	int n;
	register struct mbuf *m;
{
	register struct mbuf *mp;
	register caddr_t cp;
	register struct ifxmt *ifxp;
	register struct ifrw *ifrw;
	register int i;
	int xswapd = 0;
	int x, cc, t;
	caddr_t dp;

	ifxp = &ifu->ifu_w[n];
	ifrw = &ifxp->x_ifrw;
	cp = ifrw->ifrw_addr;
	while (m) {
		dp = mtod(m, char *);
		if (claligned(cp) && claligned(dp) && m->m_len == CLBYTES) {
			/* full aligned cluster: map it out instead of copying */
			struct pte *pte; int *ip;
			pte = &Mbmap[mtocl(dp)*CLSIZE];
			x = btop(cp - ifrw->ifrw_addr);
			ip = (int *)&ifrw->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++)
				*ip++ = ifrw->ifrw_proto | pte++->pg_pfnum;
			xswapd |= 1 << (x>>(CLSHIFT-PGSHIFT));
			/* keep the mbuf until the dma completes (freed in dmcxint) */
			mp = m->m_next;
			m->m_next = ifxp->x_xtofree;
			ifxp->x_xtofree = m;
			cp += m->m_len;
		} else {
			bcopy(mtod(m, caddr_t), cp, (unsigned)m->m_len);
			cp += m->m_len;
			MFREE(m, mp);
		}
		m = mp;
	}

	/*
	 * Xswapd is the set of clusters we just mapped out.  Ifxp->x_xswapd
	 * is the set of clusters mapped out from before.  We compute
	 * the number of clusters involved in this operation in x.
	 * Clusters mapped out before and involved in this operation
	 * should be unmapped so original pages will be accessed by the device.
	 */
	cc = cp - ifrw->ifrw_addr;
	x = ((cc - ifu->ifu_hlen) + CLBYTES - 1) >> CLSHIFT;
	ifxp->x_xswapd &= ~xswapd;
	while (i = ffs(ifxp->x_xswapd)) {
		i--;
		if (i >= x)
			break;
		ifxp->x_xswapd &= ~(1<<i);
		i *= CLSIZE;
		/* restore the saved map entries for this cluster */
		for (t = 0; t < CLSIZE; t++) {
			ifrw->ifrw_mr[i] = ifxp->x_map[i];
			i++;
		}
	}
	ifxp->x_xswapd |= xswapd;
	return (cc);
}
1122 
/*
 * Restart after a fatal error.
 * Clear device and reinitialize.
 */
dmcrestart(unit)
	int unit;
{
	register struct dmc_softc *sc = &dmc_softc[unit];
	register struct uba_device *ui = dmcinfo[unit];
	register struct dmcdevice *addr;
	register struct ifxmt *ifxp;
	register int i;
	register struct mbuf *m;
	struct dmcuba *ifu;

	addr = (struct dmcdevice *)ui->ui_addr;
	ifu = &sc->sc_ifuba;
#ifdef DEBUG
	/* dump base table */
	printf("dmc%d base table:\n", unit);
	for (i = 0; i < sizeof (struct dmc_base); i++)
		printf("%o\n" ,dmc_base[unit].d_base[i]);
#endif
	/*
	 * Let the DMR finish the MCLR.	 At 1 Mbit, it should do so
	 * in about a max of 6.4 milliseconds with diagnostics enabled.
	 */
	addr->bsel1 = DMC_MCLR;
	for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
		;
	/* Did the timer expire or did the DMR finish? */
	if ((addr->bsel1 & DMC_RUN) == 0) {
		printf("dmc%d: M820 Test Failed\n", unit);
		return;
	}

#ifdef notdef	/* tef sez why throw these packets away??? */
	/* purge send queue */
	IF_DEQUEUE(&sc->sc_if.if_snd, m);
	while (m) {
		m_freem(m);
		IF_DEQUEUE(&sc->sc_if.if_snd, m);
	}
#endif
	/* release mbufs held for in-flight transmits; they were lost */
	for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) {
		if (ifxp->x_xtofree) {
			(void) m_freem(ifxp->x_xtofree);
			ifxp->x_xtofree = 0;
		}
	}

	/* restart DMC */
	dmcinit(unit);
	sc->sc_flag &= ~DMC_RESTART;
	sc->sc_if.if_collisions++;	/* why not? */
}
1179 
1180 /*
1181  * Check to see that transmitted packets don't
1182  * lose interrupts.  The device has to be active.
1183  */
1184 dmcwatch()
1185 {
1186 	register struct uba_device *ui;
1187 	register struct dmc_softc *sc;
1188 	struct dmcdevice *addr;
1189 	register int i;
1190 
1191 	for (i = 0; i < NDMC; i++) {
1192 		sc = &dmc_softc[i];
1193 		if ((sc->sc_flag & DMC_ACTIVE) == 0)
1194 			continue;
1195 		if ((ui = dmcinfo[i]) == 0 || ui->ui_alive == 0)
1196 			continue;
1197 		if (sc->sc_oused) {
1198 			sc->sc_nticks++;
1199 			if (sc->sc_nticks > dmc_timeout) {
1200 				sc->sc_nticks = 0;
1201 				addr = (struct dmcdevice *)ui->ui_addr;
1202 				printd("dmc%d hung: bsel0=%b bsel2=%b\n", i,
1203 				    addr->bsel0 & 0xff, DMC0BITS,
1204 				    addr->bsel2 & 0xff, DMC2BITS);
1205 				dmcrestart(i);
1206 			}
1207 		}
1208 	}
1209 	timeout(dmcwatch, (caddr_t) 0, hz);
1210 }
1211 #endif
1212