xref: /csrg-svn/sys/vax/if/if_dmc.c (revision 17565)
1 /*	if_dmc.c	6.4	84/12/20	*/
2 
3 #include "dmc.h"
4 #if NDMC > 0
5 
6 /*
7  * DMC11 device driver, internet version
8  *
9  *	Bill Nesheim
10  *	Cornell University
11  *
12  *	Lou Salkind
13  *	New York University
14  */
15 
/* #define DEBUG -- for base table dump on fatal error */
17 
18 #include "../machine/pte.h"
19 
20 #include "param.h"
21 #include "systm.h"
22 #include "mbuf.h"
23 #include "buf.h"
24 #include "ioctl.h"		/* must precede tty.h */
25 #include "tty.h"
26 #include "protosw.h"
27 #include "socket.h"
28 #include "vmmac.h"
29 #include "errno.h"
30 
31 #include "../net/if.h"
32 #include "../net/netisr.h"
33 #include "../net/route.h"
34 #include "../netinet/in.h"
35 #include "../netinet/in_systm.h"
36 #include "../netinet/ip.h"
37 #include "../netinet/ip_var.h"
38 
39 #include "../vax/cpu.h"
40 #include "../vax/mtpr.h"
41 #include "if_uba.h"
42 #include "if_dmc.h"
43 #include "../vaxuba/ubareg.h"
44 #include "../vaxuba/ubavar.h"
45 
46 #include "../h/time.h"
47 #include "../h/kernel.h"
48 
int	dmctimer;			/* timer started? (set once by first dmcattach) */
int	dmc_timeout = 8;		/* timeout value, in ticks of dmcwatch */
int	dmcwatch();			/* watchdog; rearms itself via timeout() */

/*
 * Driver information for auto-configuration stuff.
 */
int	dmcprobe(), dmcattach(), dmcinit(), dmcioctl();
int	dmcoutput(), dmcreset();
struct	uba_device *dmcinfo[NDMC];	/* per-unit autoconfig info */
u_short	dmcstd[] = { 0 };		/* no standard csr addresses */
struct	uba_driver dmcdriver =
	{ dmcprobe, 0, dmcattach, 0, dmcstd, "dmc", dmcinfo };
62 
/* number of receive and transmit buffers handed to the device */
#define NRCV 7
#define NXMT 3
#define NTOT (NRCV + NXMT)
#define NCMDS	(NTOT+4)	/* size of command queue */

/* debug printf, enabled by patching dmcdebug nonzero */
#define printd if(dmcdebug)printf
int dmcdebug = 0;

/* error reporting intervals: log every Nth occurrence (see dmcxint) */
#define DMC_RPNBFS	50	/* no receive buffers */
#define DMC_RPDSC	1	/* disconnect */
#define DMC_RPTMO	10	/* timeout */
#define DMC_RPDCK	10	/* data check */
76 
/*
 * One pending input command for the device; operands are handed to
 * the DMC through sel4/sel6 by dmcrint() when the port is ready.
 */
struct  dmc_command {
	char	qp_cmd;		/* command */
	short	qp_ubaddr;	/* buffer address */
	short	qp_cc;		/* character count || XMEM */
	struct	dmc_command *qp_next;	/* next command on queue */
};
83 
/*
 * The dmcuba structures generalize the ifuba structure
 * to an arbitrary number of receive and transmit buffers.
 */
/* per-transmit-buffer mapping state */
struct	ifxmt {
	struct	ifrw x_ifrw;		/* mapping info */
	struct	pte x_map[IF_MAXNUBAMR];	/* output base pages */
	short 	x_xswapd;		/* mask of clusters swapped */
	struct	mbuf *x_xtofree;	/* pages being dma'd out */
};
94 
/* UNIBUS resources for one interface: receive and transmit rings */
struct	dmcuba {
	short	ifu_uban;		/* uba number */
	short	ifu_hlen;		/* local net header length */
	struct	uba_regs *ifu_uba;	/* uba regs, in vm */
	struct	ifrw ifu_r[NRCV];	/* receive information */
	struct	ifxmt ifu_w[NXMT];	/* transmit information */
				/* these should only be pointers */
	short	ifu_flags;		/* used during uballoc's */
};
104 
/* per-buffer bookkeeping, shared by the receive and transmit rings */
struct dmcbufs {
	int	ubinfo;		/* from uballoc */
	short	cc;		/* buffer size */
	short	flags;		/* access control */
};
#define	DBUF_OURS	0	/* buffer is available */
#define	DBUF_DMCS	1	/* buffer claimed by somebody */
#define	DBUF_XMIT	4	/* transmit buffer */
#define	DBUF_RCV	8	/* receive buffer */

struct mbuf *dmc_get();
116 
/*
 * DMC software status per interface.
 *
 * Each interface is referenced by a network interface structure,
 * sc_if, which the routing code uses to locate the interface.
 * This structure contains the output queue for the interface, its address, ...
 * We also have, for each interface, a  set of 7 UBA interface structures
 * for each, which
 * contain information about the UNIBUS resources held by the interface:
 * map registers, buffered data paths, etc.  Information is cached in this
 * structure for use by the if_uba.c routines in running the interface
 * efficiently.
 */
struct dmc_softc {
	short	sc_oused;		/* output buffers currently in use */
	short	sc_iused;		/* input buffers given to DMC */
	short	sc_flag;		/* flags (DMC_ALLOC etc., below) */
	int	sc_nticks;		/* seconds since last interrupt */
	struct	ifnet sc_if;		/* network-visible interface */
	struct	dmcbufs sc_rbufs[NRCV];	/* receive buffer info */
	struct	dmcbufs sc_xbufs[NXMT];	/* transmit buffer info */
	struct	dmcuba sc_ifuba;	/* UNIBUS resources */
	int	sc_ubinfo;		/* UBA mapping info for base table */
	int	sc_errors[4];		/* non-fatal error counters */
#define sc_datck sc_errors[0]
#define sc_timeo sc_errors[1]
#define sc_nobuf sc_errors[2]
#define sc_disc  sc_errors[3]
	/* command queue stuff */
	struct	dmc_command sc_cmdbuf[NCMDS];
	struct	dmc_command *sc_qhead;	/* head of command queue */
	struct	dmc_command *sc_qtail;	/* tail of command queue */
	struct	dmc_command *sc_qactive;	/* command in progress */
	struct	dmc_command *sc_qfreeh;	/* head of list of free cmd buffers */
	struct	dmc_command *sc_qfreet;	/* tail of list of free cmd buffers */
	/* end command queue stuff */
} dmc_softc[NDMC];
154 
/* sc_flag bits */
#define DMC_ALLOC	01		/* unibus resources allocated */
#define DMC_BMAPPED	02		/* base table mapped */
#define DMC_RESTART	04		/* software restart in progress */
/* was "08", which is not a valid octal constant ('8' is not an octal
   digit); old compilers read it as 8, so the correct spelling is 010 */
#define DMC_ACTIVE	010		/* device active */
160 
/* per-unit DMC base table; mapped onto the UNIBUS in dmcinit() */
struct dmc_base {
	short	d_base[128];		/* DMC base table */
} dmc_base[NDMC];
164 
/*
 * Queue manipulation macros for the singly-linked command lists.
 * Each expands to several statements, so each is wrapped in the
 * standard do { } while (0) idiom: safe after an unbraced if/else,
 * and the trailing if in QUEUE_AT_HEAD can no longer capture a
 * following else (dangling-else hazard in the original).
 */
#define	QUEUE_AT_HEAD(qp, head, tail) do { \
	(qp)->qp_next = (head); \
	(head) = (qp); \
	if ((tail) == (struct dmc_command *) 0) \
		(tail) = (head); \
} while (0)

#define QUEUE_AT_TAIL(qp, head, tail) do { \
	if ((tail)) \
		(tail)->qp_next = (qp); \
	else \
		(head) = (qp); \
	(qp)->qp_next = (struct dmc_command *) 0; \
	(tail) = (qp); \
} while (0)

#define DEQUEUE(head, tail) do { \
	(head) = (head)->qp_next; \
	if ((head) == (struct dmc_command *) 0) \
		(tail) = (head); \
} while (0)
184 
/*
 * See if the device is present: master-clear it and busy-wait (bounded)
 * for the RUN bit; then raise RQI|IEI -- presumably so autoconf can
 * latch the interrupt vector (TODO confirm) -- and master-clear again.
 * Returns 1 if the device comes on line, 0 if it never does.
 */
dmcprobe(reg)
	caddr_t reg;
{
	register int br, cvec;
	register struct dmcdevice *addr = (struct dmcdevice *)reg;
	register int i;

#ifdef lint
	br = 0; cvec = br; br = cvec;
	dmcrint(0); dmcxint(0);
#endif
	addr->bsel1 = DMC_MCLR;
	/* bounded busy-wait for the device microprocessor to signal RUN */
	for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
		;
	if ((addr->bsel1 & DMC_RUN) == 0) {
		printf("dmcprobe: can't start device\n" );
		return (0);
	}
	/* request an input-command interrupt */
	addr->bsel0 = DMC_RQI|DMC_IEI;
	/* let's be paranoid */
	addr->bsel0 |= DMC_RQI|DMC_IEI;
	DELAY(1000000);
	/* leave the device master-cleared */
	addr->bsel1 = DMC_MCLR;
	for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
		;
	return (1);
}
212 
213 /*
214  * Interface exists: make available by filling in network interface
215  * record.  System will initialize the interface when it is ready
216  * to accept packets.
217  */
218 dmcattach(ui)
219 	register struct uba_device *ui;
220 {
221 	register struct dmc_softc *sc = &dmc_softc[ui->ui_unit];
222 
223 	sc->sc_if.if_unit = ui->ui_unit;
224 	sc->sc_if.if_name = "dmc";
225 	sc->sc_if.if_mtu = DMCMTU;
226 	sc->sc_if.if_init = dmcinit;
227 	sc->sc_if.if_output = dmcoutput;
228 	sc->sc_if.if_ioctl = dmcioctl;
229 	sc->sc_if.if_reset = dmcreset;
230 	sc->sc_if.if_flags = IFF_POINTOPOINT;
231 	sc->sc_ifuba.ifu_flags = UBA_CANTWAIT;
232 
233 	if_attach(&sc->sc_if);
234 	if (dmctimer == 0) {
235 		dmctimer = 1;
236 		timeout(dmcwatch, (caddr_t) 0, hz);
237 	}
238 }
239 
240 /*
241  * Reset of interface after UNIBUS reset.
242  * If interface is on specified UBA, reset it's state.
243  */
244 dmcreset(unit, uban)
245 	int unit, uban;
246 {
247 	register struct uba_device *ui;
248 	register struct dmc_softc *sc = &dmc_softc[unit];
249 
250 	if (unit >= NDMC || (ui = dmcinfo[unit]) == 0 || ui->ui_alive == 0 ||
251 	    ui->ui_ubanum != uban)
252 		return;
253 	printf(" dmc%d", unit);
254 	sc->sc_flag = 0;
255 	dmcinit(unit);
256 }
257 
/*
 * Initialization of interface; reinitialize UNIBUS usage.
 * Does nothing until both local and destination addresses are set
 * and the device microprocessor reports RUN.  Maps the base table,
 * (re)allocates UNIBUS buffers, rebuilds the command free list,
 * loads base-in and line-mode commands, and posts all NRCV receive
 * buffers to the device.
 */
dmcinit(unit)
	int unit;
{
	register struct dmc_softc *sc = &dmc_softc[unit];
	register struct uba_device *ui = dmcinfo[unit];
	register struct dmcdevice *addr;
	register struct ifnet *ifp = &sc->sc_if;
	register struct ifrw *ifrw;
	register struct ifxmt *ifxp;
	register struct dmcbufs *rp;
	register struct dmc_command *qp;
	int base;
	struct sockaddr_in *sin;
	int s;

	addr = (struct dmcdevice *)ui->ui_addr;

	sin = (struct sockaddr_in *) &ifp->if_addr;
	if (sin->sin_addr.s_addr == 0)	/* if address still unknown */
		return;
	sin = (struct sockaddr_in *) &ifp->if_dstaddr;
	if (sin->sin_addr.s_addr == 0)	/* if address still unknown */
		return;

	if ((addr->bsel1&DMC_RUN) == 0) {
		printf("dmcinit: DMC not running\n");
		ifp->if_flags &= ~(IFF_RUNNING|IFF_UP);
		return;
	}
	/* map base table */
	if ((sc->sc_flag & DMC_BMAPPED) == 0) {
		sc->sc_ubinfo = uballoc(ui->ui_ubanum,
			(caddr_t)&dmc_base[unit], sizeof (struct dmc_base), 0);
		sc->sc_flag |= DMC_BMAPPED;
	}
	/* initialize UNIBUS resources */
	sc->sc_iused = sc->sc_oused = 0;
	if ((sc->sc_flag & DMC_ALLOC) == 0) {
		if (dmc_ubainit(&sc->sc_ifuba, ui->ui_ubanum,
		    sizeof(struct dmc_header), (int)btoc(DMCMTU)) == 0) {
			printf("dmc%d: can't initialize\n", unit);
			ifp->if_flags &= ~IFF_UP;
			return;
		}
		sc->sc_flag |= DMC_ALLOC;
	}

	/* initialize buffer pool */
	/* receives */
	ifrw = &sc->sc_ifuba.ifu_r[0];
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		/* low 18 bits of ifrw_info are the UNIBUS buffer address */
		rp->ubinfo = ifrw->ifrw_info & 0x3ffff;
		rp->cc = DMCMTU + sizeof (struct dmc_header);
		rp->flags = DBUF_OURS|DBUF_RCV;
		ifrw++;
	}
	/* transmits */
	ifxp = &sc->sc_ifuba.ifu_w[0];
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
		rp->ubinfo = ifxp->x_ifrw.ifrw_info & 0x3ffff;
		rp->cc = 0;
		rp->flags = DBUF_OURS|DBUF_XMIT;
		ifxp++;
	}

	/* set up command queues */
	sc->sc_qfreeh = sc->sc_qfreet
		 = sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
		(struct dmc_command *)0;
	/* set up free command buffer list */
	for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NCMDS]; qp++) {
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
	}

	/* base in */
	base = sc->sc_ubinfo & 0x3ffff;
	dmcload(sc, DMC_BASEI, base, (base>>2) & DMC_XMEM);
	/* specify half duplex operation, flags tell if primary */
	/* or secondary station */
	if (ui->ui_flags == 0)
		/* use DDMCP mode in full duplex */
		dmcload(sc, DMC_CNTLI, 0, 0);
	else if (ui->ui_flags == 1)
		/* use MAINTENENCE mode */
		dmcload(sc, DMC_CNTLI, 0, DMC_MAINT );
	else if (ui->ui_flags == 2)
		/* use DDCMP half duplex as primary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
	else if (ui->ui_flags == 3)
		/* use DDCMP half duplex as secondary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);

	/* enable operation done interrupts */
	sc->sc_flag &= ~DMC_ACTIVE;
	while ((addr->bsel2 & DMC_IEO) == 0)
		addr->bsel2 |= DMC_IEO;
	s = spl5();
	/* queue first NRCV buffers for DMC to fill */
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->flags |= DBUF_DMCS;
		dmcload(sc, DMC_READ, rp->ubinfo,
			(((rp->ubinfo>>2)&DMC_XMEM) | rp->cc));
		sc->sc_iused++;
	}
	splx(s);
	ifp->if_flags |= IFF_UP|IFF_RUNNING;

}
369 
/*
 * Start output on interface.  Get another datagram
 * to send from the interface queue and map it to
 * the interface before starting output.
 *
 * Must be called at spl 5
 */
dmcstart(dev)
	dev_t dev;
{
	int unit = minor(dev);
	register struct dmc_softc *sc = &dmc_softc[unit];
	struct mbuf *m;
	register struct dmcbufs *rp;
	register int n;

	/*
	 * Dequeue up to NXMT requests and map them to the UNIBUS.
	 * If no more requests, or no dmc buffers available, just return.
	 */
	n = 0;
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++ ) {
		/* find an available buffer */
		if ((rp->flags & DBUF_DMCS) == 0) {
			IF_DEQUEUE(&sc->sc_if.if_snd, m);
			if (m == 0)
				return;
			/* mark it dmcs */
			rp->flags |= (DBUF_DMCS);
			/*
			 * Have request mapped to UNIBUS for transmission
			 * and start the output.
			 */
			rp->cc = dmcput(&sc->sc_ifuba, n, m);
			rp->cc &= DMC_CCOUNT;
			sc->sc_oused++;
			dmcload(sc, DMC_WRITE, rp->ubinfo,
				rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
		}
		/* n is the transmit-buffer index, kept in lockstep with rp */
		n++;
	}
}
412 
/*
 * Utility routine to load the DMC device registers.
 * Takes a command buffer off the free list (panics if none), fills it
 * in, and either hands it to the device at once (command port idle)
 * or queues it.  DMC_READ commands are queued at the head --
 * presumably to keep receive buffers supplied promptly; TODO confirm.
 */
dmcload(sc, type, w0, w1)
	register struct dmc_softc *sc;
	int type, w0, w1;
{
	register struct dmcdevice *addr;
	register int unit, sps;
	register struct dmc_command *qp;

	unit = sc - dmc_softc;
	addr = (struct dmcdevice *)dmcinfo[unit]->ui_addr;
	sps = spl5();

	/* grab a command buffer from the free list */
	if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0)
		panic("dmc command queue overflow");
	DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);

	/* fill in requested info */
	qp->qp_cmd = (type | DMC_RQI);
	qp->qp_ubaddr = w0;
	qp->qp_cc = w1;

	if (sc->sc_qactive) {	/* command in progress */
		if (type == DMC_READ) {
			QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail);
		} else {
			QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
		}
	} else {	/* command port free */
		sc->sc_qactive = qp;
		addr->bsel0 = qp->qp_cmd;
		dmcrint(unit);
	}
	splx(sps);
}
451 
/*
 * DMC interface receiver interrupt.
 * Ready to accept another command,
 * pull one off the command queue.
 */
dmcrint(unit)
	int unit;
{
	register struct dmc_softc *sc;
	register struct dmcdevice *addr;
	register struct dmc_command *qp;
	register int n;

	addr = (struct dmcdevice *)dmcinfo[unit]->ui_addr;
	sc = &dmc_softc[unit];
	if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
		printf("dmc%d: dmcrint no command\n", unit);
		return;
	}
	while (addr->bsel0&DMC_RDYI) {
		/* hand the active command's operands to the device */
		addr->sel4 = qp->qp_ubaddr;
		addr->sel6 = qp->qp_cc;
		addr->bsel0 &= ~(DMC_IEI|DMC_RQI);
		/* free command buffer */
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
		/* wait for the device to drop RDYI, accepting the command */
		while (addr->bsel0 & DMC_RDYI) {
			/*
			 * Can't check for RDYO here 'cause
			 * this routine isn't reentrant!
			 */
			DELAY(5);
		}
		/* move on to next command */
		if ((sc->sc_qactive = sc->sc_qhead) == (struct dmc_command *)0)
			break;		/* all done */
		/* more commands to do, start the next one */
		qp = sc->sc_qactive;
		DEQUEUE(sc->sc_qhead, sc->sc_qtail);
		addr->bsel0 = qp->qp_cmd;
		n = RDYSCAN;
		/* give the device a short while to raise RDYI or RDYO */
		while (n-- > 0)
			if ((addr->bsel0&DMC_RDYI) || (addr->bsel2&DMC_RDYO))
				break;
	}
	if (sc->sc_qactive) {
		addr->bsel0 |= DMC_IEI|DMC_RQI;
		/* VMS does it twice !*$%@# */
		addr->bsel0 |= DMC_IEI|DMC_RQI;
	}

}
503 
/*
 * DMC interface transmitter interrupt.
 * A transfer may have completed, check for errors.
 * If it was a read, notify appropriate protocol.
 * If it was a write, pull the next one off the queue.
 */
dmcxint(unit)
	int unit;
{
	register struct dmc_softc *sc;
	register struct ifnet *ifp;
	struct uba_device *ui = dmcinfo[unit];
	struct dmcdevice *addr;
	struct mbuf *m;
	struct ifqueue *inq;
	int arg, pkaddr, cmd, len;
	register struct ifrw *ifrw;
	register struct dmcbufs *rp;
	register struct ifxmt *ifxp;
	struct dmc_header *dh;
	int off, resid;

	addr = (struct dmcdevice *)ui->ui_addr;
	sc = &dmc_softc[unit];
	ifp = &sc->sc_if;

	/* drain every completed output command the device has posted */
	while (addr->bsel2 & DMC_RDYO) {

		cmd = addr->bsel2 & 0xff;
		arg = addr->sel6 & 0xffff;
		/* reconstruct UNIBUS address of buffer returned to us */
		pkaddr = ((arg&DMC_XMEM)<<2) | (addr->sel4 & 0xffff);
		/* release port */
		addr->bsel2 &= ~DMC_RDYO;
		switch (cmd & 07) {

		case DMC_OUR:
			/*
			 * A read has completed.
			 * Pass packet to type specific
			 * higher-level input routine.
			 */
			ifp->if_ipackets++;
			/* find location in dmcuba struct */
			ifrw= &sc->sc_ifuba.ifu_r[0];
			for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
				if(rp->ubinfo == pkaddr)
					break;
				ifrw++;
			}
			if (rp >= &sc->sc_rbufs[NRCV])
				panic("dmc rcv");
			if ((rp->flags & DBUF_DMCS) == 0)
				printf("dmc%d: done unalloc rbuf\n", unit);

			len = (arg & DMC_CCOUNT) - sizeof (struct dmc_header);
			if (len < 0 || len > DMCMTU) {
				ifp->if_ierrors++;
				printd("dmc%d: bad rcv pkt addr 0x%x len 0x%x\n",
				    unit, pkaddr, len);
				goto setup;
			}
			/*
			 * Deal with trailer protocol: if type is trailer
			 * get true type from first 16-bit word past data.
			 * Remember that type was trailer by setting off.
			 */
			dh = (struct dmc_header *)ifrw->ifrw_addr;
			dh->dmc_type = ntohs((u_short)dh->dmc_type);
#define dmcdataaddr(dh, off, type)	((type)(((caddr_t)((dh)+1)+(off))))
			if (dh->dmc_type >= DMC_TRAILER &&
			    dh->dmc_type < DMC_TRAILER+DMC_NTRAILER) {
				off = (dh->dmc_type - DMC_TRAILER) * 512;
				if (off >= DMCMTU)
					goto setup;		/* sanity */
				dh->dmc_type = ntohs(*dmcdataaddr(dh, off, u_short *));
				resid = ntohs(*(dmcdataaddr(dh, off+2, u_short *)));
				if (off + resid > len)
					goto setup;		/* sanity */
				len = off + resid;
			} else
				off = 0;
			if (len == 0)
				goto setup;

			/*
			 * Pull packet off interface.  Off is nonzero if
			 * packet has trailing header; dmc_get will then
			 * force this header information to be at the front,
			 * but we still have to drop the type and length
			 * which are at the front of any trailer data.
			 */
			m = dmc_get(&sc->sc_ifuba, ifrw, len, off);
			if (m == 0)
				goto setup;
			if (off) {
				m->m_off += 2 * sizeof (u_short);
				m->m_len -= 2 * sizeof (u_short);
			}
			switch (dh->dmc_type) {

#ifdef INET
			case DMC_IPTYPE:
				schednetisr(NETISR_IP);
				inq = &ipintrq;
				break;
#endif
			default:
				m_freem(m);
				goto setup;
			}

			if (IF_QFULL(inq)) {
				IF_DROP(inq);
				m_freem(m);
			} else
				IF_ENQUEUE(inq, m);

	setup:
			/* is this needed? */
			rp->ubinfo = ifrw->ifrw_info & 0x3ffff;

			/* hand the buffer back to the device for refilling */
			dmcload(sc, DMC_READ, rp->ubinfo,
			    ((rp->ubinfo >> 2) & DMC_XMEM) | rp->cc);
			break;

		case DMC_OUX:
			/*
			 * A write has completed, start another
			 * transfer if there is more data to send.
			 */
			ifp->if_opackets++;
			/* find associated dmcbuf structure */
			ifxp = &sc->sc_ifuba.ifu_w[0];
			for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
				if(rp->ubinfo == pkaddr)
					break;
				ifxp++;
			}
			if (rp >= &sc->sc_xbufs[NXMT]) {
				printf("dmc%d: bad packet address 0x%x\n",
				    unit, pkaddr);
				break;
			}
			if ((rp->flags & DBUF_DMCS) == 0)
				printf("dmc%d: unallocated packet 0x%x\n",
				    unit, pkaddr);
			/* mark buffer free */
			if (ifxp->x_xtofree) {
				(void)m_freem(ifxp->x_xtofree);
				ifxp->x_xtofree = 0;
			}
			rp->flags &= ~DBUF_DMCS;
			sc->sc_oused--;
			/* output completed: reset the watchdog tick count */
			sc->sc_nticks = 0;
			sc->sc_flag |= DMC_ACTIVE;
			break;

		case DMC_CNTLO:
			arg &= DMC_CNTMASK;
			if (arg & DMC_FATAL) {
				printd("dmc%d: fatal error, flags=%b\n",
				    unit, arg, CNTLO_BITS);
				ifp->if_flags &= ~(IFF_RUNNING|IFF_UP);
				dmcrestart(unit);
				break;
			}
			/* ACCUMULATE STATISTICS */
			switch(arg) {
			case DMC_NOBUFS:
				ifp->if_ierrors++;
				if ((sc->sc_nobuf++ % DMC_RPNBFS) == 0)
					goto report;
				break;
			case DMC_DISCONN:
				if ((sc->sc_disc++ % DMC_RPDSC) == 0)
					goto report;
				break;
			case DMC_TIMEOUT:
				if ((sc->sc_timeo++ % DMC_RPTMO) == 0)
					goto report;
				break;
			case DMC_DATACK:
				ifp->if_oerrors++;
				if ((sc->sc_datck++ % DMC_RPDCK) == 0)
					goto report;
				break;
			default:
				goto report;
			}
			break;
		report:
			printd("dmc%d: soft error, flags=%b\n", unit,
			    arg, CNTLO_BITS);
			if ((sc->sc_flag & DMC_RESTART) == 0) {
				/*
				 * kill off the dmc to get things
				 * going again by generating a
				 * procedure error
				 */
				sc->sc_flag |= DMC_RESTART;
				arg = sc->sc_ubinfo & 0x3ffff;
				dmcload(sc, DMC_BASEI, arg, (arg>>2)&DMC_XMEM);
			}
			break;

		default:
			printf("dmc%d: bad control %o\n", unit, cmd);
			break;
		}
	}
	dmcstart(unit);
	return;
}
718 
/*
 * DMC output routine.
 * Encapsulate a packet of type family for the dmc.
 * Use trailer local net encapsulation if enough data in first
 * packet leaves a multiple of 512 bytes of data in remainder.
 */
dmcoutput(ifp, m0, dst)
	register struct ifnet *ifp;
	register struct mbuf *m0;
	struct sockaddr *dst;
{
	int type, error, s;
	register struct mbuf *m = m0;
	register struct dmc_header *dh;
	register int off;

	switch (dst->sa_family) {
#ifdef	INET
	case AF_INET:
		off = ntohs((u_short)mtod(m, struct ip *)->ip_len) - m->m_len;
		if ((ifp->if_flags & IFF_NOTRAILERS) == 0)
		/* the if below is the (unbraced) body of the test above */
		if (off > 0 && (off & 0x1ff) == 0 &&
		    m->m_off >= MMINOFF + 2 * sizeof (u_short)) {
			type = DMC_TRAILER + (off>>9);
			m->m_off -= 2 * sizeof (u_short);
			m->m_len += 2 * sizeof (u_short);
			*mtod(m, u_short *) = htons((u_short)DMC_IPTYPE);
			*(mtod(m, u_short *) + 1) = htons((u_short)m->m_len);
			goto gottrailertype;
		}
		type = DMC_IPTYPE;
		off = 0;
		goto gottype;
#endif

	case AF_UNSPEC:
		/* caller supplies a pre-built dmc header in sa_data */
		dh = (struct dmc_header *)dst->sa_data;
		type = dh->dmc_type;
		goto gottype;

	default:
		printf("dmc%d: can't handle af%d\n", ifp->if_unit,
			dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

gottrailertype:
	/*
	 * Packet to be sent as a trailer; move first packet
	 * (control information) to end of chain.
	 */
	while (m->m_next)
		m = m->m_next;
	m->m_next = m0;
	m = m0->m_next;
	m0->m_next = 0;
	m0 = m;

gottype:
	/*
	 * Add local network header
	 * (there is space for a uba on a vax to step on)
	 */
	if (m->m_off > MMAXOFF ||
	    MMINOFF + sizeof(struct dmc_header) > m->m_off) {
		/* no room in the first mbuf; prepend a fresh header mbuf */
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == 0) {
			error = ENOBUFS;
			goto bad;
		}
		m->m_next = m0;
		m->m_off = MMINOFF;
		m->m_len = sizeof (struct dmc_header);
	} else {
		m->m_off -= sizeof (struct dmc_header);
		m->m_len += sizeof (struct dmc_header);
	}
	dh = mtod(m, struct dmc_header *);
	dh->dmc_type = htons((u_short)type);

	/*
	 * Queue message on interface, and start output if interface
	 * not yet active.
	 */
	s = splimp();
	if (IF_QFULL(&ifp->if_snd)) {
		IF_DROP(&ifp->if_snd);
		m_freem(m);
		splx(s);
		return (ENOBUFS);
	}
	IF_ENQUEUE(&ifp->if_snd, m);
	dmcstart(ifp->if_unit);
	splx(s);
	return (0);

bad:
	m_freem(m0);
	return (error);
}
820 
821 
822 /*
823  * Process an ioctl request.
824  */
825 dmcioctl(ifp, cmd, data)
826 	register struct ifnet *ifp;
827 	int cmd;
828 	caddr_t data;
829 {
830 	struct ifreq *ifr = (struct ifreq *)data;
831 	struct sockaddr_in *sin;
832 	int s = splimp(), error = 0;
833 
834 	switch (cmd) {
835 
836 	case SIOCSIFADDR:
837 		sin = (struct sockaddr_in *)&ifr->ifr_addr;
838 		if (sin->sin_family != AF_INET)
839 			return (EINVAL);
840 		if (ifp->if_flags & IFF_RUNNING)
841 			if_rtinit(ifp, -1);	/* delete previous route */
842 		ifp->if_addr = *(struct sockaddr *)sin;
843 		ifp->if_net = in_netof(sin->sin_addr);
844 		ifp->if_flags |= IFF_UP;
845 		/* set up routing table entry */
846 		if ((ifp->if_flags & IFF_ROUTE) == 0) {
847 			rtinit(&ifp->if_dstaddr, &ifp->if_addr, RTF_HOST|RTF_UP);
848 			ifp->if_flags |= IFF_ROUTE;
849 		}
850 		break;
851 
852 	case SIOCSIFDSTADDR:
853 		ifp->if_dstaddr = ifr->ifr_dstaddr;
854 		break;
855 
856 	default:
857 		error = EINVAL;
858 	}
859 	if ((ifp->if_flags & IFF_RUNNING) == 0)
860 		dmcinit(ifp->if_unit);
861 	splx(s);
862 	return (error);
863 }
864 
865 
866 /*
867  * Routines supporting UNIBUS network interfaces.
868  */
869 
/*
 * Init UNIBUS for interface on uban whose headers of size hlen are to
 * end on a page boundary.  We allocate a UNIBUS map register for the page
 * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
 * doing this for each receive and transmit buffer.  We also
 * allocate page frames in the mbuffer pool for these pages.
 * Returns 1 on success, 0 on failure (all partial allocations undone).
 */
dmc_ubainit(ifu, uban, hlen, nmr)
	register struct dmcuba *ifu;
	int uban, hlen, nmr;
{
	register caddr_t cp, dp;
	register struct ifrw *ifrw;
	register struct ifxmt *ifxp;
	int i, ncl;

	ncl = clrnd(nmr + CLSIZE) / CLSIZE;
	if (ifu->ifu_r[0].ifrw_addr)
		/*
		 * If the first read buffer has a non-zero
		 * address, it means we have already allocated core
		 */
		cp = ifu->ifu_r[0].ifrw_addr - (CLBYTES - hlen);
	else {
		cp = m_clalloc(NTOT * ncl, MPG_SPACE);
		if (cp == 0)
			return (0);
		ifu->ifu_hlen = hlen;
		ifu->ifu_uban = uban;
		ifu->ifu_uba = uba_hd[uban].uh_uba;
		/*
		 * Carve the allocation into per-buffer regions; each
		 * buffer's data begins hlen bytes before a cluster
		 * boundary, so the header ends on that boundary.
		 */
		dp = cp + CLBYTES - hlen;
		for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++) {
			ifrw->ifrw_addr = dp;
			dp += ncl * CLBYTES;
		}
		for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) {
			ifxp->x_ifrw.ifrw_addr = dp;
			dp += ncl * CLBYTES;
		}
	}
	/* allocate for receive ring */
	for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++) {
		if (dmc_ubaalloc(ifu, ifrw, nmr) == 0) {
			struct ifrw *rw;

			/* back out the receive mappings made so far */
			for (rw = ifu->ifu_r; rw < ifrw; rw++)
				ubarelse(ifu->ifu_uban, &rw->ifrw_info);
			goto bad;
		}
	}
	/* and now transmit ring */
	for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) {
		ifrw = &ifxp->x_ifrw;
		if (dmc_ubaalloc(ifu, ifrw, nmr) == 0) {
			struct ifxmt *xp;

			/* back out all mappings made so far */
			for (xp = ifu->ifu_w; xp < ifxp; xp++)
				ubarelse(ifu->ifu_uban, &xp->x_ifrw.ifrw_info);
			for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++)
				ubarelse(ifu->ifu_uban, &ifrw->ifrw_info);
			goto bad;
		}
		/*
		 * Save a pristine copy of the map registers so dmcput
		 * can restore them after swapping clusters out.
		 */
		for (i = 0; i < nmr; i++)
			ifxp->x_map[i] = ifrw->ifrw_mr[i];
		ifxp->x_xswapd = 0;
	}
	return (1);
bad:
	m_pgfree(cp, NTOT * ncl);
	ifu->ifu_r[0].ifrw_addr = 0;
	return (0);
}
942 
943 /*
944  * Setup either a ifrw structure by allocating UNIBUS map registers,
945  * possibly a buffered data path, and initializing the fields of
946  * the ifrw structure to minimize run-time overhead.
947  */
948 static
949 dmc_ubaalloc(ifu, ifrw, nmr)
950 	struct dmcuba *ifu;
951 	register struct ifrw *ifrw;
952 	int nmr;
953 {
954 	register int info;
955 
956 	info =
957 	    uballoc(ifu->ifu_uban, ifrw->ifrw_addr, nmr*NBPG + ifu->ifu_hlen,
958 		ifu->ifu_flags);
959 	if (info == 0)
960 		return (0);
961 	ifrw->ifrw_info = info;
962 	ifrw->ifrw_bdp = UBAI_BDP(info);
963 	ifrw->ifrw_proto = UBAMR_MRV | (UBAI_BDP(info) << UBAMR_DPSHIFT);
964 	ifrw->ifrw_mr = &ifu->ifu_uba->uba_map[UBAI_MR(info) + 1];
965 	return (1);
966 }
967 
/*
 * Pull read data off a interface.
 * Len is length of data, with local net header stripped.
 * Off is non-zero if a trailer protocol was used, and
 * gives the offset of the trailer information.
 * We copy the trailer information and then all the normal
 * data into mbufs.  When full cluster sized units are present
 * on the interface on cluster boundaries we can get them more
 * easily by remapping, and take advantage of this here.
 * Returns the mbuf chain, or 0 if an mbuf could not be had.
 */
struct mbuf *
dmc_get(ifu, ifrw, totlen, off0)
	register struct dmcuba *ifu;
	register struct ifrw *ifrw;
	int totlen, off0;
{
	struct mbuf *top, **mp, *m;
	int off = off0, len;
	register caddr_t cp = ifrw->ifrw_addr + ifu->ifu_hlen;

	top = 0;
	mp = &top;
	while (totlen > 0) {
		MGET(m, M_DONTWAIT, MT_DATA);
		if (m == 0)
			goto bad;
		if (off) {
			/* trailer present: take the trailer data first */
			len = totlen - off;
			cp = ifrw->ifrw_addr + ifu->ifu_hlen + off;
		} else
			len = totlen;
		if (len >= CLBYTES) {
			struct mbuf *p;
			struct pte *cpte, *ppte;
			int x, *ip, i;

			MCLGET(p, 1);
			if (p == 0)
				goto nopage;
			len = m->m_len = CLBYTES;
			m->m_off = (int)p - (int)m;
			if (!claligned(cp))
				goto copy;

			/*
			 * Switch pages mapped to UNIBUS with new page p,
			 * as quick form of copy.  Remap UNIBUS and invalidate.
			 */
			cpte = &Mbmap[mtocl(cp)*CLSIZE];
			ppte = &Mbmap[mtocl(p)*CLSIZE];
			x = btop(cp - ifrw->ifrw_addr);
			ip = (int *)&ifrw->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++) {
				struct pte t;
				/* exchange the two page table entries */
				t = *ppte; *ppte++ = *cpte; *cpte = t;
				*ip++ =
				    cpte++->pg_pfnum|ifrw->ifrw_proto;
				mtpr(TBIS, cp);
				cp += NBPG;
				mtpr(TBIS, (caddr_t)p);
				p += NBPG / sizeof (*p);
			}
			goto nocopy;
		}
nopage:
		m->m_len = MIN(MLEN, len);
		m->m_off = MMINOFF;
copy:
		bcopy(cp, mtod(m, caddr_t), (unsigned)m->m_len);
		cp += m->m_len;
nocopy:
		*mp = m;
		mp = &m->m_next;
		if (off) {
			/* sort of an ALGOL-W style for statement... */
			off += m->m_len;
			if (off == totlen) {
				/* trailer consumed: wrap to the head data */
				cp = ifrw->ifrw_addr + ifu->ifu_hlen;
				off = 0;
				totlen = off0;
			}
		} else
			totlen -= m->m_len;
	}
	return (top);
bad:
	m_freem(top);
	return (0);
}
1057 
/*
 * Map a chain of mbufs onto a network interface
 * in preparation for an i/o operation.
 * The argument chain of mbufs includes the local network
 * header which is copied to be in the mapped, aligned
 * i/o space.
 * Returns the byte count mapped for transmit buffer n.
 */
dmcput(ifu, n, m)
	struct dmcuba *ifu;
	int n;
	register struct mbuf *m;
{
	register struct mbuf *mp;
	register caddr_t cp;
	register struct ifxmt *ifxp;
	register struct ifrw *ifrw;
	register int i;
	int xswapd = 0;
	int x, cc, t;
	caddr_t dp;

	ifxp = &ifu->ifu_w[n];
	ifrw = &ifxp->x_ifrw;
	cp = ifrw->ifrw_addr;
	while (m) {
		dp = mtod(m, char *);
		if (claligned(cp) && claligned(dp) && m->m_len == CLBYTES) {
			/* whole aligned cluster: remap instead of copying */
			struct pte *pte; int *ip;
			pte = &Mbmap[mtocl(dp)*CLSIZE];
			x = btop(cp - ifrw->ifrw_addr);
			ip = (int *)&ifrw->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++)
				*ip++ = ifrw->ifrw_proto | pte++->pg_pfnum;
			xswapd |= 1 << (x>>(CLSHIFT-PGSHIFT));
			/* keep the mbuf until the dma completes (dmcxint) */
			mp = m->m_next;
			m->m_next = ifxp->x_xtofree;
			ifxp->x_xtofree = m;
			cp += m->m_len;
		} else {
			bcopy(mtod(m, caddr_t), cp, (unsigned)m->m_len);
			cp += m->m_len;
			MFREE(m, mp);
		}
		m = mp;
	}

	/*
	 * Xswapd is the set of clusters we just mapped out.  Ifxp->x_xswapd
	 * is the set of clusters mapped out from before.  We compute
	 * the number of clusters involved in this operation in x.
	 * Clusters mapped out before and involved in this operation
	 * should be unmapped so original pages will be accessed by the device.
	 */
	cc = cp - ifrw->ifrw_addr;
	x = ((cc - ifu->ifu_hlen) + CLBYTES - 1) >> CLSHIFT;
	ifxp->x_xswapd &= ~xswapd;
	while (i = ffs(ifxp->x_xswapd)) {
		i--;
		if (i >= x)
			break;
		ifxp->x_xswapd &= ~(1<<i);
		i *= CLSIZE;
		/* restore the pristine map registers saved at init time */
		for (t = 0; t < CLSIZE; t++) {
			ifrw->ifrw_mr[i] = ifxp->x_map[i];
			i++;
		}
	}
	ifxp->x_xswapd |= xswapd;
	return (cc);
}
1128 
/*
 * Restart after a fatal error.
 * Clear device and reinitialize.
 */
dmcrestart(unit)
	int unit;
{
	register struct dmc_softc *sc = &dmc_softc[unit];
	register struct uba_device *ui = dmcinfo[unit];
	register struct dmcdevice *addr;
	register struct ifxmt *ifxp;
	register int i;
	register struct mbuf *m;
	struct dmcuba *ifu;

	addr = (struct dmcdevice *)ui->ui_addr;
	ifu = &sc->sc_ifuba;
#ifdef DEBUG
	/* dump base table */
	printf("dmc%d base table:\n", unit);
	for (i = 0; i < sizeof (struct dmc_base); i++)
		printf("%o\n" ,dmc_base[unit].d_base[i]);
#endif
	/*
	 * Let the DMR finish the MCLR.	 At 1 Mbit, it should do so
	 * in about a max of 6.4 milliseconds with diagnostics enabled.
	 */
	addr->bsel1 = DMC_MCLR;
	for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
		;
	/* Did the timer expire or did the DMR finish? */
	if ((addr->bsel1 & DMC_RUN) == 0) {
		printf("dmc%d: M820 Test Failed\n", unit);
		return;
	}

#ifdef notdef	/* tef sez why throw these packets away??? */
	/* purge send queue */
	IF_DEQUEUE(&sc->sc_if.if_snd, m);
	while (m) {
		m_freem(m);
		IF_DEQUEUE(&sc->sc_if.if_snd, m);
	}
#endif
	/* release mbufs held for dma by in-flight transmits */
	for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) {
		if (ifxp->x_xtofree) {
			(void) m_freem(ifxp->x_xtofree);
			ifxp->x_xtofree = 0;
		}
	}

	/* restart DMC */
	dmcinit(unit);
	sc->sc_flag &= ~DMC_RESTART;
	sc->sc_if.if_collisions++;	/* why not? */
}
1185 
1186 /*
1187  * Check to see that transmitted packets don't
1188  * lose interrupts.  The device has to be active.
1189  */
1190 dmcwatch()
1191 {
1192 	register struct uba_device *ui;
1193 	register struct dmc_softc *sc;
1194 	struct dmcdevice *addr;
1195 	register int i;
1196 
1197 	for (i = 0; i < NDMC; i++) {
1198 		sc = &dmc_softc[i];
1199 		if ((sc->sc_flag & DMC_ACTIVE) == 0)
1200 			continue;
1201 		if ((ui = dmcinfo[i]) == 0 || ui->ui_alive == 0)
1202 			continue;
1203 		if (sc->sc_oused) {
1204 			sc->sc_nticks++;
1205 			if (sc->sc_nticks > dmc_timeout) {
1206 				sc->sc_nticks = 0;
1207 				addr = (struct dmcdevice *)ui->ui_addr;
1208 				printd("dmc%d hung: bsel0=%b bsel2=%b\n", i,
1209 				    addr->bsel0 & 0xff, DMC0BITS,
1210 				    addr->bsel2 & 0xff, DMC2BITS);
1211 				dmcrestart(i);
1212 			}
1213 		}
1214 	}
1215 	timeout(dmcwatch, (caddr_t) 0, hz);
1216 }
1217 #endif
1218