/*	if_dmc.c	6.3	84/09/27	*/

#include "dmc.h"
#if NDMC > 0
#define printd if(dmcdebug)printf
int dmcdebug = 0;
/*
 * DMC11 device driver, internet version
 *
 *	Bill Nesheim	(bill@cornell.arpa or {vax135,uw-beaver,ihnp4}!bill)
 *	Cornell University
 *	Department of Computer Science
 *
 *	Based loosely on the 4.2BSD release.
 *	The UNIBUS support routines were taken from Lou Salkind's DEUNA driver.
 *
 * TO DO:
 *	generalize unibus routines
 *	add timeout to mark interface down when other end of link dies
 *	figure out better way to check for completed buffers
 *	(not critical with DMC, only 7 bufs, but may cause problems
 *		on a DMR)
 */
#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "mbuf.h"
#include "buf.h"
#include "ioctl.h"		/* must precede tty.h */
#include "tty.h"
#include "protosw.h"
#include "socket.h"
#include "vmmac.h"
#include "errno.h"

#include "../net/if.h"
#include "../net/netisr.h"
#include "../net/route.h"
#include "../netinet/in.h"
#include "../netinet/in_systm.h"

#include "../vax/cpu.h"
#include "../vax/mtpr.h"
#include "if_uba.h"
#include "if_dmc.h"
#include "../vaxuba/ubareg.h"
#include "../vaxuba/ubavar.h"

/*
 * Driver information for auto-configuration stuff.
 */
int	dmcprobe(), dmcattach(), dmcinit(), dmcioctl();
int	dmcoutput(), dmcreset();
struct	uba_device *dmcinfo[NDMC];
u_short	dmcstd[] = { 0 };
struct	uba_driver dmcdriver =
	{ dmcprobe, 0, dmcattach, 0, dmcstd, "dmc", dmcinfo };

/* as long as we use clists for command queues, we only have 28 bytes to use! */
/* DMC-11 only has 7 buffers; DMR-11 has 64 */
#define NRCV 7
#define NXMT (NRCV - 2)	/* avoid running out of buffers on recv end */
#define NTOT (NRCV + NXMT)

/* error reporting intervals */
#define DMC_RPNBFS	50
#define DMC_RPDSC	1
#define DMC_RPTMO	20
#define DMC_RPDCK	5

struct  dmc_command {
	char	qp_cmd;		/* command */
	short	qp_ubaddr;	/* buffer address */
	short	qp_cc;		/* character count || XMEM */
	struct	dmc_command *qp_next;	/* next command on queue */
};
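/*
 * Note on addressing: an 18-bit UNIBUS buffer address is split across
 * the two parameter words.  The low 16 bits go in qp_ubaddr (sel4) and
 * the high two bits ride in the XMEM field of the count word (sel6),
 * which is why commands below are built as ((addr >> 2) & DMC_XMEM) | cc
 * and dmcxint reconstructs the address as ((arg & DMC_XMEM) << 2) | sel4.
 */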

/*
 * The dmcuba structures generalize the ifuba structure
 * to an arbitrary number of receive and transmit buffers.
 */
struct	ifxmt {
	struct	ifrw x_ifrw;		/* mapping info */
	struct	pte x_map[IF_MAXNUBAMR];	/* output base pages */
	short 	x_xswapd;		/* mask of clusters swapped */
	struct	mbuf *x_xtofree;	/* pages being dma'd out */
};
struct	dmcuba {
	short	ifu_uban;		/* uba number */
	short	ifu_hlen;		/* local net header length */
	struct	uba_regs *ifu_uba;	/* uba regs, in vm */
	struct	ifrw ifu_r[NRCV];	/* receive information */
	struct	ifxmt ifu_w[NXMT];	/* transmit information */
				/* these should only be pointers */
	short	ifu_flags;		/* used during uballoc's */
};

struct dmcbufs {
	int	ubinfo;		/* from uballoc */
	short	cc;		/* buffer size */
	short	flags;		/* access control */
};
#define	DBUF_OURS	0	/* buffer is available */
#define	DBUF_DMCS	1	/* buffer claimed by the DMC */
#define	DBUF_XMIT	4	/* transmit buffer */
#define	DBUF_RCV	8	/* receive buffer */

struct mbuf *dmc_get();

/*
 * DMC software status per interface.
 *
 * Each interface is referenced by a network interface structure,
 * sc_if, which the routing code uses to locate the interface.
 * This structure contains the output queue for the interface, its address, ...
 * We also have, for each interface, a set of UBA interface structures
 * which contain information about the UNIBUS resources held by the
 * interface: map registers, buffered data paths, etc.  Information is
 * cached in this structure for use by the if_uba.c routines in running
 * the interface efficiently.
 */
struct dmc_softc {
	short	sc_oused;		/* output buffers currently in use */
	short	sc_iused;		/* input buffers given to DMC */
	short	sc_flag;		/* flags */
	struct	ifnet sc_if;		/* network-visible interface */
	struct	dmcbufs sc_rbufs[NRCV];	/* receive buffer info */
	struct	dmcbufs sc_xbufs[NXMT];	/* transmit buffer info */
	struct	dmcuba sc_ifuba;	/* UNIBUS resources */
	int	sc_ubinfo;		/* UBA mapping info for base table */
	int	sc_errors[4];		/* non-fatal error counters */
#define sc_datck sc_errors[0]
#define sc_timeo sc_errors[1]
#define sc_nobuf sc_errors[2]
#define sc_disc  sc_errors[3]
	/* command queue stuff */
	struct	dmc_command sc_cmdbuf[NTOT+3];
	struct	dmc_command *sc_qhead;	/* head of command queue */
	struct	dmc_command *sc_qtail;	/* tail of command queue */
	struct	dmc_command *sc_qactive;	/* command in progress */
	struct	dmc_command *sc_qfreeh;	/* head of list of free cmd buffers */
	struct	dmc_command *sc_qfreet;	/* tail of list of free cmd buffers */
	/* end command queue stuff */
} dmc_softc[NDMC];

/* flags */
#define	DMC_ALLOC	01	/* unibus resources allocated */
#define	DMC_BMAPPED	02	/* base table mapped */

struct	dmc_base {
	short	d_base[128];	/* DMC base table */
} dmc_base[NDMC];

/* queue manipulation macros */
#define	QUEUE_AT_HEAD(qp, head, tail) \
	(qp)->qp_next = (head); \
	(head) = (qp); \
	if ((tail) == (struct dmc_command *) 0) \
		(tail) = (head)

#define QUEUE_AT_TAIL(qp, head, tail) \
	if ((tail)) \
		(tail)->qp_next = (qp); \
	else \
		(head) = (qp); \
	(qp)->qp_next = (struct dmc_command *) 0; \
	(tail) = (qp)

#define DEQUEUE(head, tail) \
	(head) = (head)->qp_next;\
	if ((head) == (struct dmc_command *) 0)\
		(tail) = (head)
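/*
 * These macros expand to multiple statements and are not wrapped in
 * do { ... } while (0), so they must not be used as the body of an
 * unbraced if or else; all uses below are either braced or at
 * statement level.
 */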

dmcprobe(reg)
	caddr_t reg;
{
	register int br, cvec;
	register struct dmcdevice *addr = (struct dmcdevice *)reg;
	register int i;

#ifdef lint
	br = 0; cvec = br; br = cvec;
	dmcrint(0); dmcxint(0);
#endif
	addr->bsel1 = DMC_MCLR;
	for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
		;
	if ((addr->bsel1 & DMC_RUN) == 0)
		return (0);
	/* MCLR is self clearing */
	addr->bsel0 = DMC_RQI|DMC_IEI;
	DELAY(100000);
	addr->bsel1 = DMC_MCLR;
	for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
		;
	return (1);
}
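/*
 * The probe master-clears the device, then requests an input interrupt
 * with interrupts enabled; the interrupt taken here during
 * autoconfiguration is, presumably, what lets the configuration code
 * latch the device's bus request level and interrupt vector (the br
 * and cvec registers above, set as side effects of the probe).
 */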

/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
dmcattach(ui)
	register struct uba_device *ui;
{
	register struct dmc_softc *sc = &dmc_softc[ui->ui_unit];
	register struct dmc_command *qp;

	sc->sc_if.if_unit = ui->ui_unit;
	sc->sc_if.if_name = "dmc";
	sc->sc_if.if_mtu = DMCMTU;
	sc->sc_if.if_init = dmcinit;
	sc->sc_if.if_output = dmcoutput;
	sc->sc_if.if_ioctl = dmcioctl;
	sc->sc_if.if_reset = dmcreset;
	sc->sc_if.if_flags = IFF_POINTOPOINT;
	sc->sc_ifuba.ifu_flags = UBA_CANTWAIT;

	/* set up command queues */
	sc->sc_qfreeh = sc->sc_qfreet =
		sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
		(struct dmc_command *) 0;
	/* set up free command buffer list */
	for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NTOT+2]; qp++ ) {
		QUEUE_AT_HEAD( qp, sc->sc_qfreeh, sc->sc_qfreet);
	}
	if_attach(&sc->sc_if);
}
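/*
 * Note that the free-list loop above links only NTOT+2 of the NTOT+3
 * command buffers in sc_cmdbuf; the last element is never used.
 */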

/*
 * Reset of interface after UNIBUS reset.
 * If interface is on specified UBA, reset its state.
 */
dmcreset(unit, uban)
	int unit, uban;
{
	register struct uba_device *ui;
	register struct dmc_softc *sc = &dmc_softc[unit];

	if (unit >= NDMC || (ui = dmcinfo[unit]) == 0 || ui->ui_alive == 0 ||
	    ui->ui_ubanum != uban)
		return;
	printf(" dmc%d", unit);
	sc->sc_flag = 0;	/* previous unibus resources no longer valid */
	dmcinit(unit);
}

/*
 * Initialization of interface; reinitialize UNIBUS usage.
 */
dmcinit(unit)
	int unit;
{
	register struct dmc_softc *sc = &dmc_softc[unit];
	register struct uba_device *ui = dmcinfo[unit];
	register struct dmcdevice *addr;
	register struct ifnet *ifp = &sc->sc_if;
	register struct ifrw *ifrw;
	register struct ifxmt *ifxp;
	register struct dmcbufs *rp;
	int base;
	struct sockaddr_in *sin;

	printd("dmcinit\n");
	addr = (struct dmcdevice *)ui->ui_addr;

	sin = (struct sockaddr_in *) &ifp->if_addr;
	if (sin->sin_addr.s_addr == 0)	/* if address still unknown */
		return;
	sin = (struct sockaddr_in *) &ifp->if_dstaddr;
	if (sin->sin_addr.s_addr == 0)	/* if address still unknown */
		return;

	if ((addr->bsel1&DMC_RUN) == 0) {
		printf("dmcinit: DMC not running\n");
		ifp->if_flags &= ~(IFF_RUNNING|IFF_UP);
		return;
	}
	/* map base table */
	if ((sc->sc_flag&DMC_BMAPPED) == 0) {
		sc->sc_ubinfo = uballoc(ui->ui_ubanum,
			(caddr_t)&dmc_base[unit], sizeof (struct dmc_base), 0);
		sc->sc_flag |= DMC_BMAPPED;
	}
	/* initialize UNIBUS resources */
	sc->sc_iused = sc->sc_oused = 0;
	if ((sc->sc_flag&DMC_ALLOC) == 0) {
		if (dmc_ubainit(&sc->sc_ifuba, ui->ui_ubanum, 0,
				(int)btoc(DMCMTU)) == 0) {
			printf("dmc%d: can't initialize\n", unit);
			ifp->if_flags &= ~IFF_UP;
			return;
		}
		sc->sc_flag |= DMC_ALLOC;
	}

	/* initialize buffer pool */
	/* receives */
	ifrw = &sc->sc_ifuba.ifu_r[0];
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->ubinfo = ifrw->ifrw_info & 0x3ffff;
		rp->cc = DMCMTU;
		rp->flags = DBUF_OURS|DBUF_RCV;
		printd("rcv: 0x%x\n", rp->ubinfo);
		ifrw++;
	}
	/* transmits */
	ifxp = &sc->sc_ifuba.ifu_w[0];
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
		rp->ubinfo = ifxp->x_ifrw.ifrw_info & 0x3ffff;
		rp->cc = 0;
		rp->flags = DBUF_OURS|DBUF_XMIT;
		printd("xmit: 0x%x\n", rp->ubinfo);
		ifxp++;
	}
	/* base in */
	base = sc->sc_ubinfo & 0x3ffff;
	printd("  base 0x%x\n", base);
	dmcload(sc, DMC_BASEI, base, (base>>2)&DMC_XMEM);
	/* select mode of operation; the config-file flags choose */
	/* DDCMP full or half duplex, maintenance mode, and primary */
	/* or secondary station */
	if (ui->ui_flags == 0)
		/* use DDCMP mode in full duplex */
		dmcload(sc, DMC_CNTLI, 0, 0);
	else if (ui->ui_flags == 1)
		/* use MAINTENANCE mode */
		dmcload(sc, DMC_CNTLI, 0, DMC_MAINT );
	else if (ui->ui_flags == 2)
		/* use DDCMP half duplex as primary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
	else if (ui->ui_flags == 3)
		/* use DDCMP half duplex as secondary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);

	/* queue first NRCV buffers for DMC to fill */
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->flags |= DBUF_DMCS;
		dmcload(sc, DMC_READ, rp->ubinfo,
			(((rp->ubinfo>>2)&DMC_XMEM)|rp->cc));
		sc->sc_iused++;
	}

	/* enable output interrupts */
	while ((addr->bsel2&DMC_IEO) == 0)
		addr->bsel2 |= DMC_IEO;
	ifp->if_flags |= IFF_UP|IFF_RUNNING;
}
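/*
 * To summarize the sequence above: the base table is mapped and handed
 * to the device (DMC_BASEI), the line mode is selected (DMC_CNTLI),
 * and all NRCV receive buffers are queued (DMC_READ) before output
 * interrupts are enabled.
 */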

/*
 * Start output on interface.  Get another datagram
 * to send from the interface queue and map it to
 * the interface before starting output.
 *
 * Must be called at spl5.
 */
dmcstart(dev)
	dev_t dev;
{
	int unit = minor(dev);
	register struct dmc_softc *sc = &dmc_softc[unit];
	struct mbuf *m;
	register struct dmcbufs *rp;
	register int n;

	if ((sc->sc_flag & DMC_ALLOC) == 0) {
		printf("dmcstart: no unibus resources!!\n");
		return;
	}
	/*
	 * Dequeue up to NXMT requests and map them to the UNIBUS.
	 * If no more requests, or no dmc buffers available, just return.
	 */
	n = 0;
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++ ) {
		/* find an available buffer */
		if ((rp->flags&DBUF_DMCS) == 0) {
			IF_DEQUEUE(&sc->sc_if.if_snd, m);
			if (m == 0)
				return;
			if ((rp->flags&DBUF_XMIT) == 0)
				printf("dmcstart: not xmit buf\n");
			/* mark it dmcs */
			rp->flags |= (DBUF_DMCS);
			/*
			 * Have request mapped to UNIBUS for transmission
			 * and start the output.
			 */
			rp->cc = (dmcput(&sc->sc_ifuba, n, m))&DMC_CCOUNT;
			sc->sc_oused++;
			dmcload(sc, DMC_WRITE, rp->ubinfo,
				rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
		}
		n++;
	}
}

/*
 * Utility routine to load the DMC device registers.
 */
dmcload(sc, type, w0, w1)
	register struct dmc_softc *sc;
	int type, w0, w1;
{
	register struct dmcdevice *addr;
	register int unit, sps;
	register struct dmc_command *qp;

	unit = sc - dmc_softc;	/* pointer subtraction yields the unit number */
	addr = (struct dmcdevice *)dmcinfo[unit]->ui_addr;
	sps = spl5();

	/* grab a command buffer from the free list */
	if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0)
		panic("dmc command queue overflow");
	DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);

	/* fill in requested info */
	qp->qp_cmd = (type | DMC_RQI);
	qp->qp_ubaddr = w0;
	qp->qp_cc = w1;

	if (sc->sc_qactive) {	/* command in progress */
		if (type == DMC_READ) {
			QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail);
		} else {
			QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
		}
	} else {	/* command port free */
		sc->sc_qactive = qp;
		addr->bsel0 = qp->qp_cmd;
		dmcrint(unit);
	}
	splx(sps);
}
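/*
 * DMC_READ commands are queued at the head of the command queue, ahead
 * of pending writes, presumably so receive buffers get back to the
 * device as quickly as possible.  When the command port is free,
 * dmcrint is called directly so the first command is presented without
 * waiting for an input interrupt.
 */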

/*
 * DMC interface receiver interrupt.
 * Ready to accept another command,
 * pull one off the command queue.
 */
dmcrint(unit)
	int unit;
{
	register struct dmc_softc *sc;
	register struct dmcdevice *addr;
	register struct dmc_command *qp;
	register int n;

	addr = (struct dmcdevice *)dmcinfo[unit]->ui_addr;
	sc = &dmc_softc[unit];
	if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
		printf("dmcrint: no command\n");
		return;
	}
	while (addr->bsel0&DMC_RDYI) {
		addr->sel4 = qp->qp_ubaddr;
		addr->sel6 = qp->qp_cc;
		addr->bsel0 &= ~(DMC_IEI|DMC_RQI);
		printd("load done, cmd 0x%x, ubaddr 0x%x, cc 0x%x\n",
			qp->qp_cmd, qp->qp_ubaddr, qp->qp_cc);
		/* free command buffer */
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
		while (addr->bsel0 & DMC_RDYI) {
			/*
			 * Can't check for RDYO here 'cause
			 * this routine isn't reentrant!
			 */
			DELAY(5);
		}
		/* move on to next command */
		if ((sc->sc_qactive = sc->sc_qhead) == (struct dmc_command *) 0)
			/* all done */
			break;
		/* more commands to do, start the next one */
		qp = sc->sc_qactive;
		DEQUEUE(sc->sc_qhead, sc->sc_qtail);
		addr->bsel0 = qp->qp_cmd;
		n = RDYSCAN;
		while (n-- && (addr->bsel0&DMC_RDYI) == 0)
			DELAY(5);
	}
	if (sc->sc_qactive) {
		addr->bsel0 |= DMC_IEI|DMC_RQI;
		/* VMS does it twice !*$%@# */
		addr->bsel0 |= DMC_IEI|DMC_RQI;
	}
}
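/*
 * RDYSCAN (defined elsewhere, presumably in if_dmc.h) bounds the
 * polling loop above: after presenting a new command the driver spins
 * briefly waiting for the device to raise RDYI again, rather than
 * taking a fresh interrupt for each queued command.
 */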

/*
 * DMC interface transmitter interrupt.
 * A transfer may have completed, check for errors.
 * If it was a read, notify appropriate protocol.
 * If it was a write, pull the next one off the queue.
 */
dmcxint(unit)
	int unit;
{
	register struct dmc_softc *sc;
	register struct ifnet *ifp;
	struct uba_device *ui = dmcinfo[unit];
	struct dmcdevice *addr;
	struct mbuf *m;
	register struct ifqueue *inq;
	int arg, pkaddr, cmd, len;
	register struct ifrw *ifrw;
	register struct dmcbufs *rp;

	addr = (struct dmcdevice *)ui->ui_addr;
	sc = &dmc_softc[unit];
	ifp = &sc->sc_if;

	cmd = addr->bsel2 & 0xff;
	arg = addr->sel6 & 0xffff;
	if ((cmd&DMC_RDYO) == 0) {
		printf("dmc%d: bogus xmit intr\n", unit);
		return;
	}
	/* reconstruct UNIBUS address of buffer returned to us */
	pkaddr = ((arg&DMC_XMEM)<<2)|(addr->sel4 & 0xffff);
	/* release port */
	addr->bsel2 &= ~DMC_RDYO;
	switch (cmd & 07) {

	case DMC_OUR:
		/*
		 * A read has completed.
		 * Pass packet to type specific
		 * higher-level input routine.
		 */
		ifp->if_ipackets++;
		len = arg & DMC_CCOUNT;
		/* find location in dmcuba struct */
		ifrw = &sc->sc_ifuba.ifu_r[0];
		rp = &sc->sc_rbufs[0];
		for (; rp < &sc->sc_rbufs[NRCV]; rp++) {
			if (rp->ubinfo == pkaddr)
				goto foundrcv;
			ifrw++;
		}
		printf("bad rcv pkt addr 0x%x len 0x%x\n", pkaddr, len);
		goto setup;

	foundrcv:
		if ((rp->flags&DBUF_DMCS) == 0) {
			printf("dmcxint: done unalloc rbuf\n");
		}
		switch (ifp->if_addr.sa_family) {
#ifdef INET
		case AF_INET:
			schednetisr(NETISR_IP);
			inq = &ipintrq;
			break;
#endif

		default:
			printf("dmc%d: unknown address type %d\n", unit,
			    ifp->if_addr.sa_family);
			goto setup;
		}

		m = dmc_get(&sc->sc_ifuba, ifrw, len, 0);
		if (m == (struct mbuf *)0)
			goto setup;
		if (IF_QFULL(inq)) {
			IF_DROP(inq);
			m_freem(m);
		} else
			IF_ENQUEUE(inq, m);
setup:
		arg = ifrw->ifrw_info & 0x3ffff;
		dmcload(sc, DMC_READ, arg, ((arg >> 2) & DMC_XMEM) | DMCMTU);
		break;

	case DMC_OUX:
		/*
		 * A write has completed, start another
		 * transfer if there is more data to send.
		 */
		ifp->if_opackets++;
		printd("OUX pkaddr 0x%x\n", pkaddr);
		/* find associated dmcbuf structure */
		rp = &sc->sc_xbufs[0];
		for (; rp < &sc->sc_xbufs[NXMT]; rp++) {
			if (rp->ubinfo == pkaddr)
				goto found;
		}
		printf("dmc%d: bad packet address 0x%x\n",
			unit, pkaddr);
		break;
	found:
		if ((rp->flags&DBUF_DMCS) == 0)
			printf("dmc returned unallocated packet 0x%x\n",
				pkaddr);
		/* mark buffer free */
		rp->flags &= ~(DBUF_DMCS);
		sc->sc_oused--;
		dmcstart(unit);
		break;

	case DMC_CNTLO:
		arg &= DMC_CNTMASK;
		if (arg&DMC_FATAL) {
			register int i;

			printf("dmc%d: fatal error, flags=%b\n",
			    unit, arg, CNTLO_BITS);
			ifp->if_flags &= ~(IFF_RUNNING|IFF_UP);
			/* master clear device */
			addr->bsel1 = DMC_MCLR;
			for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
				;
			dmcinit(unit);
			ifp->if_ierrors++;
			break;
		} else {
			/* accumulate statistics */
			switch (arg) {
			case DMC_NOBUFS:
				ifp->if_ierrors++;
				if ((sc->sc_nobuf++ % DMC_RPNBFS) != 0)
					break;
				goto report;
			case DMC_DISCONN:
				if ((sc->sc_disc++ % DMC_RPDSC) != 0)
					break;
				goto report;
			case DMC_TIMEOUT:
				if ((sc->sc_timeo++ % DMC_RPTMO) != 0)
					break;
				goto report;
			case DMC_DATACK:
				ifp->if_oerrors++;
				if ((sc->sc_datck++ % DMC_RPDCK) != 0)
					break;
				goto report;
			default:
				goto report;
			}
			break;
		report:
			printf("dmc%d: soft error, flags=%b\n",
			    unit, arg, CNTLO_BITS);
		}
		break;

	default:
		printf("dmc%d: bad control %o\n", unit, cmd);
	}
	return;
}
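/*
 * Two notes on the receive path above: if the returned packet address
 * matches no receive buffer, control reaches setup: with ifrw pointing
 * one past the end of ifu_r[], so the reload uses a bogus mapping; and
 * the soft error counters are reported only once every DMC_RP*
 * occurrences so a flapping line does not flood the console.
 */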

/*
 * DMC output routine.
 * Just send the data, header was supplied by
 * upper level protocol routines.
 */
dmcoutput(ifp, m, dst)
	register struct ifnet *ifp;
	register struct mbuf *m;
	struct sockaddr *dst;
{
	int s;

	if (dst->sa_family != ifp->if_addr.sa_family) {
		printf("dmc%d: af%d not supported\n", ifp->if_unit,
		    dst->sa_family);
		m_freem(m);
		return (EAFNOSUPPORT);
	}
	s = spl5();
	if (IF_QFULL(&ifp->if_snd)) {
		IF_DROP(&ifp->if_snd);
		m_freem(m);
		splx(s);
		return (ENOBUFS);
	}
	IF_ENQUEUE(&ifp->if_snd, m);
	dmcstart(ifp->if_unit);
	splx(s);
	return (0);
}
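/*
 * The enqueue and the call to dmcstart are done at spl5 since dmcstart
 * and the device interrupt routines manipulate the same buffer state.
 */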

/*
 * Process an ioctl request.
 */
dmcioctl(ifp, cmd, data)
	register struct ifnet *ifp;
	int cmd;
	caddr_t data;
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct sockaddr_in *sin;
	int s = splimp(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		if (ifp->if_flags & IFF_RUNNING)
			if_rtinit(ifp, -1);	/* delete previous route */
		sin = (struct sockaddr_in *)&ifr->ifr_addr;
		ifp->if_addr = *(struct sockaddr *)sin;
		ifp->if_net = in_netof(sin->sin_addr);
		ifp->if_flags |= IFF_UP;
		/* set up routing table entry */
		if ((ifp->if_flags & IFF_ROUTE) == 0) {
			rtinit(&ifp->if_dstaddr, &ifp->if_addr, RTF_HOST|RTF_UP);
			ifp->if_flags |= IFF_ROUTE;
		}
		break;

	case SIOCSIFDSTADDR:
		ifp->if_dstaddr = ifr->ifr_dstaddr;
		break;

	default:
		error = EINVAL;
	}
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		dmcinit(ifp->if_unit);
	splx(s);
	return (error);
}

/*
 * Routines supporting UNIBUS network interfaces.
 */

/*
 * Init UNIBUS for interface on uban whose headers of size hlen are to
 * end on a page boundary.  We allocate a UNIBUS map register for the page
 * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
 * doing this for each receive and transmit buffer.  We also
 * allocate page frames in the mbuffer pool for these pages.
 */
dmc_ubainit(ifu, uban, hlen, nmr)
	register struct dmcuba *ifu;
	int uban, hlen, nmr;
{
	register caddr_t cp, dp;
	register struct ifrw *ifrw;
	register struct ifxmt *ifxp;
	int i, ncl;

	ncl = clrnd(nmr + CLSIZE) / CLSIZE;
	if (ifu->ifu_r[0].ifrw_addr) {
		/*
		 * If the first read buffer has a non-zero
		 * address, it means we have already allocated core
		 */
		cp = ifu->ifu_r[0].ifrw_addr - (CLBYTES - hlen);
	} else {
		cp = m_clalloc(NTOT * ncl, MPG_SPACE);
		if (cp == 0)
			return (0);
		ifu->ifu_hlen = hlen;
		ifu->ifu_uban = uban;
		ifu->ifu_uba = uba_hd[uban].uh_uba;
		dp = cp + CLBYTES - hlen;
		for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++) {
			ifrw->ifrw_addr = dp;
			dp += ncl * CLBYTES;
		}
		for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) {
			ifxp->x_ifrw.ifrw_addr = dp;
			dp += ncl * CLBYTES;
		}
	}
	/* allocate for receive ring */
	for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++) {
		if (dmc_ubaalloc(ifu, ifrw, nmr) == 0) {
			struct ifrw *rw;

			for (rw = ifu->ifu_r; rw < ifrw; rw++)
				ubarelse(ifu->ifu_uban, &rw->ifrw_info);
			goto bad;
		}
	}
	/* and now transmit ring */
	for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) {
		ifrw = &ifxp->x_ifrw;
		if (dmc_ubaalloc(ifu, ifrw, nmr) == 0) {
			struct ifxmt *xp;

			for (xp = ifu->ifu_w; xp < ifxp; xp++)
				ubarelse(ifu->ifu_uban, &xp->x_ifrw.ifrw_info);
			for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++)
				ubarelse(ifu->ifu_uban, &ifrw->ifrw_info);
			goto bad;
		}
		for (i = 0; i < nmr; i++)
			ifxp->x_map[i] = ifrw->ifrw_mr[i];
		ifxp->x_xswapd = 0;
	}
	return (1);
bad:
	m_pgfree(cp, NTOT * ncl);
	ifu->ifu_r[0].ifrw_addr = 0;
	return (0);
}
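/*
 * The layout above puts the hlen bytes of local net header at the end
 * of a cluster: dp starts at cp + CLBYTES - hlen and advances by whole
 * clusters, so the data portion of every buffer begins on a page (and
 * cluster) boundary, as required by the remapping done in dmc_get and
 * dmcput.
 */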

/*
 * Set up an ifrw structure by allocating UNIBUS map registers,
 * possibly a buffered data path, and initializing the fields of
 * the ifrw structure to minimize run-time overhead.
 */
static
dmc_ubaalloc(ifu, ifrw, nmr)
	struct dmcuba *ifu;
	register struct ifrw *ifrw;
	int nmr;
{
	register int info;

	info =
	    uballoc(ifu->ifu_uban, ifrw->ifrw_addr, nmr*NBPG + ifu->ifu_hlen,
		ifu->ifu_flags);
	if (info == 0)
		return (0);
	ifrw->ifrw_info = info;
	ifrw->ifrw_bdp = UBAI_BDP(info);
	ifrw->ifrw_proto = UBAMR_MRV | (UBAI_BDP(info) << UBAMR_DPSHIFT);
	ifrw->ifrw_mr = &ifu->ifu_uba->uba_map[UBAI_MR(info) + 1];
	return (1);
}
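/*
 * The "+ 1" when recording ifrw_mr follows the if_uba.c convention of
 * skipping the map register that covers the page holding the local net
 * header, leaving ifrw_mr pointing at the registers for the
 * page-aligned data pages.  Whether that convention is right here with
 * hlen == 0 (as dmcinit uses) is not obvious.
 */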

/*
 * Pull read data off an interface.
 * Len is length of data, with local net header stripped.
 * Off is non-zero if a trailer protocol was used, and
 * gives the offset of the trailer information.
 * We copy the trailer information and then all the normal
 * data into mbufs.  When full cluster sized units are present
 * on the interface on cluster boundaries we can get them more
 * easily by remapping, and take advantage of this here.
 */
struct mbuf *
dmc_get(ifu, ifrw, totlen, off0)
	register struct dmcuba *ifu;
	register struct ifrw *ifrw;
	int totlen, off0;
{
	struct mbuf *top, **mp, *m;
	int off = off0, len;
	register caddr_t cp = ifrw->ifrw_addr + ifu->ifu_hlen;

	top = 0;
	mp = &top;
	while (totlen > 0) {
		MGET(m, M_DONTWAIT, MT_DATA);
		if (m == 0)
			goto bad;
		if (off) {
			len = totlen - off;
			cp = ifrw->ifrw_addr + ifu->ifu_hlen + off;
		} else
			len = totlen;
		if (len >= CLBYTES) {
			struct mbuf *p;
			struct pte *cpte, *ppte;
			int x, *ip, i;

			MCLGET(p, 1);
			if (p == 0)
				goto nopage;
			len = m->m_len = CLBYTES;
			m->m_off = (int)p - (int)m;
			if (!claligned(cp))
				goto copy;

			/*
			 * Switch pages mapped to UNIBUS with new page p,
			 * as quick form of copy.  Remap UNIBUS and invalidate.
			 */
			cpte = &Mbmap[mtocl(cp)*CLSIZE];
			ppte = &Mbmap[mtocl(p)*CLSIZE];
			x = btop(cp - ifrw->ifrw_addr);
			ip = (int *)&ifrw->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++) {
				struct pte t;
				t = *ppte; *ppte++ = *cpte; *cpte = t;
				*ip++ =
				    cpte++->pg_pfnum|ifrw->ifrw_proto;
				mtpr(TBIS, cp);
				cp += NBPG;
				mtpr(TBIS, (caddr_t)p);
				p += NBPG / sizeof (*p);
			}
			goto nocopy;
		}
nopage:
		m->m_len = MIN(MLEN, len);
		m->m_off = MMINOFF;
copy:
		bcopy(cp, mtod(m, caddr_t), (unsigned)m->m_len);
		cp += m->m_len;
nocopy:
		*mp = m;
		mp = &m->m_next;
		if (off) {
			/* sort of an ALGOL-W style for statement... */
			off += m->m_len;
			if (off == totlen) {
				cp = ifrw->ifrw_addr + ifu->ifu_hlen;
				off = 0;
				totlen = off0;
			}
		} else
			totlen -= m->m_len;
	}
	return (top);
bad:
	m_freem(top);
	return (0);
}
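/*
 * The cluster swap above avoids a copy by exchanging page frames: the
 * ptes in Mbmap are swapped so the mbuf cluster now maps the pages
 * holding the received data, the UNIBUS map registers are rewritten to
 * point at the fresh pages that backed the new cluster, and the
 * translation buffer is flushed (TBIS) for both virtual addresses.
 */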

/*
 * Map a chain of mbufs onto a network interface
 * in preparation for an i/o operation.
 * The argument chain of mbufs includes the local network
 * header which is copied to be in the mapped, aligned
 * i/o space.
 */
dmcput(ifu, n, m)
	struct dmcuba *ifu;
	int n;
	register struct mbuf *m;
{
	register struct mbuf *mp;
	register caddr_t cp;
	register struct ifxmt *ifxp;
	register struct ifrw *ifrw;
	register int i;
	int xswapd = 0;
	int x, cc, t;
	caddr_t dp;

	ifxp = &ifu->ifu_w[n];
	ifrw = &ifxp->x_ifrw;
	cp = ifrw->ifrw_addr;
	while (m) {
		dp = mtod(m, char *);
		if (claligned(cp) && claligned(dp) && m->m_len == CLBYTES) {
			struct pte *pte; int *ip;
			pte = &Mbmap[mtocl(dp)*CLSIZE];
			x = btop(cp - ifrw->ifrw_addr);
			ip = (int *)&ifrw->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++)
				*ip++ = ifrw->ifrw_proto | pte++->pg_pfnum;
			xswapd |= 1 << (x>>(CLSHIFT-PGSHIFT));
			mp = m->m_next;
			m->m_next = ifxp->x_xtofree;
			ifxp->x_xtofree = m;
			cp += m->m_len;
		} else {
			bcopy(mtod(m, caddr_t), cp, (unsigned)m->m_len);
			cp += m->m_len;
			MFREE(m, mp);
		}
		m = mp;
	}

	/*
	 * Xswapd is the set of clusters we just mapped out.  Ifxp->x_xswapd
	 * is the set of clusters mapped out from before.  We compute
	 * the number of clusters involved in this operation in x.
	 * Clusters mapped out before and involved in this operation
	 * should be unmapped so original pages will be accessed by the device.
	 */
	cc = cp - ifrw->ifrw_addr;
	x = ((cc - ifu->ifu_hlen) + CLBYTES - 1) >> CLSHIFT;
	ifxp->x_xswapd &= ~xswapd;
	while (i = ffs(ifxp->x_xswapd)) {
		i--;
		if (i >= x)
			break;
		ifxp->x_xswapd &= ~(1<<i);
		i *= CLSIZE;
		for (t = 0; t < CLSIZE; t++) {
			ifrw->ifrw_mr[i] = ifxp->x_map[i];
			i++;
		}
	}
	ifxp->x_xswapd |= xswapd;
	return (cc);
}
#endif