1 /*	$NetBSD: if_dmc.c,v 1.21 2012/10/27 17:18:37 chs Exp $	*/
2 /*
3  * Copyright (c) 1982, 1986 Regents of the University of California.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of the University nor the names of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  *	@(#)if_dmc.c	7.10 (Berkeley) 12/16/90
31  */
32 
33 /*
34  * DMC11 device driver, internet version
35  *
36  *	Bill Nesheim
37  *	Cornell University
38  *
39  *	Lou Salkind
40  *	New York University
41  */
42 
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: if_dmc.c,v 1.21 2012/10/27 17:18:37 chs Exp $");
45 
46 #undef DMCDEBUG	/* for base table dump on fatal error */
47 
48 #include "opt_inet.h"
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/mbuf.h>
53 #include <sys/ioctl.h>
54 #include <sys/socket.h>
55 #include <sys/syslog.h>
56 #include <sys/device.h>
57 
58 #include <net/if.h>
59 #include <net/netisr.h>
60 
61 #ifdef	INET
62 #include <netinet/in.h>
63 #include <netinet/in_var.h>
64 #endif
65 
66 #include <sys/bus.h>
67 
68 #include <dev/qbus/ubareg.h>
69 #include <dev/qbus/ubavar.h>
70 #include <dev/qbus/if_uba.h>
71 
72 #include <dev/qbus/if_dmcreg.h>
73 
74 
75 /*
76  * Output timeout value in seconds; should really depend on the line speed.
77  */
78 static int dmc_timeout = 20;
79 
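/* numbers of receive and transmit buffers handed to the DMC */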
80 #define NRCV 7
81 #define NXMT 3
82 #define NCMDS	(NRCV+NXMT+4)	/* size of command queue */
83 
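/*
 * CSR access helpers: each expands to a bus_space access through the
 * softc's bus space tag and handle, so a local variable `sc' pointing
 * at the softc must be in scope wherever these macros are used.
 */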
84 #define DMC_WBYTE(csr, val) \
85 	bus_space_write_1(sc->sc_iot, sc->sc_ioh, csr, val)
86 #define DMC_WWORD(csr, val) \
87 	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
88 #define DMC_RBYTE(csr) \
89 	bus_space_read_1(sc->sc_iot, sc->sc_ioh, csr)
90 #define DMC_RWORD(csr) \
91 	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)
92 
93 
94 #ifdef DMCDEBUG
95 #define printd if(dmcdebug)printf
96 int dmcdebug = 0;
97 #endif
98 
99 /* error reporting intervals */
100 #define DMC_RPNBFS	50
101 #define DMC_RPDSC	1
102 #define DMC_RPTMO	10
103 #define DMC_RPDCK	10
104 
105 struct  dmc_command {
106 	char	qp_cmd;		/* command */
107 	short	qp_ubaddr;	/* buffer address */
108 	short	qp_cc;		/* character count || XMEM */
109 	struct	dmc_command *qp_next;	/* next command on queue */
110 };
111 
112 struct dmcbufs {
113 	int	ubinfo;		/* from uballoc */
114 	short	cc;		/* buffer size */
115 	short	flags;		/* access control */
116 };
117 #define	DBUF_OURS	0	/* buffer is available */
118 #define	DBUF_DMCS	1	/* buffer claimed by somebody */
119 #define	DBUF_XMIT	4	/* transmit buffer */
120 #define	DBUF_RCV	8	/* receive buffer */
121 
122 
123 /*
124  * DMC software status per interface.
125  *
126  * Each interface is referenced by a network interface structure,
127  * sc_if, which the routing code uses to locate the interface.
128  * This structure contains the output queue for the interface, its address,
129  * and other per-interface state.  We also have, for each interface, a set
130  * of UBA interface structures (NRCV receive and NXMT transmit maps) which
131  * contain information about the UNIBUS resources held by the interface:
132  * map registers, buffered data paths, etc.  Information is cached in this
133  * structure for use by the if_uba.c routines in running the interface
134  * efficiently.
135  */
136 struct dmc_softc {
137 	device_t sc_dev;		/* Configuration common part */
138 	struct	ifnet sc_if;		/* network-visible interface */
139 	short	sc_oused;		/* output buffers currently in use */
140 	short	sc_iused;		/* input buffers given to DMC */
141 	short	sc_flag;		/* flags */
142 	struct	ubinfo sc_ui;		/* UBA mapping info for base table */
143 	int	sc_errors[4];		/* non-fatal error counters */
144 	bus_space_tag_t sc_iot;
145 	bus_addr_t sc_ioh;
146 	bus_dma_tag_t sc_dmat;
147 	struct	evcnt sc_rintrcnt;	/* Interrupt counting */
148 	struct	evcnt sc_tintrcnt;	/* Interrupt counting */
149 #define sc_datck sc_errors[0]
150 #define sc_timeo sc_errors[1]
151 #define sc_nobuf sc_errors[2]
152 #define sc_disc  sc_errors[3]
153 	struct	dmcbufs sc_rbufs[NRCV];	/* receive buffer info */
154 	struct	dmcbufs sc_xbufs[NXMT];	/* transmit buffer info */
155 	struct	ifubinfo sc_ifuba;	/* UNIBUS resources */
156 	struct	ifrw sc_ifr[NRCV];	/* UNIBUS receive buffer maps */
157 	struct	ifxmt sc_ifw[NXMT];	/* UNIBUS transmit buffer maps */
158 	/* command queue stuff */
159 	struct	dmc_command sc_cmdbuf[NCMDS];
160 	struct	dmc_command *sc_qhead;	/* head of command queue */
161 	struct	dmc_command *sc_qtail;	/* tail of command queue */
162 	struct	dmc_command *sc_qactive;	/* command in progress */
163 	struct	dmc_command *sc_qfreeh;	/* head of list of free cmd buffers */
164 	struct	dmc_command *sc_qfreet;	/* tail of list of free cmd buffers */
165 	/* end command queue stuff */
166 	struct dmc_base {
167 		short	d_base[128];		/* DMC base table */
168 	} dmc_base;
169 };
170 
171 static  int dmcmatch(device_t, cfdata_t, void *);
172 static  void dmcattach(device_t, device_t, void *);
173 static  int dmcinit(struct ifnet *);
174 static  void dmcrint(void *);
175 static  void dmcxint(void *);
176 static  void dmcdown(struct dmc_softc *sc);
177 static  void dmcrestart(struct dmc_softc *);
178 static  void dmcload(struct dmc_softc *, int, u_short, u_short);
179 static  void dmcstart(struct ifnet *);
180 static  void dmctimeout(struct ifnet *);
181 static  int dmcioctl(struct ifnet *, u_long, void *);
182 static  int dmcoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
183 	struct rtentry *);
184 static  void dmcreset(device_t);
185 
186 CFATTACH_DECL_NEW(dmc, sizeof(struct dmc_softc),
187     dmcmatch, dmcattach, NULL, NULL);
188 
189 /* flags */
190 #define DMC_RUNNING	0x01		/* device initialized */
191 #define DMC_BMAPPED	0x02		/* base table mapped */
192 #define DMC_RESTART	0x04		/* software restart in progress */
193 #define DMC_ONLINE	0x08		/* device running (had a RDYO) */
194 
195 
196 /* queue manipulation macros */
197 #define	QUEUE_AT_HEAD(qp, head, tail) \
198 	(qp)->qp_next = (head); \
199 	(head) = (qp); \
200 	if ((tail) == (struct dmc_command *) 0) \
201 		(tail) = (head)
202 
203 #define QUEUE_AT_TAIL(qp, head, tail) \
204 	if ((tail)) \
205 		(tail)->qp_next = (qp); \
206 	else \
207 		(head) = (qp); \
208 	(qp)->qp_next = (struct dmc_command *) 0; \
209 	(tail) = (qp)
210 
211 #define DEQUEUE(head, tail) \
212 	(head) = (head)->qp_next;\
213 	if ((head) == (struct dmc_command *) 0)\
214 		(tail) = (head)
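
/*
 * Note that these macros expand to multiple statements with no
 * do { } while (0) wrapper, so callers must supply braces when using
 * them as the body of an if or else (as dmcload() does).
 */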
215 
216 int
217 dmcmatch(device_t parent, cfdata_t cf, void *aux)
218 {
219 	struct uba_attach_args *ua = aux;
220 	struct dmc_softc ssc;
221 	struct dmc_softc *sc = &ssc;
222 	int i;
223 
224 	sc->sc_iot = ua->ua_iot;
225 	sc->sc_ioh = ua->ua_ioh;
226 
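	/*
	 * Probe the device: master clear it and wait for the RUN bit to
	 * come back, then request an input interrupt (RQI|IEI) and give
	 * the device time to post it before master clearing again.
	 */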
227 	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
228 	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
229 		;
230 	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
231 		printf("dmcprobe: can't start device\n");
232 		return (0);
233 	}
234 	DMC_WBYTE(DMC_BSEL0, DMC_RQI|DMC_IEI);
235 	/* let's be paranoid */
236 	DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | DMC_RQI|DMC_IEI);
237 	DELAY(1000000);
238 	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
239 	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
240 		;
241 	return (1);
242 }
243 
244 /*
245  * Interface exists: make available by filling in network interface
246  * record.  System will initialize the interface when it is ready
247  * to accept packets.
248  */
249 void
250 dmcattach(device_t parent, device_t self, void *aux)
251 {
252 	struct uba_attach_args *ua = aux;
253 	struct dmc_softc *sc = device_private(self);
254 
255 	sc->sc_dev = self;
256 	sc->sc_iot = ua->ua_iot;
257 	sc->sc_ioh = ua->ua_ioh;
258 	sc->sc_dmat = ua->ua_dmat;
259 
260 	strlcpy(sc->sc_if.if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
261 	sc->sc_if.if_mtu = DMCMTU;
262 	sc->sc_if.if_init = dmcinit;
263 	sc->sc_if.if_output = dmcoutput;
264 	sc->sc_if.if_ioctl = dmcioctl;
265 	sc->sc_if.if_watchdog = dmctimeout;
266 	sc->sc_if.if_flags = IFF_POINTOPOINT;
267 	sc->sc_if.if_softc = sc;
268 	IFQ_SET_READY(&sc->sc_if.if_snd);
269 
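	/*
	 * The DMC uses two interrupt vectors: the input (RDYI) interrupt,
	 * handled by dmcrint(), is delivered on the base vector and the
	 * output (RDYO) interrupt, handled by dmcxint(), on the base
	 * vector + 4.
	 */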
270 	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, dmcrint, sc,
271 	    &sc->sc_rintrcnt);
272 	uba_intr_establish(ua->ua_icookie, ua->ua_cvec+4, dmcxint, sc,
273 	    &sc->sc_tintrcnt);
274 	uba_reset_establish(dmcreset, sc->sc_dev);
275 	evcnt_attach_dynamic(&sc->sc_rintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
276 	    device_xname(sc->sc_dev), "intr");
277 	evcnt_attach_dynamic(&sc->sc_tintrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
278 	    device_xname(sc->sc_dev), "intr");
279 
280 	if_attach(&sc->sc_if);
281 }
282 
283 /*
284  * Reset of interface after UNIBUS reset.
285  * Clear the driver state and reinitialize the interface.
286  */
287 void
288 dmcreset(device_t dev)
289 {
290 	struct dmc_softc *sc = device_private(dev);
291 
292 	sc->sc_flag = 0;
293 	sc->sc_if.if_flags &= ~IFF_RUNNING;
294 	dmcinit(&sc->sc_if);
295 }
296 
297 /*
298  * Initialization of interface; reinitialize UNIBUS usage.
299  */
300 int
301 dmcinit(struct ifnet *ifp)
302 {
303 	struct dmc_softc *sc = ifp->if_softc;
304 	struct ifrw *ifrw;
305 	struct ifxmt *ifxp;
306 	struct dmcbufs *rp;
307 	struct dmc_command *qp;
308 	struct ifaddr *ifa;
309 	cfdata_t ui = device_cfdata(sc->sc_dev);
310 	int base;
311 	int s;
312 
313 	/*
314 	 * Check to see that an address has been set
315 	 * (both local and destination for an address family).
316 	 */
317 	IFADDR_FOREACH(ifa, ifp)
318 		if (ifa->ifa_addr->sa_family && ifa->ifa_dstaddr->sa_family)
319 			break;
320 	if (ifa == (struct ifaddr *) 0)
321 		return 0;
322 
323 	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
324 		printf("dmcinit: DMC not running\n");
325 		ifp->if_flags &= ~IFF_UP;
326 		return 0;
327 	}
328 	/* map base table */
329 	if ((sc->sc_flag & DMC_BMAPPED) == 0) {
330 		sc->sc_ui.ui_size = sizeof(struct dmc_base);
331 		sc->sc_ui.ui_vaddr = (void *)&sc->dmc_base;
332 		uballoc(device_private(device_parent(sc->sc_dev)), &sc->sc_ui, 0);
333 		sc->sc_flag |= DMC_BMAPPED;
334 	}
335 	/* initialize UNIBUS resources */
336 	sc->sc_iused = sc->sc_oused = 0;
337 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
338 		if (if_ubaminit(&sc->sc_ifuba,
339 		    device_private(device_parent(sc->sc_dev)),
340 		    sizeof(struct dmc_header) + DMCMTU,
341 		    sc->sc_ifr, NRCV, sc->sc_ifw, NXMT) == 0) {
342 			aprint_error_dev(sc->sc_dev, "can't allocate uba resources\n");
343 			ifp->if_flags &= ~IFF_UP;
344 			return 0;
345 		}
346 		ifp->if_flags |= IFF_RUNNING;
347 	}
348 	sc->sc_flag &= ~DMC_ONLINE;
349 	sc->sc_flag |= DMC_RUNNING;
350 	/*
351 	 * Limit packets enqueued until we see if we're on the air.
352 	 */
353 	ifp->if_snd.ifq_maxlen = 3;
354 
355 	/* initialize buffer pool */
356 	/* receives */
357 	ifrw = &sc->sc_ifr[0];
358 	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
359 		rp->ubinfo = ifrw->ifrw_info;
360 		rp->cc = DMCMTU + sizeof (struct dmc_header);
361 		rp->flags = DBUF_OURS|DBUF_RCV;
362 		ifrw++;
363 	}
364 	/* transmits */
365 	ifxp = &sc->sc_ifw[0];
366 	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
367 		rp->ubinfo = ifxp->ifw_info;
368 		rp->cc = 0;
369 		rp->flags = DBUF_OURS|DBUF_XMIT;
370 		ifxp++;
371 	}
372 
373 	/* set up command queues */
374 	sc->sc_qfreeh = sc->sc_qfreet
375 		 = sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
376 		(struct dmc_command *)0;
377 	/* set up free command buffer list */
378 	for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NCMDS]; qp++) {
379 		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
380 	}
381 
382 	/* BASE IN: give the DMC the UNIBUS address of the base table */
383 	base = sc->sc_ui.ui_baddr;
384 	dmcload(sc, DMC_BASEI, (u_short)base, (base>>2) & DMC_XMEM);
385 	/* the config flags select the line mode: full duplex DDCMP, */
386 	/* MAINTENANCE mode, or half duplex as primary or secondary station */
387 	if (ui->cf_flags == 0)
388 		/* use DDCMP mode in full duplex */
389 		dmcload(sc, DMC_CNTLI, 0, 0);
390 	else if (ui->cf_flags == 1)
391 		/* use MAINTENANCE mode */
392 		dmcload(sc, DMC_CNTLI, 0, DMC_MAINT);
393 	else if (ui->cf_flags == 2)
394 		/* use DDCMP half duplex as primary station */
395 		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
396 	else if (ui->cf_flags == 3)
397 		/* use DDCMP half duplex as secondary station */
398 		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);
399 
400 	/* enable operation done interrupts */
401 	while ((DMC_RBYTE(DMC_BSEL2) & DMC_IEO) == 0)
402 		DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) | DMC_IEO);
403 	s = splnet();
404 	/* queue first NRCV buffers for DMC to fill */
405 	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
406 		rp->flags |= DBUF_DMCS;
407 		dmcload(sc, DMC_READ, rp->ubinfo,
408 			(((rp->ubinfo>>2)&DMC_XMEM) | rp->cc));
409 		sc->sc_iused++;
410 	}
411 	splx(s);
412 	return 0;
413 }
414 
415 /*
416  * Start output on interface.  Get another datagram
417  * to send from the interface queue and map it to
418  * the interface before starting output.
419  *
420  * Must be called at splnet().
421  */
422 void
423 dmcstart(struct ifnet *ifp)
424 {
425 	struct dmc_softc *sc = ifp->if_softc;
426 	struct mbuf *m;
427 	struct dmcbufs *rp;
428 	int n;
429 
430 	/*
431 	 * Dequeue up to NXMT requests and map them to the UNIBUS.
432 	 * If no more requests, or no dmc buffers available, just return.
433 	 */
434 	n = 0;
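	/* n tracks rp's index so the matching transmit map sc_ifw[n] is used */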
435 	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++ ) {
436 		/* find an available buffer */
437 		if ((rp->flags & DBUF_DMCS) == 0) {
438 			IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
439 			if (m == 0)
440 				return;
441 			/* mark it dmcs */
442 			rp->flags |= (DBUF_DMCS);
443 			/*
444 			 * Have request mapped to UNIBUS for transmission
445 			 * and start the output.
446 			 */
447 			rp->cc = if_ubaput(&sc->sc_ifuba, &sc->sc_ifw[n], m);
448 			rp->cc &= DMC_CCOUNT;
449 			if (++sc->sc_oused == 1)
450 				sc->sc_if.if_timer = dmc_timeout;
451 			dmcload(sc, DMC_WRITE, rp->ubinfo,
452 				rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
453 		}
454 		n++;
455 	}
456 }
457 
458 /*
459  * Utility routine to load the DMC device registers.
460  */
461 void
462 dmcload(struct dmc_softc *sc, int type, u_short w0, u_short w1)
463 {
464 	struct dmc_command *qp;
465 	int sps;
466 
467 	sps = splnet();
468 
469 	/* grab a command buffer from the free list */
470 	if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0)
471 		panic("dmc command queue overflow");
472 	DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);
473 
474 	/* fill in requested info */
475 	qp->qp_cmd = (type | DMC_RQI);
476 	qp->qp_ubaddr = w0;
477 	qp->qp_cc = w1;
478 
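	/*
	 * If a command is already at the device, queue this one: DMC_READ
	 * (receive buffer) commands go to the head of the queue, ahead of
	 * pending writes and control commands; everything else is added at
	 * the tail.
	 */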
479 	if (sc->sc_qactive) {	/* command in progress */
480 		if (type == DMC_READ) {
481 			QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail);
482 		} else {
483 			QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
484 		}
485 	} else {	/* command port free */
486 		sc->sc_qactive = qp;
487 		DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
488 		dmcrint(sc);
489 	}
490 	splx(sps);
491 }
492 
493 /*
494  * DMC interface receiver interrupt.
495  * Ready to accept another command,
496  * pull one off the command queue.
497  */
498 void
499 dmcrint(void *arg)
500 {
501 	struct dmc_softc *sc = arg;
502 	struct dmc_command *qp;
503 	int n;
504 
505 	if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
506 		printf("%s: dmcrint no command\n", device_xname(sc->sc_dev));
507 		return;
508 	}
509 	while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
510 		DMC_WWORD(DMC_SEL4, qp->qp_ubaddr);
511 		DMC_WWORD(DMC_SEL6, qp->qp_cc);
512 		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) & ~(DMC_IEI|DMC_RQI));
513 		/* free command buffer */
514 		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
515 		while (DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) {
516 			/*
517 			 * Can't check for RDYO here 'cause
518 			 * this routine isn't reentrant!
519 			 */
520 			DELAY(5);
521 		}
522 		/* move on to next command */
523 		if ((sc->sc_qactive = sc->sc_qhead) == (struct dmc_command *)0)
524 			break;		/* all done */
525 		/* more commands to do, start the next one */
526 		qp = sc->sc_qactive;
527 		DEQUEUE(sc->sc_qhead, sc->sc_qtail);
528 		DMC_WBYTE(DMC_BSEL0, qp->qp_cmd);
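		/*
		 * Poll up to RDYSCAN times for the device to raise RDYI or
		 * RDYO for the command just issued before going back to the
		 * top of the loop.
		 */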
529 		n = RDYSCAN;
530 		while (n-- > 0)
531 			if ((DMC_RBYTE(DMC_BSEL0) & DMC_RDYI) ||
532 			    (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO))
533 				break;
534 	}
535 	if (sc->sc_qactive) {
536 		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | (DMC_IEI|DMC_RQI));
537 		/* VMS does it twice !*$%@# */
538 		DMC_WBYTE(DMC_BSEL0, DMC_RBYTE(DMC_BSEL0) | (DMC_IEI|DMC_RQI));
539 	}
540 
541 }
542 
543 /*
544  * DMC interface transmitter interrupt.
545  * A transfer may have completed, check for errors.
546  * If it was a read, notify appropriate protocol.
547  * If it was a write, pull the next one off the queue.
548  */
549 void
550 dmcxint(void *a)
551 {
552 	struct dmc_softc *sc = a;
553 
554 	struct ifnet *ifp;
555 	struct mbuf *m;
556 	struct ifqueue *inq;
557 	int arg, pkaddr, cmd, len, s;
558 	struct ifrw *ifrw;
559 	struct dmcbufs *rp;
560 	struct ifxmt *ifxp;
561 	struct dmc_header *dh;
562 	char buf[64];
563 
564 	ifp = &sc->sc_if;
565 
566 	while (DMC_RBYTE(DMC_BSEL2) & DMC_RDYO) {
567 
568 		cmd = DMC_RBYTE(DMC_BSEL2) & 0xff;
569 		arg = DMC_RWORD(DMC_SEL6) & 0xffff;
570 		/* reconstruct UNIBUS address of buffer returned to us */
571 		pkaddr = ((arg&DMC_XMEM)<<2) | (DMC_RWORD(DMC_SEL4) & 0xffff);
572 		/* release port */
573 		DMC_WBYTE(DMC_BSEL2, DMC_RBYTE(DMC_BSEL2) & ~DMC_RDYO);
574 		switch (cmd & 07) {
575 
576 		case DMC_OUR:
577 			/*
578 			 * A read has completed.
579 			 * Pass packet to type specific
580 			 * higher-level input routine.
581 			 */
582 			ifp->if_ipackets++;
583 			/* find location in dmcuba struct */
584 			ifrw = &sc->sc_ifr[0];
585 			for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
586 				if (rp->ubinfo == pkaddr)
587 					break;
588 				ifrw++;
589 			}
590 			if (rp >= &sc->sc_rbufs[NRCV])
591 				panic("dmc rcv");
592 			if ((rp->flags & DBUF_DMCS) == 0)
593 				aprint_error_dev(sc->sc_dev, "done unalloc rbuf\n");
594 
595 			len = (arg & DMC_CCOUNT) - sizeof (struct dmc_header);
596 			if (len < 0 || len > DMCMTU) {
597 				ifp->if_ierrors++;
598 #ifdef DMCDEBUG
599 				printd("%s: bad rcv pkt addr 0x%x len 0x%x\n",
600 				    device_xname(sc->sc_dev), pkaddr, len);
601 #endif
602 				goto setup;
603 			}
604 			/*
605 			 * Pick up the local network header and convert the
606 			 * packet type to host byte order.  (Trailer
607 			 * encapsulation is not handled by this driver.)
608 			 */
609 			dh = (struct dmc_header *)ifrw->ifrw_addr;
610 			dh->dmc_type = ntohs((u_short)dh->dmc_type);
611 			if (len == 0)
612 				goto setup;
613 
614 			/*
615 			 * Pull the packet off the interface into an mbuf
616 			 * chain with if_ubaget().  The local network header
617 			 * (struct dmc_header) is still at the front of the
618 			 * data and is trimmed off with m_adj() before the
619 			 * packet is handed to the protocol input queue.
620 			 */
621 			m = if_ubaget(&sc->sc_ifuba, ifrw, ifp, len);
622 			if (m == 0)
623 				goto setup;
624 			/* Shave off dmc_header */
625 			m_adj(m, sizeof(struct dmc_header));
626 			switch (dh->dmc_type) {
627 
628 #ifdef INET
629 			case DMC_IPTYPE:
630 				schednetisr(NETISR_IP);
631 				inq = &ipintrq;
632 				break;
633 #endif
634 			default:
635 				m_freem(m);
636 				goto setup;
637 			}
638 
639 			s = splnet();
640 			if (IF_QFULL(inq)) {
641 				IF_DROP(inq);
642 				m_freem(m);
643 			} else
644 				IF_ENQUEUE(inq, m);
645 			splx(s);
646 
647 	setup:
648 			/* is this needed? */
649 			rp->ubinfo = ifrw->ifrw_info;
650 
651 			dmcload(sc, DMC_READ, rp->ubinfo,
652 			    ((rp->ubinfo >> 2) & DMC_XMEM) | rp->cc);
653 			break;
654 
655 		case DMC_OUX:
656 			/*
657 			 * A write has completed, start another
658 			 * transfer if there is more data to send.
659 			 */
660 			ifp->if_opackets++;
661 			/* find associated dmcbuf structure */
662 			ifxp = &sc->sc_ifw[0];
663 			for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
664 				if (rp->ubinfo == pkaddr)
665 					break;
666 				ifxp++;
667 			}
668 			if (rp >= &sc->sc_xbufs[NXMT]) {
669 				aprint_error_dev(sc->sc_dev, "bad packet address 0x%x\n",
670 				    pkaddr);
671 				break;
672 			}
673 			if ((rp->flags & DBUF_DMCS) == 0)
674 				aprint_error_dev(sc->sc_dev, "unallocated packet 0x%x\n",
675 				    pkaddr);
676 			/* mark buffer free */
677 			if_ubaend(&sc->sc_ifuba, ifxp);
678 			rp->flags &= ~DBUF_DMCS;
679 			if (--sc->sc_oused == 0)
680 				sc->sc_if.if_timer = 0;
681 			else
682 				sc->sc_if.if_timer = dmc_timeout;
683 			if ((sc->sc_flag & DMC_ONLINE) == 0) {
684 				extern int ifqmaxlen;
685 
686 				/*
687 				 * We're on the air.
688 				 * Open the queue to the usual value.
689 				 */
690 				sc->sc_flag |= DMC_ONLINE;
691 				ifp->if_snd.ifq_maxlen = ifqmaxlen;
692 			}
693 			break;
694 
695 		case DMC_CNTLO:
696 			arg &= DMC_CNTMASK;
697 			if (arg & DMC_FATAL) {
698 				if (arg != DMC_START) {
699 					snprintb(buf, sizeof(buf), CNTLO_BITS,
700 					    arg);
701 					log(LOG_ERR,
702 					    "%s: fatal error, flags=%s\n",
703 					    device_xname(sc->sc_dev), buf);
704 				}
705 				dmcrestart(sc);
706 				break;
707 			}
708 			/* ACCUMULATE STATISTICS */
709 			switch (arg) {
710 			case DMC_NOBUFS:
711 				ifp->if_ierrors++;
712 				if ((sc->sc_nobuf++ % DMC_RPNBFS) == 0)
713 					goto report;
714 				break;
715 			case DMC_DISCONN:
716 				if ((sc->sc_disc++ % DMC_RPDSC) == 0)
717 					goto report;
718 				break;
719 			case DMC_TIMEOUT:
720 				if ((sc->sc_timeo++ % DMC_RPTMO) == 0)
721 					goto report;
722 				break;
723 			case DMC_DATACK:
724 				ifp->if_oerrors++;
725 				if ((sc->sc_datck++ % DMC_RPDCK) == 0)
726 					goto report;
727 				break;
728 			default:
729 				goto report;
730 			}
731 			break;
732 		report:
733 #ifdef DMCDEBUG
734 			snprintb(buf, sizeof(buf), CNTLO_BITS, arg);
735 			printd("%s: soft error, flags=%s\n",
736 			    device_xname(sc->sc_dev), buf);
737 #endif
738 			if ((sc->sc_flag & DMC_RESTART) == 0) {
739 				/*
740 				 * kill off the dmc to get things
741 				 * going again by generating a
742 				 * procedure error
743 				 */
744 				sc->sc_flag |= DMC_RESTART;
745 				arg = sc->sc_ui.ui_baddr;
746 				dmcload(sc, DMC_BASEI, arg, (arg>>2)&DMC_XMEM);
747 			}
748 			break;
749 
750 		default:
751 			printf("%s: bad control %o\n",
752 			    device_xname(sc->sc_dev), cmd);
753 			break;
754 		}
755 	}
756 	dmcstart(ifp);
757 }
758 
759 /*
760  * DMC output routine.
761  * Encapsulate a packet of type family for the dmc.
762  * A dmc_header carrying the packet type is prepended; trailer
763  * encapsulation is not used.
764  */
765 int
766 dmcoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
767     struct rtentry *rt)
768 {
769 	int type, error, s;
770 	struct mbuf *m = m0;
771 	struct dmc_header *dh;
772 	ALTQ_DECL(struct altq_pktattr pktattr;)
773 
774 	if ((ifp->if_flags & IFF_UP) == 0) {
775 		error = ENETDOWN;
776 		goto bad;
777 	}
778 
779 	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);
780 
781 	switch (dst->sa_family) {
782 #ifdef	INET
783 	case AF_INET:
784 		type = DMC_IPTYPE;
785 		break;
786 #endif
787 
788 	case AF_UNSPEC:
789 		dh = (struct dmc_header *)dst->sa_data;
790 		type = dh->dmc_type;
791 		break;
792 
793 	default:
794 		printf("%s: can't handle af%d\n", ifp->if_xname,
795 			dst->sa_family);
796 		error = EAFNOSUPPORT;
797 		goto bad;
798 	}
799 
800 	/*
801 	 * Add local network header
802 	 * (there is space for a uba on a vax to step on)
803 	 */
804 	M_PREPEND(m, sizeof(struct dmc_header), M_DONTWAIT);
805 	if (m == 0) {
806 		error = ENOBUFS;
807 		goto bad;
808 	}
809 	dh = mtod(m, struct dmc_header *);
810 	dh->dmc_type = htons((u_short)type);
811 
812 	/*
813 	 * Queue message on interface, and start output if interface
814 	 * not yet active.
815 	 */
816 	s = splnet();
817 	IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
818 	if (error) {
819 		/* mbuf is already freed */
820 		splx(s);
821 		return (error);
822 	}
823 	dmcstart(ifp);
824 	splx(s);
825 	return (0);
826 
827 bad:
828 	m_freem(m0);
829 	return (error);
830 }
831 
832 
833 /*
834  * Process an ioctl request.
835  */
836 /* ARGSUSED */
837 int
838 dmcioctl(struct ifnet *ifp, u_long cmd, void *data)
839 {
840 	int s = splnet(), error = 0;
841 	register struct dmc_softc *sc = ifp->if_softc;
842 
843 	switch (cmd) {
844 
845 	case SIOCINITIFADDR:
846 		ifp->if_flags |= IFF_UP;
847 		if ((ifp->if_flags & IFF_RUNNING) == 0)
848 			dmcinit(ifp);
849 		break;
850 
851 	case SIOCSIFDSTADDR:
852 		if ((ifp->if_flags & IFF_RUNNING) == 0)
853 			dmcinit(ifp);
854 		break;
855 
856 	case SIOCSIFFLAGS:
857 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
858 			break;
859 		if ((ifp->if_flags & IFF_UP) == 0 &&
860 		    sc->sc_flag & DMC_RUNNING)
861 			dmcdown(sc);
862 		else if (ifp->if_flags & IFF_UP &&
863 		    (sc->sc_flag & DMC_RUNNING) == 0)
864 			dmcrestart(sc);
865 		break;
866 
867 	default:
868 		error = ifioctl_common(ifp, cmd, data);
869 	}
870 	splx(s);
871 	return (error);
872 }
873 
874 /*
875  * Restart after a fatal error.
876  * Clear device and reinitialize.
877  */
878 void
879 dmcrestart(struct dmc_softc *sc)
880 {
881 	int s, i;
882 
883 #ifdef DMCDEBUG
884 	/* dump base table */
885 	printf("%s base table:\n", device_xname(sc->sc_dev));
886 	for (i = 0; i < sizeof(struct dmc_base) / sizeof(short); i++)
887 		printf("%o\n", sc->dmc_base.d_base[i]);
888 #endif
889 
890 	dmcdown(sc);
891 
892 	/*
893 	 * Let the DMR finish the MCLR.	 At 1 Mbit, it should do so
894 	 * in about a max of 6.4 milliseconds with diagnostics enabled.
895 	 */
896 	for (i = 100000; i && (DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0; i--)
897 		;
898 	/* Did the timer expire or did the DMR finish? */
899 	if ((DMC_RBYTE(DMC_BSEL1) & DMC_RUN) == 0) {
900 		log(LOG_ERR, "%s: M820 Test Failed\n", device_xname(sc->sc_dev));
901 		return;
902 	}
903 
904 	/* restart DMC */
905 	dmcinit(&sc->sc_if);
906 	sc->sc_flag &= ~DMC_RESTART;
907 	s = splnet();
908 	dmcstart(&sc->sc_if);
909 	splx(s);
910 	sc->sc_if.if_collisions++;	/* why not? */
911 }
912 
913 /*
914  * Reset a device and mark down.
915  * Flush output queue and drop queue limit.
916  */
917 void
918 dmcdown(struct dmc_softc *sc)
919 {
920 	struct ifxmt *ifxp;
921 
922 	DMC_WBYTE(DMC_BSEL1, DMC_MCLR);
923 	sc->sc_flag &= ~(DMC_RUNNING | DMC_ONLINE);
924 
925 	for (ifxp = sc->sc_ifw; ifxp < &sc->sc_ifw[NXMT]; ifxp++) {
926 #ifdef notyet
927 		if (ifxp->ifw_xtofree) {
928 			(void) m_freem(ifxp->ifw_xtofree);
929 			ifxp->ifw_xtofree = 0;
930 		}
931 #endif
932 	}
933 	IF_PURGE(&sc->sc_if.if_snd);
934 }
935 
936 /*
937  * Watchdog timeout to see that transmitted packets don't
938  * lose interrupts.  The device has to be online (the first
939  * transmission may block until the other side comes up).
940  */
941 void
942 dmctimeout(struct ifnet *ifp)
943 {
944 	struct dmc_softc *sc = ifp->if_softc;
945 	char buf1[64], buf2[64];
946 
947 	if (sc->sc_flag & DMC_ONLINE) {
948 		snprintb(buf1, sizeof(buf1), DMC0BITS,
949 		    DMC_RBYTE(DMC_BSEL0) & 0xff);
950 		snprintb(buf2, sizeof(buf2), DMC2BITS,
951 		    DMC_RBYTE(DMC_BSEL2) & 0xff);
952 		log(LOG_ERR, "%s: output timeout, bsel0=%s bsel2=%s\n",
953 		    device_xname(sc->sc_dev), buf1, buf2);
954 		dmcrestart(sc);
955 	}
956 }
957