/*	@(#)if_qe.c	1.1 (Berkeley) 04/28/86 */

#ifndef lint
static	char	*sccsid = "@(#)if_qe.c	1.15	(ULTRIX)	4/16/86";
#endif /* lint */


/****************************************************************
 *								*
 *        Licensed from Digital Equipment Corporation 		*
 *                       Copyright (c) 				*
 *               Digital Equipment Corporation			*
 *                   Maynard, Massachusetts 			*
 *                         1985, 1986 				*
 *                    All rights reserved. 			*
 *								*
 *        The Information in this software is subject to change *
 *   without notice and should not be construed as a commitment *
 *   by  Digital  Equipment  Corporation.   Digital   makes  no *
 *   representations about the suitability of this software for *
 *   any purpose.  It is supplied "As Is" without expressed  or *
 *   implied  warranty. 					*
 *								*
 *        If the Regents of the University of California or its *
 *   licensees modify the software in a manner creating  	*
 *   derivative copyright rights, appropriate copyright  	*
 *   legends may be placed on the derivative work in addition   *
 *   to that set forth above. 					*
 *								*
 ****************************************************************/
/* ---------------------------------------------------------------------
 * Modification History
 *
 * 15-Apr-86  -- afd
 *	Rename "unused_multi" to "qunused_multi" for extending Generic
 *	kernel to MicroVAXen.
 *
 * 18-mar-86  -- jaw     br/cvec changed to NOT use registers.
 *
 * 12 March 86 -- Jeff Chase
 *	Modified to handle the new MCLGET macro
 *	Changed if_qe_data.c to use more receive buffers
 *	Added a flag to poke with adb to log qe_restarts on console
 *
 * 19 Oct 85 -- rjl
 *	Changed the watchdog timer from 30 seconds to 3.  VMS is using
 * 	less than 1 second in theirs.  Also turned the printf into an
 *	mprintf.
 *
 *  09/16/85 -- Larry Cohen
 * 		Add 43bsd alpha tape changes for subnet routing
 *
 *  1 Aug 85 -- rjl
 *	Panic on a non-existent memory interrupt and the case where a packet
 *	was chained.  The first should never happen because non-existent
 *	memory interrupts cause a bus reset.  The second should never happen
 *	because we hang 2k input buffers on the device.
 *
 *  1 Aug 85 -- rich
 *      Fixed the broadcast loopback code to handle Clusters without
 *      wedging the system.
 *
 *  27 Feb. 85 -- ejf
 *	Return default hardware address on ioctl request.
 *
 *  12 Feb. 85 -- ejf
 *	Added internal extended loopback capability.
 *
 *  27 Dec. 84 -- rjl
 *	Fixed bug that caused every other transmit descriptor to be used
 *	instead of every descriptor.
 *
 *  21 Dec. 84 -- rjl
 *	Added watchdog timer to mask hardware bug that causes device lockup.
 *
 *  18 Dec. 84 -- rjl
 *	Reworked driver to use q-bus mapping routines.  MicroVAX-I now does
 *	copying instead of mbuf shuffling.
 *	A number of deficiencies in the hardware/firmware were compensated
 *	for.  See comments in qestart and qerint.
 *
 *  14 Nov. 84 -- jf
 *	Added usage counts for multicast addresses.
 *	Updated general protocol support to allow access to the Ethernet
 *	header.
 *
 *  04 Oct. 84 -- jf
 *	Added support for new ioctls to add and delete multicast addresses
 *	and set the physical address.
 *	Add support for general protocols.
 *
 *  14 Aug. 84 -- rjl
 *	Integrated Shannon changes. (allow arp above 1024 and ? )
 *
 *  13 Feb. 84 -- rjl
 *
 *	Initial version of driver, derived from the IL driver.
 *
 * ---------------------------------------------------------------------
 */

#include "qe.h"
#if	NQE > 0 || defined(BINARY)
/*
 * Digital Q-BUS to NI Adapter
 */

#include "../data/if_qe_data.c"
extern struct protosw *iftype_to_proto(), *iffamily_to_proto();
extern struct timeval time;
extern timeout();

int	qeprobe(), qeattach(), qeint(), qewatch();
int	qeinit(), qeoutput(), qeioctl(), qereset();
struct mbuf *qeget();
u_short qestd[] = { 0 };
struct	uba_driver qedriver =
	{ qeprobe, 0, qeattach, 0, qestd, "qe", qeinfo };

u_char qunused_multi[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

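/*
 * Editor's note: timeout() takes its delay in clock ticks, so QE_TIMEO
 * below is 15 ticks between watchdog scans.
 */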
#define QE_TIMEO	(15)
#define	QEUNIT(x)	minor(x)
static int mask = 0x3ffff;		/* address mask		*/
int qewatchrun = 0;			/* watchdog running	*/
/*
 * The deqna shouldn't receive more than ETHERMTU + sizeof(struct ether_header)
 * but will actually take in up to 2048 bytes.  To guard against the receiver
 * chaining buffers (which we aren't prepared to handle) we allocate
 * 2KB buffers.
 */
#define MAXPACKETSIZE 2048		/* Should really be ETHERMTU	*/
/*
 * Probe the QNA to see if it's there
 */
qeprobe(reg)
	caddr_t reg;
{

	register struct qedevice *addr = (struct qedevice *)reg;
	register struct qe_ring *rp;
	register struct qe_ring *prp; 	/* physical rp 		*/
	register int i, j, ncl;
	static int next=0;		/* softc index		*/
	register struct qe_softc *sc = &qe_softc[next++];

	/*
	 * Set the address mask for the particular cpu
	 */
	if( cpu == MVAX_I )
		mask = 0x3fffff;
	else
		mask = 0x3ffff;

	/*
	 * The QNA interrupts on i/o operations.  To do an I/O operation
	 * we have to set up the interface by transmitting a setup packet.
	 */
	addr->qe_csr = QE_RESET;
	addr->qe_vector = (uba_hd[numuba].uh_lastiv -= 4);

	/*
	 * Map the communications area and the setup packet.
	 */
	sc->setupaddr =
		uballoc(0, sc->setup_pkt, sizeof(sc->setup_pkt), 0);
	sc->rringaddr = (struct qe_ring *)
		uballoc(0, sc->rring, sizeof(struct qe_ring)*(nNTOT+2),0);
	prp = (struct qe_ring *)((int)sc->rringaddr & mask);

	/*
	 * The QNA will loop the setup packet back to the receive ring
	 * for verification, therefore we initialize the first
	 * receive & transmit ring descriptors and link the setup packet
	 * to them.
	 */
	qeinitdesc( sc->tring, sc->setupaddr & mask, sizeof(sc->setup_pkt));
	qeinitdesc( sc->rring, sc->setupaddr & mask, sizeof(sc->setup_pkt));

	rp = (struct qe_ring *)sc->tring;
	rp->qe_setup = 1;
	rp->qe_eomsg = 1;
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_valid = 1;

	rp = (struct qe_ring *)sc->rring;
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_valid = 1;

	/*
	 * Get the address off of the interface and place it into the setup
	 * packet.  This code looks strange because the address is placed
	 * in the setup packet in column-major order.
	 */
	for( i = 0 ; i < 6 ; i++ )
		sc->setup_pkt[i][1] = addr->qe_sta_addr[i];
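	/*
	 * Editor's sketch of the finished setup packet (derived from this
	 * loop and from qesetup(); not part of the original driver).
	 * setup_pkt is a 16-row by 8-column byte array and each station
	 * address runs down a column, one byte per row:
	 *
	 *	rows 0-5:  col 1 = physical address, col 2 = broadcast
	 *		   (all ff), cols 3-7 = five multicast slots
	 *	rows 8-13: cols 1-7 = seven more multicast slots
	 *
	 * Unused multicast slots repeat the physical address.
	 */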

	qesetup( sc );
	/*
	 * Start the interface and wait for the packet.
	 */
	j = cvec;
	addr->qe_csr = QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT;
	addr->qe_rcvlist_lo = (short)prp;
	addr->qe_rcvlist_hi = (short)((int)prp >> 16);
	prp += nNRCV+1;
	addr->qe_xmtlist_lo = (short)prp;
	addr->qe_xmtlist_hi = (short)((int)prp >> 16);
	DELAY(10000);
	/*
	 * All done with the bus resources.  If it's a uVAX-I they weren't
	 * really allocated; otherwise deallocate them.
	 */
	if( cpu != MVAX_I ) {
		ubarelse(0, &sc->setupaddr);
		ubarelse(0, &sc->rringaddr);
	}
	if( cvec == j )
		return 0;		/* didn't interrupt	*/

	/*
	 * Allocate page size buffers now.  If we wait until the network
	 * is set up they will already be fragmented.  By doing it here, in
	 * conjunction with always copying on uVAX-I processors, we obtain
	 * physically contiguous buffers for dma transfers.
	 */
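	/*
	 * Editor's note: ncl is the number of mbuf clusters per buffer --
	 * pages for MAXPACKETSIZE plus one extra cluster, apparently so
	 * the header area can precede the page-aligned data (see the
	 * matching arithmetic in qe_ubainit).
	 */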
	ncl = clrnd((int)btoc(MAXPACKETSIZE) + CLSIZE) / CLSIZE;
	sc->buffers = m_clalloc(nNTOT * ncl, MPG_SPACE);
	return( sizeof(struct qedevice) );
}

/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
qeattach(ui)
	struct uba_device *ui;
{
	register struct qe_softc *sc = &qe_softc[ui->ui_unit];
	register struct ifnet *ifp = &sc->is_if;
	register struct qedevice *addr = (struct qedevice *)ui->ui_addr;
	register int i;
	struct sockaddr_in *sin;

	ifp->if_unit = ui->ui_unit;
	ifp->if_name = "qe";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags |= IFF_BROADCAST | IFF_DYNPROTO;

	/*
	 * Read the address from the prom and save it.
	 */
	for( i=0 ; i<6 ; i++ )
		sc->setup_pkt[i][1] = sc->is_addr[i] = addr->qe_sta_addr[i] & 0xff;

	/*
	 * Save the vector for initialization at reset time.
	 */
	sc->qe_intvec = addr->qe_vector;

	sin = (struct sockaddr_in *)&ifp->if_addr;
	sin->sin_family = AF_INET;
	ifp->if_init = qeinit;
	ifp->if_output = qeoutput;
	ifp->if_ioctl = qeioctl;
	ifp->if_reset = qereset;
	if_attach(ifp);
}

/*
 * Reset of interface after UNIBUS reset.
 * If interface is on specified uba, reset its state.
 */
qereset(unit, uban)
	int unit, uban;
{
	register struct uba_device *ui;

	if (unit >= nNQE || (ui = qeinfo[unit]) == 0 || ui->ui_alive == 0 ||
		ui->ui_ubanum != uban)
		return;
	printf(" qe%d", unit);
	qeinit(unit);
}

/*
 * Initialization of interface.
 */
qeinit(unit)
	int unit;
{
	register struct qe_softc *sc = &qe_softc[unit];
	register struct uba_device *ui = qeinfo[unit];
	register struct qedevice *addr = (struct qedevice *)ui->ui_addr;
	register struct ifnet *ifp = &sc->is_if;
	register i;
	int s;

	/* address not known */
	/* DECnet must set this somewhere to make device happy */
	if (ifp->if_addrlist == (struct ifaddr *)0)
		return;
	if (ifp->if_flags & IFF_RUNNING)
		return;

	/*
	 * Map the communications area onto the device.
	 */
	sc->rringaddr = (struct qe_ring *)((int)uballoc(0,
		sc->rring, sizeof(struct qe_ring)*(nNTOT+2),0)&mask);
	sc->tringaddr = sc->rringaddr+nNRCV+1;
	sc->setupaddr =	uballoc(0, sc->setup_pkt, sizeof(sc->setup_pkt), 0) & mask;
	/*
	 * Init buffers and maps.
	 */
	if (qe_ubainit(&sc->qeuba, ui->ui_ubanum,
	    sizeof (struct ether_header), (int)btoc(MAXPACKETSIZE), sc->buffers) == 0) {
		printf("qe%d: can't initialize\n", unit);
		sc->is_if.if_flags &= ~IFF_UP;
		return;
	}
	/*
	 * Init the buffer descriptors and indexes for each of the lists and
	 * loop them back to form a ring.
	 */
	for( i = 0 ; i < nNRCV ; i++ ){
		qeinitdesc( &sc->rring[i],
			sc->qeuba.ifu_r[i].ifrw_info & mask, MAXPACKETSIZE);
		sc->rring[i].qe_flag = sc->rring[i].qe_status1 = QE_NOTYET;
		sc->rring[i].qe_valid = 1;
	}
	qeinitdesc( &sc->rring[i], NULL, 0 );

	sc->rring[i].qe_addr_lo = (short)sc->rringaddr;
	sc->rring[i].qe_addr_hi = (short)((int)sc->rringaddr >> 16);
	sc->rring[i].qe_chain = 1;
	sc->rring[i].qe_flag = sc->rring[i].qe_status1 = QE_NOTYET;
	sc->rring[i].qe_valid = 1;

	for( i = 0 ; i <= nNXMT ; i++ )
		qeinitdesc( &sc->tring[i], NULL, 0 );
	i--;

	sc->tring[i].qe_addr_lo = (short)sc->tringaddr;
	sc->tring[i].qe_addr_hi = (short)((int)sc->tringaddr >> 16);
	sc->tring[i].qe_chain = 1;
	sc->tring[i].qe_flag = sc->tring[i].qe_status1 = QE_NOTYET;
	sc->tring[i].qe_valid = 1;

	sc->nxmit = sc->otindex = sc->tindex = sc->rindex = 0;

	/*
	 * Take the interface out of reset, program the vector,
	 * enable interrupts, and tell the world we are up.
	 */
	s = splimp();
	addr->qe_vector = sc->qe_intvec;
	sc->addr = addr;
	if ( ifp->if_flags & IFF_LOOPBACK )
		addr->qe_csr = QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT | QE_ELOOP;
	else
		addr->qe_csr = QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT | QE_ILOOP;
	addr->qe_rcvlist_lo = (short)sc->rringaddr;
	addr->qe_rcvlist_hi = (short)((int)sc->rringaddr >> 16);
	ifp->if_flags |= IFF_UP | IFF_RUNNING;
	qesetup( sc );
	qestart( unit );
	sc->ztime = time.tv_sec;
	splx( s );

}

/*
 * Start output on interface.
 */
qestart(dev)
	dev_t dev;
{
	int unit = QEUNIT(dev);
	struct uba_device *ui = qeinfo[unit];
	register struct qe_softc *sc = &qe_softc[unit];
	register struct qedevice *addr;
	register struct qe_ring *rp;
	register index;
	struct mbuf *m, *m0;
	int buf_addr, len, j,  s;

	s = splimp();
	addr = (struct qedevice *)ui->ui_addr;
	/*
	 * The deqna doesn't look at anything but the valid bit
	 * to determine if it should transmit this packet.  If you have
	 * a ring and fill it the device will loop indefinitely on the
	 * packet and continue to flood the net with packets until you
	 * break the ring.  For this reason we never queue more than n-1
	 * packets in the transmit ring.
	 *
	 * The microcoders should have obeyed their own definition of the
	 * flag and status words, but instead we have to compensate.
	 */
	for( index = sc->tindex;
		sc->tring[index].qe_valid == 0 && sc->nxmit < (nNXMT-1) ;
		sc->tindex = index = ++index % nNXMT){
		rp = &sc->tring[index];
		if( sc->setupqueued ) {
			buf_addr = sc->setupaddr;
			len = 128;
			rp->qe_setup = 1;
			sc->setupqueued = 0;
		} else {
			IF_DEQUEUE(&sc->is_if.if_snd, m);
			if( m == 0 ){
				splx(s);
				return;
			}
			buf_addr = sc->qeuba.ifu_w[index].x_ifrw.ifrw_info;
			len = qeput(&sc->qeuba, index, m);
		}
		/*
		 * Does the buffer end on an odd byte?
		 */
		if( len & 1 ) {
			len++;
			rp->qe_odd_end = 1;
		}
		if( len < MINDATA )
			len = MINDATA;
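		/*
		 * Editor's note: the device takes the buffer length as a
		 * negative count of 16-bit words.
		 */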
		rp->qe_buf_len = -(len/2);
		buf_addr &= mask;
		rp->qe_flag = rp->qe_status1 = QE_NOTYET;
		rp->qe_addr_lo = (short)buf_addr;
		rp->qe_addr_hi = (short)(buf_addr >> 16);
		rp->qe_eomsg = 1;
		rp->qe_flag = rp->qe_status1 = QE_NOTYET;
		rp->qe_valid = 1;
		sc->nxmit++;
		/*
		 * If the watchdog timer isn't running, kick it.
		 */
		sc->timeout=1;
		if( !qewatchrun++ )
			timeout(qewatch,0,QE_TIMEO);

		/*
		 * See if the xmit list is invalid.
		 */
		if( addr->qe_csr & QE_XL_INVALID ) {
			buf_addr = (int)(sc->tringaddr+index);
			addr->qe_xmtlist_lo = (short)buf_addr;
			addr->qe_xmtlist_hi = (short)(buf_addr >> 16);
		}
		/*
		 * Accumulate statistics for DECnet.
		 */
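		/* (Editor's note: the counters saturate at all ones
		 * instead of wrapping.) */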
		if ((sc->ctrblk.est_bytesent + len) > sc->ctrblk.est_bytesent)
			sc->ctrblk.est_bytesent += len;
		if (sc->ctrblk.est_bloksent != 0xffffffff)
			sc->ctrblk.est_bloksent++;
	}
	splx( s );
}

/*
 * Ethernet interface interrupt processor
 */
qeintr(unit)
	int unit;
{
	register struct qe_softc *sc = &qe_softc[unit];
	register struct ifnet *ifp = &sc->is_if;
	struct qedevice *addr = (struct qedevice *)qeinfo[unit]->ui_addr;
	int s, buf_addr, csr;

	s = splimp();
	csr = addr->qe_csr;
	if ( ifp->if_flags & IFF_LOOPBACK )
		addr->qe_csr = QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT | QE_ELOOP;
	else
		addr->qe_csr = QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT | QE_ILOOP;
	if( csr & QE_RCV_INT )
		qerint( unit );
	if( csr & QE_XMIT_INT )
		qetint( unit );
	if( csr & QE_NEX_MEM_INT )
		panic("qe: nonexistent memory interrupt");

	if( addr->qe_csr & QE_RL_INVALID && sc->rring[sc->rindex].qe_status1 == QE_NOTYET ) {
		buf_addr = (int)&sc->rringaddr[sc->rindex];
		addr->qe_rcvlist_lo = (short)buf_addr;
		addr->qe_rcvlist_hi = (short)(buf_addr >> 16);
	}
	splx( s );
}

/*
 * Ethernet interface transmit interrupt.
 */

qetint(unit)
	int unit;
{
	register struct qe_softc *sc = &qe_softc[unit];
	register struct mbuf *mp, *mp0;
	register first, index;
	register struct qe_ring *rp;
	register struct ifrw *ifrw;
	register struct ifxmt *ifxp;
	struct ether_header *eh;
	int i, status1, status2, setupflag;
	short len;

	while( sc->otindex != sc->tindex && sc->tring[sc->otindex].qe_status1 != QE_NOTYET && sc->nxmit > 0 ) {
		/*
		 * Save the status words from the descriptor so that it can
		 * be released.
		 */
		rp = &sc->tring[sc->otindex];
		status1 = rp->qe_status1;
		status2 = rp->qe_status2;
		setupflag = rp->qe_setup;
		len = (-rp->qe_buf_len) * 2;
		if( rp->qe_odd_end )
			len++;
		/*
		 * Init the buffer descriptor.
		 */
		bzero( rp, sizeof(struct qe_ring));
		if( --sc->nxmit == 0 )
			sc->timeout = 0;
		if( !setupflag ) {
			/*
			 * Do some statistics.
			 */
			sc->is_if.if_opackets++;
			sc->is_if.if_collisions += ( status1 & QE_CCNT ) >> 4;
			/*
			 * Accumulate DECnet statistics.
			 */
			if (status1 & QE_CCNT) {
				if (((status1 & QE_CCNT) >> 4) == 1) {
					if (sc->ctrblk.est_single != 0xffffffff)
						sc->ctrblk.est_single++;
				} else {
					if (sc->ctrblk.est_multiple != 0xffffffff)
						sc->ctrblk.est_multiple++;
				}
			}
			if (status1 & QE_FAIL)
				if (sc->ctrblk.est_collis != 0xffff)
					sc->ctrblk.est_collis++;
			if( status1 & QE_ERROR ) {
				sc->is_if.if_oerrors++;
				if (sc->ctrblk.est_sendfail != 0xffff) {
					sc->ctrblk.est_sendfail++;
					if (status1 & QE_ABORT)
						sc->ctrblk.est_sendfail_bm |= 1;
					if (status1 & QE_NOCAR)
						sc->ctrblk.est_sendfail_bm |= 2;
				}
			}
			/*
			 * If this was a broadcast packet loop it
			 * back because the hardware can't hear its own
			 * transmits and the rwho daemon expects to see them.
			 * This code will have to be expanded to include multi-
			 * cast if the same situation develops.
			 */
			ifxp = &sc->qeuba.ifu_w[sc->otindex];
			ifrw = &sc->qeuba.ifu_w[sc->otindex].x_ifrw;
			eh = (struct ether_header *)ifrw->ifrw_addr;

/*
 * This is a kludge to do a fast check to see if the Ethernet
 * address is all 1's (the Ethernet broadcast address) and loop the
 * packet back.
 */

#define QUAD(x) (*(long *)((x)->ether_dhost))
#define ESHORT(x)	(*(short *)(&((x)->ether_dhost[4])))
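/* Editor's note: together these test all six destination bytes for 0xff. */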

			if(QUAD(eh) == -1 && ESHORT(eh) == -1){
				qeread(sc, ifrw, len, ifxp->x_xtofree);
				ifxp->x_xtofree =0;
			}else if( ifxp->x_xtofree ) {
				m_freem( ifxp->x_xtofree );
				ifxp->x_xtofree = 0;
			}
		}
		sc->otindex = ++sc->otindex % nNXMT;
	}
	qestart( unit );
}

/*
 * Ethernet interface receiver interrupt.
 * If we can't determine the length from the type, we have to drop the packet.
 * Otherwise decapsulate the packet based on the type and pass it to the
 * type-specific higher-level input routine.
 */
qerint(unit)
	int unit;
{
	register struct qe_softc *sc = &qe_softc[unit];
	register struct ifnet *ifp = &sc->is_if;
	register struct qe_ring *rp;
	int len, status1, status2;
	int bufaddr;
	struct ether_header *eh;

	/*
	 * Traverse the receive ring looking for packets to pass back.
	 * The search is complete when we find a descriptor not in use.
	 *
	 * As in the transmit case the deqna doesn't honor its own protocol
	 * so there exists the possibility that the device can beat us around
	 * the ring.  The proper way to guard against this is to ensure that
	 * there is always at least one invalid descriptor.  We chose instead
	 * to make the ring large enough to minimize the problem.  With a ring
	 * size of 4 we haven't been able to see the problem.  To be safe we
	 * doubled that to 8.
	 *
	 */
	for( ; sc->rring[sc->rindex].qe_status1 != QE_NOTYET ; sc->rindex = ++sc->rindex % nNRCV ){
		rp = &sc->rring[sc->rindex];
		status1 = rp->qe_status1;
		status2 = rp->qe_status2;
		bzero( rp, sizeof(struct qe_ring));
		if( (status1 & QE_MASK) == QE_MASK )
			panic("qe: chained packet");
		len = ((status1 & QE_RBL_HI) | (status2 & QE_RBL_LO));
		if( ! (ifp->if_flags & IFF_LOOPBACK) )
			len += 60;
		sc->is_if.if_ipackets++;

		if( ! (ifp->if_flags & IFF_LOOPBACK) ) {
			if( status1 & QE_ERROR ) {
				sc->is_if.if_ierrors++;
				if ((status1 & (QE_OVF | QE_CRCERR | QE_FRAME)) &&
					(sc->ctrblk.est_recvfail != 0xffff)) {
					sc->ctrblk.est_recvfail++;
					if (status1 & QE_OVF)
						sc->ctrblk.est_recvfail_bm |= 4;
					if (status1 & QE_CRCERR)
						sc->ctrblk.est_recvfail_bm |= 1;
					if (status1 & QE_FRAME)
						sc->ctrblk.est_recvfail_bm |= 2;
				}
			} else {
				/*
				 * We don't process setup packets.
				 */
				if( !(status1 & QE_ESETUP) )
					qeread(sc, &sc->qeuba.ifu_r[sc->rindex],
						len - sizeof(struct ether_header),0);
			}
		} else {
			eh = (struct ether_header *)sc->qeuba.ifu_r[sc->rindex].ifrw_addr;
			if ( bcmp(eh->ether_dhost, sc->is_addr, 6) == 0 )
					qeread(sc, &sc->qeuba.ifu_r[sc->rindex],
						len - sizeof(struct ether_header),0);
		}
		/*
		 * Return the buffer to the ring.
		 */
		bufaddr = sc->qeuba.ifu_r[sc->rindex].ifrw_info & mask;
		rp->qe_buf_len = -((MAXPACKETSIZE)/2);
		rp->qe_addr_lo = (short)bufaddr;
		rp->qe_addr_hi = (short)((int)bufaddr >> 16);
		rp->qe_flag = rp->qe_status1 = QE_NOTYET;
		rp->qe_valid = 1;
	}
}
/*
 * Ethernet output routine.
 * Encapsulate a packet of type family for the local net.
 * Use trailer local net encapsulation if enough data in the first
 * packet leaves a multiple of 512 bytes of data in the remainder.
 */
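/*
 * Editor's sketch of the trailer encapsulation built below (not part of
 * the original driver): the Ethernet type becomes ETHERTYPE_TRAIL + n,
 * where n*512 is the data length, the protocol header moves to the end
 * of the frame, and the trailer begins with two 16-bit words giving the
 * original type and the header length:
 *
 *	| ether hdr | n*512 bytes of data | type | length | proto hdr |
 */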
qeoutput(ifp, m0, dst)
	struct ifnet *ifp;
	struct mbuf *m0;
	struct sockaddr *dst;
{
	int type, s, error;
	u_char edst[6];
	struct in_addr idst;
	struct protosw *pr;
	register struct qe_softc *is = &qe_softc[ifp->if_unit];
	register struct mbuf *m = m0;
	register struct ether_header *eh;
	register int off;

	switch (dst->sa_family) {

#ifdef INET
	case AF_INET:
		if (nINET == 0) {
			printf("qe%d: can't handle af%d\n", ifp->if_unit,
				dst->sa_family);
			error = EAFNOSUPPORT;
			goto bad;
		}
		idst = ((struct sockaddr_in *)dst)->sin_addr;
		if (!arpresolve(&is->is_ac, m, &idst, edst))
			return (0);	/* if not yet resolved */
		off = ntohs((u_short)mtod(m, struct ip *)->ip_len) - m->m_len;
		/* need per host negotiation */
		if ((ifp->if_flags & IFF_NOTRAILERS) == 0)
		if (off > 0 && (off & 0x1ff) == 0 &&
			m->m_off >= MMINOFF + 2 * sizeof (u_short)) {
			type = ETHERTYPE_TRAIL + (off>>9);
			m->m_off -= 2 * sizeof (u_short);
			m->m_len += 2 * sizeof (u_short);
			*mtod(m, u_short *) = htons((u_short)ETHERTYPE_IP);
			*(mtod(m, u_short *) + 1) = htons((u_short)m->m_len);
			goto gottrailertype;
		}
		type = ETHERTYPE_IP;
		off = 0;
		goto gottype;
#endif

	case AF_UNSPEC:
		eh = (struct ether_header *)dst->sa_data;
		bcopy((caddr_t)eh->ether_dhost, (caddr_t)edst, sizeof (edst));
		type = eh->ether_type;
		goto gottype;

	default:
		/*
		 * Try to find other address families and call protocol
		 * specific output routine.
		 */
		if (pr = iffamily_to_proto(dst->sa_family)) {
			(*pr->pr_ifoutput)(ifp, m0, dst, &type, (char *)edst);
			goto gottype;
		} else {
			printf("qe%d: can't handle af%d\n", ifp->if_unit,
				dst->sa_family);
			error = EAFNOSUPPORT;
			goto bad;
		}
	}

gottrailertype:
	/*
	 * Packet to be sent as trailer: move first packet
	 * (control information) to end of chain.
	 */
	while (m->m_next)
		m = m->m_next;
	m->m_next = m0;
	m = m0->m_next;
	m0->m_next = 0;
	m0 = m;

gottype:
	/*
	 * Add local net header.  If no space in first mbuf,
	 * allocate another.
	 */
	if (m->m_off > MMAXOFF || MMINOFF + sizeof (struct ether_header) > m->m_off) {
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == 0) {
			error = ENOBUFS;
			goto bad;
		}
		m->m_next = m0;
		m->m_off = MMINOFF;
		m->m_len = sizeof (struct ether_header);
	} else {
		m->m_off -= sizeof (struct ether_header);
		m->m_len += sizeof (struct ether_header);
	}
	eh = mtod(m, struct ether_header *);
	eh->ether_type = htons((u_short)type);
	bcopy((caddr_t)edst, (caddr_t)eh->ether_dhost, sizeof (edst));
	bcopy((caddr_t)is->is_addr, (caddr_t)eh->ether_shost, sizeof (is->is_addr));

	/*
	 * Queue message on interface, and start output if interface
	 * not yet active.
	 */
	s = splimp();
	if (IF_QFULL(&ifp->if_snd)) {
		IF_DROP(&ifp->if_snd);
		splx(s);
		m_freem(m);
		return (ENOBUFS);
	}
	IF_ENQUEUE(&ifp->if_snd, m);
	qestart(ifp->if_unit);
	splx(s);
	return (0);

bad:
	m_freem(m0);
	return (error);
}


/*
 * Process an ioctl request.
 */
qeioctl(ifp, cmd, data)
	register struct ifnet *ifp;
	int cmd;
	caddr_t data;
{
	struct qe_softc *sc = &qe_softc[ifp->if_unit];
	struct uba_device *ui = qeinfo[ifp->if_unit];
	struct qedevice *addr = (struct qedevice *)ui->ui_addr;
	struct sockaddr *sa;
	struct sockaddr_in *sin;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifdevea *ifd = (struct ifdevea *)data;
	struct ctrreq *ctr = (struct ctrreq *)data;
	struct protosw *pr;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int i,j = -1,s = splimp(), error = 0;

	switch (cmd) {

	case SIOCENABLBACK:
		printf("qe%d: internal loopback enable requested\n", ifp->if_unit);
		ifp->if_flags |= IFF_LOOPBACK;
#ifdef notdef
		if((ifp->if_flags |= IFF_LOOPBACK) & IFF_RUNNING)
			if_rtinit(ifp, -1);
#endif
		qerestart( sc );
		break;

	case SIOCDISABLBACK:
		printf("qe%d: internal loopback disable requested\n", ifp->if_unit);
		ifp->if_flags &= ~IFF_LOOPBACK;
#ifdef notdef
		if((ifp->if_flags &= ~IFF_LOOPBACK) & IFF_RUNNING)
			if_rtinit(ifp, -1);
#endif
		qerestart( sc );
		qeinit( ifp->if_unit );
		break;

	case SIOCRPHYSADDR:
		bcopy(sc->is_addr, ifd->current_pa, 6);
		for( i = 0; i < 6; i++ )
			ifd->default_pa[i] = addr->qe_sta_addr[i] & 0xff;
		break;

	case SIOCSPHYSADDR:
		bcopy(ifr->ifr_addr.sa_data,sc->is_addr,MULTISIZE);
		for ( i = 0; i < 6; i++ )
			sc->setup_pkt[i][1] = sc->is_addr[i];
		if (ifp->if_flags & IFF_RUNNING) {
			qesetup( sc );
#ifdef notdef
			if_rtinit(ifp, -1);
#endif
		}
		qeinit(ifp->if_unit);
		break;

	case SIOCDELMULTI:
	case SIOCADDMULTI:
		if (cmd == SIOCDELMULTI) {
			for (i = 0; i < NMULTI; i++)
				if (bcmp(&sc->multi[i],ifr->ifr_addr.sa_data,MULTISIZE) == 0) {
					if (--sc->muse[i] == 0)
						bcopy(qunused_multi,&sc->multi[i],MULTISIZE);
				}
		} else {
			for (i = 0; i < NMULTI; i++) {
				if (bcmp(&sc->multi[i],ifr->ifr_addr.sa_data,MULTISIZE) == 0) {
					sc->muse[i]++;
					goto done;
				}
				if (bcmp(&sc->multi[i],qunused_multi,MULTISIZE) == 0)
					j = i;
			}
			if (j == -1) {
				printf("qe%d: SIOCADDMULTI failed, multicast list full: %d\n",ui->ui_unit,NMULTI);
				error = ENOBUFS;
				goto done;
			}
			bcopy(ifr->ifr_addr.sa_data, &sc->multi[j], MULTISIZE);
			sc->muse[j]++;
		}
		for ( i = 0; i < 6; i++ )
			sc->setup_pkt[i][1] = sc->is_addr[i];
		if (ifp->if_flags & IFF_RUNNING) {
			qesetup( sc );
		}
		break;

	case SIOCRDCTRS:
	case SIOCRDZCTRS:
		ctr->ctr_ether = sc->ctrblk;
		ctr->ctr_type = CTR_ETHER;
		ctr->ctr_ether.est_seconds = (time.tv_sec - sc->ztime) > 0xfffe ? 0xffff : (time.tv_sec - sc->ztime);
		if (cmd == SIOCRDZCTRS) {
			sc->ztime = time.tv_sec;
			bzero(&sc->ctrblk, sizeof(struct estat));
		}
		break;

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		qeinit(ifp->if_unit);
		switch(ifa->ifa_addr.sa_family) {
#ifdef INET
		case AF_INET:
			((struct arpcom *)ifp)->ac_ipaddr =
				IA_SIN(ifa)->sin_addr;
			arpwhohas((struct arpcom *)ifp, &IA_SIN(ifa)->sin_addr);
			break;
#endif

		default:
			if (pr=iffamily_to_proto(ifa->ifa_addr.sa_family)) {
				error = (*pr->pr_ifioctl)(ifp, cmd, data);
			}
			break;
		}
		break;
	default:
		error = EINVAL;

	}
done:	splx(s);
	return (error);
}


/*
 * Initialize a ring descriptor with mbuf allocation side effects
 */
qeinitdesc( rp, buf, len )
	register struct qe_ring *rp;
	char *buf; 			/* mapped address	*/
	int len;
{
	/*
	 * Clear the entire descriptor.
	 */
	bzero( rp, sizeof(struct qe_ring));

	if( len ) {
		rp->qe_buf_len = -(len/2);
		rp->qe_addr_lo = (short)buf;
		rp->qe_addr_hi = (short)((int)buf >> 16);
	}
}
/*
 * Build a setup packet - the physical address will already be present
 * in the first column.
 */
qesetup( sc )
struct qe_softc *sc;
{
	int i, j, offset = 0, next = 3;

	/*
	 * Copy the target address to the rest of the entries in this row.
	 */
	for ( j = 0; j < 6 ; j++ )
		for ( i = 2 ; i < 8 ; i++ )
			sc->setup_pkt[j][i] = sc->setup_pkt[j][1];
	/*
	 * Duplicate the first half.
	 */
	bcopy(sc->setup_pkt, sc->setup_pkt[8], 64);
	/*
	 * Fill in the broadcast address.
	 */
	for ( i = 0; i < 6 ; i++ )
		sc->setup_pkt[i][2] = 0xff;
	/*
	 * If the device structure is available fill in the multicast
	 * addresses in the rest of the setup packet.
	 */
	for ( i = 0; i < NMULTI; i++ ) {
		if (bcmp(&sc->multi[i],qunused_multi,MULTISIZE) != 0) {
			for ( j = 0; j < 6; j++ )
				sc->setup_pkt[offset+j][next] = sc->multi[i].qm_char[j];
			if (++next == 8) {
				next = 1;
				offset = 8;
			}
		}
	}
	sc->setupqueued++;
}
/*
 * Routines supporting Q-BUS network interfaces.
 */

/*
 * Init Q-BUS for interface on uban whose headers of size hlen are to
 * end on a page boundary.  We allocate a Q-BUS map register for the page
 * with the header, and nmr more Q-BUS map registers for i/o on the adapter,
 * doing this for each receive and transmit buffer.  We also
 * allocate page frames in the mbuffer pool for these pages.
 */
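/*
 * Editor's sketch of one buffer (derived from the code below; not part
 * of the original driver): each buffer occupies ncl clusters, with the
 * link-level header placed so it ends exactly on a page boundary and
 * the data begins page-aligned:
 *
 *	|<-- CLBYTES - hlen -->|<- hlen ->|<-- nmr pages of data -->|
 *	cp                     ifrw_addr  page/cluster boundary
 */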
qe_ubainit(ifu, uban, hlen, nmr, mptr)
	register struct qeuba *ifu;
	int uban, hlen, nmr;
	char *mptr;
{
	register caddr_t cp, dp;
	register struct ifrw *ifrw;
	register struct ifxmt *ifxp;
	int i, ncl;

	ncl = clrnd(nmr + CLSIZE) / CLSIZE;
	if (ifu->ifu_r[0].ifrw_addr)
		/*
		 * If the first read buffer has a non-zero
		 * address, it means we have already allocated core.
		 */
		cp = ifu->ifu_r[0].ifrw_addr - (CLBYTES - hlen);
	else {
		cp = mptr;
		if (cp == 0)
			return (0);
		ifu->ifu_hlen = hlen;
		ifu->ifu_uban = uban;
		ifu->ifu_uba = uba_hd[uban].uh_uba;
		dp = cp + CLBYTES - hlen;
		for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[nNRCV]; ifrw++) {
			ifrw->ifrw_addr = dp;
			dp += ncl * CLBYTES;
		}
		for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[nNXMT]; ifxp++) {
			ifxp->x_ifrw.ifrw_addr = dp;
			dp += ncl * CLBYTES;
		}
	}
	/* allocate for receive ring */
	for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[nNRCV]; ifrw++) {
		if (qe_ubaalloc(ifu, ifrw, nmr) == 0) {
			struct ifrw *rw;

			for (rw = ifu->ifu_r; rw < ifrw; rw++)
				ubarelse(ifu->ifu_uban, &rw->ifrw_info);
			goto bad;
		}
	}
	/* and now transmit ring */
	for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[nNXMT]; ifxp++) {
		ifrw = &ifxp->x_ifrw;
		if (qe_ubaalloc(ifu, ifrw, nmr) == 0) {
			struct ifxmt *xp;

			for (xp = ifu->ifu_w; xp < ifxp; xp++)
				ubarelse(ifu->ifu_uban, &xp->x_ifrw.ifrw_info);
			for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[nNRCV]; ifrw++)
				ubarelse(ifu->ifu_uban, &ifrw->ifrw_info);
			goto bad;
		}
		for (i = 0; i < nmr; i++)
			ifxp->x_map[i] = ifrw->ifrw_mr[i];
		ifxp->x_xswapd = 0;
	}
	return (1);
bad:
	m_pgfree(cp, nNTOT * ncl);
	ifu->ifu_r[0].ifrw_addr = 0;
	return(0);
}

/*
 * Set up an ifrw structure by allocating Q-BUS map registers,
 * possibly a buffered data path, and initializing the fields of
 * the ifrw structure to minimize run-time overhead.
 */
static
qe_ubaalloc(ifu, ifrw, nmr)
	struct qeuba *ifu;
	register struct ifrw *ifrw;
	int nmr;
{
	register int info;

	info = uballoc(ifu->ifu_uban, ifrw->ifrw_addr,
			nmr*NBPG + ifu->ifu_hlen, ifu->ifu_flags);
	if (info == 0){
		return (0);
	}
	ifrw->ifrw_info = info;
	ifrw->ifrw_bdp = UBAI_BDP(info);
	ifrw->ifrw_proto = UBAMR_MRV | (UBAI_BDP(info) << UBAMR_DPSHIFT);
	ifrw->ifrw_mr = &ifu->ifu_uba->uba_map[UBAI_MR(info) + 1];
	return (1);
}

/*
 * Pull read data off an interface.
 * Len is the length of the data, with the local net header stripped.
 * Off is non-zero if a trailer protocol was used, and
 * gives the offset of the trailer information.
 * We copy the trailer information and then all the normal
 * data into mbufs.  When full cluster sized units are present
 * on the interface on cluster boundaries we can get them more
 * easily by remapping, and take advantage of this here.
 */
struct mbuf *
qeget(ifu, ifrw, totlen, off0)
	register struct qeuba *ifu;
	register struct ifrw *ifrw;
	int totlen, off0;
{
	struct mbuf *top, **mp, *m;
	int off = off0, len;
	register caddr_t cp = ifrw->ifrw_addr + ifu->ifu_hlen;

	top = 0;
	mp = &top;
	while (totlen > 0) {
		MGET(m, M_DONTWAIT, MT_DATA);
		if (m == 0)
			goto bad;
		if (off) {
			len = totlen - off;
			cp = ifrw->ifrw_addr + ifu->ifu_hlen + off;
		} else
			len = totlen;
		if (len >= CLBYTES) {
			struct mbuf *p;
			struct pte *cpte, *ppte;
			int x, *ip, i;

			MCLGET(m, p);
			if (p == 0)
				goto nopage;
			len = m->m_len = CLBYTES;
			if(cpu == MVAX_I || !claligned(cp))
				goto copy;

			/*
			 * Switch pages mapped to Q-BUS with new page p,
			 * as quick form of copy.  Remap Q-BUS and invalidate.
			 */
			cpte = &Mbmap[mtocl(cp)*CLSIZE];
			ppte = &Mbmap[mtocl(p)*CLSIZE];
			x = btop(cp - ifrw->ifrw_addr);
			ip = (int *)&ifrw->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++) {
				struct pte t;
				t = *ppte; *ppte++ = *cpte; *cpte = t;
				*ip++ =
				    cpte++->pg_pfnum|ifrw->ifrw_proto;
				mtpr(TBIS, cp);
				cp += NBPG;
				mtpr(TBIS, (caddr_t)p);
				p += NBPG / sizeof (*p);
			}
			goto nocopy;
		}
nopage:
		m->m_len = MIN(MLEN, len);
		m->m_off = MMINOFF;
copy:
		bcopy(cp, mtod(m, caddr_t), (unsigned)m->m_len);
		cp += m->m_len;
nocopy:
		*mp = m;
		mp = &m->m_next;
		if (off) {
			/* sort of an ALGOL-W style for statement... */
			off += m->m_len;
			if (off == totlen) {
				cp = ifrw->ifrw_addr + ifu->ifu_hlen;
				off = 0;
				totlen = off0;
			}
		} else
			totlen -= m->m_len;
	}
	return (top);
bad:
	m_freem(top);
	return (0);
}

/*
 * Map a chain of mbufs onto a network interface
 * in preparation for an i/o operation.
 * The argument chain of mbufs includes the local network
 * header which is copied to be in the mapped, aligned
 * i/o space.
 */
qeput(ifu, n, m)
	struct qeuba *ifu;
	int n;
	register struct mbuf *m;
{
	register caddr_t cp;
	register struct ifxmt *ifxp;
	register struct ifrw *ifrw;
	register int i;
	int xswapd = 0;
	int x, cc, t;
	caddr_t dp;

	ifxp = &ifu->ifu_w[n];
	ifrw = &ifxp->x_ifrw;
	cp = ifrw->ifrw_addr;
	ifxp->x_xtofree = m;
	while (m) {
		dp = mtod(m, char *);
		if (cpu != MVAX_I && claligned(cp) && claligned(dp) && m->m_len == CLBYTES) {
			struct pte *pte; int *ip;
			pte = &Mbmap[mtocl(dp)*CLSIZE];
			x = btop(cp - ifrw->ifrw_addr);
			ip = (int *)&ifrw->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++)
				*ip++ =
				    ifrw->ifrw_proto | pte++->pg_pfnum;
			xswapd |= 1 << (x>>(CLSHIFT-PGSHIFT));
			cp += m->m_len;
		} else {
			bcopy(mtod(m, caddr_t), cp, (unsigned)m->m_len);
			cp += m->m_len;
		}
		m = m->m_next;
	}

	/*
	 * Xswapd is the set of clusters we just mapped out.  Ifxp->x_xswapd
	 * is the set of clusters mapped out from before.  We compute
	 * the number of clusters involved in this operation in x.
	 * Clusters mapped out before and involved in this operation
	 * should be unmapped so original pages will be accessed by the device.
	 */
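	/*
	 * Editor's example (not in the original): if the previous transmit
	 * mapped out clusters {0,1} and this one mapped out only cluster 0,
	 * cluster 1 falls within the first x clusters of this operation and
	 * its original map registers are restored from x_map below.
	 */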
	cc = cp - ifrw->ifrw_addr;
	x = ((cc - ifu->ifu_hlen) + CLBYTES - 1) >> CLSHIFT;
	ifxp->x_xswapd &= ~xswapd;
	while (i = ffs(ifxp->x_xswapd)) {
		i--;
		if (i >= x)
			break;
		ifxp->x_xswapd &= ~(1<<i);
		i *= CLSIZE;
		for (t = 0; t < CLSIZE; t++) {
			ifrw->ifrw_mr[i] = ifxp->x_map[i];
			i++;
		}
	}
	ifxp->x_xswapd |= xswapd;
	return (cc);
}
/*
 * Pass a packet to the higher levels.
 * We deal with the trailer protocol here.
 */
qeread(sc, ifrw, len, swloop)
	register struct qe_softc *sc;
	struct ifrw *ifrw;
	int len;
	struct mbuf *swloop;
{
	struct ether_header *eh, swloop_eh;
	struct mbuf *m, *swloop_tmp1, *swloop_tmp2;
	struct protosw *pr;
	int off, resid;
	struct ifqueue *inq;

	/*
	 * Deal with trailer protocol: if type is INET trailer
	 * get true type from first 16-bit word past data.
	 * Remember that type was trailer by setting off.
	 */

	if (swloop) {
		eh = mtod(swloop, struct ether_header *);
		swloop_eh = *eh;
		eh = &swloop_eh;
		if ( swloop->m_len > sizeof(struct ether_header))
			m_adj(swloop, sizeof(struct ether_header));
		else {
			MFREE(swloop, swloop_tmp1);
			if ( ! swloop_tmp1 )
				return;
			else
				swloop = swloop_tmp1;
		}
	} else
		eh = (struct ether_header *)ifrw->ifrw_addr;

	eh->ether_type = ntohs((u_short)eh->ether_type);
#define	qedataaddr(eh, off, type)	((type)(((caddr_t)((eh)+1)+(off))))
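/* Editor's note: qedataaddr(eh, off, type) points off bytes past the
 * Ethernet header, where a trailer packet keeps its real type and
 * length words. */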
	if (eh->ether_type >= ETHERTYPE_TRAIL &&
	    eh->ether_type < ETHERTYPE_TRAIL+ETHERTYPE_NTRAILER) {
		off = (eh->ether_type - ETHERTYPE_TRAIL) * 512;
		if (off >= ETHERMTU)
			return;		/* sanity */
		if (swloop) {
			struct mbuf *mprev, *m0 = swloop;
/* need to check this against off */
			mprev = m0;
			while (swloop->m_next){	/* real header at end of chain */
				mprev = swloop;
				swloop = swloop->m_next;
			}
			/* move to beginning of chain */
			mprev->m_next = 0;
			swloop->m_next = m0;
			eh->ether_type = ntohs( *mtod(swloop, u_short *));
		} else {
			eh->ether_type = ntohs(*qedataaddr(eh, off, u_short *));
			resid = ntohs(*(qedataaddr(eh, off+2, u_short *)));
			if (off + resid > len)
				return;		/* sanity */
			len = off + resid;
		}
	} else {
		off = 0;
	}
	if (len == 0)
		return;

	/*
	 * Pull packet off interface.  Off is nonzero if packet
	 * has trailing header; qeget will then force this header
	 * information to be at the front, but we still have to drop
	 * the type and length which are at the front of any trailer data.
	 */
	if (swloop) {
		m = m_copy(swloop, 0, M_COPYALL);
		m_freem(swloop);
	} else {
		m = qeget(&sc->qeuba, ifrw, len, off);
	}

	if (m == 0)
		return;

	if (off) {
		m->m_off += 2 * sizeof (u_short);
		m->m_len -= 2 * sizeof (u_short);
	}

	/*
	 * Accumulate stats for DECnet.
	 */
	sc->ctrblk.est_bytercvd += m->m_len;
	sc->ctrblk.est_blokrcvd++;

	switch (eh->ether_type) {

#ifdef INET
	case ETHERTYPE_IP:
		if (nINET==0) {
			m_freem(m);
			return;
		}
		schednetisr(NETISR_IP);
		inq = &ipintrq;
		break;

	case ETHERTYPE_ARP:
		if (nETHER==0) {
			m_freem(m);
			return;
		}
		arpinput(&sc->is_ac, m);
		return;
#endif
	default:
		/*
		 * See if other protocol families are defined
		 * and call protocol specific routines.
		 * If no other protocols are defined then dump the message.
		 */
		if (pr=iftype_to_proto(eh->ether_type))  {
			if ((m = (struct mbuf *)(*pr->pr_ifinput)(m, &sc->is_if, &inq, eh)) == 0)
				return;
		} else {
			if (sc->ctrblk.est_unrecog != 0xffff)
				sc->ctrblk.est_unrecog++;
			m_freem(m);
			return;
		}
	}

	if (IF_QFULL(inq)) {
		IF_DROP(inq);
		m_freem(m);
		return;
	}
	IF_ENQUEUE(inq, m);
}
/*
 * Watchdog timer routine.  There is a condition in the hardware that
 * causes the board to lock up under heavy load.  This routine detects
 * the hang and restarts the device.
 */
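/*
 * Editor's note: qewatch() reschedules itself every QE_TIMEO ticks while
 * any unit has a transmit outstanding; the qewatchrun flag keeps at most
 * one instance queued (see the kick in qestart).
 */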
qewatch()
{
	register struct qe_softc *sc;
	register int i;
	int inprogress=0;

	for( i=0 ; i<nNQE ; i++ ) {
		sc = &qe_softc[i];
		if( sc->timeout )
			if( ++sc->timeout > 3 )
				qerestart( sc );
			else
				inprogress++;
	}
	if( inprogress ){
		timeout(qewatch, 0, QE_TIMEO);
		qewatchrun++;
	} else
		qewatchrun=0;
}
/*
 * Restart for board lockup problem.
 */
int qe_restarts;
int qe_show_restarts = 0;	/* 1 ==> log with printf, 0 ==> mprintf */
qerestart( sc )
	register struct qe_softc *sc;
{
	register struct ifnet *ifp = &sc->is_if;
	register struct qedevice *addr = sc->addr;
	register struct qe_ring *rp;
	register i;

	qe_restarts++;
	addr->qe_csr = QE_RESET;
	sc->timeout = 0;
	qesetup( sc );
	for(i = 0, rp = sc->tring; i<nNXMT ; rp++, i++ ){
		rp->qe_flag = rp->qe_status1 = QE_NOTYET;
		rp->qe_valid = 0;
	}
	sc->nxmit = sc->otindex = sc->tindex = sc->rindex = 0;
	if ( ifp->if_flags & IFF_LOOPBACK )
		addr->qe_csr = QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT | QE_ELOOP;
	else
		addr->qe_csr = QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT | QE_ILOOP;
	addr->qe_rcvlist_lo = (short)sc->rringaddr;
	addr->qe_rcvlist_hi = (short)((int)sc->rringaddr >> 16);
	for( i = 0 ; sc != &qe_softc[i] ; i++ )
		;
	qestart( i );
	if (qe_show_restarts)
		printf("qerestart: restarted qe%d %d\n", i, qe_restarts);
	else
		mprintf("qerestart: restarted qe%d %d\n", i, qe_restarts);
}
#endif