xref: /csrg-svn/sys/vax/if/if_qe.c (revision 27477)
1 /*	@(#)if_qe.c	1.1 (Berkeley) 04/28/86 */
2 
3 #ifndef lint
4 static	char	*sccsid = "@(#)if_qe.c	1.15	(ULTRIX)	4/16/86";
5 #endif lint
6 
7 
8 /****************************************************************
9  *								*
10  *        Licensed from Digital Equipment Corporation 		*
11  *                       Copyright (c) 				*
12  *               Digital Equipment Corporation			*
13  *                   Maynard, Massachusetts 			*
14  *                         1985, 1986 				*
15  *                    All rights reserved. 			*
16  *								*
17  *        The Information in this software is subject to change *
18  *   without notice and should not be construed as a commitment *
19  *   by  Digital  Equipment  Corporation.   Digital   makes  no *
20  *   representations about the suitability of this software for *
21  *   any purpose.  It is supplied "As Is" without expressed  or *
22  *   implied  warranty. 					*
23  *								*
24  *        If the Regents of the University of California or its *
25  *   licensees modify the software in a manner creating  	*
 *   derivative copyright rights, appropriate copyright  	*
 *   legends may be placed on the derivative work in addition   *
28  *   to that set forth above. 					*
29  *								*
30  ****************************************************************/
31 /* ---------------------------------------------------------------------
32  * Modification History
33  *
34  * 15-Apr-86  -- afd
35  *	Rename "unused_multi" to "qunused_multi" for extending Generic
36  *	kernel to MicroVAXen.
37  *
38  * 18-mar-86  -- jaw     br/cvec changed to NOT use registers.
39  *
40  * 12 March 86 -- Jeff Chase
41  *	Modified to handle the new MCLGET macro
42  *	Changed if_qe_data.c to use more receive buffers
43  *	Added a flag to poke with adb to log qe_restarts on console
44  *
45  * 19 Oct 85 -- rjl
46  *	Changed the watch dog timer from 30 seconds to 3.  VMS is using
 * 	less than 1 second in theirs. Also turned the printf into an
48  *	mprintf.
49  *
50  *  09/16/85 -- Larry Cohen
51  * 		Add 43bsd alpha tape changes for subnet routing
52  *
53  *  1 Aug 85 -- rjl
54  *	Panic on a non-existent memory interrupt and the case where a packet
 *	was chained.  The first should never happen because non-existent
56  *	memory interrupts cause a bus reset. The second should never happen
57  *	because we hang 2k input buffers on the device.
58  *
59  *  1 Aug 85 -- rich
60  *      Fixed the broadcast loopback code to handle Clusters without
61  *      wedging the system.
62  *
63  *  27 Feb. 85 -- ejf
64  *	Return default hardware address on ioctl request.
65  *
66  *  12 Feb. 85 -- ejf
67  *	Added internal extended loopback capability.
68  *
69  *  27 Dec. 84 -- rjl
70  *	Fixed bug that caused every other transmit descriptor to be used
71  *	instead of every descriptor.
72  *
73  *  21 Dec. 84 -- rjl
74  *	Added watchdog timer to mask hardware bug that causes device lockup.
75  *
76  *  18 Dec. 84 -- rjl
77  *	Reworked driver to use q-bus mapping routines.  MicroVAX-I now does
 *	copying instead of m-buf shuffling.
 *	A number of deficiencies in the hardware/firmware were compensated
80  *	for. See comments in qestart and qerint.
81  *
82  *  14 Nov. 84 -- jf
83  *	Added usage counts for multicast addresses.
84  *	Updated general protocol support to allow access to the Ethernet
85  *	header.
86  *
87  *  04 Oct. 84 -- jf
88  *	Added support for new ioctls to add and delete multicast addresses
89  *	and set the physical address.
90  *	Add support for general protocols.
91  *
92  *  14 Aug. 84 -- rjl
93  *	Integrated Shannon changes. (allow arp above 1024 and ? )
94  *
95  *  13 Feb. 84 -- rjl
96  *
97  *	Initial version of driver. derived from IL driver.
98  *
99  * ---------------------------------------------------------------------
100  */
101 
102 #include "qe.h"
103 #if	NQE > 0 || defined(BINARY)
104 /*
105  * Digital Q-BUS to NI Adapter
106  */
107 
108 #include "../data/if_qe_data.c"
109 extern struct protosw *iftype_to_proto(), *iffamily_to_proto();
110 extern struct timeval time;
111 extern timeout();
112 
113 int	qeprobe(), qeattach(), qeint(), qewatch();
114 int	qeinit(),qeoutput(),qeioctl(),qereset(),qewatch();
115 struct mbuf *qeget();
116 
117 u_short qestd[] = { 0 };
118 struct	uba_driver qedriver =
119 	{ qeprobe, 0, qeattach, 0, qestd, "qe", qeinfo };
120 
121 u_char qunused_multi[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
122 
123 #define QE_TIMEO	(15)
124 #define	QEUNIT(x)	minor(x)
125 static int mask = 0x3ffff;		/* address mask		*/
126 int qewatchrun = 0;			/* watchdog running	*/
127 /*
128  * The deqna shouldn't recieve more than ETHERMTU + sizeof(struct ether_header)
129  * but will actually take in up to 2048 bytes. To guard against the receiver
130  * chaining buffers (which we aren't prepared to handle) we allocate 2kb
131  * size buffers.
132  */
133 #define MAXPACKETSIZE 2048		/* Should really be ETHERMTU	*/
134 /*
135  * Probe the QNA to see if it's there
136  */
137 qeprobe(reg)
138 	caddr_t reg;
139 {
140 
141 	register struct qedevice *addr = (struct qedevice *)reg;
142 	register struct qe_ring *rp;
143 	register struct qe_ring *prp; 	/* physical rp 		*/
144 	register int i, j, ncl;
145 	static int next=0;		/* softc index		*/
146 	register struct qe_softc *sc = &qe_softc[next++];
147 
148 	/*
149 	 * Set the address mask for the particular cpu
150 	 */
151 	if( cpu == MVAX_I )
152 		mask = 0x3fffff;
153 	else
154 		mask = 0x3ffff;
155 
156 	/*
157 	 * The QNA interrupts on i/o operations. To do an I/O operation
158 	 * we have to setup the interface by transmitting a setup  packet.
159 	 */
160 	addr->qe_csr = QE_RESET;
161 	addr->qe_vector = (uba_hd[numuba].uh_lastiv -= 4);
162 
163 	/*
164 	 * Map the communications area and the setup packet.
165 	 */
166 	sc->setupaddr =
167 		uballoc(0, sc->setup_pkt, sizeof(sc->setup_pkt), 0);
168 	sc->rringaddr = (struct qe_ring *)
169 		uballoc(0, sc->rring, sizeof(struct qe_ring)*(nNTOT+2),0);
170 	prp = (struct qe_ring *)((int)sc->rringaddr & mask);
171 
172 	/*
173 	 * The QNA will loop the setup packet back to the receive ring
174 	 * for verification, therefore we initialize the first
175 	 * receive & transmit ring descriptors and link the setup packet
176 	 * to them.
177 	 */
178 	qeinitdesc( sc->tring, sc->setupaddr & mask, sizeof(sc->setup_pkt));
179 	qeinitdesc( sc->rring, sc->setupaddr & mask, sizeof(sc->setup_pkt));
180 
181 	rp = (struct qe_ring *)sc->tring;
182 	rp->qe_setup = 1;
183 	rp->qe_eomsg = 1;
184 	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
185 	rp->qe_valid = 1;
186 
187 	rp = (struct qe_ring *)sc->rring;
188 	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
189 	rp->qe_valid = 1;
190 
191 	/*
192 	 * Get the addr off of the interface and place it into the setup
193 	 * packet. This code looks strange due to the fact that the address
194 	 * is placed in the setup packet in col. major order.
195 	 */
196 	for( i = 0 ; i < 6 ; i++ )
197 		sc->setup_pkt[i][1] = addr->qe_sta_addr[i];
198 
199 	qesetup( sc );
200 	/*
201 	 * Start the interface and wait for the packet.
202 	 */
203 	j = cvec;
204 	addr->qe_csr = QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT;
205 	addr->qe_rcvlist_lo = (short)prp;
206 	addr->qe_rcvlist_hi = (short)((int)prp >> 16);
207 	prp += nNRCV+1;
208 	addr->qe_xmtlist_lo = (short)prp;
209 	addr->qe_xmtlist_hi = (short)((int)prp >> 16);
210 	DELAY(10000);
211 	/*
212 	 * All done with the bus resources. If it's a uVAX-I they weren't
213 	 * really allocated otherwise deallocated them.
214 	 */
215 	if( cpu != MVAX_I ) {
216 		ubarelse(0, &sc->setupaddr);
217 		ubarelse(0, &sc->rringaddr);
218 	}
219 	if( cvec == j )
220 		return 0;		/* didn't interrupt	*/
221 
222 	/*
223 	 * Allocate page size buffers now. If we wait until the network
224 	 * is setup they have already fragmented. By doing it here in
225 	 * conjunction with always coping on uVAX-I processors we obtain
226 	 * physically contigous buffers for dma transfers.
227 	 */
228 	ncl = clrnd((int)btoc(MAXPACKETSIZE) + CLSIZE) / CLSIZE;
229 	sc->buffers = m_clalloc(nNTOT * ncl, MPG_SPACE);
230 	return( sizeof(struct qedevice) );
231 }
232 
233 /*
234  * Interface exists: make available by filling in network interface
235  * record.  System will initialize the interface when it is ready
236  * to accept packets.
237  */
238 qeattach(ui)
239 	struct uba_device *ui;
240 {
241 	register struct qe_softc *sc = &qe_softc[ui->ui_unit];
242 	register struct ifnet *ifp = &sc->is_if;
243 	register struct qedevice *addr = (struct qedevice *)ui->ui_addr;
244 	register int i;
245 	struct sockaddr_in *sin;
246 
247 	ifp->if_unit = ui->ui_unit;
248 	ifp->if_name = "qe";
249 	ifp->if_mtu = ETHERMTU;
250 	ifp->if_flags |= IFF_BROADCAST | IFF_DYNPROTO;
251 
252 	/*
253 	 * Read the address from the prom and save it.
254 	 */
255 	for( i=0 ; i<6 ; i++ )
256 		sc->setup_pkt[i][1] = sc->is_addr[i] = addr->qe_sta_addr[i] & 0xff;
257 
258 	/*
259 	 * Save the vector for initialization at reset time.
260 	 */
261 	sc->qe_intvec = addr->qe_vector;
262 
263 	sin = (struct sockaddr_in *)&ifp->if_addr;
264 	sin->sin_family = AF_INET;
265 	ifp->if_init = qeinit;
266 	ifp->if_output = qeoutput;
267 	ifp->if_ioctl = qeioctl;
268 	ifp->if_reset = qereset;
269 	if_attach(ifp);
270 }
271 
272 /*
273  * Reset of interface after UNIBUS reset.
274  * If interface is on specified uba, reset its state.
275  */
276 qereset(unit, uban)
277 	int unit, uban;
278 {
279 	register struct uba_device *ui;
280 
281 	if (unit >= nNQE || (ui = qeinfo[unit]) == 0 || ui->ui_alive == 0 ||
282 		ui->ui_ubanum != uban)
283 		return;
284 	printf(" qe%d", unit);
285 	qeinit(unit);
286 }
287 
288 /*
289  * Initialization of interface.
290  */
291 qeinit(unit)
292 	int unit;
293 {
294 	register struct qe_softc *sc = &qe_softc[unit];
295 	register struct uba_device *ui = qeinfo[unit];
296 	register struct qedevice *addr = (struct qedevice *)ui->ui_addr;
297 	register struct ifnet *ifp = &sc->is_if;
298 	register i;
299 	int s;
300 
301 	/* address not known */
302 	/* DECnet must set this somewhere to make device happy */
303 	if (ifp->if_addrlist == (struct ifaddr *)0)
304 			return;
305 	if (ifp->if_flags & IFF_RUNNING)
306 		return;
307 
308 	/*
309 	 * map the communications area onto the device
310 	 */
311 	sc->rringaddr = (struct qe_ring *)((int)uballoc(0,
312 		sc->rring, sizeof(struct qe_ring)*(nNTOT+2),0)&mask);
313 	sc->tringaddr = sc->rringaddr+nNRCV+1;
314 	sc->setupaddr =	uballoc(0, sc->setup_pkt, sizeof(sc->setup_pkt), 0) & mask;
315 	/*
316 	 * init buffers and maps
317 	 */
318 	if (qe_ubainit(&sc->qeuba, ui->ui_ubanum,
319 	    sizeof (struct ether_header), (int)btoc(MAXPACKETSIZE), sc->buffers) == 0) {
320 		printf("qe%d: can't initialize\n", unit);
321 		sc->is_if.if_flags &= ~IFF_UP;
322 		return;
323 	}
324 	/*
325 	 * Init the buffer descriptors and indexes for each of the lists and
326 	 * loop them back to form a ring.
327 	 */
328 	for( i = 0 ; i < nNRCV ; i++ ){
329 		qeinitdesc( &sc->rring[i],
330 			sc->qeuba.ifu_r[i].ifrw_info & mask, MAXPACKETSIZE);
331 		sc->rring[i].qe_flag = sc->rring[i].qe_status1 = QE_NOTYET;
332 		sc->rring[i].qe_valid = 1;
333 	}
334 	qeinitdesc( &sc->rring[i], NULL, 0 );
335 
336 	sc->rring[i].qe_addr_lo = (short)sc->rringaddr;
337 	sc->rring[i].qe_addr_hi = (short)((int)sc->rringaddr >> 16);
338 	sc->rring[i].qe_chain = 1;
339 	sc->rring[i].qe_flag = sc->rring[i].qe_status1 = QE_NOTYET;
340 	sc->rring[i].qe_valid = 1;
341 
342 	for( i = 0 ; i <= nNXMT ; i++ )
343 		qeinitdesc( &sc->tring[i], NULL, 0 );
344 	i--;
345 
346 	sc->tring[i].qe_addr_lo = (short)sc->tringaddr;
347 	sc->tring[i].qe_addr_hi = (short)((int)sc->tringaddr >> 16);
348 	sc->tring[i].qe_chain = 1;
349 	sc->tring[i].qe_flag = sc->tring[i].qe_status1 = QE_NOTYET;
350 	sc->tring[i].qe_valid = 1;
351 
352 	sc->nxmit = sc->otindex = sc->tindex = sc->rindex = 0;
353 
354 	/*
355 	 * Take the interface out of reset, program the vector,
356 	 * enable interrupts, and tell the world we are up.
357 	 */
358 	s = splimp();
359 	addr->qe_vector = sc->qe_intvec;
360 	sc->addr = addr;
361 	if ( ifp->if_flags & IFF_LOOPBACK )
362 		addr->qe_csr = QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT | QE_ELOOP;
363 	else
364 		addr->qe_csr = QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT | QE_ILOOP;
365 	addr->qe_rcvlist_lo = (short)sc->rringaddr;
366 	addr->qe_rcvlist_hi = (short)((int)sc->rringaddr >> 16);
367 	ifp->if_flags |= IFF_UP | IFF_RUNNING;
368 	qesetup( sc );
369 	qestart( unit );
370 	sc->ztime = time.tv_sec;
371 	splx( s );
372 
373 }
374 
375 /*
376  * Start output on interface.
377  *
378  */
379 qestart(dev)
380 	dev_t dev;
381 {
382 	int unit = QEUNIT(dev);
383 	struct uba_device *ui = qeinfo[unit];
384 	register struct qe_softc *sc = &qe_softc[unit];
385 	register struct qedevice *addr;
386 	register struct qe_ring *rp;
387 	register index;
388 	struct mbuf *m, *m0;
389 	int buf_addr, len, j,  s;
390 
391 
392 	s = splimp();
393 	addr = (struct qedevice *)ui->ui_addr;
394 	/*
395 	 * The deqna doesn't look at anything but the valid bit
396 	 * to determine if it should transmit this packet. If you have
397 	 * a ring and fill it the device will loop indefinately on the
398 	 * packet and continue to flood the net with packets until you
399 	 * break the ring. For this reason we never queue more than n-1
400 	 * packets in the transmit ring.
401 	 *
402 	 * The microcoders should have obeyed their own defination of the
403 	 * flag and status words, but instead we have to compensate.
404 	 */
405 	for( index = sc->tindex;
406 		sc->tring[index].qe_valid == 0 && sc->nxmit < (nNXMT-1) ;
407 		sc->tindex = index = ++index % nNXMT){
408 		rp = &sc->tring[index];
409 		if( sc->setupqueued ) {
410 			buf_addr = sc->setupaddr;
411 			len = 128;
412 			rp->qe_setup = 1;
413 			sc->setupqueued = 0;
414 		} else {
415 			IF_DEQUEUE(&sc->is_if.if_snd, m);
416 			if( m == 0 ){
417 				splx(s);
418 				return;
419 			}
420 			buf_addr = sc->qeuba.ifu_w[index].x_ifrw.ifrw_info;
421 			len = qeput(&sc->qeuba, index, m);
422 		}
423 		/*
424 		 *  Does buffer end on odd byte ?
425 		 */
426 		if( len & 1 ) {
427 			len++;
428 			rp->qe_odd_end = 1;
429 		}
430 		if( len < MINDATA )
431 			len = MINDATA;
432 		rp->qe_buf_len = -(len/2);
433 		buf_addr &= mask;
434 		rp->qe_flag = rp->qe_status1 = QE_NOTYET;
435 		rp->qe_addr_lo = (short)buf_addr;
436 		rp->qe_addr_hi = (short)(buf_addr >> 16);
437 		rp->qe_eomsg = 1;
438 		rp->qe_flag = rp->qe_status1 = QE_NOTYET;
439 		rp->qe_valid = 1;
440 		sc->nxmit++;
441 		/*
442 		 * If the watchdog time isn't running kick it.
443 		 */
444 		sc->timeout=1;
445 		if( !qewatchrun++ )
446 			timeout(qewatch,0,QE_TIMEO);
447 
448 		/*
449 		 * See if the xmit list is invalid.
450 		 */
451 		if( addr->qe_csr & QE_XL_INVALID ) {
452 			buf_addr = (int)(sc->tringaddr+index);
453 			addr->qe_xmtlist_lo = (short)buf_addr;
454 			addr->qe_xmtlist_hi = (short)(buf_addr >> 16);
455 		}
456 		/*
457 		 * Accumulate statistics for DECnet
458 		 */
459 		if ((sc->ctrblk.est_bytesent + len) > sc->ctrblk.est_bytesent)
460 			sc->ctrblk.est_bytesent += len;
461 		if (sc->ctrblk.est_bloksent != 0xffffffff)
462 			sc->ctrblk.est_bloksent++;
463 	}
464 	splx( s );
465 }
466 
467 /*
468  * Ethernet interface interrupt processor
469  */
470 qeintr(unit)
471 	int unit;
472 {
473 	register struct qe_softc *sc = &qe_softc[unit];
474 	register struct ifnet *ifp = &sc->is_if;
475 	struct qedevice *addr = (struct qedevice *)qeinfo[unit]->ui_addr;
476 	int s, buf_addr, csr;
477 
478 	s = splimp();
479 	csr = addr->qe_csr;
480 	if ( ifp->if_flags & IFF_LOOPBACK )
481 		addr->qe_csr = QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT | QE_ELOOP;
482 	else
483 		addr->qe_csr = QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT | QE_ILOOP;
484 	if( csr & QE_RCV_INT )
485 		qerint( unit );
486 	if( csr & QE_XMIT_INT )
487 		qetint( unit );
488 	if( csr & QE_NEX_MEM_INT )
489 		panic("qe: Non existant memory interrupt");
490 
491 	if( addr->qe_csr & QE_RL_INVALID && sc->rring[sc->rindex].qe_status1 == QE_NOTYET ) {
492 		buf_addr = (int)&sc->rringaddr[sc->rindex];
493 		addr->qe_rcvlist_lo = (short)buf_addr;
494 		addr->qe_rcvlist_hi = (short)(buf_addr >> 16);
495 	}
496 	splx( s );
497 }
498 
499 /*
500  * Ethernet interface transmit interrupt.
501  */
502 
503 qetint(unit)
504 	int unit;
505 {
506 	register struct qe_softc *sc = &qe_softc[unit];
507 	register struct mbuf *mp, *mp0;
508 	register first, index;
509 	register struct qe_ring *rp;
510 	register struct ifrw *ifrw;
511 	register struct ifxmt *ifxp;
512 	struct ether_header *eh;
513 	int i, status1, status2, setupflag;
514 	short len;
515 
516 
517 	while( sc->otindex != sc->tindex && sc->tring[sc->otindex].qe_status1 != QE_NOTYET && sc->nxmit > 0 ) {
518 		/*
519 		 * Save the status words from the descriptor so that it can
520 		 * be released.
521 		 */
522 		rp = &sc->tring[sc->otindex];
523 		status1 = rp->qe_status1;
524 		status2 = rp->qe_status2;
525 		setupflag = rp->qe_setup;
526 		len = (-rp->qe_buf_len) * 2;
527 		if( rp->qe_odd_end )
528 			len++;
529 		/*
530 		 * Init the buffer descriptor
531 		 */
532 		bzero( rp, sizeof(struct qe_ring));
533 		if( --sc->nxmit == 0 )
534 			sc->timeout = 0;
535 		if( !setupflag ) {
536 			/*
537 			 * Do some statistics.
538 			 */
539 			sc->is_if.if_opackets++;
540 			sc->is_if.if_collisions += ( status1 & QE_CCNT ) >> 4;
541 			/*
542 			 * Accumulate DECnet statistics
543 			 */
544 			if (status1 & QE_CCNT) {
545 				if (((status1 & QE_CCNT) >> 4) == 1) {
546 					if (sc->ctrblk.est_single != 0xffffffff)
547 						sc->ctrblk.est_single++;
548 				} else {
549 					if (sc->ctrblk.est_multiple != 0xffffffff)
550 						sc->ctrblk.est_multiple++;
551 				}
552 			}
553 			if (status1 & QE_FAIL)
554 				if (sc->ctrblk.est_collis != 0xffff)
555 					sc->ctrblk.est_collis++;
556 			if( status1 & QE_ERROR ) {
557 				sc->is_if.if_oerrors++;
558 				if (sc->ctrblk.est_sendfail != 0xffff) {
559 					sc->ctrblk.est_sendfail++;
560 					if (status1 & QE_ABORT)
561 						sc->ctrblk.est_sendfail_bm |= 1;
562 					if (status1 & QE_NOCAR)
563 						sc->ctrblk.est_sendfail_bm |= 2;
564 				}
565 			}
566 			/*
567 			 * If this was a broadcast packet loop it
568 			 * back because the hardware can't hear it's own
569 			 * transmits and the rwho deamon expects to see them.
570 			 * This code will have to be expanded to include multi-
571 			 * cast if the same situation developes.
572 			 */
573 			ifxp = &sc->qeuba.ifu_w[sc->otindex];
574 			ifrw = &sc->qeuba.ifu_w[sc->otindex].x_ifrw;
575 			eh = (struct ether_header *)ifrw->ifrw_addr;
576 
577 /*
578  * This is a Kludge to do a fast check to see if the ethernet
579  * address is all 1's, the ethernet broadcast addr, and loop the
580  * packet back.
581  */
582 
583 #define QUAD(x) (*(long *)((x)->ether_dhost))
584 #define ESHORT(x)	(*(short *)(&((x)->ether_dhost[4])))
585 
586 			if(QUAD(eh) == -1 && ESHORT(eh) == -1){
587 				qeread(sc, ifrw, len, ifxp->x_xtofree);
588 				ifxp->x_xtofree =0;
589 			}else if( ifxp->x_xtofree ) {
590 				m_freem( ifxp->x_xtofree );
591 				ifxp->x_xtofree = 0;
592 			}
593 		}
594 		sc->otindex = ++sc->otindex % nNXMT;
595 	}
596 	qestart( unit );
597 }
598 
599 /*
600  * Ethernet interface receiver interrupt.
601  * If can't determine length from type, then have to drop packet.
602  * Othewise decapsulate packet based on type and pass to type specific
603  * higher-level input routine.
604  */
605 qerint(unit)
606 	int unit;
607 {
608 	register struct qe_softc *sc = &qe_softc[unit];
609 	register struct ifnet *ifp = &sc->is_if;
610 	register struct qe_ring *rp;
611 	int len, status1, status2;
612 	int bufaddr;
613 	struct ether_header *eh;
614 
615 	/*
616 	 * Traverse the receive ring looking for packets to pass back.
617 	 * The search is complete when we find a descriptor not in use.
618 	 *
619 	 * As in the transmit case the deqna doesn't honor it's own protocols
620 	 * so there exists the possibility that the device can beat us around
621 	 * the ring. The proper way to guard against this is to insure that
622 	 * there is always at least one invalid descriptor. We chose instead
623 	 * to make the ring large enough to minimize the problem. With a ring
624 	 * size of 4 we haven't been able to see the problem. To be safe we
625 	 * doubled that to 8.
626 	 *
627 	 */
628 	for( ; sc->rring[sc->rindex].qe_status1 != QE_NOTYET ; sc->rindex = ++sc->rindex % nNRCV ){
629 		rp = &sc->rring[sc->rindex];
630 		status1 = rp->qe_status1;
631 		status2 = rp->qe_status2;
632 		bzero( rp, sizeof(struct qe_ring));
633 		if( (status1 & QE_MASK) == QE_MASK )
634 			panic("qe: chained packet");
635 		len = ((status1 & QE_RBL_HI) | (status2 & QE_RBL_LO));
636 		if( ! (ifp->if_flags & IFF_LOOPBACK) )
637 			len += 60;
638 		sc->is_if.if_ipackets++;
639 
640 		if( ! (ifp->if_flags & IFF_LOOPBACK) ) {
641 			if( status1 & QE_ERROR ) {
642 				sc->is_if.if_ierrors++;
643 				if ((status1 & (QE_OVF | QE_CRCERR | QE_FRAME)) &&
644 					(sc->ctrblk.est_recvfail != 0xffff)) {
645 					sc->ctrblk.est_recvfail++;
646 					if (status1 & QE_OVF)
647 						sc->ctrblk.est_recvfail_bm |= 4;
648 					if (status1 & QE_CRCERR)
649 						sc->ctrblk.est_recvfail_bm |= 1;
650 					if (status1 & QE_FRAME)
651 						sc->ctrblk.est_recvfail_bm |= 2;
652 				}
653 			} else {
654 				/*
655 				 * We don't process setup packets.
656 				 */
657 				if( !(status1 & QE_ESETUP) )
658 					qeread(sc, &sc->qeuba.ifu_r[sc->rindex],
659 						len - sizeof(struct ether_header),0);
660 			}
661 		} else {
662 			eh = (struct ether_header *)sc->qeuba.ifu_r[sc->rindex].ifrw_addr;
663 			if ( bcmp(eh->ether_dhost, sc->is_addr, 6) == NULL )
664 					qeread(sc, &sc->qeuba.ifu_r[sc->rindex],
665 						len - sizeof(struct ether_header),0);
666 		}
667 		/*
668 		 * Return the buffer to the ring
669 		 */
670 		bufaddr = sc->qeuba.ifu_r[sc->rindex].ifrw_info & mask;
671 		rp->qe_buf_len = -((MAXPACKETSIZE)/2);
672 		rp->qe_addr_lo = (short)bufaddr;
673 		rp->qe_addr_hi = (short)((int)bufaddr >> 16);
674 		rp->qe_flag = rp->qe_status1 = QE_NOTYET;
675 		rp->qe_valid = 1;
676 	}
677 }
678 /*
679  * Ethernet output routine.
680  * Encapsulate a packet of type family for the local net.
681  * Use trailer local net encapsulation if enough data in first
682  * packet leaves a multiple of 512 bytes of data in remainder.
683  */
684 qeoutput(ifp, m0, dst)
685 	struct ifnet *ifp;
686 	struct mbuf *m0;
687 	struct sockaddr *dst;
688 {
689 	int type, s, error;
690 	u_char edst[6];
691 	struct in_addr idst;
692 	struct protosw *pr;
693 	register struct qe_softc *is = &qe_softc[ifp->if_unit];
694 	register struct mbuf *m = m0;
695 	register struct ether_header *eh;
696 	register int off;
697 
698 	switch (dst->sa_family) {
699 
700 #ifdef INET
701 	case AF_INET:
702 		if (nINET == 0) {
703 			printf("qe%d: can't handle af%d\n", ifp->if_unit,
704 				dst->sa_family);
705 			error = EAFNOSUPPORT;
706 			goto bad;
707 		}
708 		idst = ((struct sockaddr_in *)dst)->sin_addr;
709 		if (!arpresolve(&is->is_ac, m, &idst, edst))
710 			return (0);	/* if not yet resolved */
711 		off = ntohs((u_short)mtod(m, struct ip *)->ip_len) - m->m_len;
712 		/* need per host negotiation */
713 		if ((ifp->if_flags & IFF_NOTRAILERS) == 0)
714 		if (off > 0 && (off & 0x1ff) == 0 &&
715 			m->m_off >= MMINOFF + 2 * sizeof (u_short)) {
716 			type = ETHERTYPE_TRAIL + (off>>9);
717 			m->m_off -= 2 * sizeof (u_short);
718 			m->m_len += 2 * sizeof (u_short);
719 			*mtod(m, u_short *) = htons((u_short)ETHERTYPE_IP);
720 			*(mtod(m, u_short *) + 1) = htons((u_short)m->m_len);
721 			goto gottraqeertype;
722 		}
723 		type = ETHERTYPE_IP;
724 		off = 0;
725 		goto gottype;
726 #endif
727 
728 	case AF_UNSPEC:
729 		eh = (struct ether_header *)dst->sa_data;
730  		bcopy((caddr_t)eh->ether_dhost, (caddr_t)edst, sizeof (edst));
731 		type = eh->ether_type;
732 		goto gottype;
733 
734 	default:
735 		/*
736 		 * Try to find other address families and call protocol
737 		 * specific output routine.
738 		 */
739 		if (pr = iffamily_to_proto(dst->sa_family)) {
740 			(*pr->pr_ifoutput)(ifp, m0, dst, &type, (char *)edst);
741 			goto gottype;
742 		} else {
743 			printf("qe%d: can't handle af%d\n", ifp->if_unit,
744 				dst->sa_family);
745 			error = EAFNOSUPPORT;
746 			goto bad;
747 		}
748 	}
749 
750 gottraqeertype:
751 	/*
752 	 * Packet to be sent as trailer: move first packet
753 	 * (control information) to end of chain.
754 	 */
755 	while (m->m_next)
756 		m = m->m_next;
757 	m->m_next = m0;
758 	m = m0->m_next;
759 	m0->m_next = 0;
760 	m0 = m;
761 
762 gottype:
763 	/*
764 	 * Add local net header.  If no space in first mbuf,
765 	 * allocate another.
766 	 */
767 	if (m->m_off > MMAXOFF || MMINOFF + sizeof (struct ether_header) > m->m_off) {
768 		m = m_get(M_DONTWAIT, MT_HEADER);
769 		if (m == 0) {
770 			error = ENOBUFS;
771 			goto bad;
772 		}
773 		m->m_next = m0;
774 		m->m_off = MMINOFF;
775 		m->m_len = sizeof (struct ether_header);
776 	} else {
777 		m->m_off -= sizeof (struct ether_header);
778 		m->m_len += sizeof (struct ether_header);
779 	}
780 	eh = mtod(m, struct ether_header *);
781 	eh->ether_type = htons((u_short)type);
782  	bcopy((caddr_t)edst, (caddr_t)eh->ether_dhost, sizeof (edst));
783  	bcopy((caddr_t)is->is_addr, (caddr_t)eh->ether_shost, sizeof (is->is_addr));
784 
785 	/*
786 	 * Queue message on interface, and start output if interface
787 	 * not yet active.
788 	 */
789 	s = splimp();
790 	if (IF_QFULL(&ifp->if_snd)) {
791 		IF_DROP(&ifp->if_snd);
792 		splx(s);
793 		m_freem(m);
794 		return (ENOBUFS);
795 	}
796 	IF_ENQUEUE(&ifp->if_snd, m);
797 	qestart(ifp->if_unit);
798 	splx(s);
799 	return (0);
800 
801 bad:
802 	m_freem(m0);
803 	return (error);
804 }
805 
806 
807 /*
808  * Process an ioctl request.
809  */
810 qeioctl(ifp, cmd, data)
811 	register struct ifnet *ifp;
812 	int cmd;
813 	caddr_t data;
814 {
815 	struct qe_softc *sc = &qe_softc[ifp->if_unit];
816 	struct uba_device *ui = qeinfo[ifp->if_unit];
817 	struct qedevice *addr = (struct qedevice *)ui->ui_addr;
818 	struct sockaddr *sa;
819 	struct sockaddr_in *sin;
820 	struct ifreq *ifr = (struct ifreq *)data;
821 	struct ifdevea *ifd = (struct ifdevea *)data;
822 	struct ctrreq *ctr = (struct ctrreq *)data;
823 	struct protosw *pr;
824 	struct ifaddr *ifa = (struct ifaddr *)data;
825 	int i,j = -1,s = splimp(), error = 0;
826 
827 	switch (cmd) {
828 
829 	case SIOCENABLBACK:
830 		printf("qe%d: internal loopback enable requested\n", ifp->if_unit);
831                 ifp->if_flags |= IFF_LOOPBACK;
832 #ifdef notdef
833 		if((ifp->if_flags |= IFF_LOOPBACK) & IFF_RUNNING)
834 			if_rtinit(ifp, -1);
835 #endif
836 		qerestart( sc );
837 		break;
838 
839 	case SIOCDISABLBACK:
840 		printf("qe%d: internal loopback disable requested\n", ifp->if_unit);
841                 ifp->if_flags &= ~IFF_LOOPBACK;
842 #ifdef notdef
843 		if((ifp->if_flags &= ~IFF_LOOPBACK) & IFF_RUNNING)
844 			if_rtinit(ifp, -1);
845 #endif
846 		qerestart( sc );
847 		qeinit( ifp->if_unit );
848 		break;
849 
850 	case SIOCRPHYSADDR:
851 		bcopy(sc->is_addr, ifd->current_pa, 6);
852 		for( i = 0; i < 6; i++ )
853 			ifd->default_pa[i] = addr->qe_sta_addr[i] & 0xff;
854 		break;
855 
856 	case SIOCSPHYSADDR:
857 		bcopy(ifr->ifr_addr.sa_data,sc->is_addr,MULTISIZE);
858 		for ( i = 0; i < 6; i++ )
859 			sc->setup_pkt[i][1] = sc->is_addr[i];
860 		if (ifp->if_flags & IFF_RUNNING) {
861 			qesetup( sc );
862 #ifdef notdef
863 			if_rtinit(ifp, -1);
864 #endif
865 		}
866 		qeinit(ifp->if_unit);
867 		break;
868 
869 	case SIOCDELMULTI:
870 	case SIOCADDMULTI:
871 		if (cmd == SIOCDELMULTI) {
872 			for (i = 0; i < NMULTI; i++)
873 				if (bcmp(&sc->multi[i],ifr->ifr_addr.sa_data,MULTISIZE) == 0) {
874 					if (--sc->muse[i] == 0)
875 						bcopy(qunused_multi,&sc->multi[i],MULTISIZE);
876 				}
877 		} else {
878 			for (i = 0; i < NMULTI; i++) {
879 				if (bcmp(&sc->multi[i],ifr->ifr_addr.sa_data,MULTISIZE) == 0) {
880 					sc->muse[i]++;
881 					goto done;
882 				}
883 				if (bcmp(&sc->multi[i],qunused_multi,MULTISIZE) == 0)
884 					j = i;
885 			}
886 			if (j == -1) {
887 				printf("qe%d: SIOCADDMULTI failed, multicast list full: %d\n",ui->ui_unit,NMULTI);
888 				error = ENOBUFS;
889 				goto done;
890 			}
891 			bcopy(ifr->ifr_addr.sa_data, &sc->multi[j], MULTISIZE);
892 			sc->muse[j]++;
893 		}
894 		for ( i = 0; i < 6; i++ )
895 			sc->setup_pkt[i][1] = sc->is_addr[i];
896 		if (ifp->if_flags & IFF_RUNNING) {
897 			qesetup( sc );
898 		}
899 		break;
900 
901 	case SIOCRDCTRS:
902 	case SIOCRDZCTRS:
903 		ctr->ctr_ether = sc->ctrblk;
904 		ctr->ctr_type = CTR_ETHER;
905 		ctr->ctr_ether.est_seconds = (time.tv_sec - sc->ztime) > 0xfffe ? 0xffff : (time.tv_sec - sc->ztime);
906 		if (cmd == SIOCRDZCTRS) {
907 			sc->ztime = time.tv_sec;
908 			bzero(&sc->ctrblk, sizeof(struct estat));
909 		}
910 		break;
911 
912 	case SIOCSIFADDR:
913 		ifp->if_flags |= IFF_UP;
914 		qeinit(ifp->if_unit);
915 		switch(ifa->ifa_addr.sa_family) {
916 #ifdef INET
917 		case AF_INET:
918 			((struct arpcom *)ifp)->ac_ipaddr =
919 				IA_SIN(ifa)->sin_addr;
920 			arpwhohas((struct arpcom *)ifp, &IA_SIN(ifa)->sin_addr);
921 			break;
922 #endif
923 
924 		default:
925 			if (pr=iffamily_to_proto(ifa->ifa_addr.sa_family)) {
926 				error = (*pr->pr_ifioctl)(ifp, cmd, data);
927 			}
928 			break;
929 		}
930 		break;
931 	default:
932 		error = EINVAL;
933 
934 	}
935 done:	splx(s);
936 	return (error);
937 }
938 
939 
940 
941 /*
942  * Initialize a ring descriptor with mbuf allocation side effects
943  */
944 qeinitdesc( rp, buf, len )
945 	register struct qe_ring *rp;
946 	char *buf; 			/* mapped address	*/
947 	int len;
948 {
949 	/*
950 	 * clear the entire descriptor
951 	 */
952 	bzero( rp, sizeof(struct qe_ring));
953 
954 	if( len ) {
955 		rp->qe_buf_len = -(len/2);
956 		rp->qe_addr_lo = (short)buf;
957 		rp->qe_addr_hi = (short)((int)buf >> 16);
958 	}
959 }
960 /*
961  * Build a setup packet - the physical address will already be present
962  * in first column.
963  */
964 qesetup( sc )
965 struct qe_softc *sc;
966 {
967 	int i, j, offset = 0, next = 3;
968 
969 	/*
970 	 * Copy the target address to the rest of the entries in this row.
971 	 */
972 	 for ( j = 0; j < 6 ; j++ )
973 		for ( i = 2 ; i < 8 ; i++ )
974 			sc->setup_pkt[j][i] = sc->setup_pkt[j][1];
975 	/*
976 	 * Duplicate the first half.
977 	 */
978 	bcopy(sc->setup_pkt, sc->setup_pkt[8], 64);
979 	/*
980 	 * Fill in the broadcast address.
981 	 */
982 	for ( i = 0; i < 6 ; i++ )
983 		sc->setup_pkt[i][2] = 0xff;
984 	/*
985 	 * If the device structure is available fill in the multicast address
986 	 * in the rest of the setup packet.
987 	 */
988 	for ( i = 0; i < NMULTI; i++ ) {
989 		if (bcmp(&sc->multi[i],qunused_multi,MULTISIZE) != 0) {
990 			for ( j = 0; j < 6; j++ )
991 				sc->setup_pkt[offset+j][next] = sc->multi[i].qm_char[j];
992 			if (++next == 8) {
993 				next = 1;
994 				offset = 8;
995 			}
996 		}
997 	}
998 	sc->setupqueued++;
999 }
1000 /*
1001  * Routines supporting Q-BUS network interfaces.
1002  */
1003 
1004 /*
1005  * Init Q-BUS for interface on uban whose headers of size hlen are to
1006  * end on a page boundary.  We allocate a Q-BUS map register for the page
1007  * with the header, and nmr more Q-BUS map registers for i/o on the adapter,
1008  * doing this for each receive and transmit buffer.  We also
1009  * allocate page frames in the mbuffer pool for these pages.
1010  */
qe_ubainit(ifu, uban, hlen, nmr, mptr)
	register struct qeuba *ifu;
	int uban, hlen, nmr;
	char *mptr;
{
	register caddr_t cp, dp;
	register struct ifrw *ifrw;
	register struct ifxmt *ifxp;
	int i, ncl;

	/*
	 * Returns 1 on success, 0 on failure (no memory or no map
	 * registers).  ncl is the number of clusters each buffer needs:
	 * nmr data pages plus one cluster's worth of header slack.
	 */
	ncl = clrnd(nmr + CLSIZE) / CLSIZE;
	if (ifu->ifu_r[0].ifrw_addr)
		/*
		 * If the first read buffer has a non-zero
		 * address, it means we have already allocated core
		 */
		cp = ifu->ifu_r[0].ifrw_addr - (CLBYTES - hlen);
	else {
		/*
		 * First-time setup: carve per-buffer addresses out of the
		 * core supplied in mptr.  Buffers are laid out so each
		 * header of size hlen ends on a page (cluster) boundary.
		 */
		cp = mptr;
		if (cp == 0)
			return (0);
		ifu->ifu_hlen = hlen;
		ifu->ifu_uban = uban;
		ifu->ifu_uba = uba_hd[uban].uh_uba;
		dp = cp + CLBYTES - hlen;
		for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[nNRCV]; ifrw++) {
			ifrw->ifrw_addr = dp;
			dp += ncl * CLBYTES;
		}
		for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[nNXMT]; ifxp++) {
			ifxp->x_ifrw.ifrw_addr = dp;
			dp += ncl * CLBYTES;
		}
	}
	/* allocate for receive ring */
	for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[nNRCV]; ifrw++) {
		if (qe_ubaalloc(ifu, ifrw, nmr) == 0) {
			struct ifrw *rw;

			/* back out the receive maps allocated so far */
			for (rw = ifu->ifu_r; rw < ifrw; rw++)
				ubarelse(ifu->ifu_uban, &rw->ifrw_info);
			goto bad;
		}
	}
	/* and now transmit ring */
	for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[nNXMT]; ifxp++) {
		ifrw = &ifxp->x_ifrw;
		if (qe_ubaalloc(ifu, ifrw, nmr) == 0) {
			struct ifxmt *xp;

			/* back out all transmit and receive maps */
			for (xp = ifu->ifu_w; xp < ifxp; xp++)
				ubarelse(ifu->ifu_uban, &xp->x_ifrw.ifrw_info);
			for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[nNRCV]; ifrw++)
				ubarelse(ifu->ifu_uban, &ifrw->ifrw_info);
			goto bad;
		}
		/* save a pristine copy of the map registers for qeput() */
		for (i = 0; i < nmr; i++)
			ifxp->x_map[i] = ifrw->ifrw_mr[i];
		ifxp->x_xswapd = 0;
	}
	return (1);
bad:
	/* release the core and mark "not allocated" for the next attempt */
	m_pgfree(cp, nNTOT * ncl);
	ifu->ifu_r[0].ifrw_addr = 0;
	return(0);
}
1077 
1078 /*
1079  * Setup either a ifrw structure by allocating Q-BUS map registers,
1080  * possibly a buffered data path, and initializing the fields of
1081  * the ifrw structure to minimize run-time overhead.
1082  */
1083 static
1084 qe_ubaalloc(ifu, ifrw, nmr)
1085 	struct qeuba *ifu;
1086 	register struct ifrw *ifrw;
1087 	int nmr;
1088 {
1089 	register int info;
1090 
1091 	info = uballoc(ifu->ifu_uban, ifrw->ifrw_addr,
1092 			nmr*NBPG + ifu->ifu_hlen, ifu->ifu_flags);
1093 	if (info == 0){
1094 		return (0);
1095 	}
1096 	ifrw->ifrw_info = info;
1097 	ifrw->ifrw_bdp = UBAI_BDP(info);
1098 	ifrw->ifrw_proto = UBAMR_MRV | (UBAI_BDP(info) << UBAMR_DPSHIFT);
1099 	ifrw->ifrw_mr = &ifu->ifu_uba->uba_map[UBAI_MR(info) + 1];
1100 	return (1);
1101 }
1102 
1103 /*
1104  * Pull read data off a interface.
1105  * Len is length of data, with local net header stripped.
1106  * Off is non-zero if a trailer protocol was used, and
1107  * gives the offset of the trailer information.
1108  * We copy the trailer information and then all the normal
1109  * data into mbufs.  When full cluster sized units are present
1110  * on the interface on cluster boundaries we can get them more
1111  * easily by remapping, and take advantage of this here.
1112  */
struct mbuf *
qeget(ifu, ifrw, totlen, off0)
	register struct qeuba *ifu;
	register struct ifrw *ifrw;
	int totlen, off0;
{
	/* top: head of the chain being built; mp: link point for next mbuf */
	struct mbuf *top, **mp, *m;
	int off = off0, len;
	/* cp: current read position in the mapped buffer, past the header */
	register caddr_t cp = ifrw->ifrw_addr + ifu->ifu_hlen;

	top = 0;
	mp = &top;
	while (totlen > 0) {
		MGET(m, M_DONTWAIT, MT_DATA);
		if (m == 0)
			goto bad;
		if (off) {
			/* trailer present: copy the trailer region first */
			len = totlen - off;
			cp = ifrw->ifrw_addr + ifu->ifu_hlen + off;
		} else
			len = totlen;
		if (len >= CLBYTES) {
			struct mbuf *p;
			struct pte *cpte, *ppte;
			int x, *ip, i;

			MCLGET(m, p);
			if (p == 0)
				goto nopage;
			len = m->m_len = CLBYTES;
			/* MicroVAX I or unaligned source: must copy the data */
			if(cpu == MVAX_I || !claligned(cp))
				goto copy;

			/*
			 * Switch pages mapped to Q-BUS with new page p,
			 * as quick form of copy.  Remap Q-BUS and invalidate.
			 */
			cpte = &Mbmap[mtocl(cp)*CLSIZE];
			ppte = &Mbmap[mtocl(p)*CLSIZE];
			x = btop(cp - ifrw->ifrw_addr);
			ip = (int *)&ifrw->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++) {
				struct pte t;
				/* exchange the two PTEs, then point the
				 * Q-bus map register at the fresh page */
				t = *ppte; *ppte++ = *cpte; *cpte = t;
				*ip++ =
				    cpte++->pg_pfnum|ifrw->ifrw_proto;
				/* flush stale translations for both pages */
				mtpr(TBIS, cp);
				cp += NBPG;
				mtpr(TBIS, (caddr_t)p);
				p += NBPG / sizeof (*p);
			}
			goto nocopy;
		}
nopage:
		/* small remainder: use the mbuf's internal data area */
		m->m_len = MIN(MLEN, len);
		m->m_off = MMINOFF;
copy:
		bcopy(cp, mtod(m, caddr_t), (unsigned)m->m_len);
		cp += m->m_len;
nocopy:
		*mp = m;
		mp = &m->m_next;
		if (off) {
			/* sort of an ALGOL-W style for statement... */
			/* once the trailer is consumed, wrap around and
			 * copy the leading data (off0 bytes) */
			off += m->m_len;
			if (off == totlen) {
				cp = ifrw->ifrw_addr + ifu->ifu_hlen;
				off = 0;
				totlen = off0;
			}
		} else
			totlen -= m->m_len;
	}
	return (top);
bad:
	/* allocation failed part way; release what was built */
	m_freem(top);
	return (0);
}
1191 
1192 /*
1193  * Map a chain of mbufs onto a network interface
1194  * in preparation for an i/o operation.
1195  * The argument chain of mbufs includes the local network
1196  * header which is copied to be in the mapped, aligned
1197  * i/o space.
1198  */
qeput(ifu, n, m)
	struct qeuba *ifu;
	int n;
	register struct mbuf *m;
{
	register caddr_t cp;
	register struct ifxmt *ifxp;
	register struct ifrw *ifrw;
	register int i;
	int xswapd = 0;		/* bitmask of clusters mapped out this call */
	int x, cc, t;
	caddr_t dp;

	/* returns the total byte count (cc) staged for transmit buffer n */
	ifxp = &ifu->ifu_w[n];
	ifrw = &ifxp->x_ifrw;
	cp = ifrw->ifrw_addr;
	/* remember the chain so it can be freed at transmit completion */
	ifxp->x_xtofree = m;
	while (m) {
		dp = mtod(m, char *);
		if (cpu != MVAX_I && claligned(cp) && claligned(dp) && m->m_len == CLBYTES) {
			/*
			 * Full aligned cluster: avoid the copy by pointing
			 * the Q-bus map registers straight at the mbuf's
			 * pages, and record the cluster in xswapd.
			 */
			struct pte *pte; int *ip;
			pte = &Mbmap[mtocl(dp)*CLSIZE];
			x = btop(cp - ifrw->ifrw_addr);
			ip = (int *)&ifrw->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++)
				*ip++ =
				    ifrw->ifrw_proto | pte++->pg_pfnum;
			xswapd |= 1 << (x>>(CLSHIFT-PGSHIFT));
			cp += m->m_len;
		} else {
			/* partial or unaligned data must be copied */
			bcopy(mtod(m, caddr_t), cp, (unsigned)m->m_len);
			cp += m->m_len;
		}
		m = m->m_next;
	}

	/*
	 * Xswapd is the set of clusters we just mapped out.  Ifxp->x_xswapd
	 * is the set of clusters mapped out from before.  We compute
	 * the number of clusters involved in this operation in x.
	 * Clusters mapped out before and involved in this operation
	 * should be unmapped so original pages will be accessed by the device.
	 */
	cc = cp - ifrw->ifrw_addr;
	x = ((cc - ifu->ifu_hlen) + CLBYTES - 1) >> CLSHIFT;
	ifxp->x_xswapd &= ~xswapd;
	while (i = ffs(ifxp->x_xswapd)) {
		i--;
		if (i >= x)
			break;
		ifxp->x_xswapd &= ~(1<<i);
		/* restore the saved map registers for this cluster */
		i *= CLSIZE;
		for (t = 0; t < CLSIZE; t++) {
			ifrw->ifrw_mr[i] = ifxp->x_map[i];
			i++;
		}
	}
	ifxp->x_xswapd |= xswapd;
	return (cc);
}
1259 /*
1260  * Pass a packet to the higher levels.
1261  * We deal with the trailer protocol here.
1262  */
1263 qeread(sc, ifrw, len, swloop)
1264 	register struct qe_softc *sc;
1265 	struct ifrw *ifrw;
1266 	int len;
1267 	struct mbuf *swloop;
1268 {
1269 	struct ether_header *eh, swloop_eh;
1270     	struct mbuf *m, *swloop_tmp1, *swloop_tmp2;
1271 	struct protosw *pr;
1272 	int off, resid;
1273 	struct ifqueue *inq;
1274 
1275 	/*
1276 	 * Deal with trailer protocol: if type is INET trailer
1277 	 * get true type from first 16-bit word past data.
1278 	 * Remember that type was trailer by setting off.
1279 	 */
1280 
1281 
1282 	if (swloop) {
1283 		eh = mtod(swloop, struct ether_header *);
1284 		swloop_eh = *eh;
1285 		eh = &swloop_eh;
1286 		if ( swloop->m_len > sizeof(struct ether_header))
1287 			m_adj(swloop, sizeof(struct ether_header));
1288 		else {
1289 			MFREE(swloop, swloop_tmp1);
1290 			if ( ! swloop_tmp1 )
1291 				return;
1292 			else
1293 				swloop = swloop_tmp1;
1294 		}
1295 	} else
1296 		eh = (struct ether_header *)ifrw->ifrw_addr;
1297 
1298 
1299 	eh = (struct ether_header *)ifrw->ifrw_addr;
1300 	eh->ether_type = ntohs((u_short)eh->ether_type);
1301 #define	qedataaddr(eh, off, type)	((type)(((caddr_t)((eh)+1)+(off))))
1302 	if (eh->ether_type >= ETHERTYPE_TRAIL &&
1303 	    eh->ether_type < ETHERTYPE_TRAIL+ETHERTYPE_NTRAILER) {
1304 		off = (eh->ether_type - ETHERTYPE_TRAIL) * 512;
1305 		if (off >= ETHERMTU)
1306 			return;		/* sanity */
1307 		if (swloop) {
1308 			struct mbuf *mprev, *m0 = swloop;
1309 /* need to check this against off */
1310 			mprev = m0;
1311 			while (swloop->m_next){/*real header at end of chain*/
1312 				mprev = swloop;
1313 				swloop = swloop->m_next;
1314 			}
1315 			/* move to beginning of chain */
1316 			mprev->m_next = 0;
1317 			swloop->m_next = m0;
1318 			eh->ether_type = ntohs( *mtod(swloop, u_short *));
1319 		} else {
1320 		        eh->ether_type = ntohs(*qedataaddr(eh,off, u_short *));
1321 			resid = ntohs(*(qedataaddr(eh, off+2, u_short *)));
1322 			if (off + resid > len)
1323 			     return;		/* sanity */
1324 			len = off + resid;
1325 		}
1326 	} else {
1327 		off = 0;
1328 	}
1329 	if (len == 0)
1330 		return;
1331 
1332 	/*
1333 	 * Pull packet off interface.  Off is nonzero if packet
1334 	 * has trailing header; qeget will then force this header
1335 	 * information to be at the front, but we still have to drop
1336 	 * the type and length which are at the front of any trailer data.
1337 	 */
1338 	if (swloop) {
1339 		m = m_copy(swloop, 0, M_COPYALL);
1340 		m_freem(swloop);
1341 	} else {
1342 		m = qeget(&sc->qeuba, ifrw, len, off);
1343 	}
1344 
1345 	if (m == 0)
1346 		return;
1347 
1348 	if (off) {
1349 		m->m_off += 2 * sizeof (u_short);
1350 		m->m_len -= 2 * sizeof (u_short);
1351 	}
1352 
1353 
1354 	/*
1355 	 * Accumulate stats for DECnet
1356 	 */
1357 	sc->ctrblk.est_bytercvd += m->m_len;
1358 	sc->ctrblk.est_blokrcvd++;
1359 
1360 
1361 	switch (eh->ether_type) {
1362 
1363 #ifdef INET
1364 	case ETHERTYPE_IP:
1365 		if (nINET==0) {
1366 			m_freem(m);
1367 			return;
1368 		}
1369 		schednetisr(NETISR_IP);
1370 		inq = &ipintrq;
1371 		break;
1372 
1373 	case ETHERTYPE_ARP:
1374 		if (nETHER==0) {
1375 			m_freem(m);
1376 			return;
1377 		}
1378 		arpinput(&sc->is_ac, m);
1379 		return;
1380 #endif
1381 	default:
1382 		/*
1383 		 * see if other protocol families defined
1384 		 * and call protocol specific routines.
1385 		 * If no other protocols defined then dump message.
1386 		 */
1387 		if (pr=iftype_to_proto(eh->ether_type))  {
1388 			if ((m = (struct mbuf *)(*pr->pr_ifinput)(m, &sc->is_if, &inq, eh)) == 0)
1389 				return;
1390 		} else {
1391 			if (sc->ctrblk.est_unrecog != 0xffff)
1392 				sc->ctrblk.est_unrecog++;
1393 			m_freem(m);
1394 			return;
1395 		}
1396 	}
1397 
1398 	if (IF_QFULL(inq)) {
1399 		IF_DROP(inq);
1400 		m_freem(m);
1401 		return;
1402 	}
1403 	IF_ENQUEUE(inq, m);
1404 }
1405 /*
1406  * Watchdog timer routine. There is a condition in the hardware that
1407  * causes the board to lock up under heavy load. This routine detects
1408  * the hang up and restarts the device.
1409  */
1410 qewatch()
1411 {
1412 	register struct qe_softc *sc;
1413 	register int i;
1414 	int inprogress=0;
1415 
1416 	for( i=0 ; i<nNQE ; i++ ) {
1417 		sc = &qe_softc[i];
1418 		if( sc->timeout )
1419 			if( ++sc->timeout > 3 )
1420 				qerestart( sc );
1421 			else
1422 				inprogress++;
1423 	}
1424 	if( inprogress ){
1425 		timeout(qewatch, 0, QE_TIMEO);
1426 		qewatchrun++;
1427 	} else
1428 		qewatchrun=0;
1429 }
1430 /*
1431  * Restart for board lockup problem.
1432  */
int qe_restarts;		/* count of lockup restarts; pokeable with adb */
int qe_show_restarts = 0;	/* 1 ==> log with printf, 0 ==> mprintf */
qerestart( sc )
	register struct qe_softc *sc;
{
	register struct ifnet *ifp = &sc->is_if;
	register struct qedevice *addr = sc->addr;
	register struct qe_ring *rp;
	register i;

	qe_restarts++;
	/* reset the chip, cancel the pending watchdog count, and
	 * queue a fresh setup packet */
	addr->qe_csr = QE_RESET;
	sc->timeout = 0;
	qesetup( sc );
	/* mark every transmit descriptor empty */
	for(i = 0, rp = sc->tring; i<nNXMT ; rp++, i++ ){
		rp->qe_flag = rp->qe_status1 = QE_NOTYET;
		rp->qe_valid = 0;
	}
	/* reset all ring indices to the beginning */
	sc->nxmit = sc->otindex = sc->tindex = sc->rindex = 0;
	/* re-enable receiver and interrupts, selecting external or
	 * internal loopback per the interface flags */
	if ( ifp->if_flags & IFF_LOOPBACK )
		addr->qe_csr = QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT | QE_ELOOP;
	else
		addr->qe_csr = QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT | QE_ILOOP;
	/* hand the device the (Q-bus) address of the receive ring */
	addr->qe_rcvlist_lo = (short)sc->rringaddr;
	addr->qe_rcvlist_hi = (short)((int)sc->rringaddr >> 16);
	/* recover the unit number from the softc pointer */
	for( i = 0 ; sc != &qe_softc[i] ; i++ )
		;
	qestart( i );
	if (qe_show_restarts)
		printf("qerestart: restarted qe%d %d\n", i, qe_restarts);
	else
		mprintf("qerestart: restarted qe%d %d\n", i, qe_restarts);
}
1466 #endif
1467