xref: /netbsd-src/sys/dev/pci/if_dge.c (revision 7f21db1c0118155e0dd40b75182e30c589d9f63e)
1 /*	$NetBSD: if_dge.c,v 1.29 2010/01/19 22:07:00 pooka Exp $ */
2 
3 /*
4  * Copyright (c) 2004, SUNET, Swedish University Computer Network.
5  * All rights reserved.
6  *
7  * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	SUNET, Swedish University Computer Network.
21  * 4. The name of SUNET may not be used to endorse or promote products
22  *    derived from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL SUNET
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 /*
38  * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
39  * All rights reserved.
40  *
41  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
42  *
43  * Redistribution and use in source and binary forms, with or without
44  * modification, are permitted provided that the following conditions
45  * are met:
46  * 1. Redistributions of source code must retain the above copyright
47  *    notice, this list of conditions and the following disclaimer.
48  * 2. Redistributions in binary form must reproduce the above copyright
49  *    notice, this list of conditions and the following disclaimer in the
50  *    documentation and/or other materials provided with the distribution.
51  * 3. All advertising materials mentioning features or use of this software
52  *    must display the following acknowledgement:
53  *	This product includes software developed for the NetBSD Project by
54  *	Wasabi Systems, Inc.
55  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
56  *    or promote products derived from this software without specific prior
57  *    written permission.
58  *
59  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
60  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
61  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
62  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
63  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
64  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
65  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
66  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
67  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
68  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
69  * POSSIBILITY OF SUCH DAMAGE.
70  */
71 
72 /*
73  * Device driver for the Intel 82597EX Ten Gigabit Ethernet controller.
74  *
75  * TODO (in no specific order):
76  *	HW VLAN support.
77  *	TSE offloading (needs kernel changes...)
78  *	RAIDC (receive interrupt delay adaptation)
79  *	Use memory > 4GB.
80  */
81 
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: if_dge.c,v 1.29 2010/01/19 22:07:00 pooka Exp $");
84 
85 #include "rnd.h"
86 
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/callout.h>
90 #include <sys/mbuf.h>
91 #include <sys/malloc.h>
92 #include <sys/kernel.h>
93 #include <sys/socket.h>
94 #include <sys/ioctl.h>
95 #include <sys/errno.h>
96 #include <sys/device.h>
97 #include <sys/queue.h>
98 
99 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
100 
101 #if NRND > 0
102 #include <sys/rnd.h>
103 #endif
104 
105 #include <net/if.h>
106 #include <net/if_dl.h>
107 #include <net/if_media.h>
108 #include <net/if_ether.h>
109 
110 #include <net/bpf.h>
111 
112 #include <netinet/in.h>			/* XXX for struct ip */
113 #include <netinet/in_systm.h>		/* XXX for struct ip */
114 #include <netinet/ip.h>			/* XXX for struct ip */
115 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
116 
117 #include <sys/bus.h>
118 #include <sys/intr.h>
119 #include <machine/endian.h>
120 
121 #include <dev/mii/mii.h>
122 #include <dev/mii/miivar.h>
123 #include <dev/mii/mii_bitbang.h>
124 
125 #include <dev/pci/pcireg.h>
126 #include <dev/pci/pcivar.h>
127 #include <dev/pci/pcidevs.h>
128 
129 #include <dev/pci/if_dgereg.h>
130 
131 /*
132  * The receive engine may sometimes become off-by-one when writing back
133  * chained descriptors.  Avoid this by allocating one large chunk of
134  * memory and using it instead, so that descriptors never need chaining.
135  * The bug only shows up with chained descriptors under heavy load.
136  */
137 #define DGE_OFFBYONE_RXBUG
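
/*
 * A sketch of how the workaround is wired up below (see dge_alloc_rcvmem(),
 * dge_getbuf() and dge_freebuf()): one large DMA-safe allocation is carved
 * into DGE_BUFFER_SIZE slices, and each receive descriptor points into that
 * single DMA segment instead of at a chain of mbuf clusters:
 *
 *	sc_bugbuf: [ slot 0 | slot 1 | ... | slot DGE_NBUFFERS-1 ]
 *
 * Free slots are kept on the sc_buglist SLIST and handed out one at a
 * time by dge_getbuf().
 */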
138 
139 #define DGE_EVENT_COUNTERS
140 #define DGE_DEBUG
141 
142 #ifdef DGE_DEBUG
143 #define DGE_DEBUG_LINK		0x01
144 #define DGE_DEBUG_TX		0x02
145 #define DGE_DEBUG_RX		0x04
146 #define DGE_DEBUG_CKSUM		0x08
147 int	dge_debug = 0;
148 
149 #define DPRINTF(x, y)	if (dge_debug & (x)) printf y
150 #else
151 #define DPRINTF(x, y)	/* nothing */
152 #endif /* DGE_DEBUG */
153 
154 /*
155  * Transmit descriptor list size. We allow up to 100 DMA segments per
156  * packet (Intel reports jumbo frame packets with as
157  * many as 80 DMA segments when using 16k buffers).
158  */
159 #define DGE_NTXSEGS		100
160 #define DGE_IFQUEUELEN		20000
161 #define DGE_TXQUEUELEN		2048
162 #define DGE_TXQUEUELEN_MASK	(DGE_TXQUEUELEN - 1)
163 #define DGE_TXQUEUE_GC		(DGE_TXQUEUELEN / 8)
164 #define DGE_NTXDESC		1024
165 #define DGE_NTXDESC_MASK		(DGE_NTXDESC - 1)
166 #define DGE_NEXTTX(x)		(((x) + 1) & DGE_NTXDESC_MASK)
167 #define DGE_NEXTTXS(x)		(((x) + 1) & DGE_TXQUEUELEN_MASK)
168 
169 /*
170  * Receive descriptor list size.
171  * Each buffer is of size MCLBYTES (2k), and for jumbo packets buffers
172  * may be chained.  Since this is a high-speed card, keep this
173  * ring large.  With 2k buffers the ring can store about 400 jumbo
174  * packets, which at full line rate are received in just under 3ms.
175  */
176 #define DGE_NRXDESC		2048
177 #define DGE_NRXDESC_MASK	(DGE_NRXDESC - 1)
178 #define DGE_NEXTRX(x)		(((x) + 1) & DGE_NRXDESC_MASK)
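
/*
 * Worked numbers behind the comment above (a back-of-the-envelope check,
 * not from the original source): a 9018-byte jumbo frame needs 5 chained
 * 2k buffers (9018 / 2048 rounded up), so the 2048-entry ring holds about
 * 2048 / 5 = 409 such packets; 409 * 9018 bytes * 8 bits is roughly
 * 29.5 Mbit, which takes just under 3ms to arrive at 10 Gbit/s.
 */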
179 /*
180  * # of descriptors between head and written descriptors.
181  * This works around two errata.
182  */
183 #define DGE_RXSPACE		10
184 #define DGE_PREVRX(x)		(((x) - DGE_RXSPACE) & DGE_NRXDESC_MASK)
185 /*
186  * Receive descriptor fetch thresholds.  These are values recommended
187  * by Intel; do not touch them unless you know what you are doing.
188  */
189 #define RXDCTL_PTHRESH_VAL	128
190 #define RXDCTL_HTHRESH_VAL	16
191 #define RXDCTL_WTHRESH_VAL	16
192 
193 
194 /*
195  * Tweakable parameters; default values.
196  */
197 #define FCRTH	0x30000 /* Send XOFF watermark */
198 #define FCRTL	0x28000 /* Send XON watermark */
199 #define RDTR	0x20	/* Interrupt delay after receive, .8192us units */
200 #define TIDV	0x20	/* Interrupt delay after send, .8192us units */
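
/*
 * Worked example for the delay units (illustrative only): RDTR = 0x20
 * means 32 * 0.8192us, i.e. the chip waits about 26us after a receive
 * before raising the interrupt, batching back-to-back packets into a
 * single interrupt.  The FCRTH/FCRTL values are assumed to be byte
 * thresholds within the receive FIFO for generating XOFF/XON pause frames.
 */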
201 
202 /*
203  * Control structures are DMA'd to the i82597 chip.  We allocate them in
204  * a single clump that maps to a single DMA segment to make several things
205  * easier.
206  */
207 struct dge_control_data {
208 	/*
209 	 * The transmit descriptors.
210 	 */
211 	struct dge_tdes wcd_txdescs[DGE_NTXDESC];
212 
213 	/*
214 	 * The receive descriptors.
215 	 */
216 	struct dge_rdes wcd_rxdescs[DGE_NRXDESC];
217 };
218 
219 #define DGE_CDOFF(x)	offsetof(struct dge_control_data, x)
220 #define DGE_CDTXOFF(x)	DGE_CDOFF(wcd_txdescs[(x)])
221 #define DGE_CDRXOFF(x)	DGE_CDOFF(wcd_rxdescs[(x)])
222 
223 /*
224  * The DGE interface has a higher maximum MTU than normal jumbo frames.
225  */
226 #define DGE_MAX_MTU	16288	/* Max MTU size for this interface */
227 
228 /*
229  * Software state for transmit jobs.
230  */
231 struct dge_txsoft {
232 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
233 	bus_dmamap_t txs_dmamap;	/* our DMA map */
234 	int txs_firstdesc;		/* first descriptor in packet */
235 	int txs_lastdesc;		/* last descriptor in packet */
236 	int txs_ndesc;			/* # of descriptors used */
237 };
238 
239 /*
240  * Software state for receive buffers.	Each descriptor gets a
241  * 2k (MCLBYTES) buffer and a DMA map.	For packets which fill
242  * more than one buffer, we chain them together.
243  */
244 struct dge_rxsoft {
245 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
246 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
247 };
248 
249 /*
250  * Software state per device.
251  */
252 struct dge_softc {
253 	struct device sc_dev;		/* generic device information */
254 	bus_space_tag_t sc_st;		/* bus space tag */
255 	bus_space_handle_t sc_sh;	/* bus space handle */
256 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
257 	struct ethercom sc_ethercom;	/* ethernet common data */
258 
259 	int sc_flags;			/* flags; see below */
260 	int sc_bus_speed;		/* PCI/PCIX bus speed */
261 	int sc_pcix_offset;		/* PCIX capability register offset */
262 
263 	pci_chipset_tag_t sc_pc;
264 	pcitag_t sc_pt;
265 	int sc_mmrbc;			/* Max PCIX memory read byte count */
266 
267 	void *sc_ih;			/* interrupt cookie */
268 
269 	struct ifmedia sc_media;
270 
271 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
272 #define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
273 
274 	int		sc_align_tweak;
275 
276 	/*
277 	 * Software state for the transmit and receive descriptors.
278 	 */
279 	struct dge_txsoft sc_txsoft[DGE_TXQUEUELEN];
280 	struct dge_rxsoft sc_rxsoft[DGE_NRXDESC];
281 
282 	/*
283 	 * Control data structures.
284 	 */
285 	struct dge_control_data *sc_control_data;
286 #define sc_txdescs	sc_control_data->wcd_txdescs
287 #define sc_rxdescs	sc_control_data->wcd_rxdescs
288 
289 #ifdef DGE_EVENT_COUNTERS
290 	/* Event counters. */
291 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
292 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
293 	struct evcnt sc_ev_txforceintr; /* Tx interrupts forced */
294 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
295 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
296 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
297 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
298 
299 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
300 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
301 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
302 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
303 
304 	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
305 	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
306 	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */
307 
308 	struct evcnt sc_ev_txseg[DGE_NTXSEGS]; /* Tx packets w/ N segments */
309 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
310 #endif /* DGE_EVENT_COUNTERS */
311 
312 	int	sc_txfree;		/* number of free Tx descriptors */
313 	int	sc_txnext;		/* next ready Tx descriptor */
314 
315 	int	sc_txsfree;		/* number of free Tx jobs */
316 	int	sc_txsnext;		/* next free Tx job */
317 	int	sc_txsdirty;		/* dirty Tx jobs */
318 
319 	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
320 	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */
321 
322 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
323 	int	sc_rxdiscard;
324 	int	sc_rxlen;
325 	struct mbuf *sc_rxhead;
326 	struct mbuf *sc_rxtail;
327 	struct mbuf **sc_rxtailp;
328 
329 	uint32_t sc_ctrl0;		/* prototype CTRL0 register */
330 	uint32_t sc_icr;		/* prototype interrupt bits */
331 	uint32_t sc_tctl;		/* prototype TCTL register */
332 	uint32_t sc_rctl;		/* prototype RCTL register */
333 
334 	int sc_mchash_type;		/* multicast filter offset */
335 
336 	uint16_t sc_eeprom[EEPROM_SIZE];
337 
338 #if NRND > 0
339 	rndsource_element_t rnd_source; /* random source */
340 #endif
341 #ifdef DGE_OFFBYONE_RXBUG
342 	void *sc_bugbuf;
343 	SLIST_HEAD(, rxbugentry) sc_buglist;
344 	bus_dmamap_t sc_bugmap;
345 	struct rxbugentry *sc_entry;
346 #endif
347 };
348 
349 #define DGE_RXCHAIN_RESET(sc)						\
350 do {									\
351 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
352 	*(sc)->sc_rxtailp = NULL;					\
353 	(sc)->sc_rxlen = 0;						\
354 } while (/*CONSTCOND*/0)
355 
356 #define DGE_RXCHAIN_LINK(sc, m)						\
357 do {									\
358 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
359 	(sc)->sc_rxtailp = &(m)->m_next;				\
360 } while (/*CONSTCOND*/0)
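
/*
 * Usage sketch for the two macros above: sc_rxtailp always points at the
 * m_next field to fill in next, so appending a buffer is O(1) with no list
 * walk.  For a three-buffer jumbo packet the receive path effectively does:
 *
 *	DGE_RXCHAIN_RESET(sc);
 *	DGE_RXCHAIN_LINK(sc, m0);	// sc_rxhead = m0
 *	DGE_RXCHAIN_LINK(sc, m1);	// m0->m_next = m1
 *	DGE_RXCHAIN_LINK(sc, m2);	// m1->m_next = m2
 */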
361 
362 /* sc_flags */
363 #define DGE_F_BUS64		0x20	/* bus is 64-bit */
364 #define DGE_F_PCIX		0x40	/* bus is PCI-X */
365 
366 #ifdef DGE_EVENT_COUNTERS
367 #define DGE_EVCNT_INCR(ev)	(ev)->ev_count++
368 #else
369 #define DGE_EVCNT_INCR(ev)	/* nothing */
370 #endif
371 
372 #define CSR_READ(sc, reg)						\
373 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
374 #define CSR_WRITE(sc, reg, val)						\
375 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
376 
377 #define DGE_CDTXADDR(sc, x)	((sc)->sc_cddma + DGE_CDTXOFF((x)))
378 #define DGE_CDRXADDR(sc, x)	((sc)->sc_cddma + DGE_CDRXOFF((x)))
379 
380 #define DGE_CDTXSYNC(sc, x, n, ops)					\
381 do {									\
382 	int __x, __n;							\
383 									\
384 	__x = (x);							\
385 	__n = (n);							\
386 									\
387 	/* If it will wrap around, sync to the end of the ring. */	\
388 	if ((__x + __n) > DGE_NTXDESC) {				\
389 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
390 		    DGE_CDTXOFF(__x), sizeof(struct dge_tdes) *		\
391 		    (DGE_NTXDESC - __x), (ops));			\
392 		__n -= (DGE_NTXDESC - __x);				\
393 		__x = 0;						\
394 	}								\
395 									\
396 	/* Now sync whatever is left. */				\
397 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
398 	    DGE_CDTXOFF(__x), sizeof(struct dge_tdes) * __n, (ops));	\
399 } while (/*CONSTCOND*/0)
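
/*
 * Example of the wrap-around handled above (illustrative): with
 * DGE_NTXDESC = 1024, DGE_CDTXSYNC(sc, 1022, 4, ops) first syncs
 * descriptors 1022-1023 and then descriptors 0-1, since a single
 * bus_dmamap_sync() range cannot wrap past the end of the ring.
 */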
400 
401 #define DGE_CDRXSYNC(sc, x, ops)						\
402 do {									\
403 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
404 	   DGE_CDRXOFF((x)), sizeof(struct dge_rdes), (ops));		\
405 } while (/*CONSTCOND*/0)
406 
407 #ifdef DGE_OFFBYONE_RXBUG
408 #define DGE_INIT_RXDESC(sc, x)						\
409 do {									\
410 	struct dge_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
411 	struct dge_rdes *__rxd = &(sc)->sc_rxdescs[(x)];		\
412 	struct mbuf *__m = __rxs->rxs_mbuf;				\
413 									\
414 	__rxd->dr_baddrl = htole32(sc->sc_bugmap->dm_segs[0].ds_addr +	\
415 	    (mtod((__m), char *) - (char *)sc->sc_bugbuf));		\
416 	__rxd->dr_baddrh = 0;						\
417 	__rxd->dr_len = 0;						\
418 	__rxd->dr_cksum = 0;						\
419 	__rxd->dr_status = 0;						\
420 	__rxd->dr_errors = 0;						\
421 	__rxd->dr_special = 0;						\
422 	DGE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
423 									\
424 	CSR_WRITE((sc), DGE_RDT, (x));					\
425 } while (/*CONSTCOND*/0)
426 #else
427 #define DGE_INIT_RXDESC(sc, x)						\
428 do {									\
429 	struct dge_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
430 	struct dge_rdes *__rxd = &(sc)->sc_rxdescs[(x)];		\
431 	struct mbuf *__m = __rxs->rxs_mbuf;				\
432 									\
433 	/*								\
434 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
435 	 * so that the payload after the Ethernet header is aligned	\
436 	 * to a 4-byte boundary.					\
437 	 *								\
438 	 * XXX BRAINDAMAGE ALERT!					\
439 	 * The stupid chip uses the same size for every buffer, which	\
440 	 * is set in the Receive Control register.  We are using the 2K \
441 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
442 	 * reason, we can't "scoot" packets longer than the standard	\
443 	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
444 	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
445 	 * the upper layer copy the headers.				\
446 	 */								\
447 	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
448 									\
449 	__rxd->dr_baddrl =						\
450 	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +		\
451 		(sc)->sc_align_tweak);					\
452 	__rxd->dr_baddrh = 0;						\
453 	__rxd->dr_len = 0;						\
454 	__rxd->dr_cksum = 0;						\
455 	__rxd->dr_status = 0;						\
456 	__rxd->dr_errors = 0;						\
457 	__rxd->dr_special = 0;						\
458 	DGE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
459 									\
460 	CSR_WRITE((sc), DGE_RDT, (x));					\
461 } while (/*CONSTCOND*/0)
462 #endif
463 
464 #ifdef DGE_OFFBYONE_RXBUG
465 /*
466  * Allocation constants.  Much memory may be used for this.
467  */
468 #ifndef DGE_BUFFER_SIZE
469 #define DGE_BUFFER_SIZE DGE_MAX_MTU
470 #endif
471 #define DGE_NBUFFERS	(4*DGE_NRXDESC)
472 #define DGE_RXMEM	(DGE_NBUFFERS*DGE_BUFFER_SIZE)
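
/*
 * Rough cost of these constants (arithmetic added for illustration):
 * DGE_NBUFFERS = 4 * 2048 = 8192 buffers of 16288 bytes each, so
 * DGE_RXMEM is 8192 * 16288 bytes, about 133 MB of wired,
 * DMA-addressable memory per device.
 */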
473 
474 struct rxbugentry {
475 	SLIST_ENTRY(rxbugentry) rb_entry;
476 	int rb_slot;
477 };
478 
479 static int
480 dge_alloc_rcvmem(struct dge_softc *sc)
481 {
482 	char *ptr, *kva;
483 	bus_dma_segment_t seg;
484 	int i, rseg, state, error;
485 	struct rxbugentry *entry;
486 
487 	state = error = 0;
488 
489 	if (bus_dmamem_alloc(sc->sc_dmat, DGE_RXMEM, PAGE_SIZE, 0,
490 	     &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
491 		aprint_error_dev(&sc->sc_dev, "can't alloc rx buffers\n");
492 		return ENOBUFS;
493 	}
494 
495 	state = 1;
496 	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, DGE_RXMEM, (void **)&kva,
497 	    BUS_DMA_NOWAIT)) {
498 		aprint_error_dev(&sc->sc_dev, "can't map DMA buffers (%d bytes)\n",
499 		    (int)DGE_RXMEM);
500 		error = ENOBUFS;
501 		goto out;
502 	}
503 
504 	state = 2;
505 	if (bus_dmamap_create(sc->sc_dmat, DGE_RXMEM, 1, DGE_RXMEM, 0,
506 	    BUS_DMA_NOWAIT, &sc->sc_bugmap)) {
507 		aprint_error_dev(&sc->sc_dev, "can't create DMA map\n");
508 		error = ENOBUFS;
509 		goto out;
510 	}
511 
512 	state = 3;
513 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_bugmap,
514 	    kva, DGE_RXMEM, NULL, BUS_DMA_NOWAIT)) {
515 		aprint_error_dev(&sc->sc_dev, "can't load DMA map\n");
516 		error = ENOBUFS;
517 		goto out;
518 	}
519 
520 	state = 4;
521 	sc->sc_bugbuf = (void *)kva;
522 	SLIST_INIT(&sc->sc_buglist);
523 
524 	/*
525 	 * Now divide it up into DGE_BUFFER_SIZE pieces and save the addresses
526 	 * in an array.
527 	 */
528 	ptr = sc->sc_bugbuf;
529 	if ((entry = malloc(sizeof(*entry) * DGE_NBUFFERS,
530 	    M_DEVBUF, M_NOWAIT)) == NULL) {
531 		error = ENOBUFS;
532 		goto out;
533 	}
534 	sc->sc_entry = entry;
535 	for (i = 0; i < DGE_NBUFFERS; i++) {
536 		entry[i].rb_slot = i;
537 		SLIST_INSERT_HEAD(&sc->sc_buglist, &entry[i], rb_entry);
538 	}
539 out:
540 	if (error != 0) {
541 		switch (state) {
542 		case 4:
543 			bus_dmamap_unload(sc->sc_dmat, sc->sc_bugmap);
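			/* FALLTHROUGH */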
544 		case 3:
545 			bus_dmamap_destroy(sc->sc_dmat, sc->sc_bugmap);
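			/* FALLTHROUGH */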
546 		case 2:
547 			bus_dmamem_unmap(sc->sc_dmat, kva, DGE_RXMEM);
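			/* FALLTHROUGH */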
548 		case 1:
549 			bus_dmamem_free(sc->sc_dmat, &seg, rseg);
550 			break;
551 		default:
552 			break;
553 		}
554 	}
555 
556 	return error;
557 }
558 
559 /*
560  * Allocate a jumbo buffer.
561  */
562 static void *
563 dge_getbuf(struct dge_softc *sc)
564 {
565 	struct rxbugentry *entry;
566 
567 	entry = SLIST_FIRST(&sc->sc_buglist);
568 
569 	if (entry == NULL) {
570 		printf("%s: no free RX buffers\n", device_xname(&sc->sc_dev));
571 		return (NULL);
572 	}
573 
574 	SLIST_REMOVE_HEAD(&sc->sc_buglist, rb_entry);
575 	return (char *)sc->sc_bugbuf + entry->rb_slot * DGE_BUFFER_SIZE;
576 }
577 
578 /*
579  * Release a jumbo buffer.
580  */
581 static void
582 dge_freebuf(struct mbuf *m, void *buf, size_t size, void *arg)
583 {
584 	struct rxbugentry *entry;
585 	struct dge_softc *sc;
586 	int i, s;
587 
588 	/* Extract the softc struct pointer. */
589 	sc = (struct dge_softc *)arg;
590 
591 	if (sc == NULL)
592 		panic("dge_freebuf: can't find softc pointer!");
593 
594 	/* calculate the slot this buffer belongs to */
595 
596 	i = ((char *)buf - (char *)sc->sc_bugbuf) / DGE_BUFFER_SIZE;
597 
598 	if ((i < 0) || (i >= DGE_NBUFFERS))
599 		panic("dge_freebuf: asked to free buffer %d!", i);
600 
601 	s = splvm();
602 	entry = sc->sc_entry + i;
603 	SLIST_INSERT_HEAD(&sc->sc_buglist, entry, rb_entry);
604 
605 	if (__predict_true(m != NULL))
606 		pool_cache_put(mb_cache, m);
607 	splx(s);
608 }
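
/*
 * A minimal usage sketch for the pair above (an assumption about how the
 * rest of this driver uses them; dge_add_rxbuf() is not shown in this
 * excerpt): the buffer is attached to an mbuf as external storage, with
 * dge_freebuf() as the destructor that returns the slot to sc_buglist:
 *
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	if (m != NULL && (buf = dge_getbuf(sc)) != NULL)
 *		MEXTADD(m, buf, DGE_BUFFER_SIZE, M_DEVBUF, dge_freebuf, sc);
 */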
609 #endif
610 
611 static void	dge_start(struct ifnet *);
612 static void	dge_watchdog(struct ifnet *);
613 static int	dge_ioctl(struct ifnet *, u_long, void *);
614 static int	dge_init(struct ifnet *);
615 static void	dge_stop(struct ifnet *, int);
616 
617 static bool	dge_shutdown(device_t, int);
618 
619 static void	dge_reset(struct dge_softc *);
620 static void	dge_rxdrain(struct dge_softc *);
621 static int	dge_add_rxbuf(struct dge_softc *, int);
622 
623 static void	dge_set_filter(struct dge_softc *);
624 
625 static int	dge_intr(void *);
626 static void	dge_txintr(struct dge_softc *);
627 static void	dge_rxintr(struct dge_softc *);
628 static void	dge_linkintr(struct dge_softc *, uint32_t);
629 
630 static int	dge_match(device_t, cfdata_t, void *);
631 static void	dge_attach(device_t, device_t, void *);
632 
633 static int	dge_read_eeprom(struct dge_softc *sc);
634 static int	dge_eeprom_clockin(struct dge_softc *sc);
635 static void	dge_eeprom_clockout(struct dge_softc *sc, int bit);
636 static uint16_t	dge_eeprom_word(struct dge_softc *sc, int addr);
637 static int	dge_xgmii_mediachange(struct ifnet *);
638 static void	dge_xgmii_mediastatus(struct ifnet *, struct ifmediareq *);
639 static void	dge_xgmii_reset(struct dge_softc *);
640 static void	dge_xgmii_writereg(device_t, int, int, int);
641 
642 
643 CFATTACH_DECL(dge, sizeof(struct dge_softc),
644     dge_match, dge_attach, NULL, NULL);
645 
646 #ifdef DGE_EVENT_COUNTERS
647 #if DGE_NTXSEGS > 100
648 #error Update dge_txseg_evcnt_names
649 #endif
650 static char (*dge_txseg_evcnt_names)[DGE_NTXSEGS][8 /* "txseg00" + \0 */];
651 #endif /* DGE_EVENT_COUNTERS */
652 
653 static int
654 dge_match(device_t parent, cfdata_t cf, void *aux)
655 {
656 	struct pci_attach_args *pa = aux;
657 
658 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INTEL &&
659 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82597EX)
660 		return (1);
661 
662 	return (0);
663 }
664 
665 static void
666 dge_attach(device_t parent, device_t self, void *aux)
667 {
668 	struct dge_softc *sc = device_private(self);
669 	struct pci_attach_args *pa = aux;
670 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
671 	pci_chipset_tag_t pc = pa->pa_pc;
672 	pci_intr_handle_t ih;
673 	const char *intrstr = NULL;
674 	bus_dma_segment_t seg;
675 	int i, rseg, error;
676 	uint8_t enaddr[ETHER_ADDR_LEN];
677 	pcireg_t preg, memtype;
678 	uint32_t reg;
679 
680 	sc->sc_dmat = pa->pa_dmat;
681 	sc->sc_pc = pa->pa_pc;
682 	sc->sc_pt = pa->pa_tag;
683 
684 	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
685 	aprint_naive(": Ethernet controller\n");
686 	aprint_normal(": Intel i82597EX 10GbE-LR Ethernet, rev. %d\n", preg);
687 
688 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, DGE_PCI_BAR);
689 	if (pci_mapreg_map(pa, DGE_PCI_BAR, memtype, 0,
690 	    &sc->sc_st, &sc->sc_sh, NULL, NULL)) {
691 		aprint_error_dev(&sc->sc_dev, "unable to map device registers\n");
692 		return;
693 	}
694 
695 	/* Enable bus mastering */
696 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
697 	preg |= PCI_COMMAND_MASTER_ENABLE;
698 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
699 
700 	/*
701 	 * Map and establish our interrupt.
702 	 */
703 	if (pci_intr_map(pa, &ih)) {
704 		aprint_error_dev(&sc->sc_dev, "unable to map interrupt\n");
705 		return;
706 	}
707 	intrstr = pci_intr_string(pc, ih);
708 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, dge_intr, sc);
709 	if (sc->sc_ih == NULL) {
710 		aprint_error_dev(&sc->sc_dev, "unable to establish interrupt");
711 		if (intrstr != NULL)
712 			aprint_error(" at %s", intrstr);
713 		aprint_error("\n");
714 		return;
715 	}
716 	aprint_normal_dev(&sc->sc_dev, "interrupting at %s\n", intrstr);
717 
718 	/*
719 	 * Determine a few things about the bus we're connected to.
720 	 */
721 	reg = CSR_READ(sc, DGE_STATUS);
722 	if (reg & STATUS_BUS64)
723 		sc->sc_flags |= DGE_F_BUS64;
724 
725 	sc->sc_flags |= DGE_F_PCIX;
726 	if (pci_get_capability(pa->pa_pc, pa->pa_tag,
727 			       PCI_CAP_PCIX,
728 			       &sc->sc_pcix_offset, NULL) == 0)
729 		aprint_error_dev(&sc->sc_dev, "unable to find PCIX "
730 		    "capability\n");
731 
732 	if (sc->sc_flags & DGE_F_PCIX) {
733 		switch (reg & STATUS_PCIX_MSK) {
734 		case STATUS_PCIX_66:
735 			sc->sc_bus_speed = 66;
736 			break;
737 		case STATUS_PCIX_100:
738 			sc->sc_bus_speed = 100;
739 			break;
740 		case STATUS_PCIX_133:
741 			sc->sc_bus_speed = 133;
742 			break;
743 		default:
744 			aprint_error_dev(&sc->sc_dev,
745 			    "unknown PCIXSPD %d; assuming 66MHz\n",
746 			    reg & STATUS_PCIX_MSK);
747 			sc->sc_bus_speed = 66;
748 		}
749 	} else
750 		sc->sc_bus_speed = (reg & STATUS_BUS64) ? 66 : 33;
751 	aprint_verbose_dev(&sc->sc_dev, "%d-bit %dMHz %s bus\n",
752 	    (sc->sc_flags & DGE_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
753 	    (sc->sc_flags & DGE_F_PCIX) ? "PCIX" : "PCI");
754 
755 	/*
756 	 * Allocate the control data structures, and create and load the
757 	 * DMA map for it.
758 	 */
759 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
760 	    sizeof(struct dge_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
761 	    0)) != 0) {
762 		aprint_error_dev(&sc->sc_dev,
763 		    "unable to allocate control data, error = %d\n",
764 		    error);
765 		goto fail_0;
766 	}
767 
768 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
769 	    sizeof(struct dge_control_data), (void **)&sc->sc_control_data,
770 	    0)) != 0) {
771 		aprint_error_dev(&sc->sc_dev, "unable to map control data, error = %d\n",
772 		    error);
773 		goto fail_1;
774 	}
775 
776 	if ((error = bus_dmamap_create(sc->sc_dmat,
777 	    sizeof(struct dge_control_data), 1,
778 	    sizeof(struct dge_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
779 		aprint_error_dev(&sc->sc_dev, "unable to create control data DMA map, "
780 		    "error = %d\n", error);
781 		goto fail_2;
782 	}
783 
784 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
785 	    sc->sc_control_data, sizeof(struct dge_control_data), NULL,
786 	    0)) != 0) {
787 		aprint_error_dev(&sc->sc_dev,
788 		    "unable to load control data DMA map, error = %d\n",
789 		    error);
790 		goto fail_3;
791 	}
792 
793 #ifdef DGE_OFFBYONE_RXBUG
794 	if (dge_alloc_rcvmem(sc) != 0)
795 		return; /* Already complained */
796 #endif
797 	/*
798 	 * Create the transmit buffer DMA maps.
799 	 */
800 	for (i = 0; i < DGE_TXQUEUELEN; i++) {
801 		if ((error = bus_dmamap_create(sc->sc_dmat, DGE_MAX_MTU,
802 		    DGE_NTXSEGS, MCLBYTES, 0, 0,
803 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
804 			aprint_error_dev(&sc->sc_dev, "unable to create Tx DMA map %d, "
805 			    "error = %d\n", i, error);
806 			goto fail_4;
807 		}
808 	}
809 
810 	/*
811 	 * Create the receive buffer DMA maps.
812 	 */
813 	for (i = 0; i < DGE_NRXDESC; i++) {
814 #ifdef DGE_OFFBYONE_RXBUG
815 		if ((error = bus_dmamap_create(sc->sc_dmat, DGE_BUFFER_SIZE, 1,
816 		    DGE_BUFFER_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
817 #else
818 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
819 		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
820 #endif
821 			aprint_error_dev(&sc->sc_dev, "unable to create Rx DMA map %d, "
822 			    "error = %d\n", i, error);
823 			goto fail_5;
824 		}
825 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
826 	}
827 
828 	/*
829 	 * Set bits in ctrl0 register.
830 	 * Should get the software defined pins out of EEPROM?
831 	 */
832 	sc->sc_ctrl0 |= CTRL0_RPE | CTRL0_TPE; /* XON/XOFF */
833 	sc->sc_ctrl0 |= CTRL0_SDP3_DIR | CTRL0_SDP2_DIR | CTRL0_SDP1_DIR |
834 	    CTRL0_SDP0_DIR | CTRL0_SDP3 | CTRL0_SDP2 | CTRL0_SDP0;
835 
836 	/*
837 	 * Reset the chip to a known state.
838 	 */
839 	dge_reset(sc);
840 
841 	/*
842 	 * Reset the PHY.
843 	 */
844 	dge_xgmii_reset(sc);
845 
846 	/*
847 	 * Read in EEPROM data.
848 	 */
849 	if (dge_read_eeprom(sc)) {
850 		aprint_error_dev(&sc->sc_dev, "couldn't read EEPROM\n");
851 		return;
852 	}
853 
854 	/*
855 	 * Get the ethernet address.
856 	 */
857 	enaddr[0] = sc->sc_eeprom[EE_ADDR01] & 0377;
858 	enaddr[1] = sc->sc_eeprom[EE_ADDR01] >> 8;
859 	enaddr[2] = sc->sc_eeprom[EE_ADDR23] & 0377;
860 	enaddr[3] = sc->sc_eeprom[EE_ADDR23] >> 8;
861 	enaddr[4] = sc->sc_eeprom[EE_ADDR45] & 0377;
862 	enaddr[5] = sc->sc_eeprom[EE_ADDR45] >> 8;
863 
864 	aprint_normal_dev(&sc->sc_dev, "Ethernet address %s\n",
865 	    ether_sprintf(enaddr));
866 
867 	/*
868	 * Set up media.
869 	 */
870 	ifmedia_init(&sc->sc_media, IFM_IMASK, dge_xgmii_mediachange,
871 	    dge_xgmii_mediastatus);
872 	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_LR, 0, NULL);
873 	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_10G_LR);
874 
875 	ifp = &sc->sc_ethercom.ec_if;
876 	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
877 	ifp->if_softc = sc;
878 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
879 	ifp->if_ioctl = dge_ioctl;
880 	ifp->if_start = dge_start;
881 	ifp->if_watchdog = dge_watchdog;
882 	ifp->if_init = dge_init;
883 	ifp->if_stop = dge_stop;
884 	IFQ_SET_MAXLEN(&ifp->if_snd, max(DGE_IFQUEUELEN, IFQ_MAXLEN));
885 	IFQ_SET_READY(&ifp->if_snd);
886 
887 	sc->sc_ethercom.ec_capabilities |=
888 	    ETHERCAP_JUMBO_MTU | ETHERCAP_VLAN_MTU;
889 
890 	/*
891	 * We can perform IPv4, TCPv4 and UDPv4 checksums in- and out-bound.
892 	 */
893 	ifp->if_capabilities |=
894 	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
895 	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
896 	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
897 
898 	/*
899 	 * Attach the interface.
900 	 */
901 	if_attach(ifp);
902 	ether_ifattach(ifp, enaddr);
903 #if NRND > 0
904 	rnd_attach_source(&sc->rnd_source, device_xname(&sc->sc_dev),
905 	    RND_TYPE_NET, 0);
906 #endif
907 
908 #ifdef DGE_EVENT_COUNTERS
909 	/* Fix segment event naming */
910 	if (dge_txseg_evcnt_names == NULL) {
911 		dge_txseg_evcnt_names =
912 		    malloc(sizeof(*dge_txseg_evcnt_names), M_DEVBUF, M_WAITOK);
913 		for (i = 0; i < DGE_NTXSEGS; i++)
914 			snprintf((*dge_txseg_evcnt_names)[i],
915 			    sizeof((*dge_txseg_evcnt_names)[i]), "txseg%d", i);
916 	}
917 
918 	/* Attach event counters. */
919 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
920 	    NULL, device_xname(&sc->sc_dev), "txsstall");
921 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
922 	    NULL, device_xname(&sc->sc_dev), "txdstall");
923 	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
924 	    NULL, device_xname(&sc->sc_dev), "txforceintr");
925 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
926 	    NULL, device_xname(&sc->sc_dev), "txdw");
927 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
928 	    NULL, device_xname(&sc->sc_dev), "txqe");
929 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
930 	    NULL, device_xname(&sc->sc_dev), "rxintr");
931 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
932 	    NULL, device_xname(&sc->sc_dev), "linkintr");
933 
934 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
935 	    NULL, device_xname(&sc->sc_dev), "rxipsum");
936 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
937 	    NULL, device_xname(&sc->sc_dev), "rxtusum");
938 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
939 	    NULL, device_xname(&sc->sc_dev), "txipsum");
940 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
941 	    NULL, device_xname(&sc->sc_dev), "txtusum");
942 
943 	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
944 	    NULL, device_xname(&sc->sc_dev), "txctx init");
945 	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
946 	    NULL, device_xname(&sc->sc_dev), "txctx hit");
947 	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
948 	    NULL, device_xname(&sc->sc_dev), "txctx miss");
949 
950 	for (i = 0; i < DGE_NTXSEGS; i++)
951 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
952 		    NULL, device_xname(&sc->sc_dev), (*dge_txseg_evcnt_names)[i]);
953 
954 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
955 	    NULL, device_xname(&sc->sc_dev), "txdrop");
956 
957 #endif /* DGE_EVENT_COUNTERS */
958 
959 	/*
960 	 * Make sure the interface is shutdown during reboot.
961 	 */
962 	if (pmf_device_register1(self, NULL, NULL, dge_shutdown))
963 		pmf_class_network_register(self, ifp);
964 	else
965 		aprint_error_dev(self, "couldn't establish power handler\n");
966 
967 	return;
968 
969 	/*
970 	 * Free any resources we've allocated during the failed attach
971 	 * attempt.  Do this in reverse order and fall through.
972 	 */
973  fail_5:
974 	for (i = 0; i < DGE_NRXDESC; i++) {
975 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
976 			bus_dmamap_destroy(sc->sc_dmat,
977 			    sc->sc_rxsoft[i].rxs_dmamap);
978 	}
979  fail_4:
980 	for (i = 0; i < DGE_TXQUEUELEN; i++) {
981 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
982 			bus_dmamap_destroy(sc->sc_dmat,
983 			    sc->sc_txsoft[i].txs_dmamap);
984 	}
985 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
986  fail_3:
987 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
988  fail_2:
989 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
990 	    sizeof(struct dge_control_data));
991  fail_1:
992 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
993  fail_0:
994 	return;
995 }
996 
997 /*
998  * dge_shutdown:
999  *
1000  *	Make sure the interface is stopped at reboot time.
1001  */
1002 static bool
1003 dge_shutdown(device_t self, int howto)
1004 {
1005 	struct dge_softc *sc;
1006 
1007 	sc = device_private(self);
1008 	dge_stop(&sc->sc_ethercom.ec_if, 1);
1009 
1010 	return true;
1011 }
1012 
1013 /*
1014  * dge_tx_cksum:
1015  *
1016  *	Set up TCP/IP checksumming parameters for the
1017  *	specified packet.
1018  */
1019 static int
1020 dge_tx_cksum(struct dge_softc *sc, struct dge_txsoft *txs, uint8_t *fieldsp)
1021 {
1022 	struct mbuf *m0 = txs->txs_mbuf;
1023 	struct dge_ctdes *t;
1024 	uint32_t ipcs, tucs;
1025 	struct ether_header *eh;
1026 	int offset, iphl;
1027 	uint8_t fields = 0;
1028 
1029 	/*
1030 	 * XXX It would be nice if the mbuf pkthdr had offset
1031 	 * fields for the protocol headers.
1032 	 */
1033 
1034 	eh = mtod(m0, struct ether_header *);
1035 	switch (htons(eh->ether_type)) {
1036 	case ETHERTYPE_IP:
1037 		offset = ETHER_HDR_LEN;
1038 		break;
1039 
1040 	case ETHERTYPE_VLAN:
1041 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1042 		break;
1043 
1044 	default:
1045 		/*
1046 		 * Don't support this protocol or encapsulation.
1047 		 */
1048 		*fieldsp = 0;
1049 		return (0);
1050 	}
1051 
1052 	iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1053 
1054 	/*
1055 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1056 	 * offload feature, if we load the context descriptor, we
1057 	 * MUST provide valid values for IPCSS and TUCSS fields.
1058 	 */
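
	/*
	 * Worked example of the field layout (illustrative, for a plain
	 * IPv4/TCP frame with a 14-byte Ethernet header and a 20-byte IP
	 * header): IPCSS = 14 (checksum coverage starts at the IP header),
	 * IPCSO = 14 + 10 = 24 (offset of ip_sum), IPCSE = 14 + 20 - 1 = 33;
	 * TUCSS = 34, TUCSO = 34 + 16 = 50 (offset of th_sum), and TUCSE = 0
	 * meaning "to the end of the packet".
	 */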
1059 
1060 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
1061 		DGE_EVCNT_INCR(&sc->sc_ev_txipsum);
1062 		fields |= TDESC_POPTS_IXSM;
1063 		ipcs = DGE_TCPIP_IPCSS(offset) |
1064 		    DGE_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1065 		    DGE_TCPIP_IPCSE(offset + iphl - 1);
1066 	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
1067 		/* Use the cached value. */
1068 		ipcs = sc->sc_txctx_ipcs;
1069 	} else {
1070 		/* Just initialize it to the likely value anyway. */
1071 		ipcs = DGE_TCPIP_IPCSS(offset) |
1072 		    DGE_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1073 		    DGE_TCPIP_IPCSE(offset + iphl - 1);
1074 	}
1075 	DPRINTF(DGE_DEBUG_CKSUM,
1076 	    ("%s: CKSUM: offset %d ipcs 0x%x\n",
1077 	    device_xname(&sc->sc_dev), offset, ipcs));
1078 
1079 	offset += iphl;
1080 
1081 	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1082 		DGE_EVCNT_INCR(&sc->sc_ev_txtusum);
1083 		fields |= TDESC_POPTS_TXSM;
1084 		tucs = DGE_TCPIP_TUCSS(offset) |
1085 		   DGE_TCPIP_TUCSO(offset + M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1086 		   DGE_TCPIP_TUCSE(0) /* rest of packet */;
1087 	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
1088 		/* Use the cached value. */
1089 		tucs = sc->sc_txctx_tucs;
1090 	} else {
1091 		/* Just initialize it to a valid TCP context. */
1092 		tucs = DGE_TCPIP_TUCSS(offset) |
1093 		    DGE_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1094 		    DGE_TCPIP_TUCSE(0) /* rest of packet */;
1095 	}
1096 
1097 	DPRINTF(DGE_DEBUG_CKSUM,
1098 	    ("%s: CKSUM: offset %d tucs 0x%x\n",
1099 	    device_xname(&sc->sc_dev), offset, tucs));
1100 
1101 	if (sc->sc_txctx_ipcs == ipcs &&
1102 	    sc->sc_txctx_tucs == tucs) {
1103 		/* Cached context is fine. */
1104 		DGE_EVCNT_INCR(&sc->sc_ev_txctx_hit);
1105 	} else {
1106 		/* Fill in the context descriptor. */
1107 #ifdef DGE_EVENT_COUNTERS
1108 		if (sc->sc_txctx_ipcs == 0xffffffff &&
1109 		    sc->sc_txctx_tucs == 0xffffffff)
1110 			DGE_EVCNT_INCR(&sc->sc_ev_txctx_init);
1111 		else
1112 			DGE_EVCNT_INCR(&sc->sc_ev_txctx_miss);
1113 #endif
1114 		t = (struct dge_ctdes *)&sc->sc_txdescs[sc->sc_txnext];
1115 		t->dc_tcpip_ipcs = htole32(ipcs);
1116 		t->dc_tcpip_tucs = htole32(tucs);
1117 		t->dc_tcpip_cmdlen = htole32(TDESC_DTYP_CTD);
1118 		t->dc_tcpip_seg = 0;
1119 		DGE_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1120 
1121 		sc->sc_txctx_ipcs = ipcs;
1122 		sc->sc_txctx_tucs = tucs;
1123 
1124 		sc->sc_txnext = DGE_NEXTTX(sc->sc_txnext);
1125 		txs->txs_ndesc++;
1126 	}
1127 
1128 	*fieldsp = fields;
1129 
1130 	return (0);
1131 }
1132 
1133 /*
1134  * dge_start:		[ifnet interface function]
1135  *
1136  *	Start packet transmission on the interface.
1137  */
1138 static void
1139 dge_start(struct ifnet *ifp)
1140 {
1141 	struct dge_softc *sc = ifp->if_softc;
1142 	struct mbuf *m0;
1143 	struct dge_txsoft *txs;
1144 	bus_dmamap_t dmamap;
1145 	int error, nexttx, lasttx = -1, ofree, seg;
1146 	uint32_t cksumcmd;
1147 	uint8_t cksumfields;
1148 
1149 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1150 		return;
1151 
1152 	/*
1153 	 * Remember the previous number of free descriptors.
1154 	 */
1155 	ofree = sc->sc_txfree;
1156 
1157 	/*
1158 	 * Loop through the send queue, setting up transmit descriptors
1159 	 * until we drain the queue, or use up all available transmit
1160 	 * descriptors.
1161 	 */
1162 	for (;;) {
1163 		/* Grab a packet off the queue. */
1164 		IFQ_POLL(&ifp->if_snd, m0);
1165 		if (m0 == NULL)
1166 			break;
1167 
1168 		DPRINTF(DGE_DEBUG_TX,
1169 		    ("%s: TX: have packet to transmit: %p\n",
1170 		    device_xname(&sc->sc_dev), m0));
1171 
1172 		/* Get a work queue entry. */
1173 		if (sc->sc_txsfree < DGE_TXQUEUE_GC) {
1174 			dge_txintr(sc);
1175 			if (sc->sc_txsfree == 0) {
1176 				DPRINTF(DGE_DEBUG_TX,
1177 				    ("%s: TX: no free job descriptors\n",
1178 					device_xname(&sc->sc_dev)));
1179 				DGE_EVCNT_INCR(&sc->sc_ev_txsstall);
1180 				break;
1181 			}
1182 		}
1183 
1184 		txs = &sc->sc_txsoft[sc->sc_txsnext];
1185 		dmamap = txs->txs_dmamap;
1186 
1187 		/*
1188 		 * Load the DMA map.  If this fails, the packet either
1189 		 * didn't fit in the allotted number of segments, or we
1190 		 * were short on resources.  For the too-many-segments
1191 		 * case, we simply report an error and drop the packet,
1192 		 * since we can't sanely copy a jumbo packet to a single
1193 		 * buffer.
1194 		 */
1195 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1196 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1197 		if (error) {
1198 			if (error == EFBIG) {
1199 				DGE_EVCNT_INCR(&sc->sc_ev_txdrop);
1200 				printf("%s: Tx packet consumes too many "
1201 				    "DMA segments, dropping...\n",
1202 				    device_xname(&sc->sc_dev));
1203 				IFQ_DEQUEUE(&ifp->if_snd, m0);
1204 				m_freem(m0);
1205 				continue;
1206 			}
1207 			/*
1208 			 * Short on resources, just stop for now.
1209 			 */
1210 			DPRINTF(DGE_DEBUG_TX,
1211 			    ("%s: TX: dmamap load failed: %d\n",
1212 			    device_xname(&sc->sc_dev), error));
1213 			break;
1214 		}
1215 
1216 		/*
1217 		 * Ensure we have enough descriptors free to describe
1218		 * the packet.  Note that we always reserve one descriptor
1219 		 * at the end of the ring due to the semantics of the
1220 		 * TDT register, plus one more in the event we need
1221 		 * to re-load checksum offload context.
1222 		 */
1223 		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
1224 			/*
1225 			 * Not enough free descriptors to transmit this
1226 			 * packet.  We haven't committed anything yet,
1227 			 * so just unload the DMA map, put the packet
1228 			 * pack on the queue, and punt.  Notify the upper
1229 			 * layer that there are no more slots left.
1230 			 */
1231 			DPRINTF(DGE_DEBUG_TX,
1232 			    ("%s: TX: need %d descriptors, have %d\n",
1233 			    device_xname(&sc->sc_dev), dmamap->dm_nsegs,
1234 			    sc->sc_txfree - 1));
1235 			ifp->if_flags |= IFF_OACTIVE;
1236 			bus_dmamap_unload(sc->sc_dmat, dmamap);
1237 			DGE_EVCNT_INCR(&sc->sc_ev_txdstall);
1238 			break;
1239 		}
1240 
1241 		IFQ_DEQUEUE(&ifp->if_snd, m0);
1242 
1243 		/*
1244 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1245 		 */
1246 
1247 		/* Sync the DMA map. */
1248 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1249 		    BUS_DMASYNC_PREWRITE);
1250 
1251 		DPRINTF(DGE_DEBUG_TX,
1252 		    ("%s: TX: packet has %d DMA segments\n",
1253 		    device_xname(&sc->sc_dev), dmamap->dm_nsegs));
1254 
1255 		DGE_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
1256 
1257 		/*
1258 		 * Store a pointer to the packet so that we can free it
1259 		 * later.
1260 		 *
1261 		 * Initially, we consider the number of descriptors the
1262		 * packet uses to be the number of DMA segments.  This may be
1263 		 * incremented by 1 if we do checksum offload (a descriptor
1264 		 * is used to set the checksum context).
1265 		 */
1266 		txs->txs_mbuf = m0;
1267 		txs->txs_firstdesc = sc->sc_txnext;
1268 		txs->txs_ndesc = dmamap->dm_nsegs;
1269 
1270 		/*
1271 		 * Set up checksum offload parameters for
1272 		 * this packet.
1273 		 */
1274 		if (m0->m_pkthdr.csum_flags &
1275 		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1276 			if (dge_tx_cksum(sc, txs, &cksumfields) != 0) {
1277 				/* Error message already displayed. */
1278 				bus_dmamap_unload(sc->sc_dmat, dmamap);
1279 				continue;
1280 			}
1281 		} else {
1282 			cksumfields = 0;
1283 		}
1284 
1285 		cksumcmd = TDESC_DCMD_IDE | TDESC_DTYP_DATA;
1286 
1287 		/*
1288 		 * Initialize the transmit descriptor.
1289 		 */
1290 		for (nexttx = sc->sc_txnext, seg = 0;
1291 		     seg < dmamap->dm_nsegs;
1292 		     seg++, nexttx = DGE_NEXTTX(nexttx)) {
1293 			/*
1294 			 * Note: we currently only use 32-bit DMA
1295 			 * addresses.
1296 			 */
1297 			sc->sc_txdescs[nexttx].dt_baddrh = 0;
1298 			sc->sc_txdescs[nexttx].dt_baddrl =
1299 			    htole32(dmamap->dm_segs[seg].ds_addr);
1300 			sc->sc_txdescs[nexttx].dt_ctl =
1301 			    htole32(cksumcmd | dmamap->dm_segs[seg].ds_len);
1302 			sc->sc_txdescs[nexttx].dt_status = 0;
1303 			sc->sc_txdescs[nexttx].dt_popts = cksumfields;
1304 			sc->sc_txdescs[nexttx].dt_vlan = 0;
1305 			lasttx = nexttx;
1306 
1307 			DPRINTF(DGE_DEBUG_TX,
1308 			    ("%s: TX: desc %d: low 0x%08lx, len 0x%04lx\n",
1309 			    device_xname(&sc->sc_dev), nexttx,
1310 			    (unsigned long)le32toh(dmamap->dm_segs[seg].ds_addr),
1311 			    (unsigned long)le32toh(dmamap->dm_segs[seg].ds_len)));
1312 		}
1313 
1314 		KASSERT(lasttx != -1);
1315 
1316 		/*
1317 		 * Set up the command byte on the last descriptor of
1318 		 * the packet.  If we're in the interrupt delay window,
1319 		 * delay the interrupt.
1320 		 */
1321 		sc->sc_txdescs[lasttx].dt_ctl |=
1322 		    htole32(TDESC_DCMD_EOP | TDESC_DCMD_RS);
1323 
1324 		txs->txs_lastdesc = lasttx;
1325 
1326 		DPRINTF(DGE_DEBUG_TX,
1327 		    ("%s: TX: desc %d: cmdlen 0x%08x\n", device_xname(&sc->sc_dev),
1328 		    lasttx, le32toh(sc->sc_txdescs[lasttx].dt_ctl)));
1329 
1330 		/* Sync the descriptors we're using. */
1331 		DGE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1332 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1333 
1334 		/* Give the packet to the chip. */
1335 		CSR_WRITE(sc, DGE_TDT, nexttx);
1336 
1337 		DPRINTF(DGE_DEBUG_TX,
1338 		    ("%s: TX: TDT -> %d\n", device_xname(&sc->sc_dev), nexttx));
1339 
1340 		DPRINTF(DGE_DEBUG_TX,
1341 		    ("%s: TX: finished transmitting packet, job %d\n",
1342 		    device_xname(&sc->sc_dev), sc->sc_txsnext));
1343 
1344 		/* Advance the tx pointer. */
1345 		sc->sc_txfree -= txs->txs_ndesc;
1346 		sc->sc_txnext = nexttx;
1347 
1348 		sc->sc_txsfree--;
1349 		sc->sc_txsnext = DGE_NEXTTXS(sc->sc_txsnext);
1350 
1351 		/* Pass the packet to any BPF listeners. */
1352 		if (ifp->if_bpf)
1353 			bpf_ops->bpf_mtap(ifp->if_bpf, m0);
1354 	}
1355 
1356 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
1357 		/* No more slots; notify upper layer. */
1358 		ifp->if_flags |= IFF_OACTIVE;
1359 	}
1360 
1361 	if (sc->sc_txfree != ofree) {
1362 		/* Set a watchdog timer in case the chip flakes out. */
1363 		ifp->if_timer = 5;
1364 	}
1365 }
1366 
1367 /*
1368  * dge_watchdog:		[ifnet interface function]
1369  *
1370  *	Watchdog timer handler.
1371  */
1372 static void
1373 dge_watchdog(struct ifnet *ifp)
1374 {
1375 	struct dge_softc *sc = ifp->if_softc;
1376 
1377 	/*
1378 	 * Since we're using delayed interrupts, sweep up
1379 	 * before we report an error.
1380 	 */
1381 	dge_txintr(sc);
1382 
1383 	if (sc->sc_txfree != DGE_NTXDESC) {
1384 		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1385 		    device_xname(&sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
1386 		    sc->sc_txnext);
1387 		ifp->if_oerrors++;
1388 
1389 		/* Reset the interface. */
1390 		(void) dge_init(ifp);
1391 	}
1392 
1393 	/* Try to get more packets going. */
1394 	dge_start(ifp);
1395 }
1396 
1397 /*
1398  * dge_ioctl:		[ifnet interface function]
1399  *
1400  *	Handle control requests from the operator.
1401  */
1402 static int
1403 dge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1404 {
1405 	struct dge_softc *sc = ifp->if_softc;
1406 	struct ifreq *ifr = (struct ifreq *) data;
1407 	pcireg_t preg;
1408 	int s, error, mmrbc;
1409 
1410 	s = splnet();
1411 
1412 	switch (cmd) {
1413 	case SIOCSIFMEDIA:
1414 	case SIOCGIFMEDIA:
1415 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1416 		break;
1417 
1418 	case SIOCSIFMTU:
1419 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > DGE_MAX_MTU)
1420 			error = EINVAL;
1421 		else if ((error = ifioctl_common(ifp, cmd, data)) != ENETRESET)
1422 			break;
1423 		else if (ifp->if_flags & IFF_UP)
1424 			error = (*ifp->if_init)(ifp);
1425 		else
1426 			error = 0;
1427 		break;
1428 
1429	case SIOCSIFFLAGS:
1430 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1431 			break;
1432 		/* extract link flags */
1433 		if ((ifp->if_flags & IFF_LINK0) == 0 &&
1434 		    (ifp->if_flags & IFF_LINK1) == 0)
1435 			mmrbc = PCIX_MMRBC_512;
1436 		else if ((ifp->if_flags & IFF_LINK0) == 0 &&
1437 		    (ifp->if_flags & IFF_LINK1) != 0)
1438 			mmrbc = PCIX_MMRBC_1024;
1439 		else if ((ifp->if_flags & IFF_LINK0) != 0 &&
1440 		    (ifp->if_flags & IFF_LINK1) == 0)
1441 			mmrbc = PCIX_MMRBC_2048;
1442 		else
1443 			mmrbc = PCIX_MMRBC_4096;
1444 		if (mmrbc != sc->sc_mmrbc) {
1445 			preg = pci_conf_read(sc->sc_pc, sc->sc_pt,DGE_PCIX_CMD);
1446 			preg &= ~PCIX_MMRBC_MSK;
1447 			preg |= mmrbc;
1448 			pci_conf_write(sc->sc_pc, sc->sc_pt,DGE_PCIX_CMD, preg);
1449 			sc->sc_mmrbc = mmrbc;
1450 		}
1451		/* FALLTHROUGH */
1452 	default:
1453 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
1454 			break;
1455 
1456 		error = 0;
1457 
1458 		if (cmd == SIOCSIFCAP)
1459 			error = (*ifp->if_init)(ifp);
1460 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
1461 			;
1462 		else if (ifp->if_flags & IFF_RUNNING) {
1463 			/*
1464 			 * Multicast list has changed; set the hardware filter
1465 			 * accordingly.
1466 			 */
1467 			dge_set_filter(sc);
1468 		}
1469 		break;
1470 	}
1471 
1472 	/* Try to get more packets going. */
1473 	dge_start(ifp);
1474 
1475 	splx(s);
1476 	return (error);
1477 }
1478 
1479 /*
1480  * dge_intr:
1481  *
1482  *	Interrupt service routine.
1483  */
1484 static int
1485 dge_intr(void *arg)
1486 {
1487 	struct dge_softc *sc = arg;
1488 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1489 	uint32_t icr;
1490 	int wantinit, handled = 0;
1491 
1492 	for (wantinit = 0; wantinit == 0;) {
1493 		icr = CSR_READ(sc, DGE_ICR);
1494 		if ((icr & sc->sc_icr) == 0)
1495 			break;
1496 
1497 #if 0 /*NRND > 0*/
1498 		if (RND_ENABLED(&sc->rnd_source))
1499 			rnd_add_uint32(&sc->rnd_source, icr);
1500 #endif
1501 
1502 		handled = 1;
1503 
1504 #if defined(DGE_DEBUG) || defined(DGE_EVENT_COUNTERS)
1505 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
1506 			DPRINTF(DGE_DEBUG_RX,
1507 			    ("%s: RX: got Rx intr 0x%08x\n",
1508 			    device_xname(&sc->sc_dev),
1509 			    icr & (ICR_RXDMT0|ICR_RXT0)));
1510 			DGE_EVCNT_INCR(&sc->sc_ev_rxintr);
1511 		}
1512 #endif
1513 		dge_rxintr(sc);
1514 
1515 #if defined(DGE_DEBUG) || defined(DGE_EVENT_COUNTERS)
1516 		if (icr & ICR_TXDW) {
1517 			DPRINTF(DGE_DEBUG_TX,
1518 			    ("%s: TX: got TXDW interrupt\n",
1519 			    device_xname(&sc->sc_dev)));
1520 			DGE_EVCNT_INCR(&sc->sc_ev_txdw);
1521 		}
1522 		if (icr & ICR_TXQE)
1523 			DGE_EVCNT_INCR(&sc->sc_ev_txqe);
1524 #endif
1525 		dge_txintr(sc);
1526 
1527 		if (icr & (ICR_LSC|ICR_RXSEQ)) {
1528 			DGE_EVCNT_INCR(&sc->sc_ev_linkintr);
1529 			dge_linkintr(sc, icr);
1530 		}
1531 
1532 		if (icr & ICR_RXO) {
1533 			printf("%s: Receive overrun\n", device_xname(&sc->sc_dev));
1534 			wantinit = 1;
1535 		}
1536 	}
1537 
1538 	if (handled) {
1539 		if (wantinit)
1540 			dge_init(ifp);
1541 
1542 		/* Try to get more packets going. */
1543 		dge_start(ifp);
1544 	}
1545 
1546 	return (handled);
1547 }
1548 
1549 /*
1550  * dge_txintr:
1551  *
1552  *	Helper; handle transmit interrupts.
1553  */
1554 static void
1555 dge_txintr(struct dge_softc *sc)
1556 {
1557 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1558 	struct dge_txsoft *txs;
1559 	uint8_t status;
1560 	int i;
1561 
1562 	ifp->if_flags &= ~IFF_OACTIVE;
1563 
1564 	/*
1565 	 * Go through the Tx list and free mbufs for those
1566 	 * frames which have been transmitted.
1567 	 */
1568 	for (i = sc->sc_txsdirty; sc->sc_txsfree != DGE_TXQUEUELEN;
1569 	     i = DGE_NEXTTXS(i), sc->sc_txsfree++) {
1570 		txs = &sc->sc_txsoft[i];
1571 
1572 		DPRINTF(DGE_DEBUG_TX,
1573 		    ("%s: TX: checking job %d\n", device_xname(&sc->sc_dev), i));
1574 
1575 		DGE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1576 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1577 
1578 		status =
1579 		    sc->sc_txdescs[txs->txs_lastdesc].dt_status;
1580 		if ((status & TDESC_STA_DD) == 0) {
1581 			DGE_CDTXSYNC(sc, txs->txs_lastdesc, 1,
1582 			    BUS_DMASYNC_PREREAD);
1583 			break;
1584 		}
1585 
1586 		DPRINTF(DGE_DEBUG_TX,
1587 		    ("%s: TX: job %d done: descs %d..%d\n",
1588 		    device_xname(&sc->sc_dev), i, txs->txs_firstdesc,
1589 		    txs->txs_lastdesc));
1590 
1591 		ifp->if_opackets++;
1592 		sc->sc_txfree += txs->txs_ndesc;
1593 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1594 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1595 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1596 		m_freem(txs->txs_mbuf);
1597 		txs->txs_mbuf = NULL;
1598 	}
1599 
1600 	/* Update the dirty transmit buffer pointer. */
1601 	sc->sc_txsdirty = i;
1602 	DPRINTF(DGE_DEBUG_TX,
1603 	    ("%s: TX: txsdirty -> %d\n", device_xname(&sc->sc_dev), i));
1604 
1605 	/*
1606 	 * If there are no more pending transmissions, cancel the watchdog
1607 	 * timer.
1608 	 */
1609 	if (sc->sc_txsfree == DGE_TXQUEUELEN)
1610 		ifp->if_timer = 0;
1611 }
1612 
1613 /*
1614  * dge_rxintr:
1615  *
1616  *	Helper; handle receive interrupts.
1617  */
1618 static void
1619 dge_rxintr(struct dge_softc *sc)
1620 {
1621 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1622 	struct dge_rxsoft *rxs;
1623 	struct mbuf *m;
1624 	int i, len;
1625 	uint8_t status, errors;
1626 
1627 	for (i = sc->sc_rxptr;; i = DGE_NEXTRX(i)) {
1628 		rxs = &sc->sc_rxsoft[i];
1629 
1630 		DPRINTF(DGE_DEBUG_RX,
1631 		    ("%s: RX: checking descriptor %d\n",
1632 		    device_xname(&sc->sc_dev), i));
1633 
1634 		DGE_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1635 
1636 		status = sc->sc_rxdescs[i].dr_status;
1637 		errors = sc->sc_rxdescs[i].dr_errors;
1638 		len = le16toh(sc->sc_rxdescs[i].dr_len);
1639 
1640 		if ((status & RDESC_STS_DD) == 0) {
1641 			/*
1642 			 * We have processed all of the receive descriptors.
1643 			 */
1644 			DGE_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1645 			break;
1646 		}
1647 
1648 		if (__predict_false(sc->sc_rxdiscard)) {
1649 			DPRINTF(DGE_DEBUG_RX,
1650 			    ("%s: RX: discarding contents of descriptor %d\n",
1651 			    device_xname(&sc->sc_dev), i));
1652 			DGE_INIT_RXDESC(sc, i);
1653 			if (status & RDESC_STS_EOP) {
1654 				/* Reset our state. */
1655 				DPRINTF(DGE_DEBUG_RX,
1656 				    ("%s: RX: resetting rxdiscard -> 0\n",
1657 				    device_xname(&sc->sc_dev)));
1658 				sc->sc_rxdiscard = 0;
1659 			}
1660 			continue;
1661 		}
1662 
1663 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1664 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1665 
1666 		m = rxs->rxs_mbuf;
1667 
1668 		/*
1669 		 * Add a new receive buffer to the ring.
1670 		 */
1671 		if (dge_add_rxbuf(sc, i) != 0) {
1672 			/*
1673 			 * Failed, throw away what we've done so
1674 			 * far, and discard the rest of the packet.
1675 			 */
1676 			ifp->if_ierrors++;
1677 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1678 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1679 			DGE_INIT_RXDESC(sc, i);
1680 			if ((status & RDESC_STS_EOP) == 0)
1681 				sc->sc_rxdiscard = 1;
1682 			if (sc->sc_rxhead != NULL)
1683 				m_freem(sc->sc_rxhead);
1684 			DGE_RXCHAIN_RESET(sc);
1685 			DPRINTF(DGE_DEBUG_RX,
1686 			    ("%s: RX: Rx buffer allocation failed, "
1687 			    "dropping packet%s\n", device_xname(&sc->sc_dev),
1688 			    sc->sc_rxdiscard ? " (discard)" : ""));
1689 			continue;
1690 		}
1691 		DGE_INIT_RXDESC(sc, DGE_PREVRX(i)); /* Write the descriptor */
1692 
1693 		DGE_RXCHAIN_LINK(sc, m);
1694 
1695 		m->m_len = len;
1696 
1697 		DPRINTF(DGE_DEBUG_RX,
1698 		    ("%s: RX: buffer at %p len %d\n",
1699 		    device_xname(&sc->sc_dev), m->m_data, len));
1700 
1701 		/*
1702 		 * If this is not the end of the packet, keep
1703 		 * looking.
1704 		 */
1705 		if ((status & RDESC_STS_EOP) == 0) {
1706 			sc->sc_rxlen += len;
1707 			DPRINTF(DGE_DEBUG_RX,
1708 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
1709 			    device_xname(&sc->sc_dev), sc->sc_rxlen));
1710 			continue;
1711 		}
1712 
1713 		/*
1714 		 * Okay, we have the entire packet now...
1715 		 */
1716 		*sc->sc_rxtailp = NULL;
1717 		m = sc->sc_rxhead;
1718 		len += sc->sc_rxlen;
1719 
1720 		DGE_RXCHAIN_RESET(sc);
1721 
1722 		DPRINTF(DGE_DEBUG_RX,
1723 		    ("%s: RX: have entire packet, len -> %d\n",
1724 		    device_xname(&sc->sc_dev), len));
1725 
1726 		/*
1727 		 * If an error occurred, update stats and drop the packet.
1728 		 */
1729 		if (errors &
1730 		     (RDESC_ERR_CE|RDESC_ERR_SE|RDESC_ERR_P|RDESC_ERR_RXE)) {
1731 			ifp->if_ierrors++;
1732 			if (errors & RDESC_ERR_SE)
1733 				printf("%s: symbol error\n",
1734 				    device_xname(&sc->sc_dev));
1735 			else if (errors & RDESC_ERR_P)
1736 				printf("%s: parity error\n",
1737 				    device_xname(&sc->sc_dev));
1738 			else if (errors & RDESC_ERR_CE)
1739 				printf("%s: CRC error\n",
1740 				    device_xname(&sc->sc_dev));
1741 			m_freem(m);
1742 			continue;
1743 		}
1744 
1745 		/*
1746 		 * No errors.  Receive the packet.
1747 		 */
1748 		m->m_pkthdr.rcvif = ifp;
1749 		m->m_pkthdr.len = len;
1750 
1751 		/*
1752 		 * Set up checksum info for this packet.
1753 		 */
1754 		if (status & RDESC_STS_IPCS) {
1755 			DGE_EVCNT_INCR(&sc->sc_ev_rxipsum);
1756 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1757 			if (errors & RDESC_ERR_IPE)
1758 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1759 		}
1760 		if (status & RDESC_STS_TCPCS) {
1761 			/*
1762 			 * Note: we don't know if this was TCP or UDP,
1763 			 * so we just set both bits, and expect the
1764 			 * upper layers to deal.
1765 			 */
1766 			DGE_EVCNT_INCR(&sc->sc_ev_rxtusum);
1767 			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
1768 			if (errors & RDESC_ERR_TCPE)
1769 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1770 		}
1771 
1772 		ifp->if_ipackets++;
1773 
1774 		/* Pass this up to any BPF listeners. */
1775 		if (ifp->if_bpf)
1776 			bpf_ops->bpf_mtap(ifp->if_bpf, m);
1777 
1778 		/* Pass it on. */
1779 		(*ifp->if_input)(ifp, m);
1780 	}
1781 
1782 	/* Update the receive pointer. */
1783 	sc->sc_rxptr = i;
1784 
1785 	DPRINTF(DGE_DEBUG_RX,
1786 	    ("%s: RX: rxptr -> %d\n", device_xname(&sc->sc_dev), i));
1787 }
1788 
1789 /*
1790  * dge_linkintr:
1791  *
1792  *	Helper; handle link interrupts.
1793  */
1794 static void
1795 dge_linkintr(struct dge_softc *sc, uint32_t icr)
1796 {
1797 	uint32_t status;
1798 
1799 	if (icr & ICR_LSC) {
1800 		status = CSR_READ(sc, DGE_STATUS);
1801 		if (status & STATUS_LINKUP) {
1802 			DPRINTF(DGE_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
1803 			    device_xname(&sc->sc_dev)));
1804 		} else {
1805 			DPRINTF(DGE_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
1806 			    device_xname(&sc->sc_dev)));
1807 		}
1808 	} else if (icr & ICR_RXSEQ) {
1809 		DPRINTF(DGE_DEBUG_LINK,
1810 		    ("%s: LINK: Receive sequence error\n",
1811 		    device_xname(&sc->sc_dev)));
1812 	}
1813 	/* XXX - fix errata */
1814 }
1815 
1816 /*
1817  * dge_reset:
1818  *
1819  *	Reset the i82597EX chip.
1820  */
1821 static void
1822 dge_reset(struct dge_softc *sc)
1823 {
1824 	int i;
1825 
1826 	/*
1827 	 * Do a chip reset.
1828 	 */
1829 	CSR_WRITE(sc, DGE_CTRL0, CTRL0_RST | sc->sc_ctrl0);
1830 
1831 	delay(10000);
1832 
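	/* The RST bit self-clears once the reset has completed. */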
1833 	for (i = 0; i < 1000; i++) {
1834 		if ((CSR_READ(sc, DGE_CTRL0) & CTRL0_RST) == 0)
1835 			break;
1836 		delay(20);
1837 	}
1838 
1839 	if (CSR_READ(sc, DGE_CTRL0) & CTRL0_RST)
1840 		printf("%s: WARNING: reset failed to complete\n",
1841 		    device_xname(&sc->sc_dev));
1842 	/*
1843 	 * Reset the EEPROM logic.
1844 	 * This will cause the chip to reread its default values,
1845 	 * which doesn't happen otherwise (errata).
1846 	 */
1847 	CSR_WRITE(sc, DGE_CTRL1, CTRL1_EE_RST);
1848 	delay(10000);
1849 }
1850 
1851 /*
1852  * dge_init:		[ifnet interface function]
1853  *
1854  *	Initialize the interface.  Must be called at splnet().
1855  */
1856 static int
1857 dge_init(struct ifnet *ifp)
1858 {
1859 	struct dge_softc *sc = ifp->if_softc;
1860 	struct dge_rxsoft *rxs;
1861 	int i, error = 0;
1862 	uint32_t reg;
1863 
1864 	/*
1865 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
1866 	 * There is a small but measurable benefit to avoiding the adjustment
1867 	 * of the descriptor so that the headers are aligned, for normal mtu,
1868 	 * on such platforms.  One possibility is that the DMA itself is
1869 	 * slightly more efficient if the front of the entire packet (instead
1870 	 * of the front of the headers) is aligned.
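	 *
	 * (A 2-byte tweak works because ETHER_HDR_LEN is 14: offsetting
	 * the buffer by two bytes places the IP header that follows the
	 * Ethernet header on a 4-byte boundary.)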
1871 	 *
1872 	 * Note we must always set align_tweak to 0 if we are using
1873 	 * jumbo frames.
1874 	 */
1875 #ifdef __NO_STRICT_ALIGNMENT
1876 	sc->sc_align_tweak = 0;
1877 #else
1878 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
1879 		sc->sc_align_tweak = 0;
1880 	else
1881 		sc->sc_align_tweak = 2;
1882 #endif /* __NO_STRICT_ALIGNMENT */
1883 
1884 	/* Cancel any pending I/O. */
1885 	dge_stop(ifp, 0);
1886 
1887 	/* Reset the chip to a known state. */
1888 	dge_reset(sc);
1889 
1890 	/* Initialize the transmit descriptor ring. */
1891 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1892 	DGE_CDTXSYNC(sc, 0, DGE_NTXDESC,
1893 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1894 	sc->sc_txfree = DGE_NTXDESC;
1895 	sc->sc_txnext = 0;
1896 
1897 	sc->sc_txctx_ipcs = 0xffffffff;
1898 	sc->sc_txctx_tucs = 0xffffffff;
1899 
1900 	CSR_WRITE(sc, DGE_TDBAH, 0);
1901 	CSR_WRITE(sc, DGE_TDBAL, DGE_CDTXADDR(sc, 0));
1902 	CSR_WRITE(sc, DGE_TDLEN, sizeof(sc->sc_txdescs));
1903 	CSR_WRITE(sc, DGE_TDH, 0);
1904 	CSR_WRITE(sc, DGE_TDT, 0);
1905 	CSR_WRITE(sc, DGE_TIDV, TIDV);
1906 
1907 #if 0
1908 	CSR_WRITE(sc, DGE_TXDCTL, TXDCTL_PTHRESH(0) |
1909 	    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1910 #endif
1911 	CSR_WRITE(sc, DGE_RXDCTL,
1912 	    RXDCTL_PTHRESH(RXDCTL_PTHRESH_VAL) |
1913 	    RXDCTL_HTHRESH(RXDCTL_HTHRESH_VAL) |
1914 	    RXDCTL_WTHRESH(RXDCTL_WTHRESH_VAL));
1915 
1916 	/* Initialize the transmit job descriptors. */
1917 	for (i = 0; i < DGE_TXQUEUELEN; i++)
1918 		sc->sc_txsoft[i].txs_mbuf = NULL;
1919 	sc->sc_txsfree = DGE_TXQUEUELEN;
1920 	sc->sc_txsnext = 0;
1921 	sc->sc_txsdirty = 0;
1922 
1923 	/*
1924 	 * Initialize the receive descriptor and receive job
1925 	 * descriptor rings.
1926 	 */
1927 	CSR_WRITE(sc, DGE_RDBAH, 0);
1928 	CSR_WRITE(sc, DGE_RDBAL, DGE_CDRXADDR(sc, 0));
1929 	CSR_WRITE(sc, DGE_RDLEN, sizeof(sc->sc_rxdescs));
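	/*
	 * Keep the head pointer DGE_RXSPACE slots ahead of the tail;
	 * sc_rxptr is initialized to match below.
	 */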
1930 	CSR_WRITE(sc, DGE_RDH, DGE_RXSPACE);
1931 	CSR_WRITE(sc, DGE_RDT, 0);
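	/*
	 * XXX Bit 31 of RDTR is assumed to be FPD (flush partial
	 * descriptor block), as on related Intel MACs.
	 */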
1932 	CSR_WRITE(sc, DGE_RDTR, RDTR | 0x80000000);
1933 	CSR_WRITE(sc, DGE_FCRTL, FCRTL | FCRTL_XONE);
1934 	CSR_WRITE(sc, DGE_FCRTH, FCRTH);
1935 
1936 	for (i = 0; i < DGE_NRXDESC; i++) {
1937 		rxs = &sc->sc_rxsoft[i];
1938 		if (rxs->rxs_mbuf == NULL) {
1939 			if ((error = dge_add_rxbuf(sc, i)) != 0) {
1940 				printf("%s: unable to allocate or map rx "
1941 				    "buffer %d, error = %d\n",
1942 				    device_xname(&sc->sc_dev), i, error);
1943 				/*
1944 				 * XXX Should attempt to run with fewer receive
1945 				 * XXX buffers instead of just failing.
1946 				 */
1947 				dge_rxdrain(sc);
1948 				goto out;
1949 			}
1950 		}
1951 		DGE_INIT_RXDESC(sc, i);
1952 	}
1953 	sc->sc_rxptr = DGE_RXSPACE;
1954 	sc->sc_rxdiscard = 0;
1955 	DGE_RXCHAIN_RESET(sc);
1956 
1957 	if (sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) {
1958 		sc->sc_ctrl0 |= CTRL0_JFE;
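		/* MFS takes the maximum frame size in its upper 16 bits. */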
1959 		CSR_WRITE(sc, DGE_MFS, ETHER_MAX_LEN_JUMBO << 16);
1960 	}
1961 
1962 	/* Write the control registers. */
1963 	CSR_WRITE(sc, DGE_CTRL0, sc->sc_ctrl0);
1964 
1965 	/*
1966 	 * Set up checksum offload parameters.
1967 	 */
1968 	reg = CSR_READ(sc, DGE_RXCSUM);
1969 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
1970 		reg |= RXCSUM_IPOFL;
1971 	else
1972 		reg &= ~RXCSUM_IPOFL;
1973 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
1974 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
1975 	else {
1976 		reg &= ~RXCSUM_TUOFL;
1977 		if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) == 0)
1978 			reg &= ~RXCSUM_IPOFL;
1979 	}
1980 	CSR_WRITE(sc, DGE_RXCSUM, reg);
1981 
1982 	/*
1983 	 * Set up the interrupt registers.
1984 	 */
1985 	CSR_WRITE(sc, DGE_IMC, 0xffffffffU);
1986 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
1987 	    ICR_RXO | ICR_RXT0;
1988 
1989 	CSR_WRITE(sc, DGE_IMS, sc->sc_icr);
1990 
1991 	/*
1992 	 * Set up the transmit control register.
1993 	 */
1994 	sc->sc_tctl = TCTL_TCE|TCTL_TPDE|TCTL_TXEN;
1995 	CSR_WRITE(sc, DGE_TCTL, sc->sc_tctl);
1996 
1997 	/*
1998 	 * Set up the receive control register; we actually program
1999 	 * the register when we set the receive filter.  Use multicast
2000 	 * address offset type 0.
2001 	 */
2002 	sc->sc_mchash_type = 0;
2003 
2004 	sc->sc_rctl = RCTL_RXEN | RCTL_RDMTS_12 | RCTL_RPDA_MC |
2005 	    RCTL_CFF | RCTL_SECRC | RCTL_MO(sc->sc_mchash_type);
2006 
2007 #ifdef DGE_OFFBYONE_RXBUG
2008 	sc->sc_rctl |= RCTL_BSIZE_16k;
2009 #else
2010 	switch (MCLBYTES) {
2011 	case 2048:
2012 		sc->sc_rctl |= RCTL_BSIZE_2k;
2013 		break;
2014 	case 4096:
2015 		sc->sc_rctl |= RCTL_BSIZE_4k;
2016 		break;
2017 	case 8192:
2018 		sc->sc_rctl |= RCTL_BSIZE_8k;
2019 		break;
2020 	case 16384:
2021 		sc->sc_rctl |= RCTL_BSIZE_16k;
2022 		break;
2023 	default:
2024 		panic("dge_init: MCLBYTES %d unsupported", MCLBYTES);
2025 	}
2026 #endif
2027 
2028 	/* Set the receive filter; this also programs RCTL. */
2030 	dge_set_filter(sc);
2031 
2032 	/* ...all done! */
2033 	ifp->if_flags |= IFF_RUNNING;
2034 	ifp->if_flags &= ~IFF_OACTIVE;
2035 
2036  out:
2037 	if (error)
2038 		printf("%s: interface not running\n", device_xname(&sc->sc_dev));
2039 	return (error);
2040 }
2041 
2042 /*
2043  * dge_rxdrain:
2044  *
2045  *	Drain the receive queue.
2046  */
2047 static void
2048 dge_rxdrain(struct dge_softc *sc)
2049 {
2050 	struct dge_rxsoft *rxs;
2051 	int i;
2052 
2053 	for (i = 0; i < DGE_NRXDESC; i++) {
2054 		rxs = &sc->sc_rxsoft[i];
2055 		if (rxs->rxs_mbuf != NULL) {
2056 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2057 			m_freem(rxs->rxs_mbuf);
2058 			rxs->rxs_mbuf = NULL;
2059 		}
2060 	}
2061 }
2062 
2063 /*
2064  * dge_stop:		[ifnet interface function]
2065  *
2066  *	Stop transmission on the interface.
2067  */
2068 static void
2069 dge_stop(struct ifnet *ifp, int disable)
2070 {
2071 	struct dge_softc *sc = ifp->if_softc;
2072 	struct dge_txsoft *txs;
2073 	int i;
2074 
2075 	/* Stop the transmit and receive processes. */
2076 	CSR_WRITE(sc, DGE_TCTL, 0);
2077 	CSR_WRITE(sc, DGE_RCTL, 0);
2078 
2079 	/* Release any queued transmit buffers. */
2080 	for (i = 0; i < DGE_TXQUEUELEN; i++) {
2081 		txs = &sc->sc_txsoft[i];
2082 		if (txs->txs_mbuf != NULL) {
2083 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2084 			m_freem(txs->txs_mbuf);
2085 			txs->txs_mbuf = NULL;
2086 		}
2087 	}
2088 
2089 	/* Mark the interface as down and cancel the watchdog timer. */
2090 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2091 	ifp->if_timer = 0;
2092 
2093 	if (disable)
2094 		dge_rxdrain(sc);
2095 }
2096 
2097 /*
2098  * dge_add_rxbuf:
2099  *
2100  *	Add a receive buffer to the indicated descriptor.
2101  */
2102 static int
2103 dge_add_rxbuf(struct dge_softc *sc, int idx)
2104 {
2105 	struct dge_rxsoft *rxs = &sc->sc_rxsoft[idx];
2106 	struct mbuf *m;
2107 	int error;
2108 #ifdef DGE_OFFBYONE_RXBUG
2109 	void *buf;
2110 #endif
2111 
2112 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2113 	if (m == NULL)
2114 		return (ENOBUFS);
2115 
2116 #ifdef DGE_OFFBYONE_RXBUG
2117 	if ((buf = dge_getbuf(sc)) == NULL)
2118 		return ENOBUFS;
2119 
2120 	m->m_len = m->m_pkthdr.len = DGE_BUFFER_SIZE;
2121 	MEXTADD(m, buf, DGE_BUFFER_SIZE, M_DEVBUF, dge_freebuf, sc);
2122 	m->m_flags |= M_EXT_RW;
2123 
2124 	if (rxs->rxs_mbuf != NULL)
2125 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2126 	rxs->rxs_mbuf = m;
2127 
2128 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, buf,
2129 	    DGE_BUFFER_SIZE, NULL, BUS_DMA_READ|BUS_DMA_NOWAIT);
2130 #else
2131 	MCLGET(m, M_DONTWAIT);
2132 	if ((m->m_flags & M_EXT) == 0) {
2133 		m_freem(m);
2134 		return (ENOBUFS);
2135 	}
2136 
2137 	if (rxs->rxs_mbuf != NULL)
2138 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2139 
2140 	rxs->rxs_mbuf = m;
2141 
2142 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2143 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2144 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
2145 #endif
2146 	if (error) {
2147 		printf("%s: unable to load rx DMA map %d, error = %d\n",
2148 		    device_xname(&sc->sc_dev), idx, error);
2149 		panic("dge_add_rxbuf");	/* XXX XXX XXX */
2150 	}
2151 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2152 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2153 
2154 	return (0);
2155 }
2156 
2157 /*
2158  * dge_set_ral:
2159  *
2160  *	Set an entry in the receive address list.
2161  */
2162 static void
2163 dge_set_ral(struct dge_softc *sc, const uint8_t *enaddr, int idx)
2164 {
2165 	uint32_t ral_lo, ral_hi;
2166 
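	/*
	 * Pack the six address octets into the RAL/RAH register pair,
	 * least significant octet first; RAH_AV marks the slot valid.
	 * A NULL enaddr clears the slot instead.
	 */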
2167 	if (enaddr != NULL) {
2168 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2169 		    (enaddr[3] << 24);
2170 		ral_hi = enaddr[4] | (enaddr[5] << 8);
2171 		ral_hi |= RAH_AV;
2172 	} else {
2173 		ral_lo = 0;
2174 		ral_hi = 0;
2175 	}
2176 	CSR_WRITE(sc, RA_ADDR(DGE_RAL, idx), ral_lo);
2177 	CSR_WRITE(sc, RA_ADDR(DGE_RAH, idx), ral_hi);
2178 }
2179 
2180 /*
2181  * dge_mchash:
2182  *
2183  *	Compute the hash of the multicast address for the 4096-bit
2184  *	multicast filter.
2185  */
2186 static uint32_t
2187 dge_mchash(struct dge_softc *sc, const uint8_t *enaddr)
2188 {
2189 	static const int lo_shift[4] = { 4, 3, 2, 0 };
2190 	static const int hi_shift[4] = { 4, 5, 6, 8 };
2191 	uint32_t hash;
2192 
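	/*
	 * Twelve hash bits are drawn from the last two octets of the
	 * address; sc_mchash_type selects how they are combined.  For
	 * the default type 0 this reduces to
	 *
	 *	hash = (enaddr[4] >> 4) | (enaddr[5] << 4);
	 *
	 * so 01:00:5e:00:00:01 (the IPv4 all-hosts group) hashes
	 * to 0x010.
	 */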
2193 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2194 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2195 
2196 	return (hash & 0xfff);
2197 }
2198 
2199 /*
2200  * dge_set_filter:
2201  *
2202  *	Set up the receive filter.
2203  */
2204 static void
2205 dge_set_filter(struct dge_softc *sc)
2206 {
2207 	struct ethercom *ec = &sc->sc_ethercom;
2208 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2209 	struct ether_multi *enm;
2210 	struct ether_multistep step;
2211 	uint32_t hash, reg, bit;
2212 	int i;
2213 
2214 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2215 
2216 	if (ifp->if_flags & IFF_BROADCAST)
2217 		sc->sc_rctl |= RCTL_BAM;
2218 	if (ifp->if_flags & IFF_PROMISC) {
2219 		sc->sc_rctl |= RCTL_UPE;
2220 		goto allmulti;
2221 	}
2222 
2223 	/*
2224 	 * Set the station address in the first RAL slot, and
2225 	 * clear the remaining slots.
2226 	 */
2227 	dge_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2228 	for (i = 1; i < RA_TABSIZE; i++)
2229 		dge_set_ral(sc, NULL, i);
2230 
2231 	/* Clear out the multicast table. */
2232 	for (i = 0; i < MC_TABSIZE; i++)
2233 		CSR_WRITE(sc, DGE_MTA + (i << 2), 0);
2234 
2235 	ETHER_FIRST_MULTI(step, ec, enm);
2236 	while (enm != NULL) {
2237 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2238 			/*
2239 			 * We must listen to a range of multicast addresses.
2240 			 * For now, just accept all multicasts, rather than
2241 			 * trying to set only those filter bits needed to match
2242 			 * the range.  (At this time, the only use of address
2243 			 * ranges is for IP multicast routing, for which the
2244 			 * range is big enough to require all bits set.)
2245 			 */
2246 			goto allmulti;
2247 		}
2248 
2249 		hash = dge_mchash(sc, enm->enm_addrlo);
2250 
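		/*
		 * The 4096-bit table is laid out as 128 32-bit words:
		 * hash bits 11:5 select the word, bits 4:0 the bit
		 * within it.
		 */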
2251 		reg = (hash >> 5) & 0x7f;
2252 		bit = hash & 0x1f;
2253 
2254 		hash = CSR_READ(sc, DGE_MTA + (reg << 2));
2255 		hash |= 1U << bit;
2256 
2257 		CSR_WRITE(sc, DGE_MTA + (reg << 2), hash);
2258 
2259 		ETHER_NEXT_MULTI(step, enm);
2260 	}
2261 
2262 	ifp->if_flags &= ~IFF_ALLMULTI;
2263 	goto setit;
2264 
2265  allmulti:
2266 	ifp->if_flags |= IFF_ALLMULTI;
2267 	sc->sc_rctl |= RCTL_MPE;
2268 
2269  setit:
2270 	CSR_WRITE(sc, DGE_RCTL, sc->sc_rctl);
2271 }
2272 
2273 /*
2274  * Read in the EEPROM info and verify the checksum.  Returns nonzero
 * on checksum failure.
2275  */
2276 int
2277 dge_read_eeprom(struct dge_softc *sc)
2278 {
2279 	uint16_t cksum;
2280 	int i;
2281 
2282 	cksum = 0;
2283 	for (i = 0; i < EEPROM_SIZE; i++) {
2284 		sc->sc_eeprom[i] = dge_eeprom_word(sc, i);
2285 		cksum += sc->sc_eeprom[i];
2286 	}
2287 	return cksum != EEPROM_CKSUM;
2288 }
2289 
2290 
2291 /*
2292  * Read a 16-bit word from address addr in the serial EEPROM.
2293  */
2294 uint16_t
2295 dge_eeprom_word(struct dge_softc *sc, int addr)
2296 {
2297 	uint32_t reg;
2298 	uint16_t rval = 0;
2299 	int i;
2300 
2301 	reg = CSR_READ(sc, DGE_EECD) & ~(EECD_SK|EECD_DI|EECD_CS);
2302 
2303 	/* Lower clock pulse (and data in to chip) */
2304 	CSR_WRITE(sc, DGE_EECD, reg);
2305 	/* Select chip */
2306 	CSR_WRITE(sc, DGE_EECD, reg|EECD_CS);
2307 
2308 	/* Send read command: start bit (1) followed by the READ opcode (10) */
2309 	dge_eeprom_clockout(sc, 1);
2310 	dge_eeprom_clockout(sc, 1);
2311 	dge_eeprom_clockout(sc, 0);
2312 
2313 	/* Send the 6-bit word address, MSB first */
2314 	for (i = 5; i >= 0; i--)
2315 		dge_eeprom_clockout(sc, (addr >> i) & 1);
2316 
2317 	/* Read data */
2318 	for (i = 0; i < 16; i++) {
2319 		rval <<= 1;
2320 		rval |= dge_eeprom_clockin(sc);
2321 	}
2322 
2323 	/* Deselect chip */
2324 	CSR_WRITE(sc, DGE_EECD, reg);
2325 
2326 	return rval;
2327 }
2328 
2329 /*
2330  * Clock out a single bit to the EEPROM.
2331  */
2332 void
2333 dge_eeprom_clockout(struct dge_softc *sc, int bit)
2334 {
2335 	int reg;
2336 
2337 	reg = CSR_READ(sc, DGE_EECD) & ~(EECD_DI|EECD_SK);
2338 	if (bit)
2339 		reg |= EECD_DI;
2340 
2341 	CSR_WRITE(sc, DGE_EECD, reg);
2342 	delay(2);
2343 	CSR_WRITE(sc, DGE_EECD, reg|EECD_SK);
2344 	delay(2);
2345 	CSR_WRITE(sc, DGE_EECD, reg);
2346 	delay(2);
2347 }
2348 
2349 /*
2350  * Clock in a single bit from EEPROM.
2351  */
2352 int
2353 dge_eeprom_clockin(struct dge_softc *sc)
2354 {
2355 	int reg, rv;
2356 
2357 	reg = CSR_READ(sc, DGE_EECD) & ~(EECD_DI|EECD_DO|EECD_SK);
2358 
2359 	CSR_WRITE(sc, DGE_EECD, reg|EECD_SK); /* Raise clock */
2360 	delay(2);
2361 	rv = (CSR_READ(sc, DGE_EECD) & EECD_DO) != 0; /* Get bit */
2362 	CSR_WRITE(sc, DGE_EECD, reg); /* Lower clock */
2363 	delay(2);
2364 
2365 	return rv;
2366 }
2367 
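/*
 * dge_xgmii_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status.
 */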
2368 static void
2369 dge_xgmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2370 {
2371 	struct dge_softc *sc = ifp->if_softc;
2372 
2373 	ifmr->ifm_status = IFM_AVALID;
2374 	ifmr->ifm_active = IFM_ETHER|IFM_10G_LR;
2375 
2376 	if (CSR_READ(sc, DGE_STATUS) & STATUS_LINKUP)
2377 		ifmr->ifm_status |= IFM_ACTIVE;
2378 }
2379 
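/*
 * phwait:
 *
 *	Helper; issue an MDIO command and busy-wait for the command
 *	bit to clear.  Returns the last value read from DGE_MDIO so
 *	the caller can check MDIO_CMD for a timeout.
 */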
2380 static inline int
2381 phwait(struct dge_softc *sc, int p, int r, int d, int type)
2382 {
2383 	int i, mdic;
2384 
2385 	CSR_WRITE(sc, DGE_MDIO,
2386 	    MDIO_PHY(p) | MDIO_REG(r) | MDIO_DEV(d) | type | MDIO_CMD);
2387 	for (i = 0; i < 10; i++) {
2388 		delay(10);
2389 		if (((mdic = CSR_READ(sc, DGE_MDIO)) & MDIO_CMD) == 0)
2390 			break;
2391 	}
2392 	return mdic;
2393 }
2394 
2395 
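/*
 * dge_xgmii_writereg:
 *
 *	Write a PHY register on the XGMII bus.
 */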
2396 static void
2397 dge_xgmii_writereg(device_t self, int phy, int reg, int val)
2398 {
2399 	struct dge_softc *sc = device_private(self);
2400 	int mdic;
2401 
2402 	CSR_WRITE(sc, DGE_MDIRW, val);
2403 	if (((mdic = phwait(sc, phy, reg, 1, MDIO_ADDR)) & MDIO_CMD)) {
2404 		printf("%s: address cycle timeout; phy %d reg %d\n",
2405 		    device_xname(&sc->sc_dev), phy, reg);
2406 		return;
2407 	}
2408 	if (((mdic = phwait(sc, phy, reg, 1, MDIO_WRITE)) & MDIO_CMD)) {
2409 		printf("%s: write cycle timeout; phy %d reg %d\n",
2410 		    device_xname(&sc->sc_dev), phy, reg);
2411 		return;
2412 	}
2413 }
2414 
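/*
 * dge_xgmii_reset:
 *
 *	Reset the PHY by setting BMCR_RESET through the MDIO interface.
 */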
2415 static void
2416 dge_xgmii_reset(struct dge_softc *sc)
2417 {
2418 	dge_xgmii_writereg(&sc->sc_dev, 0, 0, BMCR_RESET);
2419 }
2420 
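/*
 * dge_xgmii_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media.  The media on this
 *	interface is fixed, so there is nothing to do.
 */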
2421 static int
2422 dge_xgmii_mediachange(struct ifnet *ifp)
2423 {
2424 	return 0;
2425 }
2426