/*	$NetBSD: if_dge.c,v 1.40 2015/04/13 16:33:25 riastradh Exp $ */

/*
 * Copyright (c) 2004, SUNET, Swedish University Computer Network.
 * All rights reserved.
 *
 * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	SUNET, Swedish University Computer Network.
 * 4. The name of SUNET may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel 82597EX Ten Gigabit Ethernet controller.
 *
 * TODO (in no specific order):
 *	HW VLAN support.
 *	TSE offloading (needs kernel changes...)
 *	RAIDC (receive interrupt delay adaptation)
 *	Use memory > 4GB.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_dge.c,v 1.40 2015/04/13 16:33:25 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_dgereg.h>
/*
 * The receive engine may sometimes become off-by-one when writing back
 * chained descriptors.  Avoid this by allocating a large chunk of
 * memory and using it instead (to avoid chained descriptors).
 * This only happens with chained descriptors under heavy load.
 */
#define DGE_OFFBYONE_RXBUG

#define DGE_EVENT_COUNTERS
#define DGE_DEBUG

#ifdef DGE_DEBUG
#define DGE_DEBUG_LINK		0x01
#define DGE_DEBUG_TX		0x02
#define DGE_DEBUG_RX		0x04
#define DGE_DEBUG_CKSUM		0x08
int	dge_debug = 0;

#define DPRINTF(x, y)	if (dge_debug & (x)) printf y
#else
#define DPRINTF(x, y)	/* nothing */
#endif /* DGE_DEBUG */
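
/*
 * Debugging usage sketch (an illustration, not a driver interface):
 * OR the wanted DGE_DEBUG_* bits into dge_debug, e.g.
 *
 *	dge_debug = DGE_DEBUG_TX | DGE_DEBUG_RX;
 *
 * either by changing the initializer above or by patching the variable
 * at run time (e.g. from ddb(4)); the matching DPRINTF() sites below
 * then start logging.
 */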

/*
 * Transmit descriptor list size. We allow up to 100 DMA segments per
 * packet (Intel reports jumbo frame packets with as many as 80 DMA
 * segments when using 16k buffers).
 */
#define DGE_NTXSEGS		100
#define DGE_IFQUEUELEN		20000
#define DGE_TXQUEUELEN		2048
#define DGE_TXQUEUELEN_MASK	(DGE_TXQUEUELEN - 1)
#define DGE_TXQUEUE_GC		(DGE_TXQUEUELEN / 8)
#define DGE_NTXDESC		1024
#define DGE_NTXDESC_MASK	(DGE_NTXDESC - 1)
#define DGE_NEXTTX(x)		(((x) + 1) & DGE_NTXDESC_MASK)
#define DGE_NEXTTXS(x)		(((x) + 1) & DGE_TXQUEUELEN_MASK)
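
/*
 * Example of the mask arithmetic above (it relies only on the ring
 * sizes being powers of two): with DGE_NTXDESC == 1024,
 * DGE_NEXTTX(1023) == ((1023 + 1) & 1023) == 0, so the index wraps to
 * the start of the ring without a division or modulo.
 */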

/*
 * Receive descriptor list size.
 * Packet is of size MCLBYTES, and for jumbo packets buffers may
 * be chained.  Due to the nature of the card (high-speed), keep this
 * ring large. With 2k buffers the ring can store 400 jumbo packets,
 * which at full speed will be received in just under 3ms.
 */
#define DGE_NRXDESC		2048
#define DGE_NRXDESC_MASK	(DGE_NRXDESC - 1)
#define DGE_NEXTRX(x)		(((x) + 1) & DGE_NRXDESC_MASK)
/*
 * # of descriptors between head and written descriptors.
 * This is to work around two errata.
 */
#define DGE_RXSPACE		10
#define DGE_PREVRX(x)		(((x) - DGE_RXSPACE) & DGE_NRXDESC_MASK)
/*
 * Receive descriptor fetch thresholds. These are values recommended
 * by Intel; do not touch them unless you know what you are doing.
 */
#define RXDCTL_PTHRESH_VAL	128
#define RXDCTL_HTHRESH_VAL	16
#define RXDCTL_WTHRESH_VAL	16
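
/*
 * Rough meaning of the fields above (consult the 82597EX documentation
 * for the authoritative definitions): PTHRESH is the prefetch
 * threshold, HTHRESH the host threshold and WTHRESH the write-back
 * threshold, all counted in descriptors.
 */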

/*
 * Tweakable parameters; default values.
 */
#define FCRTH	0x30000 /* Send XOFF water mark */
#define FCRTL	0x28000 /* Send XON water mark */
#define RDTR	0x20	/* Interrupt delay after receive, .8192us units */
#define TIDV	0x20	/* Interrupt delay after send, .8192us units */

/*
 * Control structures are DMA'd to the i82597 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct dge_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct dge_tdes wcd_txdescs[DGE_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct dge_rdes wcd_rxdescs[DGE_NRXDESC];
};

#define DGE_CDOFF(x)	offsetof(struct dge_control_data, x)
#define DGE_CDTXOFF(x)	DGE_CDOFF(wcd_txdescs[(x)])
#define DGE_CDRXOFF(x)	DGE_CDOFF(wcd_rxdescs[(x)])

/*
 * The DGE interface has a higher max MTU size than normal jumbo frames.
 */
#define DGE_MAX_MTU	16288	/* Max MTU size for this interface */

/*
 * Software state for transmit jobs.
 */
struct dge_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct dge_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct dge_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */

	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pt;
	int sc_mmrbc;			/* Max PCIX memory read byte count */

	void *sc_ih;			/* interrupt cookie */

	struct ifmedia sc_media;

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int		sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct dge_txsoft sc_txsoft[DGE_TXQUEUELEN];
	struct dge_rxsoft sc_rxsoft[DGE_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct dge_control_data *sc_control_data;
#define sc_txdescs	sc_control_data->wcd_txdescs
#define sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef DGE_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr; /* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[DGE_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
#endif /* DGE_EVENT_COUNTERS */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl0;		/* prototype CTRL0 register */
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */

	int sc_mchash_type;		/* multicast filter offset */

	uint16_t sc_eeprom[EEPROM_SIZE];

	krndsource_t rnd_source; /* random source */
#ifdef DGE_OFFBYONE_RXBUG
	void *sc_bugbuf;
	SLIST_HEAD(, rxbugentry) sc_buglist;
	bus_dmamap_t sc_bugmap;
	struct rxbugentry *sc_entry;
#endif
};

#define DGE_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define DGE_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
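
/*
 * The tail-pointer idiom above keeps chain linking O(1): sc_rxtailp
 * always points at the m_next field of the last mbuf in the chain (or
 * at sc_rxhead while the chain is empty), so DGE_RXCHAIN_LINK() is two
 * pointer stores and never walks the list.
 */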

/* sc_flags */
#define DGE_F_BUS64		0x20	/* bus is 64-bit */
#define DGE_F_PCIX		0x40	/* bus is PCI-X */

#ifdef DGE_EVENT_COUNTERS
#define DGE_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define DGE_EVCNT_INCR(ev)	/* nothing */
#endif

#define CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define DGE_CDTXADDR(sc, x)	((sc)->sc_cddma + DGE_CDTXOFF((x)))
#define DGE_CDRXADDR(sc, x)	((sc)->sc_cddma + DGE_CDRXOFF((x)))

#define DGE_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > DGE_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    DGE_CDTXOFF(__x), sizeof(struct dge_tdes) *		\
		    (DGE_NTXDESC - __x), (ops));			\
		__n -= (DGE_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    DGE_CDTXOFF(__x), sizeof(struct dge_tdes) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define DGE_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   DGE_CDRXOFF((x)), sizeof(struct dge_rdes), (ops));		\
} while (/*CONSTCOND*/0)
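
/*
 * Note on DGE_CDTXSYNC() above: bus_dmamap_sync(9) operates on one
 * contiguous [offset, offset + len) window of the map, so a run of
 * descriptors that wraps past the end of the ring must be synced as
 * two calls.  A sketch of the equivalent logic:
 *
 *	if (x + n > DGE_NTXDESC) {
 *		sync descriptors x .. DGE_NTXDESC - 1;
 *		n -= DGE_NTXDESC - x;
 *		x = 0;
 *	}
 *	sync descriptors x .. x + n - 1;
 */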

#ifdef DGE_OFFBYONE_RXBUG
#define DGE_INIT_RXDESC(sc, x)						\
do {									\
	struct dge_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct dge_rdes *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__rxd->dr_baddrl = htole32(sc->sc_bugmap->dm_segs[0].ds_addr +	\
	    (mtod((__m), char *) - (char *)sc->sc_bugbuf));		\
	__rxd->dr_baddrh = 0;						\
	__rxd->dr_len = 0;						\
	__rxd->dr_cksum = 0;						\
	__rxd->dr_status = 0;						\
	__rxd->dr_errors = 0;						\
	__rxd->dr_special = 0;						\
	DGE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), DGE_RDT, (x));					\
} while (/*CONSTCOND*/0)
#else
#define DGE_INIT_RXDESC(sc, x)						\
do {									\
	struct dge_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct dge_rdes *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	__rxd->dr_baddrl =						\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +		\
		(sc)->sc_align_tweak);					\
	__rxd->dr_baddrh = 0;						\
	__rxd->dr_len = 0;						\
	__rxd->dr_cksum = 0;						\
	__rxd->dr_status = 0;						\
	__rxd->dr_errors = 0;						\
	__rxd->dr_special = 0;						\
	DGE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), DGE_RDT, (x));					\
} while (/*CONSTCOND*/0)
#endif

#ifdef DGE_OFFBYONE_RXBUG
/*
 * Allocation constants.  Much memory may be used for this.
 */
#ifndef DGE_BUFFER_SIZE
#define DGE_BUFFER_SIZE DGE_MAX_MTU
#endif
#define DGE_NBUFFERS	(4*DGE_NRXDESC)
#define DGE_RXMEM	(DGE_NBUFFERS*DGE_BUFFER_SIZE)

struct rxbugentry {
	SLIST_ENTRY(rxbugentry) rb_entry;
	int rb_slot;
};
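
/*
 * Buffer <-> slot arithmetic used by dge_getbuf()/dge_freebuf() below:
 * slot i lives at
 *
 *	(char *)sc->sc_bugbuf + i * DGE_BUFFER_SIZE
 *
 * and the slot is recovered from a buffer address by the inverse
 * division, so no per-buffer bookkeeping is needed beyond the free
 * list of rxbugentry structures.
 */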

static int
dge_alloc_rcvmem(struct dge_softc *sc)
{
	char *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct rxbugentry *entry;

	state = error = 0;

	if (bus_dmamem_alloc(sc->sc_dmat, DGE_RXMEM, PAGE_SIZE, 0,
	     &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, DGE_RXMEM, (void **)&kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't map DMA buffers (%d bytes)\n",
		    (int)DGE_RXMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmat, DGE_RXMEM, 1, DGE_RXMEM, 0,
	    BUS_DMA_NOWAIT, &sc->sc_bugmap)) {
		aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_bugmap,
	    kva, DGE_RXMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't load DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->sc_bugbuf = (void *)kva;
	SLIST_INIT(&sc->sc_buglist);

	/*
	 * Now divide it up into DGE_BUFFER_SIZE pieces and save the addresses
	 * in an array.
	 */
	if ((entry = malloc(sizeof(*entry) * DGE_NBUFFERS,
	    M_DEVBUF, M_NOWAIT)) == NULL) {
		error = ENOBUFS;
		goto out;
	}
	sc->sc_entry = entry;
	for (i = 0; i < DGE_NBUFFERS; i++) {
		entry[i].rb_slot = i;
		SLIST_INSERT_HEAD(&sc->sc_buglist, &entry[i], rb_entry);
	}
out:
	if (error != 0) {
		/* Unwind in reverse order; each case falls into the next. */
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmat, sc->sc_bugmap);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_bugmap);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->sc_dmat, kva, DGE_RXMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->sc_dmat, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
dge_getbuf(struct dge_softc *sc)
{
	struct rxbugentry *entry;

	entry = SLIST_FIRST(&sc->sc_buglist);

	if (entry == NULL) {
		printf("%s: no free RX buffers\n", device_xname(sc->sc_dev));
		return NULL;
	}

	SLIST_REMOVE_HEAD(&sc->sc_buglist, rb_entry);
	return (char *)sc->sc_bugbuf + entry->rb_slot * DGE_BUFFER_SIZE;
}

/*
 * Release a jumbo buffer.  Used as the mbuf external-storage free
 * callback, so m may be NULL if the buffer was never attached to an
 * mbuf.
 */
static void
dge_freebuf(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct rxbugentry *entry;
	struct dge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct dge_softc *)arg;

	if (sc == NULL)
		panic("dge_freebuf: can't find softc pointer!");

	/* Calculate the slot this buffer belongs to. */
	i = ((char *)buf - (char *)sc->sc_bugbuf) / DGE_BUFFER_SIZE;

	if ((i < 0) || (i >= DGE_NBUFFERS))
		panic("dge_freebuf: asked to free buffer %d!", i);

	s = splvm();
	entry = sc->sc_entry + i;
	SLIST_INSERT_HEAD(&sc->sc_buglist, entry, rb_entry);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}
#endif

static void	dge_start(struct ifnet *);
static void	dge_watchdog(struct ifnet *);
static int	dge_ioctl(struct ifnet *, u_long, void *);
static int	dge_init(struct ifnet *);
static void	dge_stop(struct ifnet *, int);

static bool	dge_shutdown(device_t, int);

static void	dge_reset(struct dge_softc *);
static void	dge_rxdrain(struct dge_softc *);
static int	dge_add_rxbuf(struct dge_softc *, int);

static void	dge_set_filter(struct dge_softc *);

static int	dge_intr(void *);
static void	dge_txintr(struct dge_softc *);
static void	dge_rxintr(struct dge_softc *);
static void	dge_linkintr(struct dge_softc *, uint32_t);

static int	dge_match(device_t, cfdata_t, void *);
static void	dge_attach(device_t, device_t, void *);

static int	dge_read_eeprom(struct dge_softc *sc);
static int	dge_eeprom_clockin(struct dge_softc *sc);
static void	dge_eeprom_clockout(struct dge_softc *sc, int bit);
static uint16_t	dge_eeprom_word(struct dge_softc *sc, int addr);
static int	dge_xgmii_mediachange(struct ifnet *);
static void	dge_xgmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	dge_xgmii_reset(struct dge_softc *);
static void	dge_xgmii_writereg(struct dge_softc *, int, int, int);


CFATTACH_DECL_NEW(dge, sizeof(struct dge_softc),
    dge_match, dge_attach, NULL, NULL);

#ifdef DGE_EVENT_COUNTERS
#if DGE_NTXSEGS > 100
#error Update dge_txseg_evcnt_names
#endif
static char (*dge_txseg_evcnt_names)[DGE_NTXSEGS][8 /* "txseg00" + \0 */];
#endif /* DGE_EVENT_COUNTERS */

static int
dge_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INTEL &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82597EX)
		return (1);

	return (0);
}

static void
dge_attach(device_t parent, device_t self, void *aux)
{
	struct dge_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_dma_segment_t seg;
	int i, rseg, error;
	uint8_t enaddr[ETHER_ADDR_LEN];
	pcireg_t preg, memtype;
	uint32_t reg;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_pt = pa->pa_tag;

	pci_aprint_devinfo_fancy(pa, "Ethernet controller",
		"Intel i82597EX 10GbE-LR Ethernet", 1);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, DGE_PCI_BAR);
	if (pci_mapreg_map(pa, DGE_PCI_BAR, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, NULL)) {
		aprint_error_dev(sc->sc_dev, "unable to map device registers\n");
		return;
	}

	/* Enable bus mastering */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, dge_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	reg = CSR_READ(sc, DGE_STATUS);
	if (reg & STATUS_BUS64)
		sc->sc_flags |= DGE_F_BUS64;

	sc->sc_flags |= DGE_F_PCIX;
	if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			       PCI_CAP_PCIX,
			       &sc->sc_pcix_offset, NULL) == 0)
		aprint_error_dev(sc->sc_dev, "unable to find PCIX "
		    "capability\n");

	if (sc->sc_flags & DGE_F_PCIX) {
		switch (reg & STATUS_PCIX_MSK) {
		case STATUS_PCIX_66:
			sc->sc_bus_speed = 66;
			break;
		case STATUS_PCIX_100:
			sc->sc_bus_speed = 100;
			break;
		case STATUS_PCIX_133:
			sc->sc_bus_speed = 133;
			break;
		default:
			aprint_error_dev(sc->sc_dev,
			    "unknown PCIXSPD %d; assuming 66MHz\n",
			    reg & STATUS_PCIX_MSK);
			sc->sc_bus_speed = 66;
		}
	} else
		sc->sc_bus_speed = (reg & STATUS_BUS64) ? 66 : 33;
	aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
	    (sc->sc_flags & DGE_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
	    (sc->sc_flags & DGE_F_PCIX) ? "PCIX" : "PCI");

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct dge_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct dge_control_data), (void **)&sc->sc_control_data,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to map control data, error = %d\n",
		    error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct dge_control_data), 1,
	    sizeof(struct dge_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct dge_control_data), NULL,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

#ifdef DGE_OFFBYONE_RXBUG
	if (dge_alloc_rcvmem(sc) != 0)
		return; /* Already complained */
#endif
	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < DGE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, DGE_MAX_MTU,
		    DGE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev, "unable to create Tx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < DGE_NRXDESC; i++) {
#ifdef DGE_OFFBYONE_RXBUG
		if ((error = bus_dmamap_create(sc->sc_dmat, DGE_BUFFER_SIZE, 1,
		    DGE_BUFFER_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
#else
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
#endif
			aprint_error_dev(sc->sc_dev, "unable to create Rx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Set bits in ctrl0 register.
	 * Should get the software defined pins out of EEPROM?
	 */
	sc->sc_ctrl0 |= CTRL0_RPE | CTRL0_TPE; /* XON/XOFF */
	sc->sc_ctrl0 |= CTRL0_SDP3_DIR | CTRL0_SDP2_DIR | CTRL0_SDP1_DIR |
	    CTRL0_SDP0_DIR | CTRL0_SDP3 | CTRL0_SDP2 | CTRL0_SDP0;

	/*
	 * Reset the chip to a known state.
	 */
	dge_reset(sc);

	/*
	 * Reset the PHY.
	 */
	dge_xgmii_reset(sc);

	/*
	 * Read in EEPROM data.
	 */
	if (dge_read_eeprom(sc)) {
		aprint_error_dev(sc->sc_dev, "couldn't read EEPROM\n");
		return;
	}

	/*
	 * Get the ethernet address.
	 */
	enaddr[0] = sc->sc_eeprom[EE_ADDR01] & 0377;
	enaddr[1] = sc->sc_eeprom[EE_ADDR01] >> 8;
	enaddr[2] = sc->sc_eeprom[EE_ADDR23] & 0377;
	enaddr[3] = sc->sc_eeprom[EE_ADDR23] >> 8;
	enaddr[4] = sc->sc_eeprom[EE_ADDR45] & 0377;
	enaddr[5] = sc->sc_eeprom[EE_ADDR45] >> 8;

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Set up media stuff.
	 */
	ifmedia_init(&sc->sc_media, IFM_IMASK, dge_xgmii_mediachange,
	    dge_xgmii_mediastatus);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_LR, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_10G_LR);

	ifp = &sc->sc_ethercom.ec_if;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dge_ioctl;
	ifp->if_start = dge_start;
	ifp->if_watchdog = dge_watchdog;
	ifp->if_init = dge_init;
	ifp->if_stop = dge_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(DGE_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_JUMBO_MTU | ETHERCAP_VLAN_MTU;

	/*
	 * We can perform IPv4, TCPv4 and UDPv4 checksums in-bound and
	 * out-bound.
	 */
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

#ifdef DGE_EVENT_COUNTERS
	/* Fix segment event naming */
	if (dge_txseg_evcnt_names == NULL) {
		dge_txseg_evcnt_names =
		    malloc(sizeof(*dge_txseg_evcnt_names), M_DEVBUF, M_WAITOK);
		for (i = 0; i < DGE_NTXSEGS; i++)
			snprintf((*dge_txseg_evcnt_names)[i],
			    sizeof((*dge_txseg_evcnt_names)[i]), "txseg%d", i);
	}

	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txctx miss");

	for (i = 0; i < DGE_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), (*dge_txseg_evcnt_names)[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txdrop");

#endif /* DGE_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(self, NULL, NULL, dge_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < DGE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < DGE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct dge_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * dge_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static bool
dge_shutdown(device_t self, int howto)
{
	struct dge_softc *sc;

	sc = device_private(self);
	dge_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}

/*
 * dge_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
dge_tx_cksum(struct dge_softc *sc, struct dge_txsoft *txs, uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct dge_ctdes *t;
	uint32_t ipcs, tucs;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields = 0;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		return (0);
	}

	iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		DGE_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= TDESC_POPTS_IXSM;
		ipcs = DGE_TCPIP_IPCSS(offset) |
		    DGE_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    DGE_TCPIP_IPCSE(offset + iphl - 1);
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = DGE_TCPIP_IPCSS(offset) |
		    DGE_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    DGE_TCPIP_IPCSE(offset + iphl - 1);
	}
	DPRINTF(DGE_DEBUG_CKSUM,
	    ("%s: CKSUM: offset %d ipcs 0x%x\n",
	    device_xname(sc->sc_dev), offset, ipcs));

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		DGE_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= TDESC_POPTS_TXSM;
		tucs = DGE_TCPIP_TUCSS(offset) |
		   DGE_TCPIP_TUCSO(offset + M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
		   DGE_TCPIP_TUCSE(0) /* rest of packet */;
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = DGE_TCPIP_TUCSS(offset) |
		    DGE_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    DGE_TCPIP_TUCSE(0) /* rest of packet */;
	}

	DPRINTF(DGE_DEBUG_CKSUM,
	    ("%s: CKSUM: offset %d tucs 0x%x\n",
	    device_xname(sc->sc_dev), offset, tucs));

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		DGE_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef DGE_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			DGE_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			DGE_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct dge_ctdes *)&sc->sc_txdescs[sc->sc_txnext];
		t->dc_tcpip_ipcs = htole32(ipcs);
		t->dc_tcpip_tucs = htole32(tucs);
		t->dc_tcpip_cmdlen = htole32(TDESC_DTYP_CTD);
		t->dc_tcpip_seg = 0;
		DGE_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = DGE_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*fieldsp = fields;

	return (0);
}
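
/*
 * A note on the context cache used above (inferred from this function;
 * the sentinels themselves are initialized elsewhere in the driver):
 * sc_txctx_ipcs/sc_txctx_tucs record the last checksum context written
 * to the chip, with 0xffffffff meaning "no valid cached context", so
 * consecutive packets with the same header layout skip the extra
 * context descriptor entirely.
 */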

/*
 * dge_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
dge_start(struct ifnet *ifp)
{
	struct dge_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct dge_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < DGE_TXQUEUE_GC) {
			dge_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(DGE_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					device_xname(sc->sc_dev)));
				DGE_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				DGE_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			DGE_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs));

		DGE_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This
		 * may be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (dge_tx_cksum(sc, txs, &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumfields = 0;
		}

		cksumcmd = TDESC_DCMD_IDE | TDESC_DTYP_DATA;

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = DGE_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].dt_baddrh = 0;
			sc->sc_txdescs[nexttx].dt_baddrl =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].dt_ctl =
			    htole32(cksumcmd | dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].dt_status = 0;
			sc->sc_txdescs[nexttx].dt_popts = cksumfields;
			sc->sc_txdescs[nexttx].dt_vlan = 0;
			lasttx = nexttx;

			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08lx, len 0x%04lx\n",
			    device_xname(sc->sc_dev), nexttx,
			    (unsigned long)le32toh(dmamap->dm_segs[seg].ds_addr),
			    (unsigned long)le32toh(dmamap->dm_segs[seg].ds_len)));
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].dt_ctl |=
		    htole32(TDESC_DCMD_EOP | TDESC_DCMD_RS);

		txs->txs_lastdesc = lasttx;

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", device_xname(sc->sc_dev),
		    lasttx, le32toh(sc->sc_txdescs[lasttx].dt_ctl)));

		/* Sync the descriptors we're using. */
		DGE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, DGE_TDT, nexttx);

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = DGE_NEXTTXS(sc->sc_txsnext);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * dge_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
dge_watchdog(struct ifnet *ifp)
{
	struct dge_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	dge_txintr(sc);

	if (sc->sc_txfree != DGE_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) dge_init(ifp);
	}

	/* Try to get more packets going. */
	dge_start(ifp);
}

/*
 * dge_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
dge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	pcireg_t preg;
	int s, error, mmrbc;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > DGE_MAX_MTU)
			error = EINVAL;
		else if ((error = ifioctl_common(ifp, cmd, data)) != ENETRESET)
			break;
		else if (ifp->if_flags & IFF_UP)
			error = (*ifp->if_init)(ifp);
		else
			error = 0;
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* extract link flags */
		if ((ifp->if_flags & IFF_LINK0) == 0 &&
		    (ifp->if_flags & IFF_LINK1) == 0)
			mmrbc = PCIX_MMRBC_512;
		else if ((ifp->if_flags & IFF_LINK0) == 0 &&
		    (ifp->if_flags & IFF_LINK1) != 0)
			mmrbc = PCIX_MMRBC_1024;
		else if ((ifp->if_flags & IFF_LINK0) != 0 &&
		    (ifp->if_flags & IFF_LINK1) == 0)
			mmrbc = PCIX_MMRBC_2048;
		else
			mmrbc = PCIX_MMRBC_4096;
		if (mmrbc != sc->sc_mmrbc) {
			preg = pci_conf_read(sc->sc_pc, sc->sc_pt, DGE_PCIX_CMD);
			preg &= ~PCIX_MMRBC_MSK;
			preg |= mmrbc;
			pci_conf_write(sc->sc_pc, sc->sc_pt, DGE_PCIX_CMD, preg);
			sc->sc_mmrbc = mmrbc;
		}
		/* FALLTHROUGH */
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			dge_set_filter(sc);
		}
		break;
	}

	/* Try to get more packets going. */
	dge_start(ifp);

	splx(s);
	return (error);
}
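
/*
 * Operator-facing sketch of the LINK0/LINK1 encoding handled in
 * SIOCSIFFLAGS above ("dge0" is just an example interface name):
 *
 *	ifconfig dge0 -link0 -link1	-> MMRBC  512 bytes
 *	ifconfig dge0 -link0  link1	-> MMRBC 1024 bytes
 *	ifconfig dge0  link0 -link1	-> MMRBC 2048 bytes
 *	ifconfig dge0  link0  link1	-> MMRBC 4096 bytes
 *
 * where MMRBC is the PCI-X maximum memory read byte count written to
 * the DGE_PCIX_CMD register.
 */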

/*
 * dge_intr:
 *
 *	Interrupt service routine.
 */
static int
dge_intr(void *arg)
{
	struct dge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, DGE_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

		rnd_add_uint32(&sc->rnd_source, icr);

		handled = 1;

#if defined(DGE_DEBUG) || defined(DGE_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(sc->sc_dev),
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			DGE_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		dge_rxintr(sc);

#if defined(DGE_DEBUG) || defined(DGE_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(sc->sc_dev)));
			DGE_EVCNT_INCR(&sc->sc_ev_txdw);
		}
		if (icr & ICR_TXQE)
			DGE_EVCNT_INCR(&sc->sc_ev_txqe);
#endif
		dge_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ)) {
			DGE_EVCNT_INCR(&sc->sc_ev_linkintr);
			dge_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", device_xname(sc->sc_dev));
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			dge_init(ifp);

		/* Try to get more packets going. */
		dge_start(ifp);
	}

	return (handled);
}

/*
 * dge_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
dge_txintr(struct dge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct dge_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != DGE_TXQUEUELEN;
	     i = DGE_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));

		DGE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status =
		    sc->sc_txdescs[txs->txs_lastdesc].dt_status;
		if ((status & TDESC_STA_DD) == 0) {
			DGE_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		ifp->if_opackets++;
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(DGE_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == DGE_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * dge_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
dge_rxintr(struct dge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct dge_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = DGE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(DGE_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    device_xname(sc->sc_dev), i));

		DGE_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].dr_status;
		errors = sc->sc_rxdescs[i].dr_errors;
		len = le16toh(sc->sc_rxdescs[i].dr_len);

		if ((status & RDESC_STS_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			DGE_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    device_xname(sc->sc_dev), i));
			DGE_INIT_RXDESC(sc, i);
			if (status & RDESC_STS_EOP) {
				/* Reset our state. */
				DPRINTF(DGE_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    device_xname(sc->sc_dev)));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (dge_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			DGE_INIT_RXDESC(sc, i);
			if ((status & RDESC_STS_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			DGE_RXCHAIN_RESET(sc);
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", device_xname(sc->sc_dev),
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}
		DGE_INIT_RXDESC(sc, DGE_PREVRX(i)); /* Write the descriptor */

		DGE_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(DGE_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    device_xname(sc->sc_dev), m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & RDESC_STS_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    device_xname(sc->sc_dev), sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		DGE_RXCHAIN_RESET(sc);

		DPRINTF(DGE_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    device_xname(sc->sc_dev), len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		     (RDESC_ERR_CE|RDESC_ERR_SE|RDESC_ERR_P|RDESC_ERR_RXE)) {
			ifp->if_ierrors++;
			if (errors & RDESC_ERR_SE)
				printf("%s: symbol error\n",
				    device_xname(sc->sc_dev));
			else if (errors & RDESC_ERR_P)
				printf("%s: parity error\n",
				    device_xname(sc->sc_dev));
			else if (errors & RDESC_ERR_CE)
				printf("%s: CRC error\n",
				    device_xname(sc->sc_dev));
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & RDESC_STS_IPCS) {
			DGE_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & RDESC_ERR_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & RDESC_STS_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			DGE_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & RDESC_ERR_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

		/* Pass this up to any BPF listeners. */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(DGE_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
}
1775 
1776 /*
1777  * dge_linkintr:
1778  *
1779  *	Helper; handle link interrupts.
1780  */
1781 static void
1782 dge_linkintr(struct dge_softc *sc, uint32_t icr)
1783 {
1784 	uint32_t status;
1785 
1786 	if (icr & ICR_LSC) {
1787 		status = CSR_READ(sc, DGE_STATUS);
1788 		if (status & STATUS_LINKUP) {
1789 			DPRINTF(DGE_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
1790 			    device_xname(sc->sc_dev)));
1791 		} else {
1792 			DPRINTF(DGE_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
1793 			    device_xname(sc->sc_dev)));
1794 		}
1795 	} else if (icr & ICR_RXSEQ) {
1796 		DPRINTF(DGE_DEBUG_LINK,
1797 		    ("%s: LINK: Receive sequence error\n",
1798 		    device_xname(sc->sc_dev)));
1799 	}
1800 	/* XXX - fix errata */
1801 }
1802 
1803 /*
1804  * dge_reset:
1805  *
1806  *	Reset the i82597 chip.
1807  */
1808 static void
1809 dge_reset(struct dge_softc *sc)
1810 {
1811 	int i;
1812 
1813 	/*
1814 	 * Do a chip reset.
1815 	 */
1816 	CSR_WRITE(sc, DGE_CTRL0, CTRL0_RST | sc->sc_ctrl0);
1817 
1818 	delay(10000);
1819 
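	/* Wait up to ~20 ms more for the reset bit to self-clear. */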
1820 	for (i = 0; i < 1000; i++) {
1821 		if ((CSR_READ(sc, DGE_CTRL0) & CTRL0_RST) == 0)
1822 			break;
1823 		delay(20);
1824 	}
1825 
1826 	if (CSR_READ(sc, DGE_CTRL0) & CTRL0_RST)
1827 		printf("%s: WARNING: reset failed to complete\n",
1828 		    device_xname(sc->sc_dev));
1829 	/*
1830 	 * Reset the EEPROM logic.
1831 	 * This will cause the chip to reread its default values,
1832 	 * which doesn't happen otherwise (errata).
1833 	 */
1834 	CSR_WRITE(sc, DGE_CTRL1, CTRL1_EE_RST);
1835 	delay(10000);
1836 }
1837 
1838 /*
1839  * dge_init:		[ifnet interface function]
1840  *
1841  *	Initialize the interface.  Must be called at splnet().
1842  */
1843 static int
1844 dge_init(struct ifnet *ifp)
1845 {
1846 	struct dge_softc *sc = ifp->if_softc;
1847 	struct dge_rxsoft *rxs;
1848 	int i, error = 0;
1849 	uint32_t reg;
1850 
1851 	/*
1852 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
1853 	 * There is a small but measurable benefit to avoiding the adjustment
1854 	 * of the descriptor so that the headers are aligned, for normal MTU,
1855 	 * on such platforms.  One possibility is that the DMA itself is
1856 	 * slightly more efficient if the front of the entire packet (instead
1857 	 * of the front of the headers) is aligned.
1858 	 *
1859 	 * Note we must always set align_tweak to 0 if we are using
1860 	 * jumbo frames.
1861 	 */
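	/*
	 * Worked example: with 2k clusters and the standard 1500-byte
	 * MTU, 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518,
	 * which fits in MCLBYTES - 2, so align_tweak is set to 2;
	 * offsetting the 14-byte Ethernet header by two bytes leaves
	 * the IP header 4-byte aligned.
	 */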
1862 #ifdef __NO_STRICT_ALIGNMENT
1863 	sc->sc_align_tweak = 0;
1864 #else
1865 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
1866 		sc->sc_align_tweak = 0;
1867 	else
1868 		sc->sc_align_tweak = 2;
1869 #endif /* __NO_STRICT_ALIGNMENT */
1870 
1871 	/* Cancel any pending I/O. */
1872 	dge_stop(ifp, 0);
1873 
1874 	/* Reset the chip to a known state. */
1875 	dge_reset(sc);
1876 
1877 	/* Initialize the transmit descriptor ring. */
1878 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1879 	DGE_CDTXSYNC(sc, 0, DGE_NTXDESC,
1880 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1881 	sc->sc_txfree = DGE_NTXDESC;
1882 	sc->sc_txnext = 0;
1883 
1884 	sc->sc_txctx_ipcs = 0xffffffff;
1885 	sc->sc_txctx_tucs = 0xffffffff;
1886 
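	/*
	 * Tell the chip where the TX ring lives: base address
	 * (TDBAH:TDBAL), size in bytes (TDLEN) and the head/tail
	 * indices; TIDV sets the transmit interrupt delay.
	 */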
1887 	CSR_WRITE(sc, DGE_TDBAH, 0);
1888 	CSR_WRITE(sc, DGE_TDBAL, DGE_CDTXADDR(sc, 0));
1889 	CSR_WRITE(sc, DGE_TDLEN, sizeof(sc->sc_txdescs));
1890 	CSR_WRITE(sc, DGE_TDH, 0);
1891 	CSR_WRITE(sc, DGE_TDT, 0);
1892 	CSR_WRITE(sc, DGE_TIDV, TIDV);
1893 
1894 #if 0
1895 	CSR_WRITE(sc, DGE_TXDCTL, TXDCTL_PTHRESH(0) |
1896 	    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1897 #endif
1898 	CSR_WRITE(sc, DGE_RXDCTL,
1899 	    RXDCTL_PTHRESH(RXDCTL_PTHRESH_VAL) |
1900 	    RXDCTL_HTHRESH(RXDCTL_HTHRESH_VAL) |
1901 	    RXDCTL_WTHRESH(RXDCTL_WTHRESH_VAL));
1902 
1903 	/* Initialize the transmit job descriptors. */
1904 	for (i = 0; i < DGE_TXQUEUELEN; i++)
1905 		sc->sc_txsoft[i].txs_mbuf = NULL;
1906 	sc->sc_txsfree = DGE_TXQUEUELEN;
1907 	sc->sc_txsnext = 0;
1908 	sc->sc_txsdirty = 0;
1909 
1910 	/*
1911 	 * Initialize the receive descriptor and receive job
1912 	 * descriptor rings.
1913 	 */
1914 	CSR_WRITE(sc, DGE_RDBAH, 0);
1915 	CSR_WRITE(sc, DGE_RDBAL, DGE_CDRXADDR(sc, 0));
1916 	CSR_WRITE(sc, DGE_RDLEN, sizeof(sc->sc_rxdescs));
1917 	CSR_WRITE(sc, DGE_RDH, DGE_RXSPACE);
1918 	CSR_WRITE(sc, DGE_RDT, 0);
1919 	CSR_WRITE(sc, DGE_RDTR, RDTR | 0x80000000);
1920 	CSR_WRITE(sc, DGE_FCRTL, FCRTL | FCRTL_XONE);
1921 	CSR_WRITE(sc, DGE_FCRTH, FCRTH);
1922 
1923 	for (i = 0; i < DGE_NRXDESC; i++) {
1924 		rxs = &sc->sc_rxsoft[i];
1925 		if (rxs->rxs_mbuf == NULL) {
1926 			if ((error = dge_add_rxbuf(sc, i)) != 0) {
1927 				printf("%s: unable to allocate or map rx "
1928 				    "buffer %d, error = %d\n",
1929 				    device_xname(sc->sc_dev), i, error);
1930 				/*
1931 				 * XXX Should attempt to run with fewer receive
1932 				 * XXX buffers instead of just failing.
1933 				 */
1934 				dge_rxdrain(sc);
1935 				goto out;
1936 			}
1937 		}
1938 		DGE_INIT_RXDESC(sc, i);
1939 	}
1940 	sc->sc_rxptr = DGE_RXSPACE;
1941 	sc->sc_rxdiscard = 0;
1942 	DGE_RXCHAIN_RESET(sc);
1943 
1944 	if (sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) {
1945 		sc->sc_ctrl0 |= CTRL0_JFE;
1946 		CSR_WRITE(sc, DGE_MFS, ETHER_MAX_LEN_JUMBO << 16);
1947 	}
1948 
1949 	/* Write the control registers. */
1950 	CSR_WRITE(sc, DGE_CTRL0, sc->sc_ctrl0);
1951 
1952 	/*
1953 	 * Set up checksum offload parameters.
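	 * Note that RXCSUM_TUOFL is only ever enabled together with
	 * RXCSUM_IPOFL below; TCP/UDP offload rides on IP header
	 * offload.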
1954 	 */
1955 	reg = CSR_READ(sc, DGE_RXCSUM);
1956 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
1957 		reg |= RXCSUM_IPOFL;
1958 	else
1959 		reg &= ~RXCSUM_IPOFL;
1960 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
1961 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
1962 	else {
1963 		reg &= ~RXCSUM_TUOFL;
1964 		if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) == 0)
1965 			reg &= ~RXCSUM_IPOFL;
1966 	}
1967 	CSR_WRITE(sc, DGE_RXCSUM, reg);
1968 
1969 	/*
1970 	 * Set up the interrupt registers.
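	 * Mask everything first (IMC), then enable (IMS): TX descriptor
	 * written back, link status change, RX sequence error, RX
	 * descriptor minimum threshold, RX overrun and RX timer.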
1971 	 */
1972 	CSR_WRITE(sc, DGE_IMC, 0xffffffffU);
1973 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
1974 	    ICR_RXO | ICR_RXT0;
1975 
1976 	CSR_WRITE(sc, DGE_IMS, sc->sc_icr);
1977 
1978 	/*
1979 	 * Set up the transmit control register.
1980 	 */
1981 	sc->sc_tctl = TCTL_TCE|TCTL_TPDE|TCTL_TXEN;
1982 	CSR_WRITE(sc, DGE_TCTL, sc->sc_tctl);
1983 
1984 	/*
1985 	 * Set up the receive control register; we actually program
1986 	 * the register when we set the receive filter.  Use multicast
1987 	 * address offset type 0.
1988 	 */
1989 	sc->sc_mchash_type = 0;
1990 
1991 	sc->sc_rctl = RCTL_RXEN | RCTL_RDMTS_12 | RCTL_RPDA_MC |
1992 	    RCTL_CFF | RCTL_SECRC | RCTL_MO(sc->sc_mchash_type);
1993 
1994 #ifdef DGE_OFFBYONE_RXBUG
1995 	sc->sc_rctl |= RCTL_BSIZE_16k;
1996 #else
1997 	switch (MCLBYTES) {
1998 	case 2048:
1999 		sc->sc_rctl |= RCTL_BSIZE_2k;
2000 		break;
2001 	case 4096:
2002 		sc->sc_rctl |= RCTL_BSIZE_4k;
2003 		break;
2004 	case 8192:
2005 		sc->sc_rctl |= RCTL_BSIZE_8k;
2006 		break;
2007 	case 16384:
2008 		sc->sc_rctl |= RCTL_BSIZE_16k;
2009 		break;
2010 	default:
2011 		panic("dge_init: MCLBYTES %d unsupported", MCLBYTES);
2012 	}
2013 #endif
2014 
2015 	/* Set the receive filter. */
2016 	/* Also sets RCTL */
2017 	dge_set_filter(sc);
2018 
2019 	/* ...all done! */
2020 	ifp->if_flags |= IFF_RUNNING;
2021 	ifp->if_flags &= ~IFF_OACTIVE;
2022 
2023  out:
2024 	if (error)
2025 		printf("%s: interface not running\n", device_xname(sc->sc_dev));
2026 	return (error);
2027 }
2028 
2029 /*
2030  * dge_rxdrain:
2031  *
2032  *	Drain the receive queue.
2033  */
2034 static void
2035 dge_rxdrain(struct dge_softc *sc)
2036 {
2037 	struct dge_rxsoft *rxs;
2038 	int i;
2039 
2040 	for (i = 0; i < DGE_NRXDESC; i++) {
2041 		rxs = &sc->sc_rxsoft[i];
2042 		if (rxs->rxs_mbuf != NULL) {
2043 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2044 			m_freem(rxs->rxs_mbuf);
2045 			rxs->rxs_mbuf = NULL;
2046 		}
2047 	}
2048 }
2049 
2050 /*
2051  * dge_stop:		[ifnet interface function]
2052  *
2053  *	Stop transmission on the interface.
2054  */
2055 static void
2056 dge_stop(struct ifnet *ifp, int disable)
2057 {
2058 	struct dge_softc *sc = ifp->if_softc;
2059 	struct dge_txsoft *txs;
2060 	int i;
2061 
2062 	/* Stop the transmit and receive processes. */
2063 	CSR_WRITE(sc, DGE_TCTL, 0);
2064 	CSR_WRITE(sc, DGE_RCTL, 0);
2065 
2066 	/* Release any queued transmit buffers. */
2067 	for (i = 0; i < DGE_TXQUEUELEN; i++) {
2068 		txs = &sc->sc_txsoft[i];
2069 		if (txs->txs_mbuf != NULL) {
2070 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2071 			m_freem(txs->txs_mbuf);
2072 			txs->txs_mbuf = NULL;
2073 		}
2074 	}
2075 
2076 	/* Mark the interface as down and cancel the watchdog timer. */
2077 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2078 	ifp->if_timer = 0;
2079 
2080 	if (disable)
2081 		dge_rxdrain(sc);
2082 }
2083 
2084 /*
2085  * dge_add_rxbuf:
2086  *
2087  *	Add a receive buffer to the indicated descriptor.
2088  */
2089 static int
2090 dge_add_rxbuf(struct dge_softc *sc, int idx)
2091 {
2092 	struct dge_rxsoft *rxs = &sc->sc_rxsoft[idx];
2093 	struct mbuf *m;
2094 	int error;
2095 #ifdef DGE_OFFBYONE_RXBUG
2096 	void *buf;
2097 #endif
2098 
2099 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2100 	if (m == NULL)
2101 		return (ENOBUFS);
2102 
2103 #ifdef DGE_OFFBYONE_RXBUG
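	/*
	 * Workaround path: attach a driver-private DGE_BUFFER_SIZE
	 * buffer with MEXTADD rather than an mbuf cluster, for the
	 * receive off-by-one erratum this option is named after.
	 */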
2104 	if ((buf = dge_getbuf(sc)) == NULL)
2105 		return ENOBUFS;
2106 
2107 	m->m_len = m->m_pkthdr.len = DGE_BUFFER_SIZE;
2108 	MEXTADD(m, buf, DGE_BUFFER_SIZE, M_DEVBUF, dge_freebuf, sc);
2109 	m->m_flags |= M_EXT_RW;
2110 
2111 	if (rxs->rxs_mbuf != NULL)
2112 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2113 	rxs->rxs_mbuf = m;
2114 
2115 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, buf,
2116 	    DGE_BUFFER_SIZE, NULL, BUS_DMA_READ|BUS_DMA_NOWAIT);
2117 #else
2118 	MCLGET(m, M_DONTWAIT);
2119 	if ((m->m_flags & M_EXT) == 0) {
2120 		m_freem(m);
2121 		return (ENOBUFS);
2122 	}
2123 
2124 	if (rxs->rxs_mbuf != NULL)
2125 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2126 
2127 	rxs->rxs_mbuf = m;
2128 
2129 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2130 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2131 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
2132 #endif
2133 	if (error) {
2134 		printf("%s: unable to load rx DMA map %d, error = %d\n",
2135 		    device_xname(sc->sc_dev), idx, error);
2136 		panic("dge_add_rxbuf");	/* XXX XXX XXX */
2137 	}
2138 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2139 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2140 
2141 	return (0);
2142 }
2143 
2144 /*
2145  * dge_set_ral:
2146  *
2147  *	Set an entry in the receive address list.
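 *
 *	RAL takes the low four octets of the address, RAH the last two
 *	plus the address-valid bit (RAH_AV); a NULL enaddr clears the
 *	slot instead.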
2148  */
2149 static void
2150 dge_set_ral(struct dge_softc *sc, const uint8_t *enaddr, int idx)
2151 {
2152 	uint32_t ral_lo, ral_hi;
2153 
2154 	if (enaddr != NULL) {
2155 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2156 		    (enaddr[3] << 24);
2157 		ral_hi = enaddr[4] | (enaddr[5] << 8);
2158 		ral_hi |= RAH_AV;
2159 	} else {
2160 		ral_lo = 0;
2161 		ral_hi = 0;
2162 	}
2163 	CSR_WRITE(sc, RA_ADDR(DGE_RAL, idx), ral_lo);
2164 	CSR_WRITE(sc, RA_ADDR(DGE_RAH, idx), ral_hi);
2165 }
2166 
2167 /*
2168  * dge_mchash:
2169  *
2170  *	Compute the hash of the multicast address for the 4096-bit
2171  *	multicast filter.
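 *
 *	Example (illustrative only): with the default mchash_type of 0
 *	the hash is (enaddr[4] >> 4) | (enaddr[5] << 4), so the address
 *	01:00:5e:00:00:01 hashes to ((0x00 >> 4) | (0x01 << 4)) & 0xfff
 *	= 0x010.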
2172  */
2173 static uint32_t
2174 dge_mchash(struct dge_softc *sc, const uint8_t *enaddr)
2175 {
2176 	static const int lo_shift[4] = { 4, 3, 2, 0 };
2177 	static const int hi_shift[4] = { 4, 5, 6, 8 };
2178 	uint32_t hash;
2179 
2180 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2181 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2182 
2183 	return (hash & 0xfff);
2184 }
2185 
2186 /*
2187  * dge_set_filter:
2188  *
2189  *	Set up the receive filter.
2190  */
2191 static void
2192 dge_set_filter(struct dge_softc *sc)
2193 {
2194 	struct ethercom *ec = &sc->sc_ethercom;
2195 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2196 	struct ether_multi *enm;
2197 	struct ether_multistep step;
2198 	uint32_t hash, reg, bit;
2199 	int i;
2200 
2201 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2202 
2203 	if (ifp->if_flags & IFF_BROADCAST)
2204 		sc->sc_rctl |= RCTL_BAM;
2205 	if (ifp->if_flags & IFF_PROMISC) {
2206 		sc->sc_rctl |= RCTL_UPE;
2207 		goto allmulti;
2208 	}
2209 
2210 	/*
2211 	 * Set the station address in the first RAL slot, and
2212 	 * clear the remaining slots.
2213 	 */
2214 	dge_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
2215 	for (i = 1; i < RA_TABSIZE; i++)
2216 		dge_set_ral(sc, NULL, i);
2217 
2218 	/* Clear out the multicast table. */
2219 	for (i = 0; i < MC_TABSIZE; i++)
2220 		CSR_WRITE(sc, DGE_MTA + (i << 2), 0);
2221 
2222 	ETHER_FIRST_MULTI(step, ec, enm);
2223 	while (enm != NULL) {
2224 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2225 			/*
2226 			 * We must listen to a range of multicast addresses.
2227 			 * For now, just accept all multicasts, rather than
2228 			 * trying to set only those filter bits needed to match
2229 			 * the range.  (At this time, the only use of address
2230 			 * ranges is for IP multicast routing, for which the
2231 			 * range is big enough to require all bits set.)
2232 			 */
2233 			goto allmulti;
2234 		}
2235 
2236 		hash = dge_mchash(sc, enm->enm_addrlo);
2237 
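		/*
		 * The 4096-bit table is 128 32-bit MTA registers; the
		 * upper seven bits of the 12-bit hash select the
		 * register, the low five the bit within it.
		 */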
2238 		reg = (hash >> 5) & 0x7f;
2239 		bit = hash & 0x1f;
2240 
2241 		hash = CSR_READ(sc, DGE_MTA + (reg << 2));
2242 		hash |= 1U << bit;
2243 
2244 		CSR_WRITE(sc, DGE_MTA + (reg << 2), hash);
2245 
2246 		ETHER_NEXT_MULTI(step, enm);
2247 	}
2248 
2249 	ifp->if_flags &= ~IFF_ALLMULTI;
2250 	goto setit;
2251 
2252  allmulti:
2253 	ifp->if_flags |= IFF_ALLMULTI;
2254 	sc->sc_rctl |= RCTL_MPE;
2255 
2256  setit:
2257 	CSR_WRITE(sc, DGE_RCTL, sc->sc_rctl);
2258 }
2259 
2260 /*
2261  * Read in the EEPROM info and verify checksum.
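 * The 16-bit words must sum (mod 2^16) to EEPROM_CKSUM; a nonzero
 * return value means the contents should not be trusted.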
2262  */
2263 int
2264 dge_read_eeprom(struct dge_softc *sc)
2265 {
2266 	uint16_t cksum;
2267 	int i;
2268 
2269 	cksum = 0;
2270 	for (i = 0; i < EEPROM_SIZE; i++) {
2271 		sc->sc_eeprom[i] = dge_eeprom_word(sc, i);
2272 		cksum += sc->sc_eeprom[i];
2273 	}
2274 	return cksum != EEPROM_CKSUM;
2275 }
2276 
2277 
2278 /*
2279  * Read a 16-bit word from address addr in the serial EEPROM.
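 * The access is bit-banged through DGE_EECD, microwire style: assert
 * chip select, clock out the start bit and read opcode (1, 1, 0),
 * clock out six address bits MSB first, then clock in sixteen data
 * bits.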
2280  */
2281 uint16_t
2282 dge_eeprom_word(struct dge_softc *sc, int addr)
2283 {
2284 	uint32_t reg;
2285 	uint16_t rval = 0;
2286 	int i;
2287 
2288 	reg = CSR_READ(sc, DGE_EECD) & ~(EECD_SK|EECD_DI|EECD_CS);
2289 
2290 	/* Lower clock pulse (and data in to chip) */
2291 	CSR_WRITE(sc, DGE_EECD, reg);
2292 	/* Select chip */
2293 	CSR_WRITE(sc, DGE_EECD, reg|EECD_CS);
2294 
2295 	/* Send read command */
2296 	dge_eeprom_clockout(sc, 1);
2297 	dge_eeprom_clockout(sc, 1);
2298 	dge_eeprom_clockout(sc, 0);
2299 
2300 	/* Send address */
2301 	for (i = 5; i >= 0; i--)
2302 		dge_eeprom_clockout(sc, (addr >> i) & 1);
2303 
2304 	/* Read data */
2305 	for (i = 0; i < 16; i++) {
2306 		rval <<= 1;
2307 		rval |= dge_eeprom_clockin(sc);
2308 	}
2309 
2310 	/* Deselect chip */
2311 	CSR_WRITE(sc, DGE_EECD, reg);
2312 
2313 	return rval;
2314 }
2315 
2316 /*
2317  * Clock out a single bit to the EEPROM.
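 * The bit is presented on EECD_DI while EECD_SK is pulsed high; the
 * EEPROM samples DI on the rising clock edge.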
2318  */
2319 void
2320 dge_eeprom_clockout(struct dge_softc *sc, int bit)
2321 {
2322 	int reg;
2323 
2324 	reg = CSR_READ(sc, DGE_EECD) & ~(EECD_DI|EECD_SK);
2325 	if (bit)
2326 		reg |= EECD_DI;
2327 
2328 	CSR_WRITE(sc, DGE_EECD, reg);
2329 	delay(2);
2330 	CSR_WRITE(sc, DGE_EECD, reg|EECD_SK);
2331 	delay(2);
2332 	CSR_WRITE(sc, DGE_EECD, reg);
2333 	delay(2);
2334 }
2335 
2336 /*
2337  * Clock in a single bit from EEPROM.
2338  */
2339 int
2340 dge_eeprom_clockin(struct dge_softc *sc)
2341 {
2342 	int reg, rv;
2343 
2344 	reg = CSR_READ(sc, DGE_EECD) & ~(EECD_DI|EECD_DO|EECD_SK);
2345 
2346 	CSR_WRITE(sc, DGE_EECD, reg|EECD_SK); /* Raise clock */
2347 	delay(2);
2348 	rv = (CSR_READ(sc, DGE_EECD) & EECD_DO) != 0; /* Get bit */
2349 	CSR_WRITE(sc, DGE_EECD, reg); /* Lower clock */
2350 	delay(2);
2351 
2352 	return rv;
2353 }
2354 
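/*
 * dge_xgmii_mediastatus:	[ifmedia interface function]
 *
 *	Report media status.  The media is fixed 10GBASE-LR, so only
 *	the link-up bit needs to be reflected.
 */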
2355 static void
2356 dge_xgmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2357 {
2358 	struct dge_softc *sc = ifp->if_softc;
2359 
2360 	ifmr->ifm_status = IFM_AVALID;
2361 	ifmr->ifm_active = IFM_ETHER|IFM_10G_LR;
2362 
2363 	if (CSR_READ(sc, DGE_STATUS) & STATUS_LINKUP)
2364 		ifmr->ifm_status |= IFM_ACTIVE;
2365 }
2366 
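/*
 * phwait:
 *
 *	Start an MDIO transaction and busy-wait (at most ~100 us) for
 *	the MDIO_CMD busy bit to clear; the caller checks MDIO_CMD in
 *	the returned register value to detect a timeout.
 */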
2367 static inline int
2368 phwait(struct dge_softc *sc, int p, int r, int d, int type)
2369 {
2370 	int i, mdic;
2371 
2372 	CSR_WRITE(sc, DGE_MDIO,
2373 	    MDIO_PHY(p) | MDIO_REG(r) | MDIO_DEV(d) | type | MDIO_CMD);
2374 	for (i = 0; i < 10; i++) {
2375 		delay(10);
2376 		if (((mdic = CSR_READ(sc, DGE_MDIO)) & MDIO_CMD) == 0)
2377 			break;
2378 	}
2379 	return mdic;
2380 }
2381 
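/*
 * dge_xgmii_writereg:
 *
 *	Write a PHY register over XGMII: an MDIO address cycle followed
 *	by a write cycle, each polled for completion via phwait().
 */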
2382 static void
2383 dge_xgmii_writereg(struct dge_softc *sc, int phy, int reg, int val)
2384 {
2385 	int mdic;
2386 
2387 	CSR_WRITE(sc, DGE_MDIRW, val);
2388 	if (((mdic = phwait(sc, phy, reg, 1, MDIO_ADDR)) & MDIO_CMD)) {
2389 		printf("%s: address cycle timeout; phy %d reg %d\n",
2390 		    device_xname(sc->sc_dev), phy, reg);
2391 		return;
2392 	}
2393 	if (((mdic = phwait(sc, phy, reg, 1, MDIO_WRITE)) & MDIO_CMD)) {
2394 		printf("%s: write cycle timeout; phy %d reg %d\n",
2395 		    device_xname(sc->sc_dev), phy, reg);
2396 		return;
2397 	}
2398 }
2399 
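/*
 * dge_xgmii_reset:
 *
 *	Reset the PHY by writing BMCR_RESET to PHY register 0.
 */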
2400 static void
2401 dge_xgmii_reset(struct dge_softc *sc)
2402 {
2403 	dge_xgmii_writereg(sc, 0, 0, BMCR_RESET);
2404 }
2405 
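/*
 * dge_xgmii_mediachange:	[ifmedia interface function]
 *
 *	Nothing to do; the media is fixed, so always succeed.
 */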
2406 static int
2407 dge_xgmii_mediachange(struct ifnet *ifp)
2408 {
2409 	return 0;
2410 }
2411