/*	$OpenBSD: hme.c,v 1.62 2011/07/05 05:25:09 bluhm Exp $	*/
/*	$NetBSD: hme.c,v 1.21 2001/07/07 15:59:37 thorpej Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * HME Ethernet module driver.
 */

#include "bpfilter.h"
#include "vlan.h"

#undef HMEDEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <dev/ic/hmereg.h>
#include <dev/ic/hmevar.h>

struct cfdriver hme_cd = {
	NULL, "hme", DV_IFNET
};

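/*
 * The chip is programmed (via the ERX configuration register in
 * hme_init() below) to start each received frame HME_RX_OFFSET bytes
 * into its buffer.  With the 14-byte Ethernet header this leaves the
 * IP header aligned on a 4-byte boundary (2 + 14 = 16).
 */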
#define	HME_RX_OFFSET	2

void		hme_start(struct ifnet *);
void		hme_stop(struct hme_softc *, int);
int		hme_ioctl(struct ifnet *, u_long, caddr_t);
void		hme_tick(void *);
void		hme_watchdog(struct ifnet *);
void		hme_init(struct hme_softc *);
void		hme_meminit(struct hme_softc *);
void		hme_mifinit(struct hme_softc *);
void		hme_reset(struct hme_softc *);
void		hme_iff(struct hme_softc *);
void		hme_fill_rx_ring(struct hme_softc *);
int		hme_newbuf(struct hme_softc *, struct hme_sxd *);

/* MII methods & callbacks */
static int	hme_mii_readreg(struct device *, int, int);
static void	hme_mii_writereg(struct device *, int, int, int);
static void	hme_mii_statchg(struct device *);

int		hme_mediachange(struct ifnet *);
void		hme_mediastatus(struct ifnet *, struct ifmediareq *);

int		hme_eint(struct hme_softc *, u_int);
int		hme_rint(struct hme_softc *);
int		hme_tint(struct hme_softc *);
/* TCP/UDP checksum offload support */
void		hme_rxcksum(struct mbuf *, u_int32_t);

void
hme_config(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	bus_dma_tag_t dmatag = sc->sc_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error, i;

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the bus tag:
	 *	sc_bustag
	 *
	 * the dma bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles:
	 *	sc_seb		(Shared Ethernet Block registers)
	 *	sc_erx		(Receiver Unit registers)
	 *	sc_etx		(Transmitter Unit registers)
	 *	sc_mac		(MAC registers)
	 *	sc_mif		(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 *
	 * the local Ethernet address:
	 *	sc_arpcom.ac_enaddr
	 *
	 */
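
	/*
	 * A minimal sketch of what a bus front-end is expected to do
	 * before calling hme_config().  The subregion offsets and sizes
	 * here are hypothetical; a real front-end (e.g. the SBus or PCI
	 * attachment) maps each register bank according to its bus
	 * layout and fetches the station address from the PROM:
	 *
	 *	sc->sc_bustag = bustag;
	 *	sc->sc_dmatag = dmatag;
	 *	bus_space_subregion(bustag, bh, seb_off, seb_sz, &sc->sc_seb);
	 *	bus_space_subregion(bustag, bh, etx_off, etx_sz, &sc->sc_etx);
	 *	bus_space_subregion(bustag, bh, erx_off, erx_sz, &sc->sc_erx);
	 *	bus_space_subregion(bustag, bh, mac_off, mac_sz, &sc->sc_mac);
	 *	bus_space_subregion(bustag, bh, mif_off, mif_sz, &sc->sc_mif);
	 *	sc->sc_burst = 64;	// from bus/PROM properties
	 *	// ... fill sc_arpcom.ac_enaddr from the PROM ...
	 *	hme_config(sc);
	 */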

	/* Make sure the chip is stopped. */
	hme_stop(sc, 0);

	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, HME_TX_NSEGS,
		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_txd[i].sd_map) != 0) {
			sc->sc_txd[i].sd_map = NULL;
			goto fail;
		}
	}
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_rxd[i].sd_map) != 0) {
			sc->sc_rxd[i].sd_map = NULL;
			goto fail;
		}
	}
	if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_rxmap_spare) != 0) {
		sc->sc_rxmap_spare = NULL;
		goto fail;
	}

	/*
	 * Allocate DMA capable memory
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of the number of descriptors.
	 */
	size = (HME_XD_SIZE * HME_RX_RING_MAX) +	/* RX descriptors */
	    (HME_XD_SIZE * HME_TX_RING_MAX);		/* TX descriptors */
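	/*
	 * Worked example: HME_XD_SIZE is 8 bytes (two 32-bit words,
	 * flags and buffer address), so the maximum ring size of
	 * HME_RX_RING_MAX = HME_TX_RING_MAX = 256 descriptors occupies
	 * exactly 8 * 256 = 2048 bytes per ring.  Allocating 2048 bytes
	 * per ring therefore both satisfies the alignment requirement
	 * and leaves room for any smaller configured ring.
	 */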

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size, 2048, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("\n%s: DMA buffer alloc error %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/* Map DMA memory in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: DMA buffer map error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("\n%s: DMA map create error %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: DMA buffer map load error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	m_clsetwms(ifp, MCLBYTES, 0, HME_RX_RING_SIZE);

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = hme_mii_readreg;
	mii->mii_writereg = hme_mii_writereg;
	mii->mii_statchg = hme_mii_statchg;

	ifmedia_init(&mii->mii_media, IFM_IMASK,
	    hme_mediachange, hme_mediastatus);

	hme_mifinit(sc);

	if (sc->sc_tcvr == -1)
		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, 0);
	else
		mii_attach(&sc->sc_dev, mii, 0xffffffff, sc->sc_tcvr,
		    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping. We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				    sc->sc_dev.dv_xname,
				    child->mii_dev.dv_xname,
				    child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, hme_tick, sc);
	return;

fail:
	if (sc->sc_rxmap_spare != NULL)
		bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare);
	for (i = 0; i < HME_TX_RING_SIZE; i++)
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map);
	for (i = 0; i < HME_RX_RING_SIZE; i++)
		if (sc->sc_rxd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map);
}

void
hme_unconfig(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	hme_stop(sc, 1);

	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare);
	for (i = 0; i < HME_TX_RING_SIZE; i++)
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map);
	for (i = 0; i < HME_RX_RING_SIZE; i++)
		if (sc->sc_rxd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
}

void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	int s;

	s = splnet();
	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, HME_MACI_NCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_FCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_EXCNT) +
	    bus_space_read_4(t, mac, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);

	/*
	 * If buffer allocation fails, the receive ring may become
	 * empty. There is no receive interrupt to recover from that.
	 */
	if (sc->sc_rx_cnt == 0)
		hme_fill_rx_ring(sc);

	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

void
hme_reset(struct hme_softc *sc)
{
	int s;

	s = splnet();
	hme_init(sc);
	splx(s);
}

void
hme_stop(struct hme_softc *sc, int softonly)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	int n;

	timeout_del(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (!softonly) {
		mii_down(&sc->sc_mii);

		/* Mask all interrupts */
		bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff);

		/* Reset transmitter and receiver */
		bus_space_write_4(t, seb, HME_SEBI_RESET,
		    (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));

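		/*
		 * Poll for reset completion: 20 passes with a 20 usec
		 * delay each gives the chip roughly 400 usec to clear
		 * the ETX/ERX reset bits before we give up.
		 */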
		for (n = 0; n < 20; n++) {
			u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
			if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
				break;
			DELAY(20);
		}
		if (n >= 20)
			printf("%s: hme_stop: reset failed\n",
			    sc->sc_dev.dv_xname);
	}

	for (n = 0; n < HME_TX_RING_SIZE; n++) {
		if (sc->sc_txd[n].sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_txd[n].sd_map,
			    0, sc->sc_txd[n].sd_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sc->sc_txd[n].sd_map);
			m_freem(sc->sc_txd[n].sd_mbuf);
			sc->sc_txd[n].sd_mbuf = NULL;
		}
	}
	sc->sc_tx_prod = sc->sc_tx_cons = sc->sc_tx_cnt = 0;

	for (n = 0; n < HME_RX_RING_SIZE; n++) {
		if (sc->sc_rxd[n].sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_rxd[n].sd_map,
			    0, sc->sc_rxd[n].sd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmatag, sc->sc_rxd[n].sd_map);
			m_freem(sc->sc_rxd[n].sd_mbuf);
			sc->sc_rxd[n].sd_mbuf = NULL;
		}
	}
	sc->sc_rx_prod = sc->sc_rx_cons = sc->sc_rx_cnt = 0;
}

void
hme_meminit(struct hme_softc *sc)
{
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	struct hme_ring *hr = &sc->sc_rb;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_TX_RING_SIZE * HME_XD_SIZE;
	dma += HME_TX_RING_SIZE * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_RX_RING_SIZE * HME_XD_SIZE;
	dma += HME_RX_RING_SIZE * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);
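	/*
	 * The resulting layout of the DMA area is:
	 *
	 *	rb_txddma: TX descriptors (HME_TX_RING_SIZE * HME_XD_SIZE)
	 *	           padding up to the next 2048-byte boundary
	 *	rb_rxddma: RX descriptors (HME_RX_RING_SIZE * HME_XD_SIZE)
	 *	           padding up to the next 2048-byte boundary
	 *
	 * which keeps each ring inside the 2048-byte-aligned block the
	 * chip requires.
	 */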

	/*
	 * Initialize transmit descriptors
	 */
	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * Initialize receive descriptors
	 */
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i, 0);
		sc->sc_rxd[i].sd_mbuf = NULL;
	}

	hme_fill_rx_ring(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
void
hme_init(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	bus_space_handle_t etx = sc->sc_etx;
	bus_space_handle_t erx = sc->sc_erx;
	bus_space_handle_t mac = sc->sc_mac;
	u_int8_t *ea;
	u_int32_t v, n;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc, 0);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

#if 0
	/* Mask all MIF interrupts, just in case */
	bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Setup data structures in host memory */
	hme_meminit(sc);

	/* step 4. TX MAC registers & counters */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_TXSIZE,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load station MAC address */
	ea = sc->sc_arpcom.ac_enaddr;
	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff.
	 * The manual suggests using the low 10 bits of the MAC address;
	 * the mask below actually keeps the low 14 bits.
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);

	/* Note: Accepting power-on default for other MAC registers here.. */

	/* step 5. RX MAC registers & counters */
	hme_iff(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE);

	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, mac, HME_MACI_RXSIZE,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, seb, HME_SEBI_IMASK,
	    ~(HME_SEB_STAT_HOSTTOTX | HME_SEB_STAT_RXTOHOST |
	      HME_SEB_STAT_TXALL | HME_SEB_STAT_TXPERR |
	      HME_SEB_STAT_RCNTEXP | HME_SEB_STAT_ALL_ERRORS));

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	bus_space_write_4(t, seb, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	bus_space_write_4(t, etx, HME_ETXI_CFG, v);

	/* Transmit Descriptor ring size: in increments of 16 */
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE / 16 - 1);
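	/*
	 * Worked example of the RSIZE encoding: the register holds
	 * (ring size / 16) - 1, so a 256-descriptor ring is programmed
	 * as 256 / 16 - 1 = 15, and a 32-descriptor ring as 1.
	 */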

	/* step 10. ERX Configuration */
	v = bus_space_read_4(t, erx, HME_ERXI_CFG);
	v &= ~HME_ERX_CFG_RINGSIZE256;
#if HME_RX_RING_SIZE == 32
	v |= HME_ERX_CFG_RINGSIZE32;
#elif HME_RX_RING_SIZE == 64
	v |= HME_ERX_CFG_RINGSIZE64;
#elif HME_RX_RING_SIZE == 128
	v |= HME_ERX_CFG_RINGSIZE128;
#elif HME_RX_RING_SIZE == 256
	v |= HME_ERX_CFG_RINGSIZE256;
#else
# error	"RX ring size must be 32, 64, 128, or 256"
#endif
	/* Enable DMA */
	v |= HME_ERX_CFG_DMAENABLE | (HME_RX_OFFSET << 3);
	/* RX TCP/UDP cksum offset */
	n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
	n = (n << HME_ERX_CFG_CSUM_SHIFT) & HME_ERX_CFG_CSUMSTART;
	v |= n;
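	/*
	 * Worked example for the checksum start offset: the chip counts
	 * in 16-bit words, so with a 14-byte Ethernet header and a
	 * 20-byte IP header the checksummed payload starts at
	 * (14 + 20) / 2 = 17 half-words into the frame.  Packets with
	 * IP options are corrected in software by hme_rxcksum().
	 */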
	bus_space_write_4(t, erx, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* Set the current media. */
	mii_mediachg(&sc->sc_mii);

	/* Start the one second timer. */
	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	hme_start(ifp);
}

void
hme_start(struct ifnet *ifp)
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	struct hme_ring *hr = &sc->sc_rb;
	struct mbuf *m;
	u_int32_t flags;
	bus_dmamap_t map;
	u_int32_t frag, cur, i;
	int error;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */

		cur = frag = sc->sc_tx_prod;
		map = sc->sc_txd[cur].sd_map;

		error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG)
			goto drop;
		if (error != 0) {
			/* Too many fragments, linearize. */
			if (m_defrag(m, M_DONTWAIT))
				goto drop;
			error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
			    BUS_DMA_NOWAIT);
			if (error != 0)
				goto drop;
		}

		if ((HME_TX_RING_SIZE - (sc->sc_tx_cnt + map->dm_nsegs)) < 5) {
			bus_dmamap_unload(sc->sc_dmatag, map);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* We are now committed to transmitting the packet. */
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		for (i = 0; i < map->dm_nsegs; i++) {
			flags = HME_XD_ENCODE_TSIZE(map->dm_segs[i].ds_len);
			if (i == 0)
				flags |= HME_XD_SOP;
			else
				flags |= HME_XD_OWN;

			HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, frag,
			    map->dm_segs[i].ds_addr);
			HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, frag, flags);

			cur = frag;
			if (++frag == HME_TX_RING_SIZE)
				frag = 0;
		}
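		/*
		 * Note the ownership handoff order: every descriptor
		 * after the first is marked HME_XD_OWN as it is built,
		 * but the SOP descriptor is not.  Its OWN bit is set
		 * last (below), so the chip can never start on a
		 * partially built chain.
		 */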

		/* Set end of packet on last descriptor. */
		flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, cur);
		flags |= HME_XD_EOP;
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, cur, flags);

		sc->sc_tx_cnt += map->dm_nsegs;
		sc->sc_txd[sc->sc_tx_prod].sd_map = sc->sc_txd[cur].sd_map;
		sc->sc_txd[cur].sd_map = map;
		sc->sc_txd[cur].sd_mbuf = m;

		/* Give first frame over to the hardware. */
		flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod);
		flags |= HME_XD_OWN;
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod, flags);

		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
		    HME_ETX_TP_DMAWAKEUP);
		sc->sc_tx_prod = frag;

		ifp->if_timer = 5;
	}

	return;

 drop:
	IFQ_DEQUEUE(&ifp->if_snd, m);
	m_freem(m);
	ifp->if_oerrors++;
}

/*
 * Transmit interrupt.
 */
int
hme_tint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	unsigned int ri, txflags;
	struct hme_sxd *sd;
	int cnt = sc->sc_tx_cnt;

	/* Fetch current position in the transmit ring */
	ri = sc->sc_tx_cons;
	sd = &sc->sc_txd[ri];

	for (;;) {
		if (cnt <= 0)
			break;

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);

		if (txflags & HME_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		if (txflags & HME_XD_EOP)
			ifp->if_opackets++;

		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,
			    0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}

		if (++ri == HME_TX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_txd;
		} else
			sd++;

		--cnt;
	}

	sc->sc_tx_cnt = cnt;
	ifp->if_timer = cnt > 0 ? 5 : 0;

	/* Update ring */
	sc->sc_tx_cons = ri;

	hme_start(ifp);

	return (1);
}

/*
 * XXX layering violation
 *
 * If we could have an additional csum data member in 'struct pkthdr'
 * for this kind of incomplete checksum offload capable hardware,
 * things would be much simpler.  That member would carry the partial
 * checksum data, to be evaluated in the TCP/UDP input handlers after
 * computing the pseudo-header checksum.
 */
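/*
 * The fixup below is standard one's-complement arithmetic: the chip
 * returns the 16-bit one's-complement sum S of everything after the
 * checksum start offset, and software folds in the missing pieces.
 * For example, subtracting a 16-bit word w is
 *
 *	sum = S - w;  sum = (sum >> 16) + (sum & 0xffff);
 *
 * and the final value ~(S + pseudo-header words), folded the same
 * way, is zero exactly when the transport checksum verifies.
 */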
void
hme_rxcksum(struct mbuf *m, u_int32_t flags)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen;
	u_int16_t cksum, *opts;
	u_int32_t temp32;
	union pseudoh {
		struct hdr {
			u_int16_t len;
			u_int8_t ttl;
			u_int8_t proto;
			u_int32_t src;
			u_int32_t dst;
		} h;
		u_int16_t w[6];
	} ph;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return; /* no checksum */
		break;
	default:
		return;
	}

	cksum = htons(~(flags & HME_XD_RXCKSUM));
	/* cksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (u_int16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(u_int16_t), opts++) {
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}
	/* cksum fixup for pseudo-header, replace with in_cksum_phdr()? */
	ph.h.len = htons(ntohs(ip->ip_len) - hlen);
	ph.h.ttl = 0;
	ph.h.proto = ip->ip_p;
	ph.h.src = ip->ip_src.s_addr;
	ph.h.dst = ip->ip_dst.s_addr;
	temp32 = cksum;
	opts = &ph.w[0];
	temp32 += opts[0] + opts[1] + opts[2] + opts[3] + opts[4] + opts[5];
	temp32 = (temp32 >> 16) + (temp32 & 65535);
	temp32 += (temp32 >> 16);
	cksum = ~temp32;
	if (cksum == 0) {
		m->m_pkthdr.csum_flags |=
			M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
	}
}

/*
 * Receive interrupt.
 */
int
hme_rint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;
	struct hme_sxd *sd;
	unsigned int ri, len;
	u_int32_t flags;

	ri = sc->sc_rx_cons;
	sd = &sc->sc_rxd[ri];

	/*
	 * Process all buffers with valid data.
	 */
	while (sc->sc_rx_cnt > 0) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri);
		if (flags & HME_XD_OWN)
			break;

		bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,
		    0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);

		m = sd->sd_mbuf;
		sd->sd_mbuf = NULL;

		if (++ri == HME_RX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_rxd;
		} else
			sd++;
		sc->sc_rx_cnt--;

		if (flags & HME_XD_OFL) {
			ifp->if_ierrors++;
			printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
			    sc->sc_dev.dv_xname, ri, flags);
			m_freem(m);
			continue;
		}

		len = HME_XD_DECODE_RSIZE(flags);
		m->m_pkthdr.len = m->m_len = len;

		ifp->if_ipackets++;
		hme_rxcksum(m, flags);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);
	}

	sc->sc_rx_cons = ri;
	hme_fill_rx_ring(sc);
	return (1);
}

int
hme_eint(struct hme_softc *sc, u_int status)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	if (status & HME_SEB_STAT_MIFIRQ) {
		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
		status &= ~HME_SEB_STAT_MIFIRQ;
	}

	if (status & HME_SEB_STAT_DTIMEXP) {
		ifp->if_oerrors++;
		status &= ~HME_SEB_STAT_DTIMEXP;
	}

	if (status & HME_SEB_STAT_NORXD) {
		ifp->if_ierrors++;
		status &= ~HME_SEB_STAT_NORXD;
	}

	status &= ~(HME_SEB_STAT_RXTOHOST | HME_SEB_STAT_GOTFRAME |
	    HME_SEB_STAT_SENTFRAME | HME_SEB_STAT_HOSTTOTX |
	    HME_SEB_STAT_TXALL);

	if (status == 0)
		return (1);

#ifdef HMEDEBUG
	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status,
	    HME_SEB_STAT_BITS);
#endif
	return (1);
}

int
hme_intr(void *v)
{
	struct hme_softc *sc = (struct hme_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, HME_SEBI_STAT);
	if (status == 0xffffffff)
		return (0);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		r |= hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		r |= hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		r |= hme_rint(sc);

	return (r);
}

void
hme_watchdog(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	hme_reset(sc);
}

/*
 * Initialize the MII Management Interface
 */
void
hme_mifinit(struct hme_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int phy;
	u_int32_t v;

	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	phy = HME_PHYAD_EXTERNAL;
	if (v & HME_MIF_CFG_MDI1)
		phy = sc->sc_tcvr = HME_PHYAD_EXTERNAL;
	else if (v & HME_MIF_CFG_MDI0)
		phy = sc->sc_tcvr = HME_PHYAD_INTERNAL;
	else
		sc->sc_tcvr = -1;

	/* Configure the MIF in frame mode, no poll, current phy select */
	v = 0;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);
}

/*
 * MII interface
 */
static int
hme_mii_readreg(struct device *self, int phy, int reg)
{
	struct hme_softc *sc = (struct hme_softc *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v, xif_cfg, mifi_cfg;
	int n;

	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return (0);

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);
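	/*
	 * This is the standard IEEE 802.3 clause 22 management frame:
	 * a start sequence, a read/write opcode, 5-bit PHY and register
	 * addresses, a turnaround field, and 16 data bits, which the
	 * MIF shifts out over MDIO.
	 */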

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB) {
			v &= HME_MIF_FO_DATA;
			goto out;
		}
	}

	v = 0;
	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);

out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
	return (v);
}

static void
hme_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v, xif_cfg, mifi_cfg;
	int n;

	/* We can at most have two PHYs */
	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return;

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)	|
	    HME_MIF_FO_TAMSB				|
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT)	|
	    (phy << HME_MIF_FO_PHYAD_SHIFT)		|
	    (reg << HME_MIF_FO_REGAD_SHIFT)		|
	    (val & HME_MIF_FO_DATA);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			goto out;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
}

static void
hme_mii_statchg(struct device *dev)
{
	struct hme_softc *sc = (void *)dev;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change\n");
#endif

	/*
	 * Set the MAC Full Duplex bit appropriately.
	 * Apparently the hme chip is SIMPLEX if working in full duplex mode,
	 * but not otherwise.
	 */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= HME_MAC_TXCFG_FULLDPLX;
		sc->sc_arpcom.ac_if.if_flags |= IFF_SIMPLEX;
	} else {
		v &= ~HME_MAC_TXCFG_FULLDPLX;
		sc->sc_arpcom.ac_if.if_flags &= ~IFF_SIMPLEX;
	}
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
}

int
hme_mediachange(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mediachange: phy = %d\n", phy);
#endif
	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Select the current PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	return (mii_mediachg(&sc->sc_mii));
}

void
hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hme_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			hme_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				hme_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				hme_stop(sc, 0);
		}
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			hme_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
hme_iff(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t hash[4];
	u_int32_t rxcfg, crc;

	rxcfg = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	rxcfg &= ~(HME_MAC_RXCFG_HENABLE | HME_MAC_RXCFG_PMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;
	/* Clear hash table */
	hash[0] = hash[1] = hash[2] = hash[3] = 0;

	if (ifp->if_flags & IFF_PROMISC) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxcfg |= HME_MAC_RXCFG_PMISC;
	} else if (ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxcfg |= HME_MAC_RXCFG_HENABLE;
		hash[0] = hash[1] = hash[2] = hash[3] = 0xffff;
	} else {
		rxcfg |= HME_MAC_RXCFG_HENABLE;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			/* Set the corresponding bit in the filter. */
			hash[crc >> 4] |= 1 << (crc & 0xf);

			ETHER_NEXT_MULTI(step, enm);
		}
	}
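	/*
	 * The top 6 CRC bits select one of 64 filter bits, spread over
	 * four 16-bit hash registers.  Worked example: a 6-bit value of
	 * 0x2a selects word 0x2a >> 4 = 2 and bit 0x2a & 0xf = 10,
	 * i.e. bit 10 of HASHTAB2.
	 */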

	/* Now load the hash table into the chip */
	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
	bus_space_write_4(t, mac, HME_MACI_RXCFG, rxcfg);
}

void
hme_fill_rx_ring(struct hme_softc *sc)
{
	struct hme_sxd *sd;

	while (sc->sc_rx_cnt < HME_RX_RING_SIZE) {
		if (hme_newbuf(sc, &sc->sc_rxd[sc->sc_rx_prod]))
			break;

		sd = &sc->sc_rxd[sc->sc_rx_prod];
		HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod,
		    sd->sd_map->dm_segs[0].ds_addr);
		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_RX_PKTSIZE));

		if (++sc->sc_rx_prod == HME_RX_RING_SIZE)
			sc->sc_rx_prod = 0;
		sc->sc_rx_cnt++;
	}
}

int
hme_newbuf(struct hme_softc *sc, struct hme_sxd *d)
{
	struct mbuf *m;
	bus_dmamap_t map;

	/*
	 * All operations should be on local variables and/or rx spare map
	 * until we're sure everything is a success.
	 */

	m = MCLGETI(NULL, M_DONTWAIT, &sc->sc_arpcom.ac_if, MCLBYTES);
	if (!m)
		return (ENOBUFS);
	m->m_pkthdr.rcvif = &sc->sc_arpcom.ac_if;

	if (bus_dmamap_load(sc->sc_dmatag, sc->sc_rxmap_spare,
	    mtod(m, caddr_t), MCLBYTES - HME_RX_OFFSET, NULL,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * At this point we have a new buffer loaded into the spare map.
	 * Just need to clear out the old mbuf/map and put the new one
	 * in place.
	 */

	map = d->sd_map;
	d->sd_map = sc->sc_rxmap_spare;
	sc->sc_rxmap_spare = map;

	bus_dmamap_sync(sc->sc_dmatag, d->sd_map, 0, d->sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	m->m_data += HME_RX_OFFSET;
	d->sd_mbuf = m;
	return (0);
}