/*	$OpenBSD: hme.c,v 1.54 2008/12/10 20:37:48 brad Exp $	*/
/*	$NetBSD: hme.c,v 1.21 2001/07/07 15:59:37 thorpej Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * HME Ethernet module driver.
 */

#include "bpfilter.h"
#include "vlan.h"

#undef HMEDEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <dev/ic/hmereg.h>
#include <dev/ic/hmevar.h>

struct cfdriver hme_cd = {
	NULL, "hme", DV_IFNET
};

#define	HME_RX_OFFSET	2
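
/*
 * Receive buffers are shifted by two bytes so that the 14-byte Ethernet
 * header ends on a 4-byte boundary and the IP header that follows it is
 * 32-bit aligned.  The same offset is programmed into the ERX config
 * register in hme_init() and applied to each mbuf in hme_newbuf().
 */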
84 
85 void		hme_start(struct ifnet *);
86 void		hme_stop(struct hme_softc *);
87 int		hme_ioctl(struct ifnet *, u_long, caddr_t);
88 void		hme_tick(void *);
89 void		hme_watchdog(struct ifnet *);
90 void		hme_shutdown(void *);
91 void		hme_init(struct hme_softc *);
92 void		hme_meminit(struct hme_softc *);
93 void		hme_mifinit(struct hme_softc *);
94 void		hme_reset(struct hme_softc *);
95 void		hme_setladrf(struct hme_softc *);
96 int		hme_newbuf(struct hme_softc *, struct hme_sxd *, int);
97 
98 /* MII methods & callbacks */
99 static int	hme_mii_readreg(struct device *, int, int);
100 static void	hme_mii_writereg(struct device *, int, int, int);
101 static void	hme_mii_statchg(struct device *);
102 
103 int		hme_mediachange(struct ifnet *);
104 void		hme_mediastatus(struct ifnet *, struct ifmediareq *);
105 
106 int		hme_eint(struct hme_softc *, u_int);
107 int		hme_rint(struct hme_softc *);
108 int		hme_tint(struct hme_softc *);
109 /* TCP/UDP checksum offload support */
110 void 		hme_rxcksum(struct mbuf *, u_int32_t);
111 
112 void
113 hme_config(sc)
114 	struct hme_softc *sc;
115 {
116 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
117 	struct mii_data *mii = &sc->sc_mii;
118 	struct mii_softc *child;
119 	bus_dma_tag_t dmatag = sc->sc_dmatag;
120 	bus_dma_segment_t seg;
121 	bus_size_t size;
122 	int rseg, error, i;
123 
124 	/*
125 	 * HME common initialization.
126 	 *
127 	 * hme_softc fields that must be initialized by the front-end:
128 	 *
129 	 * the bus tag:
130 	 *	sc_bustag
131 	 *
132 	 * the dma bus tag:
133 	 *	sc_dmatag
134 	 *
135 	 * the bus handles:
136 	 *	sc_seb		(Shared Ethernet Block registers)
137 	 *	sc_erx		(Receiver Unit registers)
138 	 *	sc_etx		(Transmitter Unit registers)
139 	 *	sc_mac		(MAC registers)
140 	 *	sc_mif		(Management Interface registers)
141 	 *
142 	 * the maximum bus burst size:
143 	 *	sc_burst
144 	 *
145 	 * the local Ethernet address:
146 	 *	sc_arpcom.ac_enaddr
147 	 *
148 	 */
149 
150 	/* Make sure the chip is stopped. */
151 	hme_stop(sc);
152 
153 	for (i = 0; i < HME_TX_RING_SIZE; i++) {
154 		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, HME_TX_NSEGS,
155 		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
156 		    &sc->sc_txd[i].sd_map) != 0) {
157 			sc->sc_txd[i].sd_map = NULL;
158 			goto fail;
159 		}
160 	}
161 	for (i = 0; i < HME_RX_RING_SIZE; i++) {
162 		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
163 		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
164 		    &sc->sc_rxd[i].sd_map) != 0) {
165 			sc->sc_rxd[i].sd_map = NULL;
166 			goto fail;
167 		}
168 	}
169 	if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES, 0,
170 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_rxmap_spare) != 0) {
171 		sc->sc_rxmap_spare = NULL;
172 		goto fail;
173 	}
174 
175 	/*
176 	 * Allocate DMA capable memory
177 	 * Buffer descriptors must be aligned on a 2048 byte boundary;
178 	 * take this into account when calculating the size. Note that
179 	 * the maximum number of descriptors (256) occupies 2048 bytes,
180 	 * so we allocate that much regardless of the number of descriptors.
181 	 */
182 	size = (HME_XD_SIZE * HME_RX_RING_MAX) +	/* RX descriptors */
183 	    (HME_XD_SIZE * HME_TX_RING_MAX);		/* TX descriptors */
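	/*
	 * Worked example: the comment above implies HME_XD_SIZE is 8 bytes
	 * (256 descriptors in 2048 bytes), i.e. presumably one 32-bit
	 * flags/length word plus one 32-bit buffer address per descriptor,
	 * so with 256-entry maximum rings the allocation is 4096 bytes.
	 */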

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size, 2048, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("\n%s: DMA buffer alloc error %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/* Map DMA memory in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: DMA buffer map error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("\n%s: DMA map create error %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: DMA buffer map load error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = hme_mii_readreg;
	mii->mii_writereg = hme_mii_writereg;
	mii->mii_statchg = hme_mii_statchg;

	ifmedia_init(&mii->mii_media, IFM_IMASK,
	    hme_mediachange, hme_mediastatus);

	hme_mifinit(sc);

	if (sc->sc_tcvr == -1)
		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, 0);
	else
		mii_attach(&sc->sc_dev, mii, 0xffffffff, sc->sc_tcvr,
		    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping. We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				    sc->sc_dev.dv_xname,
				    child->mii_dev.dv_xname,
				    child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_sh = shutdownhook_establish(hme_shutdown, sc);
	if (sc->sc_sh == NULL)
		panic("hme_config: can't establish shutdownhook");

	timeout_set(&sc->sc_tick_ch, hme_tick, sc);
	return;

fail:
	if (sc->sc_rxmap_spare != NULL)
		bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare);
	for (i = 0; i < HME_TX_RING_SIZE; i++)
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map);
	for (i = 0; i < HME_RX_RING_SIZE; i++)
		if (sc->sc_rxd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map);
}

void
hme_tick(arg)
	void *arg;
{
	struct hme_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	int s;

	s = splnet();
	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, HME_MACI_NCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_FCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_EXCNT) +
	    bus_space_read_4(t, mac, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);

	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

void
hme_reset(sc)
	struct hme_softc *sc;
{
	int s;

	s = splnet();
	hme_init(sc);
	splx(s);
}

void
hme_stop(sc)
	struct hme_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	int n;

	timeout_del(&sc->sc_tick_ch);
	mii_down(&sc->sc_mii);

	/* Mask all interrupts */
	bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff);

	/* Reset transmitter and receiver */
	bus_space_write_4(t, seb, HME_SEBI_RESET,
	    (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));

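	/*
	 * The reset bits are self-clearing; poll until both drop, giving
	 * up after 20 iterations (roughly 400us with DELAY(20) each).
	 */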
	for (n = 0; n < 20; n++) {
		u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			break;
		DELAY(20);
	}
	if (n >= 20)
		printf("%s: hme_stop: reset failed\n", sc->sc_dev.dv_xname);

	for (n = 0; n < HME_TX_RING_SIZE; n++) {
		if (sc->sc_txd[n].sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_txd[n].sd_map,
			    0, sc->sc_txd[n].sd_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sc->sc_txd[n].sd_map);
			m_freem(sc->sc_txd[n].sd_mbuf);
			sc->sc_txd[n].sd_mbuf = NULL;
		}
	}
}

void
hme_meminit(sc)
	struct hme_softc *sc;
{
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	struct hme_ring *hr = &sc->sc_rb;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_TX_RING_SIZE * HME_XD_SIZE;
	dma += HME_TX_RING_SIZE * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_RX_RING_SIZE * HME_XD_SIZE;
	dma += HME_RX_RING_SIZE * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize transmit descriptors
	 */
	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * Initialize receive descriptors
	 */
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		if (hme_newbuf(sc, &sc->sc_rxd[i], 1)) {
			printf("%s: rx allocation failed\n",
			    sc->sc_dev.dv_xname);
			break;
		}
		HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i,
		    sc->sc_rxd[i].sd_map->dm_segs[0].ds_addr);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_RX_PKTSIZE));
	}

	sc->sc_tx_prod = sc->sc_tx_cons = sc->sc_tx_cnt = 0;
	sc->sc_last_rd = 0;
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
void
hme_init(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	bus_space_handle_t etx = sc->sc_etx;
	bus_space_handle_t erx = sc->sc_erx;
	bus_space_handle_t mac = sc->sc_mac;
	u_int8_t *ea;
	u_int32_t v, n;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

#if 0
	/* Mask all MIF interrupts, just in case */
	bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Setup data structures in host memory */
	hme_meminit(sc);

	/* step 4. TX MAC registers & counters */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_TXSIZE, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
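	/*
	 * TXSIZE (and RXSIZE below) allow ETHER_VLAN_ENCAP_LEN extra bytes
	 * beyond ETHER_MAX_LEN so 802.1Q tagged frames fit, matching the
	 * IFCAP_VLAN_MTU capability advertised in hme_config().
	 */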

	/* Load station MAC address */
	ea = sc->sc_arpcom.ac_enaddr;
	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);


	/* Note: Accepting power-on default for other MAC registers here.. */


	/* step 5. RX MAC registers & counters */
	hme_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE);

	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, mac, HME_MACI_RXSIZE, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, seb, HME_SEBI_IMASK,
	    ~(HME_SEB_STAT_HOSTTOTX | HME_SEB_STAT_RXTOHOST |
	      HME_SEB_STAT_TXALL | HME_SEB_STAT_TXPERR |
	      HME_SEB_STAT_RCNTEXP | HME_SEB_STAT_ALL_ERRORS));

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	bus_space_write_4(t, seb, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	bus_space_write_4(t, etx, HME_ETXI_CFG, v);

	/* Transmit Descriptor ring size: in increments of 16 */
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE / 16 - 1);
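	/*
	 * The register encodes the count in units of 16 descriptors,
	 * biased by one; e.g. a 64-entry ring is written as 64 / 16 - 1 = 3.
	 */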

	/* step 10. ERX Configuration */
	v = bus_space_read_4(t, erx, HME_ERXI_CFG);
	v &= ~HME_ERX_CFG_RINGSIZE256;
#if HME_RX_RING_SIZE == 32
	v |= HME_ERX_CFG_RINGSIZE32;
#elif HME_RX_RING_SIZE == 64
	v |= HME_ERX_CFG_RINGSIZE64;
#elif HME_RX_RING_SIZE == 128
	v |= HME_ERX_CFG_RINGSIZE128;
#elif HME_RX_RING_SIZE == 256
	v |= HME_ERX_CFG_RINGSIZE256;
#else
# error	"RX ring size must be 32, 64, 128, or 256"
#endif
	/* Enable DMA */
	v |= HME_ERX_CFG_DMAENABLE | (HME_RX_OFFSET << 3);
	/* RX TCP/UDP cksum offset */
	n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
	n = (n << HME_ERX_CFG_CSUM_SHIFT) & HME_ERX_CFG_CSUMSTART;
	v |= n;
	bus_space_write_4(t, erx, HME_ERXI_CFG, v);
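	/*
	 * The checksum start offset is counted in 16-bit words: 14 bytes
	 * of Ethernet header plus 20 bytes of minimal IP header is 34
	 * bytes, i.e. n = 17, so the hardware checksums from the TCP/UDP
	 * header onward.  IP options, if present, are corrected for in
	 * hme_rxcksum().
	 */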

	/* step 11. XIF Configuration */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* Set the current media. */
	mii_mediachg(&sc->sc_mii);

	/* Start the one second timer. */
	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;
	hme_start(ifp);
}

void
hme_start(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	struct hme_ring *hr = &sc->sc_rb;
	struct mbuf *m;
	u_int32_t flags;
	bus_dmamap_t map;
	u_int32_t frag, cur, i;
	int error;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */

		cur = frag = sc->sc_tx_prod;
		map = sc->sc_txd[cur].sd_map;

		error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG)
			goto drop;
		if (error != 0) {
			/* Too many fragments, linearize. */
			if (m_defrag(m, M_DONTWAIT))
				goto drop;
			error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
			    BUS_DMA_NOWAIT);
			if (error != 0)
				goto drop;
		}

		if ((HME_TX_RING_SIZE - (sc->sc_tx_cnt + map->dm_nsegs)) < 5) {
			bus_dmamap_unload(sc->sc_dmatag, map);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* We are now committed to transmitting the packet. */
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		for (i = 0; i < map->dm_nsegs; i++) {
			flags = HME_XD_ENCODE_TSIZE(map->dm_segs[i].ds_len);
			if (i == 0)
				flags |= HME_XD_SOP;
			else
				flags |= HME_XD_OWN;

			HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, frag,
			    map->dm_segs[i].ds_addr);
			HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, frag, flags);

			cur = frag;
			if (++frag == HME_TX_RING_SIZE)
				frag = 0;
		}

		/* Set end of packet on last descriptor. */
		flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, cur);
		flags |= HME_XD_EOP;
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, cur, flags);

		sc->sc_tx_cnt += map->dm_nsegs;
		sc->sc_txd[sc->sc_tx_prod].sd_map = sc->sc_txd[cur].sd_map;
		sc->sc_txd[cur].sd_map = map;
		sc->sc_txd[cur].sd_mbuf = m;

		/* Give first frame over to the hardware. */
		flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod);
		flags |= HME_XD_OWN;
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod, flags);
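		/*
		 * OWN was deliberately left off the first (SOP) descriptor
		 * while the rest of the chain was set up above, so the chip
		 * could not start on a partially built packet; setting it
		 * last publishes the whole chain atomically.
		 */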

		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
		    HME_ETX_TP_DMAWAKEUP);
		sc->sc_tx_prod = frag;

		ifp->if_timer = 5;
	}

	return;

 drop:
	IFQ_DEQUEUE(&ifp->if_snd, m);
	m_freem(m);
	ifp->if_oerrors++;
}

/*
 * Transmit interrupt.
 */
int
hme_tint(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	unsigned int ri, txflags;
	struct hme_sxd *sd;
	int cnt = sc->sc_tx_cnt;

	/* Fetch current position in the transmit ring */
	ri = sc->sc_tx_cons;
	sd = &sc->sc_txd[ri];

	for (;;) {
		if (cnt <= 0)
			break;

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);

		if (txflags & HME_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		if (txflags & HME_XD_EOP)
			ifp->if_opackets++;

		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,
			    0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}

		if (++ri == HME_TX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_txd;
		} else
			sd++;

		--cnt;
	}

	sc->sc_tx_cnt = cnt;
	ifp->if_timer = cnt > 0 ? 5 : 0;

	/* Update ring */
	sc->sc_tx_cons = ri;

	hme_start(ifp);

	return (1);
}

/*
 * XXX layering violation
 *
 * If 'struct pkthdr' had an additional checksum member for hardware
 * like this, which offers only an incomplete checksum offload, things
 * would be much simpler: that member would carry the partial checksum
 * data, and the TCP/UDP input handlers could evaluate it after
 * computing the pseudo-header checksum.
 */
void
hme_rxcksum(struct mbuf *m, u_int32_t flags)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen;
	u_int16_t cksum, *opts;
	u_int32_t temp32;
	union pseudoh {
		struct hdr {
			u_int16_t len;
			u_int8_t ttl;
			u_int8_t proto;
			u_int32_t src;
			u_int32_t dst;
		} h;
		u_int16_t w[6];
	} ph;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return; /* no checksum */
		break;
	default:
		return;
	}

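	/*
	 * The hardware checksum in 'flags' is a one's complement sum of
	 * the frame starting right after a minimal IP header.  Validate
	 * it by subtracting out any IP option words it wrongly included,
	 * folding in the TCP/UDP pseudo-header, and checking that the
	 * complement of the total is zero.
	 */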
	cksum = htons(~(flags & HME_XD_RXCKSUM));
	/* cksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (u_int16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(u_int16_t), opts++) {
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}
	/* cksum fixup for pseudo-header, replace with in_cksum_phdr()? */
	ph.h.len = htons(ntohs(ip->ip_len) - hlen);
	ph.h.ttl = 0;
	ph.h.proto = ip->ip_p;
	ph.h.src = ip->ip_src.s_addr;
	ph.h.dst = ip->ip_dst.s_addr;
	temp32 = cksum;
	opts = &ph.w[0];
	temp32 += opts[0] + opts[1] + opts[2] + opts[3] + opts[4] + opts[5];
	temp32 = (temp32 >> 16) + (temp32 & 65535);
	temp32 += (temp32 >> 16);
	cksum = ~temp32;
	if (cksum == 0) {
		m->m_pkthdr.csum_flags |=
			M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
	}
}

/*
 * Receive interrupt.
 */
int
hme_rint(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;
	struct hme_sxd *sd;
	unsigned int ri, len;
	u_int32_t flags;

	ri = sc->sc_last_rd;
	sd = &sc->sc_rxd[ri];

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri);
		if (flags & HME_XD_OWN)
			break;

		if (flags & HME_XD_OFL) {
			printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
			    sc->sc_dev.dv_xname, ri, flags);
			goto again;
		}

		m = sd->sd_mbuf;
		len = HME_XD_DECODE_RSIZE(flags);
		m->m_pkthdr.len = m->m_len = len;

		if (hme_newbuf(sc, sd, 0)) {
			/*
			 * Allocation of new mbuf cluster failed, leave the
			 * old one in place and keep going.
			 */
			ifp->if_ierrors++;
			goto again;
		}

		ifp->if_ipackets++;
		hme_rxcksum(m, flags);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);

again:
		HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri,
		    sd->sd_map->dm_segs[0].ds_addr);
		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_RX_PKTSIZE));

		if (++ri == HME_RX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_rxd;
		} else
			sd++;
	}

	sc->sc_last_rd = ri;
	return (1);
}

int
hme_eint(sc, status)
	struct hme_softc *sc;
	u_int status;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	if (status & HME_SEB_STAT_MIFIRQ) {
		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
		status &= ~HME_SEB_STAT_MIFIRQ;
	}

	if (status & HME_SEB_STAT_DTIMEXP) {
		ifp->if_oerrors++;
		status &= ~HME_SEB_STAT_DTIMEXP;
	}

	if (status & HME_SEB_STAT_NORXD) {
		ifp->if_ierrors++;
		status &= ~HME_SEB_STAT_NORXD;
	}

	status &= ~(HME_SEB_STAT_RXTOHOST | HME_SEB_STAT_GOTFRAME |
	    HME_SEB_STAT_SENTFRAME | HME_SEB_STAT_HOSTTOTX |
	    HME_SEB_STAT_TXALL);

	if (status == 0)
		return (1);

#ifdef HMEDEBUG
	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, HME_SEB_STAT_BITS);
#endif
	return (1);
}

int
hme_intr(v)
	void *v;
{
	struct hme_softc *sc = (struct hme_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, HME_SEBI_STAT);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		r |= hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		r |= hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		r |= hme_rint(sc);

	return (r);
}


void
hme_watchdog(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	hme_reset(sc);
}

/*
 * Initialize the MII Management Interface
 */
void
hme_mifinit(sc)
	struct hme_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int phy;
	u_int32_t v;

	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	phy = HME_PHYAD_EXTERNAL;
	if (v & HME_MIF_CFG_MDI1)
		phy = sc->sc_tcvr = HME_PHYAD_EXTERNAL;
	else if (v & HME_MIF_CFG_MDI0)
		phy = sc->sc_tcvr = HME_PHYAD_INTERNAL;
	else
		sc->sc_tcvr = -1;

	/* Configure the MIF in frame mode, no poll, current phy select */
	v = 0;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);
}

/*
 * MII interface
 */
static int
hme_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct hme_softc *sc = (struct hme_softc *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v, xif_cfg, mifi_cfg;
	int n;

	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return (0);

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);
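	/*
	 * This is an IEEE 802.3 clause 22 management frame: start bits,
	 * read opcode, PHY address, and register address.  The MIF shifts
	 * it out on MDIO and indicates completion by setting the
	 * turnaround LSB, polled for below.
	 */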

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB) {
			v &= HME_MIF_FO_DATA;
			goto out;
		}
	}

	v = 0;
	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);

out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
	return (v);
}

static void
hme_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v, xif_cfg, mifi_cfg;
	int n;

	/* We can at most have two PHYs */
	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return;

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)	|
	    HME_MIF_FO_TAMSB				|
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT)	|
	    (phy << HME_MIF_FO_PHYAD_SHIFT)		|
	    (reg << HME_MIF_FO_REGAD_SHIFT)		|
	    (val & HME_MIF_FO_DATA);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			goto out;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
}

static void
hme_mii_statchg(dev)
	struct device *dev;
{
	struct hme_softc *sc = (void *)dev;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change\n");
#endif

	/* Set the MAC Full Duplex bit appropriately */
	/* Apparently the hme chip is SIMPLEX if working in full duplex mode,
	   but not otherwise. */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= HME_MAC_TXCFG_FULLDPLX;
		sc->sc_arpcom.ac_if.if_flags |= IFF_SIMPLEX;
	} else {
		v &= ~HME_MAC_TXCFG_FULLDPLX;
		sc->sc_arpcom.ac_if.if_flags &= ~IFF_SIMPLEX;
	}
	sc->sc_if_flags = sc->sc_arpcom.ac_if.if_flags;
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
}

int
hme_mediachange(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mediachange: phy = %d\n", phy);
#endif
	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Select the current PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	return (mii_mediachg(&sc->sc_mii));
}

void
hme_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct hme_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
hme_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if (ifp->if_flags & IFF_UP)
				hme_setladrf(sc);
			else {
				ifp->if_flags |= IFF_UP;
				hme_init(sc);
			}
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			hme_init(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			hme_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			hme_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * If setting debug or promiscuous mode, do not reset
			 * the chip; for everything else, call hme_init()
			 * which will trigger a reset.
			 */
#define RESETIGN (IFF_CANTCHANGE | IFF_DEBUG)
			if (ifp->if_flags == sc->sc_if_flags)
				break;
			if ((ifp->if_flags & (~RESETIGN))
			    == (sc->sc_if_flags & (~RESETIGN)))
				hme_setladrf(sc);
			else
				hme_init(sc);
#undef RESETIGN
		}
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			hme_setladrf(sc);
		error = 0;
	}

	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

void
hme_shutdown(arg)
	void *arg;
{
	hme_stop((struct hme_softc *)arg);
}

/*
 * Set up the logical address filter.
 */
void
hme_setladrf(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct arpcom *ac = &sc->sc_arpcom;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t hash[4];
	u_int32_t v, crc;

	/* Clear hash table */
	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	/* Get current RX configuration */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter */
		v |= HME_MAC_RXCFG_PMISC;
		v &= ~HME_MAC_RXCFG_HENABLE;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter */
	v &= ~HME_MAC_RXCFG_PMISC;
	v |= HME_MAC_RXCFG_HENABLE;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 6 bits as an
	 * index into the 64 bit logical address filter.  The two high order
	 * bits select the word, while the remaining four bits select the bit
	 * within the word.
	 */

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
			ifp->if_flags |= IFF_ALLMULTI;
			goto chipit;
		}

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);
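		/*
		 * Example: a CRC index of 0x2f (binary 10.1111) sets
		 * bit 15 of hash[2].
		 */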

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Now load the hash table into the chip */
	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
}

int
hme_newbuf(sc, d, freeit)
	struct hme_softc *sc;
	struct hme_sxd *d;
	int freeit;
{
	struct mbuf *m;
	bus_dmamap_t map;

	/*
	 * All operations should be on local variables and/or rx spare map
	 * until we're sure everything is a success.
	 */

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_pkthdr.rcvif = &sc->sc_arpcom.ac_if;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (bus_dmamap_load(sc->sc_dmatag, sc->sc_rxmap_spare,
	    mtod(m, caddr_t), MCLBYTES - HME_RX_OFFSET, NULL,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * At this point we have a new buffer loaded into the spare map.
	 * Just need to clear out the old mbuf/map and put the new one
	 * in place.
	 */

	if (d->sd_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_dmatag, d->sd_map,
		    0, d->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, d->sd_map);
		if (freeit) {
			m_freem(d->sd_mbuf);
			d->sd_mbuf = NULL;
		}
	}

	map = d->sd_map;
	d->sd_map = sc->sc_rxmap_spare;
	sc->sc_rxmap_spare = map;

	bus_dmamap_sync(sc->sc_dmatag, d->sd_map, 0, d->sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	m->m_data += HME_RX_OFFSET;
	d->sd_mbuf = m;
	return (0);
}
1494