/*	$OpenBSD: hme.c,v 1.56 2009/04/23 21:24:14 kettenis Exp $	*/
/*	$NetBSD: hme.c,v 1.21 2001/07/07 15:59:37 thorpej Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * HME Ethernet module driver.
 */

#include "bpfilter.h"
#include "vlan.h"

#undef HMEDEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <dev/ic/hmereg.h>
#include <dev/ic/hmevar.h>

struct cfdriver hme_cd = {
	NULL, "hme", DV_IFNET
};

#define	HME_RX_OFFSET	2

void		hme_start(struct ifnet *);
void		hme_stop(struct hme_softc *);
int		hme_ioctl(struct ifnet *, u_long, caddr_t);
void		hme_tick(void *);
void		hme_watchdog(struct ifnet *);
void		hme_shutdown(void *);
void		hme_init(struct hme_softc *);
void		hme_meminit(struct hme_softc *);
void		hme_mifinit(struct hme_softc *);
void		hme_reset(struct hme_softc *);
void		hme_setladrf(struct hme_softc *);
void		hme_fill_rx_ring(struct hme_softc *);
int		hme_newbuf(struct hme_softc *, struct hme_sxd *);

/* MII methods & callbacks */
static int	hme_mii_readreg(struct device *, int, int);
static void	hme_mii_writereg(struct device *, int, int, int);
static void	hme_mii_statchg(struct device *);

int		hme_mediachange(struct ifnet *);
void		hme_mediastatus(struct ifnet *, struct ifmediareq *);

int		hme_eint(struct hme_softc *, u_int);
int		hme_rint(struct hme_softc *);
int		hme_tint(struct hme_softc *);
/* TCP/UDP checksum offload support */
void		hme_rxcksum(struct mbuf *, u_int32_t);

void
hme_config(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	bus_dma_tag_t dmatag = sc->sc_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error, i;

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the bus tag:
	 *	sc_bustag
	 *
	 * the dma bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles:
	 *	sc_seb		(Shared Ethernet Block registers)
	 *	sc_erx		(Receiver Unit registers)
	 *	sc_etx		(Transmitter Unit registers)
	 *	sc_mac		(MAC registers)
	 *	sc_mif		(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 *
	 * the local Ethernet address:
	 *	sc_arpcom.ac_enaddr
	 *
	 */

	/* Make sure the chip is stopped. */
	hme_stop(sc);

	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, HME_TX_NSEGS,
		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_txd[i].sd_map) != 0) {
			sc->sc_txd[i].sd_map = NULL;
			goto fail;
		}
	}
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_rxd[i].sd_map) != 0) {
			sc->sc_rxd[i].sd_map = NULL;
			goto fail;
		}
	}
	if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_rxmap_spare) != 0) {
		sc->sc_rxmap_spare = NULL;
		goto fail;
	}

	/*
	 * Allocate DMA capable memory.
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of the number of descriptors.
	 */
	size = (HME_XD_SIZE * HME_RX_RING_MAX) +	/* RX descriptors */
	    (HME_XD_SIZE * HME_TX_RING_MAX);		/* TX descriptors */
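	/*
	 * Worked example, assuming the 8 byte descriptor size implied by
	 * the comment above (256 descriptors in 2048 bytes): each ring
	 * occupies exactly 8 * 256 = 2048 bytes, so `size' is 4096 and
	 * both rings fall on 2048 byte boundaries once the base of the
	 * allocation is itself 2048 byte aligned, as requested below.
	 */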

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size, 2048, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("\n%s: DMA buffer alloc error %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/* Map DMA memory in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: DMA buffer map error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("\n%s: DMA map create error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: DMA buffer map load error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamap_destroy(dmatag, sc->sc_dmamap);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	m_clsetwms(ifp, MCLBYTES, 0, HME_RX_RING_SIZE);

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = hme_mii_readreg;
	mii->mii_writereg = hme_mii_writereg;
	mii->mii_statchg = hme_mii_statchg;

	ifmedia_init(&mii->mii_media, IFM_IMASK,
	    hme_mediachange, hme_mediastatus);

	hme_mifinit(sc);

	if (sc->sc_tcvr == -1)
		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, 0);
	else
		mii_attach(&sc->sc_dev, mii, 0xffffffff, sc->sc_tcvr,
		    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping. We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				    sc->sc_dev.dv_xname,
				    child->mii_dev.dv_xname,
				    child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_sh = shutdownhook_establish(hme_shutdown, sc);
	if (sc->sc_sh == NULL)
		panic("hme_config: can't establish shutdownhook");

	timeout_set(&sc->sc_tick_ch, hme_tick, sc);
	return;

fail:
	if (sc->sc_rxmap_spare != NULL)
		bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare);
	for (i = 0; i < HME_TX_RING_SIZE; i++)
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map);
	for (i = 0; i < HME_RX_RING_SIZE; i++)
		if (sc->sc_rxd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map);
}

void
hme_tick(arg)
	void *arg;
{
	struct hme_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	int s;

	s = splnet();
	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, HME_MACI_NCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_FCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_EXCNT) +
	    bus_space_read_4(t, mac, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);

	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

void
hme_reset(sc)
	struct hme_softc *sc;
{
	int s;

	s = splnet();
	hme_init(sc);
	splx(s);
}

void
hme_stop(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	int n;

	timeout_del(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	mii_down(&sc->sc_mii);

	/* Mask all interrupts */
	bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff);

	/* Reset transmitter and receiver */
	bus_space_write_4(t, seb, HME_SEBI_RESET,
	    (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));

	for (n = 0; n < 20; n++) {
		u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			break;
		DELAY(20);
	}
	if (n >= 20)
		printf("%s: hme_stop: reset failed\n", sc->sc_dev.dv_xname);

	for (n = 0; n < HME_TX_RING_SIZE; n++) {
		if (sc->sc_txd[n].sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_txd[n].sd_map,
			    0, sc->sc_txd[n].sd_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sc->sc_txd[n].sd_map);
			m_freem(sc->sc_txd[n].sd_mbuf);
			sc->sc_txd[n].sd_mbuf = NULL;
		}
	}
	sc->sc_tx_prod = sc->sc_tx_cons = sc->sc_tx_cnt = 0;

	for (n = 0; n < HME_RX_RING_SIZE; n++) {
		if (sc->sc_rxd[n].sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_rxd[n].sd_map,
			    0, sc->sc_rxd[n].sd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmatag, sc->sc_rxd[n].sd_map);
			m_freem(sc->sc_rxd[n].sd_mbuf);
			sc->sc_rxd[n].sd_mbuf = NULL;
		}
	}
	sc->sc_rx_prod = sc->sc_rx_cons = sc->sc_rx_cnt = 0;
}

void
hme_meminit(sc)
	struct hme_softc *sc;
{
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	struct hme_ring *hr = &sc->sc_rb;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_TX_RING_SIZE * HME_XD_SIZE;
	dma += HME_TX_RING_SIZE * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_RX_RING_SIZE * HME_XD_SIZE;
	dma += HME_RX_RING_SIZE * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize transmit descriptors
	 */
	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * Initialize receive descriptors
	 */
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i, 0);
		sc->sc_rxd[i].sd_mbuf = NULL;
	}

	hme_fill_rx_ring(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
void
hme_init(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	bus_space_handle_t etx = sc->sc_etx;
	bus_space_handle_t erx = sc->sc_erx;
	bus_space_handle_t mac = sc->sc_mac;
	u_int8_t *ea;
	u_int32_t v, n;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

#if 0
	/* Mask all MIF interrupts, just in case */
	bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Setup data structures in host memory */
	hme_meminit(sc);

	/* step 4. TX MAC registers & counters */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_TXSIZE,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load station MAC address */
	ea = sc->sc_arpcom.ac_enaddr;
	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);

	/* Note: Accepting power-on default for other MAC registers here.. */

	/* step 5. RX MAC registers & counters */
	hme_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE);

	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, mac, HME_MACI_RXSIZE,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, seb, HME_SEBI_IMASK,
	    ~(HME_SEB_STAT_HOSTTOTX | HME_SEB_STAT_RXTOHOST |
	      HME_SEB_STAT_TXALL | HME_SEB_STAT_TXPERR |
	      HME_SEB_STAT_RCNTEXP | HME_SEB_STAT_ALL_ERRORS));
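	/*
	 * Note the mask polarity: a set bit in HME_SEBI_IMASK disables
	 * that interrupt source (hme_stop() writes 0xffffffff to mask
	 * everything), so writing the complement above leaves exactly
	 * the listed TX, RX and error sources enabled.
	 */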

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	bus_space_write_4(t, seb, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	bus_space_write_4(t, etx, HME_ETXI_CFG, v);

	/* Transmit Descriptor ring size: in increments of 16 */
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE / 16 - 1);

	/* step 10. ERX Configuration */
	v = bus_space_read_4(t, erx, HME_ERXI_CFG);
	v &= ~HME_ERX_CFG_RINGSIZE256;
#if HME_RX_RING_SIZE == 32
	v |= HME_ERX_CFG_RINGSIZE32;
#elif HME_RX_RING_SIZE == 64
	v |= HME_ERX_CFG_RINGSIZE64;
#elif HME_RX_RING_SIZE == 128
	v |= HME_ERX_CFG_RINGSIZE128;
#elif HME_RX_RING_SIZE == 256
	v |= HME_ERX_CFG_RINGSIZE256;
#else
# error	"RX ring size must be 32, 64, 128, or 256"
#endif
	/* Enable DMA */
	v |= HME_ERX_CFG_DMAENABLE | (HME_RX_OFFSET << 3);
	/* RX TCP/UDP cksum offset */
	n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
	n = (n << HME_ERX_CFG_CSUM_SHIFT) & HME_ERX_CFG_CSUMSTART;
	v |= n;
	bus_space_write_4(t, erx, HME_ERXI_CFG, v);
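	/*
	 * Worked example for the checksum-start field above: it is
	 * counted in 16-bit halfwords (hence the division by two), so
	 * with a 14 byte Ethernet header and a minimal 20 byte IP header
	 * n = (14 + 20) / 2 = 17 and the chip begins summing at byte
	 * offset 34, the start of the TCP/UDP header.  Packets carrying
	 * IP options are patched up later in hme_rxcksum().
	 */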

	/* step 11. XIF Configuration */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* Set the current media. */
	mii_mediachg(&sc->sc_mii);

	/* Start the one second timer. */
	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;
	hme_start(ifp);
}

void
hme_start(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	struct hme_ring *hr = &sc->sc_rb;
	struct mbuf *m;
	u_int32_t flags;
	bus_dmamap_t map;
	u_int32_t frag, cur, i;
	int error;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */

		cur = frag = sc->sc_tx_prod;
		map = sc->sc_txd[cur].sd_map;

		error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG)
			goto drop;
		if (error != 0) {
			/* Too many fragments, linearize. */
			if (m_defrag(m, M_DONTWAIT))
				goto drop;
			error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
			    BUS_DMA_NOWAIT);
			if (error != 0)
				goto drop;
		}

		if ((HME_TX_RING_SIZE - (sc->sc_tx_cnt + map->dm_nsegs)) < 5) {
			bus_dmamap_unload(sc->sc_dmatag, map);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* We are now committed to transmitting the packet. */
		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

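		/*
		 * Descriptor ownership protocol: every fragment after
		 * the first is handed to the chip (HME_XD_OWN) as it is
		 * set up, but the first descriptor (HME_XD_SOP) stays
		 * owned by the host until the whole chain is written and
		 * is flipped below, so the chip can never start on a
		 * half-built packet.
		 */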
		for (i = 0; i < map->dm_nsegs; i++) {
			flags = HME_XD_ENCODE_TSIZE(map->dm_segs[i].ds_len);
			if (i == 0)
				flags |= HME_XD_SOP;
			else
				flags |= HME_XD_OWN;

			HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, frag,
			    map->dm_segs[i].ds_addr);
			HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, frag, flags);

			cur = frag;
			if (++frag == HME_TX_RING_SIZE)
				frag = 0;
		}

		/* Set end of packet on last descriptor. */
		flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, cur);
		flags |= HME_XD_EOP;
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, cur, flags);

		sc->sc_tx_cnt += map->dm_nsegs;
		sc->sc_txd[sc->sc_tx_prod].sd_map = sc->sc_txd[cur].sd_map;
		sc->sc_txd[cur].sd_map = map;
		sc->sc_txd[cur].sd_mbuf = m;

		/* Give first frame over to the hardware. */
		flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod);
		flags |= HME_XD_OWN;
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod, flags);

		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
		    HME_ETX_TP_DMAWAKEUP);
		sc->sc_tx_prod = frag;

		ifp->if_timer = 5;
	}

	return;

 drop:
	IFQ_DEQUEUE(&ifp->if_snd, m);
	m_freem(m);
	ifp->if_oerrors++;
}

/*
 * Transmit interrupt.
 */
int
hme_tint(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	unsigned int ri, txflags;
	struct hme_sxd *sd;
	int cnt = sc->sc_tx_cnt;

	/* Fetch current position in the transmit ring */
	ri = sc->sc_tx_cons;
	sd = &sc->sc_txd[ri];

	for (;;) {
		if (cnt <= 0)
			break;

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);

		if (txflags & HME_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		if (txflags & HME_XD_EOP)
			ifp->if_opackets++;

		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,
			    0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}

		if (++ri == HME_TX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_txd;
		} else
			sd++;

		--cnt;
	}

	sc->sc_tx_cnt = cnt;
	ifp->if_timer = cnt > 0 ? 5 : 0;

	/* Update ring */
	sc->sc_tx_cons = ri;

	hme_start(ifp);

	return (1);
}

/*
 * XXX layering violation
 *
 * If 'struct pkthdr' had an additional checksum member for hardware
 * with this kind of incomplete checksum offload, things would be much
 * simpler: that member could carry the partial checksum, and the
 * TCP/UDP input handlers could evaluate it after computing the pseudo
 * header checksum.
 */
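/*
 * Sketch of the fixup done below, in ones-complement arithmetic: the
 * chip returns a 16-bit ones-complement sum of the packet starting at
 * the offset programmed in hme_init().  To validate it we subtract the
 * IP option words the chip summed but the pseudo header excludes, then
 * add the pseudo header words, folding the carry back in each time:
 *
 *	temp32 = cksum - *opts;
 *	temp32 = (temp32 >> 16) + (temp32 & 65535);
 *
 * The complement of the final folded sum must be zero for a valid
 * TCP/UDP checksum.
 */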
void
hme_rxcksum(struct mbuf *m, u_int32_t flags)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen;
	u_int16_t cksum, *opts;
	u_int32_t temp32;
	union pseudoh {
		struct hdr {
			u_int16_t len;
			u_int8_t ttl;
			u_int8_t proto;
			u_int32_t src;
			u_int32_t dst;
		} h;
		u_int16_t w[6];
	} ph;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return; /* no checksum */
		break;
	default:
		return;
	}

	cksum = htons(~(flags & HME_XD_RXCKSUM));
	/* cksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (u_int16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(u_int16_t), opts++) {
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}
	/* cksum fixup for pseudo-header, replace with in_cksum_phdr()? */
	ph.h.len = htons(ntohs(ip->ip_len) - hlen);
	ph.h.ttl = 0;
	ph.h.proto = ip->ip_p;
	ph.h.src = ip->ip_src.s_addr;
	ph.h.dst = ip->ip_dst.s_addr;
	temp32 = cksum;
	opts = &ph.w[0];
	temp32 += opts[0] + opts[1] + opts[2] + opts[3] + opts[4] + opts[5];
	temp32 = (temp32 >> 16) + (temp32 & 65535);
	temp32 += (temp32 >> 16);
	cksum = ~temp32;
	if (cksum == 0) {
		m->m_pkthdr.csum_flags |=
			M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
	}
}

/*
 * Receive interrupt.
 */
int
hme_rint(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;
	struct hme_sxd *sd;
	unsigned int ri, len;
	u_int32_t flags;

	ri = sc->sc_rx_cons;
	sd = &sc->sc_rxd[ri];

	/*
	 * Process all buffers with valid data.
	 */
	while (sc->sc_rx_cnt > 0) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri);
		if (flags & HME_XD_OWN)
			break;

		bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,
		    0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);

		m = sd->sd_mbuf;
		sd->sd_mbuf = NULL;

		if (++ri == HME_RX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_rxd;
		} else
			sd++;
		sc->sc_rx_cnt--;

		if (flags & HME_XD_OFL) {
			ifp->if_ierrors++;
			printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
			    sc->sc_dev.dv_xname, ri, flags);
			m_freem(m);
			continue;
		}

		len = HME_XD_DECODE_RSIZE(flags);
		m->m_pkthdr.len = m->m_len = len;

		ifp->if_ipackets++;
		hme_rxcksum(m, flags);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);
	}

	sc->sc_rx_cons = ri;
	hme_fill_rx_ring(sc);
	return (1);
}

int
hme_eint(sc, status)
	struct hme_softc *sc;
	u_int status;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	if (status & HME_SEB_STAT_MIFIRQ) {
		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
		status &= ~HME_SEB_STAT_MIFIRQ;
	}

	if (status & HME_SEB_STAT_DTIMEXP) {
		ifp->if_oerrors++;
		status &= ~HME_SEB_STAT_DTIMEXP;
	}

	if (status & HME_SEB_STAT_NORXD) {
		ifp->if_ierrors++;
		status &= ~HME_SEB_STAT_NORXD;
	}

	status &= ~(HME_SEB_STAT_RXTOHOST | HME_SEB_STAT_GOTFRAME |
	    HME_SEB_STAT_SENTFRAME | HME_SEB_STAT_HOSTTOTX |
	    HME_SEB_STAT_TXALL);

	if (status == 0)
		return (1);

#ifdef HMEDEBUG
	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status,
	    HME_SEB_STAT_BITS);
#endif
	return (1);
}

int
hme_intr(v)
	void *v;
{
	struct hme_softc *sc = (struct hme_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, HME_SEBI_STAT);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		r |= hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		r |= hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		r |= hme_rint(sc);

	return (r);
}

void
hme_watchdog(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	hme_reset(sc);
}

/*
 * Initialize the MII Management Interface
 */
void
hme_mifinit(sc)
	struct hme_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int phy;
	u_int32_t v;

	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	phy = HME_PHYAD_EXTERNAL;
	if (v & HME_MIF_CFG_MDI1)
		phy = sc->sc_tcvr = HME_PHYAD_EXTERNAL;
	else if (v & HME_MIF_CFG_MDI0)
		phy = sc->sc_tcvr = HME_PHYAD_INTERNAL;
	else
		sc->sc_tcvr = -1;

	/* Configure the MIF in frame mode, no poll, current phy select */
	v = 0;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);
}

/*
 * MII interface
 */
static int
hme_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct hme_softc *sc = (struct hme_softc *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v, xif_cfg, mifi_cfg;
	int n;

	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return (0);

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);
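	/*
	 * This is the standard IEEE 802.3 clause 22 management frame:
	 * start bits, read opcode, 5-bit PHY address and 5-bit register
	 * address, followed by a turnaround during which the PHY drives
	 * the 16 data bits.  HME_MIF_FO_TALSB going high in the poll
	 * below signals that the turnaround completed and the data
	 * field is valid.
	 */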

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB) {
			v &= HME_MIF_FO_DATA;
			goto out;
		}
	}

	v = 0;
	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);

out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
	return (v);
}

static void
hme_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v, xif_cfg, mifi_cfg;
	int n;

	/* We can at most have two PHYs */
	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return;

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)	|
	    HME_MIF_FO_TAMSB				|
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT)	|
	    (phy << HME_MIF_FO_PHYAD_SHIFT)		|
	    (reg << HME_MIF_FO_REGAD_SHIFT)		|
	    (val & HME_MIF_FO_DATA);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			goto out;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
}

static void
hme_mii_statchg(dev)
	struct device *dev;
{
	struct hme_softc *sc = (void *)dev;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change\n");
#endif

	/* Set the MAC Full Duplex bit appropriately */
	/*
	 * Apparently the hme chip is SIMPLEX if working in full duplex
	 * mode, but not otherwise.
	 */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= HME_MAC_TXCFG_FULLDPLX;
		sc->sc_arpcom.ac_if.if_flags |= IFF_SIMPLEX;
	} else {
		v &= ~HME_MAC_TXCFG_FULLDPLX;
		sc->sc_arpcom.ac_if.if_flags &= ~IFF_SIMPLEX;
	}
	sc->sc_if_flags = sc->sc_arpcom.ac_if.if_flags;
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
}

int
hme_mediachange(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mediachange: phy = %d\n", phy);
#endif
	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	/* Select the current PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	return (mii_mediachg(&sc->sc_mii));
}

void
hme_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct hme_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
hme_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if (ifp->if_flags & IFF_UP)
				hme_setladrf(sc);
			else {
				ifp->if_flags |= IFF_UP;
				hme_init(sc);
			}
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			hme_init(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			hme_stop(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			hme_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * If setting debug or promiscuous mode, do not reset
			 * the chip; for everything else, call hme_init()
			 * which will trigger a reset.
			 */
#define RESETIGN (IFF_CANTCHANGE | IFF_DEBUG)
			if (ifp->if_flags == sc->sc_if_flags)
				break;
			if ((ifp->if_flags & (~RESETIGN))
			    == (sc->sc_if_flags & (~RESETIGN)))
				hme_setladrf(sc);
			else
				hme_init(sc);
#undef RESETIGN
		}
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			hme_setladrf(sc);
		error = 0;
	}

	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

void
hme_shutdown(arg)
	void *arg;
{
	hme_stop((struct hme_softc *)arg);
}

/*
 * Set up the logical address filter.
 */
void
hme_setladrf(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct arpcom *ac = &sc->sc_arpcom;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t hash[4];
	u_int32_t v, crc;

	/* Clear hash table */
	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	/* Get current RX configuration */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter */
		v |= HME_MAC_RXCFG_PMISC;
		v &= ~HME_MAC_RXCFG_HENABLE;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter */
	v &= ~HME_MAC_RXCFG_PMISC;
	v |= HME_MAC_RXCFG_HENABLE;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 6 bits as an
	 * index into the 64 bit logical address filter.  The two high-order
	 * bits of that index select one of the four 16-bit filter words,
	 * while the low four bits select the bit within that word.
	 */
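	/*
	 * Example: a CRC-derived index of 0x3f lands in the last word
	 * and bit: 0x3f >> 4 == 3 selects hash[3] (HASHTAB3), and
	 * 1 << (0x3f & 0xf) sets bit 15 of that 16-bit register.
	 */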

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
			ifp->if_flags |= IFF_ALLMULTI;
			goto chipit;
		}

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Now load the hash table into the chip */
	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
}

void
hme_fill_rx_ring(sc)
	struct hme_softc *sc;
{
	struct hme_sxd *sd;

	while (sc->sc_rx_cnt < HME_RX_RING_SIZE) {
		if (hme_newbuf(sc, &sc->sc_rxd[sc->sc_rx_prod]))
			break;

		sd = &sc->sc_rxd[sc->sc_rx_prod];
		HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod,
		    sd->sd_map->dm_segs[0].ds_addr);
		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_RX_PKTSIZE));

		if (++sc->sc_rx_prod == HME_RX_RING_SIZE)
			sc->sc_rx_prod = 0;
		sc->sc_rx_cnt++;
	}
}

int
hme_newbuf(sc, d)
	struct hme_softc *sc;
	struct hme_sxd *d;
{
	struct mbuf *m;
	bus_dmamap_t map;

	/*
	 * All operations should be on local variables and/or rx spare map
	 * until we're sure everything is a success.
	 */

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_pkthdr.rcvif = &sc->sc_arpcom.ac_if;

	MCLGETI(m, M_DONTWAIT, &sc->sc_arpcom.ac_if, MCLBYTES);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (bus_dmamap_load(sc->sc_dmatag, sc->sc_rxmap_spare,
	    mtod(m, caddr_t), MCLBYTES - HME_RX_OFFSET, NULL,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * At this point we have a new buffer loaded into the spare map.
	 * Just need to clear out the old mbuf/map and put the new one
	 * in place.
	 */

	map = d->sd_map;
	d->sd_map = sc->sc_rxmap_spare;
	sc->sc_rxmap_spare = map;

	bus_dmamap_sync(sc->sc_dmatag, d->sd_map, 0, d->sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

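	/*
	 * HME_RX_OFFSET (2 bytes) pushes the 14 byte Ethernet header so
	 * the IP header that follows lands on a 4 byte boundary; it must
	 * match the receive offset programmed into HME_ERXI_CFG in
	 * hme_init().
	 */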
	m->m_data += HME_RX_OFFSET;
	d->sd_mbuf = m;
	return (0);
}