/* $NetBSD: if_ae.c,v 1.45 2024/07/05 04:31:49 rin Exp $ */
/*-
 * Copyright (c) 2006 Urbana-Champaign Independent Media Center.
 * Copyright (c) 2006 Garrett D'Amore.
 * All rights reserved.
 *
 * This code was written by Garrett D'Amore for the Champaign-Urbana
 * Community Wireless Network Project.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgements:
 *      This product includes software developed by the Urbana-Champaign
 *      Independent Media Center.
 *      This product includes software developed by Garrett D'Amore.
 * 4. Urbana-Champaign Independent Media Center's name and Garrett
 *    D'Amore's name may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER AND GARRETT D'AMORE ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE URBANA-CHAMPAIGN INDEPENDENT
 * MEDIA CENTER OR GARRETT D'AMORE BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1998, 1999, 2000, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center; and by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the onboard ethernet MAC found on the AR5312
 * chip's AHB bus.
 *
 * This device is very similar to the tulip in most regards, and
 * the code is directly derived from NetBSD's tulip.c.  However, it
 * is different enough that it did not seem to be a good idea to
 * add further complexity to the tulip driver, so we have our own.
 *
 * Also tulip has a lot of complexity in it for various parts/options
 * that we don't need, and on these little boxes with only ~8MB RAM, we
 * don't want any extra bloat.
 */

/*
 * TODO:
 *
 * 1) Find out about BUS_MODE_ALIGN16B.  This chip can apparently align
 *    inbound packets on a half-word boundary, which would make life easier
 *    for TCP/IP.  (Aligning IP headers on a word.)
 *
 * 2) There is stuff in the original tulip to shut down the device when
 *    reacting to a change in link status.  Is that needed?
 *
 * 3) Test with a variety of 10/100 HDX/FDX scenarios.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ae.c,v 1.45 2024/07/05 04:31:49 rin Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <mips/atheros/include/arbusvar.h>
#include <mips/atheros/dev/aereg.h>
#include <mips/atheros/dev/aevar.h>

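/*
 * Transmit FIFO thresholds, in increasing order.  ae_intr() walks this
 * table when a transmit underrun occurs, bumping the OPMODE threshold
 * to the next entry until it reaches store-and-forward.  The list is
 * terminated by a NULL name.
 */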
static const struct {
	uint32_t	txth_opmode;		/* OPMODE bits */
	const char	*txth_name;		/* name of mode */
} ae_txthresh[] = {
	{ OPMODE_TR_32,		"32 words" },
	{ OPMODE_TR_64,		"64 words" },
	{ OPMODE_TR_128,	"128 words" },
	{ OPMODE_TR_256,	"256 words" },
	{ OPMODE_SF,		"store and forward mode" },
	{ 0,			NULL },
};

static int	ae_match(device_t, struct cfdata *, void *);
static void	ae_attach(device_t, device_t, void *);
static int	ae_detach(device_t, int);
static int	ae_activate(device_t, enum devact);

static int	ae_ifflags_cb(struct ethercom *);
static void	ae_reset(struct ae_softc *);
static void	ae_idle(struct ae_softc *, uint32_t);

static void	ae_start(struct ifnet *);
static void	ae_watchdog(struct ifnet *);
static int	ae_ioctl(struct ifnet *, u_long, void *);
static int	ae_init(struct ifnet *);
static void	ae_stop(struct ifnet *, int);

static void	ae_shutdown(void *);

static void	ae_rxdrain(struct ae_softc *);
static int	ae_add_rxbuf(struct ae_softc *, int);

static int	ae_enable(struct ae_softc *);
static void	ae_disable(struct ae_softc *);
static void	ae_power(int, void *);

static void	ae_filter_setup(struct ae_softc *);

static int	ae_intr(void *);
static void	ae_rxintr(struct ae_softc *);
static void	ae_txintr(struct ae_softc *);

static void	ae_mii_tick(void *);
static void	ae_mii_statchg(struct ifnet *);

static int	ae_mii_readreg(device_t, int, int, uint16_t *);
static int	ae_mii_writereg(device_t, int, int, uint16_t);

#ifdef AE_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
				printf x
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

#ifdef AE_STATS
static void	ae_print_stats(struct ae_softc *);
#endif

CFATTACH_DECL_NEW(ae, sizeof(struct ae_softc),
    ae_match, ae_attach, ae_detach, ae_activate);

/*
 * ae_match:
 *
 *	Check for a device match.
 */
int
ae_match(device_t parent, struct cfdata *cf, void *aux)
{
	struct arbus_attach_args *aa = aux;

	if (strcmp(aa->aa_name, cf->cf_name) == 0)
		return 1;

	return 0;
}

/*
 * ae_attach:
 *
 *	Attach an ae interface to the system.
 */
void
ae_attach(device_t parent, device_t self, void *aux)
{
	const uint8_t *enaddr;
	prop_data_t ea;
	struct ae_softc *sc = device_private(self);
	struct arbus_attach_args *aa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	int i, error;

	sc->sc_dev = self;

	callout_init(&sc->sc_tick_callout, 0);

	printf(": Atheros AR531X 10/100 Ethernet\n");

	/*
	 * Try to get MAC address.
	 */
	ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-address");
	if (ea == NULL) {
		printf("%s: unable to get mac-addr property\n",
		    device_xname(sc->sc_dev));
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	sc->sc_cirq = aa->aa_cirq;
	sc->sc_mirq = aa->aa_mirq;
	sc->sc_st = aa->aa_bst;
	sc->sc_dmat = aa->aa_dmat;

	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Map registers.
	 */
	sc->sc_size = aa->aa_size;
	if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
	    &sc->sc_sh)) != 0) {
		printf("%s: unable to map registers, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_0;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct ae_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_2;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ae_control_data), 1,
	    sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", device_xname(sc->sc_dev), error);
		goto fail_3;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ae_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_4;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i, error);
			goto fail_5;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i, error);
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= AE_ATTACHED;

	/*
	 * Initialize our media structures.  This may probe the MII, if
	 * present.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = ae_mii_readreg;
	mii->mii_writereg = ae_mii_writereg;
	mii->mii_statchg = ae_mii_statchg;
	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	sc->sc_tick = ae_mii_tick;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_watchdog = ae_watchdog;
	ifp->if_init = ae_init;
	ifp->if_stop = ae_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ae_ifflags_cb);

	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    device_xname(sc->sc_dev));

	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(device_xname(sc->sc_dev),
	    ae_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    device_xname(sc->sc_dev));
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < AE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_1:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
 fail_0:
	return;
}

/*
 * ae_activate:
 *
 *	Handle device activation/deactivation requests.
 */
int
ae_activate(device_t self, enum devact act)
{
	struct ae_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		if_deactivate(&sc->sc_ethercom.ec_if);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

/*
 * ae_detach:
 *
 *	Detach a device interface.
 */
int
ae_detach(device_t self, int flags)
{
	struct ae_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_rxsoft *rxs;
	struct ae_txsoft *txs;
	int i;

	/*
	 * Succeed now if there isn't any work to do.
	 */
	if ((sc->sc_flags & AE_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	if (sc->sc_tick)
		callout_stop(&sc->sc_tick_callout);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	rnd_detach_source(&sc->sc_rnd_source);
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Delete all remaining media. */
	ifmedia_fini(&sc->sc_mii.mii_media);

	for (i = 0; i < AE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap);
	}
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);

	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);

	return (0);
}

/*
 * ae_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
ae_shutdown(void *arg)
{
	struct ae_softc *sc = arg;

	ae_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * ae_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
ae_start(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ae_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx = 1, ofree, seg;

	DPRINTF(sc, ("%s: ae_start: sc_flags 0x%08x, if_flags 0x%08x\n",
	    device_xname(sc->sc_dev), sc->sc_flags, ifp->if_flags));

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: ae_start: txfree %d, txnext %d\n",
	    device_xname(sc->sc_dev), ofree, firsttx));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
	       sc->sc_txfree != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  We also take the copy path
		 * when the mbuf data is not word-aligned, since the
		 * transmit DMA is only handed word-aligned buffers.
		 * In these cases, we'll copy into a fresh mbuf and try
		 * again.
		 */
		if (((mtod(m0, uintptr_t) & 3) != 0) ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = AE_NEXTTX(nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			sc->sc_txdescs[nexttx].ad_status =
			    (nexttx == firsttx) ? 0 : ADSTAT_OWN;
			sc->sc_txdescs[nexttx].ad_bufaddr1 =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdescs[nexttx].ad_ctl =
			    (dmamap->dm_segs[seg].ds_len <<
			    ADCTL_SIZE1_SHIFT) |
			    (nexttx == (AE_NTXDESC - 1) ?
			    ADCTL_ER : 0);
			lasttx = nexttx;
		}

		KASSERT(lasttx != -1);

		/* Set `first segment' and `last segment' appropriately. */
		sc->sc_txdescs[sc->sc_txnext].ad_ctl |= ADCTL_Tx_FS;
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_LS;

#ifdef AE_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf(" txsoft %p transmit chain:\n", txs);
			for (seg = sc->sc_txnext;; seg = AE_NEXTTX(seg)) {
				printf(" descriptor %d:\n", seg);
				printf(" ad_status: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_status);
				printf(" ad_ctl: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_ctl);
				printf(" ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr1);
				printf(" ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[seg].ad_bufaddr2);
				if (seg == lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		AE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndescs = dmamap->dm_nsegs;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txfree != ofree) {
		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
		    device_xname(sc->sc_dev), lasttx, firsttx));
		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		sc->sc_txdescs[lasttx].ad_ctl |= ADCTL_Tx_IC;
		AE_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].ad_status |= ADSTAT_OWN;
		AE_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Wake up the transmitter. */
		/* XXX USE AUTOPOLLING? */
		AE_WRITE(sc, CSR_TXPOLL, TXPOLL_TPD);
		AE_BARRIER(sc);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * ae_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
ae_watchdog(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	int doing_transmit;

	doing_transmit = (! SIMPLEQ_EMPTY(&sc->sc_txdirtyq));

	if (doing_transmit) {
		printf("%s: transmit timeout\n", device_xname(sc->sc_dev));
		if_statinc(ifp, if_oerrors);
	} else
		printf("%s: spurious watchdog timeout\n",
		    device_xname(sc->sc_dev));

	(void) ae_init(ifp);

	/* Try to get more packets going. */
	ae_start(ifp);
}

/*
 * If the interface is up and running, only modify the receive
 * filter when changing to/from promiscuous mode.  Otherwise return
 * ENETRESET so that ether_ioctl will reset the chip.
 */
static int
ae_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct ae_softc *sc = ifp->if_softc;
	u_short change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		ae_filter_setup(sc);
	return 0;
}

/*
 * ae_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
ae_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ae_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed.  Set the
			 * hardware filter accordingly.
			 */
			ae_filter_setup(sc);
		}
		error = 0;
	}

	/* Try to get more packets going. */
	if (AE_IS_ENABLED(sc))
		ae_start(ifp);

	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

/*
 * ae_intr:
 *
 *	Interrupt service routine.
 */
int
ae_intr(void *arg)
{
	struct ae_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t status, rxstatus, txstatus;
	int handled = 0, txthresh;

	DPRINTF(sc, ("%s: ae_intr\n", device_xname(sc->sc_dev)));

#ifdef DEBUG
	if (AE_IS_ENABLED(sc) == 0)
		panic("%s: ae_intr: not enabled", device_xname(sc->sc_dev));
#endif

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(sc->sc_dev)) {
		printf("spurious?!?\n");
		return (0);
	}

	for (;;) {
		status = AE_READ(sc, CSR_STATUS);
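		/*
		 * Writing the asserted bits back to CSR_STATUS
		 * acknowledges (clears) them, tulip-style.
		 */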
		if (status) {
			AE_WRITE(sc, CSR_STATUS, status);
			AE_BARRIER(sc);
		}

		if ((status & sc->sc_inten) == 0)
			break;

		handled = 1;

		rxstatus = status & sc->sc_rxint_mask;
		txstatus = status & sc->sc_txint_mask;

		if (rxstatus) {
			/* Grab any new packets. */
			ae_rxintr(sc);

			if (rxstatus & STATUS_RU) {
				printf("%s: receive ring overrun\n",
				    device_xname(sc->sc_dev));
				/* Get the receive process going again. */
				AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
				AE_BARRIER(sc);
				break;
			}
		}

		if (txstatus) {
			/* Sweep up transmit descriptors. */
			ae_txintr(sc);

			if (txstatus & STATUS_TJT)
				printf("%s: transmit jabber timeout\n",
				    device_xname(sc->sc_dev));

			if (txstatus & STATUS_UNF) {
				/*
				 * Increase our transmit threshold if
				 * another is available.
				 */
				txthresh = sc->sc_txthresh + 1;
				if (ae_txthresh[txthresh].txth_name != NULL) {
					uint32_t opmode;
					/* Idle the transmit process. */
					opmode = AE_READ(sc, CSR_OPMODE);
					ae_idle(sc, OPMODE_ST);

					sc->sc_txthresh = txthresh;
					opmode &= ~(OPMODE_TR | OPMODE_SF);
					opmode |=
					    ae_txthresh[txthresh].txth_opmode;
					printf("%s: transmit underrun; new "
					    "threshold: %s\n",
					    device_xname(sc->sc_dev),
					    ae_txthresh[txthresh].txth_name);

					/*
					 * Set the new threshold and restart
					 * the transmit process.
					 */
					AE_WRITE(sc, CSR_OPMODE, opmode);
					AE_BARRIER(sc);
				}
				/*
				 * XXX Log every Nth underrun from
				 * XXX now on?
				 */
			}
		}

		if (status & (STATUS_TPS | STATUS_RPS)) {
			if (status & STATUS_TPS)
				printf("%s: transmit process stopped\n",
				    device_xname(sc->sc_dev));
			if (status & STATUS_RPS)
				printf("%s: receive process stopped\n",
				    device_xname(sc->sc_dev));
			(void) ae_init(ifp);
			break;
		}

		if (status & STATUS_SE) {
			const char *str;

			if (status & STATUS_TX_ABORT)
				str = "tx abort";
			else if (status & STATUS_RX_ABORT)
				str = "rx abort";
			else
				str = "unknown error";

			printf("%s: fatal system error: %s\n",
			    device_xname(sc->sc_dev), str);
			(void) ae_init(ifp);
			break;
		}

		/*
		 * Not handled:
		 *
		 *	Transmit buffer unavailable -- normal
		 *	condition, nothing to do, really.
		 *
		 *	General purpose timer expired -- we don't
		 *	use the general purpose timer.
		 *
		 *	Early receive interrupt -- not available on
		 *	all chips, we just use RI.  We also only
		 *	use single-segment receive DMA, so this
		 *	is mostly useless.
		 */
	}

	/* Try to get more packets going. */
	if_schedule_deferred_start(ifp);

	if (handled)
		rnd_add_uint32(&sc->sc_rnd_source, status);
	return (handled);
}

/*
 * ae_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
ae_rxintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat;
	int i, len;

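	/*
	 * Scan the ring starting at the last known position, stopping
	 * as soon as we reach a descriptor the chip still owns.
	 */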
	for (i = sc->sc_rxptr;; i = AE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		AE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].ad_status;

		if (rxstat & ADSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If any collisions were seen on the wire, count one.
		 */
		if (rxstat & ADSTAT_Rx_CS)
			if_statinc(ifp, if_collisions);

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 * If 802.1Q VLAN MTU is enabled, ignore the Frame Too Long
		 * error.
		 */
		if (rxstat & ADSTAT_ES &&
		    ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) == 0 ||
		    (rxstat & (ADSTAT_Rx_DE | ADSTAT_Rx_RF |
		    ADSTAT_Rx_DB | ADSTAT_Rx_CE)) != 0)) {
#define	PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				printf("%s: receive error: %s\n",	\
				    device_xname(sc->sc_dev), str)
			if_statinc(ifp, if_ierrors);
			PRINTERR(ADSTAT_Rx_DE, "descriptor error");
			PRINTERR(ADSTAT_Rx_RF, "runt frame");
			PRINTERR(ADSTAT_Rx_TL, "frame too long");
			PRINTERR(ADSTAT_Rx_RE, "MII error");
			PRINTERR(ADSTAT_Rx_DB, "dribbling bit");
			PRINTERR(ADSTAT_Rx_CE, "CRC error");
#undef PRINTERR
			AE_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note the chip
		 * includes the CRC with every packet.
		 */
		len = ADSTAT_Rx_LENGTH(rxstat) - ETHER_CRC_LEN;

		/*
		 * XXX: the Atheros part can align on half words.  What
		 * is the performance implication of this?  Probably
		 * minimal, and we should use it...
		 */
#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (ae_add_rxbuf(sc, i) != 0) {
			if_statinc(ifp, if_ierrors);
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The chip's receive buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			if_statinc(ifp, if_ierrors);
			AE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, void *), mtod(rxs->rxs_mbuf, void *), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		AE_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * ae_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
ae_txintr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ae_txsoft *txs;
	uint32_t txstat;

	DPRINTF(sc, ("%s: ae_txintr: sc_flags 0x%08x\n",
	    device_xname(sc->sc_dev), sc->sc_flags));

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		AE_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

#ifdef AE_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = AE_NEXTTX(i)) {
				printf(" descriptor %d:\n", i);
				printf(" ad_status: 0x%08x\n",
				    sc->sc_txdescs[i].ad_status);
				printf(" ad_ctl: 0x%08x\n",
				    sc->sc_txdescs[i].ad_ctl);
				printf(" ad_bufaddr1: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr1);
				printf(" ad_bufaddr2: 0x%08x\n",
				    sc->sc_txdescs[i].ad_bufaddr2);
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

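		/*
		 * If the chip still owns the last descriptor of this
		 * frame, the transmission has not completed; leave the
		 * job on the dirty queue and stop scanning.
		 */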
		txstat = sc->sc_txdescs[txs->txs_lastdesc].ad_status;
		if (txstat & ADSTAT_OWN)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		/*
		 * Check for errors and collisions.
		 */
#ifdef AE_STATS
		if (txstat & ADSTAT_Tx_UF)
			sc->sc_stats.ts_tx_uf++;
		if (txstat & ADSTAT_Tx_TO)
			sc->sc_stats.ts_tx_to++;
		if (txstat & ADSTAT_Tx_EC)
			sc->sc_stats.ts_tx_ec++;
		if (txstat & ADSTAT_Tx_LC)
			sc->sc_stats.ts_tx_lc++;
#endif

		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		if (txstat & (ADSTAT_Tx_UF | ADSTAT_Tx_TO))
			if_statinc_ref(ifp, nsr, if_oerrors);

		if (txstat & ADSTAT_Tx_EC)
			if_statadd_ref(ifp, nsr, if_collisions, 16);
		else if (ADSTAT_Tx_COLLISIONS(txstat))
			if_statadd_ref(ifp, nsr, if_collisions,
			    ADSTAT_Tx_COLLISIONS(txstat));
		if (txstat & ADSTAT_Tx_LC)
			if_statinc_ref(ifp, nsr, if_collisions);

		if_statinc_ref(ifp, nsr, if_opackets);
		IF_STAT_PUTREF(ifp);
	}

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (txs == NULL)
		ifp->if_timer = 0;
}

#ifdef AE_STATS
void
ae_print_stats(struct ae_softc *sc)
{

	printf("%s: tx_uf %lu, tx_to %lu, tx_ec %lu, tx_lc %lu\n",
	    device_xname(sc->sc_dev),
	    sc->sc_stats.ts_tx_uf, sc->sc_stats.ts_tx_to,
	    sc->sc_stats.ts_tx_ec, sc->sc_stats.ts_tx_lc);
}
#endif

/*
 * ae_reset:
 *
 *	Perform a soft reset on the chip.
 */
void
ae_reset(struct ae_softc *sc)
{
	int i;

	AE_WRITE(sc, CSR_BUSMODE, BUSMODE_SWR);
	AE_BARRIER(sc);

	/*
	 * The chip doesn't take itself out of reset automatically.
	 * We need to do so ourselves, after waiting at least 2us
	 * (we wait 10us here to be safe).
	 */
1262bc3e1174Sgdamore delay(10);
1263bc3e1174Sgdamore AE_WRITE(sc, CSR_BUSMODE, 0);
1264bc3e1174Sgdamore AE_BARRIER(sc);
1265bc3e1174Sgdamore
1266bc3e1174Sgdamore for (i = 0; i < 1000; i++) {
1267bc3e1174Sgdamore /*
1268bc3e1174Sgdamore * Wait a bit for the reset to complete before peeking
1269bc3e1174Sgdamore * at the chip again.
1270bc3e1174Sgdamore */
1271bc3e1174Sgdamore delay(10);
1272bc3e1174Sgdamore if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR) == 0)
1273bc3e1174Sgdamore break;
1274bc3e1174Sgdamore }
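	/*
	 * Illustrative note (not from the original sources): the loop above
	 * polls for up to roughly 1000 * 10us = 10ms waiting for the
	 * BUSMODE_SWR bit to self-clear.
	 */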
1275bc3e1174Sgdamore
1276bc3e1174Sgdamore if (AE_ISSET(sc, CSR_BUSMODE, BUSMODE_SWR))
1277cbab9cadSchs printf("%s: reset failed to complete\n", device_xname(sc->sc_dev));
1278bc3e1174Sgdamore
1279bc3e1174Sgdamore delay(1000);
1280bc3e1174Sgdamore }
1281bc3e1174Sgdamore
1282bc3e1174Sgdamore /*
1283bc3e1174Sgdamore * ae_init: [ ifnet interface function ]
1284bc3e1174Sgdamore *
1285bc3e1174Sgdamore * Initialize the interface. Must be called at splnet().
1286bc3e1174Sgdamore */
1287bc3e1174Sgdamore static int
1288bc3e1174Sgdamore ae_init(struct ifnet *ifp)
1289bc3e1174Sgdamore {
1290bc3e1174Sgdamore struct ae_softc *sc = ifp->if_softc;
1291bc3e1174Sgdamore struct ae_txsoft *txs;
1292bc3e1174Sgdamore struct ae_rxsoft *rxs;
12936f764931Sdyoung const uint8_t *enaddr;
1294bc3e1174Sgdamore int i, error = 0;
1295bc3e1174Sgdamore
1296bc3e1174Sgdamore if ((error = ae_enable(sc)) != 0)
1297bc3e1174Sgdamore goto out;
1298bc3e1174Sgdamore
1299bc3e1174Sgdamore /*
1300bc3e1174Sgdamore * Cancel any pending I/O.
1301bc3e1174Sgdamore */
1302bc3e1174Sgdamore ae_stop(ifp, 0);
1303bc3e1174Sgdamore
1304bc3e1174Sgdamore /*
1305bc3e1174Sgdamore * Reset the chip to a known state.
1306bc3e1174Sgdamore */
1307bc3e1174Sgdamore ae_reset(sc);
1308bc3e1174Sgdamore
1309bc3e1174Sgdamore /*
1310bc3e1174Sgdamore * Initialize the BUSMODE register.
1311bc3e1174Sgdamore */
1312bc3e1174Sgdamore AE_WRITE(sc, CSR_BUSMODE,
1313bc3e1174Sgdamore /* XXX: not sure if this is a good thing or not... */
1314bc3e1174Sgdamore //BUSMODE_ALIGN_16B |
1315bc3e1174Sgdamore BUSMODE_BAR | BUSMODE_BLE | BUSMODE_PBL_4LW);
1316bc3e1174Sgdamore AE_BARRIER(sc);
1317bc3e1174Sgdamore
1318bc3e1174Sgdamore /*
1319bc3e1174Sgdamore * Initialize the transmit descriptor ring.
1320bc3e1174Sgdamore */
1321bc3e1174Sgdamore memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1322bc3e1174Sgdamore for (i = 0; i < AE_NTXDESC; i++) {
1323bc3e1174Sgdamore sc->sc_txdescs[i].ad_ctl = 0;
1324bc3e1174Sgdamore sc->sc_txdescs[i].ad_bufaddr2 =
1325bc3e1174Sgdamore AE_CDTXADDR(sc, AE_NEXTTX(i));
1326bc3e1174Sgdamore }
1327bc3e1174Sgdamore sc->sc_txdescs[AE_NTXDESC - 1].ad_ctl |= ADCTL_ER;
1328bc3e1174Sgdamore AE_CDTXSYNC(sc, 0, AE_NTXDESC,
1329bc3e1174Sgdamore BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1330bc3e1174Sgdamore sc->sc_txfree = AE_NTXDESC;
1331bc3e1174Sgdamore sc->sc_txnext = 0;
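	/*
	 * Illustrative note (not from the original sources): each transmit
	 * descriptor's second buffer pointer is chained to the DMA address
	 * of the next descriptor, so with a hypothetical AE_NTXDESC of 4,
	 * descriptor 3 points back at descriptor 0; ADCTL_ER on the last
	 * slot presumably marks the end of the ring for the chip.
	 */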
1332bc3e1174Sgdamore
1333bc3e1174Sgdamore /*
1334bc3e1174Sgdamore * Initialize the transmit job descriptors.
1335bc3e1174Sgdamore */
1336bc3e1174Sgdamore SIMPLEQ_INIT(&sc->sc_txfreeq);
1337bc3e1174Sgdamore SIMPLEQ_INIT(&sc->sc_txdirtyq);
1338bc3e1174Sgdamore for (i = 0; i < AE_TXQUEUELEN; i++) {
1339bc3e1174Sgdamore txs = &sc->sc_txsoft[i];
1340bc3e1174Sgdamore txs->txs_mbuf = NULL;
1341bc3e1174Sgdamore SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1342bc3e1174Sgdamore }
1343bc3e1174Sgdamore
1344bc3e1174Sgdamore /*
1345bc3e1174Sgdamore * Initialize the receive descriptor and receive job
1346bc3e1174Sgdamore * descriptor rings.
1347bc3e1174Sgdamore */
1348bc3e1174Sgdamore for (i = 0; i < AE_NRXDESC; i++) {
1349bc3e1174Sgdamore rxs = &sc->sc_rxsoft[i];
1350bc3e1174Sgdamore if (rxs->rxs_mbuf == NULL) {
1351bc3e1174Sgdamore if ((error = ae_add_rxbuf(sc, i)) != 0) {
1352bc3e1174Sgdamore printf("%s: unable to allocate or map rx "
1353bc3e1174Sgdamore "buffer %d, error = %d\n",
1354cbab9cadSchs device_xname(sc->sc_dev), i, error);
1355bc3e1174Sgdamore /*
1356bc3e1174Sgdamore * XXX Should attempt to run with fewer receive
1357bc3e1174Sgdamore * XXX buffers instead of just failing.
1358bc3e1174Sgdamore */
1359bc3e1174Sgdamore ae_rxdrain(sc);
1360bc3e1174Sgdamore goto out;
1361bc3e1174Sgdamore }
1362bc3e1174Sgdamore } else
1363bc3e1174Sgdamore AE_INIT_RXDESC(sc, i);
1364bc3e1174Sgdamore }
1365bc3e1174Sgdamore sc->sc_rxptr = 0;
1366bc3e1174Sgdamore
1367bc3e1174Sgdamore /*
1368bc3e1174Sgdamore * Initialize the interrupt mask and enable interrupts.
1369bc3e1174Sgdamore */
1370bc3e1174Sgdamore /* normal interrupts */
1371bc3e1174Sgdamore sc->sc_inten = STATUS_TI | STATUS_TU | STATUS_RI | STATUS_NIS;
1372bc3e1174Sgdamore
1373bc3e1174Sgdamore /* abnormal interrupts */
1374bc3e1174Sgdamore sc->sc_inten |= STATUS_TPS | STATUS_TJT | STATUS_UNF |
1375bc3e1174Sgdamore STATUS_RU | STATUS_RPS | STATUS_SE | STATUS_AIS;
1376bc3e1174Sgdamore
1377bc3e1174Sgdamore sc->sc_rxint_mask = STATUS_RI | STATUS_RU;
1378bc3e1174Sgdamore sc->sc_txint_mask = STATUS_TI | STATUS_UNF | STATUS_TJT;
1379bc3e1174Sgdamore
1380bc3e1174Sgdamore sc->sc_rxint_mask &= sc->sc_inten;
1381bc3e1174Sgdamore sc->sc_txint_mask &= sc->sc_inten;
1382bc3e1174Sgdamore
1383bc3e1174Sgdamore AE_WRITE(sc, CSR_INTEN, sc->sc_inten);
1384bc3e1174Sgdamore AE_WRITE(sc, CSR_STATUS, 0xffffffff);
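	/*
	 * Illustrative note (not from the original sources): the all-ones
	 * write to CSR_STATUS presumably acknowledges any stale interrupt
	 * bits (write-one-to-clear) left over from before the reset.
	 */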
1385bc3e1174Sgdamore
1386bc3e1174Sgdamore /*
1387bc3e1174Sgdamore * Give the transmit and receive rings to the chip.
1388bc3e1174Sgdamore */
1389bc3e1174Sgdamore AE_WRITE(sc, CSR_TXLIST, AE_CDTXADDR(sc, sc->sc_txnext));
1390bc3e1174Sgdamore AE_WRITE(sc, CSR_RXLIST, AE_CDRXADDR(sc, sc->sc_rxptr));
1391bc3e1174Sgdamore AE_BARRIER(sc);
1392bc3e1174Sgdamore
1393bc3e1174Sgdamore /*
1394bc3e1174Sgdamore * Set the station address.
1395bc3e1174Sgdamore */
13966f764931Sdyoung enaddr = CLLADDR(ifp->if_sadl);
1397bc3e1174Sgdamore AE_WRITE(sc, CSR_MACHI, enaddr[5] << 16 | enaddr[4]);
1398bc3e1174Sgdamore AE_WRITE(sc, CSR_MACLO, enaddr[3] << 24 | enaddr[2] << 16 |
1399bc3e1174Sgdamore enaddr[1] << 8 | enaddr[0]);
1400bc3e1174Sgdamore AE_BARRIER(sc);
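	/*
	 * Illustrative note (not from the original sources): with the byte
	 * layout used above, a station address of 00:11:22:33:44:55 would
	 * be programmed as MACHI = 0x00550044 and MACLO = 0x33221100.
	 */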
1401bc3e1174Sgdamore
1402bc3e1174Sgdamore /*
1403bc3e1174Sgdamore * Set the receive filter. This will start the transmit and
1404bc3e1174Sgdamore * receive processes.
1405bc3e1174Sgdamore */
1406bc3e1174Sgdamore ae_filter_setup(sc);
1407bc3e1174Sgdamore
1408bc3e1174Sgdamore /*
1409bc3e1174Sgdamore * Set the current media.
1410bc3e1174Sgdamore */
1411b480b622Sdyoung if ((error = ether_mediachange(ifp)) != 0)
1412b480b622Sdyoung goto out;
1413bc3e1174Sgdamore
1414bc3e1174Sgdamore /*
1415bc3e1174Sgdamore * Start the mac.
1416bc3e1174Sgdamore */
1417bc3e1174Sgdamore AE_SET(sc, CSR_MACCTL, MACCTL_RE | MACCTL_TE);
1418bc3e1174Sgdamore AE_BARRIER(sc);
1419bc3e1174Sgdamore
1420bc3e1174Sgdamore /*
1421bc3e1174Sgdamore * Write out the opmode.
1422bc3e1174Sgdamore */
1423bc3e1174Sgdamore AE_WRITE(sc, CSR_OPMODE, OPMODE_SR | OPMODE_ST |
1424bc3e1174Sgdamore ae_txthresh[sc->sc_txthresh].txth_opmode);
1425bc3e1174Sgdamore /*
1426bc3e1174Sgdamore * Start the receive process.
1427bc3e1174Sgdamore */
1428bc3e1174Sgdamore AE_WRITE(sc, CSR_RXPOLL, RXPOLL_RPD);
1429bc3e1174Sgdamore AE_BARRIER(sc);
1430bc3e1174Sgdamore
1431bc3e1174Sgdamore if (sc->sc_tick != NULL) {
1432bc3e1174Sgdamore /* Start the one-second MII tick (first tick after hz/8). */
1433bc3e1174Sgdamore callout_reset(&sc->sc_tick_callout, hz >> 3, sc->sc_tick, sc);
1434bc3e1174Sgdamore }
1435bc3e1174Sgdamore
1436bc3e1174Sgdamore /*
1437bc3e1174Sgdamore * Note that the interface is now running.
1438bc3e1174Sgdamore */
1439bc3e1174Sgdamore ifp->if_flags |= IFF_RUNNING;
1440bc3e1174Sgdamore sc->sc_if_flags = ifp->if_flags;
1441bc3e1174Sgdamore
1442bc3e1174Sgdamore out:
1443bc3e1174Sgdamore if (error) {
14449fcdc9deSthorpej ifp->if_flags &= ~IFF_RUNNING;
1445bc3e1174Sgdamore ifp->if_timer = 0;
1446cbab9cadSchs printf("%s: interface not running\n", device_xname(sc->sc_dev));
1447bc3e1174Sgdamore }
1448bc3e1174Sgdamore return (error);
1449bc3e1174Sgdamore }
1450bc3e1174Sgdamore
1451bc3e1174Sgdamore /*
1452bc3e1174Sgdamore * ae_enable:
1453bc3e1174Sgdamore *
1454bc3e1174Sgdamore * Enable the chip.
1455bc3e1174Sgdamore */
1456bc3e1174Sgdamore static int
1457bc3e1174Sgdamore ae_enable(struct ae_softc *sc)
1458bc3e1174Sgdamore {
1459bc3e1174Sgdamore
1460bc3e1174Sgdamore if (AE_IS_ENABLED(sc) == 0) {
14611f585717Sgdamore sc->sc_ih = arbus_intr_establish(sc->sc_cirq, sc->sc_mirq,
14621f585717Sgdamore ae_intr, sc);
1463bc3e1174Sgdamore if (sc->sc_ih == NULL) {
1464bc3e1174Sgdamore printf("%s: unable to establish interrupt\n",
1465cbab9cadSchs device_xname(sc->sc_dev));
1466bc3e1174Sgdamore return (EIO);
1467bc3e1174Sgdamore }
1468bc3e1174Sgdamore sc->sc_flags |= AE_ENABLED;
1469bc3e1174Sgdamore }
1470bc3e1174Sgdamore return (0);
1471bc3e1174Sgdamore }
1472bc3e1174Sgdamore
1473bc3e1174Sgdamore /*
1474bc3e1174Sgdamore * ae_disable:
1475bc3e1174Sgdamore *
1476bc3e1174Sgdamore * Disable the chip.
1477bc3e1174Sgdamore */
1478bc3e1174Sgdamore static void
1479bc3e1174Sgdamore ae_disable(struct ae_softc *sc)
1480bc3e1174Sgdamore {
1481bc3e1174Sgdamore
1482bc3e1174Sgdamore if (AE_IS_ENABLED(sc)) {
1483bc3e1174Sgdamore arbus_intr_disestablish(sc->sc_ih);
1484bc3e1174Sgdamore sc->sc_flags &= ~AE_ENABLED;
1485bc3e1174Sgdamore }
1486bc3e1174Sgdamore }
1487bc3e1174Sgdamore
1488bc3e1174Sgdamore /*
1489bc3e1174Sgdamore * ae_power:
1490bc3e1174Sgdamore *
1491bc3e1174Sgdamore * Power management (suspend/resume) hook.
1492bc3e1174Sgdamore */
1493bc3e1174Sgdamore static void
1494bc3e1174Sgdamore ae_power(int why, void *arg)
1495bc3e1174Sgdamore {
1496bc3e1174Sgdamore struct ae_softc *sc = arg;
1497bc3e1174Sgdamore struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1498bc3e1174Sgdamore int s;
1499bc3e1174Sgdamore
1500bc3e1174Sgdamore printf("power called: %d, %x\n", why, (uint32_t)arg);
1501bc3e1174Sgdamore s = splnet();
1502bc3e1174Sgdamore switch (why) {
1503bc3e1174Sgdamore case PWR_STANDBY:
1504bc3e1174Sgdamore /* do nothing! */
1505bc3e1174Sgdamore break;
1506bc3e1174Sgdamore case PWR_SUSPEND:
1507bc3e1174Sgdamore ae_stop(ifp, 0);
1508bc3e1174Sgdamore ae_disable(sc);
1509bc3e1174Sgdamore break;
1510bc3e1174Sgdamore case PWR_RESUME:
1511bc3e1174Sgdamore if (ifp->if_flags & IFF_UP) {
1512bc3e1174Sgdamore ae_enable(sc);
1513bc3e1174Sgdamore ae_init(ifp);
1514bc3e1174Sgdamore }
1515bc3e1174Sgdamore break;
1516bc3e1174Sgdamore case PWR_SOFTSUSPEND:
1517bc3e1174Sgdamore case PWR_SOFTSTANDBY:
1518bc3e1174Sgdamore case PWR_SOFTRESUME:
1519bc3e1174Sgdamore break;
1520bc3e1174Sgdamore }
1521bc3e1174Sgdamore splx(s);
1522bc3e1174Sgdamore }
1523bc3e1174Sgdamore
1524bc3e1174Sgdamore /*
1525bc3e1174Sgdamore * ae_rxdrain:
1526bc3e1174Sgdamore *
1527bc3e1174Sgdamore * Drain the receive queue.
1528bc3e1174Sgdamore */
1529bc3e1174Sgdamore static void
1530bc3e1174Sgdamore ae_rxdrain(struct ae_softc *sc)
1531bc3e1174Sgdamore {
1532bc3e1174Sgdamore struct ae_rxsoft *rxs;
1533bc3e1174Sgdamore int i;
1534bc3e1174Sgdamore
1535bc3e1174Sgdamore for (i = 0; i < AE_NRXDESC; i++) {
1536bc3e1174Sgdamore rxs = &sc->sc_rxsoft[i];
1537bc3e1174Sgdamore if (rxs->rxs_mbuf != NULL) {
1538bc3e1174Sgdamore bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1539bc3e1174Sgdamore m_freem(rxs->rxs_mbuf);
1540bc3e1174Sgdamore rxs->rxs_mbuf = NULL;
1541bc3e1174Sgdamore }
1542bc3e1174Sgdamore }
1543bc3e1174Sgdamore }
1544bc3e1174Sgdamore
1545bc3e1174Sgdamore /*
1546bc3e1174Sgdamore * ae_stop: [ ifnet interface function ]
1547bc3e1174Sgdamore *
1548bc3e1174Sgdamore * Stop transmission on the interface.
1549bc3e1174Sgdamore */
1550bc3e1174Sgdamore static void
1551bc3e1174Sgdamore ae_stop(struct ifnet *ifp, int disable)
1552bc3e1174Sgdamore {
1553bc3e1174Sgdamore struct ae_softc *sc = ifp->if_softc;
1554bc3e1174Sgdamore struct ae_txsoft *txs;
1555bc3e1174Sgdamore
1556bc3e1174Sgdamore if (sc->sc_tick != NULL) {
1557bc3e1174Sgdamore /* Stop the one second clock. */
1558bc3e1174Sgdamore callout_stop(&sc->sc_tick_callout);
1559bc3e1174Sgdamore }
1560bc3e1174Sgdamore
1561bc3e1174Sgdamore /* Down the MII. */
1562bc3e1174Sgdamore mii_down(&sc->sc_mii);
1563bc3e1174Sgdamore
1564bc3e1174Sgdamore /* Disable interrupts. */
1565bc3e1174Sgdamore AE_WRITE(sc, CSR_INTEN, 0);
1566bc3e1174Sgdamore
1567bc3e1174Sgdamore /* Stop the transmit and receive processes. */
1568bc3e1174Sgdamore AE_WRITE(sc, CSR_OPMODE, 0);
1569bc3e1174Sgdamore AE_WRITE(sc, CSR_RXLIST, 0);
1570bc3e1174Sgdamore AE_WRITE(sc, CSR_TXLIST, 0);
1571bc3e1174Sgdamore AE_CLR(sc, CSR_MACCTL, MACCTL_TE | MACCTL_RE);
1572bc3e1174Sgdamore AE_BARRIER(sc);
1573bc3e1174Sgdamore
1574bc3e1174Sgdamore /*
1575bc3e1174Sgdamore * Release any queued transmit buffers.
1576bc3e1174Sgdamore */
1577bc3e1174Sgdamore while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1578bc3e1174Sgdamore SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1579bc3e1174Sgdamore if (txs->txs_mbuf != NULL) {
1580bc3e1174Sgdamore bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1581bc3e1174Sgdamore m_freem(txs->txs_mbuf);
1582bc3e1174Sgdamore txs->txs_mbuf = NULL;
1583bc3e1174Sgdamore }
1584bc3e1174Sgdamore SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1585bc3e1174Sgdamore }
1586bc3e1174Sgdamore
1587bc3e1174Sgdamore /*
1588bc3e1174Sgdamore * Mark the interface down and cancel the watchdog timer.
1589bc3e1174Sgdamore */
15909fcdc9deSthorpej ifp->if_flags &= ~IFF_RUNNING;
1591bc3e1174Sgdamore sc->sc_if_flags = ifp->if_flags;
1592bc3e1174Sgdamore ifp->if_timer = 0;
1593bc3e1174Sgdamore
1594d347ff77Sdyoung if (disable) {
1595d347ff77Sdyoung ae_rxdrain(sc);
1596d347ff77Sdyoung ae_disable(sc);
1597d347ff77Sdyoung }
1598d347ff77Sdyoung
1599bc3e1174Sgdamore /*
1600bc3e1174Sgdamore * Reset the chip (needed on some flavors to actually disable it).
1601bc3e1174Sgdamore */
1602bc3e1174Sgdamore ae_reset(sc);
1603bc3e1174Sgdamore }
1604bc3e1174Sgdamore
1605bc3e1174Sgdamore /*
1606bc3e1174Sgdamore * ae_add_rxbuf:
1607bc3e1174Sgdamore *
1608bc3e1174Sgdamore * Add a receive buffer to the indicated descriptor.
1609bc3e1174Sgdamore */
1610bc3e1174Sgdamore static int
1611bc3e1174Sgdamore ae_add_rxbuf(struct ae_softc *sc, int idx)
1612bc3e1174Sgdamore {
1613bc3e1174Sgdamore struct ae_rxsoft *rxs = &sc->sc_rxsoft[idx];
1614bc3e1174Sgdamore struct mbuf *m;
1615bc3e1174Sgdamore int error;
1616bc3e1174Sgdamore
1617bc3e1174Sgdamore MGETHDR(m, M_DONTWAIT, MT_DATA);
1618bc3e1174Sgdamore if (m == NULL)
1619bc3e1174Sgdamore return (ENOBUFS);
1620bc3e1174Sgdamore
1621bc3e1174Sgdamore MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
1622bc3e1174Sgdamore MCLGET(m, M_DONTWAIT);
1623bc3e1174Sgdamore if ((m->m_flags & M_EXT) == 0) {
1624bc3e1174Sgdamore m_freem(m);
1625bc3e1174Sgdamore return (ENOBUFS);
1626bc3e1174Sgdamore }
1627bc3e1174Sgdamore
1628bc3e1174Sgdamore if (rxs->rxs_mbuf != NULL)
1629bc3e1174Sgdamore bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1630bc3e1174Sgdamore
1631bc3e1174Sgdamore rxs->rxs_mbuf = m;
1632bc3e1174Sgdamore
1633bc3e1174Sgdamore error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1634bc3e1174Sgdamore m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1635bc3e1174Sgdamore BUS_DMA_READ | BUS_DMA_NOWAIT);
1636bc3e1174Sgdamore if (error) {
1637bc3e1174Sgdamore printf("%s: can't load rx DMA map %d, error = %d\n",
1638cbab9cadSchs device_xname(sc->sc_dev), idx, error);
1639bc3e1174Sgdamore panic("ae_add_rxbuf"); /* XXX */
1640bc3e1174Sgdamore }
1641bc3e1174Sgdamore
1642bc3e1174Sgdamore bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1643bc3e1174Sgdamore rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1644bc3e1174Sgdamore
1645bc3e1174Sgdamore AE_INIT_RXDESC(sc, idx);
1646bc3e1174Sgdamore
1647bc3e1174Sgdamore return (0);
1648bc3e1174Sgdamore }
1649bc3e1174Sgdamore
1650bc3e1174Sgdamore /*
1651bc3e1174Sgdamore * ae_filter_setup:
1652bc3e1174Sgdamore *
1653bc3e1174Sgdamore * Set the chip's receive filter.
1654bc3e1174Sgdamore */
1655bc3e1174Sgdamore static void
1656bc3e1174Sgdamore ae_filter_setup(struct ae_softc *sc)
1657bc3e1174Sgdamore {
1658bc3e1174Sgdamore struct ethercom *ec = &sc->sc_ethercom;
1659bc3e1174Sgdamore struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1660bc3e1174Sgdamore struct ether_multi *enm;
1661bc3e1174Sgdamore struct ether_multistep step;
1662bc3e1174Sgdamore uint32_t hash, mchash[2];
1663bc3e1174Sgdamore uint32_t macctl = 0;
1664bc3e1174Sgdamore
1665bc3e1174Sgdamore /*
1666bc3e1174Sgdamore * If the chip is running, we need to reset the interface,
1667bc3e1174Sgdamore * and will revisit here with IFF_RUNNING clear. The
1668bc3e1174Sgdamore * chip seems to really not like to have its multicast
1669bc3e1174Sgdamore * filter programmed without a reset.
1670bc3e1174Sgdamore */
1671bc3e1174Sgdamore if (ifp->if_flags & IFF_RUNNING) {
1672bc3e1174Sgdamore (void) ae_init(ifp);
1673bc3e1174Sgdamore return;
1674bc3e1174Sgdamore }
1675bc3e1174Sgdamore
1676bc3e1174Sgdamore DPRINTF(sc, ("%s: ae_filter_setup: sc_flags 0x%08x\n",
1677cbab9cadSchs device_xname(sc->sc_dev), sc->sc_flags));
1678bc3e1174Sgdamore
1679bc3e1174Sgdamore macctl = AE_READ(sc, CSR_MACCTL);
1680bc3e1174Sgdamore macctl &= ~(MACCTL_PR | MACCTL_PM);
1681bc3e1174Sgdamore macctl |= MACCTL_HASH;
1682bc3e1174Sgdamore macctl |= MACCTL_HBD;
1683bc3e1174Sgdamore macctl |= MACCTL_PR;	/* XXX unconditional: forces promiscuous receive */
1684bc3e1174Sgdamore
1685bc3e1174Sgdamore if (ifp->if_flags & IFF_PROMISC) {
1686bc3e1174Sgdamore macctl |= MACCTL_PR;
1687bc3e1174Sgdamore goto allmulti;
1688bc3e1174Sgdamore }
1689bc3e1174Sgdamore
1690bc3e1174Sgdamore mchash[0] = mchash[1] = 0;
1691bc3e1174Sgdamore
169283759283Smsaitoh ETHER_LOCK(ec);
1693bc3e1174Sgdamore ETHER_FIRST_MULTI(step, ec, enm);
1694bc3e1174Sgdamore while (enm != NULL) {
1695bc3e1174Sgdamore if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1696bc3e1174Sgdamore /*
1697bc3e1174Sgdamore * We must listen to a range of multicast addresses.
1698bc3e1174Sgdamore * For now, just accept all multicasts, rather than
1699bc3e1174Sgdamore * trying to set only those filter bits needed to match
1700bc3e1174Sgdamore * the range. (At this time, the only use of address
1701bc3e1174Sgdamore * ranges is for IP multicast routing, for which the
1702bc3e1174Sgdamore * range is big enough to require all bits set.)
1703bc3e1174Sgdamore */
170483759283Smsaitoh ETHER_UNLOCK(ec);
1705bc3e1174Sgdamore goto allmulti;
1706bc3e1174Sgdamore }
1707bc3e1174Sgdamore
1708bc3e1174Sgdamore /* XXX verify whether the chip expects big- or little-endian hashes */
1709bc3e1174Sgdamore hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3f;
1710bc3e1174Sgdamore mchash[hash >> 5] |= 1 << (hash & 0x1f);
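		/*
		 * Illustrative note (not from the original sources): a
		 * masked CRC of, say, 0x2a selects word 1 (0x2a >> 5) and
		 * bit 10 (0x2a & 0x1f), i.e. mchash[1] |= 1 << 10.
		 */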
1711bc3e1174Sgdamore ETHER_NEXT_MULTI(step, enm);
1712bc3e1174Sgdamore }
171383759283Smsaitoh ETHER_UNLOCK(ec);
1714bc3e1174Sgdamore ifp->if_flags &= ~IFF_ALLMULTI;
1715bc3e1174Sgdamore goto setit;
1716bc3e1174Sgdamore
1717bc3e1174Sgdamore allmulti:
1718bc3e1174Sgdamore ifp->if_flags |= IFF_ALLMULTI;
1719bc3e1174Sgdamore mchash[0] = mchash[1] = 0xffffffff;
1720bc3e1174Sgdamore macctl |= MACCTL_PM;
1721bc3e1174Sgdamore
1722bc3e1174Sgdamore setit:
	/* XXX the low hash word presumably belongs in CSR_HTLO; both words were previously written to CSR_HTHI */
1723bc3e1174Sgdamore AE_WRITE(sc, CSR_HTLO, mchash[0]);
1724bc3e1174Sgdamore AE_WRITE(sc, CSR_HTHI, mchash[1]);
1725bc3e1174Sgdamore
1726bc3e1174Sgdamore AE_WRITE(sc, CSR_MACCTL, macctl);
1727bc3e1174Sgdamore AE_BARRIER(sc);
1728bc3e1174Sgdamore
1729bc3e1174Sgdamore DPRINTF(sc, ("%s: ae_filter_setup: returning %x\n",
1730cbab9cadSchs device_xname(sc->sc_dev), macctl));
1731bc3e1174Sgdamore }
1732bc3e1174Sgdamore
1733bc3e1174Sgdamore /*
1734bc3e1174Sgdamore * ae_idle:
1735bc3e1174Sgdamore *
1736bc3e1174Sgdamore * Cause the transmit and/or receive processes to go idle.
1737bc3e1174Sgdamore */
1738bc3e1174Sgdamore void
1739811add33Smsaitoh ae_idle(struct ae_softc *sc, uint32_t bits)
1740bc3e1174Sgdamore {
1741bc3e1174Sgdamore static const char * const txstate_names[] = {
1742bc3e1174Sgdamore "STOPPED",
1743bc3e1174Sgdamore "RUNNING - FETCH",
1744bc3e1174Sgdamore "RUNNING - WAIT",
1745bc3e1174Sgdamore "RUNNING - READING",
1746bc3e1174Sgdamore "-- RESERVED --",
1747bc3e1174Sgdamore "RUNNING - SETUP",
1748bc3e1174Sgdamore "SUSPENDED",
1749bc3e1174Sgdamore "RUNNING - CLOSE",
1750bc3e1174Sgdamore };
1751bc3e1174Sgdamore static const char * const rxstate_names[] = {
1752bc3e1174Sgdamore "STOPPED",
1753bc3e1174Sgdamore "RUNNING - FETCH",
1754bc3e1174Sgdamore "RUNNING - CHECK",
1755bc3e1174Sgdamore "RUNNING - WAIT",
1756bc3e1174Sgdamore "SUSPENDED",
1757bc3e1174Sgdamore "RUNNING - CLOSE",
1758bc3e1174Sgdamore "RUNNING - FLUSH",
1759bc3e1174Sgdamore "RUNNING - QUEUE",
1760bc3e1174Sgdamore };
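	/*
	 * Illustrative note (not from the original sources): both tables
	 * above are indexed by the 3-bit transmit/receive state fields
	 * extracted from CSR_STATUS below (the >> 20 and >> 17 shifts),
	 * hence exactly eight entries each.
	 */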
1761bc3e1174Sgdamore
1762811add33Smsaitoh uint32_t csr, ackmask = 0;
1763bc3e1174Sgdamore int i;
1764bc3e1174Sgdamore
1765bc3e1174Sgdamore if (bits & OPMODE_ST)
1766bc3e1174Sgdamore ackmask |= STATUS_TPS;
1767bc3e1174Sgdamore
1768bc3e1174Sgdamore if (bits & OPMODE_SR)
1769bc3e1174Sgdamore ackmask |= STATUS_RPS;
1770bc3e1174Sgdamore
1771bc3e1174Sgdamore AE_CLR(sc, CSR_OPMODE, bits);
1772bc3e1174Sgdamore
1773bc3e1174Sgdamore for (i = 0; i < 1000; i++) {
1774bc3e1174Sgdamore if (AE_ISSET(sc, CSR_STATUS, ackmask) == ackmask)
1775bc3e1174Sgdamore break;
1776bc3e1174Sgdamore delay(10);
1777bc3e1174Sgdamore }
1778bc3e1174Sgdamore
1779bc3e1174Sgdamore csr = AE_READ(sc, CSR_STATUS);
1780bc3e1174Sgdamore if ((csr & ackmask) != ackmask) {
1781bc3e1174Sgdamore if ((bits & OPMODE_ST) != 0 && (csr & STATUS_TPS) == 0 &&
1782bc3e1174Sgdamore (csr & STATUS_TS) != STATUS_TS_STOPPED) {
1783bc3e1174Sgdamore printf("%s: transmit process failed to idle: "
1784cbab9cadSchs "state %s\n", device_xname(sc->sc_dev),
1785bc3e1174Sgdamore txstate_names[(csr & STATUS_TS) >> 20]);
1786bc3e1174Sgdamore }
1787bc3e1174Sgdamore if ((bits & OPMODE_SR) != 0 && (csr & STATUS_RPS) == 0 &&
1788bc3e1174Sgdamore (csr & STATUS_RS) != STATUS_RS_STOPPED) {
1789bc3e1174Sgdamore printf("%s: receive process failed to idle: "
1790cbab9cadSchs "state %s\n", device_xname(sc->sc_dev),
1791bc3e1174Sgdamore rxstate_names[(csr & STATUS_RS) >> 17]);
1792bc3e1174Sgdamore }
1793bc3e1174Sgdamore }
1794bc3e1174Sgdamore }
1795bc3e1174Sgdamore
1796bc3e1174Sgdamore /*****************************************************************************
1797bc3e1174Sgdamore * Support functions for MII-attached media.
1798bc3e1174Sgdamore *****************************************************************************/
1799bc3e1174Sgdamore
1800bc3e1174Sgdamore /*
1801bc3e1174Sgdamore * ae_mii_tick:
1802bc3e1174Sgdamore *
1803bc3e1174Sgdamore * One second timer, used to tick the MII.
1804bc3e1174Sgdamore */
1805bc3e1174Sgdamore static void
1806bc3e1174Sgdamore ae_mii_tick(void *arg)
1807bc3e1174Sgdamore {
1808bc3e1174Sgdamore struct ae_softc *sc = arg;
1809bc3e1174Sgdamore int s;
1810bc3e1174Sgdamore
1811cbab9cadSchs if (!device_is_active(sc->sc_dev))
1812bc3e1174Sgdamore return;
1813bc3e1174Sgdamore
1814bc3e1174Sgdamore s = splnet();
1815bc3e1174Sgdamore mii_tick(&sc->sc_mii);
1816bc3e1174Sgdamore splx(s);
1817bc3e1174Sgdamore
1818bc3e1174Sgdamore callout_reset(&sc->sc_tick_callout, hz, sc->sc_tick, sc);
1819bc3e1174Sgdamore }
1820bc3e1174Sgdamore
1821bc3e1174Sgdamore /*
1822bc3e1174Sgdamore * ae_mii_statchg: [mii interface function]
1823bc3e1174Sgdamore *
1824bc3e1174Sgdamore * Callback from PHY when media changes.
1825bc3e1174Sgdamore */
1826bc3e1174Sgdamore static void
18270bc32000Smatt ae_mii_statchg(struct ifnet *ifp)
1828bc3e1174Sgdamore {
18290bc32000Smatt struct ae_softc *sc = ifp->if_softc;
1830bc3e1174Sgdamore uint32_t macctl, flowc;
1831bc3e1174Sgdamore
1832bc3e1174Sgdamore //opmode = AE_READ(sc, CSR_OPMODE);
1833bc3e1174Sgdamore macctl = AE_READ(sc, CSR_MACCTL);
1834bc3e1174Sgdamore
1835bc3e1174Sgdamore /* XXX: do we need to do this? */
1836bc3e1174Sgdamore /* Idle the transmit and receive processes. */
1837bc3e1174Sgdamore //ae_idle(sc, OPMODE_ST | OPMODE_SR);
1838bc3e1174Sgdamore
1839bc3e1174Sgdamore if (sc->sc_mii.mii_media_active & IFM_FDX) {
1840bc3e1174Sgdamore flowc = FLOWC_FCE;
1841bc3e1174Sgdamore macctl &= ~MACCTL_DRO;
1842bc3e1174Sgdamore macctl |= MACCTL_FDX;
1843bc3e1174Sgdamore } else {
1844bc3e1174Sgdamore flowc = 0; /* cannot do flow control in HDX */
1845bc3e1174Sgdamore macctl |= MACCTL_DRO;
1846bc3e1174Sgdamore macctl &= ~MACCTL_FDX;
1847bc3e1174Sgdamore }
1848bc3e1174Sgdamore
1849bc3e1174Sgdamore AE_WRITE(sc, CSR_FLOWC, flowc);
1850bc3e1174Sgdamore AE_WRITE(sc, CSR_MACCTL, macctl);
1851bc3e1174Sgdamore
1852bc3e1174Sgdamore /* restore operational mode */
1853bc3e1174Sgdamore //AE_WRITE(sc, CSR_OPMODE, opmode);
1854bc3e1174Sgdamore AE_BARRIER(sc);
1855bc3e1174Sgdamore }
1856bc3e1174Sgdamore
1857bc3e1174Sgdamore /*
1858bc3e1174Sgdamore * ae_mii_readreg:
1859bc3e1174Sgdamore *
1860bc3e1174Sgdamore * Read a PHY register.
1861bc3e1174Sgdamore */
1862bc3e1174Sgdamore static int
1863a5cdd4b4Smsaitoh ae_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
1864bc3e1174Sgdamore {
186525cf32eeSdyoung struct ae_softc *sc = device_private(self);
1866bc3e1174Sgdamore uint32_t addr;
1867bc3e1174Sgdamore int i;
1868bc3e1174Sgdamore
1869bc3e1174Sgdamore addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT);
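	/*
	 * Illustrative note (not from the original sources): the PHY and
	 * register numbers are packed into a single CSR_MIIADDR word, e.g.
	 * PHY 1, register 1 (the MII BMSR) becomes
	 * (1 << MIIADDR_PHY_SHIFT) | (1 << MIIADDR_REG_SHIFT).
	 */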
1870bc3e1174Sgdamore AE_WRITE(sc, CSR_MIIADDR, addr);
1871bc3e1174Sgdamore AE_BARRIER(sc);
1872bc3e1174Sgdamore for (i = 0; i < 100000000; i++) {
1873bc3e1174Sgdamore if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
1874bc3e1174Sgdamore break;
1875bc3e1174Sgdamore }
1876bc3e1174Sgdamore
1877a5cdd4b4Smsaitoh if (i >= 100000000)
1878a5cdd4b4Smsaitoh return ETIMEDOUT;
1879a5cdd4b4Smsaitoh
1880a5cdd4b4Smsaitoh *val = AE_READ(sc, CSR_MIIDATA) & 0xffff;
1881a5cdd4b4Smsaitoh return 0;
1882bc3e1174Sgdamore }
1883bc3e1174Sgdamore
1884bc3e1174Sgdamore /*
1885bc3e1174Sgdamore * ae_mii_writereg:
1886bc3e1174Sgdamore *
1887bc3e1174Sgdamore * Write a PHY register.
1888bc3e1174Sgdamore */
1889a5cdd4b4Smsaitoh static int
1890a5cdd4b4Smsaitoh ae_mii_writereg(device_t self, int phy, int reg, uint16_t val)
1891bc3e1174Sgdamore {
189225cf32eeSdyoung struct ae_softc *sc = device_private(self);
1893bc3e1174Sgdamore uint32_t addr;
1894bc3e1174Sgdamore int i;
1895bc3e1174Sgdamore
1896bc3e1174Sgdamore /* write the data register */
1897bc3e1174Sgdamore AE_WRITE(sc, CSR_MIIDATA, val);
1898bc3e1174Sgdamore
1899bc3e1174Sgdamore /* write the address to latch it in */
1900bc3e1174Sgdamore addr = (phy << MIIADDR_PHY_SHIFT) | (reg << MIIADDR_REG_SHIFT) |
1901bc3e1174Sgdamore MIIADDR_WRITE;
1902bc3e1174Sgdamore AE_WRITE(sc, CSR_MIIADDR, addr);
1903bc3e1174Sgdamore AE_BARRIER(sc);
1904bc3e1174Sgdamore
1905bc3e1174Sgdamore for (i = 0; i < 100000000; i++) {
1906bc3e1174Sgdamore if ((AE_READ(sc, CSR_MIIADDR) & MIIADDR_BUSY) == 0)
1907bc3e1174Sgdamore break;
1908bc3e1174Sgdamore }
1909a5cdd4b4Smsaitoh
1910a5cdd4b4Smsaitoh if (i >= 100000000)
1911a5cdd4b4Smsaitoh return ETIMEDOUT;
1912a5cdd4b4Smsaitoh
1913a5cdd4b4Smsaitoh return 0;
1914bc3e1174Sgdamore }
1915