1 /*	$NetBSD: if_axe.c,v 1.80 2017/01/12 18:26:08 maya Exp $	*/
2 /*	$OpenBSD: if_axe.c,v 1.137 2016/04/13 11:03:37 mpi Exp $ */
3 
4 /*
5  * Copyright (c) 2005, 2006, 2007 Jonathan Gray <jsg@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /*
21  * Copyright (c) 1997, 1998, 1999, 2000-2003
22  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
23  *
24  * Redistribution and use in source and binary forms, with or without
25  * modification, are permitted provided that the following conditions
26  * are met:
27  * 1. Redistributions of source code must retain the above copyright
28  *    notice, this list of conditions and the following disclaimer.
29  * 2. Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in the
31  *    documentation and/or other materials provided with the distribution.
32  * 3. All advertising materials mentioning features or use of this software
33  *    must display the following acknowledgement:
34  *	This product includes software developed by Bill Paul.
35  * 4. Neither the name of the author nor the names of any co-contributors
36  *    may be used to endorse or promote products derived from this software
37  *    without specific prior written permission.
38  *
39  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
40  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
43  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
44  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
45  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
46  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
47  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
48  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
49  * THE POSSIBILITY OF SUCH DAMAGE.
50  */
51 
52 /*
53  * ASIX Electronics AX88172/AX88178/AX88772 USB 2.0 ethernet driver.
54  * Used in the LinkSys USB200M and various other adapters.
55  *
56  * Written by Bill Paul <wpaul@windriver.com>
57  * Senior Engineer
58  * Wind River Systems
59  */
60 
61 /*
62  * The AX88172 provides USB ethernet support at 10 and 100Mbps.
63  * It uses an external PHY (reference designs use a RealTek chip),
64  * and has a 64-bit multicast hash filter. There is some information
65  * missing from the manual which one needs to know in order to make
66  * the chip function:
67  *
68  * - You must set bit 7 in the RX control register, otherwise the
69  *   chip won't receive any packets.
70  * - You must initialize all 3 IPG registers, or you won't be able
71  *   to send any packets.
72  *
73  * Note that this device appears to only support loading the station
74  * address via autoload from the EEPROM (i.e. there's no way to manually
75  * set it).
76  *
77  * (Adam Weinberger wanted me to name this driver if_gir.c.)
78  */
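
/*
 * An illustrative sketch only (not part of the driver): the minimal AX88172
 * bring-up implied by the notes above, written with this driver's own
 * helpers.  See axe_init() below for the real sequence; AXE_RXCMD_ENABLE is
 * assumed to be the "bit 7" RX enable referred to above, and the IPG values
 * are whatever was read from the chip at attach time.
 *
 *	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, AXE_RXCMD_ENABLE, NULL);
 *	axe_cmd(sc, AXE_172_CMD_WRITE_IPG0, 0, sc->axe_ipgs[0], NULL);
 *	axe_cmd(sc, AXE_172_CMD_WRITE_IPG1, 0, sc->axe_ipgs[1], NULL);
 *	axe_cmd(sc, AXE_172_CMD_WRITE_IPG2, 0, sc->axe_ipgs[2], NULL);
 */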
79 
80 /*
81  * Ax88178 and Ax88772 support backported from the OpenBSD driver.
82  * 2007/02/12, J.R. Oldroyd, fbsd@opal.com
83  *
84  * Manual here:
85  * http://www.asix.com.tw/FrootAttach/datasheet/AX88178_datasheet_Rev10.pdf
86  * http://www.asix.com.tw/FrootAttach/datasheet/AX88772_datasheet_Rev10.pdf
87  */
88 
89 #include <sys/cdefs.h>
90 __KERNEL_RCSID(0, "$NetBSD: if_axe.c,v 1.80 2017/01/12 18:26:08 maya Exp $");
91 
92 #ifdef _KERNEL_OPT
93 #include "opt_inet.h"
94 #include "opt_usb.h"
95 #endif
96 
97 #include <sys/param.h>
98 #include <sys/bus.h>
99 #include <sys/device.h>
100 #include <sys/kernel.h>
101 #include <sys/mbuf.h>
102 #include <sys/module.h>
103 #include <sys/mutex.h>
104 #include <sys/socket.h>
105 #include <sys/sockio.h>
106 #include <sys/systm.h>
107 
108 #include <sys/rndsource.h>
109 
110 #include <net/if.h>
111 #include <net/if_dl.h>
112 #include <net/if_ether.h>
113 #include <net/if_media.h>
114 
115 #include <net/bpf.h>
116 
117 #include <dev/mii/mii.h>
118 #include <dev/mii/miivar.h>
119 
120 #include <dev/usb/usb.h>
121 #include <dev/usb/usbhist.h>
122 #include <dev/usb/usbdi.h>
123 #include <dev/usb/usbdi_util.h>
124 #include <dev/usb/usbdivar.h>
125 #include <dev/usb/usbdevs.h>
126 
127 #include <dev/usb/if_axereg.h>
128 
129 /*
130  * AXE_178_MAX_FRAME_BURST
131  * max frame burst size for Ax88178 and Ax88772
132  *	0	2048 bytes
133  *	1	4096 bytes
134  *	2	8192 bytes
135  *	3	16384 bytes
136  * use the largest your system can handle without USB stalling.
137  *
138  * NB: 88772 parts appear to generate lots of input errors with
139  * a 2K rx buffer, and 8K is only slightly faster than 4K on an
140  * EHCI port on a T42, so change at your own risk.
141  */
142 #define AXE_178_MAX_FRAME_BURST	1
143 
144 
145 #ifdef USB_DEBUG
146 #ifndef AXE_DEBUG
147 #define axedebug 0
148 #else
149 static int axedebug = 20;
150 
151 SYSCTL_SETUP(sysctl_hw_axe_setup, "sysctl hw.axe setup")
152 {
153 	int err;
154 	const struct sysctlnode *rnode;
155 	const struct sysctlnode *cnode;
156 
157 	err = sysctl_createv(clog, 0, NULL, &rnode,
158 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "axe",
159 	    SYSCTL_DESCR("axe global controls"),
160 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
161 
162 	if (err)
163 		goto fail;
164 
165 	/* control debugging printfs */
166 	err = sysctl_createv(clog, 0, &rnode, &cnode,
167 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
168 	    "debug", SYSCTL_DESCR("Enable debugging output"),
169 	    NULL, 0, &axedebug, sizeof(axedebug), CTL_CREATE, CTL_EOL);
170 	if (err)
171 		goto fail;
172 
173 	return;
174 fail:
175 	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
176 }
177 
178 #endif /* AXE_DEBUG */
179 #endif /* USB_DEBUG */
180 
181 #define DPRINTF(FMT,A,B,C,D)	USBHIST_LOGN(axedebug,1,FMT,A,B,C,D)
182 #define DPRINTFN(N,FMT,A,B,C,D)	USBHIST_LOGN(axedebug,N,FMT,A,B,C,D)
183 #define AXEHIST_FUNC()		USBHIST_FUNC()
184 #define AXEHIST_CALLED(name)	USBHIST_CALLED(axedebug)
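
/*
 * With USB_DEBUG and AXE_DEBUG configured, the verbosity of the DPRINTF()/
 * DPRINTFN() calls below can be tuned at run time through the sysctl node
 * created above, e.g.:
 *
 *	sysctl -w hw.axe.debug=30
 */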
185 
186 /*
187  * Various supported device vendors/products.
188  */
189 static const struct axe_type axe_devs[] = {
190 	{ { USB_VENDOR_ABOCOM,		USB_PRODUCT_ABOCOM_UFE2000}, 0 },
191 	{ { USB_VENDOR_ACERCM,		USB_PRODUCT_ACERCM_EP1427X2}, 0 },
192 	{ { USB_VENDOR_APPLE,		USB_PRODUCT_APPLE_ETHERNET }, AX772 },
193 	{ { USB_VENDOR_ASIX,		USB_PRODUCT_ASIX_AX88172}, 0 },
194 	{ { USB_VENDOR_ASIX,		USB_PRODUCT_ASIX_AX88772}, AX772 },
195 	{ { USB_VENDOR_ASIX,		USB_PRODUCT_ASIX_AX88772A}, AX772 },
196 	{ { USB_VENDOR_ASIX,		USB_PRODUCT_ASIX_AX88772B}, AX772B },
197 	{ { USB_VENDOR_ASIX,		USB_PRODUCT_ASIX_AX88772B_1}, AX772B },
198 	{ { USB_VENDOR_ASIX,		USB_PRODUCT_ASIX_AX88178}, AX178 },
199 	{ { USB_VENDOR_ATEN,		USB_PRODUCT_ATEN_UC210T}, 0 },
200 	{ { USB_VENDOR_BELKIN,		USB_PRODUCT_BELKIN_F5D5055 }, AX178 },
201 	{ { USB_VENDOR_BILLIONTON,	USB_PRODUCT_BILLIONTON_USB2AR}, 0},
202 	{ { USB_VENDOR_CISCOLINKSYS,	USB_PRODUCT_CISCOLINKSYS_USB200MV2}, AX772A },
203 	{ { USB_VENDOR_COREGA,		USB_PRODUCT_COREGA_FETHER_USB2_TX }, 0},
204 	{ { USB_VENDOR_DLINK,		USB_PRODUCT_DLINK_DUBE100}, 0 },
205 	{ { USB_VENDOR_DLINK,		USB_PRODUCT_DLINK_DUBE100B1 }, AX772 },
206 	{ { USB_VENDOR_DLINK2,		USB_PRODUCT_DLINK2_DUBE100B1 }, AX772 },
207 	{ { USB_VENDOR_DLINK,		USB_PRODUCT_DLINK_DUBE100C1 }, AX772B },
208 	{ { USB_VENDOR_GOODWAY,		USB_PRODUCT_GOODWAY_GWUSB2E}, 0 },
209 	{ { USB_VENDOR_IODATA,		USB_PRODUCT_IODATA_ETGUS2 }, AX178 },
210 	{ { USB_VENDOR_JVC,		USB_PRODUCT_JVC_MP_PRX1}, 0 },
211 	{ { USB_VENDOR_LENOVO,		USB_PRODUCT_LENOVO_ETHERNET }, AX772B },
212 	{ { USB_VENDOR_LINKSYS, 	USB_PRODUCT_LINKSYS_HG20F9}, AX772B },
213 	{ { USB_VENDOR_LINKSYS2,	USB_PRODUCT_LINKSYS2_USB200M}, 0 },
214 	{ { USB_VENDOR_LINKSYS4,	USB_PRODUCT_LINKSYS4_USB1000 }, AX178 },
215 	{ { USB_VENDOR_LOGITEC,		USB_PRODUCT_LOGITEC_LAN_GTJU2}, AX178 },
216 	{ { USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_LUAU2GT}, AX178 },
217 	{ { USB_VENDOR_MELCO,		USB_PRODUCT_MELCO_LUAU2KTX}, 0 },
218 	{ { USB_VENDOR_MSI,		USB_PRODUCT_MSI_AX88772A}, AX772 },
219 	{ { USB_VENDOR_NETGEAR,		USB_PRODUCT_NETGEAR_FA120}, 0 },
220 	{ { USB_VENDOR_OQO,		USB_PRODUCT_OQO_ETHER01PLUS }, AX772 },
221 	{ { USB_VENDOR_PLANEX3,		USB_PRODUCT_PLANEX3_GU1000T }, AX178 },
222 	{ { USB_VENDOR_SITECOM,		USB_PRODUCT_SITECOM_LN029}, 0 },
223 	{ { USB_VENDOR_SITECOMEU,	USB_PRODUCT_SITECOMEU_LN028 }, AX178 },
224 	{ { USB_VENDOR_SITECOMEU,	USB_PRODUCT_SITECOMEU_LN031 }, AX178 },
225 	{ { USB_VENDOR_SYSTEMTALKS,	USB_PRODUCT_SYSTEMTALKS_SGCX2UL}, 0 },
226 };
227 #define axe_lookup(v, p) ((const struct axe_type *)usb_lookup(axe_devs, v, p))
228 
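/*
 * AX88772B maximum frame burst settings: each entry provides the threshold
 * and byte count values written with AXE_772B_CMD_RXCTL_WRITE_CFG together
 * with the nominal RX buffer size.  axe_init() currently selects the 16KB
 * entry (AX88772B_MFB_16K).
 */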
229 static const struct ax88772b_mfb ax88772b_mfb_table[] = {
230 	{ 0x8000, 0x8001, 2048 },
231 	{ 0x8100, 0x8147, 4096 },
232 	{ 0x8200, 0x81EB, 6144 },
233 	{ 0x8300, 0x83D7, 8192 },
234 	{ 0x8400, 0x851E, 16384 },
235 	{ 0x8500, 0x8666, 20480 },
236 	{ 0x8600, 0x87AE, 24576 },
237 	{ 0x8700, 0x8A3D, 32768 }
238 };
239 
240 int	axe_match(device_t, cfdata_t, void *);
241 void	axe_attach(device_t, device_t, void *);
242 int	axe_detach(device_t, int);
243 int	axe_activate(device_t, devact_t);
244 
245 CFATTACH_DECL_NEW(axe, sizeof(struct axe_softc),
246 	axe_match, axe_attach, axe_detach, axe_activate);
247 
248 static int	axe_tx_list_init(struct axe_softc *);
249 static int	axe_rx_list_init(struct axe_softc *);
250 static int	axe_encap(struct axe_softc *, struct mbuf *, int);
251 static void	axe_rxeof(struct usbd_xfer *, void *, usbd_status);
252 static void	axe_txeof(struct usbd_xfer *, void *, usbd_status);
253 static void	axe_tick(void *);
254 static void	axe_tick_task(void *);
255 static void	axe_start(struct ifnet *);
256 static int	axe_ioctl(struct ifnet *, u_long, void *);
257 static int	axe_init(struct ifnet *);
258 static void	axe_stop(struct ifnet *, int);
259 static void	axe_watchdog(struct ifnet *);
260 static int	axe_miibus_readreg_locked(device_t, int, int);
261 static int	axe_miibus_readreg(device_t, int, int);
262 static void	axe_miibus_writereg_locked(device_t, int, int, int);
263 static void	axe_miibus_writereg(device_t, int, int, int);
264 static void	axe_miibus_statchg(struct ifnet *);
265 static int	axe_cmd(struct axe_softc *, int, int, int, void *);
266 static void	axe_reset(struct axe_softc *);
267 
268 static void	axe_setmulti(struct axe_softc *);
269 static void	axe_lock_mii(struct axe_softc *);
270 static void	axe_unlock_mii(struct axe_softc *);
271 
272 static void	axe_ax88178_init(struct axe_softc *);
273 static void	axe_ax88772_init(struct axe_softc *);
274 
275 /* Get exclusive access to the MII registers */
276 static void
277 axe_lock_mii(struct axe_softc *sc)
278 {
279 
280 	sc->axe_refcnt++;
281 	mutex_enter(&sc->axe_mii_lock);
282 }
283 
284 static void
285 axe_unlock_mii(struct axe_softc *sc)
286 {
287 
288 	mutex_exit(&sc->axe_mii_lock);
289 	if (--sc->axe_refcnt < 0)
290 		usb_detach_wakeupold((sc->axe_dev));
291 }
292 
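/*
 * Issue a vendor control request to the adapter.  AXE_CMD_DIR(),
 * AXE_CMD_CMD() and AXE_CMD_LEN() unpack the transfer direction, bRequest
 * code and length from the command word.  Must be called with the MII
 * lock held.
 */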
293 static int
294 axe_cmd(struct axe_softc *sc, int cmd, int index, int val, void *buf)
295 {
296 	AXEHIST_FUNC(); AXEHIST_CALLED();
297 	usb_device_request_t req;
298 	usbd_status err;
299 
300 	KASSERT(mutex_owned(&sc->axe_mii_lock));
301 
302 	if (sc->axe_dying)
303 		return 0;
304 
305 	DPRINTFN(20, "cmd %#x index %#x val %#x", cmd, index, val, 0);
306 
307 	if (AXE_CMD_DIR(cmd))
308 		req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
309 	else
310 		req.bmRequestType = UT_READ_VENDOR_DEVICE;
311 	req.bRequest = AXE_CMD_CMD(cmd);
312 	USETW(req.wValue, val);
313 	USETW(req.wIndex, index);
314 	USETW(req.wLength, AXE_CMD_LEN(cmd));
315 
316 	err = usbd_do_request(sc->axe_udev, &req, buf);
317 
318 	if (err) {
319 		DPRINTF("cmd %d err %d", cmd, err, 0, 0);
320 		return -1;
321 	}
322 	return 0;
323 }
324 
325 static int
326 axe_miibus_readreg_locked(device_t dev, int phy, int reg)
327 {
328 	AXEHIST_FUNC(); AXEHIST_CALLED();
329 	struct axe_softc *sc = device_private(dev);
330 	usbd_status err;
331 	uint16_t val;
332 
333 	DPRINTFN(30, "phy 0x%x reg 0x%x\n", phy, reg, 0, 0);
334 
335 	axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL);
336 
337 	err = axe_cmd(sc, AXE_CMD_MII_READ_REG, reg, phy, (void *)&val);
338 	axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL);
339 	if (err) {
340 		aprint_error_dev(sc->axe_dev, "read PHY failed\n");
341 		return -1;
342 	}
343 
344 	val = le16toh(val);
345 	if (AXE_IS_772(sc) && reg == MII_BMSR) {
346 		/*
347 		 * BMSR of AX88772 indicates that it supports extended
348 		 * capability but the extended status register is
349 		 * reserved for embedded ethernet PHY. So clear the
350 		 * extended capability bit of BMSR.
351 		 */
352 		val &= ~BMSR_EXTCAP;
353 	}
354 
355 	DPRINTFN(30, "phy 0x%x reg 0x%x val %#x", phy, reg, val, 0);
356 
357 	return val;
358 }
359 
360 static int
361 axe_miibus_readreg(device_t dev, int phy, int reg)
362 {
363 	struct axe_softc *sc = device_private(dev);
364 	int val;
365 
366 	if (sc->axe_dying)
367 		return 0;
368 
369 	if (sc->axe_phyno != phy)
370 		return 0;
371 
372 	axe_lock_mii(sc);
373 	val = axe_miibus_readreg_locked(dev, phy, reg);
374 	axe_unlock_mii(sc);
375 
376 	return val;
377 }
378 
379 static void
380 axe_miibus_writereg_locked(device_t dev, int phy, int reg, int aval)
381 {
382 	struct axe_softc *sc = device_private(dev);
383 	usbd_status err;
384 	uint16_t val;
385 
386 	val = htole16(aval);
387 
388 	axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL);
389 	err = axe_cmd(sc, AXE_CMD_MII_WRITE_REG, reg, phy, (void *)&val);
390 	axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL);
391 
392 	if (err) {
393 		aprint_error_dev(sc->axe_dev, "write PHY failed\n");
394 		return;
395 	}
396 }
397 
398 static void
399 axe_miibus_writereg(device_t dev, int phy, int reg, int aval)
400 {
401 	struct axe_softc *sc = device_private(dev);
402 
403 	if (sc->axe_dying)
404 		return;
405 
406 	if (sc->axe_phyno != phy)
407 		return;
408 
409 	axe_lock_mii(sc);
410 	axe_miibus_writereg_locked(dev, phy, reg, aval);
411 	axe_unlock_mii(sc);
412 }
413 
414 static void
415 axe_miibus_statchg(struct ifnet *ifp)
416 {
417 	AXEHIST_FUNC(); AXEHIST_CALLED();
418 
419 	struct axe_softc *sc = ifp->if_softc;
420 	struct mii_data *mii = &sc->axe_mii;
421 	int val, err;
422 
423 	val = 0;
424 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
425 		val |= AXE_MEDIA_FULL_DUPLEX;
426 		if (AXE_IS_178_FAMILY(sc)) {
427 			if ((IFM_OPTIONS(mii->mii_media_active) &
428 			    IFM_ETH_TXPAUSE) != 0)
429 				val |= AXE_178_MEDIA_TXFLOW_CONTROL_EN;
430 			if ((IFM_OPTIONS(mii->mii_media_active) &
431 			    IFM_ETH_RXPAUSE) != 0)
432 				val |= AXE_178_MEDIA_RXFLOW_CONTROL_EN;
433 		}
434 	}
435 	if (AXE_IS_178_FAMILY(sc)) {
436 		val |= AXE_178_MEDIA_RX_EN | AXE_178_MEDIA_MAGIC;
437 		if (sc->axe_flags & AX178)
438 			val |= AXE_178_MEDIA_ENCK;
439 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
440 		case IFM_1000_T:
441 			val |= AXE_178_MEDIA_GMII | AXE_178_MEDIA_ENCK;
442 			break;
443 		case IFM_100_TX:
444 			val |= AXE_178_MEDIA_100TX;
445 			break;
446 		case IFM_10_T:
447 			/* doesn't need to be handled */
448 			break;
449 		}
450 	}
451 
452 	DPRINTF("val=0x%x", val, 0, 0, 0);
453 	axe_lock_mii(sc);
454 	err = axe_cmd(sc, AXE_CMD_WRITE_MEDIA, 0, val, NULL);
455 	axe_unlock_mii(sc);
456 	if (err) {
457 		aprint_error_dev(sc->axe_dev, "media change failed\n");
458 		return;
459 	}
460 }
461 
462 static void
463 axe_setmulti(struct axe_softc *sc)
464 {
465 	AXEHIST_FUNC(); AXEHIST_CALLED();
466 	struct ifnet *ifp = &sc->sc_if;
467 	struct ether_multi *enm;
468 	struct ether_multistep step;
469 	uint32_t h = 0;
470 	uint16_t rxmode;
471 	uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
472 
473 	if (sc->axe_dying)
474 		return;
475 
476 	axe_lock_mii(sc);
477 	axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, (void *)&rxmode);
478 	rxmode = le16toh(rxmode);
479 
480 	rxmode &=
481 	    ~(AXE_RXCMD_ALLMULTI | AXE_RXCMD_PROMISC |
482 	    AXE_RXCMD_BROADCAST | AXE_RXCMD_MULTICAST);
483 
484 	rxmode |=
485 	    (ifp->if_flags & IFF_BROADCAST) ? AXE_RXCMD_BROADCAST : 0;
486 
487 	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
488 		if (ifp->if_flags & IFF_PROMISC)
489 			rxmode |= AXE_RXCMD_PROMISC;
490 		goto allmulti;
491 	}
492 
493 	/* Now program new ones */
494 	ETHER_FIRST_MULTI(step, &sc->axe_ec, enm);
495 	while (enm != NULL) {
496 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
497 		    ETHER_ADDR_LEN) != 0)
498 			goto allmulti;
499 
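		/*
		 * The hash is the top 6 bits of the big-endian CRC32 of the
		 * address, selecting one of the 64 bits in the 8-byte
		 * multicast filter table.
		 */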
500 		h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
501 		hashtbl[h >> 3] |= 1U << (h & 7);
502 		ETHER_NEXT_MULTI(step, enm);
503 	}
504 	ifp->if_flags &= ~IFF_ALLMULTI;
505 	rxmode |= AXE_RXCMD_MULTICAST;
506 
507 	axe_cmd(sc, AXE_CMD_WRITE_MCAST, 0, 0, (void *)&hashtbl);
508 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
509 	axe_unlock_mii(sc);
510 	return;
511 
512  allmulti:
513 	ifp->if_flags |= IFF_ALLMULTI;
514 	rxmode |= AXE_RXCMD_ALLMULTI;
515 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
516 	axe_unlock_mii(sc);
517 }
518 
519 
520 static void
521 axe_reset(struct axe_softc *sc)
522 {
523 
524 	if (sc->axe_dying)
525 		return;
526 
527 	/*
528 	 * softnet_lock can be taken when NET_MPSAFE is not defined, when calling
529 	 * if_addr_init -> if_init.  This doesn't mix well with the
530 	 * usbd_delay_ms calls in the init routines, as things like nd6_slowtimo
531 	 * can fire during the wait, attempt to take softnet_lock and then
532 	 * block the softclk thread, meaning the wait never ends.
533 	 */
534 #ifndef NET_MPSAFE
535 	/* XXX What to reset? */
536 
537 	/* Wait a little while for the chip to get its brains in order. */
538 	DELAY(1000);
539 #else
540 	axe_lock_mii(sc);
541 
542 	if (sc->axe_flags & AX178) {
543 		axe_ax88178_init(sc);
544 	} else if (sc->axe_flags & AX772) {
545 		axe_ax88772_init(sc);
546 	} else if (sc->axe_flags & AX772A) {
547 		axe_ax88772a_init(sc);
548 	} else if (sc->axe_flags & AX772B) {
549 		axe_ax88772b_init(sc);
550 	}
551 	axe_unlock_mii(sc);
552 #endif
553 }
554 
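/*
 * Pick a usable PHY address out of the PHYID bytes read with
 * AXE_CMD_READ_PHYID; returns -1 when the selected entry does not
 * describe a 10/100 or gigabit PHY.
 */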
555 static int
556 axe_get_phyno(struct axe_softc *sc, int sel)
557 {
558 	int phyno;
559 
560 	switch (AXE_PHY_TYPE(sc->axe_phyaddrs[sel])) {
561 	case PHY_TYPE_100_HOME:
562 		/* FALLTHROUGH */
563 	case PHY_TYPE_GIG:
564 		phyno = AXE_PHY_NO(sc->axe_phyaddrs[sel]);
565 		break;
566 	case PHY_TYPE_SPECIAL:
567 		/* FALLTHROUGH */
568 	case PHY_TYPE_RSVD:
569 		/* FALLTHROUGH */
570 	case PHY_TYPE_NON_SUP:
571 		/* FALLTHROUGH */
572 	default:
573 		phyno = -1;
574 		break;
575 	}
576 
577 	return phyno;
578 }
579 
580 #define	AXE_GPIO_WRITE(x, y)	do {				\
581 	axe_cmd(sc, AXE_CMD_WRITE_GPIO, 0, (x), NULL);		\
582 	usbd_delay_ms(sc->axe_udev, hztoms(y));			\
583 } while (0)
584 
585 static void
586 axe_ax88178_init(struct axe_softc *sc)
587 {
588 	AXEHIST_FUNC(); AXEHIST_CALLED();
589 	int gpio0, ledmode, phymode;
590 	uint16_t eeprom, val;
591 
592 	axe_cmd(sc, AXE_CMD_SROM_WR_ENABLE, 0, 0, NULL);
593 	/* XXX magic */
594 	axe_cmd(sc, AXE_CMD_SROM_READ, 0, 0x0017, &eeprom);
595 	axe_cmd(sc, AXE_CMD_SROM_WR_DISABLE, 0, 0, NULL);
596 
597 	eeprom = le16toh(eeprom);
598 
599 	DPRINTF("EEPROM is 0x%x", eeprom, 0, 0, 0);
600 
601 	/* if the EEPROM is invalid we have to use GPIO0 */
602 	if (eeprom == 0xffff) {
603 		phymode = AXE_PHY_MODE_MARVELL;
604 		gpio0 = 1;
605 		ledmode = 0;
606 	} else {
607 		phymode = eeprom & 0x7f;
608 		gpio0 = (eeprom & 0x80) ? 0 : 1;
609 		ledmode = eeprom >> 8;
610 	}
611 
612 	DPRINTF("use gpio0: %d, phymode %d", gpio0, phymode, 0, 0);
613 
614 	/* Program GPIOs depending on PHY hardware. */
615 	switch (phymode) {
616 	case AXE_PHY_MODE_MARVELL:
617 		if (gpio0 == 1) {
618 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO0_EN,
619 			    hz / 32);
620 			AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2 | AXE_GPIO2_EN,
621 			    hz / 32);
622 			AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2_EN, hz / 4);
623 			AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2 | AXE_GPIO2_EN,
624 			    hz / 32);
625 		} else {
626 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
627 			    AXE_GPIO1_EN, hz / 3);
628 			if (ledmode == 1) {
629 				AXE_GPIO_WRITE(AXE_GPIO1_EN, hz / 3);
630 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN,
631 				    hz / 3);
632 			} else {
633 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
634 				    AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
635 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
636 				    AXE_GPIO2_EN, hz / 4);
637 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
638 				    AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
639 			}
640 		}
641 		break;
642 	case AXE_PHY_MODE_CICADA:
643 	case AXE_PHY_MODE_CICADA_V2:
644 	case AXE_PHY_MODE_CICADA_V2_ASIX:
645 		if (gpio0 == 1)
646 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO0 |
647 			    AXE_GPIO0_EN, hz / 32);
648 		else
649 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
650 			    AXE_GPIO1_EN, hz / 32);
651 		break;
652 	case AXE_PHY_MODE_AGERE:
653 		AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
654 		    AXE_GPIO1_EN, hz / 32);
655 		AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 |
656 		    AXE_GPIO2_EN, hz / 32);
657 		AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2_EN, hz / 4);
658 		AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 |
659 		    AXE_GPIO2_EN, hz / 32);
660 		break;
661 	case AXE_PHY_MODE_REALTEK_8211CL:
662 	case AXE_PHY_MODE_REALTEK_8211BN:
663 	case AXE_PHY_MODE_REALTEK_8251CL:
664 		val = gpio0 == 1 ? AXE_GPIO0 | AXE_GPIO0_EN :
665 		    AXE_GPIO1 | AXE_GPIO1_EN;
666 		AXE_GPIO_WRITE(val, hz / 32);
667 		AXE_GPIO_WRITE(val | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
668 		AXE_GPIO_WRITE(val | AXE_GPIO2_EN, hz / 4);
669 		AXE_GPIO_WRITE(val | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
670 		if (phymode == AXE_PHY_MODE_REALTEK_8211CL) {
671 			axe_miibus_writereg_locked(sc->axe_dev,
672 			    sc->axe_phyno, 0x1F, 0x0005);
673 			axe_miibus_writereg_locked(sc->axe_dev,
674 			    sc->axe_phyno, 0x0C, 0x0000);
675 			val = axe_miibus_readreg_locked(sc->axe_dev,
676 			    sc->axe_phyno, 0x0001);
677 			axe_miibus_writereg_locked(sc->axe_dev,
678 			    sc->axe_phyno, 0x01, val | 0x0080);
679 			axe_miibus_writereg_locked(sc->axe_dev,
680 			    sc->axe_phyno, 0x1F, 0x0000);
681 		}
682 		break;
683 	default:
684 		/* Unknown PHY model or no need to program GPIOs. */
685 		break;
686 	}
687 
688 	/* soft reset */
689 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL);
690 	usbd_delay_ms(sc->axe_udev, 150);
691 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
692 	    AXE_SW_RESET_PRL | AXE_178_RESET_MAGIC, NULL);
693 	usbd_delay_ms(sc->axe_udev, 150);
694 	/* Enable MII/GMII/RGMII interface to work with external PHY. */
695 	axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0, NULL);
696 	usbd_delay_ms(sc->axe_udev, 10);
697 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
698 }
699 
700 static void
701 axe_ax88772_init(struct axe_softc *sc)
702 {
703 	AXEHIST_FUNC(); AXEHIST_CALLED();
704 
705 	axe_cmd(sc, AXE_CMD_WRITE_GPIO, 0, 0x00b0, NULL);
706 	usbd_delay_ms(sc->axe_udev, 40);
707 
708 	if (sc->axe_phyno == AXE_772_PHY_NO_EPHY) {
709 		/* ask for the embedded PHY */
710 		axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0,
711 		    AXE_SW_PHY_SELECT_EMBEDDED, NULL);
712 		usbd_delay_ms(sc->axe_udev, 10);
713 
714 		/* power down and reset state, pin reset state */
715 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL);
716 		usbd_delay_ms(sc->axe_udev, 60);
717 
718 		/* power down/reset state, pin operating state */
719 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
720 		    AXE_SW_RESET_IPPD | AXE_SW_RESET_PRL, NULL);
721 		usbd_delay_ms(sc->axe_udev, 150);
722 
723 		/* power up, reset */
724 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_PRL, NULL);
725 
726 		/* power up, operating */
727 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
728 		    AXE_SW_RESET_IPRL | AXE_SW_RESET_PRL, NULL);
729 	} else {
730 		/* ask for external PHY */
731 		axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, AXE_SW_PHY_SELECT_EXT,
732 		    NULL);
733 		usbd_delay_ms(sc->axe_udev, 10);
734 
735 		/* power down internal PHY */
736 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
737 		    AXE_SW_RESET_IPPD | AXE_SW_RESET_PRL, NULL);
738 	}
739 
740 	usbd_delay_ms(sc->axe_udev, 150);
741 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
742 }
743 
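/*
 * Bring the selected PHY out of power-down/reset, stepping through the
 * T1/T2/T3 delays noted below; the longest (AX88772B) T2 of 600ms is
 * used unconditionally.
 */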
744 static void
745 axe_ax88772_phywake(struct axe_softc *sc)
746 {
747 	AXEHIST_FUNC(); AXEHIST_CALLED();
748 
749 	if (sc->axe_phyno == AXE_772_PHY_NO_EPHY) {
750 		/* Manually select internal (embedded) PHY - MAC mode. */
751 		axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0,
752 		    AXE_SW_PHY_SELECT_EMBEDDED,
753 		    NULL);
754 		usbd_delay_ms(sc->axe_udev, hztoms(hz / 32));
755 	} else {
756 		/*
757 		 * Manually select external PHY - MAC mode.
758 		 * Reverse MII/RMII is for AX88772A PHY mode.
759 		 */
760 		axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, AXE_SW_PHY_SELECT_SS_ENB |
761 		    AXE_SW_PHY_SELECT_EXT | AXE_SW_PHY_SELECT_SS_MII, NULL);
762 		usbd_delay_ms(sc->axe_udev, hztoms(hz / 32));
763 	}
764 
765 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPPD |
766 	    AXE_SW_RESET_IPRL, NULL);
767 
768 	/* T1 = min 500ns everywhere */
769 	usbd_delay_ms(sc->axe_udev, 150);
770 
771 	/* Take PHY out of power down. */
772 	if (sc->axe_phyno == AXE_772_PHY_NO_EPHY) {
773 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPRL, NULL);
774 	} else {
775 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_PRTE, NULL);
776 	}
777 
778 	/* 772 T2 is 60ms. 772A T2 is 160ms, 772B T2 is 600ms */
779 	usbd_delay_ms(sc->axe_udev, 600);
780 
781 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL);
782 
783 	/* T3 = 500ns everywhere */
784 	usbd_delay_ms(sc->axe_udev, hztoms(hz / 32));
785 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPRL, NULL);
786 	usbd_delay_ms(sc->axe_udev, hztoms(hz / 32));
787 }
788 
789 static void
790 axe_ax88772a_init(struct axe_softc *sc)
791 {
792 	AXEHIST_FUNC(); AXEHIST_CALLED();
793 
794 	/* Reload EEPROM. */
795 	AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM, hz / 32);
796 	axe_ax88772_phywake(sc);
797 	/* Stop MAC. */
798 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
799 }
800 
801 static void
802 axe_ax88772b_init(struct axe_softc *sc)
803 {
804 	AXEHIST_FUNC(); AXEHIST_CALLED();
805 	uint16_t eeprom;
806 	int i;
807 
808 	/* Reload EEPROM. */
809 	AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM , hz / 32);
810 
811 	/*
812 	 * Save PHY power saving configuration (high byte) and
813 	 * clear EEPROM checksum value (low byte).
814 	 */
815 	axe_cmd(sc, AXE_CMD_SROM_READ, 0, AXE_EEPROM_772B_PHY_PWRCFG, &eeprom);
816 	sc->sc_pwrcfg = le16toh(eeprom) & 0xFF00;
817 
818 	/*
819 	 * The auto-loaded default station address from internal ROM is
820 	 * 00:00:00:00:00:00, so an explicit EEPROM access is required
821 	 * to get the real station address.
822 	 */
823 	uint8_t *eaddr = sc->axe_enaddr;
824 	for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
825 		axe_cmd(sc, AXE_CMD_SROM_READ, 0, AXE_EEPROM_772B_NODE_ID + i,
826 		    &eeprom);
827 		eeprom = le16toh(eeprom);
828 		*eaddr++ = (uint8_t)(eeprom & 0xFF);
829 		*eaddr++ = (uint8_t)((eeprom >> 8) & 0xFF);
830 	}
831 	/* Wakeup PHY. */
832 	axe_ax88772_phywake(sc);
833 	/* Stop MAC. */
834 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
835 }
836 
837 #undef	AXE_GPIO_WRITE
838 
839 /*
840  * Probe for an AX88172 chip.
841  */
842 int
843 axe_match(device_t parent, cfdata_t match, void *aux)
844 {
845 	struct usb_attach_arg *uaa = aux;
846 
847 	return axe_lookup(uaa->uaa_vendor, uaa->uaa_product) != NULL ?
848 	    UMATCH_VENDOR_PRODUCT : UMATCH_NONE;
849 }
850 
851 /*
852  * Attach the interface. Allocate softc structures, do ifmedia
853  * setup and ethernet/BPF attach.
854  */
855 void
856 axe_attach(device_t parent, device_t self, void *aux)
857 {
858 	AXEHIST_FUNC(); AXEHIST_CALLED();
859 	struct axe_softc *sc = device_private(self);
860 	struct usb_attach_arg *uaa = aux;
861 	struct usbd_device *dev = uaa->uaa_device;
862 	usbd_status err;
863 	usb_interface_descriptor_t *id;
864 	usb_endpoint_descriptor_t *ed;
865 	struct mii_data	*mii;
866 	char *devinfop;
867 	const char *devname = device_xname(self);
868 	struct ifnet *ifp;
869 	int i, s;
870 
871 	aprint_naive("\n");
872 	aprint_normal("\n");
873 
874 	sc->axe_dev = self;
875 	sc->axe_udev = dev;
876 
877 	devinfop = usbd_devinfo_alloc(dev, 0);
878 	aprint_normal_dev(self, "%s\n", devinfop);
879 	usbd_devinfo_free(devinfop);
880 
881 	err = usbd_set_config_no(dev, AXE_CONFIG_NO, 1);
882 	if (err) {
883 		aprint_error_dev(self, "failed to set configuration"
884 		    ", err=%s\n", usbd_errstr(err));
885 		return;
886 	}
887 
888 	sc->axe_flags = axe_lookup(uaa->uaa_vendor, uaa->uaa_product)->axe_flags;
889 
890 	mutex_init(&sc->axe_mii_lock, MUTEX_DEFAULT, IPL_NONE);
891 	usb_init_task(&sc->axe_tick_task, axe_tick_task, sc, 0);
892 
893 	err = usbd_device2interface_handle(dev, AXE_IFACE_IDX, &sc->axe_iface);
894 	if (err) {
895 		aprint_error_dev(self, "getting interface handle failed\n");
896 		return;
897 	}
898 
899 	sc->axe_product = uaa->uaa_product;
900 	sc->axe_vendor = uaa->uaa_vendor;
901 
902 	id = usbd_get_interface_descriptor(sc->axe_iface);
903 
904 	/* decide on what our bufsize will be */
905 	if (AXE_IS_178_FAMILY(sc))
906 		sc->axe_bufsz = (sc->axe_udev->ud_speed == USB_SPEED_HIGH) ?
907 		    AXE_178_MAX_BUFSZ : AXE_178_MIN_BUFSZ;
908 	else
909 		sc->axe_bufsz = AXE_172_BUFSZ;
910 
911 	sc->axe_ed[AXE_ENDPT_RX] = -1;
912 	sc->axe_ed[AXE_ENDPT_TX] = -1;
913 	sc->axe_ed[AXE_ENDPT_INTR] = -1;
914 
915 	/* Find endpoints. */
916 	for (i = 0; i < id->bNumEndpoints; i++) {
917 		ed = usbd_interface2endpoint_descriptor(sc->axe_iface, i);
918 		if (ed == NULL) {
919 			aprint_error_dev(self, "couldn't get ep %d\n", i);
920 			return;
921 		}
922 		const uint8_t xt = UE_GET_XFERTYPE(ed->bmAttributes);
923 		const uint8_t dir = UE_GET_DIR(ed->bEndpointAddress);
924 
925 		if (dir == UE_DIR_IN && xt == UE_BULK &&
926 		    sc->axe_ed[AXE_ENDPT_RX] == -1) {
927 			sc->axe_ed[AXE_ENDPT_RX] = ed->bEndpointAddress;
928 		} else if (dir == UE_DIR_OUT && xt == UE_BULK &&
929 		    sc->axe_ed[AXE_ENDPT_TX] == -1) {
930 			sc->axe_ed[AXE_ENDPT_TX] = ed->bEndpointAddress;
931 		} else if (dir == UE_DIR_IN && xt == UE_INTERRUPT) {
932 			sc->axe_ed[AXE_ENDPT_INTR] = ed->bEndpointAddress;
933 		}
934 	}
935 
936 	s = splnet();
937 
938 	/* We need the PHYID for init dance in some cases */
939 	axe_lock_mii(sc);
940 	axe_cmd(sc, AXE_CMD_READ_PHYID, 0, 0, (void *)&sc->axe_phyaddrs);
941 
942 	DPRINTF(" phyaddrs[0]: %x phyaddrs[1]: %x",
943 	    sc->axe_phyaddrs[0], sc->axe_phyaddrs[1], 0, 0);
944 	sc->axe_phyno = axe_get_phyno(sc, AXE_PHY_SEL_PRI);
945 	if (sc->axe_phyno == -1)
946 		sc->axe_phyno = axe_get_phyno(sc, AXE_PHY_SEL_SEC);
947 	if (sc->axe_phyno == -1) {
948 		DPRINTF(" no valid PHY address found, assuming PHY address 0",
949 		    0, 0, 0, 0);
950 		sc->axe_phyno = 0;
951 	}
952 
953 	/* Initialize controller and get station address. */
954 
955 	if (sc->axe_flags & AX178) {
956 		axe_ax88178_init(sc);
957 		axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, sc->axe_enaddr);
958 	} else if (sc->axe_flags & AX772) {
959 		axe_ax88772_init(sc);
960 		axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, sc->axe_enaddr);
961 	} else if (sc->axe_flags & AX772A) {
962 		axe_ax88772a_init(sc);
963 		axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, sc->axe_enaddr);
964 	} else if (sc->axe_flags & AX772B) {
965 		axe_ax88772b_init(sc);
966 	} else
967 		axe_cmd(sc, AXE_172_CMD_READ_NODEID, 0, 0, sc->axe_enaddr);
968 
969 	/*
970 	 * Fetch IPG values.
971 	 */
972 	if (sc->axe_flags & (AX772A | AX772B)) {
973 		/* Set IPG values. */
974 		sc->axe_ipgs[0] = AXE_IPG0_DEFAULT;
975 		sc->axe_ipgs[1] = AXE_IPG1_DEFAULT;
976 		sc->axe_ipgs[2] = AXE_IPG2_DEFAULT;
977 	} else
978 		axe_cmd(sc, AXE_CMD_READ_IPG012, 0, 0, sc->axe_ipgs);
979 
980 	axe_unlock_mii(sc);
981 
982 	/*
983 	 * An ASIX chip was detected. Inform the world.
984 	 */
985 	aprint_normal_dev(self, "Ethernet address %s\n",
986 	    ether_sprintf(sc->axe_enaddr));
987 
988 	/* Initialize interface info. */
989 	ifp = &sc->sc_if;
990 	ifp->if_softc = sc;
991 	strlcpy(ifp->if_xname, devname, IFNAMSIZ);
992 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
993 	ifp->if_ioctl = axe_ioctl;
994 	ifp->if_start = axe_start;
995 	ifp->if_init = axe_init;
996 	ifp->if_stop = axe_stop;
997 	ifp->if_watchdog = axe_watchdog;
998 
999 	IFQ_SET_READY(&ifp->if_snd);
1000 
1001 	if (AXE_IS_178_FAMILY(sc))
1002 		sc->axe_ec.ec_capabilities = ETHERCAP_VLAN_MTU;
1003 	if (sc->axe_flags & AX772B) {
1004 		ifp->if_capabilities =
1005 		    IFCAP_CSUM_IPv4_Rx |
1006 		    IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
1007 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1008 		/*
1009 		 * Checksum offloading of AX88772B also works with VLAN
1010 		 * tagged frames, but there is no way to take advantage
1011 		 * of the feature because vlan(4) assumes
1012 		 * IFCAP_VLAN_HWTAGGING is a prerequisite for checksum
1013 		 * offloading with VLAN.  The VLAN hardware tagging
1014 		 * support of the AX88772B is very limited, so it's not
1015 		 * possible to announce IFCAP_VLAN_HWTAGGING.
1016 		 */
1017 	}
1018 	u_int adv_pause;
1019 	if (sc->axe_flags & (AX772A | AX772B | AX178))
1020 		adv_pause = MIIF_DOPAUSE;
1021 	else
1022 		adv_pause = 0;
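	/* XXX pause advertisement is currently forced off for all chips */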
1023 	adv_pause = 0;
1024 
1025 	/* Initialize MII/media info. */
1026 	mii = &sc->axe_mii;
1027 	mii->mii_ifp = ifp;
1028 	mii->mii_readreg = axe_miibus_readreg;
1029 	mii->mii_writereg = axe_miibus_writereg;
1030 	mii->mii_statchg = axe_miibus_statchg;
1031 	mii->mii_flags = MIIF_AUTOTSLEEP;
1032 
1033 	sc->axe_ec.ec_mii = mii;
1034 	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
1035 
1036 	mii_attach(sc->axe_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
1037 	    adv_pause);
1038 
1039 	if (LIST_EMPTY(&mii->mii_phys)) {
1040 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
1041 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
1042 	} else
1043 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
1044 
1045 	/* Attach the interface. */
1046 	if_attach(ifp);
1047 	ether_ifattach(ifp, sc->axe_enaddr);
1048 	rnd_attach_source(&sc->rnd_source, device_xname(sc->axe_dev),
1049 	    RND_TYPE_NET, RND_FLAG_DEFAULT);
1050 
1051 	callout_init(&sc->axe_stat_ch, 0);
1052 	callout_setfunc(&sc->axe_stat_ch, axe_tick, sc);
1053 
1054 	sc->axe_attached = true;
1055 	splx(s);
1056 
1057 	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->axe_udev, sc->axe_dev);
1058 
1059 	if (!pmf_device_register(self, NULL, NULL))
1060 		aprint_error_dev(self, "couldn't establish power handler\n");
1061 }
1062 
1063 int
1064 axe_detach(device_t self, int flags)
1065 {
1066 	AXEHIST_FUNC(); AXEHIST_CALLED();
1067 	struct axe_softc *sc = device_private(self);
1068 	int s;
1069 	struct ifnet *ifp = &sc->sc_if;
1070 
1071 	/* Detach called before attach finished, so just bail out. */
1072 	if (!sc->axe_attached)
1073 		return 0;
1074 
1075 	pmf_device_deregister(self);
1076 
1077 	sc->axe_dying = true;
1078 
1079 	if (sc->axe_ep[AXE_ENDPT_TX] != NULL)
1080 		usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_TX]);
1081 	if (sc->axe_ep[AXE_ENDPT_RX] != NULL)
1082 		usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_RX]);
1083 	if (sc->axe_ep[AXE_ENDPT_INTR] != NULL)
1084 		usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_INTR]);
1085 
1086 	/*
1087 	 * Remove any pending tasks.  They cannot be executing because they run
1088 	 * in the same thread as detach.
1089 	 */
1090 	usb_rem_task(sc->axe_udev, &sc->axe_tick_task);
1091 
1092 	s = splusb();
1093 
1094 	if (ifp->if_flags & IFF_RUNNING)
1095 		axe_stop(ifp, 1);
1096 
1097 
1098 	if (--sc->axe_refcnt >= 0) {
1099 		/* Wait for processes to go away. */
1100 		usb_detach_waitold(sc->axe_dev);
1101 	}
1102 
1103 	callout_destroy(&sc->axe_stat_ch);
1104 	mutex_destroy(&sc->axe_mii_lock);
1105 	rnd_detach_source(&sc->rnd_source);
1106 	mii_detach(&sc->axe_mii, MII_PHY_ANY, MII_OFFSET_ANY);
1107 	ifmedia_delete_instance(&sc->axe_mii.mii_media, IFM_INST_ANY);
1108 	ether_ifdetach(ifp);
1109 	if_detach(ifp);
1110 
1111 #ifdef DIAGNOSTIC
1112 	if (sc->axe_ep[AXE_ENDPT_TX] != NULL ||
1113 	    sc->axe_ep[AXE_ENDPT_RX] != NULL ||
1114 	    sc->axe_ep[AXE_ENDPT_INTR] != NULL)
1115 		aprint_debug_dev(self, "detach has active endpoints\n");
1116 #endif
1117 
1118 	sc->axe_attached = false;
1119 
1120 	splx(s);
1121 
1122 	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->axe_udev, sc->axe_dev);
1123 
1124 	return 0;
1125 }
1126 
1127 int
1128 axe_activate(device_t self, devact_t act)
1129 {
1130 	AXEHIST_FUNC(); AXEHIST_CALLED();
1131 	struct axe_softc *sc = device_private(self);
1132 
1133 	switch (act) {
1134 	case DVACT_DEACTIVATE:
1135 		if_deactivate(&sc->axe_ec.ec_if);
1136 		sc->axe_dying = true;
1137 		return 0;
1138 	default:
1139 		return EOPNOTSUPP;
1140 	}
1141 }
1142 
1143 static int
1144 axe_rx_list_init(struct axe_softc *sc)
1145 {
1146 	AXEHIST_FUNC(); AXEHIST_CALLED();
1147 
1148 	struct axe_cdata *cd;
1149 	struct axe_chain *c;
1150 	int i;
1151 
1152 	cd = &sc->axe_cdata;
1153 	for (i = 0; i < AXE_RX_LIST_CNT; i++) {
1154 		c = &cd->axe_rx_chain[i];
1155 		c->axe_sc = sc;
1156 		c->axe_idx = i;
1157 		if (c->axe_xfer == NULL) {
1158 			int err = usbd_create_xfer(sc->axe_ep[AXE_ENDPT_RX],
1159 			    sc->axe_bufsz, USBD_SHORT_XFER_OK, 0, &c->axe_xfer);
1160 			if (err)
1161 				return err;
1162 			c->axe_buf = usbd_get_buffer(c->axe_xfer);
1163 		}
1164 	}
1165 
1166 	return 0;
1167 }
1168 
1169 static int
1170 axe_tx_list_init(struct axe_softc *sc)
1171 {
1172 	AXEHIST_FUNC(); AXEHIST_CALLED();
1173 	struct axe_cdata *cd;
1174 	struct axe_chain *c;
1175 	int i;
1176 
1177 	cd = &sc->axe_cdata;
1178 	for (i = 0; i < AXE_TX_LIST_CNT; i++) {
1179 		c = &cd->axe_tx_chain[i];
1180 		c->axe_sc = sc;
1181 		c->axe_idx = i;
1182 		if (c->axe_xfer == NULL) {
1183 			int err = usbd_create_xfer(sc->axe_ep[AXE_ENDPT_TX],
1184 			    sc->axe_bufsz, USBD_FORCE_SHORT_XFER, 0,
1185 			    &c->axe_xfer);
1186 			if (err)
1187 				return err;
1188 			c->axe_buf = usbd_get_buffer(c->axe_xfer);
1189 		}
1190 	}
1191 
1192 	return 0;
1193 }
1194 
1195 /*
1196  * A frame has been uploaded: pass the resulting mbuf chain up to
1197  * the higher level protocols.
1198  */
1199 static void
1200 axe_rxeof(struct usbd_xfer *xfer, void * priv, usbd_status status)
1201 {
1202 	AXEHIST_FUNC(); AXEHIST_CALLED();
1203 	struct axe_softc *sc;
1204 	struct axe_chain *c;
1205 	struct ifnet *ifp;
1206 	uint8_t *buf;
1207 	uint32_t total_len;
1208 	struct mbuf *m;
1209 	int s;
1210 
1211 	c = (struct axe_chain *)priv;
1212 	sc = c->axe_sc;
1213 	buf = c->axe_buf;
1214 	ifp = &sc->sc_if;
1215 
1216 	if (sc->axe_dying)
1217 		return;
1218 
1219 	if ((ifp->if_flags & IFF_RUNNING) == 0)
1220 		return;
1221 
1222 	if (status != USBD_NORMAL_COMPLETION) {
1223 		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
1224 			return;
1225 		if (usbd_ratecheck(&sc->axe_rx_notice)) {
1226 			aprint_error_dev(sc->axe_dev, "usb errors on rx: %s\n",
1227 			    usbd_errstr(status));
1228 		}
1229 		if (status == USBD_STALLED)
1230 			usbd_clear_endpoint_stall_async(sc->axe_ep[AXE_ENDPT_RX]);
1231 		goto done;
1232 	}
1233 
1234 	usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);
1235 
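	/*
	 * A single bulk transfer may carry several frames back to back;
	 * keep parsing until the whole buffer has been consumed.
	 */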
1236 	do {
1237 		u_int pktlen = 0;
1238 		u_int rxlen = 0;
1239 		int flags = 0;
1240 		if ((sc->axe_flags & AXSTD_FRAME) != 0) {
1241 			struct axe_sframe_hdr hdr;
1242 
1243 			if (total_len < sizeof(hdr)) {
1244 				ifp->if_ierrors++;
1245 				goto done;
1246 			}
1247 
1248 			memcpy(&hdr, buf, sizeof(hdr));
1249 
1250 			DPRINTFN(20, "total_len %#x len %x ilen %#x",
1251 			    total_len,
1252 			    (le16toh(hdr.len) & AXE_RH1M_RXLEN_MASK),
1253 			    (le16toh(hdr.ilen) & AXE_RH1M_RXLEN_MASK), 0);
1254 
1255 			total_len -= sizeof(hdr);
1256 			buf += sizeof(hdr);
1257 
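			/*
			 * The standard RX header carries the frame length and
			 * its one's complement; if the two don't XOR to the
			 * length mask we have lost framing sync.
			 */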
1258 			if (((le16toh(hdr.len) & AXE_RH1M_RXLEN_MASK) ^
1259 			    (le16toh(hdr.ilen) & AXE_RH1M_RXLEN_MASK)) !=
1260 			    AXE_RH1M_RXLEN_MASK) {
1261 				ifp->if_ierrors++;
1262 				goto done;
1263 			}
1264 
1265 			rxlen = le16toh(hdr.len) & AXE_RH1M_RXLEN_MASK;
1266 			if (total_len < rxlen) {
1267 				pktlen = total_len;
1268 				total_len = 0;
1269 			} else {
1270 				pktlen = rxlen;
1271 				rxlen = roundup2(rxlen, 2);
1272 				total_len -= rxlen;
1273 			}
1274 
1275 		} else if ((sc->axe_flags & AXCSUM_FRAME) != 0) {
1276 			struct axe_csum_hdr csum_hdr;
1277 
1278 			if (total_len <  sizeof(csum_hdr)) {
1279 				ifp->if_ierrors++;
1280 				goto done;
1281 			}
1282 
1283 			memcpy(&csum_hdr, buf, sizeof(csum_hdr));
1284 
1285 			csum_hdr.len = le16toh(csum_hdr.len);
1286 			csum_hdr.ilen = le16toh(csum_hdr.ilen);
1287 			csum_hdr.cstatus = le16toh(csum_hdr.cstatus);
1288 
1289 			DPRINTFN(20, "total_len %#x len %#x ilen %#x"
1290 			    " cstatus %#x", total_len,
1291 			    csum_hdr.len, csum_hdr.ilen, csum_hdr.cstatus);
1292 
1293 			if ((AXE_CSUM_RXBYTES(csum_hdr.len) ^
1294 			    AXE_CSUM_RXBYTES(csum_hdr.ilen)) !=
1295 			    sc->sc_lenmask) {
1296 				/* we lost sync */
1297 				ifp->if_ierrors++;
1298 				DPRINTFN(20, "len %#x ilen %#x lenmask %#x err",
1299 				    AXE_CSUM_RXBYTES(csum_hdr.len),
1300 				    AXE_CSUM_RXBYTES(csum_hdr.ilen),
1301 				    sc->sc_lenmask, 0);
1302 				goto done;
1303 			}
1304 			/*
1305 			 * Get total transferred frame length including
1306 			 * checksum header.  The length should be multiple
1307 			 * of 4.
1308 			 */
1309 			pktlen = AXE_CSUM_RXBYTES(csum_hdr.len);
1310 			u_int len = sizeof(csum_hdr) + pktlen;
1311 			len = (len + 3) & ~3;
1312 			if (total_len < len) {
1313 				DPRINTFN(20, "total_len %#x < len %#x",
1314 				    total_len, len, 0, 0);
1315 				/* invalid length */
1316 				ifp->if_ierrors++;
1317 				goto done;
1318 			}
1319 			buf += sizeof(csum_hdr);
1320 
1321 			const uint16_t cstatus = csum_hdr.cstatus;
1322 
1323 			if (cstatus & AXE_CSUM_HDR_L3_TYPE_IPV4) {
1324 				if (cstatus & AXE_CSUM_HDR_L4_CSUM_ERR)
1325 					flags |= M_CSUM_TCP_UDP_BAD;
1326 				if (cstatus & AXE_CSUM_HDR_L3_CSUM_ERR)
1327 					flags |= M_CSUM_IPv4_BAD;
1328 
1329 				const uint16_t l4type =
1330 				    cstatus & AXE_CSUM_HDR_L4_TYPE_MASK;
1331 
1332 				if (l4type == AXE_CSUM_HDR_L4_TYPE_TCP)
1333 					flags |= M_CSUM_TCPv4;
1334 				if (l4type == AXE_CSUM_HDR_L4_TYPE_UDP)
1335 					flags |= M_CSUM_UDPv4;
1336 			}
1337 			if (total_len < len) {
1338 				pktlen = total_len;
1339 				total_len = 0;
1340 			} else {
1341 				total_len -= len;
1342 				rxlen = len - sizeof(csum_hdr);
1343 			}
1344 			DPRINTFN(20, "total_len %#x len %#x pktlen %#x"
1345 			    " rxlen %#x", total_len, len, pktlen, rxlen);
1346 		} else { /* AX172 */
1347 			pktlen = rxlen = total_len;
1348 			total_len = 0;
1349 		}
1350 
1351 		MGETHDR(m, M_DONTWAIT, MT_DATA);
1352 		if (m == NULL) {
1353 			ifp->if_ierrors++;
1354 			goto done;
1355 		}
1356 
1357 		if (pktlen > MHLEN - ETHER_ALIGN) {
1358 			MCLGET(m, M_DONTWAIT);
1359 			if ((m->m_flags & M_EXT) == 0) {
1360 				m_freem(m);
1361 				ifp->if_ierrors++;
1362 				goto done;
1363 			}
1364 		}
1365 		m->m_data += ETHER_ALIGN;
1366 
1367 		m_set_rcvif(m, ifp);
1368 		m->m_pkthdr.len = m->m_len = pktlen;
1369 		m->m_pkthdr.csum_flags = flags;
1370 
1371 		memcpy(mtod(m, uint8_t *), buf, pktlen);
1372 		buf += rxlen;
1373 
1374 		DPRINTFN(10, "deliver %d (%#x)", m->m_len, m->m_len, 0, 0);
1375 
1376 		s = splnet();
1377 
1378 		if_percpuq_enqueue((ifp)->if_percpuq, (m));
1379 
1380 		splx(s);
1381 
1382 	} while (total_len > 0);
1383 
1384  done:
1385 
1386 	/* Setup new transfer. */
1387 	usbd_setup_xfer(xfer, c, c->axe_buf, sc->axe_bufsz,
1388 	    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, axe_rxeof);
1389 	usbd_transfer(xfer);
1390 
1391 	DPRINTFN(10, "start rx", 0, 0, 0, 0);
1392 }
1393 
1394 /*
1395  * A frame was downloaded to the chip. It's safe for us to clean up
1396  * the list buffers.
1397  */
1398 
1399 static void
1400 axe_txeof(struct usbd_xfer *xfer, void * priv, usbd_status status)
1401 {
1402 	AXEHIST_FUNC(); AXEHIST_CALLED();
1403 	struct axe_chain *c = priv;
1404 	struct axe_softc *sc = c->axe_sc;
1405 	struct ifnet *ifp = &sc->sc_if;
1406 	int s;
1407 
1408 
1409 	if (sc->axe_dying)
1410 		return;
1411 
1412 	s = splnet();
1413 
1414 	ifp->if_timer = 0;
1415 	ifp->if_flags &= ~IFF_OACTIVE;
1416 
1417 	if (status != USBD_NORMAL_COMPLETION) {
1418 		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
1419 			splx(s);
1420 			return;
1421 		}
1422 		ifp->if_oerrors++;
1423 		aprint_error_dev(sc->axe_dev, "usb error on tx: %s\n",
1424 		    usbd_errstr(status));
1425 		if (status == USBD_STALLED)
1426 			usbd_clear_endpoint_stall_async(sc->axe_ep[AXE_ENDPT_TX]);
1427 		splx(s);
1428 		return;
1429 	}
1430 	ifp->if_opackets++;
1431 
1432 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1433 		axe_start(ifp);
1434 
1435 	splx(s);
1436 }
1437 
1438 static void
1439 axe_tick(void *xsc)
1440 {
1441 	AXEHIST_FUNC(); AXEHIST_CALLED();
1442 	struct axe_softc *sc = xsc;
1443 
1444 	if (sc == NULL)
1445 		return;
1446 
1447 	if (sc->axe_dying)
1448 		return;
1449 
1450 	/* Perform periodic stuff in process context */
1451 	usb_add_task(sc->axe_udev, &sc->axe_tick_task, USB_TASKQ_DRIVER);
1452 }
1453 
1454 static void
1455 axe_tick_task(void *xsc)
1456 {
1457 	AXEHIST_FUNC(); AXEHIST_CALLED();
1458 	int s;
1459 	struct axe_softc *sc = xsc;
1460 	struct ifnet *ifp;
1461 	struct mii_data *mii;
1462 
1463 	if (sc == NULL)
1464 		return;
1465 
1466 	if (sc->axe_dying)
1467 		return;
1468 
1469 	ifp = &sc->sc_if;
1470 	mii = &sc->axe_mii;
1471 
1472 	if (mii == NULL)
1473 		return;
1474 
1475 	s = splnet();
1476 
1477 	mii_tick(mii);
1478 	if (sc->axe_link == 0 &&
1479 	    (mii->mii_media_status & IFM_ACTIVE) != 0 &&
1480 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1481 		DPRINTF("got link", 0, 0, 0, 0);
1482 		sc->axe_link++;
1483 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
1484 			axe_start(ifp);
1485 	}
1486 
1487 	callout_schedule(&sc->axe_stat_ch, hz);
1488 
1489 	splx(s);
1490 }
1491 
1492 static int
1493 axe_encap(struct axe_softc *sc, struct mbuf *m, int idx)
1494 {
1495 	struct ifnet *ifp = &sc->sc_if;
1496 	struct axe_chain *c;
1497 	usbd_status err;
1498 	int length, boundary;
1499 
1500 	c = &sc->axe_cdata.axe_tx_chain[idx];
1501 
1502 	/*
1503 	 * Copy the mbuf data into a contiguous buffer, leaving room at
1504 	 * the beginning for the length header on 178-family parts.
1505 	 */
1506 	if (AXE_IS_178_FAMILY(sc)) {
1507 	    	struct axe_sframe_hdr hdr;
1508 
1509 		boundary = (sc->axe_udev->ud_speed == USB_SPEED_HIGH) ? 512 : 64;
1510 
1511 		hdr.len = htole16(m->m_pkthdr.len);
1512 		hdr.ilen = ~hdr.len;
1513 
1514 		memcpy(c->axe_buf, &hdr, sizeof(hdr));
1515 		length = sizeof(hdr);
1516 
1517 		m_copydata(m, 0, m->m_pkthdr.len, c->axe_buf + length);
1518 		length += m->m_pkthdr.len;
1519 
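		/*
		 * If the frame ends exactly on a USB packet boundary, append
		 * an extra, empty length header so the adapter sees an
		 * explicit end of frame.
		 */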
1520 		if ((length % boundary) == 0) {
1521 			hdr.len = 0x0000;
1522 			hdr.ilen = 0xffff;
1523 			memcpy(c->axe_buf + length, &hdr, sizeof(hdr));
1524 			length += sizeof(hdr);
1525 		}
1526 	} else {
1527 		m_copydata(m, 0, m->m_pkthdr.len, c->axe_buf);
1528 		length = m->m_pkthdr.len;
1529 	}
1530 
1531 	usbd_setup_xfer(c->axe_xfer, c, c->axe_buf, length,
1532 	    USBD_FORCE_SHORT_XFER, 10000, axe_txeof);
1533 
1534 	/* Transmit */
1535 	err = usbd_transfer(c->axe_xfer);
1536 	if (err != USBD_IN_PROGRESS) {
1537 		axe_stop(ifp, 0);
1538 		return EIO;
1539 	}
1540 
1541 	sc->axe_cdata.axe_tx_cnt++;
1542 
1543 	return 0;
1544 }
1545 
1546 
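/*
 * Program the AX88772B TX/RX checksum offload enable bits from the
 * current interface capabilities; other chips have no checksum engine,
 * so this is a no-op for them.
 */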
1547 static void
1548 axe_csum_cfg(struct axe_softc *sc)
1549 {
1550 	struct ifnet *ifp = &sc->sc_if;
1551 	uint16_t csum1, csum2;
1552 
1553 	if ((sc->axe_flags & AX772B) != 0) {
1554 		csum1 = 0;
1555 		csum2 = 0;
1556 		if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) != 0)
1557 			csum1 |= AXE_TXCSUM_IP;
1558 		if ((ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) != 0)
1559 			csum1 |= AXE_TXCSUM_TCP;
1560 		if ((ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) != 0)
1561 			csum1 |= AXE_TXCSUM_UDP;
1562 		if ((ifp->if_capenable & IFCAP_CSUM_TCPv6_Tx) != 0)
1563 			csum1 |= AXE_TXCSUM_TCPV6;
1564 		if ((ifp->if_capenable & IFCAP_CSUM_UDPv6_Tx) != 0)
1565 			csum1 |= AXE_TXCSUM_UDPV6;
1566 		axe_cmd(sc, AXE_772B_CMD_WRITE_TXCSUM, csum2, csum1, NULL);
1567 		csum1 = 0;
1568 		csum2 = 0;
1569 
1570 		if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) != 0)
1571 			csum1 |= AXE_RXCSUM_IP;
1572 		if ((ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) != 0)
1573 			csum1 |= AXE_RXCSUM_TCP;
1574 		if ((ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) != 0)
1575 			csum1 |= AXE_RXCSUM_UDP;
1576 		if ((ifp->if_capenable & IFCAP_CSUM_TCPv6_Rx) != 0)
1577 			csum1 |= AXE_RXCSUM_TCPV6;
1578 		if ((ifp->if_capenable & IFCAP_CSUM_UDPv6_Rx) != 0)
1579 			csum1 |= AXE_RXCSUM_UDPV6;
1580 		axe_cmd(sc, AXE_772B_CMD_WRITE_RXCSUM, csum2, csum1, NULL);
1581 	}
1582 }
1583 
1584 static void
1585 axe_start(struct ifnet *ifp)
1586 {
1587 	struct axe_softc *sc;
1588 	struct mbuf *m;
1589 
1590 	sc = ifp->if_softc;
1591 
1592 	if ((ifp->if_flags & (IFF_OACTIVE|IFF_RUNNING)) != IFF_RUNNING)
1593 		return;
1594 
1595 	IFQ_POLL(&ifp->if_snd, m);
1596 	if (m == NULL) {
1597 		return;
1598 	}
1599 
1600 	if (axe_encap(sc, m, 0)) {
1601 		ifp->if_flags |= IFF_OACTIVE;
1602 		return;
1603 	}
1604 	IFQ_DEQUEUE(&ifp->if_snd, m);
1605 
1606 	/*
1607 	 * If there's a BPF listener, bounce a copy of this frame
1608 	 * to him.
1609 	 */
1610 	bpf_mtap(ifp, m);
1611 	m_freem(m);
1612 
1613 	ifp->if_flags |= IFF_OACTIVE;
1614 
1615 	/*
1616 	 * Set a timeout in case the chip goes out to lunch.
1617 	 */
1618 	ifp->if_timer = 5;
1619 
1620 	return;
1621 }
1622 
1623 static int
1624 axe_init(struct ifnet *ifp)
1625 {
1626 	AXEHIST_FUNC(); AXEHIST_CALLED();
1627 	struct axe_softc *sc = ifp->if_softc;
1628 	struct axe_chain *c;
1629 	usbd_status err;
1630 	int rxmode;
1631 	int i, s;
1632 
1633 	s = splnet();
1634 
1635 	if (ifp->if_flags & IFF_RUNNING)
1636 		axe_stop(ifp, 0);
1637 
1638 	/*
1639 	 * Cancel pending I/O and free all RX/TX buffers.
1640 	 */
1641 	axe_reset(sc);
1642 
1643 	axe_lock_mii(sc);
1644 
1645 #if 0
1646 	ret = asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_2 |
1647 			      AX_GPIO_GPO2EN, 5, in_pm);
1648 #endif
1649 	/* Set MAC address and transmitter IPG values. */
1650 	if (AXE_IS_178_FAMILY(sc)) {
1651 		axe_cmd(sc, AXE_178_CMD_WRITE_NODEID, 0, 0, sc->axe_enaddr);
1652 		axe_cmd(sc, AXE_178_CMD_WRITE_IPG012, sc->axe_ipgs[2],
1653 		    (sc->axe_ipgs[1] << 8) | (sc->axe_ipgs[0]), NULL);
1654 	} else {
1655 		axe_cmd(sc, AXE_172_CMD_WRITE_NODEID, 0, 0, sc->axe_enaddr);
1656 		axe_cmd(sc, AXE_172_CMD_WRITE_IPG0, 0, sc->axe_ipgs[0], NULL);
1657 		axe_cmd(sc, AXE_172_CMD_WRITE_IPG1, 0, sc->axe_ipgs[1], NULL);
1658 		axe_cmd(sc, AXE_172_CMD_WRITE_IPG2, 0, sc->axe_ipgs[2], NULL);
1659 	}
1660 	if (AXE_IS_178_FAMILY(sc)) {
1661 		sc->axe_flags &= ~(AXSTD_FRAME | AXCSUM_FRAME);
1662 		if ((sc->axe_flags & AX772B) != 0 &&
1663 		    (ifp->if_capenable & AX_RXCSUM) != 0) {
1664 			sc->sc_lenmask = AXE_CSUM_HDR_LEN_MASK;
1665 			sc->axe_flags |= AXCSUM_FRAME;
1666 		} else {
1667 			sc->sc_lenmask = AXE_HDR_LEN_MASK;
1668 			sc->axe_flags |= AXSTD_FRAME;
1669 		}
1670 	}
1671 
1672 	/* Configure TX/RX checksum offloading. */
1673 	axe_csum_cfg(sc);
1674 
1675 	if (sc->axe_flags & AX772B) {
1676 		/* AX88772B uses a different maximum frame burst configuration. */
1677 		axe_cmd(sc, AXE_772B_CMD_RXCTL_WRITE_CFG,
1678 		    ax88772b_mfb_table[AX88772B_MFB_16K].threshold,
1679 		    ax88772b_mfb_table[AX88772B_MFB_16K].byte_cnt, NULL);
1680 	}
1681 	/* Enable receiver, set RX mode */
1682 	rxmode = (AXE_RXCMD_MULTICAST | AXE_RXCMD_ENABLE);
1683 	if (AXE_IS_178_FAMILY(sc)) {
1684 		if (sc->axe_flags & AX772B) {
1685 			/*
1686 			 * Select RX header format type 1.  Aligning the IP
1687 			 * header on a 4 byte boundary is not needed when
1688 			 * the checksum offloading feature is not used,
1689 			 * because we always copy the received frame in the
1690 			 * RX handler.  When RX checksum offloading is
1691 			 * active, aligning the IP header is required to
1692 			 * reflect the actual frame length including the RX
1693 			 * header size.
1694 			 */
1695 			rxmode |= AXE_772B_RXCMD_HDR_TYPE_1;
1696 			if (sc->axe_flags & AXCSUM_FRAME)
1697 				rxmode |= AXE_772B_RXCMD_IPHDR_ALIGN;
1698 		} else {
1699 			/*
1700 			 * Default Rx buffer size is too small to get
1701 			 * maximum performance.
1702 			 */
1703 #if 0
1704 			if (sc->axe_udev->ud_speed == USB_SPEED_HIGH) {
1705 				/* Largest possible USB buffer size for AX88178 */
1706 #endif
1707 			rxmode |= AXE_178_RXCMD_MFB_16384;
1708 		}
1709 	} else {
1710 		rxmode |= AXE_172_RXCMD_UNICAST;
1711 	}
1712 
1713 
1714 	/* If we want promiscuous mode, set the allframes bit. */
1715 	if (ifp->if_flags & IFF_PROMISC)
1716 		rxmode |= AXE_RXCMD_PROMISC;
1717 
1718 	if (ifp->if_flags & IFF_BROADCAST)
1719 		rxmode |= AXE_RXCMD_BROADCAST;
1720 
1721 	DPRINTF("rxmode %#x", rxmode, 0, 0, 0);
1722 
1723 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
1724 	axe_unlock_mii(sc);
1725 
1726 	/* Load the multicast filter. */
1727 	axe_setmulti(sc);
1728 
1729 	/* Open RX and TX pipes. */
1730 	err = usbd_open_pipe(sc->axe_iface, sc->axe_ed[AXE_ENDPT_RX],
1731 	    USBD_EXCLUSIVE_USE, &sc->axe_ep[AXE_ENDPT_RX]);
1732 	if (err) {
1733 		aprint_error_dev(sc->axe_dev, "open rx pipe failed: %s\n",
1734 		    usbd_errstr(err));
1735 		splx(s);
1736 		return EIO;
1737 	}
1738 
1739 	err = usbd_open_pipe(sc->axe_iface, sc->axe_ed[AXE_ENDPT_TX],
1740 	    USBD_EXCLUSIVE_USE, &sc->axe_ep[AXE_ENDPT_TX]);
1741 	if (err) {
1742 		aprint_error_dev(sc->axe_dev, "open tx pipe failed: %s\n",
1743 		    usbd_errstr(err));
1744 		splx(s);
1745 		return EIO;
1746 	}
1747 
1748 	/* Init RX ring. */
1749 	if (axe_rx_list_init(sc) != 0) {
1750 		aprint_error_dev(sc->axe_dev, "rx list init failed\n");
1751 		splx(s);
1752 		return ENOBUFS;
1753 	}
1754 
1755 	/* Init TX ring. */
1756 	if (axe_tx_list_init(sc) != 0) {
1757 		aprint_error_dev(sc->axe_dev, "tx list init failed\n");
1758 		splx(s);
1759 		return ENOBUFS;
1760 	}
1761 
1762 	/* Start up the receive pipe. */
1763 	for (i = 0; i < AXE_RX_LIST_CNT; i++) {
1764 		c = &sc->axe_cdata.axe_rx_chain[i];
1765 		usbd_setup_xfer(c->axe_xfer, c, c->axe_buf, sc->axe_bufsz,
1766 		    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, axe_rxeof);
1767 		usbd_transfer(c->axe_xfer);
1768 	}
1769 
1770 	ifp->if_flags |= IFF_RUNNING;
1771 	ifp->if_flags &= ~IFF_OACTIVE;
1772 
1773 	splx(s);
1774 
1775 	callout_schedule(&sc->axe_stat_ch, hz);
1776 	return 0;
1777 }
1778 
1779 static int
1780 axe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1781 {
1782 	struct axe_softc *sc = ifp->if_softc;
1783 	int s;
1784 	int error = 0;
1785 
1786 	s = splnet();
1787 
1788 	switch (cmd) {
1789 	case SIOCSIFFLAGS:
1790 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1791 			break;
1792 
1793 		switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
1794 		case IFF_RUNNING:
1795 			axe_stop(ifp, 1);
1796 			break;
1797 		case IFF_UP:
1798 			axe_init(ifp);
1799 			break;
1800 		case IFF_UP | IFF_RUNNING:
1801 			if ((ifp->if_flags ^ sc->axe_if_flags) == IFF_PROMISC)
1802 				axe_setmulti(sc);
1803 			else
1804 				axe_init(ifp);
1805 			break;
1806 		}
1807 		sc->axe_if_flags = ifp->if_flags;
1808 		break;
1809 
1810 	default:
1811 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
1812 			break;
1813 
1814 		error = 0;
1815 
1816 		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI)
1817 			axe_setmulti(sc);
1818 
1819 	}
1820 	splx(s);
1821 
1822 	return error;
1823 }
1824 
1825 static void
1826 axe_watchdog(struct ifnet *ifp)
1827 {
1828 	struct axe_softc *sc;
1829 	struct axe_chain *c;
1830 	usbd_status stat;
1831 	int s;
1832 
1833 	sc = ifp->if_softc;
1834 
1835 	ifp->if_oerrors++;
1836 	aprint_error_dev(sc->axe_dev, "watchdog timeout\n");
1837 
1838 	s = splusb();
1839 	c = &sc->axe_cdata.axe_tx_chain[0];
1840 	usbd_get_xfer_status(c->axe_xfer, NULL, NULL, NULL, &stat);
1841 	axe_txeof(c->axe_xfer, c, stat);
1842 
1843 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1844 		axe_start(ifp);
1845 	splx(s);
1846 }
1847 
1848 /*
1849  * Stop the adapter and free any mbufs allocated to the
1850  * RX and TX lists.
1851  */
1852 static void
1853 axe_stop(struct ifnet *ifp, int disable)
1854 {
1855 	struct axe_softc *sc = ifp->if_softc;
1856 	usbd_status err;
1857 	int i;
1858 
1859 	ifp->if_timer = 0;
1860 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1861 
1862 	callout_stop(&sc->axe_stat_ch);
1863 
1864 	/* Stop transfers. */
1865 	if (sc->axe_ep[AXE_ENDPT_RX] != NULL) {
1866 		err = usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_RX]);
1867 		if (err) {
1868 			aprint_error_dev(sc->axe_dev,
1869 			    "abort rx pipe failed: %s\n", usbd_errstr(err));
1870 		}
1871 	}
1872 
1873 	if (sc->axe_ep[AXE_ENDPT_TX] != NULL) {
1874 		err = usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_TX]);
1875 		if (err) {
1876 			aprint_error_dev(sc->axe_dev,
1877 			    "abort tx pipe failed: %s\n", usbd_errstr(err));
1878 		}
1879 	}
1880 
1881 	if (sc->axe_ep[AXE_ENDPT_INTR] != NULL) {
1882 		err = usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_INTR]);
1883 		if (err) {
1884 			aprint_error_dev(sc->axe_dev,
1885 			    "abort intr pipe failed: %s\n", usbd_errstr(err));
1886 		}
1887 	}
1888 
1889 	axe_reset(sc);
1890 
1891 	/* Free RX resources. */
1892 	for (i = 0; i < AXE_RX_LIST_CNT; i++) {
1893 		if (sc->axe_cdata.axe_rx_chain[i].axe_xfer != NULL) {
1894 			usbd_destroy_xfer(sc->axe_cdata.axe_rx_chain[i].axe_xfer);
1895 			sc->axe_cdata.axe_rx_chain[i].axe_xfer = NULL;
1896 		}
1897 	}
1898 
1899 	/* Free TX resources. */
1900 	for (i = 0; i < AXE_TX_LIST_CNT; i++) {
1901 		if (sc->axe_cdata.axe_tx_chain[i].axe_xfer != NULL) {
1902 			usbd_destroy_xfer(sc->axe_cdata.axe_tx_chain[i].axe_xfer);
1903 			sc->axe_cdata.axe_tx_chain[i].axe_xfer = NULL;
1904 		}
1905 	}
1906 
1907 	/* Close pipes. */
1908 	if (sc->axe_ep[AXE_ENDPT_RX] != NULL) {
1909 		err = usbd_close_pipe(sc->axe_ep[AXE_ENDPT_RX]);
1910 		if (err) {
1911 			aprint_error_dev(sc->axe_dev,
1912 			    "close rx pipe failed: %s\n", usbd_errstr(err));
1913 		}
1914 		sc->axe_ep[AXE_ENDPT_RX] = NULL;
1915 	}
1916 
1917 	if (sc->axe_ep[AXE_ENDPT_TX] != NULL) {
1918 		err = usbd_close_pipe(sc->axe_ep[AXE_ENDPT_TX]);
1919 		if (err) {
1920 			aprint_error_dev(sc->axe_dev,
1921 			    "close tx pipe failed: %s\n", usbd_errstr(err));
1922 		}
1923 		sc->axe_ep[AXE_ENDPT_TX] = NULL;
1924 	}
1925 
1926 	if (sc->axe_ep[AXE_ENDPT_INTR] != NULL) {
1927 		err = usbd_close_pipe(sc->axe_ep[AXE_ENDPT_INTR]);
1928 		if (err) {
1929 			aprint_error_dev(sc->axe_dev,
1930 			    "close intr pipe failed: %s\n", usbd_errstr(err));
1931 		}
1932 		sc->axe_ep[AXE_ENDPT_INTR] = NULL;
1933 	}
1934 
1935 	sc->axe_link = 0;
1936 }
1937 
1938 MODULE(MODULE_CLASS_DRIVER, if_axe, "bpf");
1939 
1940 #ifdef _MODULE
1941 #include "ioconf.c"
1942 #endif
1943 
1944 static int
1945 if_axe_modcmd(modcmd_t cmd, void *aux)
1946 {
1947 	int error = 0;
1948 
1949 	switch (cmd) {
1950 	case MODULE_CMD_INIT:
1951 #ifdef _MODULE
1952 		error = config_init_component(cfdriver_ioconf_axe,
1953 		    cfattach_ioconf_axe, cfdata_ioconf_axe);
1954 #endif
1955 		return error;
1956 	case MODULE_CMD_FINI:
1957 #ifdef _MODULE
1958 		error = config_fini_component(cfdriver_ioconf_axe,
1959 		    cfattach_ioconf_axe, cfdata_ioconf_axe);
1960 #endif
1961 		return error;
1962 	default:
1963 		return ENOTTY;
1964 	}
1965 }
1966