/*	$OpenBSD: if_se.c,v 1.19 2016/04/13 10:34:32 mpi Exp $	*/

/*-
 * Copyright (c) 2009, 2010 Christopher Zimmermann <madroach@zakweb.de>
 * Copyright (c) 2008, 2009, 2010 Nikolay Denev <ndenev@gmail.com>
 * Copyright (c) 2007, 2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL AUTHORS OR
 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * SiS 190/191 PCI Ethernet NIC driver.
 *
 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original
 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by
 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu
 * <kmliu@sis.com>.  Thanks to Pyun YongHyeon <pyunyh@gmail.com> for
 * review and very useful comments.
 *
 * Ported to OpenBSD by Christopher Zimmermann 2009/10
 *
 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the
 * Linux and Solaris drivers.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/if_sereg.h>

#define SE_RX_RING_CNT		256 /* [8, 1024] */
#define SE_TX_RING_CNT		256 /* [8, 8192] */
#define	SE_RX_BUF_ALIGN		sizeof(uint64_t)

#define SE_RX_RING_SZ		(SE_RX_RING_CNT * sizeof(struct se_desc))
#define SE_TX_RING_SZ		(SE_TX_RING_CNT * sizeof(struct se_desc))
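
/*
 * Sizing sketch (an illustration, assuming struct se_desc consists of
 * the four 32-bit words used throughout this file: se_sts_size,
 * se_cmdsts, se_ptr and se_flags): each descriptor is then 16 bytes,
 * so a 256-entry ring occupies 256 * 16 = 4096 bytes, i.e. one 4KB
 * page per ring with the defaults above.
 */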

struct se_list_data {
	struct se_desc		*se_rx_ring;
	struct se_desc		*se_tx_ring;
	bus_dmamap_t		se_rx_dmamap;
	bus_dmamap_t		se_tx_dmamap;
};

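/*
 * Ring bookkeeping (a summary of how the fields below are used
 * elsewhere in this file): se_rx_prod is the next RX slot to check for
 * a completed frame, se_tx_prod is the next free TX slot, se_tx_cons is
 * the oldest in-flight TX slot, and se_tx_cnt counts descriptors the
 * hardware still owns.  Indices wrap with SE_INC().
 */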
struct se_chain_data {
	struct mbuf		*se_rx_mbuf[SE_RX_RING_CNT];
	struct mbuf		*se_tx_mbuf[SE_TX_RING_CNT];
	bus_dmamap_t		se_rx_map[SE_RX_RING_CNT];
	bus_dmamap_t		se_tx_map[SE_TX_RING_CNT];
	uint			se_rx_prod;
	uint			se_tx_prod;
	uint			se_tx_cons;
	uint			se_tx_cnt;
};

struct se_softc {
	struct device		 sc_dev;
	void			*sc_ih;
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_ioh;
	bus_dma_tag_t		 sc_dmat;

	struct mii_data		 sc_mii;
	struct arpcom		 sc_ac;

	struct se_list_data	 se_ldata;
	struct se_chain_data	 se_cdata;

	struct timeout		 sc_tick_tmo;

	int			 sc_flags;
#define	SE_FLAG_FASTETHER	0x0001
#define	SE_FLAG_RGMII		0x0010
#define	SE_FLAG_LINK		0x8000
};

/*
 * Various supported device vendors/types and their names.
 */
const struct pci_matchid se_devices[] = {
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190 },
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_191 }
};

int	se_match(struct device *, void *, void *);
void	se_attach(struct device *, struct device *, void *);
int	se_activate(struct device *, int);

const struct cfattach se_ca = {
	sizeof(struct se_softc),
	se_match, se_attach, NULL, se_activate
};

struct cfdriver se_cd = {
	NULL, "se", DV_IFNET
};

uint32_t
	se_miibus_cmd(struct se_softc *, uint32_t);
int	se_miibus_readreg(struct device *, int, int);
void	se_miibus_writereg(struct device *, int, int, int);
void	se_miibus_statchg(struct device *);

int	se_newbuf(struct se_softc *, uint);
void	se_discard_rxbuf(struct se_softc *, uint);
int	se_encap(struct se_softc *, struct mbuf *, uint *);
void	se_rxeof(struct se_softc *);
void	se_txeof(struct se_softc *);
int	se_intr(void *);
void	se_tick(void *);
void	se_start(struct ifnet *);
int	se_ioctl(struct ifnet *, u_long, caddr_t);
int	se_init(struct ifnet *);
void	se_stop(struct se_softc *);
void	se_watchdog(struct ifnet *);
int	se_ifmedia_upd(struct ifnet *);
void	se_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int	se_pcib_match(struct pci_attach_args *);
int	se_get_mac_addr_apc(struct se_softc *, uint8_t *);
int	se_get_mac_addr_eeprom(struct se_softc *, uint8_t *);
uint16_t
	se_read_eeprom(struct se_softc *, int);

void	se_iff(struct se_softc *);
void	se_reset(struct se_softc *);
int	se_list_rx_init(struct se_softc *);
int	se_list_rx_free(struct se_softc *);
int	se_list_tx_init(struct se_softc *);
int	se_list_tx_free(struct se_softc *);

/*
 * Register space access macros.
 */

#define	CSR_WRITE_4(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, val)
#define	CSR_WRITE_2(sc, reg, val) \
	bus_space_write_2((sc)->sc_iot, (sc)->sc_ioh, reg, val)
#define	CSR_WRITE_1(sc, reg, val) \
	bus_space_write_1((sc)->sc_iot, (sc)->sc_ioh, reg, val)

#define	CSR_READ_4(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg)
#define	CSR_READ_2(sc, reg) \
	bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, reg)
#define	CSR_READ_1(sc, reg) \
	bus_space_read_1((sc)->sc_iot, (sc)->sc_ioh, reg)
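
/*
 * These wrappers go through bus_space, so the same code works however
 * the BAR was mapped.  For example, acknowledging all pending
 * interrupts (as done in se_reset() below) is simply:
 *
 *	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
 */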

/*
 * Read one 16-bit word from the EEPROM at the given word offset.
 */
uint16_t
se_read_eeprom(struct se_softc *sc, int offset)
{
	uint32_t val;
	int i;

	KASSERT(offset <= EI_OFFSET);

	CSR_WRITE_4(sc, ROMInterface,
	    EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT));
	DELAY(500);
	for (i = 0; i < SE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, ROMInterface);
		if ((val & EI_REQ) == 0)
			break;
		DELAY(100);
	}
	if (i == SE_TIMEOUT) {
		printf("%s: EEPROM read timeout: 0x%08x\n",
		    sc->sc_dev.dv_xname, val);
		return 0xffff;
	}

	return (val & EI_DATA) >> EI_DATA_SHIFT;
}

int
se_get_mac_addr_eeprom(struct se_softc *sc, uint8_t *dest)
{
	uint16_t val;
	int i;

	val = se_read_eeprom(sc, EEPROMSignature);
	if (val == 0xffff || val == 0x0000) {
		printf("%s: invalid EEPROM signature: 0x%04x\n",
		    sc->sc_dev.dv_xname, val);
		return (EINVAL);
	}

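	/*
	 * Each 16-bit EEPROM word holds two address bytes, low byte
	 * first, so words EEPROMMACAddr .. EEPROMMACAddr + 2 yield the
	 * six bytes of the station address in order.
	 */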
	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		val = se_read_eeprom(sc, EEPROMMACAddr + i / 2);
		dest[i + 0] = (uint8_t)val;
		dest[i + 1] = (uint8_t)(val >> 8);
	}

	if ((se_read_eeprom(sc, EEPROMInfo) & 0x80) != 0)
		sc->sc_flags |= SE_FLAG_RGMII;
	return (0);
}

/*
 * On SiS96x chipsets, the Ethernet address is stored in APC CMOS RAM,
 * which is accessed through the ISA bridge.
 */
#if defined(__amd64__) || defined(__i386__)
int
se_pcib_match(struct pci_attach_args *pa)
{
	const struct pci_matchid apc_devices[] = {
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_965 },
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_966 },
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_968 }
	};

	return pci_matchbyid(pa, apc_devices, nitems(apc_devices));
}
#endif

int
se_get_mac_addr_apc(struct se_softc *sc, uint8_t *dest)
{
#if defined(__amd64__) || defined(__i386__)
	struct pci_attach_args pa;
	pcireg_t reg;
	bus_space_handle_t ioh;
	int rc, i;

	if (pci_find_device(&pa, se_pcib_match) == 0) {
		printf("\n%s: couldn't find PCI-ISA bridge\n",
		    sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/* Enable port 0x78 and 0x79 to access APC registers. */
	reg = pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);
	pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg & ~0x02);
	DELAY(50);
	(void)pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);

	/* XXX this abuses bus_space implementation knowledge */
	rc = _bus_space_map(pa.pa_iot, 0x78, 2, 0, &ioh);
	if (rc == 0) {
		/* Read stored Ethernet address. */
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			bus_space_write_1(pa.pa_iot, ioh, 0, 0x09 + i);
			dest[i] = bus_space_read_1(pa.pa_iot, ioh, 1);
		}
		bus_space_write_1(pa.pa_iot, ioh, 0, 0x12);
		if ((bus_space_read_1(pa.pa_iot, ioh, 1) & 0x80) != 0)
			sc->sc_flags |= SE_FLAG_RGMII;
		_bus_space_unmap(pa.pa_iot, ioh, 2, NULL);
	} else
		rc = EINVAL;

	/* Restore access to APC registers. */
	pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg);

	return rc;
#endif
	return EINVAL;
}

uint32_t
se_miibus_cmd(struct se_softc *sc, uint32_t ctrl)
{
	int i;
	uint32_t val;

	CSR_WRITE_4(sc, GMIIControl, ctrl);
	DELAY(10);
	for (i = 0; i < SE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			return val;
		DELAY(10);
	}

	return GMI_REQ;
}

int
se_miibus_readreg(struct device *self, int phy, int reg)
{
	struct se_softc *sc = (struct se_softc *)self;
	uint32_t ctrl, val;

	ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
	    GMI_OP_RD | GMI_REQ;
	val = se_miibus_cmd(sc, ctrl);
	if ((val & GMI_REQ) != 0) {
		printf("%s: PHY read timeout: %d\n",
		    sc->sc_dev.dv_xname, reg);
		return 0;
	}
	return (val & GMI_DATA) >> GMI_DATA_SHIFT;
}

void
se_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct se_softc *sc = (struct se_softc *)self;
	uint32_t ctrl, val;

	ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
	    GMI_OP_WR | (data << GMI_DATA_SHIFT) | GMI_REQ;
	val = se_miibus_cmd(sc, ctrl);
	if ((val & GMI_REQ) != 0) {
		printf("%s: PHY write timeout: %d\n",
		    sc->sc_dev.dv_xname, reg);
	}
}

void
se_miibus_statchg(struct device *self)
{
	struct se_softc *sc = (struct se_softc *)self;
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct mii_data *mii = &sc->sc_mii;
	uint32_t ctl, speed;

	speed = 0;
	sc->sc_flags &= ~SE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 10baseT link\n", ifp->if_xname);
#endif
			sc->sc_flags |= SE_FLAG_LINK;
			speed = SC_SPEED_10;
			break;
		case IFM_100_TX:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 100baseTX link\n", ifp->if_xname);
#endif
			sc->sc_flags |= SE_FLAG_LINK;
			speed = SC_SPEED_100;
			break;
		case IFM_1000_T:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 1000baseT link\n", ifp->if_xname);
#endif
			if ((sc->sc_flags & SE_FLAG_FASTETHER) == 0) {
				sc->sc_flags |= SE_FLAG_LINK;
				speed = SC_SPEED_1000;
			}
			break;
		default:
			break;
		}
	}
	if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: no link\n", ifp->if_xname);
#endif
		return;
	}
	/* Reprogram MAC to resolved speed/duplex/flow-control parameters. */
	ctl = CSR_READ_4(sc, StationControl);
	ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
	if (speed == SC_SPEED_1000)
		ctl |= 0x07000000;
	else
		ctl |= 0x04000000;
#ifdef notyet
	if ((sc->sc_flags & SE_FLAG_GMII) != 0)
		ctl |= 0x03000000;
#endif
	ctl |= speed;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		ctl |= SC_FDX;
	CSR_WRITE_4(sc, StationControl, ctl);
	if ((sc->sc_flags & SE_FLAG_RGMII) != 0) {
		CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
		CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
	}
}

void
se_iff(struct se_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hashes[2];
	uint16_t rxfilt;

	rxfilt = CSR_READ_2(sc, RxMacControl);
	rxfilt &= ~(AcceptAllPhys | AcceptBroadcast | AcceptMulticast);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= AcceptBroadcast | AcceptMyPhys;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= AcceptAllPhys;
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

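			/*
			 * Hash sketch (inferred from the update below,
			 * not from chip documentation): CRC bit 31 picks
			 * the hash register and bits 30:26 pick the bit
			 * within it.  E.g. an arbitrary crc of 0x84ced3ab
			 * sets bit 1 of hashes[1].
			 */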
			hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	CSR_WRITE_4(sc, RxHashTable, hashes[0]);
	CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
}

void
se_reset(struct se_softc *sc)
{
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	/* Soft reset. */
	CSR_WRITE_4(sc, IntrControl, 0x8000);
	CSR_READ_4(sc, IntrControl);
	DELAY(100);
	CSR_WRITE_4(sc, IntrControl, 0);
	/* Stop MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	CSR_WRITE_4(sc, GMIIControl, 0);
}

/*
 * Probe for an SiS chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
se_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	return pci_matchbyid(pa, se_devices, nitems(se_devices));
}

/*
 * Attach the interface. Do ifmedia setup and ethernet/BPF attach.
 */
void
se_attach(struct device *parent, struct device *self, void *aux)
{
	struct se_softc *sc = (struct se_softc *)self;
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	uint8_t eaddr[ETHER_ADDR_LEN];
	const char *intrstr;
	pci_intr_handle_t ih;
	bus_size_t iosize;
	bus_dma_segment_t seg;
	struct se_list_data *ld;
	struct se_chain_data *cd;
	int nseg;
	uint i;
	int rc;

	printf(": ");

	/*
	 * Map control/status registers.
	 */

	rc = pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &iosize, 0);
	if (rc != 0) {
		printf("can't map i/o space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		printf("can't map interrupt\n");
		goto fail1;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, se_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf("can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail1;
	}

	printf("%s", intrstr);

	if (pa->pa_id == PCI_ID_CODE(PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190))
		sc->sc_flags |= SE_FLAG_FASTETHER;

	/* Reset the adapter. */
	se_reset(sc);

	/* Get MAC address from the EEPROM. */
	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, 0x70) & (0x01 << 24)) != 0)
		se_get_mac_addr_apc(sc, eaddr);
	else
		se_get_mac_addr_eeprom(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));
	bcopy(eaddr, ac->ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Now do all the DMA mapping stuff
	 */

	sc->sc_dmat = pa->pa_dmat;
	ld = &sc->se_ldata;
	cd = &sc->se_cdata;

	/* First create TX/RX busdma maps. */
	for (i = 0; i < SE_RX_RING_CNT; i++) {
		rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &cd->se_rx_map[i]);
		if (rc != 0) {
			printf("%s: cannot init the RX map array\n",
			    self->dv_xname);
			goto fail2;
		}
	}

	for (i = 0; i < SE_TX_RING_CNT; i++) {
		rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &cd->se_tx_map[i]);
		if (rc != 0) {
			printf("%s: cannot init the TX map array\n",
			    self->dv_xname);
			goto fail2;
		}
	}

	/*
	 * Now allocate a chunk of DMA-able memory for RX and TX ring
	 * descriptors, as a contiguous block of memory.
	 * XXX fix deallocation upon error
	 */

	/* RX */
	rc = bus_dmamem_alloc(sc->sc_dmat, SE_RX_RING_SZ, PAGE_SIZE, 0,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: no memory for RX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_RX_RING_SZ,
	    (caddr_t *)&ld->se_rx_ring, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't map RX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_create(sc->sc_dmat, SE_RX_RING_SZ, 1,
	    SE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_rx_dmamap);
	if (rc != 0) {
		printf("%s: can't alloc RX DMA map\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_load(sc->sc_dmat, ld->se_rx_dmamap,
	    (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't load RX DMA map\n", self->dv_xname);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ);
		bus_dmamap_destroy(sc->sc_dmat, ld->se_rx_dmamap);
		bus_dmamem_free(sc->sc_dmat, &seg, nseg);
		goto fail2;
	}

	/* TX */
	rc = bus_dmamem_alloc(sc->sc_dmat, SE_TX_RING_SZ, PAGE_SIZE, 0,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: no memory for TX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_TX_RING_SZ,
	    (caddr_t *)&ld->se_tx_ring, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't map TX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_create(sc->sc_dmat, SE_TX_RING_SZ, 1,
	    SE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_tx_dmamap);
	if (rc != 0) {
		printf("%s: can't alloc TX DMA map\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_load(sc->sc_dmat, ld->se_tx_dmamap,
	    (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't load TX DMA map\n", self->dv_xname);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ);
		bus_dmamap_destroy(sc->sc_dmat, ld->se_tx_dmamap);
		bus_dmamem_free(sc->sc_dmat, &seg, nseg);
		goto fail2;
	}

	timeout_set(&sc->sc_tick_tmo, se_tick, sc);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = se_ioctl;
	ifp->if_start = se_start;
	ifp->if_watchdog = se_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, SE_TX_RING_CNT - 1);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Do MII setup.
	 */

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = se_miibus_readreg;
	sc->sc_mii.mii_writereg = se_miibus_writereg;
	sc->sc_mii.mii_statchg = se_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, se_ifmedia_upd,
	    se_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	return;

fail2:
	pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
fail1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
}

int
se_activate(struct device *self, int act)
{
	struct se_softc *sc = (struct se_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			se_stop(sc);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			(void)se_init(ifp);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}

	return (rv);
}

/*
 * Initialize the TX descriptors.
 */
int
se_list_tx_init(struct se_softc *sc)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;

	bzero(ld->se_tx_ring, SE_TX_RING_SZ);
	ld->se_tx_ring[SE_TX_RING_CNT - 1].se_flags = htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	cd->se_tx_prod = 0;
	cd->se_tx_cons = 0;
	cd->se_tx_cnt = 0;

	return 0;
}

int
se_list_tx_free(struct se_softc *sc)
{
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	for (i = 0; i < SE_TX_RING_CNT; i++) {
		if (cd->se_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
			m_free(cd->se_tx_mbuf[i]);
			cd->se_tx_mbuf[i] = NULL;
		}
	}

	return 0;
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.
 */
int
se_list_rx_init(struct se_softc *sc)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	bzero(ld->se_rx_ring, SE_RX_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	for (i = 0; i < SE_RX_RING_CNT; i++) {
		if (se_newbuf(sc, i) != 0)
			return ENOBUFS;
	}

	cd->se_rx_prod = 0;

	return 0;
}

int
se_list_rx_free(struct se_softc *sc)
{
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	for (i = 0; i < SE_RX_RING_CNT; i++) {
		if (cd->se_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, cd->se_rx_map[i]);
			m_free(cd->se_rx_mbuf[i]);
			cd->se_rx_mbuf[i] = NULL;
		}
	}

	return 0;
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
int
se_newbuf(struct se_softc *sc, uint i)
{
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *desc;
	struct mbuf *m;
	int rc;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (m == NULL) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: MCLGETI failed\n", ifp->if_xname);
#endif
		return ENOBUFS;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
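	/*
	 * Trim SE_RX_BUF_ALIGN (8) bytes off the front of the cluster;
	 * with the full-cluster length set above this leaves a DMA
	 * length of MCLBYTES - SE_RX_BUF_ALIGN, matching what
	 * se_discard_rxbuf() reprograms later.
	 */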
	m_adj(m, SE_RX_BUF_ALIGN);

	rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_rx_map[i],
	    m, BUS_DMA_NOWAIT);
	if (rc != 0) {
		m_freem(m);
		return ENOBUFS;
	}
	KASSERT(cd->se_rx_map[i]->dm_nsegs == 1);
	bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
	    cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	cd->se_rx_mbuf[i] = m;
	desc = &ld->se_rx_ring[i];
	desc->se_sts_size = 0;
	desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
	desc->se_ptr = htole32((uint32_t)cd->se_rx_map[i]->dm_segs[0].ds_addr);
	desc->se_flags = htole32(cd->se_rx_map[i]->dm_segs[0].ds_len);
	if (i == SE_RX_RING_CNT - 1)
		desc->se_flags |= htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}

void
se_discard_rxbuf(struct se_softc *sc, uint i)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_desc *desc;

	desc = &ld->se_rx_ring[i];
	desc->se_sts_size = 0;
	desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
	desc->se_flags = htole32(MCLBYTES - SE_RX_BUF_ALIGN);
	if (i == SE_RX_RING_CNT - 1)
		desc->se_flags |= htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
se_rxeof(struct se_softc *sc)
{
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *cur_rx;
	uint32_t rxinfo, rxstat;
	uint i;

	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
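	/*
	 * Descriptor ownership, as used throughout this file:
	 * se_newbuf() hands a slot to the chip by setting RDC_OWN and
	 * the chip clears it once a frame has been stored, so the scan
	 * below stops at the first descriptor the hardware still owns.
	 */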
	for (i = cd->se_rx_prod; ; SE_INC(i, SE_RX_RING_CNT)) {
		cur_rx = &ld->se_rx_ring[i];
		rxinfo = letoh32(cur_rx->se_cmdsts);
		if ((rxinfo & RDC_OWN) != 0)
			break;
		rxstat = letoh32(cur_rx->se_sts_size);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if ((rxstat & RDS_CRCOK) == 0 || SE_RX_ERROR(rxstat) != 0 ||
		    SE_RX_NSEGS(rxstat) != 1) {
			/* XXX We don't support multi-segment frames yet. */
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: rx error %b\n",
				    ifp->if_xname, rxstat, RX_ERR_BITS);
			se_discard_rxbuf(sc, i);
			ifp->if_ierrors++;
			continue;
		}

		/* No errors; receive the packet. */
		bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
		    cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);
		m = cd->se_rx_mbuf[i];
		if (se_newbuf(sc, i) != 0) {
			se_discard_rxbuf(sc, i);
			ifp->if_iqdrops++;
			continue;
		}
		/*
		 * Account for the 10 bytes of auto padding the hardware
		 * inserts to align the IP header on a 32-bit boundary.
		 * Note that the CRC bytes have already been stripped by
		 * the hardware.
		 */
		m->m_data += SE_RX_PAD_BYTES;
		m->m_pkthdr.len = m->m_len =
		    SE_RX_BYTES(rxstat) - SE_RX_PAD_BYTES;

		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	cd->se_rx_prod = i;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
se_txeof(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *cur_tx;
	uint32_t txstat;
	uint i;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (i = cd->se_tx_cons; cd->se_tx_cnt > 0;
	    cd->se_tx_cnt--, SE_INC(i, SE_TX_RING_CNT)) {
		cur_tx = &ld->se_tx_ring[i];
		txstat = letoh32(cur_tx->se_cmdsts);
		if ((txstat & TDC_OWN) != 0)
			break;

		ifq_clr_oactive(&ifp->if_snd);

		if (SE_TX_ERROR(txstat) != 0) {
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: tx error %b\n",
				    ifp->if_xname, txstat, TX_ERR_BITS);
			ifp->if_oerrors++;
			/* TODO: better error differentiation */
		} else
			ifp->if_opackets++;

		if (cd->se_tx_mbuf[i] != NULL) {
			bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
			    cd->se_tx_map[i]->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
			m_free(cd->se_tx_mbuf[i]);
			cd->se_tx_mbuf[i] = NULL;
		}

		cur_tx->se_sts_size = 0;
		cur_tx->se_cmdsts = 0;
		cur_tx->se_ptr = 0;
		cur_tx->se_flags &= htole32(RING_END);
		bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
		    i * sizeof(*cur_tx), sizeof(*cur_tx),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	cd->se_tx_cons = i;
	if (cd->se_tx_cnt == 0)
		ifp->if_timer = 0;
}

void
se_tick(void *xsc)
{
	struct se_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int s;

	s = splnet();
	mii = &sc->sc_mii;
	mii_tick(mii);
	if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
		se_miibus_statchg(&sc->sc_dev);
		if ((sc->sc_flags & SE_FLAG_LINK) != 0 &&
		    !IFQ_IS_EMPTY(&ifp->if_snd))
			se_start(ifp);
	}
	splx(s);

	timeout_add_sec(&sc->sc_tick_tmo, 1);
}

int
se_intr(void *arg)
{
	struct se_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t status;

	status = CSR_READ_4(sc, IntrStatus);
	if (status == 0xffffffff || (status & SE_INTRS) == 0) {
		/* Not ours. */
		return 0;
	}
	/* Ack interrupts. */
	CSR_WRITE_4(sc, IntrStatus, status);
	/* Disable further interrupts. */
	CSR_WRITE_4(sc, IntrMask, 0);

	for (;;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
			se_rxeof(sc);
			/* Wakeup Rx MAC. */
			if ((status & INTR_RX_IDLE) != 0)
				CSR_WRITE_4(sc, RX_CTL,
				    0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
		}
		if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
			se_txeof(sc);
		status = CSR_READ_4(sc, IntrStatus);
		if ((status & SE_INTRS) == 0)
			break;
		/* Ack interrupts. */
		CSR_WRITE_4(sc, IntrStatus, status);
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/* Re-enable interrupts */
		CSR_WRITE_4(sc, IntrMask, SE_INTRS);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			se_start(ifp);
	}

	return 1;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
se_encap(struct se_softc *sc, struct mbuf *m_head, uint *txidx)
{
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct mbuf *m;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *desc;
	uint i, cnt = 0;
	int rc;

	/*
	 * If there's no way we can send any packets, return now.
	 */
	if (SE_TX_RING_CNT - cd->se_tx_cnt < 2) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: encap failed, not enough TX desc\n",
			    ifp->if_xname);
#endif
		return ENOBUFS;
	}

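	/*
	 * Each TX dmamap was created in se_attach() with a single
	 * segment, so linearize the chain up front; a chain that still
	 * does not fit in one cluster cannot be sent.
	 */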
	if (m_defrag(m_head, M_DONTWAIT) != 0) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: m_defrag failed\n", ifp->if_xname);
#endif
		return ENOBUFS;	/* XXX should not be fatal */
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	i = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		if ((SE_TX_RING_CNT - (cd->se_tx_cnt + cnt)) < 2) {
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: encap failed, not enough TX desc\n",
				    ifp->if_xname);
#endif
			return ENOBUFS;
		}
		cd->se_tx_mbuf[i] = m;
		rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_tx_map[i],
		    m, BUS_DMA_NOWAIT);
		if (rc != 0)
			return ENOBUFS;
		KASSERT(cd->se_tx_map[i]->dm_nsegs == 1);
		bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
		    cd->se_tx_map[i]->dm_mapsize, BUS_DMASYNC_PREWRITE);

		desc = &ld->se_tx_ring[i];
		desc->se_sts_size = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
		desc->se_ptr =
		    htole32((uint32_t)cd->se_tx_map[i]->dm_segs->ds_addr);
		desc->se_flags = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
		if (i == SE_TX_RING_CNT - 1)
			desc->se_flags |= htole32(RING_END);
		desc->se_cmdsts = htole32(TDC_OWN | TDC_INTR | TDC_DEF |
		    TDC_CRC | TDC_PAD | TDC_BST);
		bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
		    i * sizeof(*desc), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		SE_INC(i, SE_TX_RING_CNT);
		cnt++;
	}

	/* can't happen */
	if (m != NULL)
		return ENOBUFS;

	cd->se_tx_cnt += cnt;
	*txidx = i;

	return 0;
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
void
se_start(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	struct se_chain_data *cd = &sc->se_cdata;
	uint i, queued = 0;

	if ((sc->sc_flags & SE_FLAG_LINK) == 0 ||
	    !(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: can't tx, flags 0x%x 0x%04x\n",
			    ifp->if_xname, sc->sc_flags, (uint)ifp->if_flags);
#endif
		return;
	}

	i = cd->se_tx_prod;

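	/*
	 * A non-NULL se_tx_mbuf[] slot still belongs to an in-flight
	 * frame that se_txeof() has not reclaimed yet, so stop queueing
	 * as soon as the producer index catches up with one.
	 */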
	while (cd->se_tx_mbuf[i] == NULL) {
		m_head = ifq_deq_begin(&ifp->if_snd);
		if (m_head == NULL)
			break;

		if (se_encap(sc, m_head, &i) != 0) {
			ifq_deq_rollback(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m_head);
		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (queued > 0) {
		/* Transmit */
		cd->se_tx_prod = i;
		CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
		ifp->if_timer = 5;
	}
}

int
se_init(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	uint16_t rxfilt;
	int i;

	splassert(IPL_NET);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	se_stop(sc);
	se_reset(sc);

	/* Init circular RX list. */
	if (se_list_rx_init(sc) == ENOBUFS) {
		se_stop(sc);	/* XXX necessary? */
		return ENOBUFS;
	}

	/* Init TX descriptors. */
	se_list_tx_init(sc);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, TX_DESC,
	    (uint32_t)sc->se_ldata.se_tx_dmamap->dm_segs[0].ds_addr);
	CSR_WRITE_4(sc, RX_DESC,
	    (uint32_t)sc->se_ldata.se_rx_dmamap->dm_segs[0].ds_addr);

	CSR_WRITE_4(sc, TxMacControl, 0x60);
	CSR_WRITE_4(sc, RxWakeOnLan, 0);
	CSR_WRITE_4(sc, RxWakeOnLanData, 0);
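	/*
	 * Program the maximum receive packet size: a VLAN-tagged maximum
	 * Ethernet frame plus the prepended pad (1518 + 4 +
	 * SE_RX_PAD_BYTES, the 10-byte pad noted in se_rxeof()).
	 */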
	CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN +
	    SE_RX_PAD_BYTES);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, RxMacAddr + i, sc->sc_ac.ac_enaddr[i]);
	/* Configure RX MAC. */
	rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);

	/* Program promiscuous mode and multicast filters. */
	se_iff(sc);

	/*
	 * Clear and enable interrupts.
	 */
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
	CSR_WRITE_4(sc, IntrMask, SE_INTRS);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	sc->sc_flags &= ~SE_FLAG_LINK;
	mii_mediachg(&sc->sc_mii);
	timeout_add_sec(&sc->sc_tick_tmo, 1);

	return 0;
}

/*
 * Set media options.
 */
int
se_ifmedia_upd(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = &sc->sc_mii;
	sc->sc_flags &= ~SE_FLAG_LINK;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

/*
 * Report current media status.
 */
void
se_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct se_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = &sc->sc_mii;
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

int
se_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct se_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, rc = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			rc = se_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				rc = ENETRESET;
			else
				rc = se_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				se_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		rc = ether_ioctl(ifp, &sc->sc_ac, command, data);
		break;
	}

	if (rc == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			se_iff(sc);
		rc = 0;
	}

	splx(s);
	return rc;
}

void
se_watchdog(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	int s;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	s = splnet();
	se_init(ifp);
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		se_start(ifp);
	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
se_stop(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	timeout_del(&sc->sc_tick_tmo);
	mii_down(&sc->sc_mii);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_READ_4(sc, IntrMask);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
	/* Stop TX/RX MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);
	/* XXX Can we assume active DMA cycles gone? */
	DELAY(2000);
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	sc->sc_flags &= ~SE_FLAG_LINK;
	se_list_rx_free(sc);
	se_list_tx_free(sc);
}
1441