/*	$OpenBSD: if_se.c,v 1.9 2013/12/28 03:34:54 deraadt Exp $	*/

/*-
 * Copyright (c) 2009, 2010 Christopher Zimmermann <madroach@zakweb.de>
 * Copyright (c) 2008, 2009, 2010 Nikolay Denev <ndenev@gmail.com>
 * Copyright (c) 2007, 2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL AUTHORS OR
 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * SiS 190/191 PCI Ethernet NIC driver.
 *
 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original
 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by
 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu
 * <kmliu@sis.com>.  Thanks to Pyun YongHyeon <pyunyh@gmail.com> for
 * review and very useful comments.
 *
 * Ported to OpenBSD by Christopher Zimmermann 2009/10
 *
 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the
 * Linux and Solaris drivers.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/if_sereg.h>

#define SE_RX_RING_CNT		256 /* [8, 1024] */
#define SE_TX_RING_CNT		256 /* [8, 8192] */
#define	SE_RX_BUF_ALIGN		sizeof(uint64_t)

#define SE_RX_RING_SZ		(SE_RX_RING_CNT * sizeof(struct se_desc))
#define SE_TX_RING_SZ		(SE_TX_RING_CNT * sizeof(struct se_desc))
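/*
 * Sizing sketch (illustrative, not normative): struct se_desc carries
 * four 32-bit words (se_cmdsts, se_ptr, se_flags, se_sts_size, as used
 * throughout this file), i.e. 16 bytes, so each default 256-entry ring
 * occupies 256 * 16 = 4096 bytes.  That is one PAGE_SIZE allocation on
 * most platforms, which is why the rings are allocated page-aligned in
 * se_attach() below.
 */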

struct se_list_data {
	struct se_desc		*se_rx_ring;
	struct se_desc		*se_tx_ring;
	bus_dmamap_t		se_rx_dmamap;
	bus_dmamap_t		se_tx_dmamap;
};

struct se_chain_data {
	struct mbuf		*se_rx_mbuf[SE_RX_RING_CNT];
	struct mbuf		*se_tx_mbuf[SE_TX_RING_CNT];
	bus_dmamap_t		se_rx_map[SE_RX_RING_CNT];
	bus_dmamap_t		se_tx_map[SE_TX_RING_CNT];
	uint			se_rx_prod;
	uint			se_tx_prod;
	uint			se_tx_cons;
	uint			se_tx_cnt;
};

struct se_softc {
	struct device		 sc_dev;
	void			*sc_ih;
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_ioh;
	bus_dma_tag_t		 sc_dmat;

	struct mii_data		 sc_mii;
	struct arpcom		 sc_ac;

	struct se_list_data	 se_ldata;
	struct se_chain_data	 se_cdata;

	struct timeout		 sc_tick_tmo;

	int			 sc_flags;
#define	SE_FLAG_FASTETHER	0x0001
#define	SE_FLAG_RGMII		0x0010
#define	SE_FLAG_LINK		0x8000
};

/*
 * Various supported device vendors/types and their names.
 */
const struct pci_matchid se_devices[] = {
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190 },
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_191 }
};

int	se_match(struct device *, void *, void *);
void	se_attach(struct device *, struct device *, void *);
int	se_activate(struct device *, int);

const struct cfattach se_ca = {
	sizeof(struct se_softc),
	se_match, se_attach, NULL, se_activate
};

struct cfdriver se_cd = {
	0, "se", DV_IFNET
};

uint32_t
	se_miibus_cmd(struct se_softc *, uint32_t);
int	se_miibus_readreg(struct device *, int, int);
void	se_miibus_writereg(struct device *, int, int, int);
void	se_miibus_statchg(struct device *);

int	se_newbuf(struct se_softc *, uint);
void	se_discard_rxbuf(struct se_softc *, uint);
int	se_encap(struct se_softc *, struct mbuf *, uint *);
void	se_rxeof(struct se_softc *);
void	se_txeof(struct se_softc *);
int	se_intr(void *);
void	se_tick(void *);
void	se_start(struct ifnet *);
int	se_ioctl(struct ifnet *, u_long, caddr_t);
int	se_init(struct ifnet *);
void	se_stop(struct se_softc *);
void	se_watchdog(struct ifnet *);
int	se_ifmedia_upd(struct ifnet *);
void	se_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int	se_pcib_match(struct pci_attach_args *);
int	se_get_mac_addr_apc(struct se_softc *, uint8_t *);
int	se_get_mac_addr_eeprom(struct se_softc *, uint8_t *);
uint16_t
	se_read_eeprom(struct se_softc *, int);

void	se_iff(struct se_softc *);
void	se_reset(struct se_softc *);
int	se_list_rx_init(struct se_softc *);
int	se_list_rx_free(struct se_softc *);
int	se_list_tx_init(struct se_softc *);
int	se_list_tx_free(struct se_softc *);

/*
 * Register space access macros.
 */

#define	CSR_WRITE_4(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, val)
#define	CSR_WRITE_2(sc, reg, val) \
	bus_space_write_2((sc)->sc_iot, (sc)->sc_ioh, reg, val)
#define	CSR_WRITE_1(sc, reg, val) \
	bus_space_write_1((sc)->sc_iot, (sc)->sc_ioh, reg, val)

#define	CSR_READ_4(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg)
#define	CSR_READ_2(sc, reg) \
	bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, reg)
#define	CSR_READ_1(sc, reg) \
	bus_space_read_1((sc)->sc_iot, (sc)->sc_ioh, reg)
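
/*
 * Usage sketch: these macros expand to bus_space(9) accesses on the
 * memory-mapped register window set up in se_attach().  For example,
 * a read-modify-write of the 16-bit RX MAC control register looks
 * like:
 *
 *	uint16_t v = CSR_READ_2(sc, RxMacControl);
 *	CSR_WRITE_2(sc, RxMacControl, v | AcceptBroadcast);
 */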

/*
 * Read a 16-bit word from the EEPROM at the given offset.
 */
uint16_t
se_read_eeprom(struct se_softc *sc, int offset)
{
	uint32_t val;
	int i;

	KASSERT(offset <= EI_OFFSET);

	CSR_WRITE_4(sc, ROMInterface,
	    EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT));
	DELAY(500);
	for (i = 0; i < SE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, ROMInterface);
		if ((val & EI_REQ) == 0)
			break;
		DELAY(100);
	}
	if (i == SE_TIMEOUT) {
		printf("%s: EEPROM read timeout: 0x%08x\n",
		    sc->sc_dev.dv_xname, val);
		return 0xffff;
	}

	return (val & EI_DATA) >> EI_DATA_SHIFT;
}

int
se_get_mac_addr_eeprom(struct se_softc *sc, uint8_t *dest)
{
	uint16_t val;
	int i;

	val = se_read_eeprom(sc, EEPROMSignature);
	if (val == 0xffff || val == 0x0000) {
		printf("%s: invalid EEPROM signature : 0x%04x\n",
		    sc->sc_dev.dv_xname, val);
		return (EINVAL);
	}

	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		val = se_read_eeprom(sc, EEPROMMACAddr + i / 2);
		dest[i + 0] = (uint8_t)val;
		dest[i + 1] = (uint8_t)(val >> 8);
	}

	if ((se_read_eeprom(sc, EEPROMInfo) & 0x80) != 0)
		sc->sc_flags |= SE_FLAG_RGMII;
	return (0);
}
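
/*
 * Byte-order sketch: each 16-bit EEPROM word holds two MAC address
 * octets, low byte first.  If word EEPROMMACAddr + 0 reads back as
 * 0xb1a0 (an illustrative value), the loop above stores
 * dest[0] = 0xa0 and dest[1] = 0xb1.
 */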

/*
 * For SiS96x, APC CMOS RAM is used to store Ethernet address.
 * APC CMOS RAM is accessed through ISA bridge.
 */
#if defined(__amd64__) || defined(__i386__)
int
se_pcib_match(struct pci_attach_args *pa)
{
	const struct pci_matchid apc_devices[] = {
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_965 },
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_966 },
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_968 }
	};

	return pci_matchbyid(pa, apc_devices, nitems(apc_devices));
}
#endif

int
se_get_mac_addr_apc(struct se_softc *sc, uint8_t *dest)
{
#if defined(__amd64__) || defined(__i386__)
	struct pci_attach_args pa;
	pcireg_t reg;
	bus_space_handle_t ioh;
	int rc, i;

	if (pci_find_device(&pa, se_pcib_match) == 0) {
		printf("\n%s: couldn't find PCI-ISA bridge\n",
		    sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/* Enable ports 0x78 and 0x79 to access APC registers. */
	reg = pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);
	pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg & ~0x02);
	DELAY(50);
	(void)pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);

	/* XXX this abuses bus_space implementation knowledge */
	rc = _bus_space_map(pa.pa_iot, 0x78, 2, 0, &ioh);
	if (rc == 0) {
		/* Read stored Ethernet address. */
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			bus_space_write_1(pa.pa_iot, ioh, 0, 0x09 + i);
			dest[i] = bus_space_read_1(pa.pa_iot, ioh, 1);
		}
		bus_space_write_1(pa.pa_iot, ioh, 0, 0x12);
		if ((bus_space_read_1(pa.pa_iot, ioh, 1) & 0x80) != 0)
			sc->sc_flags |= SE_FLAG_RGMII;
		_bus_space_unmap(pa.pa_iot, ioh, 2, NULL);
	} else
		rc = EINVAL;

	/* Restore access to APC registers. */
	pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg);

	return rc;
#else
	return EINVAL;
#endif
}

uint32_t
se_miibus_cmd(struct se_softc *sc, uint32_t ctrl)
{
	int i;
	uint32_t val;

	CSR_WRITE_4(sc, GMIIControl, ctrl);
	DELAY(10);
	for (i = 0; i < SE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			return val;
		DELAY(10);
	}

	return GMI_REQ;
}
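
/*
 * Command word sketch: callers build the GMIIControl value from the
 * PHY address, register number, an opcode and the GMI_REQ "go" bit.
 * E.g. reading the MII status register (MII_BMSR from <dev/mii/mii.h>)
 * of PHY 1 would use:
 *
 *	ctrl = (1 << GMI_PHY_SHIFT) | (MII_BMSR << GMI_REG_SHIFT) |
 *	    GMI_OP_RD | GMI_REQ;
 *
 * The hardware clears GMI_REQ when the transaction completes, so
 * se_miibus_cmd() returning a value with GMI_REQ still set means the
 * operation timed out.
 */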

int
se_miibus_readreg(struct device *self, int phy, int reg)
{
	struct se_softc *sc = (struct se_softc *)self;
	uint32_t ctrl, val;

	ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
	    GMI_OP_RD | GMI_REQ;
	val = se_miibus_cmd(sc, ctrl);
	if ((val & GMI_REQ) != 0) {
		printf("%s: PHY read timeout : %d\n",
		    sc->sc_dev.dv_xname, reg);
		return 0;
	}
	return (val & GMI_DATA) >> GMI_DATA_SHIFT;
}

void
se_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct se_softc *sc = (struct se_softc *)self;
	uint32_t ctrl, val;

	ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
	    GMI_OP_WR | (data << GMI_DATA_SHIFT) | GMI_REQ;
	val = se_miibus_cmd(sc, ctrl);
	if ((val & GMI_REQ) != 0) {
		printf("%s: PHY write timeout : %d\n",
		    sc->sc_dev.dv_xname, reg);
	}
}

void
se_miibus_statchg(struct device *self)
{
	struct se_softc *sc = (struct se_softc *)self;
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct mii_data *mii = &sc->sc_mii;
	uint32_t ctl, speed;

	speed = 0;
	sc->sc_flags &= ~SE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 10baseT link\n", ifp->if_xname);
#endif
			sc->sc_flags |= SE_FLAG_LINK;
			speed = SC_SPEED_10;
			break;
		case IFM_100_TX:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 100baseTX link\n", ifp->if_xname);
#endif
			sc->sc_flags |= SE_FLAG_LINK;
			speed = SC_SPEED_100;
			break;
		case IFM_1000_T:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 1000baseT link\n", ifp->if_xname);
#endif
			if ((sc->sc_flags & SE_FLAG_FASTETHER) == 0) {
				sc->sc_flags |= SE_FLAG_LINK;
				speed = SC_SPEED_1000;
			}
			break;
		default:
			break;
		}
	}
	if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: no link\n", ifp->if_xname);
#endif
		return;
	}
	/* Reprogram MAC to resolved speed/duplex/flow-control parameters. */
	ctl = CSR_READ_4(sc, StationControl);
	ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
	if (speed == SC_SPEED_1000)
		ctl |= 0x07000000;
	else
		ctl |= 0x04000000;
#ifdef notyet
	if ((sc->sc_flags & SE_FLAG_GMII) != 0)
		ctl |= 0x03000000;
#endif
	ctl |= speed;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		ctl |= SC_FDX;
	CSR_WRITE_4(sc, StationControl, ctl);
	if ((sc->sc_flags & SE_FLAG_RGMII) != 0) {
		CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
		CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
	}
}

void
se_iff(struct se_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hashes[2];
	uint16_t rxfilt;

	rxfilt = CSR_READ_2(sc, RxMacControl);
	rxfilt &= ~(AcceptAllPhys | AcceptBroadcast | AcceptMulticast);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= AcceptBroadcast | AcceptMyPhys;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= AcceptAllPhys;
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

			hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	CSR_WRITE_4(sc, RxHashTable, hashes[0]);
	CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
}
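
/*
 * Hash sketch: ether_crc32_be() returns a 32-bit CRC of the multicast
 * address; the top bit (crc >> 31) picks one of the two 32-bit hash
 * words and the next five bits ((crc >> 26) & 0x1f) pick a bit within
 * it.  E.g. a CRC of 0x84000000 (illustrative) sets bit 1 of
 * RxHashTable2, since 0x84000000 >> 31 == 1 and
 * (0x84000000 >> 26) & 0x1f == 1.
 */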

void
se_reset(struct se_softc *sc)
{
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	/* Soft reset. */
	CSR_WRITE_4(sc, IntrControl, 0x8000);
	CSR_READ_4(sc, IntrControl);
	DELAY(100);
	CSR_WRITE_4(sc, IntrControl, 0);
	/* Stop MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	CSR_WRITE_4(sc, GMIIControl, 0);
}

/*
 * Probe for an SiS chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
se_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	return pci_matchbyid(pa, se_devices, nitems(se_devices));
}

/*
 * Attach the interface. Do ifmedia setup and ethernet/BPF attach.
 */
void
se_attach(struct device *parent, struct device *self, void *aux)
{
	struct se_softc *sc = (struct se_softc *)self;
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	uint8_t eaddr[ETHER_ADDR_LEN];
	const char *intrstr;
	pci_intr_handle_t ih;
	bus_size_t iosize;
	bus_dma_segment_t seg;
	struct se_list_data *ld;
	struct se_chain_data *cd;
	int nseg;
	uint i;
	int rc;

	printf(": ");

	/*
	 * Map control/status registers.
	 */

	rc = pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &iosize, 0);
	if (rc != 0) {
		printf("can't map i/o space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		printf("can't map interrupt\n");
		goto fail1;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, se_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf("can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail1;
	}

	printf("%s", intrstr);

	if (pa->pa_id == PCI_ID_CODE(PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190))
		sc->sc_flags |= SE_FLAG_FASTETHER;

	/* Reset the adapter. */
	se_reset(sc);

	/* Get MAC address from the APC CMOS RAM or the EEPROM. */
	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, 0x70) & (0x01 << 24)) != 0)
		se_get_mac_addr_apc(sc, eaddr);
	else
		se_get_mac_addr_eeprom(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));
	bcopy(eaddr, ac->ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Now do all the DMA mapping stuff
	 */

	sc->sc_dmat = pa->pa_dmat;
	ld = &sc->se_ldata;
	cd = &sc->se_cdata;

	/* First create TX/RX busdma maps. */
	for (i = 0; i < SE_RX_RING_CNT; i++) {
		rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &cd->se_rx_map[i]);
		if (rc != 0) {
			printf("%s: cannot init the RX map array\n",
			    self->dv_xname);
			goto fail2;
		}
	}

	for (i = 0; i < SE_TX_RING_CNT; i++) {
		rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &cd->se_tx_map[i]);
		if (rc != 0) {
			printf("%s: cannot init the TX map array\n",
			    self->dv_xname);
			goto fail2;
		}
	}

	/*
	 * Now allocate a chunk of DMA-able memory for RX and TX ring
	 * descriptors, as a contiguous block of memory.
	 * XXX fix deallocation upon error
	 */

	/* RX */
	rc = bus_dmamem_alloc(sc->sc_dmat, SE_RX_RING_SZ, PAGE_SIZE, 0,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: no memory for RX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_RX_RING_SZ,
	    (caddr_t *)&ld->se_rx_ring, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't map RX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_create(sc->sc_dmat, SE_RX_RING_SZ, 1,
	    SE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_rx_dmamap);
	if (rc != 0) {
		printf("%s: can't alloc RX DMA map\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_load(sc->sc_dmat, ld->se_rx_dmamap,
	    (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't load RX DMA map\n", self->dv_xname);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ);
		bus_dmamap_destroy(sc->sc_dmat, ld->se_rx_dmamap);
		bus_dmamem_free(sc->sc_dmat, &seg, nseg);
		goto fail2;
	}

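	/*
	 * The RX ring above (and the TX ring below) follows the usual
	 * four-step bus_dma(9) recipe: bus_dmamem_alloc() for DMA-safe
	 * memory, bus_dmamem_map() for a kernel virtual mapping, then
	 * bus_dmamap_create() plus bus_dmamap_load() to obtain the bus
	 * address that is later handed to the chip via the
	 * RX_DESC/TX_DESC registers in se_init().
	 */
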
	/* TX */
	rc = bus_dmamem_alloc(sc->sc_dmat, SE_TX_RING_SZ, PAGE_SIZE, 0,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: no memory for TX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_TX_RING_SZ,
	    (caddr_t *)&ld->se_tx_ring, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't map TX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_create(sc->sc_dmat, SE_TX_RING_SZ, 1,
	    SE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_tx_dmamap);
	if (rc != 0) {
		printf("%s: can't alloc TX DMA map\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_load(sc->sc_dmat, ld->se_tx_dmamap,
	    (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't load TX DMA map\n", self->dv_xname);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ);
		bus_dmamap_destroy(sc->sc_dmat, ld->se_tx_dmamap);
		bus_dmamem_free(sc->sc_dmat, &seg, nseg);
		goto fail2;
	}

	timeout_set(&sc->sc_tick_tmo, se_tick, sc);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = se_ioctl;
	ifp->if_start = se_start;
	ifp->if_watchdog = se_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, SE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Do MII setup.
	 */

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = se_miibus_readreg;
	sc->sc_mii.mii_writereg = se_miibus_writereg;
	sc->sc_mii.mii_statchg = se_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, se_ifmedia_upd,
	    se_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	return;

fail2:
	pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
fail1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
}

int
se_activate(struct device *self, int act)
{
	struct se_softc *sc = (struct se_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			se_stop(sc);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			(void)se_init(ifp);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}

	return (rv);
}

/*
 * Initialize the TX descriptors.
 */
int
se_list_tx_init(struct se_softc *sc)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;

	bzero(ld->se_tx_ring, SE_TX_RING_SZ);
	ld->se_tx_ring[SE_TX_RING_CNT - 1].se_flags = htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	cd->se_tx_prod = 0;
	cd->se_tx_cons = 0;
	cd->se_tx_cnt = 0;

	return 0;
}

int
se_list_tx_free(struct se_softc *sc)
{
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	for (i = 0; i < SE_TX_RING_CNT; i++) {
		if (cd->se_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
			m_free(cd->se_tx_mbuf[i]);
			cd->se_tx_mbuf[i] = NULL;
		}
	}

	return 0;
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.
 */
int
se_list_rx_init(struct se_softc *sc)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	bzero(ld->se_rx_ring, SE_RX_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	for (i = 0; i < SE_RX_RING_CNT; i++) {
		if (se_newbuf(sc, i) != 0)
			return ENOBUFS;
	}

	cd->se_rx_prod = 0;

	return 0;
}

int
se_list_rx_free(struct se_softc *sc)
{
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	for (i = 0; i < SE_RX_RING_CNT; i++) {
		if (cd->se_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, cd->se_rx_map[i]);
			m_free(cd->se_rx_mbuf[i]);
			cd->se_rx_mbuf[i] = NULL;
		}
	}

	return 0;
}

/*
 * Initialize an RX descriptor and attach an mbuf cluster.
 */
int
se_newbuf(struct se_softc *sc, uint i)
{
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *desc;
	struct mbuf *m;
	int rc;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (m == NULL) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: MCLGETI failed\n", ifp->if_xname);
#endif
		return ENOBUFS;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, SE_RX_BUF_ALIGN);

	rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_rx_map[i],
	    m, BUS_DMA_NOWAIT);
	if (rc != 0) {
		m_freem(m);
		return ENOBUFS;
	}
	KASSERT(cd->se_rx_map[i]->dm_nsegs == 1);
	bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
	    cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	cd->se_rx_mbuf[i] = m;
	desc = &ld->se_rx_ring[i];
	desc->se_sts_size = 0;
	desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
	desc->se_ptr = htole32((uint32_t)cd->se_rx_map[i]->dm_segs[0].ds_addr);
	desc->se_flags = htole32(cd->se_rx_map[i]->dm_segs[0].ds_len);
	if (i == SE_RX_RING_CNT - 1)
		desc->se_flags |= htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
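
/*
 * Alignment sketch: m_adj(m, SE_RX_BUF_ALIGN) in se_newbuf() trims
 * sizeof(uint64_t) == 8 bytes from the cluster head, so the DMA
 * segment loaded above is MCLBYTES - SE_RX_BUF_ALIGN bytes long
 * (2048 - 8 = 2040 with the usual 2 KB clusters).  se_discard_rxbuf()
 * below rewrites se_flags with that same constant when it recycles a
 * buffer in place.
 */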

void
se_discard_rxbuf(struct se_softc *sc, uint i)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_desc *desc;

	desc = &ld->se_rx_ring[i];
	desc->se_sts_size = 0;
	desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
	desc->se_flags = htole32(MCLBYTES - SE_RX_BUF_ALIGN);
	if (i == SE_RX_RING_CNT - 1)
		desc->se_flags |= htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
se_rxeof(struct se_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *cur_rx;
	uint32_t rxinfo, rxstat;
	uint i;

	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (i = cd->se_rx_prod; ; SE_INC(i, SE_RX_RING_CNT)) {
		cur_rx = &ld->se_rx_ring[i];
		rxinfo = letoh32(cur_rx->se_cmdsts);
		if ((rxinfo & RDC_OWN) != 0)
			break;
		rxstat = letoh32(cur_rx->se_sts_size);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if ((rxstat & RDS_CRCOK) == 0 || SE_RX_ERROR(rxstat) != 0 ||
		    SE_RX_NSEGS(rxstat) != 1) {
			/* XXX We don't support multi-segment frames yet. */
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: rx error %b\n",
				    ifp->if_xname, rxstat, RX_ERR_BITS);
			se_discard_rxbuf(sc, i);
			ifp->if_ierrors++;
			continue;
		}

		/* No errors; receive the packet. */
		bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
		    cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);
		m = cd->se_rx_mbuf[i];
		if (se_newbuf(sc, i) != 0) {
			se_discard_rxbuf(sc, i);
			ifp->if_iqdrops++;
			continue;
		}
		/*
		 * Account for 10 bytes auto padding which is used
		 * to align IP header on a 32bit boundary.  Also note,
		 * CRC bytes are automatically removed by the hardware.
		 */
		m->m_data += SE_RX_PAD_BYTES;
		m->m_pkthdr.len = m->m_len =
		    SE_RX_BYTES(rxstat) - SE_RX_PAD_BYTES;

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);
	}

	cd->se_rx_prod = i;
}
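
/*
 * Padding math sketch: with SE_RX_PAD_BYTES == 10 (as the comment in
 * se_rxeof() describes), a frame whose status word reports
 * SE_RX_BYTES(rxstat) == 74 yields a 64-byte packet: m_data is
 * advanced 10 bytes so the IP header lands on a 32-bit boundary, and
 * m_len becomes 74 - 10 = 64.  The FCS has already been stripped by
 * the hardware at this point.
 */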

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
se_txeof(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *cur_tx;
	uint32_t txstat;
	uint i;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (i = cd->se_tx_cons; cd->se_tx_cnt > 0;
	    cd->se_tx_cnt--, SE_INC(i, SE_TX_RING_CNT)) {
		cur_tx = &ld->se_tx_ring[i];
		txstat = letoh32(cur_tx->se_cmdsts);
		if ((txstat & TDC_OWN) != 0)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;

		if (SE_TX_ERROR(txstat) != 0) {
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: tx error %b\n",
				    ifp->if_xname, txstat, TX_ERR_BITS);
			ifp->if_oerrors++;
			/* TODO: better error differentiation */
		} else
			ifp->if_opackets++;

		if (cd->se_tx_mbuf[i] != NULL) {
			bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
			    cd->se_tx_map[i]->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
			m_free(cd->se_tx_mbuf[i]);
			cd->se_tx_mbuf[i] = NULL;
		}

		cur_tx->se_sts_size = 0;
		cur_tx->se_cmdsts = 0;
		cur_tx->se_ptr = 0;
		cur_tx->se_flags &= htole32(RING_END);
		bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
		    i * sizeof(*cur_tx), sizeof(*cur_tx),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	cd->se_tx_cons = i;
	if (cd->se_tx_cnt == 0)
		ifp->if_timer = 0;
}
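
/*
 * Ring accounting sketch: se_tx_prod, se_tx_cons and se_tx_cnt form a
 * classic producer/consumer pair over the descriptor array, with
 * SE_INC() (from if_sereg.h) advancing an index with wrap-around.
 * se_encap() bumps se_tx_cnt as it fills descriptors, the loop above
 * drains them until it finds one the chip still owns (TDC_OWN set),
 * and se_tx_cnt == 0 means the ring is idle so the watchdog timer can
 * be disarmed.
 */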

void
se_tick(void *xsc)
{
	struct se_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int s;

	s = splnet();
	mii = &sc->sc_mii;
	mii_tick(mii);
	if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
		se_miibus_statchg(&sc->sc_dev);
		if ((sc->sc_flags & SE_FLAG_LINK) != 0 &&
		    !IFQ_IS_EMPTY(&ifp->if_snd))
			se_start(ifp);
	}
	splx(s);

	timeout_add_sec(&sc->sc_tick_tmo, 1);
}

int
se_intr(void *arg)
{
	struct se_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t status;

	status = CSR_READ_4(sc, IntrStatus);
	if (status == 0xffffffff || (status & SE_INTRS) == 0) {
		/* Not ours. */
		return 0;
	}
	/* Ack interrupts. */
	CSR_WRITE_4(sc, IntrStatus, status);
	/* Disable further interrupts. */
	CSR_WRITE_4(sc, IntrMask, 0);

	for (;;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
			se_rxeof(sc);
			/* Wakeup Rx MAC. */
			if ((status & INTR_RX_IDLE) != 0)
				CSR_WRITE_4(sc, RX_CTL,
				    0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
		}
		if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
			se_txeof(sc);
		status = CSR_READ_4(sc, IntrStatus);
		if ((status & SE_INTRS) == 0)
			break;
		/* Ack interrupts. */
		CSR_WRITE_4(sc, IntrStatus, status);
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_4(sc, IntrMask, SE_INTRS);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			se_start(ifp);
	}

	return 1;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
se_encap(struct se_softc *sc, struct mbuf *m_head, uint *txidx)
{
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct mbuf *m;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *desc;
	uint i, cnt = 0;
	int rc;

	/*
	 * If there's no way we can send any packets, return now.
	 */
	if (SE_TX_RING_CNT - cd->se_tx_cnt < 2) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: encap failed, not enough TX desc\n",
			    ifp->if_xname);
#endif
		return ENOBUFS;
	}

	if (m_defrag(m_head, M_DONTWAIT) != 0) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: m_defrag failed\n", ifp->if_xname);
#endif
		return ENOBUFS;	/* XXX should not be fatal */
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	i = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		if ((SE_TX_RING_CNT - (cd->se_tx_cnt + cnt)) < 2) {
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: encap failed, not enough TX desc\n",
				    ifp->if_xname);
#endif
			return ENOBUFS;
		}
		cd->se_tx_mbuf[i] = m;
		rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_tx_map[i],
		    m, BUS_DMA_NOWAIT);
		if (rc != 0)
			return ENOBUFS;
		KASSERT(cd->se_tx_map[i]->dm_nsegs == 1);
		bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
		    cd->se_tx_map[i]->dm_mapsize, BUS_DMASYNC_PREWRITE);

		desc = &ld->se_tx_ring[i];
		desc->se_sts_size = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
		desc->se_ptr =
		    htole32((uint32_t)cd->se_tx_map[i]->dm_segs->ds_addr);
		desc->se_flags = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
		if (i == SE_TX_RING_CNT - 1)
			desc->se_flags |= htole32(RING_END);
		desc->se_cmdsts = htole32(TDC_OWN | TDC_INTR | TDC_DEF |
		    TDC_CRC | TDC_PAD | TDC_BST);
		bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
		    i * sizeof(*desc), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		SE_INC(i, SE_TX_RING_CNT);
		cnt++;
	}

	/* can't happen */
	if (m != NULL)
		return ENOBUFS;

	cd->se_tx_cnt += cnt;
	*txidx = i;

	return 0;
}
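
/*
 * Descriptor sketch: for a single-segment packet, se_encap() fills one
 * 16-byte descriptor: se_sts_size and se_flags both carry the segment
 * length, se_ptr the 32-bit bus address, and se_cmdsts hands the
 * descriptor to the chip (TDC_OWN) while requesting a completion
 * interrupt, deferral, CRC generation, short-frame padding and
 * bursting (TDC_INTR | TDC_DEF | TDC_CRC | TDC_PAD | TDC_BST).
 */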

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
void
se_start(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	struct se_chain_data *cd = &sc->se_cdata;
	uint i, queued = 0;

	if ((sc->sc_flags & SE_FLAG_LINK) == 0 ||
	    (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: can't tx, flags 0x%x 0x%04x\n",
			    ifp->if_xname, sc->sc_flags, (uint)ifp->if_flags);
#endif
		return;
	}

	i = cd->se_tx_prod;

	while (cd->se_tx_mbuf[i] == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (se_encap(sc, m_head, &i) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (queued > 0) {
		/* Transmit */
		cd->se_tx_prod = i;
		CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
		ifp->if_timer = 5;
	}
}

int
se_init(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	uint16_t rxfilt;
	int i;

	splassert(IPL_NET);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	se_stop(sc);
	se_reset(sc);

	/* Init circular RX list. */
	if (se_list_rx_init(sc) == ENOBUFS) {
		se_stop(sc);	/* XXX necessary? */
		return ENOBUFS;
	}

	/* Init TX descriptors. */
	se_list_tx_init(sc);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, TX_DESC,
	    (uint32_t)sc->se_ldata.se_tx_dmamap->dm_segs[0].ds_addr);
	CSR_WRITE_4(sc, RX_DESC,
	    (uint32_t)sc->se_ldata.se_rx_dmamap->dm_segs[0].ds_addr);

	CSR_WRITE_4(sc, TxMacControl, 0x60);
	CSR_WRITE_4(sc, RxWakeOnLan, 0);
	CSR_WRITE_4(sc, RxWakeOnLanData, 0);
	CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN +
	    SE_RX_PAD_BYTES);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, RxMacAddr + i, sc->sc_ac.ac_enaddr[i]);
	/* Configure RX MAC. */
	rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);

	/* Program promiscuous mode and multicast filters. */
	se_iff(sc);

	/*
	 * Clear and enable interrupts.
	 */
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
	CSR_WRITE_4(sc, IntrMask, SE_INTRS);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->sc_flags &= ~SE_FLAG_LINK;
	mii_mediachg(&sc->sc_mii);
	timeout_add_sec(&sc->sc_tick_tmo, 1);

	return 0;
}
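
/*
 * MPS sketch: the RxMPSControl value programmed in se_init() is the
 * largest frame the MAC should accept: ETHER_MAX_LEN (1518) +
 * ETHER_VLAN_ENCAP_LEN (4) + SE_RX_PAD_BYTES (10) = 1532 bytes.  This
 * leaves room for a VLAN tag plus the chip's alignment padding and is
 * what makes the IFCAP_VLAN_MTU capability advertised in se_attach()
 * work.
 */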

/*
 * Set media options.
 */
int
se_ifmedia_upd(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = &sc->sc_mii;
	sc->sc_flags &= ~SE_FLAG_LINK;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

/*
 * Report current media status.
 */
void
se_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct se_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = &sc->sc_mii;
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

int
se_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct se_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int s, rc = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			rc = se_init(ifp);
		if (rc == 0) {
#ifdef INET
			if (ifa->ifa_addr->sa_family == AF_INET)
				arp_ifinit(&sc->sc_ac, ifa);
#endif
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				rc = ENETRESET;
			else
				rc = se_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				se_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		rc = ether_ioctl(ifp, &sc->sc_ac, command, data);
		break;
	}

	if (rc == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			se_iff(sc);
		rc = 0;
	}

	splx(s);
	return rc;
}

void
se_watchdog(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	int s;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	s = splnet();
	se_init(ifp);
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		se_start(ifp);
	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
se_stop(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	timeout_del(&sc->sc_tick_tmo);
	mii_down(&sc->sc_mii);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_READ_4(sc, IntrMask);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
	/* Stop TX/RX MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);
	/* XXX Can we assume active DMA cycles gone? */
	DELAY(2000);
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	sc->sc_flags &= ~SE_FLAG_LINK;
	se_list_rx_free(sc);
	se_list_tx_free(sc);
}
1456