xref: /openbsd-src/sys/dev/pci/if_txp.c (revision b2ea75c1b17e1a9a339660e7ed45cd24946b230e)
1 /*	$OpenBSD: if_txp.c,v 1.50 2001/08/12 20:03:49 mickey Exp $	*/
2 
3 /*
4  * Copyright (c) 2001
5  *	Jason L. Wright <jason@thought.net>, Theo de Raadt, and
6  *	Aaron Campbell <aaron@monkey.org>.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by Jason L. Wright,
19  *	Theo de Raadt and Aaron Campbell.
20  * 4. Neither the name of the author nor the names of any co-contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
25  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
34  * THE POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 /*
38  * Driver for 3c990 (Typhoon) Ethernet ASIC
39  */
40 
41 #include "bpfilter.h"
42 #include "vlan.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/sockio.h>
47 #include <sys/mbuf.h>
48 #include <sys/malloc.h>
49 #include <sys/kernel.h>
50 #include <sys/socket.h>
51 #include <sys/device.h>
52 #include <sys/timeout.h>
53 
54 #include <net/if.h>
55 #include <net/if_dl.h>
56 #include <net/if_types.h>
57 
58 #ifdef INET
59 #include <netinet/in.h>
60 #include <netinet/in_systm.h>
61 #include <netinet/in_var.h>
62 #include <netinet/ip.h>
63 #include <netinet/if_ether.h>
64 #endif
65 
66 #include <net/if_media.h>
67 
68 #if NBPFILTER > 0
69 #include <net/bpf.h>
70 #endif
71 
72 #if NVLAN > 0
73 #include <net/if_vlan_var.h>
74 #endif
75 
76 #include <vm/vm.h>              /* for vtophys */
77 #include <vm/vm_kern.h>
78 #include <machine/bus.h>
79 
80 #include <dev/mii/mii.h>
81 #include <dev/mii/miivar.h>
82 #include <dev/pci/pcireg.h>
83 #include <dev/pci/pcivar.h>
84 #include <dev/pci/pcidevs.h>
85 
86 #include <dev/pci/if_txpreg.h>
87 
88 #include <dev/microcode/typhoon/3c990img.h>
89 
90 int txp_probe		__P((struct device *, void *, void *));
91 void txp_attach		__P((struct device *, struct device *, void *));
92 int txp_intr		__P((void *));
93 void txp_tick		__P((void *));
94 void txp_shutdown	__P((void *));
95 int txp_ioctl		__P((struct ifnet *, u_long, caddr_t));
96 void txp_start		__P((struct ifnet *));
97 void txp_stop		__P((struct txp_softc *));
98 void txp_init		__P((struct txp_softc *));
99 void txp_watchdog	__P((struct ifnet *));
100 
101 int txp_chip_init __P((struct txp_softc *));
102 int txp_reset_adapter __P((struct txp_softc *));
103 int txp_download_fw __P((struct txp_softc *));
104 int txp_download_fw_wait __P((struct txp_softc *));
105 int txp_download_fw_section __P((struct txp_softc *,
106     struct txp_fw_section_header *, int));
107 int txp_alloc_rings __P((struct txp_softc *));
108 void txp_dma_free __P((struct txp_softc *, struct txp_dma_alloc *));
109 int txp_dma_malloc __P((struct txp_softc *, bus_size_t, struct txp_dma_alloc *, int));
110 void txp_set_filter __P((struct txp_softc *));
111 
112 int txp_cmd_desc_numfree __P((struct txp_softc *));
113 int txp_command __P((struct txp_softc *, u_int16_t, u_int16_t, u_int32_t,
114     u_int32_t, u_int16_t *, u_int32_t *, u_int32_t *, int));
115 int txp_command2 __P((struct txp_softc *, u_int16_t, u_int16_t,
116     u_int32_t, u_int32_t, struct txp_ext_desc *, u_int8_t,
117     struct txp_rsp_desc **, int));
118 int txp_response __P((struct txp_softc *, u_int32_t, u_int16_t, u_int16_t,
119     struct txp_rsp_desc **));
120 void txp_rsp_fixup __P((struct txp_softc *, struct txp_rsp_desc *,
121     struct txp_rsp_desc *));
122 void txp_capabilities __P((struct txp_softc *));
123 
124 void txp_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
125 int txp_ifmedia_upd __P((struct ifnet *));
126 void txp_show_descriptor __P((void *));
127 void txp_tx_reclaim __P((struct txp_softc *, struct txp_tx_ring *));
128 void txp_rxbuf_reclaim __P((struct txp_softc *));
129 void txp_rx_reclaim __P((struct txp_softc *, struct txp_rx_ring *));
130 
/*
 * Autoconf glue: softc size and the probe/attach entry points
 * used by the kernel autoconfiguration machinery.
 */
struct cfattach txp_ca = {
	sizeof(struct txp_softc), txp_probe, txp_attach,
};

/* Driver descriptor: "txp" devices are network interfaces (DV_IFNET). */
struct cfdriver txp_cd = {
	0, "txp", DV_IFNET
};
138 
139 int
140 txp_probe(parent, match, aux)
141 	struct device *parent;
142 	void *match, *aux;
143 {
144 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
145 
146 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_3COM)
147 		return (0);
148 
149 	switch (PCI_PRODUCT(pa->pa_id)) {
150 	case PCI_PRODUCT_3COM_3CR990TX95:
151 	case PCI_PRODUCT_3COM_3CR990TX97:
152 	case PCI_PRODUCT_3COM_3CR990SVR95:
153 	case PCI_PRODUCT_3COM_3CR990SVR97:
154 	case PCI_PRODUCT_3COM_3C990BTXM:
155 	case PCI_PRODUCT_3COM_3C990BSVR:
156 		return (1);
157 	}
158 
159 	return (0);
160 }
161 
/*
 * Attach routine: map the device's memory space, hook up the
 * interrupt, reset the chip and download its runtime firmware,
 * allocate the DMA rings, read the station address, and finally
 * register the network interface with the stack.  Any failure
 * simply returns, leaving the device unconfigured.
 */
void
txp_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct txp_softc *sc = (struct txp_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_size_t iosize;
	u_int32_t command;
	u_int16_t p1;
	u_int32_t p2;

	/* Mark the device cold until attach completes (cleared below). */
	sc->sc_cold = 1;

	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	/* Bus mastering and memory space must already be enabled. */
	if (!(command & PCI_COMMAND_MASTER_ENABLE)) {
		printf(": failed to enable bus mastering\n");
		return;
	}

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		printf(": failed to enable memory mapping\n");
		return;
	}
	if (pci_mapreg_map(pa, TXP_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_bt, &sc->sc_bh, NULL, &iosize, 0)) {
		printf(": can't map mem space %d\n", 0);
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/*
	 * Allocate our interrupt.
	 */
	if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, txp_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	/* Reset the chip, load firmware, and set up the shared rings. */
	if (txp_chip_init(sc))
		return;

	if (txp_download_fw(sc))
		return;

	if (txp_alloc_rings(sc))
		return;

	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
	    NULL, NULL, NULL, 1))
		return;

	/* Station address comes back in the two response words p1/p2. */
	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
	    &p1, &p2, NULL, 1))
		return;

	txp_set_filter(sc);

	/* The firmware returns the MAC address byte-swapped within each
	 * response word; unscramble it into ac_enaddr. */
	sc->sc_arpcom.ac_enaddr[0] = ((u_int8_t *)&p1)[1];
	sc->sc_arpcom.ac_enaddr[1] = ((u_int8_t *)&p1)[0];
	sc->sc_arpcom.ac_enaddr[2] = ((u_int8_t *)&p2)[3];
	sc->sc_arpcom.ac_enaddr[3] = ((u_int8_t *)&p2)[2];
	sc->sc_arpcom.ac_enaddr[4] = ((u_int8_t *)&p2)[1];
	sc->sc_arpcom.ac_enaddr[5] = ((u_int8_t *)&p2)[0];

	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
	sc->sc_cold = 0;

	/* Advertise the supported media types; default to autoselect. */
	ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);

	sc->sc_xcvr = TXP_XCVR_AUTO;
	txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
	    NULL, NULL, NULL, 0);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	/* Fill in the ifnet structure and attach the interface. */
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = txp_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = txp_start;
	ifp->if_watchdog = txp_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_ENTRIES);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = 0;
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/* Probe for offload features (checksum/vlan) the fw supports. */
	txp_capabilities(sc);

	timeout_set(&sc->sc_tick, txp_tick, sc);

	/*
	 * Attach us everywhere
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	/* Quiesce the firmware cleanly at system shutdown. */
	shutdownhook_establish(txp_shutdown, sc);
}
288 
/*
 * Bring the chip to a known quiet state: mask and acknowledge every
 * interrupt source, perform a full adapter reset, then repeat the
 * mask/ack sequence after the reset.  Returns 0 on success, -1 if
 * the reset never completed.
 */
int
txp_chip_init(sc)
	struct txp_softc *sc;
{
	/* disable interrupts */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	if (txp_reset_adapter(sc))
		return (-1);

	/* disable interrupts */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	return (0);
}
326 
/*
 * Hard-reset the adapter: assert all bits in the software reset
 * register, deassert, then poll the A2H_0 mailbox (1ms steps, up to
 * ~6 seconds) until the boot code reports it is waiting for a host
 * request.  Returns 0 on success, -1 if the chip never came back.
 */
int
txp_reset_adapter(sc)
	struct txp_softc *sc;
{
	u_int32_t r;
	int i;

	WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
	DELAY(1000);
	WRITE_REG(sc, TXP_SRR, 0);

	/* Should wait max 6 seconds */
	for (i = 0; i < 6000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(1000);
	}

	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		printf("%s: reset hung\n", TXP_DEVNAME(sc));
		return (-1);
	}

	return (0);
}
353 
/*
 * Download the runtime firmware image (tc990image) into the adapter.
 * Handshakes with the boot code over the A2H_0/H2A_* mailbox
 * registers: announce the image, push each section in turn via
 * txp_download_fw_section(), then signal completion and wait for the
 * chip to report STAT_WAITING_FOR_BOOT.  The A2H_0 interrupt is
 * temporarily enabled for the handshake and the previous IER/IMR
 * contents are restored before returning.  Returns 0 on success.
 */
int
txp_download_fw(sc)
	struct txp_softc *sc;
{
	struct txp_fw_file_header *fileheader;
	struct txp_fw_section_header *secthead;
	int sect;
	u_int32_t r, i, ier, imr;

	/* Enable and unmask the A2H_0 doorbell for the handshake. */
	ier = READ_REG(sc, TXP_IER);
	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

	imr = READ_REG(sc, TXP_IMR);
	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

	/* Poll (50us steps) until the boot code asks for a request. */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		printf(": not waiting for host request\n");
		return (-1);
	}

	/* Ack the status */
	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	/* Sanity-check the embedded image's magic string. */
	fileheader = (struct txp_fw_file_header *)tc990image;
	if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
		printf(": fw invalid magic\n");
		return (-1);
	}

	/* Tell boot firmware to get ready for image */
	WRITE_REG(sc, TXP_H2A_1, fileheader->addr);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);

	if (txp_download_fw_wait(sc)) {
		printf(": fw wait failed, initial\n");
		return (-1);
	}

	/* Walk the section headers laid out after the file header. */
	secthead = (struct txp_fw_section_header *)(((u_int8_t *)tc990image) +
	    sizeof(struct txp_fw_file_header));

	for (sect = 0; sect < fileheader->nsections; sect++) {
		if (txp_download_fw_section(sc, secthead, sect))
			return (-1);
		secthead = (struct txp_fw_section_header *)
		    (((u_int8_t *)secthead) + secthead->nbytes + sizeof(*secthead));
	}

	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);

	/* Wait for the chip to return to the waiting-for-boot state. */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_BOOT) {
		printf(": not waiting for boot\n");
		return (-1);
	}

	/* Restore the caller's interrupt enable/mask state. */
	WRITE_REG(sc, TXP_IER, ier);
	WRITE_REG(sc, TXP_IMR, imr);

	return (0);
}
426 
427 int
428 txp_download_fw_wait(sc)
429 	struct txp_softc *sc;
430 {
431 	u_int32_t i, r;
432 
433 	for (i = 0; i < 10000; i++) {
434 		r = READ_REG(sc, TXP_ISR);
435 		if (r & TXP_INT_A2H_0)
436 			break;
437 		DELAY(50);
438 	}
439 
440 	if (!(r & TXP_INT_A2H_0)) {
441 		printf(": fw wait failed comm0\n", sc->sc_dev.dv_xname);
442 		return (-1);
443 	}
444 
445 	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
446 
447 	r = READ_REG(sc, TXP_A2H_0);
448 	if (r != STAT_WAITING_FOR_SEGMENT) {
449 		printf(": fw not waiting for segment\n", sc->sc_dev.dv_xname);
450 		return (-1);
451 	}
452 	return (0);
453 }
454 
/*
 * Download one firmware section: validate its placement within the
 * embedded image, copy it into a freshly allocated DMA buffer,
 * verify its Internet checksum (via in_cksum() over a dummy mbuf),
 * and hand the buffer's physical address to the boot code.  Returns
 * 0 on success (zero-length sections are silently skipped), -1 on
 * any validation or handshake failure.
 */
int
txp_download_fw_section(sc, sect, sectnum)
	struct txp_softc *sc;
	struct txp_fw_section_header *sect;
	int sectnum;
{
	struct txp_dma_alloc dma;
	int rseg, err = 0;
	struct mbuf m;
	u_int16_t csum;

	/* Skip zero length sections */
	if (sect->nbytes == 0)
		return (0);

	/* Make sure we aren't past the end of the image */
	rseg = ((u_int8_t *)sect) - ((u_int8_t *)tc990image);
	if (rseg >= sizeof(tc990image)) {
		printf(": fw invalid section address, section %d\n", sectnum);
		return (-1);
	}

	/* Make sure this section doesn't go past the end */
	/* NOTE(review): this bound does not include sizeof(*sect), and
	 * ">=" also rejects a section ending exactly at the image end —
	 * confirm against the firmware image layout before tightening. */
	rseg += sect->nbytes;
	if (rseg >= sizeof(tc990image)) {
		printf(": fw truncated section %d\n", sectnum);
		return (-1);
	}

	/* map a buffer, copy segment to it, get physaddr */
	if (txp_dma_malloc(sc, sect->nbytes, &dma, 0)) {
		printf(": fw dma malloc failed, section %d\n", sectnum);
		return (-1);
	}

	/* Section payload starts immediately after its header. */
	bcopy(((u_int8_t *)sect) + sizeof(*sect), dma.dma_vaddr, sect->nbytes);

	/*
	 * dummy up mbuf and verify section checksum
	 */
	m.m_type = MT_DATA;
	m.m_next = m.m_nextpkt = NULL;
	m.m_len = sect->nbytes;
	m.m_data = dma.dma_vaddr;
	m.m_flags = 0;
	csum = in_cksum(&m, sect->nbytes);
	if (csum != sect->cksum) {
		printf(": fw section %d, bad cksum (expected 0x%x got 0x%x)\n",
		    sectnum, sect->cksum, csum);
		err = -1;
		goto bail;
	}

	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, BUS_DMASYNC_PREREAD);

	/* Hand length, checksum, target address and the 64-bit physical
	 * buffer address to the boot code, then signal the segment. */
	WRITE_REG(sc, TXP_H2A_1, sect->nbytes);
	WRITE_REG(sc, TXP_H2A_2, sect->cksum);
	WRITE_REG(sc, TXP_H2A_3, sect->addr);
	WRITE_REG(sc, TXP_H2A_4, dma.dma_paddr >> 32);
	WRITE_REG(sc, TXP_H2A_5, dma.dma_paddr & 0xffffffff);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);

	if (txp_download_fw_wait(sc)) {
		printf(": fw wait failed, section %d\n", sectnum);
		err = -1;
	}

	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, BUS_DMASYNC_POSTREAD);

bail:
	/* DMA buffer is always released, success or failure. */
	txp_dma_free(sc, &dma);

	return (err);
}
529 
/*
 * Interrupt handler: mask the chip, then loop draining every pending
 * event — rx completions on both rings, rx buffer refills, and tx
 * completions on both rings — re-reading the ISR until it is clear.
 * Finally re-enable interrupts and kick the transmit path.  Returns
 * nonzero if any interrupt was serviced (claimed).
 */
int
txp_intr(vsc)
	void *vsc;
{
	struct txp_softc *sc = vsc;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t isr;
	int claimed = 0;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |  TXP_INT_LATCH);

	isr = READ_REG(sc, TXP_ISR);
	while (isr) {
		claimed = 1;
		/* Ack everything we saw before processing it. */
		WRITE_REG(sc, TXP_ISR, isr);

		/* Rx rings: drain when read offset trails write offset. */
		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxhir);
		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxlor);

		/* Refill the rx buffer ring when the chip catches up. */
		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
			txp_rxbuf_reclaim(sc);

		/* Tx rings: reap descriptors the chip has consumed. */
		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
		    TXP_OFFSET2IDX(*(sc->sc_txhir.r_off))))
			txp_tx_reclaim(sc, &sc->sc_txhir);

		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
		    TXP_OFFSET2IDX(*(sc->sc_txlor.r_off))))
			txp_tx_reclaim(sc, &sc->sc_txlor);

		isr = READ_REG(sc, TXP_ISR);
	}

	/* unmask all interrupts */
	/* NOTE(review): this leaves only TXP_INT_A2H_3 masked — confirm
	 * that is the intended steady-state mask for this chip. */
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	/* Tx descriptors may have freed up; try to send queued packets. */
	txp_start(&sc->sc_arpcom.ac_if);

	return (claimed);
}
577 
578 void
579 txp_rx_reclaim(sc, r)
580 	struct txp_softc *sc;
581 	struct txp_rx_ring *r;
582 {
583 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
584 	struct txp_rx_desc *rxd;
585 	struct mbuf *m;
586 	struct txp_swdesc *sd;
587 	u_int32_t roff, woff;
588 	int sumflags = 0;
589 
590 	roff = *r->r_roff;
591 	woff = *r->r_woff;
592 	rxd = r->r_desc + (roff / sizeof(struct txp_rx_desc));
593 
594 	while (roff != woff) {
595 		if (rxd->rx_flags & RX_FLAGS_ERROR) {
596 			printf("%s: error 0x%x\n", sc->sc_dev.dv_xname,
597 			    rxd->rx_stat);
598 			ifp->if_ierrors++;
599 			goto next;
600 		}
601 
602 		/* retrieve stashed pointer */
603 		bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));
604 
605 		bus_dmamap_sync(sc->sc_dmat, sd->sd_map,
606 		    BUS_DMASYNC_POSTWRITE);
607 		bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
608 		bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
609 		m = sd->sd_mbuf;
610 		free(sd, M_DEVBUF);
611 		m->m_pkthdr.len = m->m_len = rxd->rx_len;
612 
613 #ifdef __STRICT_ALIGNMENT
614 		{
615 			/*
616 			 * XXX Nice chip, except it won't accept "off by 2"
617 			 * buffers, so we're force to copy.  Supposedly
618 			 * this will be fixed in a newer firmware rev
619 			 * and this will be temporary.
620 			 */
621 			struct mbuf *mnew;
622 
623 			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
624 			if (mnew == NULL) {
625 				m_freem(m);
626 				goto next;
627 			}
628 			if (m->m_len > (MHLEN - 2)) {
629 				MCLGET(mnew, M_DONTWAIT);
630 				if (!(mnew->m_flags & M_EXT)) {
631 					m_freem(mnew);
632 					m_freem(m);
633 					goto next;
634 				}
635 			}
636 			mnew->m_pkthdr.rcvif = ifp;
637 			mnew->m_pkthdr.len = mnew->m_len = m->m_len;
638 			mnew->m_data += 2;
639 			bcopy(m->m_data, mnew->m_data, m->m_len);
640 			m_freem(m);
641 			m = mnew;
642 		}
643 #endif
644 
645 #if NBPFILTER > 0
646 		/*
647 		 * Handle BPF listeners. Let the BPF user see the packet.
648 		 */
649 		if (ifp->if_bpf)
650 			bpf_mtap(ifp->if_bpf, m);
651 #endif
652 
653 		if (rxd->rx_stat & RX_STAT_IPCKSUMBAD)
654 			sumflags |= M_IPV4_CSUM_IN_BAD;
655 		else if (rxd->rx_stat & RX_STAT_IPCKSUMGOOD)
656 			sumflags |= M_IPV4_CSUM_IN_OK;
657 
658 		if (rxd->rx_stat & RX_STAT_TCPCKSUMBAD)
659 			sumflags |= M_TCP_CSUM_IN_BAD;
660 		else if (rxd->rx_stat & RX_STAT_TCPCKSUMGOOD)
661 			sumflags |= M_TCP_CSUM_IN_OK;
662 
663 		if (rxd->rx_stat & RX_STAT_UDPCKSUMBAD)
664 			sumflags |= M_UDP_CSUM_IN_BAD;
665 		else if (rxd->rx_stat & RX_STAT_UDPCKSUMGOOD)
666 			sumflags |= M_UDP_CSUM_IN_OK;
667 
668 		m->m_pkthdr.csum = sumflags;
669 
670 #if NVLAN > 0
671 		if (rxd->rx_stat & RX_STAT_VLAN) {
672 			if (vlan_input_tag(m, htons(rxd->rx_vlan >> 16)) < 0)
673 				ifp->if_noproto++;
674 			goto next;
675 		}
676 #endif
677 
678 		ether_input_mbuf(ifp, m);
679 
680 next:
681 
682 		roff += sizeof(struct txp_rx_desc);
683 		if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
684 			roff = 0;
685 			rxd = r->r_desc;
686 		} else
687 			rxd++;
688 		woff = *r->r_woff;
689 	}
690 
691 	*r->r_roff = woff;
692 }
693 
/*
 * Refill the receive buffer ring: starting one past the chip's last
 * write index and stopping at its read index, allocate a software
 * descriptor, a cluster mbuf, and a DMA map for each free slot, and
 * publish the buffer's physical address to the chip.  On allocation
 * failure the loop stops early; partially built state for the
 * failing slot is released via the error labels below.
 */
void
txp_rxbuf_reclaim(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rxbuf_desc *rbd;
	struct txp_swdesc *sd;
	u_int32_t i, end;

	end = TXP_OFFSET2IDX(hv->hv_rx_buf_read_idx);
	i = TXP_OFFSET2IDX(hv->hv_rx_buf_write_idx);

	/* Begin at the slot after the last one handed to the chip. */
	if (++i == RXBUF_ENTRIES)
		i = 0;

	rbd = sc->sc_rxbufs + i;

	while (i != end) {
		sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
		    M_DEVBUF, M_NOWAIT);
		if (sd == NULL)
			break;

		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL)
			goto err_sd;

		MCLGET(sd->sd_mbuf, M_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0)
			goto err_mbuf;
		sd->sd_mbuf->m_pkthdr.rcvif = ifp;
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
		    TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map))
			goto err_mbuf;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
		    BUS_DMA_NOWAIT)) {
			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
			goto err_mbuf;
		}
		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, BUS_DMASYNC_PREWRITE);

		/* stash away pointer */
		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));

		/* Publish the 64-bit physical buffer address to the chip. */
		rbd->rb_paddrlo = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    & 0xffffffff;
		rbd->rb_paddrhi = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    >> 32;

		/* Advance the write index the chip polls for new buffers. */
		hv->hv_rx_buf_write_idx = TXP_IDX2OFFSET(i);

		if (++i == RXBUF_ENTRIES) {
			i = 0;
			rbd = sc->sc_rxbufs;
		} else
			rbd++;
	}
	return;

err_mbuf:
	m_freem(sd->sd_mbuf);
err_sd:
	free(sd, M_DEVBUF);
}
760 
761 /*
762  * Reclaim mbufs and entries from a transmit ring.
763  */
/*
 * Walk transmit ring r from its consumer index up to the index the
 * chip has finished with (derived from *r->r_off), freeing the mbuf
 * attached to each completed data descriptor and counting output
 * packets.  Updates r->r_cons/r->r_cnt and clears the watchdog
 * timer once the ring is fully drained.
 */
void
txp_tx_reclaim(sc, r)
	struct txp_softc *sc;
	struct txp_tx_ring *r;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t idx = TXP_OFFSET2IDX(*(r->r_off));
	u_int32_t cons = r->r_cons, cnt = r->r_cnt;
	struct txp_tx_desc *txd = r->r_desc + cons;
	struct txp_swdesc *sd = sc->sc_txd + cons;
	struct mbuf *m;

	while (cons != idx) {
		if (cnt == 0)
			break;

		/* Only data descriptors carry an mbuf to release. */
		if ((txd->tx_flags & TX_FLAGS_TYPE_M) ==
		    TX_FLAGS_TYPE_DATA) {
			bus_dmamap_sync(sc->sc_dmat, sd->sd_map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
			m = sd->sd_mbuf;
			if (m != NULL) {
				m_freem(m);
				txd->tx_addrlo = 0;
				txd->tx_addrhi = 0;
				ifp->if_opackets++;
			}
		}
		/* Ring space was freed; allow new transmissions. */
		ifp->if_flags &= ~IFF_OACTIVE;

		/* Advance descriptor and swdesc in lockstep, wrapping. */
		if (++cons == TX_ENTRIES) {
			txd = r->r_desc;
			cons = 0;
			sd = sc->sc_txd;
		} else {
			txd++;
			sd++;
		}

		cnt--;
	}

	r->r_cons = cons;
	r->r_cnt = cnt;
	/* Nothing outstanding: stop the transmit watchdog. */
	if (cnt == 0)
		ifp->if_timer = 0;
}
812 
/*
 * Shutdown hook (registered in txp_attach): mask chip interrupts and
 * command the firmware to disable transmit and receive and then
 * halt.  The commands are issued with a final argument of 0
 * (presumably the no-wait flag — confirm against txp_command),
 * unlike the attach-time calls which pass 1.
 */
void
txp_shutdown(vsc)
	void *vsc;
{
	struct txp_softc *sc = (struct txp_softc *)vsc;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0);
}
829 
830 int
831 txp_alloc_rings(sc)
832 	struct txp_softc *sc;
833 {
834 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
835 	struct txp_boot_record *boot;
836 	struct txp_swdesc *sd;
837 	u_int32_t r;
838 	int i, j;
839 
840 	/* boot record */
841 	if (txp_dma_malloc(sc, sizeof(struct txp_boot_record), &sc->sc_boot_dma,
842 	    BUS_DMA_COHERENT)) {
843 		printf(": can't allocate boot record\n");
844 		return (-1);
845 	}
846 	boot = (struct txp_boot_record *)sc->sc_boot_dma.dma_vaddr;
847 	bzero(boot, sizeof(*boot));
848 	sc->sc_boot = boot;
849 
850 	/* host variables */
851 	if (txp_dma_malloc(sc, sizeof(struct txp_hostvar), &sc->sc_host_dma,
852 	    BUS_DMA_COHERENT)) {
853 		printf(": can't allocate host ring\n");
854 		goto bail_boot;
855 	}
856 	bzero(sc->sc_host_dma.dma_vaddr, sizeof(struct txp_hostvar));
857 	boot->br_hostvar_lo = sc->sc_host_dma.dma_paddr & 0xffffffff;
858 	boot->br_hostvar_hi = sc->sc_host_dma.dma_paddr >> 32;
859 	sc->sc_hostvar = (struct txp_hostvar *)sc->sc_host_dma.dma_vaddr;
860 
861 	/* high priority tx ring */
862 	if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
863 	    &sc->sc_txhiring_dma, BUS_DMA_COHERENT)) {
864 		printf(": can't allocate high tx ring\n");
865 		goto bail_host;
866 	}
867 	bzero(sc->sc_txhiring_dma.dma_vaddr, sizeof(struct txp_tx_desc) * TX_ENTRIES);
868 	boot->br_txhipri_lo = sc->sc_txhiring_dma.dma_paddr & 0xffffffff;
869 	boot->br_txhipri_hi = sc->sc_txhiring_dma.dma_paddr >> 32;
870 	boot->br_txhipri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc);
871 	sc->sc_txhir.r_reg = TXP_H2A_1;
872 	sc->sc_txhir.r_desc = (struct txp_tx_desc *)sc->sc_txhiring_dma.dma_vaddr;
873 	sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
874 	sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
875 	for (i = 0; i < TX_ENTRIES; i++) {
876 		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_SEGLEN,
877 		    TX_ENTRIES - 4, TXP_MAX_PKTLEN, 0,
878 		    BUS_DMA_NOWAIT, &sc->sc_txd[i].sd_map) != 0) {
879 			for (j = 0; j < i; j++) {
880 				bus_dmamap_destroy(sc->sc_dmat,
881 				    sc->sc_txd[j].sd_map);
882 				sc->sc_txd[j].sd_map = NULL;
883 			}
884 			goto bail_txhiring;
885 		}
886 	}
887 
888 	/* low priority tx ring */
889 	if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
890 	    &sc->sc_txloring_dma, BUS_DMA_COHERENT)) {
891 		printf(": can't allocate low tx ring\n");
892 		goto bail_txhiring;
893 	}
894 	bzero(sc->sc_txloring_dma.dma_vaddr, sizeof(struct txp_tx_desc) * TX_ENTRIES);
895 	boot->br_txlopri_lo = sc->sc_txloring_dma.dma_paddr & 0xffffffff;
896 	boot->br_txlopri_hi = sc->sc_txloring_dma.dma_paddr >> 32;
897 	boot->br_txlopri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc);
898 	sc->sc_txlor.r_reg = TXP_H2A_3;
899 	sc->sc_txlor.r_desc = (struct txp_tx_desc *)sc->sc_txloring_dma.dma_vaddr;
900 	sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
901 	sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;
902 
903 	/* high priority rx ring */
904 	if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
905 	    &sc->sc_rxhiring_dma, BUS_DMA_COHERENT)) {
906 		printf(": can't allocate high rx ring\n");
907 		goto bail_txloring;
908 	}
909 	bzero(sc->sc_rxhiring_dma.dma_vaddr, sizeof(struct txp_rx_desc) * RX_ENTRIES);
910 	boot->br_rxhipri_lo = sc->sc_rxhiring_dma.dma_paddr & 0xffffffff;
911 	boot->br_rxhipri_hi = sc->sc_rxhiring_dma.dma_paddr >> 32;
912 	boot->br_rxhipri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc);
913 	sc->sc_rxhir.r_desc =
914 	    (struct txp_rx_desc *)sc->sc_rxhiring_dma.dma_vaddr;
915 	sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
916 	sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
917 
918 	/* low priority ring */
919 	if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
920 	    &sc->sc_rxloring_dma, BUS_DMA_COHERENT)) {
921 		printf(": can't allocate low rx ring\n");
922 		goto bail_rxhiring;
923 	}
924 	bzero(sc->sc_rxloring_dma.dma_vaddr, sizeof(struct txp_rx_desc) * RX_ENTRIES);
925 	boot->br_rxlopri_lo = sc->sc_rxloring_dma.dma_paddr & 0xffffffff;
926 	boot->br_rxlopri_hi = sc->sc_rxloring_dma.dma_paddr >> 32;
927 	boot->br_rxlopri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc);
928 	sc->sc_rxlor.r_desc =
929 	    (struct txp_rx_desc *)sc->sc_rxloring_dma.dma_vaddr;
930 	sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
931 	sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
932 
933 	/* command ring */
934 	if (txp_dma_malloc(sc, sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
935 	    &sc->sc_cmdring_dma, BUS_DMA_COHERENT)) {
936 		printf(": can't allocate command ring\n");
937 		goto bail_rxloring;
938 	}
939 	bzero(sc->sc_cmdring_dma.dma_vaddr, sizeof(struct txp_cmd_desc) * CMD_ENTRIES);
940 	boot->br_cmd_lo = sc->sc_cmdring_dma.dma_paddr & 0xffffffff;
941 	boot->br_cmd_hi = sc->sc_cmdring_dma.dma_paddr >> 32;
942 	boot->br_cmd_siz = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
943 	sc->sc_cmdring.base = (struct txp_cmd_desc *)sc->sc_cmdring_dma.dma_vaddr;
944 	sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
945 	sc->sc_cmdring.lastwrite = 0;
946 
947 	/* response ring */
948 	if (txp_dma_malloc(sc, sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
949 	    &sc->sc_rspring_dma, BUS_DMA_COHERENT)) {
950 		printf(": can't allocate response ring\n");
951 		goto bail_cmdring;
952 	}
953 	bzero(sc->sc_rspring_dma.dma_vaddr, sizeof(struct txp_rsp_desc) * RSP_ENTRIES);
954 	boot->br_resp_lo = sc->sc_rspring_dma.dma_paddr & 0xffffffff;
955 	boot->br_resp_hi = sc->sc_rspring_dma.dma_paddr >> 32;
956 	boot->br_resp_siz = CMD_ENTRIES * sizeof(struct txp_rsp_desc);
957 	sc->sc_rspring.base = (struct txp_rsp_desc *)sc->sc_rspring_dma.dma_vaddr;
958 	sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
959 	sc->sc_rspring.lastwrite = 0;
960 
961 	/* receive buffer ring */
962 	if (txp_dma_malloc(sc, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
963 	    &sc->sc_rxbufring_dma, BUS_DMA_COHERENT)) {
964 		printf(": can't allocate rx buffer ring\n");
965 		goto bail_rspring;
966 	}
967 	bzero(sc->sc_rxbufring_dma.dma_vaddr, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES);
968 	boot->br_rxbuf_lo = sc->sc_rxbufring_dma.dma_paddr & 0xffffffff;
969 	boot->br_rxbuf_hi = sc->sc_rxbufring_dma.dma_paddr >> 32;
970 	boot->br_rxbuf_siz = RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc);
971 	sc->sc_rxbufs = (struct txp_rxbuf_desc *)sc->sc_rxbufring_dma.dma_vaddr;
972 	for (i = 0; i < RXBUF_ENTRIES; i++) {
973 		sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
974 		    M_DEVBUF, M_NOWAIT);
975 		if (sd == NULL)
976 			break;
977 
978 		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
979 		if (sd->sd_mbuf == NULL) {
980 			goto bail_rxbufring;
981 		}
982 
983 		MCLGET(sd->sd_mbuf, M_DONTWAIT);
984 		if ((sd->sd_mbuf->m_flags & M_EXT) == 0) {
985 			goto bail_rxbufring;
986 		}
987 		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
988 		sd->sd_mbuf->m_pkthdr.rcvif = ifp;
989 		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
990 		    TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map)) {
991 			goto bail_rxbufring;
992 		}
993 		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
994 		    BUS_DMA_NOWAIT)) {
995 			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
996 			goto bail_rxbufring;
997 		}
998 		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, BUS_DMASYNC_PREWRITE);
999 
1000 		/* stash away pointer */
1001 		bcopy(&sd, (u_long *)&sc->sc_rxbufs[i].rb_vaddrlo, sizeof(sd));
1002 
1003 		sc->sc_rxbufs[i].rb_paddrlo =
1004 		    ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) & 0xffffffff;
1005 		sc->sc_rxbufs[i].rb_paddrhi =
1006 		    ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) >> 32;
1007 	}
1008 	sc->sc_hostvar->hv_rx_buf_write_idx = (RXBUF_ENTRIES - 1) *
1009 	    sizeof(struct txp_rxbuf_desc);
1010 
1011 	/* zero dma */
1012 	if (txp_dma_malloc(sc, sizeof(u_int32_t), &sc->sc_zero_dma,
1013 	    BUS_DMA_COHERENT)) {
1014 		printf(": can't allocate response ring\n");
1015 		goto bail_rxbufring;
1016 	}
1017 	bzero(sc->sc_zero_dma.dma_vaddr, sizeof(u_int32_t));
1018 	boot->br_zero_lo = sc->sc_zero_dma.dma_paddr & 0xffffffff;
1019 	boot->br_zero_hi = sc->sc_zero_dma.dma_paddr >> 32;
1020 
1021 	/* See if it's waiting for boot, and try to boot it */
1022 	for (i = 0; i < 10000; i++) {
1023 		r = READ_REG(sc, TXP_A2H_0);
1024 		if (r == STAT_WAITING_FOR_BOOT)
1025 			break;
1026 		DELAY(50);
1027 	}
1028 	if (r != STAT_WAITING_FOR_BOOT) {
1029 		printf(": not waiting for boot\n");
1030 		goto bail;
1031 	}
1032 	WRITE_REG(sc, TXP_H2A_2, sc->sc_boot_dma.dma_paddr >> 32);
1033 	WRITE_REG(sc, TXP_H2A_1, sc->sc_boot_dma.dma_paddr & 0xffffffff);
1034 	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
1035 
1036 	/* See if it booted */
1037 	for (i = 0; i < 10000; i++) {
1038 		r = READ_REG(sc, TXP_A2H_0);
1039 		if (r == STAT_RUNNING)
1040 			break;
1041 		DELAY(50);
1042 	}
1043 	if (r != STAT_RUNNING) {
1044 		printf(": fw not running\n");
1045 		goto bail;
1046 	}
1047 
1048 	/* Clear TX and CMD ring write registers */
1049 	WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
1050 	WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
1051 	WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
1052 	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
1053 
1054 	return (0);
1055 
1056 bail:
1057 	txp_dma_free(sc, &sc->sc_zero_dma);
1058 bail_rxbufring:
1059 	txp_dma_free(sc, &sc->sc_rxbufring_dma);
1060 bail_rspring:
1061 	txp_dma_free(sc, &sc->sc_rspring_dma);
1062 bail_cmdring:
1063 	txp_dma_free(sc, &sc->sc_cmdring_dma);
1064 bail_rxloring:
1065 	txp_dma_free(sc, &sc->sc_rxloring_dma);
1066 bail_rxhiring:
1067 	txp_dma_free(sc, &sc->sc_rxhiring_dma);
1068 bail_txloring:
1069 	txp_dma_free(sc, &sc->sc_txloring_dma);
1070 bail_txhiring:
1071 	txp_dma_free(sc, &sc->sc_txhiring_dma);
1072 bail_host:
1073 	txp_dma_free(sc, &sc->sc_host_dma);
1074 bail_boot:
1075 	txp_dma_free(sc, &sc->sc_boot_dma);
1076 	return (-1);
1077 }
1078 
1079 int
1080 txp_dma_malloc(sc, size, dma, mapflags)
1081 	struct txp_softc *sc;
1082 	bus_size_t size;
1083 	struct txp_dma_alloc *dma;
1084 	int mapflags;
1085 {
1086 	int r;
1087 
1088 	if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
1089 	    &dma->dma_seg, 1, &dma->dma_nseg, 0)) != 0)
1090 		goto fail_0;
1091 
1092 	if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
1093 	    size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0)
1094 		goto fail_1;
1095 
1096 	if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1097 	    BUS_DMA_NOWAIT, &dma->dma_map)) != 0)
1098 		goto fail_2;
1099 
1100 	if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
1101 	    size, NULL, BUS_DMA_NOWAIT)) != 0)
1102 		goto fail_3;
1103 
1104 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
1105 	return (0);
1106 
1107 fail_3:
1108 	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
1109 fail_2:
1110 	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
1111 fail_1:
1112 	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
1113 fail_0:
1114 	return (r);
1115 }
1116 
1117 void
1118 txp_dma_free(sc, dma)
1119 	struct txp_softc *sc;
1120 	struct txp_dma_alloc *dma;
1121 {
1122 	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
1123 	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_map->dm_mapsize);
1124 	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
1125 	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
1126 }
1127 
/*
 * Interface ioctl handler: address assignment, flag changes, multicast
 * list maintenance, and media selection.  Runs at splnet.
 */
int
txp_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	/* Give the common ethernet code first crack at the request. */
	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}

	switch(command) {
	case SIOCSIFADDR:
		/* Assigning an address implicitly brings the interface up. */
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			txp_init(sc);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif /* INET */
		default:
			txp_init(sc);
			break;
		}
		break;
	case SIOCSIFFLAGS:
		/* Up: (re)initialize; down: stop only if currently running. */
		if (ifp->if_flags & IFF_UP) {
			txp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				txp_stop(sc);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			txp_set_filter(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	(void)splx(s);

	return(error);
}
1197 
/*
 * Bring the interface up: quiesce the chip, program the rx filter,
 * enable tx/rx in the firmware, unmask interrupts, and arm the
 * statistics timer.
 */
void
txp_init(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	/*
	 * NOTE(review): txp_stop() is called before splnet() is raised;
	 * presumably safe in the contexts this is called from — confirm.
	 */
	txp_stop(sc);

	s = splnet();

	txp_set_filter(sc);

	/* Turn the data paths back on in the firmware (waits for replies). */
	txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	/*
	 * Enable the interrupt sources below; IMR is written with
	 * TXP_INT_A2H_3 — NOTE(review): presumably masking only that
	 * source, confirm against the register specification.
	 */
	WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |  TXP_INT_LATCH);
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;

	/* Restart the once-a-second statistics/housekeeping tick. */
	if (!timeout_pending(&sc->sc_tick))
		timeout_add(&sc->sc_tick, hz);

	splx(s);
}
1230 
/*
 * Periodic timer (runs every hz ticks): replenish rx buffers and fold
 * the firmware's statistics counters into the interface counters.
 */
void
txp_tick(vsc)
	void *vsc;
{
	struct txp_softc *sc = vsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;
	int s;

	s = splnet();
	txp_rxbuf_reclaim(sc);

	/* Fetch the statistics block from the firmware. */
	if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
	    &rsp, 1))
		goto out;
	/* The statistics response must carry exactly 6 extension descs. */
	if (rsp->rsp_numdesc != 6)
		goto out;
	/* Zero the counters so each tick accumulates deltas, not totals. */
	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
	    NULL, NULL, NULL, 1))
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	/*
	 * NOTE(review): the ext[n].ext_m slot assignments below follow
	 * the Typhoon firmware statistics layout — confirm the offsets
	 * against the firmware documentation.
	 */
	ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 +
	    ext[4].ext_1 + ext[4].ext_4;
	ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 +
	    ext[2].ext_1;
	ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 +
	    ext[1].ext_3;
	ifp->if_opackets += rsp->rsp_par2;
	ifp->if_ipackets += ext[2].ext_3;

out:
	/* rsp was malloc'd on our behalf by txp_command2(). */
	if (rsp != NULL)
		free(rsp, M_DEVBUF);

	splx(s);
	timeout_add(&sc->sc_tick, hz);
}
1270 
1271 void
1272 txp_start(ifp)
1273 	struct ifnet *ifp;
1274 {
1275 	struct txp_softc *sc = ifp->if_softc;
1276 	struct txp_tx_ring *r = &sc->sc_txhir;
1277 	struct txp_tx_desc *txd;
1278 	struct txp_frag_desc *fxd;
1279 	struct mbuf *m, *mnew;
1280 	struct txp_swdesc *sd;
1281 	u_int32_t firstprod, firstcnt, prod, cnt, i;
1282 #if NVLAN > 0
1283 	struct ifvlan		*ifv;
1284 #endif
1285 
1286 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1287 		return;
1288 
1289 	prod = r->r_prod;
1290 	cnt = r->r_cnt;
1291 
1292 	while (1) {
1293 		IFQ_DEQUEUE(&ifp->if_snd, m);
1294 		if (m == NULL)
1295 			break;
1296 
1297 		firstprod = prod;
1298 		firstcnt = cnt;
1299 
1300 		sd = sc->sc_txd + prod;
1301 		sd->sd_mbuf = m;
1302 
1303 		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
1304 		    BUS_DMA_NOWAIT)) {
1305 			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
1306 			if (mnew == NULL)
1307 				goto oactive1;
1308 			if (m->m_pkthdr.len > MHLEN) {
1309 				MCLGET(mnew, M_DONTWAIT);
1310 				if ((mnew->m_flags & M_EXT) == 0) {
1311 					m_freem(mnew);
1312 					goto oactive1;
1313 				}
1314 			}
1315 			m_copydata(m, 0, m->m_pkthdr.len, mtod(mnew, caddr_t));
1316 			mnew->m_pkthdr.len = mnew->m_len = m->m_pkthdr.len;
1317 			m_freem(m);
1318 			m = mnew;
1319 			if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
1320 			    BUS_DMA_NOWAIT))
1321 				goto oactive1;
1322 		}
1323 
1324 		if ((TX_ENTRIES - cnt) < 4)
1325 			goto oactive;
1326 
1327 		txd = r->r_desc + prod;
1328 
1329 		txd->tx_flags = TX_FLAGS_TYPE_DATA;
1330 		txd->tx_numdesc = 0;
1331 		txd->tx_addrlo = 0;
1332 		txd->tx_addrhi = 0;
1333 		txd->tx_totlen = 0;
1334 		txd->tx_pflags = 0;
1335 
1336 		if (++prod == TX_ENTRIES)
1337 			prod = 0;
1338 
1339 		if (++cnt >= (TX_ENTRIES - 4))
1340 			goto oactive;
1341 
1342 #if NVLAN > 0
1343 		if ((m->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1344 		    m->m_pkthdr.rcvif != NULL) {
1345 			ifv = m->m_pkthdr.rcvif->if_softc;
1346 			txd->tx_pflags = TX_PFLAGS_VLAN |
1347 			    (htons(ifv->ifv_tag) << TX_PFLAGS_VLANTAG_S);
1348 		}
1349 #endif
1350 
1351 		if (m->m_pkthdr.csum & M_IPV4_CSUM_OUT)
1352 			txd->tx_pflags |= TX_PFLAGS_IPCKSUM;
1353 #if 0
1354 		if (m->m_pkthdr.csum & M_TCPV4_CSUM_OUT)
1355 			txd->tx_pflags |= TX_PFLAGS_TCPCKSUM;
1356 		if (m->m_pkthdr.csum & M_UDPV4_CSUM_OUT)
1357 			txd->tx_pflags |= TX_PFLAGS_UDPCKSUM;
1358 #endif
1359 
1360 		fxd = (struct txp_frag_desc *)(r->r_desc + prod);
1361 		for (i = 0; i < sd->sd_map->dm_nsegs; i++) {
1362 			if (++cnt >= (TX_ENTRIES - 4))
1363 				goto oactive;
1364 
1365 			txd->tx_numdesc++;
1366 
1367 			fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG;
1368 			fxd->frag_rsvd1 = 0;
1369 			fxd->frag_len = sd->sd_map->dm_segs[i].ds_len;
1370 			fxd->frag_addrlo =
1371 			    ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) &
1372 			    0xffffffff;
1373 			fxd->frag_addrhi =
1374 			    ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) >>
1375 			    32;
1376 			fxd->frag_rsvd2 = 0;
1377 
1378 			if (++prod == TX_ENTRIES) {
1379 				fxd = (struct txp_frag_desc *)r->r_desc;
1380 				prod = 0;
1381 			} else
1382 				fxd++;
1383 
1384 		}
1385 
1386 		ifp->if_timer = 5;
1387 
1388 #if NBPFILTER > 0
1389 		if (ifp->if_bpf)
1390 			bpf_mtap(ifp->if_bpf, m);
1391 #endif
1392 
1393 		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, BUS_DMASYNC_PREREAD);
1394 		WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod));
1395 	}
1396 
1397 	r->r_prod = prod;
1398 	r->r_cnt = cnt;
1399 	return;
1400 
1401 oactive:
1402 	bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
1403 oactive1:
1404 	ifp->if_flags |= IFF_OACTIVE;
1405 	r->r_prod = firstprod;
1406 	r->r_cnt = firstcnt;
1407 	IF_PREPEND(&ifp->if_snd, m);
1408 }
1409 
1410 /*
1411  * Handle simple commands sent to the typhoon
1412  */
1413 int
1414 txp_command(sc, id, in1, in2, in3, out1, out2, out3, wait)
1415 	struct txp_softc *sc;
1416 	u_int16_t id, in1, *out1;
1417 	u_int32_t in2, in3, *out2, *out3;
1418 	int wait;
1419 {
1420 	struct txp_rsp_desc *rsp = NULL;
1421 
1422 	if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait))
1423 		return (-1);
1424 
1425 	if (!wait)
1426 		return (0);
1427 
1428 	if (out1 != NULL)
1429 		*out1 = rsp->rsp_par1;
1430 	if (out2 != NULL)
1431 		*out2 = rsp->rsp_par2;
1432 	if (out3 != NULL)
1433 		*out3 = rsp->rsp_par3;
1434 	free(rsp, M_DEVBUF);
1435 	return (0);
1436 }
1437 
1438 int
1439 txp_command2(sc, id, in1, in2, in3, in_extp, in_extn, rspp, wait)
1440 	struct txp_softc *sc;
1441 	u_int16_t id, in1;
1442 	u_int32_t in2, in3;
1443 	struct txp_ext_desc *in_extp;
1444 	u_int8_t in_extn;
1445 	struct txp_rsp_desc **rspp;
1446 	int wait;
1447 {
1448 	struct txp_hostvar *hv = sc->sc_hostvar;
1449 	struct txp_cmd_desc *cmd;
1450 	struct txp_ext_desc *ext;
1451 	u_int32_t idx, i;
1452 	u_int16_t seq;
1453 
1454 	if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
1455 		printf("%s: no free cmd descriptors\n", TXP_DEVNAME(sc));
1456 		return (-1);
1457 	}
1458 
1459 	idx = sc->sc_cmdring.lastwrite;
1460 	cmd = (struct txp_cmd_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
1461 	bzero(cmd, sizeof(*cmd));
1462 
1463 	cmd->cmd_numdesc = in_extn;
1464 	cmd->cmd_seq = seq = sc->sc_seq++;
1465 	cmd->cmd_id = id;
1466 	cmd->cmd_par1 = in1;
1467 	cmd->cmd_par2 = in2;
1468 	cmd->cmd_par3 = in3;
1469 	cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
1470 	    (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;
1471 
1472 	idx += sizeof(struct txp_cmd_desc);
1473 	if (idx == sc->sc_cmdring.size)
1474 		idx = 0;
1475 
1476 	for (i = 0; i < in_extn; i++) {
1477 		ext = (struct txp_ext_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
1478 		bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
1479 		in_extp++;
1480 		idx += sizeof(struct txp_cmd_desc);
1481 		if (idx == sc->sc_cmdring.size)
1482 			idx = 0;
1483 	}
1484 
1485 	sc->sc_cmdring.lastwrite = idx;
1486 
1487 	WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
1488 
1489 	if (!wait)
1490 		return (0);
1491 
1492 	for (i = 0; i < 10000; i++) {
1493 		idx = hv->hv_resp_read_idx;
1494 		if (idx != hv->hv_resp_write_idx) {
1495 			*rspp = NULL;
1496 			if (txp_response(sc, idx, id, seq, rspp))
1497 				return (-1);
1498 			if (*rspp != NULL)
1499 				break;
1500 		}
1501 		DELAY(50);
1502 	}
1503 	if (i == 1000 || (*rspp) == NULL) {
1504 		printf("%s: 0x%x command failed\n", TXP_DEVNAME(sc), id);
1505 		return (-1);
1506 	}
1507 
1508 	return (0);
1509 }
1510 
/*
 * Scan the response ring starting at ridx for the response matching
 * (id, seq).  Unrelated responses found along the way are reported and
 * consumed in place.  On a match, *rspp is set to a malloc'd copy that
 * the caller must free(9).  Returns 0 on success (including "not found
 * yet"), -1 on allocation failure.
 */
int
txp_response(sc, ridx, id, seq, rspp)
	struct txp_softc *sc;
	u_int32_t ridx;
	u_int16_t id;
	u_int16_t seq;
	struct txp_rsp_desc **rspp;
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rsp_desc *rsp;

	while (ridx != hv->hv_resp_write_idx) {
		rsp = (struct txp_rsp_desc *)(((u_int8_t *)sc->sc_rspring.base) + ridx);

		if (id == rsp->rsp_id && rsp->rsp_seq == seq) {
			/*
			 * Found it: copy the response plus its extension
			 * descriptors out of the ring for the caller.
			 */
			*rspp = (struct txp_rsp_desc *)malloc(
			    sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
			    M_DEVBUF, M_NOWAIT);
			if ((*rspp) == NULL)
				return (-1);
			txp_rsp_fixup(sc, rsp, *rspp);
			return (0);
		}

		if (rsp->rsp_flags & RSP_FLAGS_ERROR) {
			printf("%s: response error!\n", TXP_DEVNAME(sc));
			/* Consume the bad descriptor and keep scanning. */
			txp_rsp_fixup(sc, rsp, NULL);
			ridx = hv->hv_resp_read_idx;
			continue;
		}

		/* Unsolicited responses we recognize but do not act on. */
		switch (rsp->rsp_id) {
		case TXP_CMD_CYCLE_STATISTICS:
		case TXP_CMD_MEDIA_STATUS_READ:
			break;
		case TXP_CMD_HELLO_RESPONSE:
			printf("%s: hello\n", TXP_DEVNAME(sc));
			break;
		default:
			printf("%s: unknown id(0x%x)\n", TXP_DEVNAME(sc),
			    rsp->rsp_id);
		}

		/*
		 * Consume the descriptor.  txp_rsp_fixup() already advanced
		 * hv_resp_read_idx, so the store below writes back the value
		 * just read (effectively a no-op).
		 */
		txp_rsp_fixup(sc, rsp, NULL);
		ridx = hv->hv_resp_read_idx;
		hv->hv_resp_read_idx = ridx;
	}

	return (0);
}
1561 
/*
 * Consume rsp_numdesc + 1 descriptors (the response itself plus its
 * extension descriptors) from the response ring, optionally copying
 * them into dst, and advance the host's read index past them.
 */
void
txp_rsp_fixup(sc, rsp, dst)
	struct txp_softc *sc;
	struct txp_rsp_desc *rsp, *dst;
{
	struct txp_rsp_desc *src = rsp;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t i, ridx;

	ridx = hv->hv_resp_read_idx;

	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
		if (dst != NULL)
			bcopy(src, dst++, sizeof(struct txp_rsp_desc));
		ridx += sizeof(struct txp_rsp_desc);
		if (ridx == sc->sc_rspring.size) {
			/* Wrap the source pointer back to the ring start. */
			src = sc->sc_rspring.base;
			ridx = 0;
		} else
			src++;
		/* Publish progress to the firmware after each descriptor. */
		sc->sc_rspring.lastwrite = hv->hv_resp_read_idx = ridx;
	}

	hv->hv_resp_read_idx = ridx;
}
1587 
1588 int
1589 txp_cmd_desc_numfree(sc)
1590 	struct txp_softc *sc;
1591 {
1592 	struct txp_hostvar *hv = sc->sc_hostvar;
1593 	struct txp_boot_record *br = sc->sc_boot;
1594 	u_int32_t widx, ridx, nfree;
1595 
1596 	widx = sc->sc_cmdring.lastwrite;
1597 	ridx = hv->hv_cmd_read_idx;
1598 
1599 	if (widx == ridx) {
1600 		/* Ring is completely free */
1601 		nfree = br->br_cmd_siz - sizeof(struct txp_cmd_desc);
1602 	} else {
1603 		if (widx > ridx)
1604 			nfree = br->br_cmd_siz -
1605 			    (widx - ridx + sizeof(struct txp_cmd_desc));
1606 		else
1607 			nfree = ridx - widx - sizeof(struct txp_cmd_desc);
1608 	}
1609 
1610 	return (nfree / sizeof(struct txp_cmd_desc));
1611 }
1612 
/*
 * Stop the interface: tell the firmware to quit moving packets and
 * cancel the periodic statistics timer.
 */
void
txp_stop(sc)
	struct txp_softc *sc;
{
	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	if (timeout_pending(&sc->sc_tick))
		timeout_del(&sc->sc_tick);
}
1623 
/*
 * Watchdog entry point.  Intentionally a no-op: transmit-timeout
 * recovery is not implemented for this device.
 */
void
txp_watchdog(ifp)
	struct ifnet *ifp;
{
}
1629 
1630 int
1631 txp_ifmedia_upd(ifp)
1632 	struct ifnet *ifp;
1633 {
1634 	struct txp_softc *sc = ifp->if_softc;
1635 	struct ifmedia *ifm = &sc->sc_ifmedia;
1636 	u_int16_t new_xcvr;
1637 
1638 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1639 		return (EINVAL);
1640 
1641 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
1642 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1643 			new_xcvr = TXP_XCVR_10_FDX;
1644 		else
1645 			new_xcvr = TXP_XCVR_10_HDX;
1646 	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
1647 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1648 			new_xcvr = TXP_XCVR_100_FDX;
1649 		else
1650 			new_xcvr = TXP_XCVR_100_HDX;
1651 	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
1652 		new_xcvr = TXP_XCVR_AUTO;
1653 	} else
1654 		return (EINVAL);
1655 
1656 	/* nothing to do */
1657 	if (sc->sc_xcvr == new_xcvr)
1658 		return (0);
1659 
1660 	txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
1661 	    NULL, NULL, NULL, 0);
1662 	sc->sc_xcvr = new_xcvr;
1663 
1664 	return (0);
1665 }
1666 
/*
 * Report current media status by reading the PHY registers through the
 * firmware's PHY management command.
 */
void
txp_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	u_int16_t bmsr, bmcr, anlpar;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/*
	 * BMSR is read twice on purpose — NOTE(review): presumably
	 * because the link status bit is latched and the first read can
	 * return stale state; confirm against the PHY datasheet.
	 */
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
	    &bmcr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
	    &anlpar, NULL, NULL, 1))
		goto bail;

	if (bmsr & BMSR_LINK)
		ifmr->ifm_status |= IFM_ACTIVE;

	/* PHY isolated from the MII: report no media. */
	if (bmcr & BMCR_ISO) {
		ifmr->ifm_active |= IFM_NONE;
		ifmr->ifm_status = 0;
		return;
	}

	if (bmcr & BMCR_LOOP)
		ifmr->ifm_active |= IFM_LOOP;

	if (bmcr & BMCR_AUTOEN) {
		/* Autonegotiation still in progress: no media yet. */
		if ((bmsr & BMSR_ACOMP) == 0) {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		/* Decode the link partner's best advertised ability. */
		if (anlpar & ANLPAR_T4)
			ifmr->ifm_active |= IFM_100_T4;
		else if (anlpar & ANLPAR_TX_FD)
			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
		else if (anlpar & ANLPAR_TX)
			ifmr->ifm_active |= IFM_100_TX;
		else if (anlpar & ANLPAR_10_FD)
			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
		else if (anlpar & ANLPAR_10)
			ifmr->ifm_active |= IFM_10_T;
		else
			ifmr->ifm_active |= IFM_NONE;
	} else
		/* Fixed media: report whatever was configured. */
		ifmr->ifm_active = ifm->ifm_cur->ifm_media;
	return;

bail:
	/* PHY read failed: status is unknown. */
	ifmr->ifm_active |= IFM_NONE;
	ifmr->ifm_status &= ~IFM_AVALID;
}
1732 
1733 void
1734 txp_show_descriptor(d)
1735 	void *d;
1736 {
1737 	struct txp_cmd_desc *cmd = d;
1738 	struct txp_rsp_desc *rsp = d;
1739 	struct txp_tx_desc *txd = d;
1740 	struct txp_frag_desc *frgd = d;
1741 
1742 	switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
1743 	case CMD_FLAGS_TYPE_CMD:
1744 		/* command descriptor */
1745 		printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
1746 		    cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, cmd->cmd_seq,
1747 		    cmd->cmd_par1, cmd->cmd_par2, cmd->cmd_par3);
1748 		break;
1749 	case CMD_FLAGS_TYPE_RESP:
1750 		/* response descriptor */
1751 		printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
1752 		    rsp->rsp_flags, rsp->rsp_numdesc, rsp->rsp_id, rsp->rsp_seq,
1753 		    rsp->rsp_par1, rsp->rsp_par2, rsp->rsp_par3);
1754 		break;
1755 	case CMD_FLAGS_TYPE_DATA:
1756 		/* data header (assuming tx for now) */
1757 		printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
1758 		    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
1759 		    txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags);
1760 		break;
1761 	case CMD_FLAGS_TYPE_FRAG:
1762 		/* fragment descriptor */
1763 		printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
1764 		    frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len,
1765 		    frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2);
1766 		break;
1767 	default:
1768 		printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
1769 		    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
1770 		    cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, cmd->cmd_seq,
1771 		    cmd->cmd_par1, cmd->cmd_par2, cmd->cmd_par3);
1772 		break;
1773 	}
1774 }
1775 
/*
 * Program the receive filter from the interface state: promiscuous,
 * broadcast, all-multicast, or a 64-bit multicast hash built from the
 * CRC of each group address.
 */
void
txp_set_filter(sc)
	struct txp_softc *sc;
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t crc, carry, hashbit, hash[2];
	u_int16_t filter;
	u_int8_t octet;
	int i, j, mcnt = 0;
	struct ether_multi *enm;
	struct ether_multistep step;

	/* Promiscuous overrides everything else. */
	if (ifp->if_flags & IFF_PROMISC) {
		filter = TXP_RXFILT_PROMISC;
		goto setit;
	}

again:
	filter = TXP_RXFILT_DIRECT;

	if (ifp->if_flags & IFF_BROADCAST)
		filter |= TXP_RXFILT_BROADCAST;

	if (ifp->if_flags & IFF_ALLMULTI)
		filter |= TXP_RXFILT_ALLMULTI;
	else {
		hash[0] = hash[1] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
				/*
				 * We must listen to a range of multicast
				 * addresses.  For now, just accept all
				 * multicasts, rather than trying to set only
				 * those filter bits needed to match the range.
				 * (At this time, the only use of address
				 * ranges is for IP multicast routing, for
				 * which the range is big enough to require
				 * all bits set.)
				 */
				ifp->if_flags |= IFF_ALLMULTI;
				goto again;
			}

			mcnt++;
			crc = 0xffffffff;

			/*
			 * Bitwise CRC over the address, MSB-first per bit.
			 * NOTE(review): `(crc ^ TXP_POLYNOMIAL) | carry` ORs
			 * the carry into bit 0 rather than XORing — matches
			 * the original code; confirm against the hardware's
			 * hash definition before changing.
			 */
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				octet = enm->enm_addrlo[i];
				for (j = 0; j < 8; j++) {
					carry = ((crc & 0x80000000) ? 1 : 0) ^
					    (octet & 1);
					crc <<= 1;
					octet >>= 1;
					if (carry)
						crc = (crc ^ TXP_POLYNOMIAL) |
						    carry;
				}
			}
			/* Low 6 CRC bits select one of 64 hash-table bits. */
			hashbit = (u_int16_t)(crc & (64 - 1));
			hash[hashbit / 32] |= (1 << hashbit % 32);
			ETHER_NEXT_MULTI(step, enm);
		}

		if (mcnt > 0) {
			filter |= TXP_RXFILT_HASHMULTI;
			/* Push the two 32-bit hash words to the firmware. */
			txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE,
			    2, hash[0], hash[1], NULL, NULL, NULL, 0);
		}
	}

setit:
	txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
	    NULL, NULL, NULL, 1);
}
1853 
/*
 * Query the firmware's offload capabilities, record the supported
 * tx/rx offload masks, advertise the matching interface capabilities,
 * and write the chosen offload configuration back to the firmware.
 */
void
txp_capabilities(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;

	if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1))
		goto out;

	/* The offload-read response carries exactly one extension desc. */
	if (rsp->rsp_numdesc != 1)
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	/* Currently enabled offloads, restricted to ones we understand. */
	sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK;
	sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK;

	/*
	 * NOTE(review): rsp_par2/rsp_par3 are treated below as the
	 * firmware's supported tx/rx offload masks — confirm against the
	 * Typhoon command documentation.
	 */
#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) {
		sc->sc_tx_capability |= OFFLOAD_VLAN;
		sc->sc_rx_capability |= OFFLOAD_VLAN;
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	}
#endif

#if 0
	/* not ready yet */
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) {
		sc->sc_tx_capability |= OFFLOAD_IPSEC;
		sc->sc_rx_capability |= OFFLOAD_IPSEC;
		ifp->if_capabilities |= IFCAP_IPSEC;
	}
#endif

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) {
		sc->sc_tx_capability |= OFFLOAD_IPCKSUM;
		sc->sc_rx_capability |= OFFLOAD_IPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
	}

	/* TCP/UDP checksum offload: rx side only for now (tx disabled). */
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) {
#if 0
		sc->sc_tx_capability |= OFFLOAD_TCPCKSUM;
#endif
		sc->sc_rx_capability |= OFFLOAD_TCPCKSUM;
#if 0
		ifp->if_capabilities |= IFCAP_CSUM_TCPv4;
#endif
	}

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) {
#if 0
		sc->sc_tx_capability |= OFFLOAD_UDPCKSUM;
#endif
		sc->sc_rx_capability |= OFFLOAD_UDPCKSUM;
#if 0
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4;
#endif
	}

	/* Commit the chosen offload configuration to the firmware. */
	if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0,
	    sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1))
		goto out;

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);
}
1924