xref: /netbsd-src/sys/dev/pci/if_txp.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /* $NetBSD: if_txp.c,v 1.49 2018/06/26 06:48:01 msaitoh Exp $ */
2 
3 /*
4  * Copyright (c) 2001
5  *	Jason L. Wright <jason@thought.net>, Theo de Raadt, and
6  *	Aaron Campbell <aaron@monkey.org>.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR THE VOICES IN THEIR HEADS
21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  * Driver for 3c990 (Typhoon) Ethernet ASIC
32  */
33 
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: if_txp.c,v 1.49 2018/06/26 06:48:01 msaitoh Exp $");
36 
37 #include "opt_inet.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/sockio.h>
42 #include <sys/mbuf.h>
43 #include <sys/malloc.h>
44 #include <sys/kernel.h>
45 #include <sys/socket.h>
46 #include <sys/device.h>
47 #include <sys/callout.h>
48 
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_types.h>
52 #include <net/if_ether.h>
53 #include <net/if_arp.h>
54 
55 #ifdef INET
56 #include <netinet/in.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/in_var.h>
59 #include <netinet/ip.h>
60 #include <netinet/if_inarp.h>
61 #endif
62 
63 #include <net/if_media.h>
64 
65 #include <net/bpf.h>
66 
67 #include <sys/bus.h>
68 
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
71 #include <dev/pci/pcireg.h>
72 #include <dev/pci/pcivar.h>
73 #include <dev/pci/pcidevs.h>
74 
75 #include <dev/pci/if_txpreg.h>
76 
77 #include <dev/microcode/typhoon/3c990img.h>
78 
79 /*
80  * These currently break the 3c990 firmware; hopefully this will be
81  * resolved at some point.
82  */
83 #undef	TRY_TX_UDP_CSUM
84 #undef	TRY_TX_TCP_CSUM
85 
86 int txp_probe(device_t, cfdata_t, void *);
87 void txp_attach(device_t, device_t, void *);
88 int txp_intr(void *);
89 void txp_tick(void *);
90 bool txp_shutdown(device_t, int);
91 int txp_ioctl(struct ifnet *, u_long, void *);
92 void txp_start(struct ifnet *);
93 void txp_stop(struct txp_softc *);
94 void txp_init(struct txp_softc *);
95 void txp_watchdog(struct ifnet *);
96 
97 int txp_chip_init(struct txp_softc *);
98 int txp_reset_adapter(struct txp_softc *);
99 int txp_download_fw(struct txp_softc *);
100 int txp_download_fw_wait(struct txp_softc *);
101 int txp_download_fw_section(struct txp_softc *,
102     const struct txp_fw_section_header *, int);
103 int txp_alloc_rings(struct txp_softc *);
104 void txp_dma_free(struct txp_softc *, struct txp_dma_alloc *);
105 int txp_dma_malloc(struct txp_softc *, bus_size_t, struct txp_dma_alloc *, int);
106 void txp_set_filter(struct txp_softc *);
107 
108 int txp_cmd_desc_numfree(struct txp_softc *);
109 int txp_command(struct txp_softc *, u_int16_t, u_int16_t, u_int32_t,
110     u_int32_t, u_int16_t *, u_int32_t *, u_int32_t *, int);
111 int txp_command2(struct txp_softc *, u_int16_t, u_int16_t,
112     u_int32_t, u_int32_t, struct txp_ext_desc *, u_int8_t,
113     struct txp_rsp_desc **, int);
114 int txp_response(struct txp_softc *, u_int32_t, u_int16_t, u_int16_t,
115     struct txp_rsp_desc **);
116 void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
117     struct txp_rsp_desc *);
118 void txp_capabilities(struct txp_softc *);
119 
120 void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
121 int txp_ifmedia_upd(struct ifnet *);
122 void txp_show_descriptor(void *);
123 void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *,
124     struct txp_dma_alloc *);
125 void txp_rxbuf_reclaim(struct txp_softc *);
126 void txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *,
127     struct txp_dma_alloc *);
128 
129 CFATTACH_DECL_NEW(txp, sizeof(struct txp_softc), txp_probe, txp_attach,
130 	      NULL, NULL);
131 
132 const struct txp_pci_match {
133 	int vid, did, flags;
134 } txp_devices[] = {
135 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990, 0 },
136 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX95, 0 },
137 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX97, 0 },
138 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR95, TXP_SERVERVERSION },
139 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR97, TXP_SERVERVERSION },
140 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990B, TXP_USESUBSYSTEM },
141 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990BSVR, TXP_SERVERVERSION },
142 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990FX, TXP_USESUBSYSTEM },
143 };
144 
145 static const struct txp_pci_match *txp_pcilookup(pcireg_t);
146 
147 static const struct {
148 	u_int16_t mask, value;
149 	int flags;
150 } txp_subsysinfo[] = {
151 	{0xf000, 0x2000, TXP_SERVERVERSION},
152 	{0x0100, 0x0100, TXP_FIBER},
153 #if 0 /* information from 3com header, unused */
154 	{0x0010, 0x0010, /* secured firmware */},
155 	{0x0003, 0x0000, /* variable DES */},
156 	{0x0003, 0x0001, /* single DES - "95" */},
157 	{0x0003, 0x0002, /* triple DES - "97" */},
158 #endif
159 };
160 
161 static const struct txp_pci_match *
162 txp_pcilookup(pcireg_t id)
163 {
164 	int i;
165 
166 	for (i = 0; i < __arraycount(txp_devices); i++)
167 		if (PCI_VENDOR(id) == txp_devices[i].vid &&
168 		    PCI_PRODUCT(id) == txp_devices[i].did)
169 			return &txp_devices[i];
170 	return (NULL);
171 }
172 
173 int
174 txp_probe(device_t parent, cfdata_t match, void *aux)
175 {
176 	struct pci_attach_args *pa = aux;
177 
178 	if (txp_pcilookup(pa->pa_id))
179 			return (1);
180 	return (0);
181 }
182 
183 void
184 txp_attach(device_t parent, device_t self, void *aux)
185 {
186 	struct txp_softc *sc = device_private(self);
187 	struct pci_attach_args *pa = aux;
188 	pci_chipset_tag_t pc = pa->pa_pc;
189 	pci_intr_handle_t ih;
190 	const char *intrstr = NULL;
191 	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
192 	u_int32_t command;
193 	u_int16_t p1;
194 	u_int32_t p2;
195 	u_char enaddr[6];
196 	const struct txp_pci_match *match;
197 	u_int16_t subsys;
198 	int i, flags;
199 	char devinfo[256];
200 	char intrbuf[PCI_INTRSTR_LEN];
201 
202 	sc->sc_dev = self;
203 	sc->sc_cold = 1;
204 
205 	match = txp_pcilookup(pa->pa_id);
206 	flags = match->flags;
207 	if (match->flags & TXP_USESUBSYSTEM) {
208 		subsys = PCI_PRODUCT(pci_conf_read(pc, pa->pa_tag,
209 						   PCI_SUBSYS_ID_REG));
210 		for (i = 0;
211 		     i < sizeof(txp_subsysinfo)/sizeof(txp_subsysinfo[0]);
212 		     i++)
213 			if ((subsys & txp_subsysinfo[i].mask) ==
214 			    txp_subsysinfo[i].value)
215 				flags |= txp_subsysinfo[i].flags;
216 	}
217 	sc->sc_flags = flags;
218 
219 	aprint_naive("\n");
220 	pci_devinfo(pa->pa_id, 0, 0, devinfo, sizeof(devinfo));
221 #define TXP_EXTRAINFO ((flags & (TXP_USESUBSYSTEM|TXP_SERVERVERSION)) == \
222   (TXP_USESUBSYSTEM|TXP_SERVERVERSION) ? " (SVR)" : "")
223 	aprint_normal(": %s%s\n%s", devinfo, TXP_EXTRAINFO,
224 	    device_xname(sc->sc_dev));
225 
226 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
227 
228 	if (!(command & PCI_COMMAND_MASTER_ENABLE)) {
229 		aprint_error(": failed to enable bus mastering\n");
230 		return;
231 	}
232 
233 	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
234 		aprint_error(": failed to enable memory mapping\n");
235 		return;
236 	}
237 	if (pci_mapreg_map(pa, TXP_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
238 	    &sc->sc_bt, &sc->sc_bh, NULL, NULL)) {
239 		aprint_error(": can't map mem space %d\n", 0);
240 		return;
241 	}
242 
243 	sc->sc_dmat = pa->pa_dmat;
244 
245 	/*
246 	 * Allocate our interrupt.
247 	 */
248 	if (pci_intr_map(pa, &ih)) {
249 		aprint_error(": couldn't map interrupt\n");
250 		return;
251 	}
252 
253 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
254 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, txp_intr, sc);
255 	if (sc->sc_ih == NULL) {
256 		aprint_error(": couldn't establish interrupt");
257 		if (intrstr != NULL)
258 			aprint_normal(" at %s", intrstr);
259 		aprint_normal("\n");
260 		return;
261 	}
262 	aprint_normal(": interrupting at %s\n", intrstr);
263 
264 	if (txp_chip_init(sc))
265 		goto cleanupintr;
266 
267 	if (txp_download_fw(sc))
268 		goto cleanupintr;
269 
270 	if (txp_alloc_rings(sc))
271 		goto cleanupintr;
272 
273 	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
274 	    NULL, NULL, NULL, 1))
275 		goto cleanupintr;
276 
277 	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
278 	    &p1, &p2, NULL, 1))
279 		goto cleanupintr;
280 
281 	txp_set_filter(sc);
282 
283 	p1 = htole16(p1);
284 	enaddr[0] = ((u_int8_t *)&p1)[1];
285 	enaddr[1] = ((u_int8_t *)&p1)[0];
286 	p2 = htole32(p2);
287 	enaddr[2] = ((u_int8_t *)&p2)[3];
288 	enaddr[3] = ((u_int8_t *)&p2)[2];
289 	enaddr[4] = ((u_int8_t *)&p2)[1];
290 	enaddr[5] = ((u_int8_t *)&p2)[0];
291 
292 	aprint_normal_dev(self, "Ethernet address %s\n",
293 	    ether_sprintf(enaddr));
294 	sc->sc_cold = 0;
295 
296 	ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
297 	if (flags & TXP_FIBER) {
298 		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_FX,
299 			    0, NULL);
300 		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_FX|IFM_HDX,
301 			    0, NULL);
302 		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_FX|IFM_FDX,
303 			    0, NULL);
304 	} else {
305 		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T,
306 			    0, NULL);
307 		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX,
308 			    0, NULL);
309 		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX,
310 			    0, NULL);
311 		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX,
312 			    0, NULL);
313 		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX,
314 			    0, NULL);
315 		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX,
316 			    0, NULL);
317 	}
318 	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
319 
320 	sc->sc_xcvr = TXP_XCVR_AUTO;
321 	txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
322 	    NULL, NULL, NULL, 0);
323 	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);
324 
325 	ifp->if_softc = sc;
326 	ifp->if_mtu = ETHERMTU;
327 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
328 	ifp->if_ioctl = txp_ioctl;
329 	ifp->if_start = txp_start;
330 	ifp->if_watchdog = txp_watchdog;
331 	ifp->if_baudrate = 10000000;
332 	IFQ_SET_MAXLEN(&ifp->if_snd, TX_ENTRIES);
333 	IFQ_SET_READY(&ifp->if_snd);
334 	ifp->if_capabilities = 0;
335 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
336 
337 	txp_capabilities(sc);
338 
339 	callout_init(&sc->sc_tick, 0);
340 	callout_setfunc(&sc->sc_tick, txp_tick, sc);
341 
342 	/*
343 	 * Attach us everywhere
344 	 */
345 	if_attach(ifp);
346 	if_deferred_start_init(ifp, NULL);
347 	ether_ifattach(ifp, enaddr);
348 
349 	if (pmf_device_register1(self, NULL, NULL, txp_shutdown))
350 		pmf_class_network_register(self, ifp);
351 	else
352 		aprint_error_dev(self, "couldn't establish power handler\n");
353 
354 	return;
355 
356 cleanupintr:
357 	pci_intr_disestablish(pc, sc->sc_ih);
358 
359 	return;
360 
361 }
362 
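/*
 * Quiesce the chip: interrupts are masked and acked both before and
 * after the soft reset so that no stale events survive it.
 */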
363 int
364 txp_chip_init(struct txp_softc *sc)
365 {
366 	/* disable interrupts */
367 	WRITE_REG(sc, TXP_IER, 0);
368 	WRITE_REG(sc, TXP_IMR,
369 	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
370 	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
371 	    TXP_INT_LATCH);
372 
373 	/* ack all interrupts */
374 	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
375 	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
376 	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
377 	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
378 	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);
379 
380 	if (txp_reset_adapter(sc))
381 		return (-1);
382 
383 	/* disable interrupts */
384 	WRITE_REG(sc, TXP_IER, 0);
385 	WRITE_REG(sc, TXP_IMR,
386 	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
387 	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
388 	    TXP_INT_LATCH);
389 
390 	/* ack all interrupts */
391 	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
392 	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
393 	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
394 	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
395 	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);
396 
397 	return (0);
398 }
399 
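/*
 * Issue a full soft reset, then poll the A2H_0 mailbox (for up to six
 * seconds) until the boot firmware reports STAT_WAITING_FOR_HOST_REQUEST.
 */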
400 int
401 txp_reset_adapter(struct txp_softc *sc)
402 {
403 	u_int32_t r;
404 	int i;
405 
406 	WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
407 	DELAY(1000);
408 	WRITE_REG(sc, TXP_SRR, 0);
409 
410 	/* Should wait max 6 seconds */
411 	for (i = 0; i < 6000; i++) {
412 		r = READ_REG(sc, TXP_A2H_0);
413 		if (r == STAT_WAITING_FOR_HOST_REQUEST)
414 			break;
415 		DELAY(1000);
416 	}
417 
418 	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
419 		printf("%s: reset hung\n", TXP_DEVNAME(sc));
420 		return (-1);
421 	}
422 
423 	return (0);
424 }
425 
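/*
 * Download the runtime image: wait for the boot firmware to ask for a
 * host request, announce the image via H2A_1/H2A_0, stream each section
 * to the card, then signal DOWNLOAD_COMPLETE and wait for the firmware
 * to drop back to STAT_WAITING_FOR_BOOT.
 */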
426 int
427 txp_download_fw(struct txp_softc *sc)
428 {
429 	const struct txp_fw_file_header *fileheader;
430 	const struct txp_fw_section_header *secthead;
431 	int sect;
432 	u_int32_t r, i, ier, imr;
433 
434 	ier = READ_REG(sc, TXP_IER);
435 	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);
436 
437 	imr = READ_REG(sc, TXP_IMR);
438 	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);
439 
440 	for (i = 0; i < 10000; i++) {
441 		r = READ_REG(sc, TXP_A2H_0);
442 		if (r == STAT_WAITING_FOR_HOST_REQUEST)
443 			break;
444 		DELAY(50);
445 	}
446 	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
447 		printf(": not waiting for host request\n");
448 		return (-1);
449 	}
450 
451 	/* Ack the status */
452 	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
453 
454 	fileheader = (const struct txp_fw_file_header *)tc990image;
455 	if (memcmp("TYPHOON", fileheader->magicid,
456 	    sizeof(fileheader->magicid))) {
457 		printf(": fw invalid magic\n");
458 		return (-1);
459 	}
460 
461 	/* Tell boot firmware to get ready for image */
462 	WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr));
463 	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);
464 
465 	if (txp_download_fw_wait(sc)) {
466 		printf("%s: fw wait failed, initial\n",
467 		    device_xname(sc->sc_dev));
468 		return (-1);
469 	}
470 
471 	secthead = (const struct txp_fw_section_header *)
472 		(((const u_int8_t *)tc990image) +
473 		 sizeof(struct txp_fw_file_header));
474 
475 	for (sect = 0; sect < le32toh(fileheader->nsections); sect++) {
476 		if (txp_download_fw_section(sc, secthead, sect))
477 			return (-1);
478 		secthead = (const struct txp_fw_section_header *)
479 		    (((const u_int8_t *)secthead) + le32toh(secthead->nbytes) +
480 			sizeof(*secthead));
481 	}
482 
483 	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);
484 
485 	for (i = 0; i < 10000; i++) {
486 		r = READ_REG(sc, TXP_A2H_0);
487 		if (r == STAT_WAITING_FOR_BOOT)
488 			break;
489 		DELAY(50);
490 	}
491 	if (r != STAT_WAITING_FOR_BOOT) {
492 		printf(": not waiting for boot\n");
493 		return (-1);
494 	}
495 
496 	WRITE_REG(sc, TXP_IER, ier);
497 	WRITE_REG(sc, TXP_IMR, imr);
498 
499 	return (0);
500 }
501 
502 int
503 txp_download_fw_wait(struct txp_softc *sc)
504 {
505 	u_int32_t i, r;
506 
507 	for (i = 0; i < 10000; i++) {
508 		r = READ_REG(sc, TXP_ISR);
509 		if (r & TXP_INT_A2H_0)
510 			break;
511 		DELAY(50);
512 	}
513 
514 	if (!(r & TXP_INT_A2H_0)) {
515 		printf(": fw wait failed comm0\n");
516 		return (-1);
517 	}
518 
519 	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);
520 
521 	r = READ_REG(sc, TXP_A2H_0);
522 	if (r != STAT_WAITING_FOR_SEGMENT) {
523 		printf(": fw not waiting for segment\n");
524 		return (-1);
525 	}
526 	return (0);
527 }
528 
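/*
 * Copy one firmware section into a DMA-safe bounce buffer, verify its
 * checksum (via a dummy mbuf and in_cksum), and hand the buffer's
 * physical address to the boot firmware one segment at a time.
 */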
529 int
530 txp_download_fw_section(struct txp_softc *sc,
531     const struct txp_fw_section_header *sect, int sectnum)
532 {
533 	struct txp_dma_alloc dma;
534 	int rseg, err = 0;
535 	struct mbuf m;
536 #ifdef INET
537 	u_int16_t csum;
538 #endif
539 
540 	/* Skip zero length sections */
541 	if (sect->nbytes == 0)
542 		return (0);
543 
544 	/* Make sure we aren't past the end of the image */
545 	rseg = ((const u_int8_t *)sect) - ((const u_int8_t *)tc990image);
546 	if (rseg >= sizeof(tc990image)) {
547 		printf(": fw invalid section address, section %d\n", sectnum);
548 		return (-1);
549 	}
550 
551 	/* Make sure this section doesn't go past the end */
552 	rseg += le32toh(sect->nbytes);
553 	if (rseg >= sizeof(tc990image)) {
554 		printf(": fw truncated section %d\n", sectnum);
555 		return (-1);
556 	}
557 
558 	/* map a buffer, copy segment to it, get physaddr */
559 	if (txp_dma_malloc(sc, le32toh(sect->nbytes), &dma, 0)) {
560 		printf(": fw dma malloc failed, section %d\n", sectnum);
561 		return (-1);
562 	}
563 
564 	memcpy(dma.dma_vaddr, ((const u_int8_t *)sect) + sizeof(*sect),
565 	    le32toh(sect->nbytes));
566 
567 	/*
568 	 * dummy up mbuf and verify section checksum
569 	 */
570 	m.m_type = MT_DATA;
571 	m.m_next = m.m_nextpkt = NULL;
572 	m.m_len = le32toh(sect->nbytes);
573 	m.m_data = dma.dma_vaddr;
574 	m.m_flags = 0;
575 #ifdef INET
576 	csum = in_cksum(&m, le32toh(sect->nbytes));
577 	if (csum != sect->cksum) {
578 		printf(": fw section %d, bad cksum (expected 0x%x got 0x%x)\n",
579 		    sectnum, sect->cksum, csum);
580 		txp_dma_free(sc, &dma);
581 		return -1;
582 	}
583 #endif
584 
585 	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
586 	    dma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
587 
588 	WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes));
589 	WRITE_REG(sc, TXP_H2A_2, le32toh(sect->cksum));
590 	WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr));
591 	WRITE_REG(sc, TXP_H2A_4, dma.dma_paddr >> 32);
592 	WRITE_REG(sc, TXP_H2A_5, dma.dma_paddr & 0xffffffff);
593 	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);
594 
595 	if (txp_download_fw_wait(sc)) {
596 		printf("%s: fw wait failed, section %d\n",
597 		    device_xname(sc->sc_dev), sectnum);
598 		err = -1;
599 	}
600 
601 	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
602 	    dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
603 
604 	txp_dma_free(sc, &dma);
605 	return (err);
606 }
607 
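/*
 * Interrupt handler.  All sources are masked (set bits in IMR mask the
 * corresponding interrupt) while the ISR is drained; on exit everything
 * is unmasked again except TXP_INT_A2H_3.
 */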
608 int
609 txp_intr(void *vsc)
610 {
611 	struct txp_softc *sc = vsc;
612 	struct txp_hostvar *hv = sc->sc_hostvar;
613 	u_int32_t isr;
614 	int claimed = 0;
615 
616 	/* mask all interrupts */
617 	WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF |
618 	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
619 	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
620 	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
621 	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |  TXP_INT_LATCH);
622 
623 	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
624 	    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);
625 
626 	isr = READ_REG(sc, TXP_ISR);
627 	while (isr) {
628 		claimed = 1;
629 		WRITE_REG(sc, TXP_ISR, isr);
630 
631 		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
632 			txp_rx_reclaim(sc, &sc->sc_rxhir, &sc->sc_rxhiring_dma);
633 		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
634 			txp_rx_reclaim(sc, &sc->sc_rxlor, &sc->sc_rxloring_dma);
635 
636 		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
637 			txp_rxbuf_reclaim(sc);
638 
639 		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
640 		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
641 			txp_tx_reclaim(sc, &sc->sc_txhir, &sc->sc_txhiring_dma);
642 
643 		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
644 		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
645 			txp_tx_reclaim(sc, &sc->sc_txlor, &sc->sc_txloring_dma);
646 
647 		isr = READ_REG(sc, TXP_ISR);
648 	}
649 
650 	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
651 	    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);
652 
653 	/* unmask all interrupts */
654 	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);
655 
656 	if_schedule_deferred_start(&sc->sc_arpcom.ec_if);
657 
658 	return (claimed);
659 }
660 
661 void
662 txp_rx_reclaim(struct txp_softc *sc, struct txp_rx_ring *r,
663     struct txp_dma_alloc *dma)
664 {
665 	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
666 	struct txp_rx_desc *rxd;
667 	struct mbuf *m;
668 	struct txp_swdesc *sd;
669 	u_int32_t roff, woff;
670 	int sumflags = 0;
671 	int idx;
672 
673 	roff = le32toh(*r->r_roff);
674 	woff = le32toh(*r->r_woff);
675 	idx = roff / sizeof(struct txp_rx_desc);
676 	rxd = r->r_desc + idx;
677 
678 	while (roff != woff) {
679 
680 		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
681 		    idx * sizeof(struct txp_rx_desc),
682 		    sizeof(struct txp_rx_desc), BUS_DMASYNC_POSTREAD);
683 
684 		if (rxd->rx_flags & RX_FLAGS_ERROR) {
685 			printf("%s: error 0x%x\n", device_xname(sc->sc_dev),
686 			    le32toh(rxd->rx_stat));
687 			ifp->if_ierrors++;
688 			goto next;
689 		}
690 
691 		/* retrieve stashed pointer */
692 		memcpy(&sd, __UNVOLATILE(&rxd->rx_vaddrlo), sizeof(sd));
693 
694 		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
695 		    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
696 		bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
697 		bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
698 		m = sd->sd_mbuf;
699 		free(sd, M_DEVBUF);
700 		m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len);
701 
702 #ifdef __STRICT_ALIGNMENT
703 		{
704 			/*
705 			 * XXX Nice chip, except it won't accept "off by 2"
706 			 * buffers, so we're forced to copy.  Supposedly
707 			 * this will be fixed in a newer firmware rev
708 			 * and this will be temporary.
709 			 */
710 			struct mbuf *mnew;
711 
712 			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
713 			if (mnew == NULL) {
714 				m_freem(m);
715 				goto next;
716 			}
717 			if (m->m_len > (MHLEN - 2)) {
718 				MCLGET(mnew, M_DONTWAIT);
719 				if (!(mnew->m_flags & M_EXT)) {
720 					m_freem(mnew);
721 					m_freem(m);
722 					goto next;
723 				}
724 			}
725 			m_set_rcvif(mnew, ifp);
726 			mnew->m_pkthdr.len = mnew->m_len = m->m_len;
727 			mnew->m_data += 2;
728 			memcpy(mnew->m_data, m->m_data, m->m_len);
729 			m_freem(m);
730 			m = mnew;
731 		}
732 #endif
733 
734 		if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMBAD))
735 			sumflags |= (M_CSUM_IPv4|M_CSUM_IPv4_BAD);
736 		else if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMGOOD))
737 			sumflags |= M_CSUM_IPv4;
738 
739 		if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMBAD))
740 			sumflags |= (M_CSUM_TCPv4|M_CSUM_TCP_UDP_BAD);
741 		else if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMGOOD))
742 			sumflags |= M_CSUM_TCPv4;
743 
744 		if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMBAD))
745 			sumflags |= (M_CSUM_UDPv4|M_CSUM_TCP_UDP_BAD);
746 		else if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMGOOD))
747 			sumflags |= M_CSUM_UDPv4;
748 
749 		m->m_pkthdr.csum_flags = sumflags;
750 
751 		if (rxd->rx_stat & htole32(RX_STAT_VLAN)) {
752 			vlan_set_tag(m, htons(rxd->rx_vlan >> 16));
753 		}
754 
755 		if_percpuq_enqueue(ifp->if_percpuq, m);
756 
757 next:
758 		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
759 		    idx * sizeof(struct txp_rx_desc),
760 		    sizeof(struct txp_rx_desc), BUS_DMASYNC_PREREAD);
761 
762 		roff += sizeof(struct txp_rx_desc);
763 		if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
764 			idx = 0;
765 			roff = 0;
766 			rxd = r->r_desc;
767 		} else {
768 			idx++;
769 			rxd++;
770 		}
771 		woff = le32toh(*r->r_woff);
772 	}
773 
774 	*r->r_roff = htole32(woff);
775 }
776 
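/*
 * Replenish the free receive-buffer ring: allocate an mbuf cluster and
 * a DMA map per slot, stash the software descriptor pointer in the
 * descriptor's vaddr words, and advance the write index for the NIC.
 */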
777 void
778 txp_rxbuf_reclaim(struct txp_softc *sc)
779 {
780 	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
781 	struct txp_hostvar *hv = sc->sc_hostvar;
782 	struct txp_rxbuf_desc *rbd;
783 	struct txp_swdesc *sd;
784 	u_int32_t i, end;
785 
786 	end = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx));
787 	i = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_write_idx));
788 
789 	if (++i == RXBUF_ENTRIES)
790 		i = 0;
791 
792 	rbd = sc->sc_rxbufs + i;
793 
794 	while (i != end) {
795 		sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
796 		    M_DEVBUF, M_NOWAIT);
797 		if (sd == NULL)
798 			break;
799 
800 		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
801 		if (sd->sd_mbuf == NULL)
802 			goto err_sd;
803 
804 		MCLGET(sd->sd_mbuf, M_DONTWAIT);
805 		if ((sd->sd_mbuf->m_flags & M_EXT) == 0)
806 			goto err_mbuf;
807 		m_set_rcvif(sd->sd_mbuf, ifp);
808 		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
809 		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
810 		    TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map))
811 			goto err_mbuf;
812 		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
813 		    BUS_DMA_NOWAIT)) {
814 			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
815 			goto err_mbuf;
816 		}
817 
818 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
819 		    i * sizeof(struct txp_rxbuf_desc),
820 		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_POSTWRITE);
821 
822 		/* stash away pointer */
823 		memcpy(__UNVOLATILE(&rbd->rb_vaddrlo), &sd, sizeof(sd));
824 
825 		rbd->rb_paddrlo = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
826 		    & 0xffffffff;
827 		rbd->rb_paddrhi = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
828 		    >> 32;
829 
830 		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
831 		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
832 
833 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
834 		    i * sizeof(struct txp_rxbuf_desc),
835 		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_PREWRITE);
836 
837 		hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(i));
838 
839 		if (++i == RXBUF_ENTRIES) {
840 			i = 0;
841 			rbd = sc->sc_rxbufs;
842 		} else
843 			rbd++;
844 	}
845 	return;
846 
847 err_mbuf:
848 	m_freem(sd->sd_mbuf);
849 err_sd:
850 	free(sd, M_DEVBUF);
851 }
852 
853 /*
854  * Reclaim mbufs and entries from a transmit ring.
855  */
856 void
857 txp_tx_reclaim(struct txp_softc *sc, struct txp_tx_ring *r,
858     struct txp_dma_alloc *dma)
859 {
860 	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
861 	u_int32_t idx = TXP_OFFSET2IDX(le32toh(*(r->r_off)));
862 	u_int32_t cons = r->r_cons, cnt = r->r_cnt;
863 	struct txp_tx_desc *txd = r->r_desc + cons;
864 	struct txp_swdesc *sd = sc->sc_txd + cons;
865 	struct mbuf *m;
866 
867 	while (cons != idx) {
868 		if (cnt == 0)
869 			break;
870 
871 		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
872 		    cons * sizeof(struct txp_tx_desc),
873 		    sizeof(struct txp_tx_desc),
874 		    BUS_DMASYNC_POSTWRITE);
875 
876 		if ((txd->tx_flags & TX_FLAGS_TYPE_M) ==
877 		    TX_FLAGS_TYPE_DATA) {
878 			bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
879 			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
880 			bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
881 			m = sd->sd_mbuf;
882 			if (m != NULL) {
883 				m_freem(m);
884 				txd->tx_addrlo = 0;
885 				txd->tx_addrhi = 0;
886 				ifp->if_opackets++;
887 			}
888 		}
889 		ifp->if_flags &= ~IFF_OACTIVE;
890 
891 		if (++cons == TX_ENTRIES) {
892 			txd = r->r_desc;
893 			cons = 0;
894 			sd = sc->sc_txd;
895 		} else {
896 			txd++;
897 			sd++;
898 		}
899 
900 		cnt--;
901 	}
902 
903 	r->r_cons = cons;
904 	r->r_cnt = cnt;
905 	if (cnt == 0)
906 		ifp->if_timer = 0;
907 }
908 
909 bool
910 txp_shutdown(device_t self, int howto)
911 {
912 	struct txp_softc *sc;
913 
914 	sc = device_private(self);
915 
916 	/* mask all interrupts */
917 	WRITE_REG(sc, TXP_IMR,
918 	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
919 	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
920 	    TXP_INT_LATCH);
921 
922 	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
923 	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
924 	txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0);
925 
926 	return true;
927 }
928 
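/*
 * Allocate all shared host/NIC data structures (boot record, host
 * variables, tx/rx/command/response/rxbuf rings), record their physical
 * addresses in the boot record, and hand that record to the firmware
 * with TXP_BOOTCMD_REGISTER_BOOT_RECORD.
 */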
929 int
930 txp_alloc_rings(struct txp_softc *sc)
931 {
932 	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
933 	struct txp_boot_record *boot;
934 	struct txp_swdesc *sd;
935 	u_int32_t r;
936 	int i, j, nb;
937 
938 	/* boot record */
939 	if (txp_dma_malloc(sc, sizeof(struct txp_boot_record),
940 	    &sc->sc_boot_dma, BUS_DMA_COHERENT)) {
941 		printf(": can't allocate boot record\n");
942 		return (-1);
943 	}
944 	boot = (struct txp_boot_record *)sc->sc_boot_dma.dma_vaddr;
945 	memset(boot, 0, sizeof(*boot));
946 	sc->sc_boot = boot;
947 
948 	/* host variables */
949 	if (txp_dma_malloc(sc, sizeof(struct txp_hostvar), &sc->sc_host_dma,
950 	    BUS_DMA_COHERENT)) {
951 		printf(": can't allocate host ring\n");
952 		goto bail_boot;
953 	}
954 	memset(sc->sc_host_dma.dma_vaddr, 0, sizeof(struct txp_hostvar));
955 	boot->br_hostvar_lo = htole32(sc->sc_host_dma.dma_paddr & 0xffffffff);
956 	boot->br_hostvar_hi = htole32(sc->sc_host_dma.dma_paddr >> 32);
957 	sc->sc_hostvar = (struct txp_hostvar *)sc->sc_host_dma.dma_vaddr;
958 
959 	/* high priority tx ring */
960 	if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
961 	    &sc->sc_txhiring_dma, BUS_DMA_COHERENT)) {
962 		printf(": can't allocate high tx ring\n");
963 		goto bail_host;
964 	}
965 	memset(sc->sc_txhiring_dma.dma_vaddr, 0,
966 	    sizeof(struct txp_tx_desc) * TX_ENTRIES);
967 	boot->br_txhipri_lo = htole32(sc->sc_txhiring_dma.dma_paddr & 0xffffffff);
968 	boot->br_txhipri_hi = htole32(sc->sc_txhiring_dma.dma_paddr >> 32);
969 	boot->br_txhipri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
970 	sc->sc_txhir.r_reg = TXP_H2A_1;
971 	sc->sc_txhir.r_desc = (struct txp_tx_desc *)sc->sc_txhiring_dma.dma_vaddr;
972 	sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
973 	sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
974 	for (i = 0; i < TX_ENTRIES; i++) {
975 		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN,
976 		    TX_ENTRIES - 4, TXP_MAX_SEGLEN, 0,
977 		    BUS_DMA_NOWAIT, &sc->sc_txd[i].sd_map) != 0) {
978 			for (j = 0; j < i; j++) {
979 				bus_dmamap_destroy(sc->sc_dmat,
980 				    sc->sc_txd[j].sd_map);
981 				sc->sc_txd[j].sd_map = NULL;
982 			}
983 			goto bail_txhiring;
984 		}
985 	}
986 
987 	/* low priority tx ring */
988 	if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
989 	    &sc->sc_txloring_dma, BUS_DMA_COHERENT)) {
990 		printf(": can't allocate low tx ring\n");
991 		goto bail_txhiring;
992 	}
993 	memset(sc->sc_txloring_dma.dma_vaddr, 0,
994 	    sizeof(struct txp_tx_desc) * TX_ENTRIES);
995 	boot->br_txlopri_lo = htole32(sc->sc_txloring_dma.dma_paddr & 0xffffffff);
996 	boot->br_txlopri_hi = htole32(sc->sc_txloring_dma.dma_paddr >> 32);
997 	boot->br_txlopri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
998 	sc->sc_txlor.r_reg = TXP_H2A_3;
999 	sc->sc_txlor.r_desc = (struct txp_tx_desc *)sc->sc_txloring_dma.dma_vaddr;
1000 	sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
1001 	sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;
1002 
1003 	/* high priority rx ring */
1004 	if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
1005 	    &sc->sc_rxhiring_dma, BUS_DMA_COHERENT)) {
1006 		printf(": can't allocate high rx ring\n");
1007 		goto bail_txloring;
1008 	}
1009 	memset(sc->sc_rxhiring_dma.dma_vaddr, 0,
1010 	    sizeof(struct txp_rx_desc) * RX_ENTRIES);
1011 	boot->br_rxhipri_lo = htole32(sc->sc_rxhiring_dma.dma_paddr & 0xffffffff);
1012 	boot->br_rxhipri_hi = htole32(sc->sc_rxhiring_dma.dma_paddr >> 32);
1013 	boot->br_rxhipri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
1014 	sc->sc_rxhir.r_desc =
1015 	    (struct txp_rx_desc *)sc->sc_rxhiring_dma.dma_vaddr;
1016 	sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
1017 	sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
1018 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxhiring_dma.dma_map,
1019 	    0, sc->sc_rxhiring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1020 
1021 	/* low priority ring */
1022 	if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
1023 	    &sc->sc_rxloring_dma, BUS_DMA_COHERENT)) {
1024 		printf(": can't allocate low rx ring\n");
1025 		goto bail_rxhiring;
1026 	}
1027 	memset(sc->sc_rxloring_dma.dma_vaddr, 0,
1028 	    sizeof(struct txp_rx_desc) * RX_ENTRIES);
1029 	boot->br_rxlopri_lo = htole32(sc->sc_rxloring_dma.dma_paddr & 0xffffffff);
1030 	boot->br_rxlopri_hi = htole32(sc->sc_rxloring_dma.dma_paddr >> 32);
1031 	boot->br_rxlopri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
1032 	sc->sc_rxlor.r_desc =
1033 	    (struct txp_rx_desc *)sc->sc_rxloring_dma.dma_vaddr;
1034 	sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
1035 	sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
1036 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxloring_dma.dma_map,
1037 	    0, sc->sc_rxloring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1038 
1039 	/* command ring */
1040 	if (txp_dma_malloc(sc, sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
1041 	    &sc->sc_cmdring_dma, BUS_DMA_COHERENT)) {
1042 		printf(": can't allocate command ring\n");
1043 		goto bail_rxloring;
1044 	}
1045 	memset(sc->sc_cmdring_dma.dma_vaddr, 0,
1046 	    sizeof(struct txp_cmd_desc) * CMD_ENTRIES);
1047 	boot->br_cmd_lo = htole32(sc->sc_cmdring_dma.dma_paddr & 0xffffffff);
1048 	boot->br_cmd_hi = htole32(sc->sc_cmdring_dma.dma_paddr >> 32);
1049 	boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
1050 	sc->sc_cmdring.base = (struct txp_cmd_desc *)sc->sc_cmdring_dma.dma_vaddr;
1051 	sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
1052 	sc->sc_cmdring.lastwrite = 0;
1053 
1054 	/* response ring */
1055 	if (txp_dma_malloc(sc, sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
1056 	    &sc->sc_rspring_dma, BUS_DMA_COHERENT)) {
1057 		printf(": can't allocate response ring\n");
1058 		goto bail_cmdring;
1059 	}
1060 	memset(sc->sc_rspring_dma.dma_vaddr, 0,
1061 	    sizeof(struct txp_rsp_desc) * RSP_ENTRIES);
1062 	boot->br_resp_lo = htole32(sc->sc_rspring_dma.dma_paddr & 0xffffffff);
1063 	boot->br_resp_hi = htole32(sc->sc_rspring_dma.dma_paddr >> 32);
1064 	boot->br_resp_siz = htole32(RSP_ENTRIES * sizeof(struct txp_rsp_desc));
1065 	sc->sc_rspring.base = (struct txp_rsp_desc *)sc->sc_rspring_dma.dma_vaddr;
1066 	sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
1067 	sc->sc_rspring.lastwrite = 0;
1068 
1069 	/* receive buffer ring */
1070 	if (txp_dma_malloc(sc, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
1071 	    &sc->sc_rxbufring_dma, BUS_DMA_COHERENT)) {
1072 		printf(": can't allocate rx buffer ring\n");
1073 		goto bail_rspring;
1074 	}
1075 	memset(sc->sc_rxbufring_dma.dma_vaddr, 0,
1076 	    sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES);
1077 	boot->br_rxbuf_lo = htole32(sc->sc_rxbufring_dma.dma_paddr & 0xffffffff);
1078 	boot->br_rxbuf_hi = htole32(sc->sc_rxbufring_dma.dma_paddr >> 32);
1079 	boot->br_rxbuf_siz = htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
1080 	sc->sc_rxbufs = (struct txp_rxbuf_desc *)sc->sc_rxbufring_dma.dma_vaddr;
1081 	for (nb = 0; nb < RXBUF_ENTRIES; nb++) {
1082 		sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
1083 		    M_DEVBUF, M_NOWAIT);
1084 		/* stash away pointer */
1085 		memcpy(__UNVOLATILE(&sc->sc_rxbufs[nb].rb_vaddrlo), &sd,
1086 		    sizeof(sd));
1087 		if (sd == NULL)
1088 			break;
1089 
1090 		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
1091 		if (sd->sd_mbuf == NULL) {
1092 			goto bail_rxbufring;
1093 		}
1094 
1095 		MCLGET(sd->sd_mbuf, M_DONTWAIT);
1096 		if ((sd->sd_mbuf->m_flags & M_EXT) == 0) {
1097 			goto bail_rxbufring;
1098 		}
1099 		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
1100 		m_set_rcvif(sd->sd_mbuf, ifp);
1101 		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
1102 		    TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map)) {
1103 			goto bail_rxbufring;
1104 		}
1105 		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
1106 		    BUS_DMA_NOWAIT)) {
1107 			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
1108 			goto bail_rxbufring;
1109 		}
1110 		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
1111 		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1112 
1114 		sc->sc_rxbufs[nb].rb_paddrlo =
1115 		    ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) & 0xffffffff;
1116 		sc->sc_rxbufs[nb].rb_paddrhi =
1117 		    ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) >> 32;
1118 	}
1119 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
1120 	    0, sc->sc_rxbufring_dma.dma_map->dm_mapsize,
1121 	    BUS_DMASYNC_PREWRITE);
1122 	sc->sc_hostvar->hv_rx_buf_write_idx = htole32((RXBUF_ENTRIES - 1) *
1123 	    sizeof(struct txp_rxbuf_desc));
1124 
1125 	/* zero dma */
1126 	if (txp_dma_malloc(sc, sizeof(u_int32_t), &sc->sc_zero_dma,
1127 	    BUS_DMA_COHERENT)) {
1128 		printf(": can't allocate zero dma\n");
1129 		goto bail_rxbufring;
1130 	}
1131 	memset(sc->sc_zero_dma.dma_vaddr, 0, sizeof(u_int32_t));
1132 	boot->br_zero_lo = htole32(sc->sc_zero_dma.dma_paddr & 0xffffffff);
1133 	boot->br_zero_hi = htole32(sc->sc_zero_dma.dma_paddr >> 32);
1134 
1135 	/* See if it's waiting for boot, and try to boot it */
1136 	for (i = 0; i < 10000; i++) {
1137 		r = READ_REG(sc, TXP_A2H_0);
1138 		if (r == STAT_WAITING_FOR_BOOT)
1139 			break;
1140 		DELAY(50);
1141 	}
1142 	if (r != STAT_WAITING_FOR_BOOT) {
1143 		printf(": not waiting for boot\n");
1144 		goto bail;
1145 	}
1146 	WRITE_REG(sc, TXP_H2A_2, sc->sc_boot_dma.dma_paddr >> 32);
1147 	WRITE_REG(sc, TXP_H2A_1, sc->sc_boot_dma.dma_paddr & 0xffffffff);
1148 	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);
1149 
1150 	/* See if it booted */
1151 	for (i = 0; i < 10000; i++) {
1152 		r = READ_REG(sc, TXP_A2H_0);
1153 		if (r == STAT_RUNNING)
1154 			break;
1155 		DELAY(50);
1156 	}
1157 	if (r != STAT_RUNNING) {
1158 		printf(": fw not running\n");
1159 		goto bail;
1160 	}
1161 
1162 	/* Clear TX and CMD ring write registers */
1163 	WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
1164 	WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
1165 	WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
1166 	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);
1167 
1168 	return (0);
1169 
1170 bail:
1171 	txp_dma_free(sc, &sc->sc_zero_dma);
1172 bail_rxbufring:
1173 	if (nb == RXBUF_ENTRIES)
1174 		nb--;
1175 	for (i = 0; i <= nb; i++) {
1176 		memcpy(&sd, __UNVOLATILE(&sc->sc_rxbufs[i].rb_vaddrlo),
1177 		    sizeof(sd));
1178 		if (sd)
1179 			free(sd, M_DEVBUF);
1180 	}
1181 	txp_dma_free(sc, &sc->sc_rxbufring_dma);
1182 bail_rspring:
1183 	txp_dma_free(sc, &sc->sc_rspring_dma);
1184 bail_cmdring:
1185 	txp_dma_free(sc, &sc->sc_cmdring_dma);
1186 bail_rxloring:
1187 	txp_dma_free(sc, &sc->sc_rxloring_dma);
1188 bail_rxhiring:
1189 	txp_dma_free(sc, &sc->sc_rxhiring_dma);
1190 bail_txloring:
1191 	txp_dma_free(sc, &sc->sc_txloring_dma);
1192 bail_txhiring:
1193 	txp_dma_free(sc, &sc->sc_txhiring_dma);
1194 bail_host:
1195 	txp_dma_free(sc, &sc->sc_host_dma);
1196 bail_boot:
1197 	txp_dma_free(sc, &sc->sc_boot_dma);
1198 	return (-1);
1199 }
1200 
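/*
 * Standard four-step bus_dma allocation (alloc, map, create, load) with
 * the usual unwind-on-failure ladder; the single segment's physical
 * address is cached in dma_paddr.
 */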
1201 int
1202 txp_dma_malloc(struct txp_softc *sc, bus_size_t size,
1203     struct txp_dma_alloc *dma, int mapflags)
1204 {
1205 	int r;
1206 
1207 	if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
1208 	    &dma->dma_seg, 1, &dma->dma_nseg, 0)) != 0)
1209 		goto fail_0;
1210 
1211 	if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
1212 	    size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0)
1213 		goto fail_1;
1214 
1215 	if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1216 	    BUS_DMA_NOWAIT, &dma->dma_map)) != 0)
1217 		goto fail_2;
1218 
1219 	if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
1220 	    size, NULL, BUS_DMA_NOWAIT)) != 0)
1221 		goto fail_3;
1222 
1223 	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
1224 	return (0);
1225 
1226 fail_3:
1227 	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
1228 fail_2:
1229 	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
1230 fail_1:
1231 	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
1232 fail_0:
1233 	return (r);
1234 }
1235 
1236 void
1237 txp_dma_free(struct txp_softc *sc, struct txp_dma_alloc *dma)
1238 {
1239 	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
1240 	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_map->dm_mapsize);
1241 	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
1242 	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
1243 }
1244 
1245 int
1246 txp_ioctl(struct ifnet *ifp, u_long command, void *data)
1247 {
1248 	struct txp_softc *sc = ifp->if_softc;
1249 	struct ifreq *ifr = (struct ifreq *)data;
1250 	struct ifaddr *ifa = (struct ifaddr *)data;
1251 	int s, error = 0;
1252 
1253 	s = splnet();
1254 
1255 #if 0
1256 	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
1257 		splx(s);
1258 		return error;
1259 	}
1260 #endif
1261 
1262 	switch (command) {
1263 	case SIOCINITIFADDR:
1264 		ifp->if_flags |= IFF_UP;
1265 		txp_init(sc);
1266 		switch (ifa->ifa_addr->sa_family) {
1267 #ifdef INET
1268 		case AF_INET:
1269 			arp_ifinit(ifp, ifa);
1270 			break;
1271 #endif /* INET */
1272 		default:
1273 			break;
1274 		}
1275 		break;
1276 	case SIOCSIFFLAGS:
1277 		if ((error = ifioctl_common(ifp, command, data)) != 0)
1278 			break;
1279 		if (ifp->if_flags & IFF_UP) {
1280 			txp_init(sc);
1281 		} else {
1282 			if (ifp->if_flags & IFF_RUNNING)
1283 				txp_stop(sc);
1284 		}
1285 		break;
1286 	case SIOCADDMULTI:
1287 	case SIOCDELMULTI:
1288 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
1289 			break;
1290 
1291 		error = 0;
1292 
1293 		if (ifp->if_flags & IFF_RUNNING) {
1296 			/*
1297 			 * Multicast list has changed; set the hardware
1298 			 * filter accordingly.
1299 			 */
1300 			txp_set_filter(sc);
1301 		}
1302 		break;
1303 	case SIOCGIFMEDIA:
1304 	case SIOCSIFMEDIA:
1305 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
1306 		break;
1307 	default:
1308 		error = ether_ioctl(ifp, command, data);
1309 		break;
1310 	}
1311 
1312 	splx(s);
1313 
1314 	return(error);
1315 }
1316 
1317 void
1318 txp_init(struct txp_softc *sc)
1319 {
1320 	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
1321 	int s;
1322 
1323 	txp_stop(sc);
1324 
1325 	s = splnet();
1326 
1327 	txp_set_filter(sc);
1328 
1329 	txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
1330 	txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
1331 
1332 	WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF |
1333 	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
1334 	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
1335 	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
1336 	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |  TXP_INT_LATCH);
1337 	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);
1338 
1339 	ifp->if_flags |= IFF_RUNNING;
1340 	ifp->if_flags &= ~IFF_OACTIVE;
1341 	ifp->if_timer = 0;
1342 
1343 	if (!callout_pending(&sc->sc_tick))
1344 		callout_schedule(&sc->sc_tick, hz);
1345 
1346 	splx(s);
1347 }
1348 
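/*
 * Once-a-second housekeeping: replenish rx buffers and fold the
 * firmware's READ_STATISTICS response (six extended descriptors) into
 * the ifnet counters.
 */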
1349 void
1350 txp_tick(void *vsc)
1351 {
1352 	struct txp_softc *sc = vsc;
1353 	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
1354 	struct txp_rsp_desc *rsp = NULL;
1355 	struct txp_ext_desc *ext;
1356 	int s;
1357 
1358 	s = splnet();
1359 	txp_rxbuf_reclaim(sc);
1360 
1361 	if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
1362 	    &rsp, 1))
1363 		goto out;
1364 	if (rsp->rsp_numdesc != 6)
1365 		goto out;
1366 	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
1367 	    NULL, NULL, NULL, 1))
1368 		goto out;
1369 	ext = (struct txp_ext_desc *)(rsp + 1);
1370 
1371 	ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 +
1372 	    ext[4].ext_1 + ext[4].ext_4;
1373 	ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 +
1374 	    ext[2].ext_1;
1375 	ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 +
1376 	    ext[1].ext_3;
1377 	ifp->if_opackets += rsp->rsp_par2;
1378 	ifp->if_ipackets += ext[2].ext_3;
1379 
1380 out:
1381 	if (rsp != NULL)
1382 		free(rsp, M_DEVBUF);
1383 
1384 	splx(s);
1385 	callout_schedule(&sc->sc_tick, hz);
1386 }
1387 
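/*
 * Transmit start routine.  Each packet is DMA-mapped (bounced through a
 * fresh mbuf if the load fails), described by one data descriptor plus a
 * fragment descriptor per DMA segment, and posted by writing the new
 * producer offset to the ring's register.  Four ring slots are always
 * kept in reserve.
 */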
1388 void
1389 txp_start(struct ifnet *ifp)
1390 {
1391 	struct txp_softc *sc = ifp->if_softc;
1392 	struct txp_tx_ring *r = &sc->sc_txhir;
1393 	struct txp_tx_desc *txd;
1394 	int txdidx;
1395 	struct txp_frag_desc *fxd;
1396 	struct mbuf *m, *mnew;
1397 	struct txp_swdesc *sd;
1398 	u_int32_t firstprod, firstcnt, prod, cnt, i;
1399 
1400 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1401 		return;
1402 
1403 	prod = r->r_prod;
1404 	cnt = r->r_cnt;
1405 
1406 	while (1) {
1407 		IFQ_POLL(&ifp->if_snd, m);
1408 		if (m == NULL)
1409 			break;
1410 		mnew = NULL;
1411 
1412 		firstprod = prod;
1413 		firstcnt = cnt;
1414 
1415 		sd = sc->sc_txd + prod;
1416 		sd->sd_mbuf = m;
1417 
1418 		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
1419 		    BUS_DMA_NOWAIT)) {
1420 			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
1421 			if (mnew == NULL)
1422 				goto oactive1;
1423 			if (m->m_pkthdr.len > MHLEN) {
1424 				MCLGET(mnew, M_DONTWAIT);
1425 				if ((mnew->m_flags & M_EXT) == 0) {
1426 					m_freem(mnew);
1427 					goto oactive1;
1428 				}
1429 			}
1430 			m_copydata(m, 0, m->m_pkthdr.len, mtod(mnew, void *));
1431 			mnew->m_pkthdr.len = mnew->m_len = m->m_pkthdr.len;
1432 			IFQ_DEQUEUE(&ifp->if_snd, m);
1433 			m_freem(m);
1434 			m = mnew;
1435 			if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
1436 			    BUS_DMA_NOWAIT))
1437 				goto oactive1;
1438 		}
1439 
1440 		if ((TX_ENTRIES - cnt) < 4)
1441 			goto oactive;
1442 
1443 		txd = r->r_desc + prod;
1444 		txdidx = prod;
1445 		txd->tx_flags = TX_FLAGS_TYPE_DATA;
1446 		txd->tx_numdesc = 0;
1447 		txd->tx_addrlo = 0;
1448 		txd->tx_addrhi = 0;
1449 		txd->tx_totlen = m->m_pkthdr.len;
1450 		txd->tx_pflags = 0;
1451 		txd->tx_numdesc = sd->sd_map->dm_nsegs;
1452 
1453 		if (++prod == TX_ENTRIES)
1454 			prod = 0;
1455 
1456 		if (++cnt >= (TX_ENTRIES - 4))
1457 			goto oactive;
1458 
1459 		if (vlan_has_tag(m))
1460 			txd->tx_pflags = TX_PFLAGS_VLAN |
1461 			  (htons(vlan_get_tag(m)) << TX_PFLAGS_VLANTAG_S);
1462 
1463 		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
1464 			txd->tx_pflags |= TX_PFLAGS_IPCKSUM;
1465 #ifdef TRY_TX_TCP_CSUM
1466 		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
1467 			txd->tx_pflags |= TX_PFLAGS_TCPCKSUM;
1468 #endif
1469 #ifdef TRY_TX_UDP_CSUM
1470 		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
1471 			txd->tx_pflags |= TX_PFLAGS_UDPCKSUM;
1472 #endif
1473 
1474 		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
1475 		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1476 
1477 		fxd = (struct txp_frag_desc *)(r->r_desc + prod);
1478 		for (i = 0; i < sd->sd_map->dm_nsegs; i++) {
1479 			if (++cnt >= (TX_ENTRIES - 4)) {
1480 				bus_dmamap_sync(sc->sc_dmat, sd->sd_map,
1481 				    0, sd->sd_map->dm_mapsize,
1482 				    BUS_DMASYNC_POSTWRITE);
1483 				goto oactive;
1484 			}
1485 
1486 			fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG |
1487 			    FRAG_FLAGS_VALID;
1488 			fxd->frag_rsvd1 = 0;
1489 			fxd->frag_len = sd->sd_map->dm_segs[i].ds_len;
1490 			fxd->frag_addrlo =
1491 			    ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) &
1492 			    0xffffffff;
1493 			fxd->frag_addrhi =
1494 			    ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) >>
1495 			    32;
1496 			fxd->frag_rsvd2 = 0;
1497 
1498 			bus_dmamap_sync(sc->sc_dmat,
1499 			    sc->sc_txhiring_dma.dma_map,
1500 			    prod * sizeof(struct txp_frag_desc),
1501 			    sizeof(struct txp_frag_desc), BUS_DMASYNC_PREWRITE);
1502 
1503 			if (++prod == TX_ENTRIES) {
1504 				fxd = (struct txp_frag_desc *)r->r_desc;
1505 				prod = 0;
1506 			} else
1507 				fxd++;
1508 
1509 		}
1510 
1511 		/*
1512 		 * if mnew isn't NULL, we already dequeued and copied
1513 		 * the packet.
1514 		 */
1515 		if (mnew == NULL)
1516 			IFQ_DEQUEUE(&ifp->if_snd, m);
1517 
1518 		ifp->if_timer = 5;
1519 
1520 		bpf_mtap(ifp, m, BPF_D_OUT);
1521 
1522 		txd->tx_flags |= TX_FLAGS_VALID;
1523 		bus_dmamap_sync(sc->sc_dmat, sc->sc_txhiring_dma.dma_map,
1524 		    txdidx * sizeof(struct txp_tx_desc),
1525 		    sizeof(struct txp_tx_desc), BUS_DMASYNC_PREWRITE);
1526 
1527 #if 0
1528 		{
1529 			struct mbuf *mx;
1530 			int i;
1531 
1532 			printf("txd: flags 0x%x ndesc %d totlen %d pflags 0x%x\n",
1533 			    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
1534 			    txd->tx_pflags);
1535 			for (mx = m; mx != NULL; mx = mx->m_next) {
1536 				for (i = 0; i < mx->m_len; i++) {
1537 					printf(":%02x",
1538 					    (u_int8_t)mx->m_data[i]);
1539 				}
1540 			}
1541 			printf("\n");
1542 		}
1543 #endif
1544 
1545 		WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod));
1546 	}
1547 
1548 	r->r_prod = prod;
1549 	r->r_cnt = cnt;
1550 	return;
1551 
1552 oactive:
1553 	bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
1554 oactive1:
1555 	ifp->if_flags |= IFF_OACTIVE;
1556 	r->r_prod = firstprod;
1557 	r->r_cnt = firstcnt;
1558 }
1559 
1560 /*
1561  * Handle simple commands sent to the typhoon
1562  */
1563 int
1564 txp_command(struct txp_softc *sc, u_int16_t id, u_int16_t in1, u_int32_t in2,
1565     u_int32_t in3, u_int16_t *out1, u_int32_t *out2, u_int32_t *out3, int wait)
1566 {
1567 	struct txp_rsp_desc *rsp = NULL;
1568 
1569 	if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait))
1570 		return (-1);
1571 
1572 	if (!wait)
1573 		return (0);
1574 
1575 	if (out1 != NULL)
1576 		*out1 = le16toh(rsp->rsp_par1);
1577 	if (out2 != NULL)
1578 		*out2 = le32toh(rsp->rsp_par2);
1579 	if (out3 != NULL)
1580 		*out3 = le32toh(rsp->rsp_par3);
1581 	free(rsp, M_DEVBUF);
1582 	return (0);
1583 }
1584 
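/*
 * Post a command (plus optional extended descriptors) to the command
 * ring and, if a response is wanted, poll the host variables until a
 * response matching this command's id and sequence number appears.
 */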
1585 int
1586 txp_command2(struct txp_softc *sc, u_int16_t id, u_int16_t in1, u_int32_t in2,
1587     u_int32_t in3, struct txp_ext_desc *in_extp, u_int8_t in_extn,
1588     struct txp_rsp_desc **rspp, int wait)
1589 {
1590 	struct txp_hostvar *hv = sc->sc_hostvar;
1591 	struct txp_cmd_desc *cmd;
1592 	struct txp_ext_desc *ext;
1593 	u_int32_t idx, i;
1594 	u_int16_t seq;
1595 
1596 	if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
1597 		printf("%s: no free cmd descriptors\n", TXP_DEVNAME(sc));
1598 		return (-1);
1599 	}
1600 
1601 	idx = sc->sc_cmdring.lastwrite;
1602 	cmd = (struct txp_cmd_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
1603 	memset(cmd, 0, sizeof(*cmd));
1604 
1605 	cmd->cmd_numdesc = in_extn;
1606 	seq = sc->sc_seq++;
1607 	cmd->cmd_seq = htole16(seq);
1608 	cmd->cmd_id = htole16(id);
1609 	cmd->cmd_par1 = htole16(in1);
1610 	cmd->cmd_par2 = htole32(in2);
1611 	cmd->cmd_par3 = htole32(in3);
1612 	cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
1613 	    (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;
1614 
1615 	idx += sizeof(struct txp_cmd_desc);
1616 	if (idx == sc->sc_cmdring.size)
1617 		idx = 0;
1618 
1619 	for (i = 0; i < in_extn; i++) {
1620 		ext = (struct txp_ext_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
1621 		memcpy(ext, in_extp, sizeof(struct txp_ext_desc));
1622 		in_extp++;
1623 		idx += sizeof(struct txp_cmd_desc);
1624 		if (idx == sc->sc_cmdring.size)
1625 			idx = 0;
1626 	}
1627 
1628 	sc->sc_cmdring.lastwrite = idx;
1629 
1630 	WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
1631 	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
1632 	    sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);
1633 
1634 	if (!wait)
1635 		return (0);
1636 
1637 	for (i = 0; i < 10000; i++) {
1638 		bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
1639 		    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTREAD);
1640 		idx = le32toh(hv->hv_resp_read_idx);
1641 		if (idx != le32toh(hv->hv_resp_write_idx)) {
1642 			*rspp = NULL;
1643 			if (txp_response(sc, idx, id, seq, rspp))
1644 				return (-1);
1645 			if (*rspp != NULL)
1646 				break;
1647 		}
1648 		bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
1649 		    sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);
1650 		DELAY(50);
1651 	}
1652 	if (i == 10000 || (*rspp) == NULL) {
1653 		printf("%s: 0x%x command failed\n", TXP_DEVNAME(sc), id);
1654 		return (-1);
1655 	}
1656 
1657 	return (0);
1658 }
1659 
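/*
 * Scan the response ring between the read and write indices.  The
 * response matching (id, seq) is copied into freshly allocated memory
 * for the caller; unsolicited responses are consumed in place.
 */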
1660 int
1661 txp_response(struct txp_softc *sc, u_int32_t ridx, u_int16_t id, u_int16_t seq,
1662     struct txp_rsp_desc **rspp)
1663 {
1664 	struct txp_hostvar *hv = sc->sc_hostvar;
1665 	struct txp_rsp_desc *rsp;
1666 
1667 	while (ridx != le32toh(hv->hv_resp_write_idx)) {
1668 		rsp = (struct txp_rsp_desc *)(((u_int8_t *)sc->sc_rspring.base) + ridx);
1669 
1670 		if (id == le16toh(rsp->rsp_id) && le16toh(rsp->rsp_seq) == seq) {
1671 			*rspp = (struct txp_rsp_desc *)malloc(
1672 			    sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
1673 			    M_DEVBUF, M_NOWAIT);
1674 			if ((*rspp) == NULL)
1675 				return (-1);
1676 			txp_rsp_fixup(sc, rsp, *rspp);
1677 			return (0);
1678 		}
1679 
1680 		if (rsp->rsp_flags & RSP_FLAGS_ERROR) {
1681 			printf("%s: response error: id 0x%x\n",
1682 			    TXP_DEVNAME(sc), le16toh(rsp->rsp_id));
1683 			txp_rsp_fixup(sc, rsp, NULL);
1684 			ridx = le32toh(hv->hv_resp_read_idx);
1685 			continue;
1686 		}
1687 
1688 		switch (le16toh(rsp->rsp_id)) {
1689 		case TXP_CMD_CYCLE_STATISTICS:
1690 		case TXP_CMD_MEDIA_STATUS_READ:
1691 			break;
1692 		case TXP_CMD_HELLO_RESPONSE:
1693 			printf("%s: hello\n", TXP_DEVNAME(sc));
1694 			break;
1695 		default:
1696 			printf("%s: unknown id(0x%x)\n", TXP_DEVNAME(sc),
1697 			    le16toh(rsp->rsp_id));
1698 		}
1699 
1700 		txp_rsp_fixup(sc, rsp, NULL);
1701 		ridx = le32toh(hv->hv_resp_read_idx);
1702 		hv->hv_resp_read_idx = htole32(ridx);
1703 	}
1704 
1705 	return (0);
1706 }
1707 
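/*
 * Copy a response (header plus rsp_numdesc extended descriptors) out of
 * the ring, handling wrap-around, and advance the read index past it.
 */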
1708 void
1709 txp_rsp_fixup(struct txp_softc *sc, struct txp_rsp_desc *rsp,
1710     struct txp_rsp_desc *dst)
1711 {
1712 	struct txp_rsp_desc *src = rsp;
1713 	struct txp_hostvar *hv = sc->sc_hostvar;
1714 	u_int32_t i, ridx;
1715 
1716 	ridx = le32toh(hv->hv_resp_read_idx);
1717 
1718 	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
1719 		if (dst != NULL)
1720 			memcpy(dst++, src, sizeof(struct txp_rsp_desc));
1721 		ridx += sizeof(struct txp_rsp_desc);
1722 		if (ridx == sc->sc_rspring.size) {
1723 			src = sc->sc_rspring.base;
1724 			ridx = 0;
1725 		} else
1726 			src++;
1727 		sc->sc_rspring.lastwrite = ridx;
1728 		hv->hv_resp_read_idx = htole32(ridx);
1729 	}
1730 
1731 	hv->hv_resp_read_idx = htole32(ridx);
1732 }
1733 
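/*
 * Number of free command descriptors.  One slot is always left unused
 * so that a full ring can be distinguished from an empty one.
 */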
1734 int
1735 txp_cmd_desc_numfree(struct txp_softc *sc)
1736 {
1737 	struct txp_hostvar *hv = sc->sc_hostvar;
1738 	struct txp_boot_record *br = sc->sc_boot;
1739 	u_int32_t widx, ridx, nfree;
1740 
1741 	widx = sc->sc_cmdring.lastwrite;
1742 	ridx = le32toh(hv->hv_cmd_read_idx);
1743 
1744 	if (widx == ridx) {
1745 		/* Ring is completely free */
1746 		nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
1747 	} else {
1748 		if (widx > ridx)
1749 			nfree = le32toh(br->br_cmd_siz) -
1750 			    (widx - ridx + sizeof(struct txp_cmd_desc));
1751 		else
1752 			nfree = ridx - widx - sizeof(struct txp_cmd_desc);
1753 	}
1754 
1755 	return (nfree / sizeof(struct txp_cmd_desc));
1756 }
1757 
1758 void
1759 txp_stop(struct txp_softc *sc)
1760 {
1761 	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
1762 	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
1763 
1764 	if (callout_pending(&sc->sc_tick))
1765 		callout_stop(&sc->sc_tick);
1766 }
1767 
1768 void
1769 txp_watchdog(struct ifnet *ifp)
1770 {
1771 }
1772 
1773 int
1774 txp_ifmedia_upd(struct ifnet *ifp)
1775 {
1776 	struct txp_softc *sc = ifp->if_softc;
1777 	struct ifmedia *ifm = &sc->sc_ifmedia;
1778 	u_int16_t new_xcvr;
1779 
1780 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1781 		return (EINVAL);
1782 
1783 	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
1784 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1785 			new_xcvr = TXP_XCVR_10_FDX;
1786 		else
1787 			new_xcvr = TXP_XCVR_10_HDX;
1788 	} else if ((IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) ||
1789 		   (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX)) {
1790 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1791 			new_xcvr = TXP_XCVR_100_FDX;
1792 		else
1793 			new_xcvr = TXP_XCVR_100_HDX;
1794 	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
1795 		new_xcvr = TXP_XCVR_AUTO;
1796 	} else
1797 		return (EINVAL);
1798 
1799 	/* nothing to do */
1800 	if (sc->sc_xcvr == new_xcvr)
1801 		return (0);
1802 
1803 	txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
1804 	    NULL, NULL, NULL, 0);
1805 	sc->sc_xcvr = new_xcvr;
1806 
1807 	return (0);
1808 }
1809 
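/*
 * Report current media status.  The PHY registers are not mapped
 * directly; they are read through the firmware's PHY_MGMT_READ command.
 */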
1810 void
1811 txp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1812 {
1813 	struct txp_softc *sc = ifp->if_softc;
1814 	struct ifmedia *ifm = &sc->sc_ifmedia;
1815 	u_int16_t bmsr, bmcr, anlpar;
1816 
1817 	ifmr->ifm_status = IFM_AVALID;
1818 	ifmr->ifm_active = IFM_ETHER;
1819 
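	/*
	 * BMSR_LINK is a latched-low bit: the first read returns the
	 * saved failure state, so read the register twice to obtain the
	 * current link status.
	 */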
1820 	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
1821 	    &bmsr, NULL, NULL, 1))
1822 		goto bail;
1823 	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
1824 	    &bmsr, NULL, NULL, 1))
1825 		goto bail;
1826 
1827 	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
1828 	    &bmcr, NULL, NULL, 1))
1829 		goto bail;
1830 
1831 	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
1832 	    &anlpar, NULL, NULL, 1))
1833 		goto bail;
1834 
1835 	if (bmsr & BMSR_LINK)
1836 		ifmr->ifm_status |= IFM_ACTIVE;
1837 
1838 	if (bmcr & BMCR_ISO) {
1839 		ifmr->ifm_active |= IFM_NONE;
1840 		ifmr->ifm_status = 0;
1841 		return;
1842 	}
1843 
1844 	if (bmcr & BMCR_LOOP)
1845 		ifmr->ifm_active |= IFM_LOOP;
1846 
1847 	if (!(sc->sc_flags & TXP_FIBER) && (bmcr & BMCR_AUTOEN)) {
1848 		if ((bmsr & BMSR_ACOMP) == 0) {
1849 			ifmr->ifm_active |= IFM_NONE;
1850 			return;
1851 		}
1852 
1853 		if (anlpar & ANLPAR_TX_FD)
1854 			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
1855 		else if (anlpar & ANLPAR_T4)
1856 			ifmr->ifm_active |= IFM_100_T4|IFM_HDX;
1857 		else if (anlpar & ANLPAR_TX)
1858 			ifmr->ifm_active |= IFM_100_TX|IFM_HDX;
1859 		else if (anlpar & ANLPAR_10_FD)
1860 			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
1861 		else if (anlpar & ANLPAR_10)
1862 			ifmr->ifm_active |= IFM_10_T|IFM_HDX;
1863 		else
1864 			ifmr->ifm_active |= IFM_NONE;
1865 	} else
1866 		ifmr->ifm_active = ifm->ifm_cur->ifm_media;
1867 	return;
1868 
1869 bail:
1870 	ifmr->ifm_active |= IFM_NONE;
1871 	ifmr->ifm_status &= ~IFM_AVALID;
1872 }
1873 
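/*
 * Debug aid: decode a descriptor by its type field and print its
 * contents.
 */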
1874 void
1875 txp_show_descriptor(void *d)
1876 {
1877 	struct txp_cmd_desc *cmd = d;
1878 	struct txp_rsp_desc *rsp = d;
1879 	struct txp_tx_desc *txd = d;
1880 	struct txp_frag_desc *frgd = d;
1881 
1882 	switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
1883 	case CMD_FLAGS_TYPE_CMD:
1884 		/* command descriptor */
1885 		printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
1886 		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
1887 		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
1888 		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
1889 		break;
1890 	case CMD_FLAGS_TYPE_RESP:
1891 		/* response descriptor */
1892 		printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
1893 		    rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id),
1894 		    le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1),
1895 		    le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3));
1896 		break;
1897 	case CMD_FLAGS_TYPE_DATA:
1898 		/* data header (assuming tx for now) */
1899 		printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]\n",
1900 		    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
1901 		    txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags);
1902 		break;
1903 	case CMD_FLAGS_TYPE_FRAG:
1904 		/* fragment descriptor */
1905 		printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]\n",
1906 		    frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len,
1907 		    frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2);
1908 		break;
1909 	default:
1910 		printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
1911 		    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
1912 		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
1913 		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
1914 		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
1915 		break;
1916 	}
1917 }
1918 
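/*
 * Program the receive filter.  IFF_PROMISC overrides everything;
 * otherwise accept directed (and optionally broadcast) frames, and
 * either fall back to all-multicast or load the 64-bit multicast hash
 * table computed below.
 */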
1919 void
1920 txp_set_filter(struct txp_softc *sc)
1921 {
1922 	struct ethercom *ac = &sc->sc_arpcom;
1923 	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
1924 	u_int32_t crc, carry, hashbit, hash[2];
1925 	u_int16_t filter;
1926 	u_int8_t octet;
1927 	int i, j, mcnt = 0;
1928 	struct ether_multi *enm;
1929 	struct ether_multistep step;
1930 
1931 	if (ifp->if_flags & IFF_PROMISC) {
1932 		filter = TXP_RXFILT_PROMISC;
1933 		goto setit;
1934 	}
1935 
1936 again:
1937 	filter = TXP_RXFILT_DIRECT;
1938 
1939 	if (ifp->if_flags & IFF_BROADCAST)
1940 		filter |= TXP_RXFILT_BROADCAST;
1941 
1942 	if (ifp->if_flags & IFF_ALLMULTI)
1943 		filter |= TXP_RXFILT_ALLMULTI;
1944 	else {
1945 		hash[0] = hash[1] = 0;
1946 
1947 		ETHER_FIRST_MULTI(step, ac, enm);
1948 		while (enm != NULL) {
1949 			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1950 			    ETHER_ADDR_LEN)) {
1951 				/*
1952 				 * We must listen to a range of multicast
1953 				 * addresses.  For now, just accept all
1954 				 * multicasts, rather than trying to set only
1955 				 * those filter bits needed to match the range.
1956 				 * (At this time, the only use of address
1957 				 * ranges is for IP multicast routing, for
1958 				 * which the range is big enough to require
1959 				 * all bits set.)
1960 				 */
1961 				ifp->if_flags |= IFF_ALLMULTI;
1962 				goto again;
1963 			}
1964 
1965 			mcnt++;
1966 			crc = 0xffffffff;
1967 
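			/*
			 * Bit-serial CRC-32 of the multicast address;
			 * the low six bits of the result select one of
			 * the 64 hash-table bits.
			 */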
1968 			for (i = 0; i < ETHER_ADDR_LEN; i++) {
1969 				octet = enm->enm_addrlo[i];
1970 				for (j = 0; j < 8; j++) {
1971 					carry = ((crc & 0x80000000) ? 1 : 0) ^
1972 					    (octet & 1);
1973 					crc <<= 1;
1974 					octet >>= 1;
1975 					if (carry)
1976 						crc = (crc ^ TXP_POLYNOMIAL) |
1977 						    carry;
1978 				}
1979 			}
1980 			hashbit = (u_int16_t)(crc & (64 - 1));
1981 			hash[hashbit / 32] |= (1 << hashbit % 32);
1982 			ETHER_NEXT_MULTI(step, enm);
1983 		}
1984 
1985 		if (mcnt > 0) {
1986 			filter |= TXP_RXFILT_HASHMULTI;
1987 			txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE,
1988 			    2, hash[0], hash[1], NULL, NULL, NULL, 0);
1989 		}
1990 	}
1991 
1992 setit:
1993 	txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
1994 	    NULL, NULL, NULL, 1);
1995 }
1996 
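/*
 * Negotiate offload capabilities with the firmware: read the offloads
 * the running image supports, keep the subset this driver can use
 * (VLAN tagging and IPv4/TCP/UDP checksums), and write the result back.
 */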
1997 void
1998 txp_capabilities(struct txp_softc *sc)
1999 {
2000 	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
2001 	struct txp_rsp_desc *rsp = NULL;
2002 	struct txp_ext_desc *ext;
2003 
2004 	if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1))
2005 		goto out;
2006 
2007 	if (rsp->rsp_numdesc != 1)
2008 		goto out;
2009 	ext = (struct txp_ext_desc *)(rsp + 1);
2010 
2011 	sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK;
2012 	sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK;
2013 
2014 	sc->sc_arpcom.ec_capabilities |= ETHERCAP_VLAN_MTU;
2015 	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) {
2016 		sc->sc_tx_capability |= OFFLOAD_VLAN;
2017 		sc->sc_rx_capability |= OFFLOAD_VLAN;
2018 		sc->sc_arpcom.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
2019 	}
2020 
2021 #if 0
2022 	/* not ready yet */
2023 	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) {
2024 		sc->sc_tx_capability |= OFFLOAD_IPSEC;
2025 		sc->sc_rx_capability |= OFFLOAD_IPSEC;
2026 		ifp->if_capabilities |= IFCAP_IPSEC;
2027 	}
2028 #endif
2029 
2030 	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) {
2031 		sc->sc_tx_capability |= OFFLOAD_IPCKSUM;
2032 		sc->sc_rx_capability |= OFFLOAD_IPCKSUM;
2033 		ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
2034 	}
2035 
2036 	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) {
2037 		sc->sc_rx_capability |= OFFLOAD_TCPCKSUM;
2038 #ifdef TRY_TX_TCP_CSUM
2039 		sc->sc_tx_capability |= OFFLOAD_TCPCKSUM;
2040 		ifp->if_capabilities |=
2041 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx;
2042 #endif
2043 	}
2044 
2045 	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) {
2046 		sc->sc_rx_capability |= OFFLOAD_UDPCKSUM;
2047 #ifdef TRY_TX_UDP_CSUM
2048 		sc->sc_tx_capability |= OFFLOAD_UDPCKSUM;
2049 		ifp->if_capabilities |=
2050 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
2051 #endif
2052 	}
2053 
2054 	if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0,
2055 	    sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1))
2056 		goto out;
2057 
2058 out:
2059 	if (rsp != NULL)
2060 		free(rsp, M_DEVBUF);
2061 }
2062