/*	$OpenBSD: if_vic.c,v 1.54 2008/09/10 14:01:23 blambert Exp $	*/

/*
 * Copyright (c) 2006 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the VMware Virtual NIC ("vmxnet")
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/timeout.h>
#include <sys/device.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define VIC_PCI_BAR		PCI_MAPREG_START /* Base Address Register */

#define VIC_LANCE_SIZE		0x20
#define VIC_MORPH_SIZE		0x04
#define  VIC_MORPH_MASK			0xffff
#define  VIC_MORPH_LANCE		0x2934
#define  VIC_MORPH_VMXNET		0x4392
#define VIC_VMXNET_SIZE		0x40
#define VIC_LANCE_MINLEN	(VIC_LANCE_SIZE + VIC_MORPH_SIZE + \
				    VIC_VMXNET_SIZE)

#define VIC_MAGIC		0xbabe864f

/* Register address offsets */
#define VIC_DATA_ADDR		0x0000		/* Shared data address */
#define VIC_DATA_LENGTH		0x0004		/* Shared data length */
#define VIC_Tx_ADDR		0x0008		/* Tx pointer address */

/* Command register */
#define VIC_CMD			0x000c		/* Command register */
#define  VIC_CMD_INTR_ACK	0x0001	/* Acknowledge interrupt */
#define  VIC_CMD_MCASTFIL	0x0002	/* Multicast address filter */
#define   VIC_CMD_MCASTFIL_LENGTH	2
#define  VIC_CMD_IFF		0x0004	/* Interface flags */
#define   VIC_CMD_IFF_PROMISC	0x0001		/* Promiscuous enabled */
#define   VIC_CMD_IFF_BROADCAST	0x0002		/* Broadcast enabled */
#define   VIC_CMD_IFF_MULTICAST	0x0004		/* Multicast enabled */
#define  VIC_CMD_INTR_DISABLE	0x0020	/* Disable interrupts */
#define  VIC_CMD_INTR_ENABLE	0x0040	/* Enable interrupts */
#define  VIC_CMD_Tx_DONE	0x0100	/* Tx done register */
#define  VIC_CMD_NUM_Rx_BUF	0x0200	/* Number of Rx buffers */
#define  VIC_CMD_NUM_Tx_BUF	0x0400	/* Number of Tx buffers */
#define  VIC_CMD_NUM_PINNED_BUF	0x0800	/* Number of pinned buffers */
#define  VIC_CMD_HWCAP		0x1000	/* Capability register */
#define   VIC_CMD_HWCAP_SG		(1<<0) /* Scatter-gather transmits */
#define   VIC_CMD_HWCAP_CSUM_IPv4	(1<<1) /* TCP/UDP cksum */
#define   VIC_CMD_HWCAP_CSUM_ALL	(1<<3) /* Hardware cksum */
#define   VIC_CMD_HWCAP_CSUM \
	(VIC_CMD_HWCAP_CSUM_IPv4 | VIC_CMD_HWCAP_CSUM_ALL)
#define   VIC_CMD_HWCAP_DMA_HIGH	(1<<4) /* High DMA mapping */
#define   VIC_CMD_HWCAP_TOE		(1<<5) /* TCP offload engine */
#define   VIC_CMD_HWCAP_TSO		(1<<6) /* TCP segmentation offload */
#define   VIC_CMD_HWCAP_TSO_SW		(1<<7) /* Software TCP segmentation */
#define   VIC_CMD_HWCAP_VPROM		(1<<8) /* Virtual PROM available */
#define   VIC_CMD_HWCAP_VLAN_Tx		(1<<9) /* Hardware VLAN MTU Tx */
#define   VIC_CMD_HWCAP_VLAN_Rx		(1<<10) /* Hardware VLAN MTU Rx */
#define   VIC_CMD_HWCAP_VLAN_SW		(1<<11)	/* Software VLAN MTU */
#define   VIC_CMD_HWCAP_VLAN \
	(VIC_CMD_HWCAP_VLAN_Tx | VIC_CMD_HWCAP_VLAN_Rx | \
	VIC_CMD_HWCAP_VLAN_SW)
#define  VIC_CMD_HWCAP_BITS \
	"\20\01SG\02CSUM4\03CSUM\04HDMA\05TOE\06TSO" \
	"\07TSOSW\10VPROM\13VLANTx\14VLANRx\15VLANSW"
#define  VIC_CMD_FEATURE	0x2000	/* Additional feature register */
#define   VIC_CMD_FEATURE_0_Tx		(1<<0)
#define   VIC_CMD_FEATURE_TSO		(1<<1)

#define VIC_LLADDR		0x0010		/* MAC address register */
#define VIC_VERSION_MINOR	0x0018		/* Minor version register */
#define VIC_VERSION_MAJOR	0x001c		/* Major version register */
#define VIC_VERSION_MAJOR_M	0xffff0000

/* Status register */
#define VIC_STATUS		0x0020
#define  VIC_STATUS_CONNECTED		(1<<0)
#define  VIC_STATUS_ENABLED		(1<<1)

#define VIC_TOE_ADDR		0x0024		/* TCP offload address */

/* Virtual PROM address */
#define VIC_VPROM		0x0028
#define VIC_VPROM_LENGTH	6

131 
132 struct vic_sg {
133 	u_int32_t	sg_addr_low;
134 	u_int16_t	sg_addr_high;
135 	u_int16_t	sg_length;
136 } __packed;
137 
138 #define VIC_SG_MAX		6
139 #define VIC_SG_ADDR_MACH	0
140 #define VIC_SG_ADDR_PHYS	1
141 #define VIC_SG_ADDR_VIRT	3
142 
143 struct vic_sgarray {
144 	u_int16_t	sa_addr_type;
145 	u_int16_t	sa_length;
146 	struct vic_sg	sa_sg[VIC_SG_MAX];
147 } __packed;
148 
149 struct vic_rxdesc {
150 	u_int64_t	rx_physaddr;
151 	u_int32_t	rx_buflength;
152 	u_int32_t	rx_length;
153 	u_int16_t	rx_owner;
154 	u_int16_t	rx_flags;
155 	u_int32_t	rx_priv;
156 } __packed;
157 
158 #define VIC_RX_FLAGS_CSUMHW_OK	0x0001
159 
160 struct vic_txdesc {
161 	u_int16_t		tx_flags;
162 	u_int16_t		tx_owner;
163 	u_int32_t		tx_priv;
164 	u_int32_t		tx_tsomss;
165 	struct vic_sgarray	tx_sa;
166 } __packed;
167 
168 #define VIC_TX_FLAGS_KEEP	0x0001
169 #define VIC_TX_FLAGS_TXURN	0x0002
170 #define VIC_TX_FLAGS_CSUMHW	0x0004
171 #define VIC_TX_FLAGS_TSO	0x0008
172 #define VIC_TX_FLAGS_PINNED	0x0010
173 #define VIC_TX_FLAGS_QRETRY	0x1000
174 
175 struct vic_stats {
176 	u_int32_t		vs_tx_count;
177 	u_int32_t		vs_tx_packets;
178 	u_int32_t		vs_tx_0copy;
179 	u_int32_t		vs_tx_copy;
180 	u_int32_t		vs_tx_maxpending;
181 	u_int32_t		vs_tx_stopped;
182 	u_int32_t		vs_tx_overrun;
183 	u_int32_t		vs_intr;
184 	u_int32_t		vs_rx_packets;
185 	u_int32_t		vs_rx_underrun;
186 } __packed;
187 
188 struct vic_data {
189 	u_int32_t		vd_magic;
190 
191 	u_int32_t		vd_rx_length;
192 	u_int32_t		vd_rx_nextidx;
193 	u_int32_t		vd_rx_length2;
194 	u_int32_t		vd_rx_nextidx2;
195 
196 	u_int32_t		vd_irq;
197 	u_int32_t		vd_iff;
198 
199 	u_int32_t		vd_mcastfil[VIC_CMD_MCASTFIL_LENGTH];
200 
201 	u_int32_t		vd_reserved1[1];
202 
203 	u_int32_t		vd_tx_length;
204 	u_int32_t		vd_tx_curidx;
205 	u_int32_t		vd_tx_nextidx;
206 	u_int32_t		vd_tx_stopped;
207 	u_int32_t		vd_tx_triggerlvl;
208 	u_int32_t		vd_tx_queued;
209 	u_int32_t		vd_tx_minlength;
210 
211 	u_int32_t		vd_reserved2[6];
212 
213 	u_int32_t		vd_rx_saved_nextidx;
214 	u_int32_t		vd_rx_saved_nextidx2;
215 	u_int32_t		vd_tx_saved_nextidx;
216 
217 	u_int32_t		vd_length;
218 	u_int32_t		vd_rx_offset;
219 	u_int32_t		vd_rx_offset2;
220 	u_int32_t		vd_tx_offset;
221 	u_int32_t		vd_debug;
222 	u_int32_t		vd_tx_physaddr;
223 	u_int32_t		vd_tx_physaddr_length;
224 	u_int32_t		vd_tx_maxlength;
225 
226 	struct vic_stats	vd_stats;
227 } __packed;
228 
229 #define VIC_OWNER_DRIVER	0
230 #define VIC_OWNER_DRIVER_PEND	1
231 #define VIC_OWNER_NIC		2
232 #define VIC_OWNER_NIC_PEND	3
233 
234 #define VIC_JUMBO_FRAMELEN	9018
235 #define VIC_JUMBO_MTU		(VIC_JUMBO_FRAMELEN - ETHER_HDR_LEN - ETHER_CRC_LEN)
236 
237 #define VIC_NBUF		100
238 #define VIC_NBUF_MAX		128
239 #define VIC_MAX_SCATTER		1	/* 8? */
240 #define VIC_QUEUE_SIZE		VIC_NBUF_MAX
241 #define VIC_QUEUE2_SIZE		1
242 #define VIC_INC(_x, _y)		(_x) = ((_x) + 1) % (_y)
243 #define VIC_TX_TIMEOUT		5
244 
245 #define VIC_MIN_FRAMELEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)
246 
247 #define VIC_TXURN_WARN(_sc)	((_sc)->sc_txpending >= ((_sc)->sc_ntxbuf - 5))
248 #define VIC_TXURN(_sc)		((_sc)->sc_txpending >= (_sc)->sc_ntxbuf)
249 
250 struct vic_rxbuf {
251 	bus_dmamap_t		rxb_dmamap;
252 	struct mbuf		*rxb_m;
253 };
254 
struct vic_txbuf {
	bus_dmamap_t		txb_dmamap;
	struct mbuf		*txb_m;
};

struct vic_softc {
	struct device		sc_dev;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;

	struct timeout		sc_tick;

	struct arpcom		sc_ac;
	struct ifmedia		sc_media;

	u_int32_t		sc_nrxbuf;
	u_int32_t		sc_ntxbuf;
	u_int32_t		sc_cap;
	u_int32_t		sc_feature;
	u_int8_t		sc_lladdr[ETHER_ADDR_LEN];

	bus_dmamap_t		sc_dma_map;
	bus_dma_segment_t	sc_dma_seg;
	size_t			sc_dma_size;
	caddr_t			sc_dma_kva;
#define VIC_DMA_DVA(_sc)	((_sc)->sc_dma_map->dm_segs[0].ds_addr)
#define VIC_DMA_KVA(_sc)	((void *)(_sc)->sc_dma_kva)

	struct vic_data		*sc_data;

	struct vic_rxbuf	*sc_rxbuf;
	struct vic_rxdesc	*sc_rxq;
	struct vic_rxdesc	*sc_rxq2;

	struct vic_txbuf	*sc_txbuf;
	struct vic_txdesc	*sc_txq;
	volatile u_int		sc_txpending;
};

struct cfdriver vic_cd = {
	NULL, "vic", DV_IFNET
};

int		vic_match(struct device *, void *, void *);
void		vic_attach(struct device *, struct device *, void *);

struct cfattach vic_ca = {
	sizeof(struct vic_softc), vic_match, vic_attach
};

int		vic_intr(void *);
void		vic_shutdown(void *);

int		vic_query(struct vic_softc *);
int		vic_alloc_data(struct vic_softc *);
int		vic_init_data(struct vic_softc *sc);
int		vic_uninit_data(struct vic_softc *sc);

u_int32_t	vic_read(struct vic_softc *, bus_size_t);
void		vic_write(struct vic_softc *, bus_size_t, u_int32_t);

u_int32_t	vic_read_cmd(struct vic_softc *, u_int32_t);

int		vic_alloc_dmamem(struct vic_softc *);
void		vic_free_dmamem(struct vic_softc *);

void		vic_link_state(struct vic_softc *);
void		vic_rx_proc(struct vic_softc *);
void		vic_tx_proc(struct vic_softc *);
void		vic_iff(struct vic_softc *);
void		vic_getlladdr(struct vic_softc *);
void		vic_setlladdr(struct vic_softc *);
int		vic_media_change(struct ifnet *);
void		vic_media_status(struct ifnet *, struct ifmediareq *);
void		vic_start(struct ifnet *);
int		vic_load_txb(struct vic_softc *, struct vic_txbuf *,
		    struct mbuf *);
void		vic_watchdog(struct ifnet *);
int		vic_ioctl(struct ifnet *, u_long, caddr_t);
void		vic_init(struct ifnet *);
void		vic_stop(struct ifnet *);
void		vic_tick(void *);

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

struct mbuf *vic_alloc_mbuf(struct vic_softc *, bus_dmamap_t);

const struct pci_matchid vic_devices[] = {
	{ PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET }
};

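/*
 * Match the native VMware vmxnet device, or an AMD PCnet (vlance)
 * device whose BAR is large enough to also expose the vmxnet register
 * window (a "morphable" vlance adapter).
 */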
int
vic_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args		*pa = aux;
	pcireg_t			memtype;
	bus_size_t			pcisize;
	bus_addr_t			pciaddr;

	switch (pa->pa_id) {
	case PCI_ID_CODE(PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET):
		return (1);

	case PCI_ID_CODE(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI):
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VIC_PCI_BAR);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, VIC_PCI_BAR,
		    memtype, &pciaddr, &pcisize, NULL) != 0)
			break;

		if (pcisize > VIC_LANCE_MINLEN)
			return (2);

		break;
	}

	return (0);
}

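/*
 * Map the register window, morph a vlance chip into vmxnet mode if
 * necessary, establish the interrupt, then query the device and
 * attach the network interface.
 */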
void
vic_attach(struct device *parent, struct device *self, void *aux)
{
	struct vic_softc		*sc = (struct vic_softc *)self;
	struct pci_attach_args		*pa = aux;
	bus_space_handle_t		ioh;
	pcireg_t			r;
	pci_intr_handle_t		ih;
	struct ifnet			*ifp;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	r = pci_mapreg_type(sc->sc_pc, sc->sc_tag, VIC_PCI_BAR);
	if (pci_mapreg_map(pa, VIC_PCI_BAR, r, 0, &sc->sc_iot,
	    &ioh, NULL, &sc->sc_ios, 0) != 0) {
		printf(": unable to map system interface register\n");
		return;
	}

	switch (pa->pa_id) {
	case PCI_ID_CODE(PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET):
		if (bus_space_subregion(sc->sc_iot, ioh, 0, sc->sc_ios,
		    &sc->sc_ioh) != 0) {
			printf(": unable to map register window\n");
			goto unmap;
		}
		break;

	case PCI_ID_CODE(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI):
		if (bus_space_subregion(sc->sc_iot, ioh,
		    VIC_LANCE_SIZE + VIC_MORPH_SIZE, VIC_VMXNET_SIZE,
		    &sc->sc_ioh) != 0) {
			printf(": unable to map register window\n");
			goto unmap;
		}

		bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
		    BUS_SPACE_BARRIER_READ);
		r = bus_space_read_4(sc->sc_iot, ioh, VIC_LANCE_SIZE);

		if ((r & VIC_MORPH_MASK) == VIC_MORPH_VMXNET)
			break;
		if ((r & VIC_MORPH_MASK) != VIC_MORPH_LANCE) {
			printf(": unexpected morph value (0x%08x)\n", r);
			goto unmap;
		}

		r &= ~VIC_MORPH_MASK;
		r |= VIC_MORPH_VMXNET;

		bus_space_write_4(sc->sc_iot, ioh, VIC_LANCE_SIZE, r);
		bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
		    BUS_SPACE_BARRIER_WRITE);

		bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
		    BUS_SPACE_BARRIER_READ);
		r = bus_space_read_4(sc->sc_iot, ioh, VIC_LANCE_SIZE);

		if ((r & VIC_MORPH_MASK) != VIC_MORPH_VMXNET) {
			printf(": unable to morph vlance chip\n");
			goto unmap;
		}

		break;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": unable to map interrupt\n");
		goto unmap;
	}

	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
	    vic_intr, sc, DEVNAME(sc));
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt\n");
		goto unmap;
	}

	if (vic_query(sc) != 0) {
		/* error printed by vic_query */
		goto unmap;
	}

	if (vic_alloc_data(sc) != 0) {
		/* error printed by vic_alloc_data */
		goto unmap;
	}

	timeout_set(&sc->sc_tick, vic_tick, sc);

	bcopy(sc->sc_lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vic_ioctl;
	ifp->if_start = vic_start;
	ifp->if_watchdog = vic_watchdog;
	ifp->if_hardmtu = VIC_JUMBO_MTU;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_ntxbuf - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if 0
	/* XXX interface capabilities */
	if (sc->sc_cap & VIC_CMD_HWCAP_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	if (sc->sc_cap & VIC_CMD_HWCAP_CSUM)
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, vic_media_change, vic_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	printf(": %s, address %s\n", pci_intr_string(pa->pa_pc, ih),
	    ether_sprintf(sc->sc_lladdr));

#ifdef VIC_DEBUG
	printf("%s: feature 0x%8x, cap 0x%8x, rx/txbuf %d/%d\n", DEVNAME(sc),
	    sc->sc_feature, sc->sc_cap, sc->sc_nrxbuf, sc->sc_ntxbuf);
#endif

	return;

unmap:
	bus_space_unmap(sc->sc_iot, ioh, sc->sc_ios);
	sc->sc_ios = 0;
}

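/*
 * Verify the device magic/version and fetch the ring sizes, feature
 * flags, hardware capabilities and MAC address from the device.
 */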
int
vic_query(struct vic_softc *sc)
{
	u_int32_t			major, minor;

	major = vic_read(sc, VIC_VERSION_MAJOR);
	minor = vic_read(sc, VIC_VERSION_MINOR);

	/* Check for a supported version */
	if ((major & VIC_VERSION_MAJOR_M) !=
	    (VIC_MAGIC & VIC_VERSION_MAJOR_M)) {
		printf(": magic mismatch\n");
		return (1);
	}

	if (VIC_MAGIC > major || VIC_MAGIC < minor) {
		printf(": unsupported version (%X)\n",
		    major & ~VIC_VERSION_MAJOR_M);
		return (1);
	}

	sc->sc_nrxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Rx_BUF);
	sc->sc_ntxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Tx_BUF);
	sc->sc_feature = vic_read_cmd(sc, VIC_CMD_FEATURE);
	sc->sc_cap = vic_read_cmd(sc, VIC_CMD_HWCAP);

	vic_getlladdr(sc);

	if (sc->sc_nrxbuf > VIC_NBUF_MAX || sc->sc_nrxbuf == 0)
		sc->sc_nrxbuf = VIC_NBUF;
	if (sc->sc_ntxbuf > VIC_NBUF_MAX || sc->sc_ntxbuf == 0)
		sc->sc_ntxbuf = VIC_NBUF;

	return (0);
}

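/*
 * Allocate the rx/tx buffer arrays and the shared DMA region, then
 * carve the region up into the vic_data header, both rx rings and
 * the tx ring.
 */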
int
vic_alloc_data(struct vic_softc *sc)
{
	u_int8_t			*kva;
	u_int				offset;
	struct vic_rxdesc		*rxd;
	int				i;

	sc->sc_rxbuf = malloc(sizeof(struct vic_rxbuf) * sc->sc_nrxbuf,
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_rxbuf == NULL) {
		printf(": unable to allocate rxbuf\n");
		goto err;
	}

	sc->sc_txbuf = malloc(sizeof(struct vic_txbuf) * sc->sc_ntxbuf,
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_txbuf == NULL) {
		printf(": unable to allocate txbuf\n");
		goto freerx;
	}

	sc->sc_dma_size = sizeof(struct vic_data) +
	    (sc->sc_nrxbuf + VIC_QUEUE2_SIZE) * sizeof(struct vic_rxdesc) +
	    sc->sc_ntxbuf * sizeof(struct vic_txdesc);

	if (vic_alloc_dmamem(sc) != 0) {
		printf(": unable to allocate dma region\n");
		goto freetx;
	}
	kva = VIC_DMA_KVA(sc);

	/* set up basic vic data */
	sc->sc_data = VIC_DMA_KVA(sc);

	sc->sc_data->vd_magic = VIC_MAGIC;
	sc->sc_data->vd_length = sc->sc_dma_size;

	offset = sizeof(struct vic_data);

	/* set up the rx ring */
	sc->sc_rxq = (struct vic_rxdesc *)&kva[offset];

	sc->sc_data->vd_rx_offset = offset;
	sc->sc_data->vd_rx_length = sc->sc_nrxbuf;

	offset += sizeof(struct vic_rxdesc) * sc->sc_nrxbuf;

	/* set up the dummy rx ring 2 with an unusable entry */
	sc->sc_rxq2 = (struct vic_rxdesc *)&kva[offset];

	sc->sc_data->vd_rx_offset2 = offset;
	sc->sc_data->vd_rx_length2 = VIC_QUEUE2_SIZE;

	for (i = 0; i < VIC_QUEUE2_SIZE; i++) {
		rxd = &sc->sc_rxq2[i];

		rxd->rx_physaddr = 0;
		rxd->rx_buflength = 0;
		rxd->rx_length = 0;
		rxd->rx_owner = VIC_OWNER_DRIVER;

		offset += sizeof(struct vic_rxdesc);
	}

	/* set up the tx ring */
	sc->sc_txq = (struct vic_txdesc *)&kva[offset];

	sc->sc_data->vd_tx_offset = offset;
	sc->sc_data->vd_tx_length = sc->sc_ntxbuf;

	return (0);
freetx:
	free(sc->sc_txbuf, M_DEVBUF);
freerx:
	free(sc->sc_rxbuf, M_DEVBUF);
err:
	return (1);
}

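/*
 * Create a DMA map and attach an mbuf cluster to every rx descriptor,
 * and create DMA maps for the tx buffers. Undoes itself on failure.
 */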
int
vic_init_data(struct vic_softc *sc)
{
	struct vic_rxbuf		*rxb;
	struct vic_rxdesc		*rxd;
	struct vic_txbuf		*txb;
	int				i;

	for (i = 0; i < sc->sc_nrxbuf; i++) {
		rxb = &sc->sc_rxbuf[i];
		rxd = &sc->sc_rxq[i];

		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxb->rxb_dmamap) != 0) {
			printf("%s: unable to create dmamap for rxb %d\n",
			    DEVNAME(sc), i);
			goto freerxbs;
		}

		rxb->rxb_m = vic_alloc_mbuf(sc, rxb->rxb_dmamap);
		if (rxb->rxb_m == NULL) {
			/* error already printed */
			bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);
			goto freerxbs;
		}

		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

		rxd->rx_physaddr = rxb->rxb_dmamap->dm_segs[0].ds_addr;
		rxd->rx_buflength = rxb->rxb_m->m_pkthdr.len; /* XXX? */
		rxd->rx_length = 0;
		rxd->rx_owner = VIC_OWNER_NIC;
	}

	for (i = 0; i < sc->sc_ntxbuf; i++) {
		txb = &sc->sc_txbuf[i];
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    (sc->sc_cap & VIC_CMD_HWCAP_SG) ? VIC_SG_MAX : 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &txb->txb_dmamap) != 0) {
			printf("%s: unable to create dmamap for tx %d\n",
			    DEVNAME(sc), i);
			goto freetxbs;
		}
		txb->txb_m = NULL;
	}

	return (0);

freetxbs:
	while (i--) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_destroy(sc->sc_dmat, txb->txb_dmamap);
	}

	i = sc->sc_nrxbuf;
freerxbs:
	while (i--) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);
		bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);
	}

	return (1);
}

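/*
 * Tear down the rx and tx DMA maps and free the rx mbufs set up by
 * vic_init_data().
 */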
int
vic_uninit_data(struct vic_softc *sc)
{
	struct vic_rxbuf		*rxb;
	struct vic_rxdesc		*rxd;
	struct vic_txbuf		*txb;
	int				i;

	for (i = 0; i < sc->sc_nrxbuf; i++) {
		rxb = &sc->sc_rxbuf[i];
		rxd = &sc->sc_rxq[i];

		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);
		bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);

		m_freem(rxb->rxb_m);
		rxb->rxb_m = NULL;
	}

	for (i = 0; i < sc->sc_ntxbuf; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_destroy(sc->sc_dmat, txb->txb_dmamap);
	}

	return (0);
}

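/*
 * Derive the link state from the status register and report changes
 * to the network stack.
 */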
void
vic_link_state(struct vic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	u_int32_t status;
	int link_state = LINK_STATE_DOWN;

	status = vic_read(sc, VIC_STATUS);
	if (status & VIC_STATUS_CONNECTED)
		link_state = LINK_STATE_FULL_DUPLEX;
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}
}

void
vic_shutdown(void *self)
{
	struct vic_softc *sc = (struct vic_softc *)self;

	vic_stop(&sc->sc_ac.ac_if);
}

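/*
 * Interrupt handler: service both rings, then acknowledge the
 * interrupt.
 */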
int
vic_intr(void *arg)
{
	struct vic_softc *sc = (struct vic_softc *)arg;

	vic_rx_proc(sc);
	vic_tx_proc(sc);

	vic_write(sc, VIC_CMD, VIC_CMD_INTR_ACK);

	return (1);
}

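/*
 * Harvest received packets from the rx ring, hand them to the stack
 * and replenish the descriptors with fresh mbufs.
 */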
void
vic_rx_proc(struct vic_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	struct vic_rxdesc		*rxd;
	struct vic_rxbuf		*rxb;
	struct mbuf			*m;
	int				len, idx;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		idx = sc->sc_data->vd_rx_nextidx;
		if (idx >= sc->sc_data->vd_rx_length) {
			ifp->if_ierrors++;
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: receive index error\n",
				    sc->sc_dev.dv_xname);
			break;
		}

		rxd = &sc->sc_rxq[idx];
		if (rxd->rx_owner != VIC_OWNER_DRIVER)
			break;

		rxb = &sc->sc_rxbuf[idx];

		len = rxd->rx_length;
		if (len < VIC_MIN_FRAMELEN) {
			ifp->if_iqdrops++;
			goto nextp;
		}

		if (rxb->rxb_m == NULL) {
			ifp->if_ierrors++;
			printf("%s: rxb %d has no mbuf\n", DEVNAME(sc), idx);
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);

		m = rxb->rxb_m;
		rxb->rxb_m = NULL;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		/* Get new mbuf for the Rx queue */
		rxb->rxb_m = vic_alloc_mbuf(sc, rxb->rxb_dmamap);
		if (rxb->rxb_m == NULL) {
			ifp->if_ierrors++;
			printf("%s: mbuf alloc failed\n", DEVNAME(sc));
			break;
		}
		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

		rxd->rx_physaddr = rxb->rxb_dmamap->dm_segs[0].ds_addr;
		rxd->rx_buflength = rxb->rxb_m->m_pkthdr.len;
		rxd->rx_length = 0;
		rxd->rx_owner = VIC_OWNER_DRIVER;

		ifp->if_ipackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);

nextp:
		rxd->rx_owner = VIC_OWNER_NIC;
		VIC_INC(sc->sc_data->vd_rx_nextidx, sc->sc_data->vd_rx_length);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

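/*
 * Reclaim completed tx descriptors, free their mbufs and restart
 * output.
 */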
void
vic_tx_proc(struct vic_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	struct vic_txdesc		*txd;
	struct vic_txbuf		*txb;
	int				idx;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (sc->sc_txpending > 0) {
		idx = sc->sc_data->vd_tx_curidx;
		if (idx >= sc->sc_data->vd_tx_length) {
			ifp->if_oerrors++;
			break;
		}

		txd = &sc->sc_txq[idx];
		if (txd->tx_owner != VIC_OWNER_DRIVER)
			break;

		txb = &sc->sc_txbuf[idx];
		if (txb->txb_m == NULL) {
			printf("%s: tx ring is corrupt\n", DEVNAME(sc));
			ifp->if_oerrors++;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txb->txb_dmamap, 0,
		    txb->txb_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txb->txb_dmamap);

		m_freem(txb->txb_m);
		txb->txb_m = NULL;
		ifp->if_flags &= ~IFF_OACTIVE;

		sc->sc_txpending--;
		sc->sc_data->vd_tx_stopped = 0;

		VIC_INC(sc->sc_data->vd_tx_curidx, sc->sc_data->vd_tx_length);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	vic_start(ifp);
}

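/*
 * Program the interface and multicast filter flags into the shared
 * data area and notify the device.
 */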
void
vic_iff(struct vic_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t *mcastfil = (u_int16_t *)sc->sc_data->vd_mcastfil;
	u_int flags = 0;

	bzero(&sc->sc_data->vd_mcastfil, sizeof(sc->sc_data->vd_mcastfil));
	ifp->if_flags &= ~IFF_ALLMULTI;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		goto domulti;
	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN))
			goto allmulti;

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
		crc >>= 26;
		mcastfil[crc >> 4] |= htole16(1 << (crc & 0xf));

		ETHER_NEXT_MULTI(step, enm);
	}

	goto domulti;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	memset(&sc->sc_data->vd_mcastfil, 0xff,
	    sizeof(sc->sc_data->vd_mcastfil));

 domulti:
	vic_write(sc, VIC_CMD, VIC_CMD_MCASTFIL);

	if (ifp->if_flags & IFF_RUNNING) {
		flags = (ifp->if_flags & IFF_PROMISC) ?
		    VIC_CMD_IFF_PROMISC :
		    (VIC_CMD_IFF_BROADCAST | VIC_CMD_IFF_MULTICAST);
	}
	sc->sc_data->vd_iff = flags;
	vic_write(sc, VIC_CMD, VIC_CMD_IFF);
}

void
vic_getlladdr(struct vic_softc *sc)
{
	u_int32_t reg;

	/* Get MAC address */
	reg = (sc->sc_cap & VIC_CMD_HWCAP_VPROM) ? VIC_VPROM : VIC_LLADDR;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, reg, ETHER_ADDR_LEN,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_region_1(sc->sc_iot, sc->sc_ioh, reg, sc->sc_lladdr,
	    ETHER_ADDR_LEN);

	/* Update the MAC address register */
	if (reg == VIC_VPROM)
		vic_setlladdr(sc);
}

void
vic_setlladdr(struct vic_softc *sc)
{
	bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, VIC_LLADDR,
	    sc->sc_lladdr, ETHER_ADDR_LEN);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, VIC_LLADDR, ETHER_ADDR_LEN,
	    BUS_SPACE_BARRIER_WRITE);
}

int
vic_media_change(struct ifnet *ifp)
{
	/* Ignore */
	return (0);
}

void
vic_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	imr->ifm_status = IFM_AVALID;

	vic_link_state(sc);

	if (LINK_STATE_IS_UP(ifp->if_link_state) &&
	    ifp->if_flags & IFF_UP)
		imr->ifm_status |= IFM_ACTIVE;
}

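/*
 * Dequeue packets and post them on the tx ring; the read of
 * VIC_Tx_ADDR at the end appears to be what tells the device to
 * process the newly queued descriptors.
 */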
void
vic_start(struct ifnet *ifp)
{
	struct vic_softc		*sc;
	struct mbuf			*m;
	struct vic_txbuf		*txb;
	struct vic_txdesc		*txd;
	struct vic_sg			*sge;
	bus_dmamap_t			dmap;
	int				i, idx;
	int				tx = 0;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	sc = (struct vic_softc *)ifp->if_softc;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		if (VIC_TXURN(sc)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		idx = sc->sc_data->vd_tx_nextidx;
		if (idx >= sc->sc_data->vd_tx_length) {
			printf("%s: tx idx is corrupt\n", DEVNAME(sc));
			ifp->if_oerrors++;
			break;
		}

		txd = &sc->sc_txq[idx];
		txb = &sc->sc_txbuf[idx];

		if (txb->txb_m != NULL) {
			printf("%s: tx ring is corrupt\n", DEVNAME(sc));
			sc->sc_data->vd_tx_stopped = 1;
			ifp->if_oerrors++;
			break;
		}

		/*
		 * we're committed to sending it now. if we can't map it into
		 * dma memory then we drop it.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (vic_load_txb(sc, txb, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			/* continue? */
			break;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, txb->txb_m, BPF_DIRECTION_OUT);
#endif

		dmap = txb->txb_dmamap;
		txd->tx_flags = VIC_TX_FLAGS_KEEP;
		txd->tx_owner = VIC_OWNER_NIC;
		txd->tx_sa.sa_addr_type = VIC_SG_ADDR_PHYS;
		txd->tx_sa.sa_length = dmap->dm_nsegs;
		for (i = 0; i < dmap->dm_nsegs; i++) {
			sge = &txd->tx_sa.sa_sg[i];
			sge->sg_length = dmap->dm_segs[i].ds_len;
			sge->sg_addr_low = dmap->dm_segs[i].ds_addr;
		}

		if (VIC_TXURN_WARN(sc)) {
			txd->tx_flags |= VIC_TX_FLAGS_TXURN;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		ifp->if_opackets++;
		sc->sc_txpending++;

		VIC_INC(sc->sc_data->vd_tx_nextidx, sc->sc_data->vd_tx_length);

		tx = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (tx)
		vic_read(sc, VIC_Tx_ADDR);
}

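/*
 * Load an mbuf chain into a tx DMA map, copying it into a fresh
 * mbuf (cluster) first if the chain is too fragmented to fit the map.
 */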
int
vic_load_txb(struct vic_softc *sc, struct vic_txbuf *txb, struct mbuf *m)
{
	bus_dmamap_t			dmap = txb->txb_dmamap;
	struct mbuf			*m0 = NULL;
	int				error;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		txb->txb_m = m;
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		MGETHDR(m0, M_DONTWAIT, MT_DATA);
		if (m0 == NULL)
			return (ENOBUFS);
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m0, M_DONTWAIT);
			if (!(m0->m_flags & M_EXT)) {
				m_freem(m0);
				return (ENOBUFS);
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
		m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(m0);
			printf("%s: tx dmamap load error %d\n", DEVNAME(sc),
			    error);
			return (ENOBUFS);
		}
		m_freem(m);
		txb->txb_m = m0;
		break;

	default:
		printf("%s: tx dmamap load error %d\n", DEVNAME(sc), error);
		return (ENOBUFS);
	}

	return (0);
}

void
vic_watchdog(struct ifnet *ifp)
{
#if 0
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;

	if (sc->sc_txpending && sc->sc_txtimeout > 0) {
		if (--sc->sc_txtimeout == 0) {
			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
			ifp->if_flags &= ~IFF_RUNNING;
			vic_init(ifp);
			ifp->if_oerrors++;
			return;
		}
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vic_start(ifp);
#endif
}

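/*
 * Handle interface ioctls: address and flag changes, MTU, multicast
 * membership and media requests.
 */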
int
vic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_ac, cmd, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifa = (struct ifaddr *)data;
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_ac, ifa);
#endif
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				vic_iff(sc);
			else
				vic_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vic_stop(ifp);
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ifr = (struct ifreq *)data;
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ac) :
		    ether_delmulti(ifr, &sc->sc_ac);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				vic_iff(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ENOTTY;
		break;
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			vic_iff(ifp->if_softc);
		error = 0;
	}

	splx(s);

	return (error);
}

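/*
 * Bring the interface up: initialize the rings, point the device at
 * the shared data area, set the rx filter and enable interrupts.
 */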
void
vic_init(struct ifnet *ifp)
{
	struct vic_softc	*sc = (struct vic_softc *)ifp->if_softc;
	int			s;

	if (vic_init_data(sc) != 0)
		return;

	sc->sc_data->vd_tx_curidx = 0;
	sc->sc_data->vd_tx_nextidx = 0;
	sc->sc_data->vd_tx_stopped = sc->sc_data->vd_tx_queued = 0;

	sc->sc_data->vd_rx_nextidx = 0;
	sc->sc_data->vd_rx_nextidx2 = 0;

	sc->sc_data->vd_rx_saved_nextidx = 0;
	sc->sc_data->vd_rx_saved_nextidx2 = 0;
	sc->sc_data->vd_tx_saved_nextidx = 0;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	s = splnet();

	vic_write(sc, VIC_DATA_ADDR, VIC_DMA_DVA(sc));
	vic_write(sc, VIC_DATA_LENGTH, sc->sc_dma_size);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	vic_iff(sc);
	vic_write(sc, VIC_CMD, VIC_CMD_INTR_ENABLE);

	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}

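/*
 * Bring the interface down: drain pending transmits, disable
 * interrupts, detach the shared data area and release the buffers.
 */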
void
vic_stop(struct ifnet *ifp)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
	int s;

	s = splnet();

	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* XXX wait for tx to complete */
	while (sc->sc_txpending > 0) {
		splx(s);
		delay(1000);
		s = splnet();
	}

	sc->sc_data->vd_tx_stopped = 1;

	vic_write(sc, VIC_CMD, VIC_CMD_INTR_DISABLE);

	vic_iff(sc);
	vic_write(sc, VIC_DATA_ADDR, 0);

	vic_uninit_data(sc);

	splx(s);
}

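/*
 * Allocate an mbuf cluster and load it into the given DMA map for
 * use as an rx buffer.
 */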
struct mbuf *
vic_alloc_mbuf(struct vic_softc *sc, bus_dmamap_t map)
{
	struct mbuf *m = NULL;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (NULL);
	}
	m->m_data += ETHER_ALIGN;
	m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map\n", DEVNAME(sc));
		m_freem(m);
		return (NULL);
	}

	return (m);
}

void
vic_tick(void *arg)
{
	struct vic_softc		*sc = (struct vic_softc *)arg;

	vic_link_state(sc);

	timeout_add_sec(&sc->sc_tick, 1);
}

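/*
 * Register access helpers: 4-byte reads and writes with bus space
 * barriers. vic_read_cmd() writes a command and reads the result
 * back from the command register.
 */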
u_int32_t
vic_read(struct vic_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}

void
vic_write(struct vic_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

u_int32_t
vic_read_cmd(struct vic_softc *sc, u_int32_t cmd)
{
	vic_write(sc, VIC_CMD, cmd);
	return (vic_read(sc, VIC_CMD));
}

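/*
 * Allocate, map and load the single contiguous DMA region shared
 * with the device.
 */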
int
vic_alloc_dmamem(struct vic_softc *sc)
{
	int nsegs;

	if (bus_dmamap_create(sc->sc_dmat, sc->sc_dma_size, 1,
	    sc->sc_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->sc_dma_map) != 0)
		goto err;

	if (bus_dmamem_alloc(sc->sc_dmat, sc->sc_dma_size, 16, 0,
	    &sc->sc_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_seg, nsegs,
	    sc->sc_dma_size, &sc->sc_dma_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dma_map, sc->sc_dma_kva,
	    sc->sc_dma_size, NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	bzero(sc->sc_dma_kva, sc->sc_dma_size);

	return (0);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
free:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma_map);
err:
	return (1);
}

void
vic_free_dmamem(struct vic_softc *sc)
{
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma_map);
}
1429