/*	$OpenBSD: if_vic.c,v 1.70 2009/02/01 14:05:52 dlg Exp $	*/

/*
 * Copyright (c) 2006 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the VMware Virtual NIC ("vmxnet")
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/timeout.h>
#include <sys/device.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define VIC_PCI_BAR		PCI_MAPREG_START /* Base Address Register */

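/*
 * An emulated AMD PCnet ("vlance") device can be morphed into the
 * paravirtual vmxnet interface by rewriting the morph register that
 * sits between the Lance and vmxnet register windows; vic_attach()
 * below performs the morph.
 */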
#define VIC_LANCE_SIZE		0x20
#define VIC_MORPH_SIZE		0x04
#define  VIC_MORPH_MASK			0xffff
#define  VIC_MORPH_LANCE		0x2934
#define  VIC_MORPH_VMXNET		0x4392
#define VIC_VMXNET_SIZE		0x40
#define VIC_LANCE_MINLEN	(VIC_LANCE_SIZE + VIC_MORPH_SIZE + \
				    VIC_VMXNET_SIZE)

#define VIC_MAGIC		0xbabe864f

/* Register address offsets */
#define VIC_DATA_ADDR		0x0000		/* Shared data address */
#define VIC_DATA_LENGTH		0x0004		/* Shared data length */
#define VIC_Tx_ADDR		0x0008		/* Tx pointer address */

/* Command register */
#define VIC_CMD			0x000c		/* Command register */
#define  VIC_CMD_INTR_ACK	0x0001	/* Acknowledge interrupt */
#define  VIC_CMD_MCASTFIL	0x0002	/* Multicast address filter */
#define   VIC_CMD_MCASTFIL_LENGTH	2
#define  VIC_CMD_IFF		0x0004	/* Interface flags */
#define   VIC_CMD_IFF_PROMISC	0x0001		/* Promiscuous enabled */
#define   VIC_CMD_IFF_BROADCAST	0x0002		/* Broadcast enabled */
#define   VIC_CMD_IFF_MULTICAST	0x0004		/* Multicast enabled */
#define  VIC_CMD_INTR_DISABLE	0x0020	/* Disable interrupts */
#define  VIC_CMD_INTR_ENABLE	0x0040	/* Enable interrupts */
#define  VIC_CMD_Tx_DONE	0x0100	/* Tx done register */
#define  VIC_CMD_NUM_Rx_BUF	0x0200	/* Number of Rx buffers */
#define  VIC_CMD_NUM_Tx_BUF	0x0400	/* Number of Tx buffers */
#define  VIC_CMD_NUM_PINNED_BUF	0x0800	/* Number of pinned buffers */
#define  VIC_CMD_HWCAP		0x1000	/* Capability register */
#define   VIC_CMD_HWCAP_SG		(1<<0) /* Scatter-gather transmits */
#define   VIC_CMD_HWCAP_CSUM_IPv4	(1<<1) /* TCP/UDP cksum */
#define   VIC_CMD_HWCAP_CSUM_ALL	(1<<3) /* Hardware cksum */
#define   VIC_CMD_HWCAP_CSUM \
	(VIC_CMD_HWCAP_CSUM_IPv4 | VIC_CMD_HWCAP_CSUM_ALL)
#define   VIC_CMD_HWCAP_DMA_HIGH		(1<<4) /* High DMA mapping */
#define   VIC_CMD_HWCAP_TOE		(1<<5) /* TCP offload engine */
#define   VIC_CMD_HWCAP_TSO		(1<<6) /* TCP segmentation offload */
#define   VIC_CMD_HWCAP_TSO_SW		(1<<7) /* Software TCP segmentation */
#define   VIC_CMD_HWCAP_VPROM		(1<<8) /* Virtual PROM available */
#define   VIC_CMD_HWCAP_VLAN_Tx		(1<<9) /* Hardware VLAN MTU Tx */
#define   VIC_CMD_HWCAP_VLAN_Rx		(1<<10) /* Hardware VLAN MTU Rx */
#define   VIC_CMD_HWCAP_VLAN_SW		(1<<11)	/* Software VLAN MTU */
#define   VIC_CMD_HWCAP_VLAN \
	(VIC_CMD_HWCAP_VLAN_Tx | VIC_CMD_HWCAP_VLAN_Rx | \
	VIC_CMD_HWCAP_VLAN_SW)
#define  VIC_CMD_HWCAP_BITS \
	"\20\01SG\02CSUM4\04CSUM\05HDMA\06TOE\07TSO" \
	"\10TSOSW\11VPROM\12VLANTx\13VLANRx\14VLANSW"
#define  VIC_CMD_FEATURE	0x2000	/* Additional feature register */
#define   VIC_CMD_FEATURE_0_Tx		(1<<0)
#define   VIC_CMD_FEATURE_TSO		(1<<1)

#define VIC_LLADDR		0x0010		/* MAC address register */
#define VIC_VERSION_MINOR	0x0018		/* Minor version register */
#define VIC_VERSION_MAJOR	0x001c		/* Major version register */
#define VIC_VERSION_MAJOR_M	0xffff0000

/* Status register */
#define VIC_STATUS		0x0020
#define  VIC_STATUS_CONNECTED		(1<<0)
#define  VIC_STATUS_ENABLED		(1<<1)

#define VIC_TOE_ADDR		0x0024		/* TCP offload address */

/* Virtual PROM address */
#define VIC_VPROM		0x0028
#define VIC_VPROM_LENGTH	6

/* Shared DMA data structures */

struct vic_sg {
	u_int32_t	sg_addr_low;
	u_int16_t	sg_addr_high;
	u_int16_t	sg_length;
} __packed;

#define VIC_SG_MAX		6
#define VIC_SG_ADDR_MACH	0
#define VIC_SG_ADDR_PHYS	1
#define VIC_SG_ADDR_VIRT	3

struct vic_sgarray {
	u_int16_t	sa_addr_type;
	u_int16_t	sa_length;
	struct vic_sg	sa_sg[VIC_SG_MAX];
} __packed;

struct vic_rxdesc {
	u_int64_t	rx_physaddr;
	u_int32_t	rx_buflength;
	u_int32_t	rx_length;
	u_int16_t	rx_owner;
	u_int16_t	rx_flags;
	u_int32_t	rx_priv;
} __packed;

#define VIC_RX_FLAGS_CSUMHW_OK	0x0001

struct vic_txdesc {
	u_int16_t		tx_flags;
	u_int16_t		tx_owner;
	u_int32_t		tx_priv;
	u_int32_t		tx_tsomss;
	struct vic_sgarray	tx_sa;
} __packed;

#define VIC_TX_FLAGS_KEEP	0x0001
#define VIC_TX_FLAGS_TXURN	0x0002
#define VIC_TX_FLAGS_CSUMHW	0x0004
#define VIC_TX_FLAGS_TSO	0x0008
#define VIC_TX_FLAGS_PINNED	0x0010
#define VIC_TX_FLAGS_QRETRY	0x1000

struct vic_stats {
	u_int32_t		vs_tx_count;
	u_int32_t		vs_tx_packets;
	u_int32_t		vs_tx_0copy;
	u_int32_t		vs_tx_copy;
	u_int32_t		vs_tx_maxpending;
	u_int32_t		vs_tx_stopped;
	u_int32_t		vs_tx_overrun;
	u_int32_t		vs_intr;
	u_int32_t		vs_rx_packets;
	u_int32_t		vs_rx_underrun;
} __packed;

#define VIC_NRXRINGS		2

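/*
 * Layout of the shared memory area exchanged with the device via
 * VIC_DATA_ADDR/VIC_DATA_LENGTH. vd_magic must hold VIC_MAGIC before
 * the region is handed to the device.
 */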
struct vic_data {
	u_int32_t		vd_magic;

	struct {
		u_int32_t		length;
		u_int32_t		nextidx;
	}			vd_rx[VIC_NRXRINGS];

	u_int32_t		vd_irq;
	u_int32_t		vd_iff;

	u_int32_t		vd_mcastfil[VIC_CMD_MCASTFIL_LENGTH];

	u_int32_t		vd_reserved1[1];

	u_int32_t		vd_tx_length;
	u_int32_t		vd_tx_curidx;
	u_int32_t		vd_tx_nextidx;
	u_int32_t		vd_tx_stopped;
	u_int32_t		vd_tx_triggerlvl;
	u_int32_t		vd_tx_queued;
	u_int32_t		vd_tx_minlength;

	u_int32_t		vd_reserved2[6];

	u_int32_t		vd_rx_saved_nextidx[VIC_NRXRINGS];
	u_int32_t		vd_tx_saved_nextidx;

	u_int32_t		vd_length;
	u_int32_t		vd_rx_offset[VIC_NRXRINGS];
	u_int32_t		vd_tx_offset;
	u_int32_t		vd_debug;
	u_int32_t		vd_tx_physaddr;
	u_int32_t		vd_tx_physaddr_length;
	u_int32_t		vd_tx_maxlength;

	struct vic_stats	vd_stats;
} __packed;

#define VIC_OWNER_DRIVER	0
#define VIC_OWNER_DRIVER_PEND	1
#define VIC_OWNER_NIC		2
#define VIC_OWNER_NIC_PEND	3

#define VIC_JUMBO_FRAMELEN	9018
#define VIC_JUMBO_MTU		(VIC_JUMBO_FRAMELEN - ETHER_HDR_LEN - ETHER_CRC_LEN)

#define VIC_NBUF		100
#define VIC_NBUF_MAX		128
#define VIC_MAX_SCATTER		1	/* 8? */
#define VIC_QUEUE_SIZE		VIC_NBUF_MAX
#define VIC_INC(_x, _y)		(_x) = ((_x) + 1) % (_y)
#define VIC_TX_TIMEOUT		5

#define VIC_MIN_FRAMELEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

#define VIC_TXURN_WARN(_sc)	((_sc)->sc_txpending >= ((_sc)->sc_ntxbuf - 5))
#define VIC_TXURN(_sc)		((_sc)->sc_txpending >= (_sc)->sc_ntxbuf)

struct vic_rxbuf {
	bus_dmamap_t		rxb_dmamap;
	struct mbuf		*rxb_m;
};

struct vic_txbuf {
	bus_dmamap_t		txb_dmamap;
	struct mbuf		*txb_m;
};

struct vic_softc {
	struct device		sc_dev;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;

	struct timeout		sc_tick;

	struct arpcom		sc_ac;
	struct ifmedia		sc_media;

	u_int32_t		sc_nrxbuf;
	u_int32_t		sc_ntxbuf;
	u_int32_t		sc_cap;
	u_int32_t		sc_feature;
	u_int8_t		sc_lladdr[ETHER_ADDR_LEN];

	bus_dmamap_t		sc_dma_map;
	bus_dma_segment_t	sc_dma_seg;
	size_t			sc_dma_size;
	caddr_t			sc_dma_kva;
#define VIC_DMA_DVA(_sc)	((_sc)->sc_dma_map->dm_segs[0].ds_addr)
#define VIC_DMA_KVA(_sc)	((void *)(_sc)->sc_dma_kva)

	struct vic_data		*sc_data;

	struct {
		struct vic_rxbuf	*bufs;
		struct vic_rxdesc	*slots;
		int			end;
		int			len;
		u_int			pktlen;
	}			sc_rxq[VIC_NRXRINGS];

	struct vic_txbuf	*sc_txbuf;
	struct vic_txdesc	*sc_txq;
	volatile u_int		sc_txpending;
};

struct cfdriver vic_cd = {
	NULL, "vic", DV_IFNET
};

int		vic_match(struct device *, void *, void *);
void		vic_attach(struct device *, struct device *, void *);

struct cfattach vic_ca = {
	sizeof(struct vic_softc), vic_match, vic_attach
};

int		vic_intr(void *);
void		vic_shutdown(void *);

int		vic_query(struct vic_softc *);
int		vic_alloc_data(struct vic_softc *);
int		vic_init_data(struct vic_softc *sc);
int		vic_uninit_data(struct vic_softc *sc);

u_int32_t	vic_read(struct vic_softc *, bus_size_t);
void		vic_write(struct vic_softc *, bus_size_t, u_int32_t);

u_int32_t	vic_read_cmd(struct vic_softc *, u_int32_t);

int		vic_alloc_dmamem(struct vic_softc *);
void		vic_free_dmamem(struct vic_softc *);

void		vic_link_state(struct vic_softc *);
void		vic_rx_fill(struct vic_softc *, int);
void		vic_rx_proc(struct vic_softc *, int);
void		vic_tx_proc(struct vic_softc *);
void		vic_iff(struct vic_softc *);
void		vic_getlladdr(struct vic_softc *);
void		vic_setlladdr(struct vic_softc *);
int		vic_media_change(struct ifnet *);
void		vic_media_status(struct ifnet *, struct ifmediareq *);
void		vic_start(struct ifnet *);
int		vic_load_txb(struct vic_softc *, struct vic_txbuf *,
		    struct mbuf *);
void		vic_watchdog(struct ifnet *);
int		vic_ioctl(struct ifnet *, u_long, caddr_t);
void		vic_init(struct ifnet *);
void		vic_stop(struct ifnet *);
void		vic_tick(void *);

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

struct mbuf *vic_alloc_mbuf(struct vic_softc *, bus_dmamap_t, u_int);

const struct pci_matchid vic_devices[] = {
	{ PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET }
};

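/*
 * Match the native vmxnet device outright. The emulated AMD PCnet
 * ("vlance") device is only claimed, at a higher priority than the
 * plain PCnet driver, when its BAR is large enough to contain the
 * morph and vmxnet register windows.
 */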
int
vic_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args		*pa = aux;
	pcireg_t			memtype;
	bus_size_t			pcisize;
	bus_addr_t			pciaddr;

	switch (pa->pa_id) {
	case PCI_ID_CODE(PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET):
		return (1);

	case PCI_ID_CODE(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI):
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, VIC_PCI_BAR);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, VIC_PCI_BAR,
		    memtype, &pciaddr, &pcisize, NULL) != 0)
			break;

		if (pcisize > VIC_LANCE_MINLEN)
			return (2);

		break;
	}

	return (0);
}

void
vic_attach(struct device *parent, struct device *self, void *aux)
{
	struct vic_softc		*sc = (struct vic_softc *)self;
	struct pci_attach_args		*pa = aux;
	bus_space_handle_t		ioh;
	pcireg_t			r;
	pci_intr_handle_t		ih;
	struct ifnet			*ifp;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	r = pci_mapreg_type(sc->sc_pc, sc->sc_tag, VIC_PCI_BAR);
	if (pci_mapreg_map(pa, VIC_PCI_BAR, r, 0, &sc->sc_iot,
	    &ioh, NULL, &sc->sc_ios, 0) != 0) {
		printf(": unable to map system interface register\n");
		return;
	}

	switch (pa->pa_id) {
	case PCI_ID_CODE(PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET):
		if (bus_space_subregion(sc->sc_iot, ioh, 0, sc->sc_ios,
		    &sc->sc_ioh) != 0) {
			printf(": unable to map register window\n");
			goto unmap;
		}
		break;

	case PCI_ID_CODE(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI):
		if (bus_space_subregion(sc->sc_iot, ioh,
		    VIC_LANCE_SIZE + VIC_MORPH_SIZE, VIC_VMXNET_SIZE,
		    &sc->sc_ioh) != 0) {
			printf(": unable to map register window\n");
			goto unmap;
		}

		bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
		    BUS_SPACE_BARRIER_READ);
		r = bus_space_read_4(sc->sc_iot, ioh, VIC_LANCE_SIZE);

		if ((r & VIC_MORPH_MASK) == VIC_MORPH_VMXNET)
			break;
		if ((r & VIC_MORPH_MASK) != VIC_MORPH_LANCE) {
			printf(": unexpected morph value (0x%08x)\n", r);
			goto unmap;
		}

		r &= ~VIC_MORPH_MASK;
		r |= VIC_MORPH_VMXNET;

		bus_space_write_4(sc->sc_iot, ioh, VIC_LANCE_SIZE, r);
		bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
		    BUS_SPACE_BARRIER_WRITE);

		bus_space_barrier(sc->sc_iot, ioh, VIC_LANCE_SIZE, 4,
		    BUS_SPACE_BARRIER_READ);
		r = bus_space_read_4(sc->sc_iot, ioh, VIC_LANCE_SIZE);

		if ((r & VIC_MORPH_MASK) != VIC_MORPH_VMXNET) {
			printf(": unable to morph vlance chip\n");
			goto unmap;
		}

		break;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": unable to map interrupt\n");
		goto unmap;
	}

	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
	    vic_intr, sc, DEVNAME(sc));
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt\n");
		goto unmap;
	}

	if (vic_query(sc) != 0) {
		/* error printed by vic_query */
		goto unmap;
	}

	if (vic_alloc_data(sc) != 0) {
		/* error printed by vic_alloc_data */
		goto unmap;
	}

	timeout_set(&sc->sc_tick, vic_tick, sc);

	bcopy(sc->sc_lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vic_ioctl;
	ifp->if_start = vic_start;
	ifp->if_watchdog = vic_watchdog;
	ifp->if_hardmtu = VIC_JUMBO_MTU;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_ntxbuf - 1);
	IFQ_SET_READY(&ifp->if_snd);

	m_clsetwms(ifp, MCLBYTES, 2, sc->sc_nrxbuf - 1);
	m_clsetwms(ifp, 4096, 2, sc->sc_nrxbuf - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if 0
	/* XXX interface capabilities */
	if (sc->sc_cap & VIC_CMD_HWCAP_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	if (sc->sc_cap & VIC_CMD_HWCAP_CSUM)
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, vic_media_change, vic_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	printf(": %s, address %s\n", pci_intr_string(pa->pa_pc, ih),
	    ether_sprintf(sc->sc_lladdr));

#ifdef VIC_DEBUG
	printf("%s: feature 0x%8x, cap 0x%8x, rx/txbuf %d/%d\n", DEVNAME(sc),
	    sc->sc_feature, sc->sc_cap, sc->sc_nrxbuf, sc->sc_ntxbuf);
#endif

	return;

unmap:
	bus_space_unmap(sc->sc_iot, ioh, sc->sc_ios);
	sc->sc_ios = 0;
}

int
vic_query(struct vic_softc *sc)
{
	u_int32_t			major, minor;

	major = vic_read(sc, VIC_VERSION_MAJOR);
	minor = vic_read(sc, VIC_VERSION_MINOR);

	/* Check for a supported version */
	if ((major & VIC_VERSION_MAJOR_M) !=
	    (VIC_MAGIC & VIC_VERSION_MAJOR_M)) {
		printf(": magic mismatch\n");
		return (1);
	}

	if (VIC_MAGIC > major || VIC_MAGIC < minor) {
		printf(": unsupported version (%X)\n",
		    major & ~VIC_VERSION_MAJOR_M);
		return (1);
	}

	sc->sc_nrxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Rx_BUF);
	sc->sc_ntxbuf = vic_read_cmd(sc, VIC_CMD_NUM_Tx_BUF);
	sc->sc_feature = vic_read_cmd(sc, VIC_CMD_FEATURE);
	sc->sc_cap = vic_read_cmd(sc, VIC_CMD_HWCAP);

	vic_getlladdr(sc);

	if (sc->sc_nrxbuf > VIC_NBUF_MAX || sc->sc_nrxbuf == 0)
		sc->sc_nrxbuf = VIC_NBUF;
	if (sc->sc_ntxbuf > VIC_NBUF_MAX || sc->sc_ntxbuf == 0)
		sc->sc_ntxbuf = VIC_NBUF;

	return (0);
}

int
vic_alloc_data(struct vic_softc *sc)
{
	u_int8_t			*kva;
	u_int				offset;
	struct vic_rxdesc		*rxd;
	int				i, q;

	sc->sc_rxq[0].pktlen = MCLBYTES;
	sc->sc_rxq[1].pktlen = 4096;

	for (q = 0; q < VIC_NRXRINGS; q++) {
		sc->sc_rxq[q].bufs = malloc(sizeof(struct vic_rxbuf) *
		    sc->sc_nrxbuf, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->sc_rxq[q].bufs == NULL) {
			printf(": unable to allocate rxbuf for ring %d\n", q);
			goto freerx;
		}
	}

	sc->sc_txbuf = malloc(sizeof(struct vic_txbuf) * sc->sc_ntxbuf,
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_txbuf == NULL) {
		printf(": unable to allocate txbuf\n");
		goto freerx;
	}

	sc->sc_dma_size = sizeof(struct vic_data) +
	    (sc->sc_nrxbuf * VIC_NRXRINGS) * sizeof(struct vic_rxdesc) +
	    sc->sc_ntxbuf * sizeof(struct vic_txdesc);

	if (vic_alloc_dmamem(sc) != 0) {
		printf(": unable to allocate dma region\n");
		goto freetx;
	}
	kva = VIC_DMA_KVA(sc);

	/* set up basic vic data */
	sc->sc_data = VIC_DMA_KVA(sc);

	sc->sc_data->vd_magic = VIC_MAGIC;
	sc->sc_data->vd_length = sc->sc_dma_size;

	offset = sizeof(struct vic_data);

	/* set up the rx rings */

	for (q = 0; q < VIC_NRXRINGS; q++) {
		sc->sc_rxq[q].slots = (struct vic_rxdesc *)&kva[offset];
		sc->sc_data->vd_rx_offset[q] = offset;
		sc->sc_data->vd_rx[q].length = sc->sc_nrxbuf;

		for (i = 0; i < sc->sc_nrxbuf; i++) {
			rxd = &sc->sc_rxq[q].slots[i];

			rxd->rx_physaddr = 0;
			rxd->rx_buflength = 0;
			rxd->rx_length = 0;
			rxd->rx_owner = VIC_OWNER_DRIVER;

			offset += sizeof(struct vic_rxdesc);
		}
	}

	/* set up the tx ring */
	sc->sc_txq = (struct vic_txdesc *)&kva[offset];

	sc->sc_data->vd_tx_offset = offset;
	sc->sc_data->vd_tx_length = sc->sc_ntxbuf;

	return (0);
freetx:
	free(sc->sc_txbuf, M_DEVBUF);
	q = VIC_NRXRINGS;
freerx:
	while (q--)
		free(sc->sc_rxq[q].bufs, M_DEVBUF);

	return (1);
}

void
vic_rx_fill(struct vic_softc *sc, int q)
{
	struct vic_rxbuf		*rxb;
	struct vic_rxdesc		*rxd;

	while (sc->sc_rxq[q].len < sc->sc_data->vd_rx[q].length) {
		rxb = &sc->sc_rxq[q].bufs[sc->sc_rxq[q].end];
		rxd = &sc->sc_rxq[q].slots[sc->sc_rxq[q].end];

		rxb->rxb_m = vic_alloc_mbuf(sc, rxb->rxb_dmamap,
		    sc->sc_rxq[q].pktlen);
		if (rxb->rxb_m == NULL)
			break;

		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

		rxd->rx_physaddr = rxb->rxb_dmamap->dm_segs[0].ds_addr;
		rxd->rx_buflength = rxb->rxb_m->m_pkthdr.len;
		rxd->rx_length = 0;
		rxd->rx_owner = VIC_OWNER_NIC;

		VIC_INC(sc->sc_rxq[q].end, sc->sc_data->vd_rx[q].length);
		sc->sc_rxq[q].len++;
	}
}

int
vic_init_data(struct vic_softc *sc)
{
	struct vic_rxbuf		*rxb;
	struct vic_rxdesc		*rxd;
	struct vic_txbuf		*txb;

	int				q, i;

	for (q = 0; q < VIC_NRXRINGS; q++) {
		for (i = 0; i < sc->sc_nrxbuf; i++) {
			rxb = &sc->sc_rxq[q].bufs[i];
			rxd = &sc->sc_rxq[q].slots[i];

			if (bus_dmamap_create(sc->sc_dmat,
			    sc->sc_rxq[q].pktlen, 1, sc->sc_rxq[q].pktlen, 0,
			    BUS_DMA_NOWAIT, &rxb->rxb_dmamap) != 0) {
				printf("%s: unable to create dmamap for "
				    "ring %d slot %d\n", DEVNAME(sc), q, i);
				goto freerxbs;
			}

			/* scrub the ring */
			rxd->rx_physaddr = 0;
			rxd->rx_buflength = 0;
			rxd->rx_length = 0;
			rxd->rx_owner = VIC_OWNER_DRIVER;
		}

		sc->sc_rxq[q].len = 0;
		sc->sc_rxq[q].end = 0;
		vic_rx_fill(sc, q);
	}

	for (i = 0; i < sc->sc_ntxbuf; i++) {
		txb = &sc->sc_txbuf[i];
		if (bus_dmamap_create(sc->sc_dmat, VIC_JUMBO_FRAMELEN,
		    (sc->sc_cap & VIC_CMD_HWCAP_SG) ? VIC_SG_MAX : 1,
		    VIC_JUMBO_FRAMELEN, 0, BUS_DMA_NOWAIT,
		    &txb->txb_dmamap) != 0) {
			printf("%s: unable to create dmamap for tx %d\n",
			    DEVNAME(sc), i);
			goto freetxbs;
		}
		txb->txb_m = NULL;
	}

	return (0);

freetxbs:
	while (i--) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_destroy(sc->sc_dmat, txb->txb_dmamap);
	}

	i = sc->sc_nrxbuf;
	q = VIC_NRXRINGS - 1;
freerxbs:
	while (q >= 0) {
		while (i--) {
			rxb = &sc->sc_rxq[q].bufs[i];

			if (rxb->rxb_m != NULL) {
				bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap,
				    0, rxb->rxb_m->m_pkthdr.len,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);
				m_freem(rxb->rxb_m);
				rxb->rxb_m = NULL;
			}
			bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);
		}
		q--;
	}

	return (1);
}

int
vic_uninit_data(struct vic_softc *sc)
{
	struct vic_rxbuf		*rxb;
	struct vic_rxdesc		*rxd;
	struct vic_txbuf		*txb;

	int				i, q;

	for (q = 0; q < VIC_NRXRINGS; q++) {
		for (i = 0; i < sc->sc_nrxbuf; i++) {
			rxb = &sc->sc_rxq[q].bufs[i];
			rxd = &sc->sc_rxq[q].slots[i];

			if (rxb->rxb_m != NULL) {
				bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap,
				    0, rxb->rxb_m->m_pkthdr.len,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);
				m_freem(rxb->rxb_m);
				rxb->rxb_m = NULL;
			}
			bus_dmamap_destroy(sc->sc_dmat, rxb->rxb_dmamap);
		}
	}

	for (i = 0; i < sc->sc_ntxbuf; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_destroy(sc->sc_dmat, txb->txb_dmamap);
	}

	return (0);
}

void
vic_link_state(struct vic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	u_int32_t status;
	int link_state = LINK_STATE_DOWN;

	status = vic_read(sc, VIC_STATUS);
	if (status & VIC_STATUS_CONNECTED)
		link_state = LINK_STATE_FULL_DUPLEX;
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}
}

void
vic_shutdown(void *self)
{
	struct vic_softc *sc = (struct vic_softc *)self;

	vic_stop(&sc->sc_ac.ac_if);
}

int
vic_intr(void *arg)
{
	struct vic_softc *sc = (struct vic_softc *)arg;
	int q;

	for (q = 0; q < VIC_NRXRINGS; q++)
		vic_rx_proc(sc, q);
	vic_tx_proc(sc);

	vic_write(sc, VIC_CMD, VIC_CMD_INTR_ACK);

	return (1);
}

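/*
 * Harvest receive descriptors the device has handed back to the
 * driver, pass the packets up the stack and refill the ring.
 */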
void
vic_rx_proc(struct vic_softc *sc, int q)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	struct vic_rxdesc		*rxd;
	struct vic_rxbuf		*rxb;
	struct mbuf			*m;
	int				len, idx;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (sc->sc_rxq[q].len > 0) {
		idx = sc->sc_data->vd_rx[q].nextidx;
		if (idx >= sc->sc_data->vd_rx[q].length) {
			ifp->if_ierrors++;
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: receive index error\n",
				    sc->sc_dev.dv_xname);
			break;
		}

		rxd = &sc->sc_rxq[q].slots[idx];
		if (rxd->rx_owner != VIC_OWNER_DRIVER)
			break;

		rxb = &sc->sc_rxq[q].bufs[idx];

		if (rxb->rxb_m == NULL) {
			ifp->if_ierrors++;
			printf("%s: rxb %d has no mbuf\n", DEVNAME(sc), idx);
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, rxb->rxb_dmamap, 0,
		    rxb->rxb_m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->rxb_dmamap);

		m = rxb->rxb_m;
		rxb->rxb_m = NULL;
		len = rxd->rx_length;

		if (len < VIC_MIN_FRAMELEN) {
			m_freem(m);

			ifp->if_iqdrops++;
			goto nextp;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		ifp->if_ipackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);

nextp:
		sc->sc_rxq[q].len--;
		VIC_INC(sc->sc_data->vd_rx[q].nextidx,
		    sc->sc_data->vd_rx[q].length);
	}

	vic_rx_fill(sc, q);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
vic_tx_proc(struct vic_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	struct vic_txdesc		*txd;
	struct vic_txbuf		*txb;
	int				idx;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (sc->sc_txpending > 0) {
		idx = sc->sc_data->vd_tx_curidx;
		if (idx >= sc->sc_data->vd_tx_length) {
			ifp->if_oerrors++;
			break;
		}

		txd = &sc->sc_txq[idx];
		if (txd->tx_owner != VIC_OWNER_DRIVER)
			break;

		txb = &sc->sc_txbuf[idx];
		if (txb->txb_m == NULL) {
			printf("%s: tx ring is corrupt\n", DEVNAME(sc));
			ifp->if_oerrors++;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, txb->txb_dmamap, 0,
		    txb->txb_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txb->txb_dmamap);

		m_freem(txb->txb_m);
		txb->txb_m = NULL;
		ifp->if_flags &= ~IFF_OACTIVE;

		sc->sc_txpending--;
		sc->sc_data->vd_tx_stopped = 0;

		VIC_INC(sc->sc_data->vd_tx_curidx, sc->sc_data->vd_tx_length);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	vic_start(ifp);
}

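/*
 * Program the interface flags and the multicast filter. The 64-bit
 * hash filter is indexed by the top six bits of the little-endian
 * CRC32 of each multicast address, stored as four 16-bit LE words.
 */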
void
vic_iff(struct vic_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t *mcastfil = (u_int16_t *)sc->sc_data->vd_mcastfil;
	u_int flags = 0;

	bzero(&sc->sc_data->vd_mcastfil, sizeof(sc->sc_data->vd_mcastfil));
	ifp->if_flags &= ~IFF_ALLMULTI;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		goto domulti;
	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN))
			goto allmulti;

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
		crc >>= 26;
		mcastfil[crc >> 4] |= htole16(1 << (crc & 0xf));

		ETHER_NEXT_MULTI(step, enm);
	}

	goto domulti;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	memset(&sc->sc_data->vd_mcastfil, 0xff,
	    sizeof(sc->sc_data->vd_mcastfil));

 domulti:
	vic_write(sc, VIC_CMD, VIC_CMD_MCASTFIL);

	if (ifp->if_flags & IFF_RUNNING) {
		flags = (ifp->if_flags & IFF_PROMISC) ?
		    VIC_CMD_IFF_PROMISC :
		    (VIC_CMD_IFF_BROADCAST | VIC_CMD_IFF_MULTICAST);
	}
	sc->sc_data->vd_iff = flags;
	vic_write(sc, VIC_CMD, VIC_CMD_IFF);
}

void
vic_getlladdr(struct vic_softc *sc)
{
	u_int32_t reg;

	/* Get MAC address */
	reg = (sc->sc_cap & VIC_CMD_HWCAP_VPROM) ? VIC_VPROM : VIC_LLADDR;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, reg, ETHER_ADDR_LEN,
	    BUS_SPACE_BARRIER_READ);
	bus_space_read_region_1(sc->sc_iot, sc->sc_ioh, reg, sc->sc_lladdr,
	    ETHER_ADDR_LEN);

	/* Update the MAC address register */
	if (reg == VIC_VPROM)
		vic_setlladdr(sc);
}

void
vic_setlladdr(struct vic_softc *sc)
{
	bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, VIC_LLADDR,
	    sc->sc_lladdr, ETHER_ADDR_LEN);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, VIC_LLADDR, ETHER_ADDR_LEN,
	    BUS_SPACE_BARRIER_WRITE);
}

int
vic_media_change(struct ifnet *ifp)
{
	/* Ignore */
	return (0);
}

void
vic_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	imr->ifm_status = IFM_AVALID;

	vic_link_state(sc);

	if (LINK_STATE_IS_UP(ifp->if_link_state) &&
	    ifp->if_flags & IFF_UP)
		imr->ifm_status |= IFM_ACTIVE;
}

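/*
 * Queue packets onto the tx ring. The read of VIC_Tx_ADDR at the end
 * kicks the device to process the newly posted descriptors.
 */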
void
vic_start(struct ifnet *ifp)
{
	struct vic_softc		*sc;
	struct mbuf			*m;
	struct vic_txbuf		*txb;
	struct vic_txdesc		*txd;
	struct vic_sg			*sge;
	bus_dmamap_t			dmap;
	int				i, idx;
	int				tx = 0;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	sc = (struct vic_softc *)ifp->if_softc;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		if (VIC_TXURN(sc)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		idx = sc->sc_data->vd_tx_nextidx;
		if (idx >= sc->sc_data->vd_tx_length) {
			printf("%s: tx idx is corrupt\n", DEVNAME(sc));
			ifp->if_oerrors++;
			break;
		}

		txd = &sc->sc_txq[idx];
		txb = &sc->sc_txbuf[idx];

		if (txb->txb_m != NULL) {
			printf("%s: tx ring is corrupt\n", DEVNAME(sc));
			sc->sc_data->vd_tx_stopped = 1;
			ifp->if_oerrors++;
			break;
		}

		/*
		 * we're committed to sending it now. if we can't map it
		 * into dma memory then we drop it.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (vic_load_txb(sc, txb, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			/* continue? */
			break;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, txb->txb_m, BPF_DIRECTION_OUT);
#endif

		dmap = txb->txb_dmamap;
		txd->tx_flags = VIC_TX_FLAGS_KEEP;
		txd->tx_owner = VIC_OWNER_NIC;
		txd->tx_sa.sa_addr_type = VIC_SG_ADDR_PHYS;
		txd->tx_sa.sa_length = dmap->dm_nsegs;
		for (i = 0; i < dmap->dm_nsegs; i++) {
			sge = &txd->tx_sa.sa_sg[i];
			sge->sg_length = dmap->dm_segs[i].ds_len;
			sge->sg_addr_low = dmap->dm_segs[i].ds_addr;
		}

		if (VIC_TXURN_WARN(sc)) {
			txd->tx_flags |= VIC_TX_FLAGS_TXURN;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		ifp->if_opackets++;
		sc->sc_txpending++;

		VIC_INC(sc->sc_data->vd_tx_nextidx, sc->sc_data->vd_tx_length);

		tx = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (tx)
		vic_read(sc, VIC_Tx_ADDR);
}

int
vic_load_txb(struct vic_softc *sc, struct vic_txbuf *txb, struct mbuf *m)
{
	bus_dmamap_t			dmap = txb->txb_dmamap;
	struct mbuf			*m0 = NULL;
	int				error;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m, BUS_DMA_NOWAIT);
	switch (error) {
	case 0:
		txb->txb_m = m;
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		MGETHDR(m0, M_DONTWAIT, MT_DATA);
		if (m0 == NULL)
			return (ENOBUFS);
		if (m->m_pkthdr.len > MHLEN) {
			MCLGETI(m0, M_DONTWAIT, NULL, m->m_pkthdr.len);
			if (!(m0->m_flags & M_EXT)) {
				m_freem(m0);
				return (ENOBUFS);
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
		m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmap, m0,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(m0);
			printf("%s: tx dmamap load error %d\n", DEVNAME(sc),
			    error);
			return (ENOBUFS);
		}
		m_freem(m);
		txb->txb_m = m0;
		break;

	default:
		printf("%s: tx dmamap load error %d\n", DEVNAME(sc), error);
		return (ENOBUFS);
	}

	return (0);
}

void
vic_watchdog(struct ifnet *ifp)
{
#if 0
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;

	if (sc->sc_txpending && sc->sc_txtimeout > 0) {
		if (--sc->sc_txtimeout == 0) {
			printf("%s: device timeout\n", sc->sc_dev.dv_xname);
			ifp->if_flags &= ~IFF_RUNNING;
			vic_init(ifp);
			ifp->if_oerrors++;
			return;
		}
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vic_start(ifp);
#endif
}

int
vic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_ac, ifa);
#endif
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				vic_iff(sc);
			else
				vic_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vic_stop(ifp);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vic_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
vic_init(struct ifnet *ifp)
{
	struct vic_softc	*sc = (struct vic_softc *)ifp->if_softc;
	int			q;
	int			s;

	sc->sc_data->vd_tx_curidx = 0;
	sc->sc_data->vd_tx_nextidx = 0;
	sc->sc_data->vd_tx_stopped = sc->sc_data->vd_tx_queued = 0;
	sc->sc_data->vd_tx_saved_nextidx = 0;

	for (q = 0; q < VIC_NRXRINGS; q++) {
		sc->sc_data->vd_rx[q].nextidx = 0;
		sc->sc_data->vd_rx_saved_nextidx[q] = 0;
	}

	if (vic_init_data(sc) != 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	s = splnet();

	vic_write(sc, VIC_DATA_ADDR, VIC_DMA_DVA(sc));
	vic_write(sc, VIC_DATA_LENGTH, sc->sc_dma_size);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	vic_iff(sc);
	vic_write(sc, VIC_CMD, VIC_CMD_INTR_ENABLE);

	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}

void
vic_stop(struct ifnet *ifp)
{
	struct vic_softc *sc = (struct vic_softc *)ifp->if_softc;
	int s;

	s = splnet();

	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_map, 0, sc->sc_dma_size,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* XXX wait for tx to complete */
	while (sc->sc_txpending > 0) {
		splx(s);
		delay(1000);
		s = splnet();
	}

	sc->sc_data->vd_tx_stopped = 1;

	vic_write(sc, VIC_CMD, VIC_CMD_INTR_DISABLE);

	vic_iff(sc);
	vic_write(sc, VIC_DATA_ADDR, 0);

	vic_uninit_data(sc);

	splx(s);
}

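/*
 * Allocate an mbuf cluster for receive and load it into the given DMA
 * map. The data pointer is advanced by ETHER_ALIGN so the IP header
 * ends up longword aligned.
 */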
struct mbuf *
vic_alloc_mbuf(struct vic_softc *sc, bus_dmamap_t map, u_int pktlen)
{
	struct mbuf *m = NULL;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);

	MCLGETI(m, M_DONTWAIT, &sc->sc_ac.ac_if, pktlen);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (NULL);
	}
	m->m_data += ETHER_ALIGN;
	m->m_len = m->m_pkthdr.len = pktlen - ETHER_ALIGN;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map\n", DEVNAME(sc));
		m_freem(m);
		return (NULL);
	}

	return (m);
}

void
vic_tick(void *arg)
{
	struct vic_softc		*sc = (struct vic_softc *)arg;

	vic_link_state(sc);

	timeout_add_sec(&sc->sc_tick, 1);
}

u_int32_t
vic_read(struct vic_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}

void
vic_write(struct vic_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

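/*
 * Command registers are accessed by writing the command number to
 * VIC_CMD and reading the result back from the same register.
 */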
u_int32_t
vic_read_cmd(struct vic_softc *sc, u_int32_t cmd)
{
	vic_write(sc, VIC_CMD, cmd);
	return (vic_read(sc, VIC_CMD));
}

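/*
 * Allocate the single physically contiguous region that holds the
 * shared vic_data header, the rx rings and the tx ring.
 */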
int
vic_alloc_dmamem(struct vic_softc *sc)
{
	int nsegs;

	if (bus_dmamap_create(sc->sc_dmat, sc->sc_dma_size, 1,
	    sc->sc_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->sc_dma_map) != 0)
		goto err;

	if (bus_dmamem_alloc(sc->sc_dmat, sc->sc_dma_size, 16, 0,
	    &sc->sc_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_seg, nsegs,
	    sc->sc_dma_size, &sc->sc_dma_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dma_map, sc->sc_dma_kva,
	    sc->sc_dma_size, NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	bzero(sc->sc_dma_kva, sc->sc_dma_size);

	return (0);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
free:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma_map);
err:
	return (1);
}

void
vic_free_dmamem(struct vic_softc *sc)
{
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dma_kva, sc->sc_dma_size);
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dma_map);
}