xref: /netbsd-src/sys/dev/pci/if_kse.c (revision 9fb66d812c00ebfb445c0b47dea128f32aa6fe96)
1 /*	$NetBSD: if_kse.c,v 1.56 2020/09/20 23:48:09 nisimura Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Tohru Nishimura.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Micrel 8841/8842 10/100 PCI ethernet driver
34  */
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.56 2020/09/20 23:48:09 nisimura Exp $");
38 
39 #include <sys/param.h>
40 #include <sys/bus.h>
41 #include <sys/intr.h>
42 #include <sys/device.h>
43 #include <sys/callout.h>
44 #include <sys/ioctl.h>
45 #include <sys/mbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/rndsource.h>
48 #include <sys/errno.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 
52 #include <net/if.h>
53 #include <net/if_media.h>
54 #include <net/if_dl.h>
55 #include <net/if_ether.h>
56 #include <dev/mii/mii.h>
57 #include <dev/mii/miivar.h>
58 #include <net/bpf.h>
59 
60 #include <dev/pci/pcivar.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcidevs.h>
63 
64 #define KSE_LINKDEBUG 0
65 
66 #define CSR_READ_4(sc, off) \
67 	    bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off))
68 #define CSR_WRITE_4(sc, off, val) \
69 	    bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val))
70 #define CSR_READ_2(sc, off) \
71 	    bus_space_read_2((sc)->sc_st, (sc)->sc_sh, (off))
72 #define CSR_WRITE_2(sc, off, val) \
73 	    bus_space_write_2((sc)->sc_st, (sc)->sc_sh, (off), (val))
74 
75 #define MDTXC		0x000		/* DMA transmit control */
76 #define MDRXC		0x004		/* DMA receive control */
77 #define MDTSC		0x008		/* trigger DMA transmit (SC) */
78 #define MDRSC		0x00c		/* trigger DMA receive (SC) */
79 #define TDLB		0x010		/* transmit descriptor list base */
80 #define RDLB		0x014		/* receive descriptor list base */
81 #define MTR0		0x020		/* multicast table 31:0 */
82 #define MTR1		0x024		/* multicast table 63:32 */
83 #define INTEN		0x028		/* interrupt enable */
84 #define INTST		0x02c		/* interrupt status */
85 #define MAAL0		0x080		/* additional MAC address 0 low */
86 #define MAAH0		0x084		/* additional MAC address 0 high */
87 #define MARL		0x200		/* MAC address low */
88 #define MARM		0x202		/* MAC address middle */
89 #define MARH		0x204		/* MAC address high */
90 #define GRR		0x216		/* global reset */
91 #define SIDER		0x400		/* switch ID and function enable */
92 #define SGCR3		0x406		/* switch function control 3 */
93 #define  CR3_USEHDX	(1U<<6)		/* use half-duplex 8842 host port */
94 #define  CR3_USEFC	(1U<<5) 	/* use flowcontrol 8842 host port */
95 #define IACR		0x4a0		/* indirect access control */
96 #define IADR1		0x4a2		/* indirect access data 66:63 */
97 #define IADR2		0x4a4		/* indirect access data 47:32 */
98 #define IADR3		0x4a6		/* indirect access data 63:48 */
99 #define IADR4		0x4a8		/* indirect access data 15:0 */
100 #define IADR5		0x4aa		/* indirect access data 31:16 */
101 #define  IADR_LATCH	(1U<<30)	/* latch completed indication */
102 #define  IADR_OVF	(1U<<31)	/* overflow detected */
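/*
 * Indirect access sketch (the sequence stat_tick()/zerostats() use below);
 * illustrative only: write the counter index to IACR, poll IADR5 until
 * IADR_LATCH is set, then read the high half from IADR5 and the low half
 * from IADR4.
 *
 *	CSR_WRITE_2(sc, IACR, index);
 *	do {
 *		val = CSR_READ_2(sc, IADR5) << 16;
 *	} while ((val & IADR_LATCH) == 0);
 *	val = (val & 0x3fff0000) | CSR_READ_2(sc, IADR4);
 */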
103 #define P1CR4		0x512		/* port 1 control 4 */
104 #define P1SR		0x514		/* port 1 status */
105 #define P2CR4		0x532		/* port 2 control 4 */
106 #define P2SR		0x534		/* port 2 status */
107 #define  PxCR_STARTNEG	(1U<<9)		/* restart auto negotiation */
108 #define  PxCR_AUTOEN	(1U<<7)		/* auto negotiation enable */
109 #define  PxCR_SPD100	(1U<<6)		/* force speed 100 */
110 #define  PxCR_USEFDX	(1U<<5)		/* force full duplex */
111 #define  PxCR_USEFC	(1U<<4)		/* advertise pause flow control */
112 #define  PxSR_ACOMP	(1U<<6)		/* auto negotiation completed */
113 #define  PxSR_SPD100	(1U<<10)	/* speed is 100Mbps */
114 #define  PxSR_FDX	(1U<<9)		/* full duplex */
115 #define  PxSR_LINKUP	(1U<<5)		/* link is good */
116 #define  PxSR_RXFLOW	(1U<<12)	/* receive flow control active */
117 #define  PxSR_TXFLOW	(1U<<11)	/* transmit flow control active */
118 #define P1VIDCR		0x504		/* port 1 vtag */
119 #define P2VIDCR		0x524		/* port 2 vtag */
120 #define P3VIDCR		0x544		/* 8842 host vtag */
121 #define EVCNTBR		0x1c00		/* 3 sets of 34 event counters */
122 
123 #define TXC_BS_MSK	0x3f000000	/* burst size */
124 #define TXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
125 #define TXC_UCG		(1U<<18)	/* generate UDP checksum */
126 #define TXC_TCG		(1U<<17)	/* generate TCP checksum */
127 #define TXC_ICG		(1U<<16)	/* generate IP checksum */
128 #define TXC_FCE		(1U<<9)		/* generate PAUSE to moderate Rx lvl */
129 #define TXC_EP		(1U<<2)		/* enable automatic padding */
130 #define TXC_AC		(1U<<1)		/* add CRC to frame */
131 #define TXC_TEN		(1)		/* enable DMA to run */
132 
133 #define RXC_BS_MSK	0x3f000000	/* burst size */
134 #define RXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
135 #define RXC_IHAE	(1U<<19)	/* IP header alignment enable */
136 #define RXC_UCC		(1U<<18)	/* run UDP checksum */
137 #define RXC_TCC		(1U<<17)	/* run TCP checksum */
138 #define RXC_ICC		(1U<<16)	/* run IP checksum */
139 #define RXC_FCE		(1U<<9)		/* accept PAUSE to throttle Tx */
140 #define RXC_RB		(1U<<6)		/* receive broadcast frame */
141 #define RXC_RM		(1U<<5)		/* receive all multicast (inc. RB) */
142 #define RXC_RU		(1U<<4)		/* receive 16 additional unicasts */
143 #define RXC_RE		(1U<<3)		/* accept error frame */
144 #define RXC_RA		(1U<<2)		/* receive all frame */
145 #define RXC_MHTE	(1U<<1)		/* use multicast hash table */
146 #define RXC_REN		(1)		/* enable DMA to run */
147 
148 #define INT_DMLCS	(1U<<31)	/* link status change */
149 #define INT_DMTS	(1U<<30)	/* sending desc. has posted Tx done */
150 #define INT_DMRS	(1U<<29)	/* frame was received */
151 #define INT_DMRBUS	(1U<<27)	/* Rx descriptor pool is full */
152 #define INT_DMxPSS	(3U<<25)	/* 26:25 DMA Tx/Rx have stopped */
153 
154 struct tdes {
155 	uint32_t t0, t1, t2, t3;
156 };
157 
158 struct rdes {
159 	uint32_t r0, r1, r2, r3;
160 };
161 
162 #define T0_OWN		(1U<<31)	/* desc is ready to Tx */
163 
164 #define R0_OWN		(1U<<31)	/* desc is empty */
165 #define R0_FS		(1U<<30)	/* first segment of frame */
166 #define R0_LS		(1U<<29)	/* last segment of frame */
167 #define R0_IPE		(1U<<28)	/* IP checksum error */
168 #define R0_TCPE		(1U<<27)	/* TCP checksum error */
169 #define R0_UDPE		(1U<<26)	/* UDP checksum error */
170 #define R0_ES		(1U<<25)	/* error summary */
171 #define R0_MF		(1U<<24)	/* multicast frame */
172 #define R0_SPN		0x00300000	/* 21:20 switch port 1/2 */
173 #define R0_ALIGN	0x00300000	/* 21:20 (KSZ8692P) Rx align amount */
174 #define R0_RE		(1U<<19)	/* MII reported error */
175 #define R0_TL		(1U<<18)	/* frame too long, beyond 1518 */
176 #define R0_RF		(1U<<17)	/* damaged runt frame */
177 #define R0_CE		(1U<<16)	/* CRC error */
178 #define R0_FT		(1U<<15)	/* frame type */
179 #define R0_FL_MASK	0x7ff		/* frame length 10:0 */
180 
181 #define T1_IC		(1U<<31)	/* post interrupt on complete */
182 #define T1_FS		(1U<<30)	/* first segment of frame */
183 #define T1_LS		(1U<<29)	/* last segment of frame */
184 #define T1_IPCKG	(1U<<28)	/* generate IP checksum */
185 #define T1_TCPCKG	(1U<<27)	/* generate TCP checksum */
186 #define T1_UDPCKG	(1U<<26)	/* generate UDP checksum */
187 #define T1_TER		(1U<<25)	/* end of ring */
188 #define T1_SPN		0x00300000	/* 21:20 switch port 1/2 */
189 #define T1_TBS_MASK	0x7ff		/* segment size 10:0 */
190 
191 #define R1_RER		(1U<<25)	/* end of ring */
192 #define R1_RBS_MASK	0x7fc		/* segment size 10:0 */
193 
194 #define KSE_NTXSEGS		16
195 #define KSE_TXQUEUELEN		64
196 #define KSE_TXQUEUELEN_MASK	(KSE_TXQUEUELEN - 1)
197 #define KSE_TXQUEUE_GC		(KSE_TXQUEUELEN / 4)
198 #define KSE_NTXDESC		256
199 #define KSE_NTXDESC_MASK	(KSE_NTXDESC - 1)
200 #define KSE_NEXTTX(x)		(((x) + 1) & KSE_NTXDESC_MASK)
201 #define KSE_NEXTTXS(x)		(((x) + 1) & KSE_TXQUEUELEN_MASK)
202 
203 #define KSE_NRXDESC		64
204 #define KSE_NRXDESC_MASK	(KSE_NRXDESC - 1)
205 #define KSE_NEXTRX(x)		(((x) + 1) & KSE_NRXDESC_MASK)
206 
207 struct kse_control_data {
208 	struct tdes kcd_txdescs[KSE_NTXDESC];
209 	struct rdes kcd_rxdescs[KSE_NRXDESC];
210 };
211 #define KSE_CDOFF(x)		offsetof(struct kse_control_data, x)
212 #define KSE_CDTXOFF(x)		KSE_CDOFF(kcd_txdescs[(x)])
213 #define KSE_CDRXOFF(x)		KSE_CDOFF(kcd_rxdescs[(x)])
214 
215 struct kse_txsoft {
216 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
217 	bus_dmamap_t txs_dmamap;	/* our DMA map */
218 	int txs_firstdesc;		/* first descriptor in packet */
219 	int txs_lastdesc;		/* last descriptor in packet */
220 	int txs_ndesc;			/* # of descriptors used */
221 };
222 
223 struct kse_rxsoft {
224 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
225 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
226 };
227 
228 struct kse_softc {
229 	device_t sc_dev;		/* generic device information */
230 	bus_space_tag_t sc_st;		/* bus space tag */
231 	bus_space_handle_t sc_sh;	/* bus space handle */
232 	bus_size_t sc_memsize;		/* csr map size */
233 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
234 	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
235 	struct ethercom sc_ethercom;	/* Ethernet common data */
236 	void *sc_ih;			/* interrupt cookie */
237 
238 	struct mii_data sc_mii;		/* mii 8841 */
239 	struct ifmedia sc_media;	/* ifmedia 8842 */
240 	int sc_flowflags;		/* 802.3x PAUSE flow control */
241 
242 	callout_t  sc_tick_ch;		/* MII tick callout */
243 
244 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
245 #define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
246 
247 	struct kse_control_data *sc_control_data;
248 #define sc_txdescs	sc_control_data->kcd_txdescs
249 #define sc_rxdescs	sc_control_data->kcd_rxdescs
250 
251 	struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN];
252 	struct kse_rxsoft sc_rxsoft[KSE_NRXDESC];
253 	int sc_txfree;			/* number of free Tx descriptors */
254 	int sc_txnext;			/* next ready Tx descriptor */
255 	int sc_txsfree;			/* number of free Tx jobs */
256 	int sc_txsnext;			/* next ready Tx job */
257 	int sc_txsdirty;		/* dirty Tx jobs */
258 	int sc_rxptr;			/* next ready Rx descriptor/descsoft */
259 
260 	uint32_t sc_txc, sc_rxc;
261 	uint32_t sc_t1csum;
262 	int sc_mcsum;
263 	uint32_t sc_inten;
264 	uint32_t sc_chip;
265 
266 	krndsource_t rnd_source;	/* random source */
267 
268 #ifdef KSE_EVENT_COUNTERS
269 	struct ksext {
270 		char evcntname[3][8];
271 		struct evcnt pev[3][34];
272 	} sc_ext;			/* switch statistics */
273 #endif
274 };
275 
276 #define KSE_CDTXADDR(sc, x)	((sc)->sc_cddma + KSE_CDTXOFF((x)))
277 #define KSE_CDRXADDR(sc, x)	((sc)->sc_cddma + KSE_CDRXOFF((x)))
278 
279 #define KSE_CDTXSYNC(sc, x, n, ops)					\
280 do {									\
281 	int __x, __n;							\
282 									\
283 	__x = (x);							\
284 	__n = (n);							\
285 									\
286 	/* If it will wrap around, sync to the end of the ring. */	\
287 	if ((__x + __n) > KSE_NTXDESC) {				\
288 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
289 		    KSE_CDTXOFF(__x), sizeof(struct tdes) *		\
290 		    (KSE_NTXDESC - __x), (ops));			\
291 		__n -= (KSE_NTXDESC - __x);				\
292 		__x = 0;						\
293 	}								\
294 									\
295 	/* Now sync whatever is left. */				\
296 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
297 	    KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
298 } while (/*CONSTCOND*/0)
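/*
 * Example (illustrative only): KSE_CDTXSYNC(sc, 254, 4, ops) with
 * KSE_NTXDESC == 256 first syncs descriptors 254-255, then wraps and
 * syncs descriptors 0-1 with a second bus_dmamap_sync() call.
 */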
299 
300 #define KSE_CDRXSYNC(sc, x, ops)					\
301 do {									\
302 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
303 	    KSE_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
304 } while (/*CONSTCOND*/0)
305 
306 #define KSE_INIT_RXDESC(sc, x)						\
307 do {									\
308 	struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
309 	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
310 	struct mbuf *__m = __rxs->rxs_mbuf;				\
311 									\
312 	__m->m_data = __m->m_ext.ext_buf;				\
313 	__rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr;		\
314 	__rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */;		\
315 	__rxd->r0 = R0_OWN;						\
316 	KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
317 } while (/*CONSTCOND*/0)
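/*
 * KSE_INIT_RXDESC() rewinds the mbuf data pointer to the start of the
 * cluster, reloads the buffer bus address into r2 and the maximum buffer
 * size into r1, then hands the descriptor back to the hardware by setting
 * R0_OWN and syncing the descriptor for the device.
 */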
318 
319 u_int kse_burstsize = 8;	/* DMA burst length tuning knob */
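/*
 * kse_burstsize is shifted into the TXC_BS_SFT/RXC_BS_SFT fields by
 * kse_init(); per the burst-size field comments above, valid values are
 * 1, 2, 4, 8, 16, 32, or 0 for an unlimited burst.
 */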
320 
321 #ifdef KSEDIAGNOSTIC
322 u_int kse_monitor_rxintr;	/* fragmented UDP csum HW bug hook */
323 #endif
324 
325 static int kse_match(device_t, cfdata_t, void *);
326 static void kse_attach(device_t, device_t, void *);
327 
328 CFATTACH_DECL_NEW(kse, sizeof(struct kse_softc),
329     kse_match, kse_attach, NULL, NULL);
330 
331 static int kse_ioctl(struct ifnet *, u_long, void *);
332 static void kse_start(struct ifnet *);
333 static void kse_watchdog(struct ifnet *);
334 static int kse_init(struct ifnet *);
335 static void kse_stop(struct ifnet *, int);
336 static void kse_reset(struct kse_softc *);
337 static void kse_set_rcvfilt(struct kse_softc *);
338 static int add_rxbuf(struct kse_softc *, int);
339 static void rxdrain(struct kse_softc *);
340 static int kse_intr(void *);
341 static void rxintr(struct kse_softc *);
342 static void txreap(struct kse_softc *);
343 static void lnkchg(struct kse_softc *);
344 static int kse_ifmedia_upd(struct ifnet *);
345 static void kse_ifmedia_sts(struct ifnet *, struct ifmediareq *);
346 static void nopifmedia_sts(struct ifnet *, struct ifmediareq *);
347 static void phy_tick(void *);
348 int kse_mii_readreg(device_t, int, int, uint16_t *);
349 int kse_mii_writereg(device_t, int, int, uint16_t);
350 void kse_mii_statchg(struct ifnet *);
351 #ifdef KSE_EVENT_COUNTERS
352 static void stat_tick(void *);
353 static void zerostats(struct kse_softc *);
354 #endif
355 
356 static int
357 kse_match(device_t parent, cfdata_t match, void *aux)
358 {
359 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
360 
361 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MICREL &&
362 	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8842 ||
363 	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8841) &&
364 	    PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
365 		return 1;
366 
367 	return 0;
368 }
369 
370 static void
371 kse_attach(device_t parent, device_t self, void *aux)
372 {
373 	struct kse_softc *sc = device_private(self);
374 	struct pci_attach_args *pa = aux;
375 	pci_chipset_tag_t pc = pa->pa_pc;
376 	pci_intr_handle_t ih;
377 	const char *intrstr;
378 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
379 	struct mii_data * const mii = &sc->sc_mii;
380 	struct ifmedia *ifm;
381 	uint8_t enaddr[ETHER_ADDR_LEN];
382 	bus_dma_segment_t seg;
383 	int i, error, nseg;
384 	char intrbuf[PCI_INTRSTR_LEN];
385 
386 	aprint_normal(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n",
387 	    PCI_PRODUCT(pa->pa_id), PCI_REVISION(pa->pa_class));
388 
389 	if (pci_mapreg_map(pa, 0x10,
390 	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
391 	    0, &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_memsize) != 0) {
392 		aprint_error_dev(self, "unable to map device registers\n");
393 		return;
394 	}
395 
396 	/* Make sure bus mastering is enabled. */
397 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
398 	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
399 	    PCI_COMMAND_MASTER_ENABLE);
400 
401 	/* Power up chip if necessary. */
402 	if ((error = pci_activate(pc, pa->pa_tag, self, NULL))
403 	    && error != EOPNOTSUPP) {
404 		aprint_error_dev(self, "cannot activate %d\n", error);
405 		return;
406 	}
407 
408 	/* Map and establish our interrupt. */
409 	if (pci_intr_map(pa, &ih)) {
410 		aprint_error_dev(self, "unable to map interrupt\n");
411 		goto fail;
412 	}
413 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
414 	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, kse_intr, sc,
415 	    device_xname(self));
416 	if (sc->sc_ih == NULL) {
417 		aprint_error_dev(self, "unable to establish interrupt");
418 		if (intrstr != NULL)
419 			aprint_error(" at %s", intrstr);
420 		aprint_error("\n");
421 		goto fail;
422 	}
423 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
424 
425 	sc->sc_dev = self;
426 	sc->sc_dmat = pa->pa_dmat;
427 	sc->sc_pc = pa->pa_pc;
428 	sc->sc_chip = PCI_PRODUCT(pa->pa_id);
429 
430 	/*
431 	 * Read the Ethernet address from the EEPROM.
432 	 */
433 	i = CSR_READ_2(sc, MARL);
434 	enaddr[5] = i;
435 	enaddr[4] = i >> 8;
436 	i = CSR_READ_2(sc, MARM);
437 	enaddr[3] = i;
438 	enaddr[2] = i >> 8;
439 	i = CSR_READ_2(sc, MARH);
440 	enaddr[1] = i;
441 	enaddr[0] = i >> 8;
442 	aprint_normal_dev(self,
443 	    "Ethernet address %s\n", ether_sprintf(enaddr));
444 
445 	/*
446 	 * Enable chip function.
447 	 */
448 	CSR_WRITE_2(sc, SIDER, 1);
449 
450 	/*
451 	 * Allocate the control data structures, and create and load the
452 	 * DMA map for it.
453 	 */
454 	error = bus_dmamem_alloc(sc->sc_dmat,
455 	    sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
456 	if (error != 0) {
457 		aprint_error_dev(self,
458 		    "unable to allocate control data, error = %d\n", error);
459 		goto fail_0;
460 	}
461 	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
462 	    sizeof(struct kse_control_data), (void **)&sc->sc_control_data,
463 	    BUS_DMA_COHERENT);
464 	if (error != 0) {
465 		aprint_error_dev(self,
466 		    "unable to map control data, error = %d\n", error);
467 		goto fail_1;
468 	}
469 	error = bus_dmamap_create(sc->sc_dmat,
470 	    sizeof(struct kse_control_data), 1,
471 	    sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap);
472 	if (error != 0) {
473 		aprint_error_dev(self,
474 		    "unable to create control data DMA map, "
475 		    "error = %d\n", error);
476 		goto fail_2;
477 	}
478 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
479 	    sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0);
480 	if (error != 0) {
481 		aprint_error_dev(self,
482 		    "unable to load control data DMA map, error = %d\n",
483 		    error);
484 		goto fail_3;
485 	}
486 	for (i = 0; i < KSE_TXQUEUELEN; i++) {
487 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
488 		    KSE_NTXSEGS, MCLBYTES, 0, 0,
489 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
490 			aprint_error_dev(self,
491 			    "unable to create tx DMA map %d, error = %d\n",
492 			    i, error);
493 			goto fail_4;
494 		}
495 	}
496 	for (i = 0; i < KSE_NRXDESC; i++) {
497 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
498 		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
499 			aprint_error_dev(self,
500 			    "unable to create rx DMA map %d, error = %d\n",
501 			    i, error);
502 			goto fail_5;
503 		}
504 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
505 	}
506 
507 	mii->mii_ifp = ifp;
508 	mii->mii_readreg = kse_mii_readreg;
509 	mii->mii_writereg = kse_mii_writereg;
510 	mii->mii_statchg = kse_mii_statchg;
511 
512 	/* Initialize ifmedia structures. */
513 	if (sc->sc_chip == 0x8841) {
514 		/* use port 1 builtin PHY as index 1 device */
515 		sc->sc_ethercom.ec_mii = mii;
516 		ifm = &mii->mii_media;
517 		ifmedia_init(ifm, 0, kse_ifmedia_upd, kse_ifmedia_sts);
518 		mii_attach(sc->sc_dev, mii, 0xffffffff, 1 /* PHY1 */,
519 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
520 		if (LIST_FIRST(&mii->mii_phys) == NULL) {
521 			ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
522 			ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
523 		} else
524 			ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
525 	} else {
526 		/*
527 		 * Pretend 100FDX with no alternative media selection.
528 		 * The 8842 MAC is tied to a builtin 3 port switch.  It can do
529 		 * 4-level prioritised rate control in either the Tx or Rx
530 		 * direction for any of the ports.  This driver, however,
531 		 * leaves the rate unlimited, intending 100Mbps maximum.
532 		 * The 2 external ports run in autonegotiation mode and this
533 		 * driver provides no means to manipulate or inspect them.
534 		 */
535 		sc->sc_ethercom.ec_ifmedia = ifm = &sc->sc_media;
536 		ifmedia_init(ifm, 0, NULL, nopifmedia_sts);
537 		ifmedia_add(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
538 		ifmedia_set(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX);
539 
540 		aprint_normal_dev(self,
541 		    "10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n");
542 	}
543 	ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if the user had requested it */
544 
545 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
546 	ifp->if_softc = sc;
547 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
548 	ifp->if_ioctl = kse_ioctl;
549 	ifp->if_start = kse_start;
550 	ifp->if_watchdog = kse_watchdog;
551 	ifp->if_init = kse_init;
552 	ifp->if_stop = kse_stop;
553 	IFQ_SET_READY(&ifp->if_snd);
554 
555 	/*
556 	 * Capable of 802.1Q VLAN-sized frames and HW-assisted tagging.
557 	 * Can do IPv4, TCPv4, and UDPv4 checksums in hardware.
558 	 */
559 	sc->sc_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU;
560 	ifp->if_capabilities =
561 	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
562 	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
563 	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
564 
565 	sc->sc_flowflags = 0;
566 
567 	if_attach(ifp);
568 	if_deferred_start_init(ifp, NULL);
569 	ether_ifattach(ifp, enaddr);
570 
571 	callout_init(&sc->sc_tick_ch, 0);
572 	callout_setfunc(&sc->sc_tick_ch, phy_tick, sc);
573 
574 	rnd_attach_source(&sc->rnd_source, device_xname(self),
575 	    RND_TYPE_NET, RND_FLAG_DEFAULT);
576 
577 #ifdef KSE_EVENT_COUNTERS
578 	const char *events[34] = {
579 		"RxLoPriotyByte",
580 		"RxHiPriotyByte",
581 		"RxUndersizePkt",
582 		"RxFragments",
583 		"RxOversize",
584 		"RxJabbers",
585 		"RxSymbolError",
586 		"RxCRCError",
587 		"RxAlignmentError",
588 		"RxControl8808Pkts",
589 		"RxPausePkts",
590 		"RxBroadcast",
591 		"RxMulticast",
592 		"RxUnicast",
593 		"Rx64Octets",
594 		"Rx65To127Octets",
595 		"Rx128To255Octets",
596 		"Rx255To511Octets",
597 		"Rx512To1023Octets",
598 		"Rx1024To1522Octets",
599 		"TxLoPriotyByte",
600 		"TxHiPriotyByte",
601 		"TxLateCollision",
602 		"TxPausePkts",
603 		"TxBroadcastPkts",
604 		"TxMulticastPkts",
605 		"TxUnicastPkts",
606 		"TxDeferred",
607 		"TxTotalCollision",
608 		"TxExcessiveCollision",
609 		"TxSingleCollision",
610 		"TxMultipleCollision",
611 		"TxDropPkts",
612 		"RxDropPkts",
613 	};
614 	struct ksext *ee = &sc->sc_ext;
615 	int p = (sc->sc_chip == 0x8842) ? 3 : 1;
616 	for (i = 0; i < p; i++) {
617 		snprintf(ee->evcntname[i], sizeof(ee->evcntname[i]),
618 		    "%s.%d", device_xname(sc->sc_dev), i+1);
619 		for (int ev = 0; ev < 34; ev++) {
620 			evcnt_attach_dynamic(&ee->pev[i][ev], EVCNT_TYPE_MISC,
621 			    NULL, ee->evcntname[i], events[ev]);
622 		}
623 	}
624 #endif
625 	return;
626 
627  fail_5:
628 	for (i = 0; i < KSE_NRXDESC; i++) {
629 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
630 			bus_dmamap_destroy(sc->sc_dmat,
631 			    sc->sc_rxsoft[i].rxs_dmamap);
632 	}
633  fail_4:
634 	for (i = 0; i < KSE_TXQUEUELEN; i++) {
635 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
636 			bus_dmamap_destroy(sc->sc_dmat,
637 			    sc->sc_txsoft[i].txs_dmamap);
638 	}
639 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
640  fail_3:
641 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
642  fail_2:
643 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
644 	    sizeof(struct kse_control_data));
645  fail_1:
646 	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
647  fail_0:
648 	pci_intr_disestablish(pc, sc->sc_ih);
649  fail:
650 	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_memsize);
651 	return;
652 }
653 
654 static int
655 kse_ioctl(struct ifnet *ifp, u_long cmd, void *data)
656 {
657 	struct kse_softc *sc = ifp->if_softc;
658 	struct ifreq *ifr = (struct ifreq *)data;
659 	struct ifmedia *ifm;
660 	int s, error;
661 
662 	s = splnet();
663 
664 	switch (cmd) {
665 	case SIOCSIFMEDIA:
666 		/* Flow control requires full-duplex mode. */
667 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
668 		    (ifr->ifr_media & IFM_FDX) == 0)
669 			ifr->ifr_media &= ~IFM_ETH_FMASK;
670 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
671 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
672 				/* We can do both TXPAUSE and RXPAUSE. */
673 				ifr->ifr_media |=
674 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
675 			}
676 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
677 		}
678 		ifm = (sc->sc_chip == 0x8841)
679 		    ? &sc->sc_mii.mii_media : &sc->sc_media;
680 		error = ifmedia_ioctl(ifp, ifr, ifm, cmd);
681 		break;
682 	default:
683 		error = ether_ioctl(ifp, cmd, data);
684 		if (error != ENETRESET)
685 			break;
686 		error = 0;
687 		if (cmd == SIOCSIFCAP)
688 			error = (*ifp->if_init)(ifp);
689 		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
690 			;
691 		else if (ifp->if_flags & IFF_RUNNING) {
692 			/*
693 			 * Multicast list has changed; set the hardware filter
694 			 * accordingly.
695 			 */
696 			kse_set_rcvfilt(sc);
697 		}
698 		break;
699 	}
700 
701 	splx(s);
702 
703 	return error;
704 }
705 
706 static int
707 kse_init(struct ifnet *ifp)
708 {
709 	struct kse_softc *sc = ifp->if_softc;
710 	uint32_t paddr;
711 	int i, error = 0;
712 
713 	/* cancel pending I/O */
714 	kse_stop(ifp, 0);
715 
716 	/* reset all registers but PCI configuration */
717 	kse_reset(sc);
718 
719 	/* craft Tx descriptor ring */
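	/*
	 * Each descriptor's t3 field holds the bus address of the next
	 * descriptor; the last entry points back to descriptor 0, so the
	 * DMA engine walks the KSE_NTXDESC entries as a circular ring.
	 * (The Rx ring below is linked the same way through r3.)
	 */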
720 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
721 	for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) {
722 		sc->sc_txdescs[i].t3 = paddr;
723 		paddr += sizeof(struct tdes);
724 	}
725 	sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0);
726 	KSE_CDTXSYNC(sc, 0, KSE_NTXDESC,
727 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
728 	sc->sc_txfree = KSE_NTXDESC;
729 	sc->sc_txnext = 0;
730 
731 	for (i = 0; i < KSE_TXQUEUELEN; i++)
732 		sc->sc_txsoft[i].txs_mbuf = NULL;
733 	sc->sc_txsfree = KSE_TXQUEUELEN;
734 	sc->sc_txsnext = 0;
735 	sc->sc_txsdirty = 0;
736 
737 	/* craft Rx descriptor ring */
738 	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
739 	for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) {
740 		sc->sc_rxdescs[i].r3 = paddr;
741 		paddr += sizeof(struct rdes);
742 	}
743 	sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0);
744 	for (i = 0; i < KSE_NRXDESC; i++) {
745 		if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
746 			if ((error = add_rxbuf(sc, i)) != 0) {
747 				aprint_error_dev(sc->sc_dev,
748 				    "unable to allocate or map rx "
749 				    "buffer %d, error = %d\n",
750 				    i, error);
751 				rxdrain(sc);
752 				goto out;
753 			}
754 		}
755 		else
756 			KSE_INIT_RXDESC(sc, i);
757 	}
758 	sc->sc_rxptr = 0;
759 
760 	/* hand Tx/Rx rings to HW */
761 	CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0));
762 	CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0));
763 
764 	sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC;
765 	sc->sc_rxc = RXC_REN | RXC_RU | RXC_RB;
766 	sc->sc_t1csum = sc->sc_mcsum = 0;
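	/*
	 * For each checksum capability the user enabled, turn on the
	 * corresponding DMA control bit and remember either the per-frame
	 * Tx descriptor t1 flag (sc_t1csum) or the mbuf csum_flags value
	 * claimed on received packets (sc_mcsum).
	 */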
767 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
768 		sc->sc_rxc |= RXC_ICC;
769 		sc->sc_mcsum |= M_CSUM_IPv4;
770 	}
771 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
772 		sc->sc_txc |= TXC_ICG;
773 		sc->sc_t1csum |= T1_IPCKG;
774 	}
775 	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) {
776 		sc->sc_rxc |= RXC_TCC;
777 		sc->sc_mcsum |= M_CSUM_TCPv4;
778 	}
779 	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) {
780 		sc->sc_txc |= TXC_TCG;
781 		sc->sc_t1csum |= T1_TCPCKG;
782 	}
783 	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) {
784 		sc->sc_rxc |= RXC_UCC;
785 		sc->sc_mcsum |= M_CSUM_UDPv4;
786 	}
787 	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) {
788 		sc->sc_txc |= TXC_UCG;
789 		sc->sc_t1csum |= T1_UDPCKG;
790 	}
791 	sc->sc_txc |= (kse_burstsize << TXC_BS_SFT);
792 	sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT);
793 
794 	if (sc->sc_chip == 0x8842) {
795 		/* make PAUSE flow control run */
796 		sc->sc_txc |= TXC_FCE;
797 		sc->sc_rxc |= RXC_FCE;
798 		i = CSR_READ_2(sc, SGCR3);
799 		CSR_WRITE_2(sc, SGCR3, i | CR3_USEFC);
800 	}
801 
802 	/* accept multicast frames or run promiscuous mode */
803 	kse_set_rcvfilt(sc);
804 
805 	/* set current media */
806 	if (sc->sc_chip == 0x8841)
807 		(void)kse_ifmedia_upd(ifp);
808 
809 	/* enable transmitter and receiver */
810 	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
811 	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
812 	CSR_WRITE_4(sc, MDRSC, 1);
813 
814 	/* enable interrupts */
815 	sc->sc_inten = INT_DMTS | INT_DMRS | INT_DMRBUS;
816 	if (sc->sc_chip == 0x8841)
817 		sc->sc_inten |= INT_DMLCS;
818 	CSR_WRITE_4(sc, INTST, ~0);
819 	CSR_WRITE_4(sc, INTEN, sc->sc_inten);
820 
821 	ifp->if_flags |= IFF_RUNNING;
822 	ifp->if_flags &= ~IFF_OACTIVE;
823 
824 	/* start one second timer */
825 	callout_schedule(&sc->sc_tick_ch, hz);
826 
827 #ifdef KSE_EVENT_COUNTERS
828 	zerostats(sc);
829 #endif
830 
831  out:
832 	if (error) {
833 		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
834 		ifp->if_timer = 0;
835 		aprint_error_dev(sc->sc_dev, "interface not running\n");
836 	}
837 	return error;
838 }
839 
840 static void
841 kse_stop(struct ifnet *ifp, int disable)
842 {
843 	struct kse_softc *sc = ifp->if_softc;
844 	struct kse_txsoft *txs;
845 	int i;
846 
847 	callout_stop(&sc->sc_tick_ch);
848 
849 	sc->sc_txc &= ~TXC_TEN;
850 	sc->sc_rxc &= ~RXC_REN;
851 	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
852 	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
853 
854 	for (i = 0; i < KSE_TXQUEUELEN; i++) {
855 		txs = &sc->sc_txsoft[i];
856 		if (txs->txs_mbuf != NULL) {
857 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
858 			m_freem(txs->txs_mbuf);
859 			txs->txs_mbuf = NULL;
860 		}
861 	}
862 
863 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
864 	ifp->if_timer = 0;
865 
866 	if (disable)
867 		rxdrain(sc);
868 }
869 
870 static void
871 kse_reset(struct kse_softc *sc)
872 {
873 
874 	/* software reset */
875 	CSR_WRITE_2(sc, GRR, 1);
876 	delay(1000); /* PDF does not mention the delay amount */
877 	CSR_WRITE_2(sc, GRR, 0);
878 
879 	/* enable switch function */
880 	CSR_WRITE_2(sc, SIDER, 1);
881 }
882 
883 static void
884 kse_watchdog(struct ifnet *ifp)
885 {
886 	struct kse_softc *sc = ifp->if_softc;
887 
888 	/*
889 	 * Since we're not interrupting every packet, sweep
890 	 * up before we report an error.
891 	 */
892 	txreap(sc);
893 
894 	if (sc->sc_txfree != KSE_NTXDESC) {
895 		aprint_error_dev(sc->sc_dev,
896 		    "device timeout (txfree %d txsfree %d txnext %d)\n",
897 		    sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
898 		if_statinc(ifp, if_oerrors);
899 
900 		/* Reset the interface. */
901 		kse_init(ifp);
902 	}
903 	else if (ifp->if_flags & IFF_DEBUG)
904 		aprint_error_dev(sc->sc_dev, "recovered from device timeout\n");
905 
906 	/* Try to get more packets going. */
907 	kse_start(ifp);
908 }
909 
910 static void
911 kse_start(struct ifnet *ifp)
912 {
913 	struct kse_softc *sc = ifp->if_softc;
914 	struct mbuf *m0, *m;
915 	struct kse_txsoft *txs;
916 	bus_dmamap_t dmamap;
917 	int error, nexttx, lasttx, ofree, seg;
918 	uint32_t tdes0;
919 
920 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
921 		return;
922 
923 	/* Remember the previous number of free descriptors. */
924 	ofree = sc->sc_txfree;
925 
926 	/*
927 	 * Loop through the send queue, setting up transmit descriptors
928 	 * until we drain the queue, or use up all available transmit
929 	 * descriptors.
930 	 */
931 	for (;;) {
932 		IFQ_POLL(&ifp->if_snd, m0);
933 		if (m0 == NULL)
934 			break;
935 
936 		if (sc->sc_txsfree < KSE_TXQUEUE_GC) {
937 			txreap(sc);
938 			if (sc->sc_txsfree == 0)
939 				break;
940 		}
941 		txs = &sc->sc_txsoft[sc->sc_txsnext];
942 		dmamap = txs->txs_dmamap;
943 
944 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
945 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
946 		if (error) {
947 			if (error == EFBIG) {
948 				aprint_error_dev(sc->sc_dev,
949 				    "Tx packet consumes too many "
950 				    "DMA segments, dropping...\n");
951 				IFQ_DEQUEUE(&ifp->if_snd, m0);
952 				m_freem(m0);
953 				continue;
954 			}
955 			/* Short on resources, just stop for now. */
956 			break;
957 		}
958 
959 		if (dmamap->dm_nsegs > sc->sc_txfree) {
960 			/*
961 			 * Not enough free descriptors to transmit this
962 			 * packet.  We haven't committed anything yet,
963 			 * so just unload the DMA map, put the packet
964 			 * back on the queue, and punt.	 Notify the upper
965 			 * layer that there are no more slots left.
966 			 */
967 			ifp->if_flags |= IFF_OACTIVE;
968 			bus_dmamap_unload(sc->sc_dmat, dmamap);
969 			break;
970 		}
971 
972 		IFQ_DEQUEUE(&ifp->if_snd, m0);
973 
974 		/*
975 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
976 		 */
977 
978 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
979 		    BUS_DMASYNC_PREWRITE);
980 
981 		tdes0 = 0; /* to postpone 1st segment T0_OWN write */
982 		lasttx = -1;
983 		for (nexttx = sc->sc_txnext, seg = 0;
984 		     seg < dmamap->dm_nsegs;
985 		     seg++, nexttx = KSE_NEXTTX(nexttx)) {
986 			struct tdes *tdes = &sc->sc_txdescs[nexttx];
987 			/*
988 			 * If this is the first descriptor we're
989 			 * enqueueing, don't set the OWN bit just
990 			 * yet.	 That could cause a race condition.
991 			 * We'll do it below.
992 			 */
993 			tdes->t2 = dmamap->dm_segs[seg].ds_addr;
994 			tdes->t1 = sc->sc_t1csum
995 			     | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK);
996 			tdes->t0 = tdes0;
997 			tdes0 = T0_OWN; /* 2nd and other segments */
998 			lasttx = nexttx;
999 		}
1000 		/*
1001 		 * An outgoing NFS mbuf must be unloaded when Tx has completed.
1002 		 * Without T1_IC the NFS mbuf is left unack'ed for an excessive
1003 		 * time and NFS stops making progress until kse_watchdog()
1004 		 * calls txreap() to reclaim the unack'ed mbuf.
1005 		 * It's painful to traverse every mbuf chain to determine
1006 		 * whether someone is waiting for Tx completion.
1007 		 */
1008 		m = m0;
1009 		do {
1010 			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
1011 				sc->sc_txdescs[lasttx].t1 |= T1_IC;
1012 				break;
1013 			}
1014 		} while ((m = m->m_next) != NULL);
1015 
1016 		/* Write deferred 1st segment T0_OWN at the final stage */
1017 		sc->sc_txdescs[lasttx].t1 |= T1_LS;
1018 		sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS;
1019 		sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN;
1020 		KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1021 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1022 
1023 		/* Tell DMA start transmit */
1024 		CSR_WRITE_4(sc, MDTSC, 1);
1025 
1026 		txs->txs_mbuf = m0;
1027 		txs->txs_firstdesc = sc->sc_txnext;
1028 		txs->txs_lastdesc = lasttx;
1029 		txs->txs_ndesc = dmamap->dm_nsegs;
1030 
1031 		sc->sc_txfree -= txs->txs_ndesc;
1032 		sc->sc_txnext = nexttx;
1033 		sc->sc_txsfree--;
1034 		sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext);
1035 		/*
1036 		 * Pass the packet to any BPF listeners.
1037 		 */
1038 		bpf_mtap(ifp, m0, BPF_D_OUT);
1039 	}
1040 
1041 	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
1042 		/* No more slots left; notify upper layer. */
1043 		ifp->if_flags |= IFF_OACTIVE;
1044 	}
1045 	if (sc->sc_txfree != ofree) {
1046 		/* Set a watchdog timer in case the chip flakes out. */
1047 		ifp->if_timer = 5;
1048 	}
1049 }
1050 
1051 static void
1052 kse_set_rcvfilt(struct kse_softc *sc)
1053 {
1054 	struct ether_multistep step;
1055 	struct ether_multi *enm;
1056 	struct ethercom *ec = &sc->sc_ethercom;
1057 	struct ifnet *ifp = &ec->ec_if;
1058 	uint32_t crc, mchash[2];
1059 	int i;
1060 
1061 	sc->sc_rxc &= ~(RXC_MHTE | RXC_RM | RXC_RA);
1062 
1063 	/* clear perfect match filter and prepare mcast hash table */
1064 	for (i = 0; i < 16; i++)
1065 		 CSR_WRITE_4(sc, MAAH0 + i*8, 0);
1066 	crc = mchash[0] = mchash[1] = 0;
1067 
1068 	ETHER_LOCK(ec);
1069 	if (ifp->if_flags & IFF_PROMISC) {
1070 		ec->ec_flags |= ETHER_F_ALLMULTI;
1071 		ETHER_UNLOCK(ec);
1072 		/* run promisc. mode */
1073 		sc->sc_rxc |= RXC_RA;
1074 		goto update;
1075 	}
1076 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
1077 	ETHER_FIRST_MULTI(step, ec, enm);
1078 	i = 0;
1079 	while (enm != NULL) {
1080 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1081 			/*
1082 			 * We must listen to a range of multicast addresses.
1083 			 * For now, just accept all multicasts, rather than
1084 			 * trying to set only those filter bits needed to match
1085 			 * the range.  (At this time, the only use of address
1086 			 * ranges is for IP multicast routing, for which the
1087 			 * range is big enough to require all bits set.)
1088 			 */
1089 			ec->ec_flags |= ETHER_F_ALLMULTI;
1090 			ETHER_UNLOCK(ec);
1091 			/* accept all multicast */
1092 			sc->sc_rxc |= RXC_RM;
1093 			goto update;
1094 		}
1095 #if KSE_MCASTDEBUG == 1
1096 		printf("[%d] %s\n", i, ether_sprintf(enm->enm_addrlo));
1097 #endif
1098 		if (i < 16) {
1099 			/* use 16 additional MAC addr to accept mcast */
1100 			uint32_t addr;
1101 			uint8_t *ep = enm->enm_addrlo;
1102 			addr = (ep[3] << 24) | (ep[2] << 16)
1103 			     | (ep[1] << 8)  |  ep[0];
1104 			CSR_WRITE_4(sc, MAAL0 + i*8, addr);
1105 			addr = (ep[5] << 8) | ep[4];
1106 			CSR_WRITE_4(sc, MAAH0 + i*8, addr | (1U << 31));
1107 		} else {
1108 			/* use hash table when too many */
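			/*
			 * The top 6 bits of the little-endian CRC select one
			 * of the 64 hash bits: bit 31 picks MTR0 or MTR1 and
			 * bits 30:26 pick the bit within that register.
			 */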
1109 			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1110 			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
1111 		}
1112 		ETHER_NEXT_MULTI(step, enm);
1113 		i++;
1114 	}
1115 	ETHER_UNLOCK(ec);
1116 
1117 	if (crc)
1118 		sc->sc_rxc |= RXC_MHTE;
1119 	CSR_WRITE_4(sc, MTR0, mchash[0]);
1120 	CSR_WRITE_4(sc, MTR1, mchash[1]);
1121  update:
1122 	/* With RA or RM, MHTE/MTR0/MTR1 are never consulted. */
1123 	return;
1124 }
1125 
1126 static int
1127 add_rxbuf(struct kse_softc *sc, int idx)
1128 {
1129 	struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx];
1130 	struct mbuf *m;
1131 	int error;
1132 
1133 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1134 	if (m == NULL)
1135 		return ENOBUFS;
1136 
1137 	MCLGET(m, M_DONTWAIT);
1138 	if ((m->m_flags & M_EXT) == 0) {
1139 		m_freem(m);
1140 		return ENOBUFS;
1141 	}
1142 
1143 	if (rxs->rxs_mbuf != NULL)
1144 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1145 
1146 	rxs->rxs_mbuf = m;
1147 
1148 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1149 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1150 	if (error) {
1151 		aprint_error_dev(sc->sc_dev,
1152 		    "can't load rx DMA map %d, error = %d\n", idx, error);
1153 		panic("kse_add_rxbuf");
1154 	}
1155 
1156 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1157 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1158 
1159 	KSE_INIT_RXDESC(sc, idx);
1160 
1161 	return 0;
1162 }
1163 
1164 static void
1165 rxdrain(struct kse_softc *sc)
1166 {
1167 	struct kse_rxsoft *rxs;
1168 	int i;
1169 
1170 	for (i = 0; i < KSE_NRXDESC; i++) {
1171 		rxs = &sc->sc_rxsoft[i];
1172 		if (rxs->rxs_mbuf != NULL) {
1173 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1174 			m_freem(rxs->rxs_mbuf);
1175 			rxs->rxs_mbuf = NULL;
1176 		}
1177 	}
1178 }
1179 
1180 static int
1181 kse_intr(void *arg)
1182 {
1183 	struct kse_softc *sc = arg;
1184 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1185 	uint32_t isr;
1186 
1187 	if ((isr = CSR_READ_4(sc, INTST)) == 0)
1188 		return 0;
1189 
1190 	if (isr & INT_DMRS)
1191 		rxintr(sc);
1192 	if (isr & INT_DMTS)
1193 		txreap(sc);
1194 	if (isr & INT_DMLCS)
1195 		lnkchg(sc);
1196 	if (isr & INT_DMRBUS)
1197 		aprint_error_dev(sc->sc_dev, "Rx descriptor full\n");
1198 
1199 	CSR_WRITE_4(sc, INTST, isr);
1200 
1201 	if (ifp->if_flags & IFF_RUNNING)
1202 		if_schedule_deferred_start(ifp);
1203 
1204 	return 1;
1205 }
1206 
1207 static void
1208 rxintr(struct kse_softc *sc)
1209 {
1210 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1211 	struct kse_rxsoft *rxs;
1212 	struct mbuf *m;
1213 	uint32_t rxstat;
1214 	int i, len;
1215 
1216 	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) {
1217 		rxs = &sc->sc_rxsoft[i];
1218 
1219 		KSE_CDRXSYNC(sc, i,
1220 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1221 
1222 		rxstat = sc->sc_rxdescs[i].r0;
1223 
1224 		if (rxstat & R0_OWN) /* desc is left empty */
1225 			break;
1226 
1227 		/* R0_FS | R0_LS must have been marked for this desc */
1228 
1229 		if (rxstat & R0_ES) {
1230 			if_statinc(ifp, if_ierrors);
1231 #define PRINTERR(bit, str)						\
1232 			if (rxstat & (bit))				\
1233 				aprint_error_dev(sc->sc_dev,		\
1234 				    "%s\n", str)
1235 			PRINTERR(R0_TL, "frame too long");
1236 			PRINTERR(R0_RF, "runt frame");
1237 			PRINTERR(R0_CE, "bad FCS");
1238 #undef PRINTERR
1239 			KSE_INIT_RXDESC(sc, i);
1240 			continue;
1241 		}
1242 
1243 		/* HW errata; frame might be too small or too large */
1244 
1245 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1246 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1247 
1248 		len = rxstat & R0_FL_MASK;
1249 		len -= ETHER_CRC_LEN;	/* Trim CRC off */
1250 		m = rxs->rxs_mbuf;
1251 
1252 		if (add_rxbuf(sc, i) != 0) {
1253 			if_statinc(ifp, if_ierrors);
1254 			KSE_INIT_RXDESC(sc, i);
1255 			bus_dmamap_sync(sc->sc_dmat,
1256 			    rxs->rxs_dmamap, 0,
1257 			    rxs->rxs_dmamap->dm_mapsize,
1258 			    BUS_DMASYNC_PREREAD);
1259 			continue;
1260 		}
1261 
1262 		m_set_rcvif(m, ifp);
1263 		m->m_pkthdr.len = m->m_len = len;
1264 
1265 		if (sc->sc_mcsum) {
1266 			m->m_pkthdr.csum_flags |= sc->sc_mcsum;
1267 			if (rxstat & R0_IPE)
1268 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1269 			if (rxstat & (R0_TCPE | R0_UDPE))
1270 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1271 		}
1272 		if_percpuq_enqueue(ifp->if_percpuq, m);
1273 #ifdef KSEDIAGNOSTIC
1274 		if (kse_monitor_rxintr > 0) {
1275 			aprint_error_dev(sc->sc_dev,
1276 			    "m stat %x data %p len %d\n",
1277 			    rxstat, m->m_data, m->m_len);
1278 		}
1279 #endif
1280 	}
1281 	sc->sc_rxptr = i;
1282 }
1283 
1284 static void
1285 txreap(struct kse_softc *sc)
1286 {
1287 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1288 	struct kse_txsoft *txs;
1289 	uint32_t txstat;
1290 	int i;
1291 
1292 	ifp->if_flags &= ~IFF_OACTIVE;
1293 
1294 	for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN;
1295 	     i = KSE_NEXTTXS(i), sc->sc_txsfree++) {
1296 		txs = &sc->sc_txsoft[i];
1297 
1298 		KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
1299 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1300 
1301 		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;
1302 
1303 		if (txstat & T0_OWN) /* desc is still in use */
1304 			break;
1305 
1306 		/* There is no way to tell transmission status per frame */
1307 
1308 		if_statinc(ifp, if_opackets);
1309 
1310 		sc->sc_txfree += txs->txs_ndesc;
1311 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1312 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1313 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1314 		m_freem(txs->txs_mbuf);
1315 		txs->txs_mbuf = NULL;
1316 	}
1317 	sc->sc_txsdirty = i;
1318 	if (sc->sc_txsfree == KSE_TXQUEUELEN)
1319 		ifp->if_timer = 0;
1320 }
1321 
1322 static void
1323 lnkchg(struct kse_softc *sc)
1324 {
1325 	struct ifmediareq ifmr;
1326 
1327 #if KSE_LINKDEBUG == 1
1328 	uint16_t p1sr = CSR_READ_2(sc, P1SR);
1329 printf("link %s detected\n", (p1sr & PxSR_LINKUP) ? "up" : "down");
1330 #endif
1331 	kse_ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
1332 }
1333 
1334 static int
1335 kse_ifmedia_upd(struct ifnet *ifp)
1336 {
1337 	struct kse_softc *sc = ifp->if_softc;
1338 	struct ifmedia *ifm = &sc->sc_mii.mii_media;
1339 	uint16_t p1cr4;
1340 
1341 	p1cr4 = 0;
1342 	if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_AUTO) {
1343 		p1cr4 |= PxCR_STARTNEG;	/* restart AN */
1344 		p1cr4 |= PxCR_AUTOEN;	/* enable AN */
1345 		p1cr4 |= PxCR_USEFC;	/* advertise flow control pause */
1346 		p1cr4 |= 0xf;		/* adv. 100FDX,100HDX,10FDX,10HDX */
1347 	} else {
1348 		if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
1349 			p1cr4 |= PxCR_SPD100;
1350 		if (ifm->ifm_media & IFM_FDX)
1351 			p1cr4 |= PxCR_USEFDX;
1352 	}
1353 	CSR_WRITE_2(sc, P1CR4, p1cr4);
1354 #if KSE_LINKDEBUG == 1
1355 printf("P1CR4: %04x\n", p1cr4);
1356 #endif
1357 	return 0;
1358 }
1359 
1360 static void
1361 kse_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1362 {
1363 	struct kse_softc *sc = ifp->if_softc;
1364 	struct mii_data *mii = &sc->sc_mii;
1365 
1366 	mii_pollstat(mii);
1367 	ifmr->ifm_status = mii->mii_media_status;
1368 	ifmr->ifm_active = sc->sc_flowflags |
1369 	    (mii->mii_media_active & ~IFM_ETH_FMASK);
1370 }
1371 
1372 static void
1373 nopifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1374 {
1375 	struct kse_softc *sc = ifp->if_softc;
1376 	struct ifmedia *ifm = &sc->sc_media;
1377 
1378 #if KSE_LINKDEBUG == 2
1379 printf("p1sr: %04x, p2sr: %04x\n", CSR_READ_2(sc, P1SR), CSR_READ_2(sc, P2SR));
1380 #endif
1381 
1382 	/* 8842 MAC pretends 100FDX all the time */
1383 	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1384 	ifmr->ifm_active = ifm->ifm_cur->ifm_media |
1385 	    IFM_FLOW | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
1386 }
1387 
1388 static void
1389 phy_tick(void *arg)
1390 {
1391 	struct kse_softc *sc = arg;
1392 	struct mii_data *mii = &sc->sc_mii;
1393 	int s;
1394 
1395 	if (sc->sc_chip == 0x8841) {
1396 		s = splnet();
1397 		mii_tick(mii);
1398 		splx(s);
1399 	}
1400 #ifdef KSE_EVENT_COUNTERS
1401 	stat_tick(arg);
1402 #endif
1403 	callout_schedule(&sc->sc_tick_ch, hz);
1404 }
1405 
1406 static const uint16_t phy1csr[] = {
1407 	/* 0 BMCR */	0x4d0,
1408 	/* 1 BMSR */	0x4d2,
1409 	/* 2 PHYID1 */	0x4d6,	/* 0x0022 - PHY1HR */
1410 	/* 3 PHYID2 */	0x4d4,	/* 0x1430 - PHY1LR */
1411 	/* 4 ANAR */	0x4d8,
1412 	/* 5 ANLPAR */	0x4da,
1413 };
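/*
 * The 8841 exposes the port 1 PHY through memory-mapped CSRs rather than
 * a real MII bus, so kse_mii_readreg()/kse_mii_writereg() simply translate
 * the standard MII register number (BMCR, BMSR, ...) into the CSR offset
 * listed above; e.g. reading BMSR is just CSR_READ_2(sc, 0x4d2).
 */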
1414 
1415 int
1416 kse_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
1417 {
1418 	struct kse_softc *sc = device_private(self);
1419 
1420 	if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
1421 		return EINVAL;
1422 	*val = CSR_READ_2(sc, phy1csr[reg]);
1423 	return 0;
1424 }
1425 
1426 int
1427 kse_mii_writereg(device_t self, int phy, int reg, uint16_t val)
1428 {
1429 	struct kse_softc *sc = device_private(self);
1430 
1431 	if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
1432 		return EINVAL;
1433 	CSR_WRITE_2(sc, phy1csr[reg], val);
1434 	return 0;
1435 }
1436 
1437 void
1438 kse_mii_statchg(struct ifnet *ifp)
1439 {
1440 	struct kse_softc *sc = ifp->if_softc;
1441 	struct mii_data *mii = &sc->sc_mii;
1442 
1443 #if KSE_LINKDEBUG == 1
1444 	/* decode P1SR register value */
1445 	uint16_t p1sr = CSR_READ_2(sc, P1SR);
1446 	printf("P1SR %04x, spd%d", p1sr, (p1sr & PxSR_SPD100) ? 100 : 10);
1447 	if (p1sr & PxSR_FDX)
1448 		printf(",full-duplex");
1449 	if (p1sr & PxSR_RXFLOW)
1450 		printf(",rxpause");
1451 	if (p1sr & PxSR_TXFLOW)
1452 		printf(",txpause");
1453 	printf("\n");
1454 	/* show resolved mii(4) parameters to compare against above */
1455 	printf("MII spd%d",
1456 	    (int)(sc->sc_ethercom.ec_if.if_baudrate / IF_Mbps(1)));
1457 	if (mii->mii_media_active & IFM_FDX)
1458 		printf(",full-duplex");
1459 	if (mii->mii_media_active & IFM_FLOW) {
1460 		printf(",flowcontrol");
1461 		if (mii->mii_media_active & IFM_ETH_RXPAUSE)
1462 			printf(",rxpause");
1463 		if (mii->mii_media_active & IFM_ETH_TXPAUSE)
1464 			printf(",txpause");
1465 	}
1466 	printf("\n");
1467 #endif
1468 	/* Get flow control negotiation result. */
1469 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
1470 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags)
1471 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
1472 
1473 	/* Adjust MAC PAUSE flow control. */
1474 	if ((mii->mii_media_active & IFM_FDX)
1475 	    && (sc->sc_flowflags & IFM_ETH_TXPAUSE))
1476 		sc->sc_txc |= TXC_FCE;
1477 	else
1478 		sc->sc_txc &= ~TXC_FCE;
1479 	if ((mii->mii_media_active & IFM_FDX)
1480 	    && (sc->sc_flowflags & IFM_ETH_RXPAUSE))
1481 		sc->sc_rxc |= RXC_FCE;
1482 	else
1483 		sc->sc_rxc &= ~RXC_FCE;
1484 	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
1485 	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
1486 #if KSE_LINKDEBUG == 1
1487 	printf("%ctxfce, %crxfce\n",
1488 	    (sc->sc_txc & TXC_FCE) ? '+' : '-',
1489 	    (sc->sc_rxc & RXC_FCE) ? '+' : '-');
1490 #endif
1491 }
1492 
1493 #ifdef KSE_EVENT_COUNTERS
1494 static void
1495 stat_tick(void *arg)
1496 {
1497 	struct kse_softc *sc = arg;
1498 	struct ksext *ee = &sc->sc_ext;
1499 	int nport, p, i, reg, val;
1500 
1501 	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
1502 	for (p = 0; p < nport; p++) {
1503 		/* read 34 ev counters by indirect read via IACR */
1504 		for (i = 0; i < 32; i++) {
1505 			reg = EVCNTBR + p * 0x20 + i;
1506 			CSR_WRITE_2(sc, IACR, reg);
1507 			/* the 30-bit counter value is split across IADR5 & IADR4 */
1508 			do {
1509 				val = CSR_READ_2(sc, IADR5) << 16;
1510 			} while ((val & IADR_LATCH) == 0);
1511 			if (val & IADR_OVF) {
1512 				(void)CSR_READ_2(sc, IADR4);
1513 				val = 0x3fffffff; /* counter has overflowed */
1514 			}
1515 			else {
1516 				val &= 0x3fff0000;		/* 29:16 */
1517 				val |= CSR_READ_2(sc, IADR4);	/* 15:0 */
1518 			}
1519 			ee->pev[p][i].ev_count += val; /* ev0 thru 31 */
1520 		}
1521 		/* ev32 and ev33 are 16-bit counters */
1522 		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p);
1523 		ee->pev[p][32].ev_count += CSR_READ_2(sc, IADR4); /* ev32 */
1524 		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p * 3 + 1);
1525 		ee->pev[p][33].ev_count += CSR_READ_2(sc, IADR4); /* ev33 */
1526 	}
1527 }
1528 
1529 static void
1530 zerostats(struct kse_softc *sc)
1531 {
1532 	struct ksext *ee = &sc->sc_ext;
1533 	int nport, p, i, reg, val;
1534 
1535 	/* Make sure all the HW counters are zeroed */
1536 	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
1537 	for (p = 0; p < nport; p++) {
1538 		for (i = 0; i < 32; i++) {
1539 			reg = EVCNTBR + p * 0x20 + i;
1540 			CSR_WRITE_2(sc, IACR, reg);
1541 			do {
1542 				val = CSR_READ_2(sc, IADR5) << 16;
1543 			} while ((val & IADR_LATCH) == 0);
1544 			(void)CSR_READ_2(sc, IADR4);
1545 			ee->pev[p][i].ev_count = 0;
1546 		}
1547 		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p);
1548 		(void)CSR_READ_2(sc, IADR4);
1549 		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p * 3 + 1);
1550 		(void)CSR_READ_2(sc, IADR4);
1551 		ee->pev[p][32].ev_count = 0;
1552 		ee->pev[p][33].ev_count = 0;
1553 	}
1554 }
1555 #endif
1556