xref: /netbsd-src/sys/dev/pci/if_kse.c (revision 82d56013d7b633d116a93943de88e08335357a7c)
1 /*	$NetBSD: if_kse.c,v 1.57 2021/05/08 00:27:02 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Tohru Nishimura.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Micrel 8841/8842 10/100 PCI ethernet driver
34  */
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.57 2021/05/08 00:27:02 thorpej Exp $");
38 
39 #include <sys/param.h>
40 #include <sys/bus.h>
41 #include <sys/intr.h>
42 #include <sys/device.h>
43 #include <sys/callout.h>
44 #include <sys/ioctl.h>
45 #include <sys/mbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/rndsource.h>
48 #include <sys/errno.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 
52 #include <net/if.h>
53 #include <net/if_media.h>
54 #include <net/if_dl.h>
55 #include <net/if_ether.h>
56 #include <dev/mii/mii.h>
57 #include <dev/mii/miivar.h>
58 #include <net/bpf.h>
59 
60 #include <dev/pci/pcivar.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcidevs.h>
63 
64 #define KSE_LINKDEBUG 0
65 
66 #define CSR_READ_4(sc, off) \
67 	    bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off))
68 #define CSR_WRITE_4(sc, off, val) \
69 	    bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val))
70 #define CSR_READ_2(sc, off) \
71 	    bus_space_read_2((sc)->sc_st, (sc)->sc_sh, (off))
72 #define CSR_WRITE_2(sc, off, val) \
73 	    bus_space_write_2((sc)->sc_st, (sc)->sc_sh, (off), (val))
74 
75 #define MDTXC		0x000		/* DMA transmit control */
76 #define MDRXC		0x004		/* DMA receive control */
77 #define MDTSC		0x008		/* trigger DMA transmit (SC) */
78 #define MDRSC		0x00c		/* trigger DMA receive (SC) */
79 #define TDLB		0x010		/* transmit descriptor list base */
80 #define RDLB		0x014		/* receive descriptor list base */
81 #define MTR0		0x020		/* multicast table 31:0 */
82 #define MTR1		0x024		/* multicast table 63:32 */
83 #define INTEN		0x028		/* interrupt enable */
84 #define INTST		0x02c		/* interrupt status */
85 #define MAAL0		0x080		/* additional MAC address 0 low */
86 #define MAAH0		0x084		/* additional MAC address 0 high */
87 #define MARL		0x200		/* MAC address low */
88 #define MARM		0x202		/* MAC address middle */
89 #define MARH		0x204		/* MAC address high */
90 #define GRR		0x216		/* global reset */
91 #define SIDER		0x400		/* switch ID and function enable */
92 #define SGCR3		0x406		/* switch function control 3 */
93 #define  CR3_USEHDX	(1U<<6)		/* use half-duplex 8842 host port */
94 #define  CR3_USEFC	(1U<<5) 	/* use flowcontrol 8842 host port */
95 #define IACR		0x4a0		/* indirect access control */
96 #define IADR1		0x4a2		/* indirect access data 66:63 */
97 #define IADR2		0x4a4		/* indirect access data 47:32 */
98 #define IADR3		0x4a6		/* indirect access data 63:48 */
99 #define IADR4		0x4a8		/* indirect access data 15:0 */
100 #define IADR5		0x4aa		/* indirect access data 31:16 */
101 #define  IADR_LATCH	(1U<<30)	/* latch completed indication */
102 #define  IADR_OVF	(1U<<31)	/* overflow detected */
103 #define P1CR4		0x512		/* port 1 control 4 */
104 #define P1SR		0x514		/* port 1 status */
105 #define P2CR4		0x532		/* port 2 control 4 */
106 #define P2SR		0x534		/* port 2 status */
107 #define  PxCR_STARTNEG	(1U<<9)		/* restart auto negotiation */
108 #define  PxCR_AUTOEN	(1U<<7)		/* auto negotiation enable */
109 #define  PxCR_SPD100	(1U<<6)		/* force speed 100 */
110 #define  PxCR_USEFDX	(1U<<5)		/* force full duplex */
111 #define  PxCR_USEFC	(1U<<4)		/* advertise pause flow control */
112 #define  PxSR_ACOMP	(1U<<6)		/* auto negotiation completed */
113 #define  PxSR_SPD100	(1U<<10)	/* speed is 100Mbps */
114 #define  PxSR_FDX	(1U<<9)		/* full duplex */
115 #define  PxSR_LINKUP	(1U<<5)		/* link is good */
116 #define  PxSR_RXFLOW	(1U<<12)	/* receive flow control active */
117 #define  PxSR_TXFLOW	(1U<<11)	/* transmit flow control active */
118 #define P1VIDCR		0x504		/* port 1 vtag */
119 #define P2VIDCR		0x524		/* port 2 vtag */
120 #define P3VIDCR		0x544		/* 8842 host vtag */
121 #define EVCNTBR		0x1c00		/* 3 sets of 34 event counters */
122 
123 #define TXC_BS_MSK	0x3f000000	/* burst size */
124 #define TXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
125 #define TXC_UCG		(1U<<18)	/* generate UDP checksum */
126 #define TXC_TCG		(1U<<17)	/* generate TCP checksum */
127 #define TXC_ICG		(1U<<16)	/* generate IP checksum */
128 #define TXC_FCE		(1U<<9)		/* generate PAUSE to moderate Rx lvl */
129 #define TXC_EP		(1U<<2)		/* enable automatic padding */
130 #define TXC_AC		(1U<<1)		/* add CRC to frame */
131 #define TXC_TEN		(1)		/* enable DMA to run */
132 
133 #define RXC_BS_MSK	0x3f000000	/* burst size */
134 #define RXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
135 #define RXC_IHAE	(1U<<19)	/* IP header alignment enable */
136 #define RXC_UCC		(1U<<18)	/* run UDP checksum */
137 #define RXC_TCC		(1U<<17)	/* run TDP checksum */
138 #define RXC_ICC		(1U<<16)	/* run IP checksum */
139 #define RXC_FCE		(1U<<9)		/* accept PAUSE to throttle Tx */
140 #define RXC_RB		(1U<<6)		/* receive broadcast frame */
141 #define RXC_RM		(1U<<5)		/* receive all multicast (inc. RB) */
142 #define RXC_RU		(1U<<4)		/* receive 16 additional unicasts */
143 #define RXC_RE		(1U<<3)		/* accept error frame */
144 #define RXC_RA		(1U<<2)		/* receive all frame */
145 #define RXC_MHTE	(1U<<1)		/* use multicast hash table */
146 #define RXC_REN		(1)		/* enable DMA to run */
147 
148 #define INT_DMLCS	(1U<<31)	/* link status change */
149 #define INT_DMTS	(1U<<30)	/* sending desc. has posted Tx done */
150 #define INT_DMRS	(1U<<29)	/* frame was received */
151 #define INT_DMRBUS	(1U<<27)	/* Rx descriptor pool is full */
152 #define INT_DMxPSS	(3U<<25)	/* 26:25 DMA Tx/Rx have stopped */
153 
154 struct tdes {
155 	uint32_t t0, t1, t2, t3;
156 };
157 
158 struct rdes {
159 	uint32_t r0, r1, r2, r3;
160 };
161 
162 #define T0_OWN		(1U<<31)	/* desc is ready to Tx */
163 
164 #define R0_OWN		(1U<<31)	/* desc is empty */
165 #define R0_FS		(1U<<30)	/* first segment of frame */
166 #define R0_LS		(1U<<29)	/* last segment of frame */
167 #define R0_IPE		(1U<<28)	/* IP checksum error */
168 #define R0_TCPE		(1U<<27)	/* TCP checksum error */
169 #define R0_UDPE		(1U<<26)	/* UDP checksum error */
170 #define R0_ES		(1U<<25)	/* error summary */
171 #define R0_MF		(1U<<24)	/* multicast frame */
172 #define R0_SPN		0x00300000	/* 21:20 switch port 1/2 */
173 #define R0_ALIGN	0x00300000	/* 21:20 (KSZ8692P) Rx align amount */
174 #define R0_RE		(1U<<19)	/* MII reported error */
175 #define R0_TL		(1U<<18)	/* frame too long, beyond 1518 */
176 #define R0_RF		(1U<<17)	/* damaged runt frame */
177 #define R0_CE		(1U<<16)	/* CRC error */
178 #define R0_FT		(1U<<15)	/* frame type */
179 #define R0_FL_MASK	0x7ff		/* frame length 10:0 */
180 
181 #define T1_IC		(1U<<31)	/* post interrupt on complete */
182 #define T1_FS		(1U<<30)	/* first segment of frame */
183 #define T1_LS		(1U<<29)	/* last segment of frame */
184 #define T1_IPCKG	(1U<<28)	/* generate IP checksum */
185 #define T1_TCPCKG	(1U<<27)	/* generate TCP checksum */
186 #define T1_UDPCKG	(1U<<26)	/* generate UDP checksum */
187 #define T1_TER		(1U<<25)	/* end of ring */
188 #define T1_SPN		0x00300000	/* 21:20 switch port 1/2 */
189 #define T1_TBS_MASK	0x7ff		/* segment size 10:0 */
190 
191 #define R1_RER		(1U<<25)	/* end of ring */
192 #define R1_RBS_MASK	0x7fc		/* segment size 10:0 */
193 
194 #define KSE_NTXSEGS		16
195 #define KSE_TXQUEUELEN		64
196 #define KSE_TXQUEUELEN_MASK	(KSE_TXQUEUELEN - 1)
197 #define KSE_TXQUEUE_GC		(KSE_TXQUEUELEN / 4)
198 #define KSE_NTXDESC		256
199 #define KSE_NTXDESC_MASK	(KSE_NTXDESC - 1)
200 #define KSE_NEXTTX(x)		(((x) + 1) & KSE_NTXDESC_MASK)
201 #define KSE_NEXTTXS(x)		(((x) + 1) & KSE_TXQUEUELEN_MASK)
202 
203 #define KSE_NRXDESC		64
204 #define KSE_NRXDESC_MASK	(KSE_NRXDESC - 1)
205 #define KSE_NEXTRX(x)		(((x) + 1) & KSE_NRXDESC_MASK)
206 
207 struct kse_control_data {
208 	struct tdes kcd_txdescs[KSE_NTXDESC];
209 	struct rdes kcd_rxdescs[KSE_NRXDESC];
210 };
211 #define KSE_CDOFF(x)		offsetof(struct kse_control_data, x)
212 #define KSE_CDTXOFF(x)		KSE_CDOFF(kcd_txdescs[(x)])
213 #define KSE_CDRXOFF(x)		KSE_CDOFF(kcd_rxdescs[(x)])
214 
215 struct kse_txsoft {
216 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
217 	bus_dmamap_t txs_dmamap;	/* our DMA map */
218 	int txs_firstdesc;		/* first descriptor in packet */
219 	int txs_lastdesc;		/* last descriptor in packet */
220 	int txs_ndesc;			/* # of descriptors used */
221 };
222 
223 struct kse_rxsoft {
224 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
225 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
226 };
227 
228 struct kse_softc {
229 	device_t sc_dev;		/* generic device information */
230 	bus_space_tag_t sc_st;		/* bus space tag */
231 	bus_space_handle_t sc_sh;	/* bus space handle */
232 	bus_size_t sc_memsize;		/* csr map size */
233 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
234 	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
235 	struct ethercom sc_ethercom;	/* Ethernet common data */
236 	void *sc_ih;			/* interrupt cookie */
237 
238 	struct mii_data sc_mii;		/* mii 8841 */
239 	struct ifmedia sc_media;	/* ifmedia 8842 */
240 	int sc_flowflags;		/* 802.3x PAUSE flow control */
241 
242 	callout_t  sc_tick_ch;		/* MII tick callout */
243 
244 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
245 #define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
246 
247 	struct kse_control_data *sc_control_data;
248 #define sc_txdescs	sc_control_data->kcd_txdescs
249 #define sc_rxdescs	sc_control_data->kcd_rxdescs
250 
251 	struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN];
252 	struct kse_rxsoft sc_rxsoft[KSE_NRXDESC];
253 	int sc_txfree;			/* number of free Tx descriptors */
254 	int sc_txnext;			/* next ready Tx descriptor */
255 	int sc_txsfree;			/* number of free Tx jobs */
256 	int sc_txsnext;			/* next ready Tx job */
257 	int sc_txsdirty;		/* dirty Tx jobs */
258 	int sc_rxptr;			/* next ready Rx descriptor/descsoft */
259 
260 	uint32_t sc_txc, sc_rxc;
261 	uint32_t sc_t1csum;
262 	int sc_mcsum;
263 	uint32_t sc_inten;
264 	uint32_t sc_chip;
265 
266 	krndsource_t rnd_source;	/* random source */
267 
268 #ifdef KSE_EVENT_COUNTERS
269 	struct ksext {
270 		char evcntname[3][8];
271 		struct evcnt pev[3][34];
272 	} sc_ext;			/* switch statistics */
273 #endif
274 };
275 
276 #define KSE_CDTXADDR(sc, x)	((sc)->sc_cddma + KSE_CDTXOFF((x)))
277 #define KSE_CDRXADDR(sc, x)	((sc)->sc_cddma + KSE_CDRXOFF((x)))
278 
279 #define KSE_CDTXSYNC(sc, x, n, ops)					\
280 do {									\
281 	int __x, __n;							\
282 									\
283 	__x = (x);							\
284 	__n = (n);							\
285 									\
286 	/* If it will wrap around, sync to the end of the ring. */	\
287 	if ((__x + __n) > KSE_NTXDESC) {				\
288 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
289 		    KSE_CDTXOFF(__x), sizeof(struct tdes) *		\
290 		    (KSE_NTXDESC - __x), (ops));			\
291 		__n -= (KSE_NTXDESC - __x);				\
292 		__x = 0;						\
293 	}								\
294 									\
295 	/* Now sync whatever is left. */				\
296 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
297 	    KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
298 } while (/*CONSTCOND*/0)
299 
300 #define KSE_CDRXSYNC(sc, x, ops)					\
301 do {									\
302 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
303 	    KSE_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
304 } while (/*CONSTCOND*/0)
305 
306 #define KSE_INIT_RXDESC(sc, x)						\
307 do {									\
308 	struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
309 	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
310 	struct mbuf *__m = __rxs->rxs_mbuf;				\
311 									\
312 	__m->m_data = __m->m_ext.ext_buf;				\
313 	__rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr;		\
314 	__rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */;		\
315 	__rxd->r0 = R0_OWN;						\
316 	KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
317 } while (/*CONSTCOND*/0)
318 
319 u_int kse_burstsize = 8;	/* DMA burst length tuning knob */
320 
321 #ifdef KSEDIAGNOSTIC
322 u_int kse_monitor_rxintr;	/* fragmented UDP csum HW bug hook */
323 #endif
324 
325 static int kse_match(device_t, cfdata_t, void *);
326 static void kse_attach(device_t, device_t, void *);
327 
328 CFATTACH_DECL_NEW(kse, sizeof(struct kse_softc),
329     kse_match, kse_attach, NULL, NULL);
330 
331 static int kse_ioctl(struct ifnet *, u_long, void *);
332 static void kse_start(struct ifnet *);
333 static void kse_watchdog(struct ifnet *);
334 static int kse_init(struct ifnet *);
335 static void kse_stop(struct ifnet *, int);
336 static void kse_reset(struct kse_softc *);
337 static void kse_set_rcvfilt(struct kse_softc *);
338 static int add_rxbuf(struct kse_softc *, int);
339 static void rxdrain(struct kse_softc *);
340 static int kse_intr(void *);
341 static void rxintr(struct kse_softc *);
342 static void txreap(struct kse_softc *);
343 static void lnkchg(struct kse_softc *);
344 static int kse_ifmedia_upd(struct ifnet *);
345 static void kse_ifmedia_sts(struct ifnet *, struct ifmediareq *);
346 static void nopifmedia_sts(struct ifnet *, struct ifmediareq *);
347 static void phy_tick(void *);
348 int kse_mii_readreg(device_t, int, int, uint16_t *);
349 int kse_mii_writereg(device_t, int, int, uint16_t);
350 void kse_mii_statchg(struct ifnet *);
351 #ifdef KSE_EVENT_COUNTERS
352 static void stat_tick(void *);
353 static void zerostats(struct kse_softc *);
354 #endif
355 
356 static const struct device_compatible_entry compat_data[] = {
357 	{ .id = PCI_ID_CODE(PCI_VENDOR_MICREL,
358 		PCI_PRODUCT_MICREL_KSZ8842) },
359 	{ .id = PCI_ID_CODE(PCI_VENDOR_MICREL,
360 		PCI_PRODUCT_MICREL_KSZ8841) },
361 
362 	PCI_COMPAT_EOL
363 };
364 
365 static int
366 kse_match(device_t parent, cfdata_t match, void *aux)
367 {
368 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
369 
370 	return PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK &&
371 	       pci_compatible_match(pa, compat_data);
372 }
373 
374 static void
375 kse_attach(device_t parent, device_t self, void *aux)
376 {
377 	struct kse_softc *sc = device_private(self);
378 	struct pci_attach_args *pa = aux;
379 	pci_chipset_tag_t pc = pa->pa_pc;
380 	pci_intr_handle_t ih;
381 	const char *intrstr;
382 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
383 	struct mii_data * const mii = &sc->sc_mii;
384 	struct ifmedia *ifm;
385 	uint8_t enaddr[ETHER_ADDR_LEN];
386 	bus_dma_segment_t seg;
387 	int i, error, nseg;
388 	char intrbuf[PCI_INTRSTR_LEN];
389 
390 	aprint_normal(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n",
391 	    PCI_PRODUCT(pa->pa_id), PCI_REVISION(pa->pa_class));
392 
393 	if (pci_mapreg_map(pa, 0x10,
394 	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
395 	    0, &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_memsize) != 0) {
396 		aprint_error_dev(self, "unable to map device registers\n");
397 		return;
398 	}
399 
400 	/* Make sure bus mastering is enabled. */
401 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
402 	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
403 	    PCI_COMMAND_MASTER_ENABLE);
404 
405 	/* Power up chip if necessary. */
406 	if ((error = pci_activate(pc, pa->pa_tag, self, NULL))
407 	    && error != EOPNOTSUPP) {
408 		aprint_error_dev(self, "cannot activate %d\n", error);
409 		return;
410 	}
411 
412 	/* Map and establish our interrupt. */
413 	if (pci_intr_map(pa, &ih)) {
414 		aprint_error_dev(self, "unable to map interrupt\n");
415 		goto fail;
416 	}
417 	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
418 	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, kse_intr, sc,
419 	    device_xname(self));
420 	if (sc->sc_ih == NULL) {
421 		aprint_error_dev(self, "unable to establish interrupt");
422 		if (intrstr != NULL)
423 			aprint_error(" at %s", intrstr);
424 		aprint_error("\n");
425 		goto fail;
426 	}
427 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
428 
429 	sc->sc_dev = self;
430 	sc->sc_dmat = pa->pa_dmat;
431 	sc->sc_pc = pa->pa_pc;
432 	sc->sc_chip = PCI_PRODUCT(pa->pa_id);
433 
434 	/*
435 	 * Read the Ethernet address from the EEPROM.
436 	 */
437 	i = CSR_READ_2(sc, MARL);
438 	enaddr[5] = i;
439 	enaddr[4] = i >> 8;
440 	i = CSR_READ_2(sc, MARM);
441 	enaddr[3] = i;
442 	enaddr[2] = i >> 8;
443 	i = CSR_READ_2(sc, MARH);
444 	enaddr[1] = i;
445 	enaddr[0] = i >> 8;
446 	aprint_normal_dev(self,
447 	    "Ethernet address %s\n", ether_sprintf(enaddr));
448 
449 	/*
450 	 * Enable chip function.
451 	 */
452 	CSR_WRITE_2(sc, SIDER, 1);
453 
454 	/*
455 	 * Allocate the control data structures, and create and load the
456 	 * DMA map for it.
457 	 */
458 	error = bus_dmamem_alloc(sc->sc_dmat,
459 	    sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
460 	if (error != 0) {
461 		aprint_error_dev(self,
462 		    "unable to allocate control data, error = %d\n", error);
463 		goto fail_0;
464 	}
465 	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
466 	    sizeof(struct kse_control_data), (void **)&sc->sc_control_data,
467 	    BUS_DMA_COHERENT);
468 	if (error != 0) {
469 		aprint_error_dev(self,
470 		    "unable to map control data, error = %d\n", error);
471 		goto fail_1;
472 	}
473 	error = bus_dmamap_create(sc->sc_dmat,
474 	    sizeof(struct kse_control_data), 1,
475 	    sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap);
476 	if (error != 0) {
477 		aprint_error_dev(self,
478 		    "unable to create control data DMA map, "
479 		    "error = %d\n", error);
480 		goto fail_2;
481 	}
482 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
483 	    sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0);
484 	if (error != 0) {
485 		aprint_error_dev(self,
486 		    "unable to load control data DMA map, error = %d\n",
487 		    error);
488 		goto fail_3;
489 	}
490 	for (i = 0; i < KSE_TXQUEUELEN; i++) {
491 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
492 		    KSE_NTXSEGS, MCLBYTES, 0, 0,
493 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
494 			aprint_error_dev(self,
495 			    "unable to create tx DMA map %d, error = %d\n",
496 			    i, error);
497 			goto fail_4;
498 		}
499 	}
500 	for (i = 0; i < KSE_NRXDESC; i++) {
501 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
502 		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
503 			aprint_error_dev(self,
504 			    "unable to create rx DMA map %d, error = %d\n",
505 			    i, error);
506 			goto fail_5;
507 		}
508 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
509 	}
510 
511 	mii->mii_ifp = ifp;
512 	mii->mii_readreg = kse_mii_readreg;
513 	mii->mii_writereg = kse_mii_writereg;
514 	mii->mii_statchg = kse_mii_statchg;
515 
516 	/* Initialize ifmedia structures. */
517 	if (sc->sc_chip == 0x8841) {
518 		/* use port 1 builtin PHY as index 1 device */
519 		sc->sc_ethercom.ec_mii = mii;
520 		ifm = &mii->mii_media;
521 		ifmedia_init(ifm, 0, kse_ifmedia_upd, kse_ifmedia_sts);
522 		mii_attach(sc->sc_dev, mii, 0xffffffff, 1 /* PHY1 */,
523 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
524 		if (LIST_FIRST(&mii->mii_phys) == NULL) {
525 			ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
526 			ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
527 		} else
528 			ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
529 	} else {
530 		/*
531 		 * pretend 100FDX w/ no alternative media selection.
532 		 * 8842 MAC is tied with a builtin 3 port switch. It can do
533 		 * 4 degree priotised rate control over either of tx/rx
534 		 * direction for any of ports, respectively. Tough, this
535 		 * driver leaves the rate unlimited intending 100Mbps maximum.
536 		 * 2 external ports behave in AN mode and this driver provides
537 		 * no mean to manipulate and see their operational details.
538 		 */
539 		sc->sc_ethercom.ec_ifmedia = ifm = &sc->sc_media;
540 		ifmedia_init(ifm, 0, NULL, nopifmedia_sts);
541 		ifmedia_add(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
542 		ifmedia_set(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX);
543 
544 		aprint_normal_dev(self,
545 		    "10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n");
546 	}
547 	ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if user has requested */
548 
549 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
550 	ifp->if_softc = sc;
551 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
552 	ifp->if_ioctl = kse_ioctl;
553 	ifp->if_start = kse_start;
554 	ifp->if_watchdog = kse_watchdog;
555 	ifp->if_init = kse_init;
556 	ifp->if_stop = kse_stop;
557 	IFQ_SET_READY(&ifp->if_snd);
558 
559 	/*
560 	 * capable of 802.1Q VLAN-sized frames and hw assisted tagging.
561 	 * can do IPv4, TCPv4, and UDPv4 checksums in hardware.
562 	 */
563 	sc->sc_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU;
564 	ifp->if_capabilities =
565 	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
566 	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
567 	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
568 
569 	sc->sc_flowflags = 0;
570 
571 	if_attach(ifp);
572 	if_deferred_start_init(ifp, NULL);
573 	ether_ifattach(ifp, enaddr);
574 
575 	callout_init(&sc->sc_tick_ch, 0);
576 	callout_setfunc(&sc->sc_tick_ch, phy_tick, sc);
577 
578 	rnd_attach_source(&sc->rnd_source, device_xname(self),
579 	    RND_TYPE_NET, RND_FLAG_DEFAULT);
580 
581 #ifdef KSE_EVENT_COUNTERS
582 	const char *events[34] = {
583 		"RxLoPriotyByte",
584 		"RxHiPriotyByte",
585 		"RxUndersizePkt",
586 		"RxFragments",
587 		"RxOversize",
588 		"RxJabbers",
589 		"RxSymbolError",
590 		"RxCRCError",
591 		"RxAlignmentError",
592 		"RxControl8808Pkts",
593 		"RxPausePkts",
594 		"RxBroadcast",
595 		"RxMulticast",
596 		"RxUnicast",
597 		"Rx64Octets",
598 		"Rx65To127Octets",
599 		"Rx128To255Octets",
600 		"Rx255To511Octets",
601 		"Rx512To1023Octets",
602 		"Rx1024To1522Octets",
603 		"TxLoPriotyByte",
604 		"TxHiPriotyByte",
605 		"TxLateCollision",
606 		"TxPausePkts",
607 		"TxBroadcastPkts",
608 		"TxMulticastPkts",
609 		"TxUnicastPkts",
610 		"TxDeferred",
611 		"TxTotalCollision",
612 		"TxExcessiveCollision",
613 		"TxSingleCollision",
614 		"TxMultipleCollision",
615 		"TxDropPkts",
616 		"RxDropPkts",
617 	};
618 	struct ksext *ee = &sc->sc_ext;
619 	int p = (sc->sc_chip == 0x8842) ? 3 : 1;
620 	for (i = 0; i < p; i++) {
621 		snprintf(ee->evcntname[i], sizeof(ee->evcntname[i]),
622 		    "%s.%d", device_xname(sc->sc_dev), i+1);
623 		for (int ev = 0; ev < 34; ev++) {
624 			evcnt_attach_dynamic(&ee->pev[i][ev], EVCNT_TYPE_MISC,
625 			    NULL, ee->evcntname[i], events[ev]);
626 		}
627 	}
628 #endif
629 	return;
630 
631  fail_5:
632 	for (i = 0; i < KSE_NRXDESC; i++) {
633 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
634 			bus_dmamap_destroy(sc->sc_dmat,
635 			    sc->sc_rxsoft[i].rxs_dmamap);
636 	}
637  fail_4:
638 	for (i = 0; i < KSE_TXQUEUELEN; i++) {
639 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
640 			bus_dmamap_destroy(sc->sc_dmat,
641 			    sc->sc_txsoft[i].txs_dmamap);
642 	}
643 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
644  fail_3:
645 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
646  fail_2:
647 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
648 	    sizeof(struct kse_control_data));
649  fail_1:
650 	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
651  fail_0:
652 	pci_intr_disestablish(pc, sc->sc_ih);
653  fail:
654 	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_memsize);
655 	return;
656 }
657 
658 static int
659 kse_ioctl(struct ifnet *ifp, u_long cmd, void *data)
660 {
661 	struct kse_softc *sc = ifp->if_softc;
662 	struct ifreq *ifr = (struct ifreq *)data;
663 	struct ifmedia *ifm;
664 	int s, error;
665 
666 	s = splnet();
667 
668 	switch (cmd) {
669 	case SIOCSIFMEDIA:
670 		/* Flow control requires full-duplex mode. */
671 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
672 		    (ifr->ifr_media & IFM_FDX) == 0)
673 			ifr->ifr_media &= ~IFM_ETH_FMASK;
674 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
675 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
676 				/* We can do both TXPAUSE and RXPAUSE. */
677 				ifr->ifr_media |=
678 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
679 			}
680 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
681 		}
682 		ifm = (sc->sc_chip == 0x8841)
683 		    ? &sc->sc_mii.mii_media : &sc->sc_media;
684 		error = ifmedia_ioctl(ifp, ifr, ifm, cmd);
685 		break;
686 	default:
687 		error = ether_ioctl(ifp, cmd, data);
688 		if (error != ENETRESET)
689 			break;
690 		error = 0;
691 		if (cmd == SIOCSIFCAP)
692 			error = (*ifp->if_init)(ifp);
693 		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
694 			;
695 		else if (ifp->if_flags & IFF_RUNNING) {
696 			/*
697 			 * Multicast list has changed; set the hardware filter
698 			 * accordingly.
699 			 */
700 			kse_set_rcvfilt(sc);
701 		}
702 		break;
703 	}
704 
705 	splx(s);
706 
707 	return error;
708 }
709 
710 static int
711 kse_init(struct ifnet *ifp)
712 {
713 	struct kse_softc *sc = ifp->if_softc;
714 	uint32_t paddr;
715 	int i, error = 0;
716 
717 	/* cancel pending I/O */
718 	kse_stop(ifp, 0);
719 
720 	/* reset all registers but PCI configuration */
721 	kse_reset(sc);
722 
723 	/* craft Tx descriptor ring */
724 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
725 	for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) {
726 		sc->sc_txdescs[i].t3 = paddr;
727 		paddr += sizeof(struct tdes);
728 	}
729 	sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0);
730 	KSE_CDTXSYNC(sc, 0, KSE_NTXDESC,
731 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
732 	sc->sc_txfree = KSE_NTXDESC;
733 	sc->sc_txnext = 0;
734 
735 	for (i = 0; i < KSE_TXQUEUELEN; i++)
736 		sc->sc_txsoft[i].txs_mbuf = NULL;
737 	sc->sc_txsfree = KSE_TXQUEUELEN;
738 	sc->sc_txsnext = 0;
739 	sc->sc_txsdirty = 0;
740 
741 	/* craft Rx descriptor ring */
742 	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
743 	for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) {
744 		sc->sc_rxdescs[i].r3 = paddr;
745 		paddr += sizeof(struct rdes);
746 	}
747 	sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0);
748 	for (i = 0; i < KSE_NRXDESC; i++) {
749 		if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
750 			if ((error = add_rxbuf(sc, i)) != 0) {
751 				aprint_error_dev(sc->sc_dev,
752 				    "unable to allocate or map rx "
753 				    "buffer %d, error = %d\n",
754 				    i, error);
755 				rxdrain(sc);
756 				goto out;
757 			}
758 		}
759 		else
760 			KSE_INIT_RXDESC(sc, i);
761 	}
762 	sc->sc_rxptr = 0;
763 
764 	/* hand Tx/Rx rings to HW */
765 	CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0));
766 	CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0));
767 
768 	sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC;
769 	sc->sc_rxc = RXC_REN | RXC_RU | RXC_RB;
770 	sc->sc_t1csum = sc->sc_mcsum = 0;
771 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
772 		sc->sc_rxc |= RXC_ICC;
773 		sc->sc_mcsum |= M_CSUM_IPv4;
774 	}
775 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
776 		sc->sc_txc |= TXC_ICG;
777 		sc->sc_t1csum |= T1_IPCKG;
778 	}
779 	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) {
780 		sc->sc_rxc |= RXC_TCC;
781 		sc->sc_mcsum |= M_CSUM_TCPv4;
782 	}
783 	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) {
784 		sc->sc_txc |= TXC_TCG;
785 		sc->sc_t1csum |= T1_TCPCKG;
786 	}
787 	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) {
788 		sc->sc_rxc |= RXC_UCC;
789 		sc->sc_mcsum |= M_CSUM_UDPv4;
790 	}
791 	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) {
792 		sc->sc_txc |= TXC_UCG;
793 		sc->sc_t1csum |= T1_UDPCKG;
794 	}
795 	sc->sc_txc |= (kse_burstsize << TXC_BS_SFT);
796 	sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT);
797 
798 	if (sc->sc_chip == 0x8842) {
799 		/* make PAUSE flow control to run */
800 		sc->sc_txc |= TXC_FCE;
801 		sc->sc_rxc |= RXC_FCE;
802 		i = CSR_READ_2(sc, SGCR3);
803 		CSR_WRITE_2(sc, SGCR3, i | CR3_USEFC);
804 	}
805 
806 	/* accept multicast frame or run promisc mode */
807 	kse_set_rcvfilt(sc);
808 
809 	/* set current media */
810 	if (sc->sc_chip == 0x8841)
811 		(void)kse_ifmedia_upd(ifp);
812 
813 	/* enable transmitter and receiver */
814 	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
815 	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
816 	CSR_WRITE_4(sc, MDRSC, 1);
817 
818 	/* enable interrupts */
819 	sc->sc_inten = INT_DMTS | INT_DMRS | INT_DMRBUS;
820 	if (sc->sc_chip == 0x8841)
821 		sc->sc_inten |= INT_DMLCS;
822 	CSR_WRITE_4(sc, INTST, ~0);
823 	CSR_WRITE_4(sc, INTEN, sc->sc_inten);
824 
825 	ifp->if_flags |= IFF_RUNNING;
826 	ifp->if_flags &= ~IFF_OACTIVE;
827 
828 	/* start one second timer */
829 	callout_schedule(&sc->sc_tick_ch, hz);
830 
831 #ifdef KSE_EVENT_COUNTERS
832 	zerostats(sc);
833 #endif
834 
835  out:
836 	if (error) {
837 		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
838 		ifp->if_timer = 0;
839 		aprint_error_dev(sc->sc_dev, "interface not running\n");
840 	}
841 	return error;
842 }
843 
844 static void
845 kse_stop(struct ifnet *ifp, int disable)
846 {
847 	struct kse_softc *sc = ifp->if_softc;
848 	struct kse_txsoft *txs;
849 	int i;
850 
851 	callout_stop(&sc->sc_tick_ch);
852 
853 	sc->sc_txc &= ~TXC_TEN;
854 	sc->sc_rxc &= ~RXC_REN;
855 	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
856 	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
857 
858 	for (i = 0; i < KSE_TXQUEUELEN; i++) {
859 		txs = &sc->sc_txsoft[i];
860 		if (txs->txs_mbuf != NULL) {
861 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
862 			m_freem(txs->txs_mbuf);
863 			txs->txs_mbuf = NULL;
864 		}
865 	}
866 
867 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
868 	ifp->if_timer = 0;
869 
870 	if (disable)
871 		rxdrain(sc);
872 }
873 
874 static void
875 kse_reset(struct kse_softc *sc)
876 {
877 
878 	/* software reset */
879 	CSR_WRITE_2(sc, GRR, 1);
880 	delay(1000); /* PDF does not mention the delay amount */
881 	CSR_WRITE_2(sc, GRR, 0);
882 
883 	/* enable switch function */
884 	CSR_WRITE_2(sc, SIDER, 1);
885 }
886 
887 static void
888 kse_watchdog(struct ifnet *ifp)
889 {
890 	struct kse_softc *sc = ifp->if_softc;
891 
892 	/*
893 	 * Since we're not interrupting every packet, sweep
894 	 * up before we report an error.
895 	 */
896 	txreap(sc);
897 
898 	if (sc->sc_txfree != KSE_NTXDESC) {
899 		aprint_error_dev(sc->sc_dev,
900 		    "device timeout (txfree %d txsfree %d txnext %d)\n",
901 		    sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
902 		if_statinc(ifp, if_oerrors);
903 
904 		/* Reset the interface. */
905 		kse_init(ifp);
906 	}
907 	else if (ifp->if_flags & IFF_DEBUG)
908 		aprint_error_dev(sc->sc_dev, "recovered from device timeout\n");
909 
910 	/* Try to get more packets going. */
911 	kse_start(ifp);
912 }
913 
914 static void
915 kse_start(struct ifnet *ifp)
916 {
917 	struct kse_softc *sc = ifp->if_softc;
918 	struct mbuf *m0, *m;
919 	struct kse_txsoft *txs;
920 	bus_dmamap_t dmamap;
921 	int error, nexttx, lasttx, ofree, seg;
922 	uint32_t tdes0;
923 
924 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
925 		return;
926 
927 	/* Remember the previous number of free descriptors. */
928 	ofree = sc->sc_txfree;
929 
930 	/*
931 	 * Loop through the send queue, setting up transmit descriptors
932 	 * until we drain the queue, or use up all available transmit
933 	 * descriptors.
934 	 */
935 	for (;;) {
936 		IFQ_POLL(&ifp->if_snd, m0);
937 		if (m0 == NULL)
938 			break;
939 
940 		if (sc->sc_txsfree < KSE_TXQUEUE_GC) {
941 			txreap(sc);
942 			if (sc->sc_txsfree == 0)
943 				break;
944 		}
945 		txs = &sc->sc_txsoft[sc->sc_txsnext];
946 		dmamap = txs->txs_dmamap;
947 
948 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
949 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
950 		if (error) {
951 			if (error == EFBIG) {
952 				aprint_error_dev(sc->sc_dev,
953 				    "Tx packet consumes too many "
954 				    "DMA segments, dropping...\n");
955 				    IFQ_DEQUEUE(&ifp->if_snd, m0);
956 				    m_freem(m0);
957 				    continue;
958 			}
959 			/* Short on resources, just stop for now. */
960 			break;
961 		}
962 
963 		if (dmamap->dm_nsegs > sc->sc_txfree) {
964 			/*
965 			 * Not enough free descriptors to transmit this
966 			 * packet.  We haven't committed anything yet,
967 			 * so just unload the DMA map, put the packet
968 			 * back on the queue, and punt.	 Notify the upper
969 			 * layer that there are not more slots left.
970 			 */
971 			ifp->if_flags |= IFF_OACTIVE;
972 			bus_dmamap_unload(sc->sc_dmat, dmamap);
973 			break;
974 		}
975 
976 		IFQ_DEQUEUE(&ifp->if_snd, m0);
977 
978 		/*
979 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
980 		 */
981 
982 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
983 		    BUS_DMASYNC_PREWRITE);
984 
985 		tdes0 = 0; /* to postpone 1st segment T0_OWN write */
986 		lasttx = -1;
987 		for (nexttx = sc->sc_txnext, seg = 0;
988 		     seg < dmamap->dm_nsegs;
989 		     seg++, nexttx = KSE_NEXTTX(nexttx)) {
990 			struct tdes *tdes = &sc->sc_txdescs[nexttx];
991 			/*
992 			 * If this is the first descriptor we're
993 			 * enqueueing, don't set the OWN bit just
994 			 * yet.	 That could cause a race condition.
995 			 * We'll do it below.
996 			 */
997 			tdes->t2 = dmamap->dm_segs[seg].ds_addr;
998 			tdes->t1 = sc->sc_t1csum
999 			     | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK);
1000 			tdes->t0 = tdes0;
1001 			tdes0 = T0_OWN; /* 2nd and other segments */
1002 			lasttx = nexttx;
1003 		}
1004 		/*
1005 		 * Outgoing NFS mbuf must be unloaded when Tx completed.
1006 		 * Without T1_IC NFS mbuf is left unack'ed for excessive
1007 		 * time and NFS stops to proceed until kse_watchdog()
1008 		 * calls txreap() to reclaim the unack'ed mbuf.
1009 		 * It's painful to traverse every mbuf chain to determine
1010 		 * whether someone is waiting for Tx completion.
1011 		 */
1012 		m = m0;
1013 		do {
1014 			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
1015 				sc->sc_txdescs[lasttx].t1 |= T1_IC;
1016 				break;
1017 			}
1018 		} while ((m = m->m_next) != NULL);
1019 
1020 		/* Write deferred 1st segment T0_OWN at the final stage */
1021 		sc->sc_txdescs[lasttx].t1 |= T1_LS;
1022 		sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS;
1023 		sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN;
1024 		KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1025 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1026 
1027 		/* Tell DMA start transmit */
1028 		CSR_WRITE_4(sc, MDTSC, 1);
1029 
1030 		txs->txs_mbuf = m0;
1031 		txs->txs_firstdesc = sc->sc_txnext;
1032 		txs->txs_lastdesc = lasttx;
1033 		txs->txs_ndesc = dmamap->dm_nsegs;
1034 
1035 		sc->sc_txfree -= txs->txs_ndesc;
1036 		sc->sc_txnext = nexttx;
1037 		sc->sc_txsfree--;
1038 		sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext);
1039 		/*
1040 		 * Pass the packet to any BPF listeners.
1041 		 */
1042 		bpf_mtap(ifp, m0, BPF_D_OUT);
1043 	}
1044 
1045 	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
1046 		/* No more slots left; notify upper layer. */
1047 		ifp->if_flags |= IFF_OACTIVE;
1048 	}
1049 	if (sc->sc_txfree != ofree) {
1050 		/* Set a watchdog timer in case the chip flakes out. */
1051 		ifp->if_timer = 5;
1052 	}
1053 }
1054 
1055 static void
1056 kse_set_rcvfilt(struct kse_softc *sc)
1057 {
1058 	struct ether_multistep step;
1059 	struct ether_multi *enm;
1060 	struct ethercom *ec = &sc->sc_ethercom;
1061 	struct ifnet *ifp = &ec->ec_if;
1062 	uint32_t crc, mchash[2];
1063 	int i;
1064 
1065 	sc->sc_rxc &= ~(RXC_MHTE | RXC_RM | RXC_RA);
1066 
1067 	/* clear perfect match filter and prepare mcast hash table */
1068 	for (i = 0; i < 16; i++)
1069 		 CSR_WRITE_4(sc, MAAH0 + i*8, 0);
1070 	crc = mchash[0] = mchash[1] = 0;
1071 
1072 	ETHER_LOCK(ec);
1073 	if (ifp->if_flags & IFF_PROMISC) {
1074 		ec->ec_flags |= ETHER_F_ALLMULTI;
1075 		ETHER_UNLOCK(ec);
1076 		/* run promisc. mode */
1077 		sc->sc_rxc |= RXC_RA;
1078 		goto update;
1079 	}
1080 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
1081 	ETHER_FIRST_MULTI(step, ec, enm);
1082 	i = 0;
1083 	while (enm != NULL) {
1084 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1085 			/*
1086 			 * We must listen to a range of multicast addresses.
1087 			 * For now, just accept all multicasts, rather than
1088 			 * trying to set only those filter bits needed to match
1089 			 * the range.  (At this time, the only use of address
1090 			 * ranges is for IP multicast routing, for which the
1091 			 * range is big enough to require all bits set.)
1092 			 */
1093 			ec->ec_flags |= ETHER_F_ALLMULTI;
1094 			ETHER_UNLOCK(ec);
1095 			/* accept all multicast */
1096 			sc->sc_rxc |= RXC_RM;
1097 			goto update;
1098 		}
1099 #if KSE_MCASTDEBUG == 1
1100 		printf("[%d] %s\n", i, ether_sprintf(enm->enm_addrlo));
1101 #endif
1102 		if (i < 16) {
1103 			/* use 16 additional MAC addr to accept mcast */
1104 			uint32_t addr;
1105 			uint8_t *ep = enm->enm_addrlo;
1106 			addr = (ep[3] << 24) | (ep[2] << 16)
1107 			     | (ep[1] << 8)  |  ep[0];
1108 			CSR_WRITE_4(sc, MAAL0 + i*8, addr);
1109 			addr = (ep[5] << 8) | ep[4];
1110 			CSR_WRITE_4(sc, MAAH0 + i*8, addr | (1U << 31));
1111 		} else {
1112 			/* use hash table when too many */
1113 			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1114 			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
1115 		}
1116 		ETHER_NEXT_MULTI(step, enm);
1117 		i++;
1118 	}
1119 	ETHER_UNLOCK(ec);
1120 
1121 	if (crc)
1122 		sc->sc_rxc |= RXC_MHTE;
1123 	CSR_WRITE_4(sc, MTR0, mchash[0]);
1124 	CSR_WRITE_4(sc, MTR1, mchash[1]);
1125  update:
1126 	/* With RA or RM, MHTE/MTR0/MTR1 are never consulted. */
1127 	return;
1128 }
1129 
1130 static int
1131 add_rxbuf(struct kse_softc *sc, int idx)
1132 {
1133 	struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx];
1134 	struct mbuf *m;
1135 	int error;
1136 
1137 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1138 	if (m == NULL)
1139 		return ENOBUFS;
1140 
1141 	MCLGET(m, M_DONTWAIT);
1142 	if ((m->m_flags & M_EXT) == 0) {
1143 		m_freem(m);
1144 		return ENOBUFS;
1145 	}
1146 
1147 	if (rxs->rxs_mbuf != NULL)
1148 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1149 
1150 	rxs->rxs_mbuf = m;
1151 
1152 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
1153 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1154 	if (error) {
1155 		aprint_error_dev(sc->sc_dev,
1156 		    "can't load rx DMA map %d, error = %d\n", idx, error);
1157 		panic("kse_add_rxbuf");
1158 	}
1159 
1160 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1161 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1162 
1163 	KSE_INIT_RXDESC(sc, idx);
1164 
1165 	return 0;
1166 }
1167 
1168 static void
1169 rxdrain(struct kse_softc *sc)
1170 {
1171 	struct kse_rxsoft *rxs;
1172 	int i;
1173 
1174 	for (i = 0; i < KSE_NRXDESC; i++) {
1175 		rxs = &sc->sc_rxsoft[i];
1176 		if (rxs->rxs_mbuf != NULL) {
1177 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1178 			m_freem(rxs->rxs_mbuf);
1179 			rxs->rxs_mbuf = NULL;
1180 		}
1181 	}
1182 }
1183 
1184 static int
1185 kse_intr(void *arg)
1186 {
1187 	struct kse_softc *sc = arg;
1188 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1189 	uint32_t isr;
1190 
1191 	if ((isr = CSR_READ_4(sc, INTST)) == 0)
1192 		return 0;
1193 
1194 	if (isr & INT_DMRS)
1195 		rxintr(sc);
1196 	if (isr & INT_DMTS)
1197 		txreap(sc);
1198 	if (isr & INT_DMLCS)
1199 		lnkchg(sc);
1200 	if (isr & INT_DMRBUS)
1201 		aprint_error_dev(sc->sc_dev, "Rx descriptor full\n");
1202 
1203 	CSR_WRITE_4(sc, INTST, isr);
1204 
1205 	if (ifp->if_flags & IFF_RUNNING)
1206 		if_schedule_deferred_start(ifp);
1207 
1208 	return 1;
1209 }
1210 
1211 static void
1212 rxintr(struct kse_softc *sc)
1213 {
1214 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1215 	struct kse_rxsoft *rxs;
1216 	struct mbuf *m;
1217 	uint32_t rxstat;
1218 	int i, len;
1219 
1220 	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) {
1221 		rxs = &sc->sc_rxsoft[i];
1222 
1223 		KSE_CDRXSYNC(sc, i,
1224 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1225 
1226 		rxstat = sc->sc_rxdescs[i].r0;
1227 
1228 		if (rxstat & R0_OWN) /* desc is left empty */
1229 			break;
1230 
1231 		/* R0_FS | R0_LS must have been marked for this desc */
1232 
1233 		if (rxstat & R0_ES) {
1234 			if_statinc(ifp, if_ierrors);
1235 #define PRINTERR(bit, str)						\
1236 			if (rxstat & (bit))				\
1237 				aprint_error_dev(sc->sc_dev,		\
1238 				    "%s\n", str)
1239 			PRINTERR(R0_TL, "frame too long");
1240 			PRINTERR(R0_RF, "runt frame");
1241 			PRINTERR(R0_CE, "bad FCS");
1242 #undef PRINTERR
1243 			KSE_INIT_RXDESC(sc, i);
1244 			continue;
1245 		}
1246 
1247 		/* HW errata; frame might be too small or too large */
1248 
1249 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1250 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1251 
1252 		len = rxstat & R0_FL_MASK;
1253 		len -= ETHER_CRC_LEN;	/* Trim CRC off */
1254 		m = rxs->rxs_mbuf;
1255 
1256 		if (add_rxbuf(sc, i) != 0) {
1257 			if_statinc(ifp, if_ierrors);
1258 			KSE_INIT_RXDESC(sc, i);
1259 			bus_dmamap_sync(sc->sc_dmat,
1260 			    rxs->rxs_dmamap, 0,
1261 			    rxs->rxs_dmamap->dm_mapsize,
1262 			    BUS_DMASYNC_PREREAD);
1263 			continue;
1264 		}
1265 
1266 		m_set_rcvif(m, ifp);
1267 		m->m_pkthdr.len = m->m_len = len;
1268 
1269 		if (sc->sc_mcsum) {
1270 			m->m_pkthdr.csum_flags |= sc->sc_mcsum;
1271 			if (rxstat & R0_IPE)
1272 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1273 			if (rxstat & (R0_TCPE | R0_UDPE))
1274 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1275 		}
1276 		if_percpuq_enqueue(ifp->if_percpuq, m);
1277 #ifdef KSEDIAGNOSTIC
1278 		if (kse_monitor_rxintr > 0) {
1279 			aprint_error_dev(sc->sc_dev,
1280 			    "m stat %x data %p len %d\n",
1281 			    rxstat, m->m_data, m->m_len);
1282 		}
1283 #endif
1284 	}
1285 	sc->sc_rxptr = i;
1286 }
1287 
1288 static void
1289 txreap(struct kse_softc *sc)
1290 {
1291 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1292 	struct kse_txsoft *txs;
1293 	uint32_t txstat;
1294 	int i;
1295 
1296 	ifp->if_flags &= ~IFF_OACTIVE;
1297 
1298 	for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN;
1299 	     i = KSE_NEXTTXS(i), sc->sc_txsfree++) {
1300 		txs = &sc->sc_txsoft[i];
1301 
1302 		KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
1303 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1304 
1305 		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;
1306 
1307 		if (txstat & T0_OWN) /* desc is still in use */
1308 			break;
1309 
1310 		/* There is no way to tell transmission status per frame */
1311 
1312 		if_statinc(ifp, if_opackets);
1313 
1314 		sc->sc_txfree += txs->txs_ndesc;
1315 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1316 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1317 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1318 		m_freem(txs->txs_mbuf);
1319 		txs->txs_mbuf = NULL;
1320 	}
1321 	sc->sc_txsdirty = i;
1322 	if (sc->sc_txsfree == KSE_TXQUEUELEN)
1323 		ifp->if_timer = 0;
1324 }
1325 
1326 static void
1327 lnkchg(struct kse_softc *sc)
1328 {
1329 	struct ifmediareq ifmr;
1330 
1331 #if KSE_LINKDEBUG == 1
1332 	uint16_t p1sr = CSR_READ_2(sc, P1SR);
1333 printf("link %s detected\n", (p1sr & PxSR_LINKUP) ? "up" : "down");
1334 #endif
1335 	kse_ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
1336 }
1337 
1338 static int
1339 kse_ifmedia_upd(struct ifnet *ifp)
1340 {
1341 	struct kse_softc *sc = ifp->if_softc;
1342 	struct ifmedia *ifm = &sc->sc_mii.mii_media;
1343 	uint16_t p1cr4;
1344 
1345 	p1cr4 = 0;
1346 	if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_AUTO) {
1347 		p1cr4 |= PxCR_STARTNEG;	/* restart AN */
1348 		p1cr4 |= PxCR_AUTOEN;	/* enable AN */
1349 		p1cr4 |= PxCR_USEFC;	/* advertise flow control pause */
1350 		p1cr4 |= 0xf;		/* adv. 100FDX,100HDX,10FDX,10HDX */
1351 	} else {
1352 		if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
1353 			p1cr4 |= PxCR_SPD100;
1354 		if (ifm->ifm_media & IFM_FDX)
1355 			p1cr4 |= PxCR_USEFDX;
1356 	}
1357 	CSR_WRITE_2(sc, P1CR4, p1cr4);
1358 #if KSE_LINKDEBUG == 1
1359 printf("P1CR4: %04x\n", p1cr4);
1360 #endif
1361 	return 0;
1362 }
1363 
1364 static void
1365 kse_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1366 {
1367 	struct kse_softc *sc = ifp->if_softc;
1368 	struct mii_data *mii = &sc->sc_mii;
1369 
1370 	mii_pollstat(mii);
1371 	ifmr->ifm_status = mii->mii_media_status;
1372 	ifmr->ifm_active = sc->sc_flowflags |
1373 	    (mii->mii_media_active & ~IFM_ETH_FMASK);
1374 }
1375 
1376 static void
1377 nopifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1378 {
1379 	struct kse_softc *sc = ifp->if_softc;
1380 	struct ifmedia *ifm = &sc->sc_media;
1381 
1382 #if KSE_LINKDEBUG == 2
1383 printf("p1sr: %04x, p2sr: %04x\n", CSR_READ_2(sc, P1SR), CSR_READ_2(sc, P2SR));
1384 #endif
1385 
1386 	/* 8842 MAC pretends 100FDX all the time */
1387 	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1388 	ifmr->ifm_active = ifm->ifm_cur->ifm_media |
1389 	    IFM_FLOW | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
1390 }
1391 
1392 static void
1393 phy_tick(void *arg)
1394 {
1395 	struct kse_softc *sc = arg;
1396 	struct mii_data *mii = &sc->sc_mii;
1397 	int s;
1398 
1399 	if (sc->sc_chip == 0x8841) {
1400 		s = splnet();
1401 		mii_tick(mii);
1402 		splx(s);
1403 	}
1404 #ifdef KSE_EVENT_COUNTERS
1405 	stat_tick(arg);
1406 #endif
1407 	callout_schedule(&sc->sc_tick_ch, hz);
1408 }
1409 
1410 static const uint16_t phy1csr[] = {
1411 	/* 0 BMCR */	0x4d0,
1412 	/* 1 BMSR */	0x4d2,
1413 	/* 2 PHYID1 */	0x4d6,	/* 0x0022 - PHY1HR */
1414 	/* 3 PHYID2 */	0x4d4,	/* 0x1430 - PHY1LR */
1415 	/* 4 ANAR */	0x4d8,
1416 	/* 5 ANLPAR */	0x4da,
1417 };
1418 
1419 int
1420 kse_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
1421 {
1422 	struct kse_softc *sc = device_private(self);
1423 
1424 	if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
1425 		return EINVAL;
1426 	*val = CSR_READ_2(sc, phy1csr[reg]);
1427 	return 0;
1428 }
1429 
1430 int
1431 kse_mii_writereg(device_t self, int phy, int reg, uint16_t val)
1432 {
1433 	struct kse_softc *sc = device_private(self);
1434 
1435 	if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
1436 		return EINVAL;
1437 	CSR_WRITE_2(sc, phy1csr[reg], val);
1438 	return 0;
1439 }
1440 
1441 void
1442 kse_mii_statchg(struct ifnet *ifp)
1443 {
1444 	struct kse_softc *sc = ifp->if_softc;
1445 	struct mii_data *mii = &sc->sc_mii;
1446 
1447 #if KSE_LINKDEBUG == 1
1448 	/* decode P1SR register value */
1449 	uint16_t p1sr = CSR_READ_2(sc, P1SR);
1450 	printf("P1SR %04x, spd%d", p1sr, (p1sr & PxSR_SPD100) ? 100 : 10);
1451 	if (p1sr & PxSR_FDX)
1452 		printf(",full-duplex");
1453 	if (p1sr & PxSR_RXFLOW)
1454 		printf(",rxpause");
1455 	if (p1sr & PxSR_TXFLOW)
1456 		printf(",txpause");
1457 	printf("\n");
1458 	/* show resolved mii(4) parameters to compare against above */
1459 	printf("MII spd%d",
1460 	    (int)(sc->sc_ethercom.ec_if.if_baudrate / IF_Mbps(1)));
1461 	if (mii->mii_media_active & IFM_FDX)
1462 		printf(",full-duplex");
1463 	if (mii->mii_media_active & IFM_FLOW) {
1464 		printf(",flowcontrol");
1465 		if (mii->mii_media_active & IFM_ETH_RXPAUSE)
1466 			printf(",rxpause");
1467 		if (mii->mii_media_active & IFM_ETH_TXPAUSE)
1468 			printf(",txpause");
1469 	}
1470 	printf("\n");
1471 #endif
1472 	/* Get flow control negotiation result. */
1473 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
1474 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags)
1475 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
1476 
1477 	/* Adjust MAC PAUSE flow control. */
1478 	if ((mii->mii_media_active & IFM_FDX)
1479 	    && (sc->sc_flowflags & IFM_ETH_TXPAUSE))
1480 		sc->sc_txc |= TXC_FCE;
1481 	else
1482 		sc->sc_txc &= ~TXC_FCE;
1483 	if ((mii->mii_media_active & IFM_FDX)
1484 	    && (sc->sc_flowflags & IFM_ETH_RXPAUSE))
1485 		sc->sc_rxc |= RXC_FCE;
1486 	else
1487 		sc->sc_rxc &= ~RXC_FCE;
1488 	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
1489 	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
1490 #if KSE_LINKDEBUG == 1
1491 	printf("%ctxfce, %crxfce\n",
1492 	    (sc->sc_txc & TXC_FCE) ? '+' : '-',
1493 	    (sc->sc_rxc & RXC_FCE) ? '+' : '-');
1494 #endif
1495 }
1496 
1497 #ifdef KSE_EVENT_COUNTERS
1498 static void
1499 stat_tick(void *arg)
1500 {
1501 	struct kse_softc *sc = arg;
1502 	struct ksext *ee = &sc->sc_ext;
1503 	int nport, p, i, reg, val;
1504 
1505 	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
1506 	for (p = 0; p < nport; p++) {
1507 		/* read 34 ev counters by indirect read via IACR */
1508 		for (i = 0; i < 32; i++) {
1509 			reg = EVCNTBR + p * 0x20 + i;
1510 			CSR_WRITE_2(sc, IACR, reg);
1511 			/* 30-bit counter value are halved in IADR5 & IADR4 */
1512 			do {
1513 				val = CSR_READ_2(sc, IADR5) << 16;
1514 			} while ((val & IADR_LATCH) == 0);
1515 			if (val & IADR_OVF) {
1516 				(void)CSR_READ_2(sc, IADR4);
1517 				val = 0x3fffffff; /* has made overflow */
1518 			}
1519 			else {
1520 				val &= 0x3fff0000;		/* 29:16 */
1521 				val |= CSR_READ_2(sc, IADR4);	/* 15:0 */
1522 			}
1523 			ee->pev[p][i].ev_count += val; /* ev0 thru 31 */
1524 		}
1525 		/* ev32 and ev33 are 16-bit counter */
1526 		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p);
1527 		ee->pev[p][32].ev_count += CSR_READ_2(sc, IADR4); /* ev32 */
1528 		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p * 3 + 1);
1529 		ee->pev[p][33].ev_count += CSR_READ_2(sc, IADR4); /* ev33 */
1530 	}
1531 }
1532 
1533 static void
1534 zerostats(struct kse_softc *sc)
1535 {
1536 	struct ksext *ee = &sc->sc_ext;
1537 	int nport, p, i, reg, val;
1538 
1539 	/* Make sure all the HW counters get zero */
1540 	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
1541 	for (p = 0; p < nport; p++) {
1542 		for (i = 0; i < 32; i++) {
1543 			reg = EVCNTBR + p * 0x20 + i;
1544 			CSR_WRITE_2(sc, IACR, reg);
1545 			do {
1546 				val = CSR_READ_2(sc, IADR5) << 16;
1547 			} while ((val & IADR_LATCH) == 0);
1548 			(void)CSR_READ_2(sc, IADR4);
1549 			ee->pev[p][i].ev_count = 0;
1550 		}
1551 		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p);
1552 		(void)CSR_READ_2(sc, IADR4);
1553 		CSR_WRITE_2(sc, IACR, EVCNTBR + 0x100 + p * 3 + 1);
1554 		(void)CSR_READ_2(sc, IADR4);
1555 		ee->pev[p][32].ev_count = 0;
1556 		ee->pev[p][33].ev_count = 0;
1557 	}
1558 }
1559 #endif
1560