1 /* $NetBSD: if_kse.c,v 1.53 2020/04/01 04:00:14 nisimura Exp $ */ 2 3 /*- 4 * Copyright (c) 2006 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Tohru Nishimura. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * Micrel 8841/8842 10/100 PCI ethernet driver 34 */ 35 36 #include <sys/cdefs.h> 37 __KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.53 2020/04/01 04:00:14 nisimura Exp $"); 38 39 #include <sys/param.h> 40 #include <sys/bus.h> 41 #include <sys/intr.h> 42 #include <sys/device.h> 43 #include <sys/callout.h> 44 #include <sys/ioctl.h> 45 #include <sys/malloc.h> 46 #include <sys/mbuf.h> 47 #include <sys/errno.h> 48 #include <sys/systm.h> 49 #include <sys/kernel.h> 50 51 #include <net/if.h> 52 #include <net/if_media.h> 53 #include <net/if_dl.h> 54 #include <net/if_ether.h> 55 #include <dev/mii/mii.h> 56 #include <dev/mii/miivar.h> 57 #include <net/bpf.h> 58 59 #include <dev/pci/pcivar.h> 60 #include <dev/pci/pcireg.h> 61 #include <dev/pci/pcidevs.h> 62 63 #define KSE_LINKDEBUG 0 64 65 #define CSR_READ_4(sc, off) \ 66 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off)) 67 #define CSR_WRITE_4(sc, off, val) \ 68 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val)) 69 #define CSR_READ_2(sc, off) \ 70 bus_space_read_2((sc)->sc_st, (sc)->sc_sh, (off)) 71 #define CSR_WRITE_2(sc, off, val) \ 72 bus_space_write_2((sc)->sc_st, (sc)->sc_sh, (off), (val)) 73 74 #define MDTXC 0x000 /* DMA transmit control */ 75 #define MDRXC 0x004 /* DMA receive control */ 76 #define MDTSC 0x008 /* DMA transmit start */ 77 #define MDRSC 0x00c /* DMA receive start */ 78 #define TDLB 0x010 /* transmit descriptor list base */ 79 #define RDLB 0x014 /* receive descriptor list base */ 80 #define MTR0 0x020 /* multicast table 31:0 */ 81 #define MTR1 0x024 /* multicast table 63:32 */ 82 #define INTEN 0x028 /* interrupt enable */ 83 #define INTST 0x02c /* interrupt status */ 84 #define MAAL0 0x080 /* additional MAC address 0 low */ 85 #define MAAH0 0x084 /* additional MAC address 0 high */ 86 #define MARL 0x200 /* MAC address low */ 87 #define MARM 0x202 /* MAC address middle */ 88 #define MARH 0x204 /* MAC address high */ 89 #define GRR 0x216 /* global reset */ 90 #define SIDER 0x400 /* switch ID and function enable */ 91 #define SGCR3 0x406 /* switch function control 3 */ 92 #define CR3_USEHDX (1U<<6) /* use half-duplex 8842 host port */ 93 #define CR3_USEFC (1U<<5) /* use flowcontrol 8842 host port */ 94 #define IACR 0x4a0 /* indirect access control */ 95 #define IADR1 0x4a2 /* indirect access data 66:63 */ 96 #define IADR2 0x4a4 /* indirect access data 47:32 */ 97 #define IADR3 0x4a6 /* indirect access data 63:48 */ 98 #define IADR4 0x4a8 /* indirect access data 15:0 */ 99 #define IADR5 0x4aa /* indirect access data 31:16 */ 100 #define P1CR4 0x512 /* port 1 control 4 */ 101 #define P1SR 0x514 /* port 1 status */ 102 #define P2CR4 0x532 /* port 2 control 4 */ 103 #define P2SR 0x534 /* port 2 status */ 104 #define PxCR_STARTNEG (1U<<9) /* restart auto negotiation */ 105 #define PxCR_AUTOEN (1U<<7) /* auto negotiation enable */ 106 #define PxCR_SPD100 (1U<<6) /* force speed 100 */ 107 #define PxCR_USEFDX (1U<<5) /* force full duplex */ 108 #define PxCR_USEFC (1U<<4) /* advertise pause flow control */ 109 #define PxSR_ACOMP (1U<<6) /* auto negotiation completed */ 110 #define PxSR_SPD100 (1U<<10) /* speed is 100Mbps */ 111 #define PxSR_FDX (1U<<9) /* full duplex */ 112 #define PxSR_LINKUP (1U<<5) /* link is good */ 113 #define PxSR_RXFLOW (1U<<12) /* receive flow control active */ 114 #define PxSR_TXFLOW (1U<<11) /* transmit flow control active */ 115 #define P1VIDCR 0x504 /* port 1 vtag */ 116 #define P2VIDCR 0x524 /* port 2 vtag */ 117 #define P3VIDCR 0x544 /* 8842 host vtag */ 118 119 #define TXC_BS_MSK 0x3f000000 /* burst size */ 120 #define TXC_BS_SFT (24) /* 1,2,4,8,16,32 or 0 for unlimited */ 121 #define TXC_UCG (1U<<18) /* generate UDP checksum */ 122 #define TXC_TCG (1U<<17) /* generate TCP checksum */ 123 #define TXC_ICG (1U<<16) /* generate IP checksum */ 124 #define TXC_FCE (1U<<9) /* generate PAUSE to moderate Rx lvl */ 125 #define TXC_EP (1U<<2) /* enable automatic padding */ 126 #define TXC_AC (1U<<1) /* add CRC to frame */ 127 #define TXC_TEN (1) /* enable DMA to run */ 128 129 #define RXC_BS_MSK 0x3f000000 /* burst size */ 130 #define RXC_BS_SFT (24) /* 1,2,4,8,16,32 or 0 for unlimited */ 131 #define RXC_IHAE (1U<<19) /* IP header alignment enable */ 132 #define RXC_UCC (1U<<18) /* run UDP checksum */ 133 #define RXC_TCC (1U<<17) /* run TDP checksum */ 134 #define RXC_ICC (1U<<16) /* run IP checksum */ 135 #define RXC_FCE (1U<<9) /* accept PAUSE to throttle Tx */ 136 #define RXC_RB (1U<<6) /* receive broadcast frame */ 137 #define RXC_RM (1U<<5) /* receive all multicast (inc. RB) */ 138 #define RXC_RU (1U<<4) /* receive 16 additional unicasts */ 139 #define RXC_RE (1U<<3) /* accept error frame */ 140 #define RXC_RA (1U<<2) /* receive all frame */ 141 #define RXC_MHTE (1U<<1) /* use multicast hash table */ 142 #define RXC_REN (1) /* enable DMA to run */ 143 144 #define INT_DMLCS (1U<<31) /* link status change */ 145 #define INT_DMTS (1U<<30) /* sending desc. has posted Tx done */ 146 #define INT_DMRS (1U<<29) /* frame was received */ 147 #define INT_DMRBUS (1U<<27) /* Rx descriptor pool is full */ 148 #define INT_DMxPSS (3U<<25) /* 26:25 DMA Tx/Rx have stopped */ 149 150 #define T0_OWN (1U<<31) /* desc is ready to Tx */ 151 152 #define R0_OWN (1U<<31) /* desc is empty */ 153 #define R0_FS (1U<<30) /* first segment of frame */ 154 #define R0_LS (1U<<29) /* last segment of frame */ 155 #define R0_IPE (1U<<28) /* IP checksum error */ 156 #define R0_TCPE (1U<<27) /* TCP checksum error */ 157 #define R0_UDPE (1U<<26) /* UDP checksum error */ 158 #define R0_ES (1U<<25) /* error summary */ 159 #define R0_MF (1U<<24) /* multicast frame */ 160 #define R0_SPN 0x00300000 /* 21:20 switch port 1/2 */ 161 #define R0_ALIGN 0x00300000 /* 21:20 (KSZ8692P) Rx align amount */ 162 #define R0_RE (1U<<19) /* MII reported error */ 163 #define R0_TL (1U<<18) /* frame too long, beyond 1518 */ 164 #define R0_RF (1U<<17) /* damaged runt frame */ 165 #define R0_CE (1U<<16) /* CRC error */ 166 #define R0_FT (1U<<15) /* frame type */ 167 #define R0_FL_MASK 0x7ff /* frame length 10:0 */ 168 169 #define T1_IC (1U<<31) /* post interrupt on complete */ 170 #define T1_FS (1U<<30) /* first segment of frame */ 171 #define T1_LS (1U<<29) /* last segment of frame */ 172 #define T1_IPCKG (1U<<28) /* generate IP checksum */ 173 #define T1_TCPCKG (1U<<27) /* generate TCP checksum */ 174 #define T1_UDPCKG (1U<<26) /* generate UDP checksum */ 175 #define T1_TER (1U<<25) /* end of ring */ 176 #define T1_SPN 0x00300000 /* 21:20 switch port 1/2 */ 177 #define T1_TBS_MASK 0x7ff /* segment size 10:0 */ 178 179 #define R1_RER (1U<<25) /* end of ring */ 180 #define R1_RBS_MASK 0x7fc /* segment size 10:0 */ 181 182 #define KSE_NTXSEGS 16 183 #define KSE_TXQUEUELEN 64 184 #define KSE_TXQUEUELEN_MASK (KSE_TXQUEUELEN - 1) 185 #define KSE_TXQUEUE_GC (KSE_TXQUEUELEN / 4) 186 #define KSE_NTXDESC 256 187 #define KSE_NTXDESC_MASK (KSE_NTXDESC - 1) 188 #define KSE_NEXTTX(x) (((x) + 1) & KSE_NTXDESC_MASK) 189 #define KSE_NEXTTXS(x) (((x) + 1) & KSE_TXQUEUELEN_MASK) 190 191 #define KSE_NRXDESC 64 192 #define KSE_NRXDESC_MASK (KSE_NRXDESC - 1) 193 #define KSE_NEXTRX(x) (((x) + 1) & KSE_NRXDESC_MASK) 194 195 struct tdes { 196 uint32_t t0, t1, t2, t3; 197 }; 198 199 struct rdes { 200 uint32_t r0, r1, r2, r3; 201 }; 202 203 struct kse_control_data { 204 struct tdes kcd_txdescs[KSE_NTXDESC]; 205 struct rdes kcd_rxdescs[KSE_NRXDESC]; 206 }; 207 #define KSE_CDOFF(x) offsetof(struct kse_control_data, x) 208 #define KSE_CDTXOFF(x) KSE_CDOFF(kcd_txdescs[(x)]) 209 #define KSE_CDRXOFF(x) KSE_CDOFF(kcd_rxdescs[(x)]) 210 211 struct kse_txsoft { 212 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 213 bus_dmamap_t txs_dmamap; /* our DMA map */ 214 int txs_firstdesc; /* first descriptor in packet */ 215 int txs_lastdesc; /* last descriptor in packet */ 216 int txs_ndesc; /* # of descriptors used */ 217 }; 218 219 struct kse_rxsoft { 220 struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 221 bus_dmamap_t rxs_dmamap; /* our DMA map */ 222 }; 223 224 struct kse_softc { 225 device_t sc_dev; /* generic device information */ 226 bus_space_tag_t sc_st; /* bus space tag */ 227 bus_space_handle_t sc_sh; /* bus space handle */ 228 bus_size_t sc_memsize; /* csr map size */ 229 bus_dma_tag_t sc_dmat; /* bus DMA tag */ 230 pci_chipset_tag_t sc_pc; /* PCI chipset tag */ 231 struct ethercom sc_ethercom; /* Ethernet common data */ 232 void *sc_ih; /* interrupt cookie */ 233 234 struct mii_data sc_mii; /* mii 8841 */ 235 struct ifmedia sc_media; /* ifmedia 8842 */ 236 int sc_flowflags; /* 802.3x PAUSE flow control */ 237 238 callout_t sc_tick_ch; /* MII tick callout */ 239 callout_t sc_stat_ch; /* statistics counter callout */ 240 241 bus_dmamap_t sc_cddmamap; /* control data DMA map */ 242 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr 243 244 struct kse_control_data *sc_control_data; 245 #define sc_txdescs sc_control_data->kcd_txdescs 246 #define sc_rxdescs sc_control_data->kcd_rxdescs 247 248 struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN]; 249 struct kse_rxsoft sc_rxsoft[KSE_NRXDESC]; 250 int sc_txfree; /* number of free Tx descriptors */ 251 int sc_txnext; /* next ready Tx descriptor */ 252 int sc_txsfree; /* number of free Tx jobs */ 253 int sc_txsnext; /* next ready Tx job */ 254 int sc_txsdirty; /* dirty Tx jobs */ 255 int sc_rxptr; /* next ready Rx descriptor/descsoft */ 256 257 uint32_t sc_txc, sc_rxc; 258 uint32_t sc_t1csum; 259 int sc_mcsum; 260 uint32_t sc_inten; 261 262 uint32_t sc_chip; 263 uint8_t sc_altmac[16][ETHER_ADDR_LEN]; 264 uint16_t sc_vlan[16]; 265 266 #ifdef KSE_EVENT_COUNTERS 267 struct ksext { 268 char evcntname[3][8]; 269 struct evcnt pev[3][34]; 270 } sc_ext; /* switch statistics */ 271 #endif 272 }; 273 274 #define KSE_CDTXADDR(sc, x) ((sc)->sc_cddma + KSE_CDTXOFF((x))) 275 #define KSE_CDRXADDR(sc, x) ((sc)->sc_cddma + KSE_CDRXOFF((x))) 276 277 #define KSE_CDTXSYNC(sc, x, n, ops) \ 278 do { \ 279 int __x, __n; \ 280 \ 281 __x = (x); \ 282 __n = (n); \ 283 \ 284 /* If it will wrap around, sync to the end of the ring. */ \ 285 if ((__x + __n) > KSE_NTXDESC) { \ 286 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 287 KSE_CDTXOFF(__x), sizeof(struct tdes) * \ 288 (KSE_NTXDESC - __x), (ops)); \ 289 __n -= (KSE_NTXDESC - __x); \ 290 __x = 0; \ 291 } \ 292 \ 293 /* Now sync whatever is left. */ \ 294 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 295 KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops)); \ 296 } while (/*CONSTCOND*/0) 297 298 #define KSE_CDRXSYNC(sc, x, ops) \ 299 do { \ 300 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 301 KSE_CDRXOFF((x)), sizeof(struct rdes), (ops)); \ 302 } while (/*CONSTCOND*/0) 303 304 #define KSE_INIT_RXDESC(sc, x) \ 305 do { \ 306 struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \ 307 struct rdes *__rxd = &(sc)->sc_rxdescs[(x)]; \ 308 struct mbuf *__m = __rxs->rxs_mbuf; \ 309 \ 310 __m->m_data = __m->m_ext.ext_buf; \ 311 __rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr; \ 312 __rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */; \ 313 __rxd->r0 = R0_OWN; \ 314 KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \ 315 } while (/*CONSTCOND*/0) 316 317 u_int kse_burstsize = 8; /* DMA burst length tuning knob */ 318 319 #ifdef KSEDIAGNOSTIC 320 u_int kse_monitor_rxintr; /* fragmented UDP csum HW bug hook */ 321 #endif 322 323 static int kse_match(device_t, cfdata_t, void *); 324 static void kse_attach(device_t, device_t, void *); 325 326 CFATTACH_DECL_NEW(kse, sizeof(struct kse_softc), 327 kse_match, kse_attach, NULL, NULL); 328 329 static int kse_ioctl(struct ifnet *, u_long, void *); 330 static void kse_start(struct ifnet *); 331 static void kse_watchdog(struct ifnet *); 332 static int kse_init(struct ifnet *); 333 static void kse_stop(struct ifnet *, int); 334 static void kse_reset(struct kse_softc *); 335 static void kse_set_rcvfilt(struct kse_softc *); 336 static int add_rxbuf(struct kse_softc *, int); 337 static void rxdrain(struct kse_softc *); 338 static int kse_intr(void *); 339 static void rxintr(struct kse_softc *); 340 static void txreap(struct kse_softc *); 341 static void lnkchg(struct kse_softc *); 342 static int kse_ifmedia_upd(struct ifnet *); 343 static void kse_ifmedia_sts(struct ifnet *, struct ifmediareq *); 344 static void nopifmedia_sts(struct ifnet *, struct ifmediareq *); 345 static void phy_tick(void *); 346 int kse_mii_readreg(device_t, int, int, uint16_t *); 347 int kse_mii_writereg(device_t, int, int, uint16_t); 348 void kse_mii_statchg(struct ifnet *); 349 #ifdef KSE_EVENT_COUNTERS 350 static void stat_tick(void *); 351 static void zerostats(struct kse_softc *); 352 #endif 353 354 static int 355 kse_match(device_t parent, cfdata_t match, void *aux) 356 { 357 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 358 359 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MICREL && 360 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8842 || 361 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8841) && 362 PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK) 363 return 1; 364 365 return 0; 366 } 367 368 static void 369 kse_attach(device_t parent, device_t self, void *aux) 370 { 371 struct kse_softc *sc = device_private(self); 372 struct pci_attach_args *pa = aux; 373 pci_chipset_tag_t pc = pa->pa_pc; 374 pci_intr_handle_t ih; 375 const char *intrstr; 376 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 377 struct mii_data * const mii = &sc->sc_mii; 378 struct ifmedia *ifm; 379 uint8_t enaddr[ETHER_ADDR_LEN]; 380 bus_dma_segment_t seg; 381 int i, error, nseg; 382 char intrbuf[PCI_INTRSTR_LEN]; 383 384 aprint_normal(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n", 385 PCI_PRODUCT(pa->pa_id), PCI_REVISION(pa->pa_class)); 386 387 if (pci_mapreg_map(pa, 0x10, 388 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 389 0, &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_memsize) != 0) { 390 aprint_error_dev(self, "unable to map device registers\n"); 391 return; 392 } 393 394 /* Make sure bus mastering is enabled. */ 395 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 396 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) | 397 PCI_COMMAND_MASTER_ENABLE); 398 399 /* Power up chip if necessary. */ 400 if ((error = pci_activate(pc, pa->pa_tag, self, NULL)) 401 && error != EOPNOTSUPP) { 402 aprint_error_dev(self, "cannot activate %d\n", error); 403 return; 404 } 405 406 /* Map and establish our interrupt. */ 407 if (pci_intr_map(pa, &ih)) { 408 aprint_error_dev(self, "unable to map interrupt\n"); 409 return; 410 } 411 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); 412 sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, kse_intr, sc, 413 device_xname(self)); 414 if (sc->sc_ih == NULL) { 415 aprint_error_dev(self, "unable to establish interrupt"); 416 if (intrstr != NULL) 417 aprint_error(" at %s", intrstr); 418 aprint_error("\n"); 419 return; 420 } 421 aprint_normal_dev(self, "interrupting at %s\n", intrstr); 422 423 sc->sc_dev = self; 424 sc->sc_dmat = pa->pa_dmat; 425 sc->sc_pc = pa->pa_pc; 426 sc->sc_chip = PCI_PRODUCT(pa->pa_id); 427 428 /* 429 * Read the Ethernet address from the EEPROM. 430 */ 431 i = CSR_READ_2(sc, MARL); 432 enaddr[5] = i; 433 enaddr[4] = i >> 8; 434 i = CSR_READ_2(sc, MARM); 435 enaddr[3] = i; 436 enaddr[2] = i >> 8; 437 i = CSR_READ_2(sc, MARH); 438 enaddr[1] = i; 439 enaddr[0] = i >> 8; 440 aprint_normal_dev(self, 441 "Ethernet address %s\n", ether_sprintf(enaddr)); 442 443 /* 444 * Enable chip function. 445 */ 446 CSR_WRITE_2(sc, SIDER, 1); 447 448 /* 449 * Allocate the control data structures, and create and load the 450 * DMA map for it. 451 */ 452 error = bus_dmamem_alloc(sc->sc_dmat, 453 sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0); 454 if (error != 0) { 455 aprint_error_dev(self, 456 "unable to allocate control data, error = %d\n", error); 457 goto fail_0; 458 } 459 error = bus_dmamem_map(sc->sc_dmat, &seg, nseg, 460 sizeof(struct kse_control_data), (void **)&sc->sc_control_data, 461 BUS_DMA_COHERENT); 462 if (error != 0) { 463 aprint_error_dev(self, 464 "unable to map control data, error = %d\n", error); 465 goto fail_1; 466 } 467 error = bus_dmamap_create(sc->sc_dmat, 468 sizeof(struct kse_control_data), 1, 469 sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap); 470 if (error != 0) { 471 aprint_error_dev(self, 472 "unable to create control data DMA map, " 473 "error = %d\n", error); 474 goto fail_2; 475 } 476 error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 477 sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0); 478 if (error != 0) { 479 aprint_error_dev(self, 480 "unable to load control data DMA map, error = %d\n", 481 error); 482 goto fail_3; 483 } 484 for (i = 0; i < KSE_TXQUEUELEN; i++) { 485 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 486 KSE_NTXSEGS, MCLBYTES, 0, 0, 487 &sc->sc_txsoft[i].txs_dmamap)) != 0) { 488 aprint_error_dev(self, 489 "unable to create tx DMA map %d, error = %d\n", 490 i, error); 491 goto fail_4; 492 } 493 } 494 for (i = 0; i < KSE_NRXDESC; i++) { 495 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 496 1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 497 aprint_error_dev(self, 498 "unable to create rx DMA map %d, error = %d\n", 499 i, error); 500 goto fail_5; 501 } 502 sc->sc_rxsoft[i].rxs_mbuf = NULL; 503 } 504 505 callout_init(&sc->sc_tick_ch, 0); 506 callout_init(&sc->sc_stat_ch, 0); 507 callout_setfunc(&sc->sc_tick_ch, phy_tick, sc); 508 #ifdef KSE_EVENT_COUNTERS 509 callout_setfunc(&sc->sc_stat_ch, stat_tick, sc); 510 #endif 511 512 mii->mii_ifp = ifp; 513 mii->mii_readreg = kse_mii_readreg; 514 mii->mii_writereg = kse_mii_writereg; 515 mii->mii_statchg = kse_mii_statchg; 516 517 /* Initialize ifmedia structures. */ 518 sc->sc_flowflags = 0; 519 if (sc->sc_chip == 0x8841) { 520 /* use port 1 builtin PHY as index 1 device */ 521 sc->sc_ethercom.ec_mii = mii; 522 ifm = &mii->mii_media; 523 ifmedia_init(ifm, 0, kse_ifmedia_upd, kse_ifmedia_sts); 524 mii_attach(sc->sc_dev, mii, 0xffffffff, 1 /* PHY1 */, 525 MII_OFFSET_ANY, MIIF_DOPAUSE); 526 if (LIST_FIRST(&mii->mii_phys) == NULL) { 527 ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL); 528 ifmedia_set(ifm, IFM_ETHER | IFM_NONE); 529 } else 530 ifmedia_set(ifm, IFM_ETHER | IFM_AUTO); 531 } else { 532 /* 533 * pretend 100FDX w/ no alternative media selection. 534 * 8842 MAC is tied with a builtin 3 port switch. It can do 535 * 4 degree priotised rate control over either of tx/rx 536 * direction for any of ports, respectively. Tough, this 537 * driver leaves the rate unlimited intending 100Mbps maximum. 538 * 2 external ports behave in AN mode and this driver provides 539 * no mean to manipulate and see their operational details. 540 */ 541 sc->sc_ethercom.ec_ifmedia = ifm = &sc->sc_media; 542 ifmedia_init(ifm, 0, NULL, nopifmedia_sts); 543 ifmedia_add(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); 544 ifmedia_set(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX); 545 546 aprint_normal_dev(self, 547 "10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n"); 548 } 549 ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if user has requested */ 550 551 552 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 553 ifp->if_softc = sc; 554 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 555 ifp->if_ioctl = kse_ioctl; 556 ifp->if_start = kse_start; 557 ifp->if_watchdog = kse_watchdog; 558 ifp->if_init = kse_init; 559 ifp->if_stop = kse_stop; 560 IFQ_SET_READY(&ifp->if_snd); 561 562 /* 563 * capable of 802.1Q VLAN-sized frames and hw assisted tagging. 564 * can do IPv4, TCPv4, and UDPv4 checksums in hardware. 565 */ 566 sc->sc_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU; 567 ifp->if_capabilities = 568 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 569 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 570 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 571 572 if_attach(ifp); 573 if_deferred_start_init(ifp, NULL); 574 ether_ifattach(ifp, enaddr); 575 576 #ifdef KSE_EVENT_COUNTERS 577 int p = (sc->sc_chip == 0x8842) ? 3 : 1; 578 for (i = 0; i < p; i++) { 579 struct ksext *ee = &sc->sc_ext; 580 snprintf(ee->evcntname[i], sizeof(ee->evcntname[i]), 581 "%s.%d", device_xname(sc->sc_dev), i+1); 582 evcnt_attach_dynamic(&ee->pev[i][0], EVCNT_TYPE_MISC, 583 NULL, ee->evcntname[i], "RxLoPriotyByte"); 584 evcnt_attach_dynamic(&ee->pev[i][1], EVCNT_TYPE_MISC, 585 NULL, ee->evcntname[i], "RxHiPriotyByte"); 586 evcnt_attach_dynamic(&ee->pev[i][2], EVCNT_TYPE_MISC, 587 NULL, ee->evcntname[i], "RxUndersizePkt"); 588 evcnt_attach_dynamic(&ee->pev[i][3], EVCNT_TYPE_MISC, 589 NULL, ee->evcntname[i], "RxFragments"); 590 evcnt_attach_dynamic(&ee->pev[i][4], EVCNT_TYPE_MISC, 591 NULL, ee->evcntname[i], "RxOversize"); 592 evcnt_attach_dynamic(&ee->pev[i][5], EVCNT_TYPE_MISC, 593 NULL, ee->evcntname[i], "RxJabbers"); 594 evcnt_attach_dynamic(&ee->pev[i][6], EVCNT_TYPE_MISC, 595 NULL, ee->evcntname[i], "RxSymbolError"); 596 evcnt_attach_dynamic(&ee->pev[i][7], EVCNT_TYPE_MISC, 597 NULL, ee->evcntname[i], "RxCRCError"); 598 evcnt_attach_dynamic(&ee->pev[i][8], EVCNT_TYPE_MISC, 599 NULL, ee->evcntname[i], "RxAlignmentError"); 600 evcnt_attach_dynamic(&ee->pev[i][9], EVCNT_TYPE_MISC, 601 NULL, ee->evcntname[i], "RxControl8808Pkts"); 602 evcnt_attach_dynamic(&ee->pev[i][10], EVCNT_TYPE_MISC, 603 NULL, ee->evcntname[i], "RxPausePkts"); 604 evcnt_attach_dynamic(&ee->pev[i][11], EVCNT_TYPE_MISC, 605 NULL, ee->evcntname[i], "RxBroadcast"); 606 evcnt_attach_dynamic(&ee->pev[i][12], EVCNT_TYPE_MISC, 607 NULL, ee->evcntname[i], "RxMulticast"); 608 evcnt_attach_dynamic(&ee->pev[i][13], EVCNT_TYPE_MISC, 609 NULL, ee->evcntname[i], "RxUnicast"); 610 evcnt_attach_dynamic(&ee->pev[i][14], EVCNT_TYPE_MISC, 611 NULL, ee->evcntname[i], "Rx64Octets"); 612 evcnt_attach_dynamic(&ee->pev[i][15], EVCNT_TYPE_MISC, 613 NULL, ee->evcntname[i], "Rx65To127Octets"); 614 evcnt_attach_dynamic(&ee->pev[i][16], EVCNT_TYPE_MISC, 615 NULL, ee->evcntname[i], "Rx128To255Octets"); 616 evcnt_attach_dynamic(&ee->pev[i][17], EVCNT_TYPE_MISC, 617 NULL, ee->evcntname[i], "Rx255To511Octets"); 618 evcnt_attach_dynamic(&ee->pev[i][18], EVCNT_TYPE_MISC, 619 NULL, ee->evcntname[i], "Rx512To1023Octets"); 620 evcnt_attach_dynamic(&ee->pev[i][19], EVCNT_TYPE_MISC, 621 NULL, ee->evcntname[i], "Rx1024To1522Octets"); 622 evcnt_attach_dynamic(&ee->pev[i][20], EVCNT_TYPE_MISC, 623 NULL, ee->evcntname[i], "TxLoPriotyByte"); 624 evcnt_attach_dynamic(&ee->pev[i][21], EVCNT_TYPE_MISC, 625 NULL, ee->evcntname[i], "TxHiPriotyByte"); 626 evcnt_attach_dynamic(&ee->pev[i][22], EVCNT_TYPE_MISC, 627 NULL, ee->evcntname[i], "TxLateCollision"); 628 evcnt_attach_dynamic(&ee->pev[i][23], EVCNT_TYPE_MISC, 629 NULL, ee->evcntname[i], "TxPausePkts"); 630 evcnt_attach_dynamic(&ee->pev[i][24], EVCNT_TYPE_MISC, 631 NULL, ee->evcntname[i], "TxBroadcastPkts"); 632 evcnt_attach_dynamic(&ee->pev[i][25], EVCNT_TYPE_MISC, 633 NULL, ee->evcntname[i], "TxMulticastPkts"); 634 evcnt_attach_dynamic(&ee->pev[i][26], EVCNT_TYPE_MISC, 635 NULL, ee->evcntname[i], "TxUnicastPkts"); 636 evcnt_attach_dynamic(&ee->pev[i][27], EVCNT_TYPE_MISC, 637 NULL, ee->evcntname[i], "TxDeferred"); 638 evcnt_attach_dynamic(&ee->pev[i][28], EVCNT_TYPE_MISC, 639 NULL, ee->evcntname[i], "TxTotalCollision"); 640 evcnt_attach_dynamic(&ee->pev[i][29], EVCNT_TYPE_MISC, 641 NULL, ee->evcntname[i], "TxExcessiveCollision"); 642 evcnt_attach_dynamic(&ee->pev[i][30], EVCNT_TYPE_MISC, 643 NULL, ee->evcntname[i], "TxSingleCollision"); 644 evcnt_attach_dynamic(&ee->pev[i][31], EVCNT_TYPE_MISC, 645 NULL, ee->evcntname[i], "TxMultipleCollision"); 646 evcnt_attach_dynamic(&ee->pev[i][32], EVCNT_TYPE_MISC, 647 NULL, ee->evcntname[i], "TxDropPkts"); 648 evcnt_attach_dynamic(&ee->pev[i][33], EVCNT_TYPE_MISC, 649 NULL, ee->evcntname[i], "RxDropPkts"); 650 } 651 #endif 652 return; 653 654 fail_5: 655 for (i = 0; i < KSE_NRXDESC; i++) { 656 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 657 bus_dmamap_destroy(sc->sc_dmat, 658 sc->sc_rxsoft[i].rxs_dmamap); 659 } 660 fail_4: 661 for (i = 0; i < KSE_TXQUEUELEN; i++) { 662 if (sc->sc_txsoft[i].txs_dmamap != NULL) 663 bus_dmamap_destroy(sc->sc_dmat, 664 sc->sc_txsoft[i].txs_dmamap); 665 } 666 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 667 fail_3: 668 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 669 fail_2: 670 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 671 sizeof(struct kse_control_data)); 672 fail_1: 673 bus_dmamem_free(sc->sc_dmat, &seg, nseg); 674 fail_0: 675 if (sc->sc_ih != NULL) { 676 pci_intr_disestablish(pc, sc->sc_ih); 677 sc->sc_ih = NULL; 678 } 679 if (sc->sc_memsize) { 680 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_memsize); 681 sc->sc_memsize = 0; 682 } 683 return; 684 } 685 686 static int 687 kse_ioctl(struct ifnet *ifp, u_long cmd, void *data) 688 { 689 struct kse_softc *sc = ifp->if_softc; 690 struct ifreq *ifr = (struct ifreq *)data; 691 struct ifmedia *ifm; 692 int s, error; 693 694 s = splnet(); 695 696 switch (cmd) { 697 case SIOCSIFMEDIA: 698 /* Flow control requires full-duplex mode. */ 699 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 700 (ifr->ifr_media & IFM_FDX) == 0) 701 ifr->ifr_media &= ~IFM_ETH_FMASK; 702 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 703 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 704 /* We can do both TXPAUSE and RXPAUSE. */ 705 ifr->ifr_media |= 706 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 707 } 708 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 709 } 710 ifm = (sc->sc_chip == 0x8841) 711 ? &sc->sc_mii.mii_media : &sc->sc_media; 712 error = ifmedia_ioctl(ifp, ifr, ifm, cmd); 713 break; 714 default: 715 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) 716 break; 717 718 error = 0; 719 720 if (cmd == SIOCSIFCAP) 721 error = (*ifp->if_init)(ifp); 722 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) 723 ; 724 else if (ifp->if_flags & IFF_RUNNING) { 725 /* 726 * Multicast list has changed; set the hardware filter 727 * accordingly. 728 */ 729 kse_set_rcvfilt(sc); 730 } 731 break; 732 } 733 734 splx(s); 735 return error; 736 } 737 738 static int 739 kse_init(struct ifnet *ifp) 740 { 741 struct kse_softc *sc = ifp->if_softc; 742 uint32_t paddr; 743 int i, error = 0; 744 745 /* cancel pending I/O */ 746 kse_stop(ifp, 0); 747 748 /* reset all registers but PCI configuration */ 749 kse_reset(sc); 750 751 /* craft Tx descriptor ring */ 752 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 753 for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) { 754 sc->sc_txdescs[i].t3 = paddr; 755 paddr += sizeof(struct tdes); 756 } 757 sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0); 758 KSE_CDTXSYNC(sc, 0, KSE_NTXDESC, 759 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 760 sc->sc_txfree = KSE_NTXDESC; 761 sc->sc_txnext = 0; 762 763 for (i = 0; i < KSE_TXQUEUELEN; i++) 764 sc->sc_txsoft[i].txs_mbuf = NULL; 765 sc->sc_txsfree = KSE_TXQUEUELEN; 766 sc->sc_txsnext = 0; 767 sc->sc_txsdirty = 0; 768 769 /* craft Rx descriptor ring */ 770 memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs)); 771 for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) { 772 sc->sc_rxdescs[i].r3 = paddr; 773 paddr += sizeof(struct rdes); 774 } 775 sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0); 776 for (i = 0; i < KSE_NRXDESC; i++) { 777 if (sc->sc_rxsoft[i].rxs_mbuf == NULL) { 778 if ((error = add_rxbuf(sc, i)) != 0) { 779 aprint_error_dev(sc->sc_dev, 780 "unable to allocate or map rx " 781 "buffer %d, error = %d\n", 782 i, error); 783 rxdrain(sc); 784 goto out; 785 } 786 } 787 else 788 KSE_INIT_RXDESC(sc, i); 789 } 790 sc->sc_rxptr = 0; 791 792 /* hand Tx/Rx rings to HW */ 793 CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0)); 794 CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0)); 795 796 sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC; 797 sc->sc_rxc = RXC_REN | RXC_RU | RXC_RB; 798 sc->sc_t1csum = sc->sc_mcsum = 0; 799 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) { 800 sc->sc_rxc |= RXC_ICC; 801 sc->sc_mcsum |= M_CSUM_IPv4; 802 } 803 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) { 804 sc->sc_txc |= TXC_ICG; 805 sc->sc_t1csum |= T1_IPCKG; 806 } 807 if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) { 808 sc->sc_rxc |= RXC_TCC; 809 sc->sc_mcsum |= M_CSUM_TCPv4; 810 } 811 if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) { 812 sc->sc_txc |= TXC_TCG; 813 sc->sc_t1csum |= T1_TCPCKG; 814 } 815 if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) { 816 sc->sc_rxc |= RXC_UCC; 817 sc->sc_mcsum |= M_CSUM_UDPv4; 818 } 819 if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) { 820 sc->sc_txc |= TXC_UCG; 821 sc->sc_t1csum |= T1_UDPCKG; 822 } 823 sc->sc_txc |= (kse_burstsize << TXC_BS_SFT); 824 sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT); 825 826 if (sc->sc_chip == 0x8842) { 827 sc->sc_txc |= TXC_FCE; 828 sc->sc_rxc |= RXC_FCE; 829 CSR_WRITE_2(sc, SGCR3, 830 CSR_READ_2(sc, SGCR3) | CR3_USEFC); 831 } 832 833 /* accept multicast frame or run promisc mode */ 834 kse_set_rcvfilt(sc); 835 836 /* set current media */ 837 if (sc->sc_chip == 0x8841) 838 (void)kse_ifmedia_upd(ifp); 839 840 /* enable transmitter and receiver */ 841 CSR_WRITE_4(sc, MDTXC, sc->sc_txc); 842 CSR_WRITE_4(sc, MDRXC, sc->sc_rxc); 843 CSR_WRITE_4(sc, MDRSC, 1); 844 845 /* enable interrupts */ 846 sc->sc_inten = INT_DMTS | INT_DMRS | INT_DMRBUS; 847 if (sc->sc_chip == 0x8841) 848 sc->sc_inten |= INT_DMLCS; 849 CSR_WRITE_4(sc, INTST, ~0); 850 CSR_WRITE_4(sc, INTEN, sc->sc_inten); 851 852 ifp->if_flags |= IFF_RUNNING; 853 ifp->if_flags &= ~IFF_OACTIVE; 854 855 if (sc->sc_chip == 0x8841) { 856 /* start one second timer */ 857 callout_schedule(&sc->sc_tick_ch, hz); 858 } 859 #ifdef KSE_EVENT_COUNTERS 860 /* start statistics gather 1 minute timer. should be tunable */ 861 zerostats(sc); 862 callout_schedule(&sc->sc_stat_ch, hz * 60); 863 #endif 864 865 out: 866 if (error) { 867 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 868 ifp->if_timer = 0; 869 aprint_error_dev(sc->sc_dev, "interface not running\n"); 870 } 871 return error; 872 } 873 874 static void 875 kse_stop(struct ifnet *ifp, int disable) 876 { 877 struct kse_softc *sc = ifp->if_softc; 878 struct kse_txsoft *txs; 879 int i; 880 881 if (sc->sc_chip == 0x8841) 882 callout_stop(&sc->sc_tick_ch); 883 callout_stop(&sc->sc_stat_ch); 884 885 sc->sc_txc &= ~TXC_TEN; 886 sc->sc_rxc &= ~RXC_REN; 887 CSR_WRITE_4(sc, MDTXC, sc->sc_txc); 888 CSR_WRITE_4(sc, MDRXC, sc->sc_rxc); 889 890 for (i = 0; i < KSE_TXQUEUELEN; i++) { 891 txs = &sc->sc_txsoft[i]; 892 if (txs->txs_mbuf != NULL) { 893 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 894 m_freem(txs->txs_mbuf); 895 txs->txs_mbuf = NULL; 896 } 897 } 898 899 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 900 ifp->if_timer = 0; 901 902 if (disable) 903 rxdrain(sc); 904 } 905 906 static void 907 kse_reset(struct kse_softc *sc) 908 { 909 910 /* software reset */ 911 CSR_WRITE_2(sc, GRR, 1); 912 delay(1000); /* PDF does not mention the delay amount */ 913 CSR_WRITE_2(sc, GRR, 0); 914 915 /* enable switch function */ 916 CSR_WRITE_2(sc, SIDER, 1); 917 } 918 919 static void 920 kse_watchdog(struct ifnet *ifp) 921 { 922 struct kse_softc *sc = ifp->if_softc; 923 924 /* 925 * Since we're not interrupting every packet, sweep 926 * up before we report an error. 927 */ 928 txreap(sc); 929 930 if (sc->sc_txfree != KSE_NTXDESC) { 931 aprint_error_dev(sc->sc_dev, 932 "device timeout (txfree %d txsfree %d txnext %d)\n", 933 sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext); 934 if_statinc(ifp, if_oerrors); 935 936 /* Reset the interface. */ 937 kse_init(ifp); 938 } 939 else if (ifp->if_flags & IFF_DEBUG) 940 aprint_error_dev(sc->sc_dev, "recovered from device timeout\n"); 941 942 /* Try to get more packets going. */ 943 kse_start(ifp); 944 } 945 946 static void 947 kse_start(struct ifnet *ifp) 948 { 949 struct kse_softc *sc = ifp->if_softc; 950 struct mbuf *m0, *m; 951 struct kse_txsoft *txs; 952 bus_dmamap_t dmamap; 953 int error, nexttx, lasttx, ofree, seg; 954 uint32_t tdes0; 955 956 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 957 return; 958 959 /* Remember the previous number of free descriptors. */ 960 ofree = sc->sc_txfree; 961 962 /* 963 * Loop through the send queue, setting up transmit descriptors 964 * until we drain the queue, or use up all available transmit 965 * descriptors. 966 */ 967 for (;;) { 968 IFQ_POLL(&ifp->if_snd, m0); 969 if (m0 == NULL) 970 break; 971 972 if (sc->sc_txsfree < KSE_TXQUEUE_GC) { 973 txreap(sc); 974 if (sc->sc_txsfree == 0) 975 break; 976 } 977 txs = &sc->sc_txsoft[sc->sc_txsnext]; 978 dmamap = txs->txs_dmamap; 979 980 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 981 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 982 if (error) { 983 if (error == EFBIG) { 984 aprint_error_dev(sc->sc_dev, 985 "Tx packet consumes too many " 986 "DMA segments, dropping...\n"); 987 IFQ_DEQUEUE(&ifp->if_snd, m0); 988 m_freem(m0); 989 continue; 990 } 991 /* Short on resources, just stop for now. */ 992 break; 993 } 994 995 if (dmamap->dm_nsegs > sc->sc_txfree) { 996 /* 997 * Not enough free descriptors to transmit this 998 * packet. We haven't committed anything yet, 999 * so just unload the DMA map, put the packet 1000 * back on the queue, and punt. Notify the upper 1001 * layer that there are not more slots left. 1002 */ 1003 ifp->if_flags |= IFF_OACTIVE; 1004 bus_dmamap_unload(sc->sc_dmat, dmamap); 1005 break; 1006 } 1007 1008 IFQ_DEQUEUE(&ifp->if_snd, m0); 1009 1010 /* 1011 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 1012 */ 1013 1014 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 1015 BUS_DMASYNC_PREWRITE); 1016 1017 tdes0 = 0; /* to postpone 1st segment T0_OWN write */ 1018 lasttx = -1; 1019 for (nexttx = sc->sc_txnext, seg = 0; 1020 seg < dmamap->dm_nsegs; 1021 seg++, nexttx = KSE_NEXTTX(nexttx)) { 1022 struct tdes *tdes = &sc->sc_txdescs[nexttx]; 1023 /* 1024 * If this is the first descriptor we're 1025 * enqueueing, don't set the OWN bit just 1026 * yet. That could cause a race condition. 1027 * We'll do it below. 1028 */ 1029 tdes->t2 = dmamap->dm_segs[seg].ds_addr; 1030 tdes->t1 = sc->sc_t1csum 1031 | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK); 1032 tdes->t0 = tdes0; 1033 tdes0 = T0_OWN; /* 2nd and other segments */ 1034 lasttx = nexttx; 1035 } 1036 /* 1037 * Outgoing NFS mbuf must be unloaded when Tx completed. 1038 * Without T1_IC NFS mbuf is left unack'ed for excessive 1039 * time and NFS stops to proceed until kse_watchdog() 1040 * calls txreap() to reclaim the unack'ed mbuf. 1041 * It's painful to traverse every mbuf chain to determine 1042 * whether someone is waiting for Tx completion. 1043 */ 1044 m = m0; 1045 do { 1046 if ((m->m_flags & M_EXT) && m->m_ext.ext_free) { 1047 sc->sc_txdescs[lasttx].t1 |= T1_IC; 1048 break; 1049 } 1050 } while ((m = m->m_next) != NULL); 1051 1052 /* Write deferred 1st segment T0_OWN at the final stage */ 1053 sc->sc_txdescs[lasttx].t1 |= T1_LS; 1054 sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS; 1055 sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN; 1056 KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs, 1057 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1058 1059 /* Tell DMA start transmit */ 1060 CSR_WRITE_4(sc, MDTSC, 1); 1061 1062 txs->txs_mbuf = m0; 1063 txs->txs_firstdesc = sc->sc_txnext; 1064 txs->txs_lastdesc = lasttx; 1065 txs->txs_ndesc = dmamap->dm_nsegs; 1066 1067 sc->sc_txfree -= txs->txs_ndesc; 1068 sc->sc_txnext = nexttx; 1069 sc->sc_txsfree--; 1070 sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext); 1071 /* 1072 * Pass the packet to any BPF listeners. 1073 */ 1074 bpf_mtap(ifp, m0, BPF_D_OUT); 1075 } 1076 1077 if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) { 1078 /* No more slots left; notify upper layer. */ 1079 ifp->if_flags |= IFF_OACTIVE; 1080 } 1081 if (sc->sc_txfree != ofree) { 1082 /* Set a watchdog timer in case the chip flakes out. */ 1083 ifp->if_timer = 5; 1084 } 1085 } 1086 1087 static void 1088 kse_set_rcvfilt(struct kse_softc *sc) 1089 { 1090 struct ether_multistep step; 1091 struct ether_multi *enm; 1092 struct ethercom *ec = &sc->sc_ethercom; 1093 struct ifnet *ifp = &ec->ec_if; 1094 uint32_t crc, mchash[2]; 1095 int i; 1096 1097 sc->sc_rxc &= ~(RXC_MHTE | RXC_RM | RXC_RA); 1098 1099 /* clear perfect match filter and prepare mcast hash table */ 1100 for (i = 0; i < 16; i++) 1101 CSR_WRITE_4(sc, MAAH0 + i*8, 0); 1102 crc = mchash[0] = mchash[1] = 0; 1103 1104 ETHER_LOCK(ec); 1105 if (ifp->if_flags & IFF_PROMISC) { 1106 ec->ec_flags |= ETHER_F_ALLMULTI; 1107 ETHER_UNLOCK(ec); 1108 /* run promisc. mode */ 1109 sc->sc_rxc |= RXC_RA; 1110 goto update; 1111 } 1112 ec->ec_flags &= ~ETHER_F_ALLMULTI; 1113 ETHER_FIRST_MULTI(step, ec, enm); 1114 i = 0; 1115 while (enm != NULL) { 1116 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1117 /* 1118 * We must listen to a range of multicast addresses. 1119 * For now, just accept all multicasts, rather than 1120 * trying to set only those filter bits needed to match 1121 * the range. (At this time, the only use of address 1122 * ranges is for IP multicast routing, for which the 1123 * range is big enough to require all bits set.) 1124 */ 1125 ec->ec_flags |= ETHER_F_ALLMULTI; 1126 ETHER_UNLOCK(ec); 1127 /* accept all multicast */ 1128 sc->sc_rxc |= RXC_RM; 1129 goto update; 1130 } 1131 #if KSE_MCASTDEBUG == 1 1132 printf("[%d] %s\n", i, ether_sprintf(enm->enm_addrlo)); 1133 #endif 1134 if (i < 16) { 1135 /* use 16 additional MAC addr to accept mcast */ 1136 uint32_t addr; 1137 uint8_t *ep = enm->enm_addrlo; 1138 addr = (ep[3] << 24) | (ep[2] << 16) 1139 | (ep[1] << 8) | ep[0]; 1140 CSR_WRITE_4(sc, MAAL0 + i*8, addr); 1141 addr = (ep[5] << 8) | ep[4]; 1142 CSR_WRITE_4(sc, MAAH0 + i*8, addr | (1U << 31)); 1143 } else { 1144 /* use hash table when too many */ 1145 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1146 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 1147 } 1148 ETHER_NEXT_MULTI(step, enm); 1149 i++; 1150 } 1151 ETHER_UNLOCK(ec); 1152 1153 if (crc) 1154 sc->sc_rxc |= RXC_MHTE; 1155 CSR_WRITE_4(sc, MTR0, mchash[0]); 1156 CSR_WRITE_4(sc, MTR1, mchash[1]); 1157 update: 1158 /* With RA or RM, MHTE/MTR0/MTR1 are never consulted. */ 1159 return; 1160 } 1161 1162 static int 1163 add_rxbuf(struct kse_softc *sc, int idx) 1164 { 1165 struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1166 struct mbuf *m; 1167 int error; 1168 1169 MGETHDR(m, M_DONTWAIT, MT_DATA); 1170 if (m == NULL) 1171 return ENOBUFS; 1172 1173 MCLGET(m, M_DONTWAIT); 1174 if ((m->m_flags & M_EXT) == 0) { 1175 m_freem(m); 1176 return ENOBUFS; 1177 } 1178 1179 if (rxs->rxs_mbuf != NULL) 1180 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1181 1182 rxs->rxs_mbuf = m; 1183 1184 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, 1185 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT); 1186 if (error) { 1187 aprint_error_dev(sc->sc_dev, 1188 "can't load rx DMA map %d, error = %d\n", idx, error); 1189 panic("kse_add_rxbuf"); 1190 } 1191 1192 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1193 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1194 1195 KSE_INIT_RXDESC(sc, idx); 1196 1197 return 0; 1198 } 1199 1200 static void 1201 rxdrain(struct kse_softc *sc) 1202 { 1203 struct kse_rxsoft *rxs; 1204 int i; 1205 1206 for (i = 0; i < KSE_NRXDESC; i++) { 1207 rxs = &sc->sc_rxsoft[i]; 1208 if (rxs->rxs_mbuf != NULL) { 1209 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1210 m_freem(rxs->rxs_mbuf); 1211 rxs->rxs_mbuf = NULL; 1212 } 1213 } 1214 } 1215 1216 static int 1217 kse_intr(void *arg) 1218 { 1219 struct kse_softc *sc = arg; 1220 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1221 uint32_t isr; 1222 1223 if ((isr = CSR_READ_4(sc, INTST)) == 0) 1224 return 0; 1225 1226 if (isr & INT_DMRS) 1227 rxintr(sc); 1228 if (isr & INT_DMTS) 1229 txreap(sc); 1230 if (isr & INT_DMLCS) 1231 lnkchg(sc); 1232 if (isr & INT_DMRBUS) 1233 aprint_error_dev(sc->sc_dev, "Rx descriptor full\n"); 1234 1235 CSR_WRITE_4(sc, INTST, isr); 1236 1237 if (ifp->if_flags & IFF_RUNNING) 1238 if_schedule_deferred_start(ifp); 1239 1240 return 1; 1241 } 1242 1243 static void 1244 rxintr(struct kse_softc *sc) 1245 { 1246 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1247 struct kse_rxsoft *rxs; 1248 struct mbuf *m; 1249 uint32_t rxstat; 1250 int i, len; 1251 1252 for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) { 1253 rxs = &sc->sc_rxsoft[i]; 1254 1255 KSE_CDRXSYNC(sc, i, 1256 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1257 1258 rxstat = sc->sc_rxdescs[i].r0; 1259 1260 if (rxstat & R0_OWN) /* desc is left empty */ 1261 break; 1262 1263 /* R0_FS | R0_LS must have been marked for this desc */ 1264 1265 if (rxstat & R0_ES) { 1266 if_statinc(ifp, if_ierrors); 1267 #define PRINTERR(bit, str) \ 1268 if (rxstat & (bit)) \ 1269 aprint_error_dev(sc->sc_dev, \ 1270 "%s\n", str) 1271 PRINTERR(R0_TL, "frame too long"); 1272 PRINTERR(R0_RF, "runt frame"); 1273 PRINTERR(R0_CE, "bad FCS"); 1274 #undef PRINTERR 1275 KSE_INIT_RXDESC(sc, i); 1276 continue; 1277 } 1278 1279 /* HW errata; frame might be too small or too large */ 1280 1281 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1282 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1283 1284 len = rxstat & R0_FL_MASK; 1285 len -= ETHER_CRC_LEN; /* Trim CRC off */ 1286 m = rxs->rxs_mbuf; 1287 1288 if (add_rxbuf(sc, i) != 0) { 1289 if_statinc(ifp, if_ierrors); 1290 KSE_INIT_RXDESC(sc, i); 1291 bus_dmamap_sync(sc->sc_dmat, 1292 rxs->rxs_dmamap, 0, 1293 rxs->rxs_dmamap->dm_mapsize, 1294 BUS_DMASYNC_PREREAD); 1295 continue; 1296 } 1297 1298 m_set_rcvif(m, ifp); 1299 m->m_pkthdr.len = m->m_len = len; 1300 1301 if (sc->sc_mcsum) { 1302 m->m_pkthdr.csum_flags |= sc->sc_mcsum; 1303 if (rxstat & R0_IPE) 1304 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 1305 if (rxstat & (R0_TCPE | R0_UDPE)) 1306 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 1307 } 1308 if_percpuq_enqueue(ifp->if_percpuq, m); 1309 #ifdef KSEDIAGNOSTIC 1310 if (kse_monitor_rxintr > 0) { 1311 aprint_error_dev(sc->sc_dev, 1312 "m stat %x data %p len %d\n", 1313 rxstat, m->m_data, m->m_len); 1314 } 1315 #endif 1316 } 1317 sc->sc_rxptr = i; 1318 } 1319 1320 static void 1321 txreap(struct kse_softc *sc) 1322 { 1323 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1324 struct kse_txsoft *txs; 1325 uint32_t txstat; 1326 int i; 1327 1328 ifp->if_flags &= ~IFF_OACTIVE; 1329 1330 for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN; 1331 i = KSE_NEXTTXS(i), sc->sc_txsfree++) { 1332 txs = &sc->sc_txsoft[i]; 1333 1334 KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc, 1335 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1336 1337 txstat = sc->sc_txdescs[txs->txs_lastdesc].t0; 1338 1339 if (txstat & T0_OWN) /* desc is still in use */ 1340 break; 1341 1342 /* There is no way to tell transmission status per frame */ 1343 1344 if_statinc(ifp, if_opackets); 1345 1346 sc->sc_txfree += txs->txs_ndesc; 1347 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 1348 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1349 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1350 m_freem(txs->txs_mbuf); 1351 txs->txs_mbuf = NULL; 1352 } 1353 sc->sc_txsdirty = i; 1354 if (sc->sc_txsfree == KSE_TXQUEUELEN) 1355 ifp->if_timer = 0; 1356 } 1357 1358 static void 1359 lnkchg(struct kse_softc *sc) 1360 { 1361 struct ifmediareq ifmr; 1362 1363 #if KSE_LINKDEBUG == 1 1364 uint16_t p1sr = CSR_READ_2(sc, P1SR); 1365 printf("link %s detected\n", (p1sr & PxSR_LINKUP) ? "up" : "down"); 1366 #endif 1367 kse_ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr); 1368 } 1369 1370 static int 1371 kse_ifmedia_upd(struct ifnet *ifp) 1372 { 1373 struct kse_softc *sc = ifp->if_softc; 1374 struct ifmedia *ifm = &sc->sc_mii.mii_media; 1375 uint16_t p1cr4; 1376 1377 p1cr4 = 0; 1378 if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_AUTO) { 1379 p1cr4 |= PxCR_STARTNEG; /* restart AN */ 1380 p1cr4 |= PxCR_AUTOEN; /* enable AN */ 1381 p1cr4 |= PxCR_USEFC; /* advertise flow control pause */ 1382 p1cr4 |= 0xf; /* adv. 100FDX,100HDX,10FDX,10HDX */ 1383 } else { 1384 if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX) 1385 p1cr4 |= PxCR_SPD100; 1386 if (ifm->ifm_media & IFM_FDX) 1387 p1cr4 |= PxCR_USEFDX; 1388 } 1389 CSR_WRITE_2(sc, P1CR4, p1cr4); 1390 #if KSE_LINKDEBUG == 1 1391 printf("P1CR4: %04x\n", p1cr4); 1392 #endif 1393 return 0; 1394 } 1395 1396 static void 1397 kse_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1398 { 1399 struct kse_softc *sc = ifp->if_softc; 1400 struct mii_data *mii = &sc->sc_mii; 1401 1402 mii_pollstat(mii); 1403 ifmr->ifm_status = mii->mii_media_status; 1404 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 1405 sc->sc_flowflags; 1406 } 1407 1408 static void 1409 nopifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1410 { 1411 struct kse_softc *sc = ifp->if_softc; 1412 struct ifmedia *ifm = &sc->sc_media; 1413 1414 #if KSE_LINKDEBUG == 2 1415 printf("p1sr: %04x, p2sr: %04x\n", CSR_READ_2(sc, P1SR), CSR_READ_2(sc, P2SR)); 1416 #endif 1417 1418 /* 8842 MAC pretends 100FDX all the time */ 1419 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; 1420 ifmr->ifm_active = ifm->ifm_cur->ifm_media | 1421 IFM_FLOW | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE; 1422 } 1423 1424 static void 1425 phy_tick(void *arg) 1426 { 1427 struct kse_softc *sc = arg; 1428 struct mii_data *mii = &sc->sc_mii; 1429 int s; 1430 1431 s = splnet(); 1432 mii_tick(mii); 1433 splx(s); 1434 1435 callout_schedule(&sc->sc_tick_ch, hz); 1436 } 1437 1438 static const uint16_t phy1csr[] = { 1439 /* 0 BMCR */ 0x4d0, 1440 /* 1 BMSR */ 0x4d2, 1441 /* 2 PHYID1 */ 0x4d6, /* 0x0022 - PHY1HR */ 1442 /* 3 PHYID2 */ 0x4d4, /* 0x1430 - PHY1LR */ 1443 /* 4 ANAR */ 0x4d8, 1444 /* 5 ANLPAR */ 0x4da, 1445 }; 1446 1447 int 1448 kse_mii_readreg(device_t self, int phy, int reg, uint16_t *val) 1449 { 1450 struct kse_softc *sc = device_private(self); 1451 1452 if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0) 1453 return EINVAL; 1454 *val = CSR_READ_2(sc, phy1csr[reg]); 1455 return 0; 1456 } 1457 1458 int 1459 kse_mii_writereg(device_t self, int phy, int reg, uint16_t val) 1460 { 1461 struct kse_softc *sc = device_private(self); 1462 1463 if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0) 1464 return EINVAL; 1465 CSR_WRITE_2(sc, phy1csr[reg], val); 1466 return 0; 1467 } 1468 1469 void 1470 kse_mii_statchg(struct ifnet *ifp) 1471 { 1472 struct kse_softc *sc = ifp->if_softc; 1473 struct mii_data *mii = &sc->sc_mii; 1474 1475 #if KSE_LINKDEBUG == 1 1476 /* decode P1SR register value */ 1477 uint16_t p1sr = CSR_READ_2(sc, P1SR); 1478 printf("P1SR %04x, spd%d", p1sr, (p1sr & PxSR_SPD100) ? 100 : 10); 1479 if (p1sr & PxSR_FDX) 1480 printf(",full-duplex"); 1481 if (p1sr & PxSR_RXFLOW) 1482 printf(",rxpause"); 1483 if (p1sr & PxSR_TXFLOW) 1484 printf(",txpause"); 1485 printf("\n"); 1486 /* show resolved mii(4) parameters to compare against above */ 1487 printf("MII spd%d", 1488 (int)(sc->sc_ethercom.ec_if.if_baudrate / IF_Mbps(1))); 1489 if (mii->mii_media_active & IFM_FDX) 1490 printf(",full-duplex"); 1491 if (mii->mii_media_active & IFM_FLOW) { 1492 printf(",flowcontrol"); 1493 if (mii->mii_media_active & IFM_ETH_RXPAUSE) 1494 printf(",rxpause"); 1495 if (mii->mii_media_active & IFM_ETH_TXPAUSE) 1496 printf(",txpause"); 1497 } 1498 printf("\n"); 1499 #endif 1500 /* Get flow control negotiation result. */ 1501 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 1502 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) 1503 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 1504 1505 /* Adjust MAC PAUSE flow control. */ 1506 if ((mii->mii_media_active & IFM_FDX) 1507 && (sc->sc_flowflags & IFM_ETH_TXPAUSE)) 1508 sc->sc_txc |= TXC_FCE; 1509 else 1510 sc->sc_txc &= ~TXC_FCE; 1511 if ((mii->mii_media_active & IFM_FDX) 1512 && (sc->sc_flowflags & IFM_ETH_RXPAUSE)) 1513 sc->sc_rxc |= RXC_FCE; 1514 else 1515 sc->sc_rxc &= ~RXC_FCE; 1516 CSR_WRITE_4(sc, MDTXC, sc->sc_txc); 1517 CSR_WRITE_4(sc, MDRXC, sc->sc_rxc); 1518 #if KSE_LINKDEBUG == 1 1519 printf("%ctxfce, %crxfce\n", 1520 (sc->sc_txc & TXC_FCE) ? '+' : '-', 1521 (sc->sc_rxc & RXC_FCE) ? '+' : '-'); 1522 #endif 1523 } 1524 1525 #ifdef KSE_EVENT_COUNTERS 1526 static void 1527 stat_tick(void *arg) 1528 { 1529 struct kse_softc *sc = arg; 1530 struct ksext *ee = &sc->sc_ext; 1531 int nport, p, i, val; 1532 1533 nport = (sc->sc_chip == 0x8842) ? 3 : 1; 1534 for (p = 0; p < nport; p++) { 1535 for (i = 0; i < 32; i++) { 1536 val = 0x1c00 | (p * 0x20 + i); 1537 CSR_WRITE_2(sc, IACR, val); 1538 do { 1539 val = CSR_READ_2(sc, IADR5) << 16; 1540 } while ((val & (1U << 30)) == 0); 1541 if (val & (1U << 31)) { 1542 (void)CSR_READ_2(sc, IADR4); 1543 val = 0x3fffffff; /* has made overflow */ 1544 } 1545 else { 1546 val &= 0x3fff0000; /* 29:16 */ 1547 val |= CSR_READ_2(sc, IADR4); /* 15:0 */ 1548 } 1549 ee->pev[p][i].ev_count += val; /* i (0-31) */ 1550 } 1551 CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p); 1552 ee->pev[p][32].ev_count = CSR_READ_2(sc, IADR4); /* 32 */ 1553 CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p * 3 + 1); 1554 ee->pev[p][33].ev_count = CSR_READ_2(sc, IADR4); /* 33 */ 1555 } 1556 callout_schedule(&sc->sc_stat_ch, hz * 60); 1557 } 1558 1559 static void 1560 zerostats(struct kse_softc *sc) 1561 { 1562 struct ksext *ee = &sc->sc_ext; 1563 int nport, p, i, val; 1564 1565 /* Make sure all the HW counters get zero */ 1566 nport = (sc->sc_chip == 0x8842) ? 3 : 1; 1567 for (p = 0; p < nport; p++) { 1568 for (i = 0; i < 31; i++) { 1569 val = 0x1c00 | (p * 0x20 + i); 1570 CSR_WRITE_2(sc, IACR, val); 1571 do { 1572 val = CSR_READ_2(sc, IADR5) << 16; 1573 } while ((val & (1U << 30)) == 0); 1574 (void)CSR_READ_2(sc, IADR4); 1575 ee->pev[p][i].ev_count = 0; 1576 } 1577 } 1578 } 1579 #endif 1580