/* $NetBSD: if_kse.c,v 1.55 2020/09/20 20:15:11 nisimura Exp $ */

/*-
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Tohru Nishimura.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Micrel 8841/8842 10/100 PCI ethernet driver
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.55 2020/09/20 20:15:11 nisimura Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/intr.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <net/bpf.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#define KSE_LINKDEBUG 0

#define CSR_READ_4(sc, off) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off))
#define CSR_WRITE_4(sc, off, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val))
#define CSR_READ_2(sc, off) \
	bus_space_read_2((sc)->sc_st, (sc)->sc_sh, (off))
#define CSR_WRITE_2(sc, off, val) \
	bus_space_write_2((sc)->sc_st, (sc)->sc_sh, (off), (val))

#define MDTXC	0x000	/* DMA transmit control */
#define MDRXC	0x004	/* DMA receive control */
#define MDTSC	0x008	/* DMA transmit start */
#define MDRSC	0x00c	/* DMA receive start */
#define TDLB	0x010	/* transmit descriptor list base */
#define RDLB	0x014	/* receive descriptor list base */
#define MTR0	0x020	/* multicast table 31:0 */
#define MTR1	0x024	/* multicast table 63:32 */
#define INTEN	0x028	/* interrupt enable */
#define INTST	0x02c	/* interrupt status */
#define MAAL0	0x080	/* additional MAC address 0 low */
#define MAAH0	0x084	/* additional MAC address 0 high */
#define MARL	0x200	/* MAC address low */
#define MARM	0x202	/* MAC address middle */
#define MARH	0x204	/* MAC address high */
#define GRR	0x216	/* global reset */
#define SIDER	0x400	/* switch ID and function enable */
#define SGCR3	0x406	/* switch function control 3 */
#define CR3_USEHDX	(1U<<6)	/* use half-duplex 8842 host port */
#define CR3_USEFC	(1U<<5)	/* use flowcontrol 8842 host port */
#define IACR	0x4a0	/* indirect access control */
#define IADR1	0x4a2	/* indirect access data 66:63 */
#define IADR2	0x4a4	/* indirect access data 47:32 */
#define IADR3	0x4a6	/* indirect access data 63:48 */
#define IADR4	0x4a8	/* indirect access data 15:0 */
#define IADR5	0x4aa	/* indirect access data 31:16 */
#define P1CR4	0x512	/* port 1 control 4 */
#define P1SR	0x514	/* port 1 status */
#define P2CR4	0x532	/* port 2 control 4 */
#define P2SR	0x534	/* port 2 status */
#define PxCR_STARTNEG	(1U<<9)		/* restart auto negotiation */
#define PxCR_AUTOEN	(1U<<7)		/* auto negotiation enable */
#define PxCR_SPD100	(1U<<6)		/* force speed 100 */
#define PxCR_USEFDX	(1U<<5)		/* force full duplex */
#define PxCR_USEFC	(1U<<4)		/* advertise pause flow control */
#define PxSR_ACOMP	(1U<<6)		/* auto negotiation completed */
#define PxSR_SPD100	(1U<<10)	/* speed is 100Mbps */
#define PxSR_FDX	(1U<<9)		/* full duplex */
#define PxSR_LINKUP	(1U<<5)		/* link is good */
#define PxSR_RXFLOW	(1U<<12)	/* receive flow control active */
#define PxSR_TXFLOW	(1U<<11)	/* transmit flow control active */
#define P1VIDCR	0x504	/* port 1 vtag */
#define P2VIDCR	0x524	/* port 2 vtag */
#define P3VIDCR	0x544	/* 8842 host vtag */

#define TXC_BS_MSK	0x3f000000	/* burst size */
#define TXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define TXC_UCG		(1U<<18)	/* generate UDP checksum */
#define TXC_TCG		(1U<<17)	/* generate TCP checksum */
#define TXC_ICG		(1U<<16)	/* generate IP checksum */
#define TXC_FCE		(1U<<9)		/* generate PAUSE to moderate Rx lvl */
#define TXC_EP		(1U<<2)		/* enable automatic padding */
#define TXC_AC		(1U<<1)		/* add CRC to frame */
#define TXC_TEN		(1)		/* enable DMA to run */

#define RXC_BS_MSK	0x3f000000	/* burst size */
#define RXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define RXC_IHAE	(1U<<19)	/* IP header alignment enable */
#define RXC_UCC		(1U<<18)	/* run UDP checksum */
#define RXC_TCC		(1U<<17)	/* run TCP checksum */
#define RXC_ICC		(1U<<16)	/* run IP checksum */
#define RXC_FCE		(1U<<9)		/* accept PAUSE to throttle Tx */
#define RXC_RB		(1U<<6)		/* receive broadcast frame */
#define RXC_RM		(1U<<5)		/* receive all multicast (inc. RB) */
#define RXC_RU		(1U<<4)		/* receive 16 additional unicasts */
#define RXC_RE		(1U<<3)		/* accept error frame */
#define RXC_RA		(1U<<2)		/* receive all frame */
#define RXC_MHTE	(1U<<1)		/* use multicast hash table */
#define RXC_REN		(1)		/* enable DMA to run */

#define INT_DMLCS	(1U<<31)	/* link status change */
#define INT_DMTS	(1U<<30)	/* sending desc. has posted Tx done */
#define INT_DMRS	(1U<<29)	/* frame was received */
#define INT_DMRBUS	(1U<<27)	/* Rx descriptor pool is full */
#define INT_DMxPSS	(3U<<25)	/* 26:25 DMA Tx/Rx have stopped */

#define T0_OWN		(1U<<31)	/* desc is ready to Tx */

#define R0_OWN		(1U<<31)	/* desc is empty */
#define R0_FS		(1U<<30)	/* first segment of frame */
#define R0_LS		(1U<<29)	/* last segment of frame */
#define R0_IPE		(1U<<28)	/* IP checksum error */
#define R0_TCPE		(1U<<27)	/* TCP checksum error */
#define R0_UDPE		(1U<<26)	/* UDP checksum error */
#define R0_ES		(1U<<25)	/* error summary */
#define R0_MF		(1U<<24)	/* multicast frame */
#define R0_SPN		0x00300000	/* 21:20 switch port 1/2 */
#define R0_ALIGN	0x00300000	/* 21:20 (KSZ8692P) Rx align amount */
#define R0_RE		(1U<<19)	/* MII reported error */
#define R0_TL		(1U<<18)	/* frame too long, beyond 1518 */
#define R0_RF		(1U<<17)	/* damaged runt frame */
#define R0_CE		(1U<<16)	/* CRC error */
#define R0_FT		(1U<<15)	/* frame type */
#define R0_FL_MASK	0x7ff		/* frame length 10:0 */

#define T1_IC		(1U<<31)	/* post interrupt on complete */
#define T1_FS		(1U<<30)	/* first segment of frame */
#define T1_LS		(1U<<29)	/* last segment of frame */
#define T1_IPCKG	(1U<<28)	/* generate IP checksum */
#define T1_TCPCKG	(1U<<27)	/* generate TCP checksum */
#define T1_UDPCKG	(1U<<26)	/* generate UDP checksum */
#define T1_TER		(1U<<25)	/* end of ring */
#define T1_SPN		0x00300000	/* 21:20 switch port 1/2 */
#define T1_TBS_MASK	0x7ff		/* segment size 10:0 */

#define R1_RER		(1U<<25)	/* end of ring */
#define R1_RBS_MASK	0x7fc		/* segment size 10:0 */

#define KSE_NTXSEGS		16
#define KSE_TXQUEUELEN		64
#define KSE_TXQUEUELEN_MASK	(KSE_TXQUEUELEN - 1)
#define KSE_TXQUEUE_GC		(KSE_TXQUEUELEN / 4)
#define KSE_NTXDESC		256
#define KSE_NTXDESC_MASK	(KSE_NTXDESC - 1)
#define KSE_NEXTTX(x)		(((x) + 1) & KSE_NTXDESC_MASK)
#define KSE_NEXTTXS(x)		(((x) + 1) & KSE_TXQUEUELEN_MASK)

#define KSE_NRXDESC		64
#define KSE_NRXDESC_MASK	(KSE_NRXDESC - 1)
#define KSE_NEXTRX(x)		(((x) + 1) & KSE_NRXDESC_MASK)

struct tdes {
	uint32_t t0, t1, t2, t3;
};

struct rdes {
	uint32_t r0, r1, r2, r3;
};

struct kse_control_data {
	struct tdes kcd_txdescs[KSE_NTXDESC];
	struct rdes kcd_rxdescs[KSE_NRXDESC];
};
#define KSE_CDOFF(x)	offsetof(struct kse_control_data, x)
#define KSE_CDTXOFF(x)	KSE_CDOFF(kcd_txdescs[(x)])
#define KSE_CDRXOFF(x)	KSE_CDOFF(kcd_rxdescs[(x)])

struct kse_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

struct kse_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

struct kse_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_memsize;		/* csr map size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	struct ethercom sc_ethercom;	/* Ethernet common data */
	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* mii 8841 */
	struct ifmedia sc_media;	/* ifmedia 8842 */
	int sc_flowflags;		/* 802.3x PAUSE flow control */

	callout_t sc_tick_ch;		/* MII tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	struct kse_control_data *sc_control_data;
#define sc_txdescs	sc_control_data->kcd_txdescs
#define sc_rxdescs	sc_control_data->kcd_rxdescs

	struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN];
	struct kse_rxsoft sc_rxsoft[KSE_NRXDESC];
	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */
	int sc_rxptr;			/* next ready Rx descriptor/descsoft */

	uint32_t sc_txc, sc_rxc;
	uint32_t sc_t1csum;
	int sc_mcsum;
	uint32_t sc_inten;
	uint32_t sc_chip;

#ifdef KSE_EVENT_COUNTERS
	callout_t sc_stat_ch;		/* statistics counter callout */
	struct ksext {
		char evcntname[3][8];
		struct evcnt pev[3][34];
	} sc_ext;			/* switch statistics */
#endif
};

#define KSE_CDTXADDR(sc, x)	((sc)->sc_cddma + KSE_CDTXOFF((x)))
#define KSE_CDRXADDR(sc, x)	((sc)->sc_cddma + KSE_CDRXOFF((x)))

#define KSE_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > KSE_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    KSE_CDTXOFF(__x), sizeof(struct tdes) *		\
		    (KSE_NTXDESC - __x), (ops));			\
		__n -= (KSE_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define KSE_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
} while (/*CONSTCOND*/0)

#define KSE_INIT_RXDESC(sc, x)						\
do {									\
	struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr;		\
	__rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */;		\
	__rxd->r0 = R0_OWN;						\
	KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)

u_int kse_burstsize = 8;	/* DMA burst length tuning knob */

#ifdef KSEDIAGNOSTIC
u_int kse_monitor_rxintr;	/* fragmented UDP csum HW bug hook */
#endif

static int kse_match(device_t, cfdata_t, void *);
static void kse_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(kse, sizeof(struct kse_softc),
    kse_match, kse_attach, NULL, NULL);

static int kse_ioctl(struct ifnet *, u_long, void *);
static void kse_start(struct ifnet *);
static void kse_watchdog(struct ifnet *);
static int kse_init(struct ifnet *);
static void kse_stop(struct ifnet *, int);
static void kse_reset(struct kse_softc *);
static void kse_set_rcvfilt(struct kse_softc *);
static int add_rxbuf(struct kse_softc *, int);
static void rxdrain(struct kse_softc *);
static int kse_intr(void *);
static void rxintr(struct kse_softc *);
static void txreap(struct kse_softc *);
static void lnkchg(struct kse_softc *);
static int kse_ifmedia_upd(struct ifnet *);
static void kse_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void nopifmedia_sts(struct ifnet *, struct ifmediareq *);
static void phy_tick(void *);
int kse_mii_readreg(device_t, int, int, uint16_t *);
int kse_mii_writereg(device_t, int, int, uint16_t);
void kse_mii_statchg(struct ifnet *);
#ifdef KSE_EVENT_COUNTERS
static void stat_tick(void *);
static void zerostats(struct kse_softc *);
#endif

static int
kse_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MICREL &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8842 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8841) &&
	    PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
		return 1;

	return 0;
}

static void
kse_attach(device_t parent, device_t self, void *aux)
{
	struct kse_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifmedia *ifm;
	uint8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	int i, error, nseg;
	char intrbuf[PCI_INTRSTR_LEN];

	aprint_normal(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n",
	    PCI_PRODUCT(pa->pa_id), PCI_REVISION(pa->pa_class));

	if (pci_mapreg_map(pa, 0x10,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
	    0, &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_memsize) != 0) {
		aprint_error_dev(self, "unable to map device registers\n");
		return;
	}

	/* Make sure bus mastering is enabled. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/* Power up chip if necessary. */
	if ((error = pci_activate(pc, pa->pa_tag, self, NULL))
	    && error != EOPNOTSUPP) {
		aprint_error_dev(self, "cannot activate %d\n", error);
		return;
	}

	/* Map and establish our interrupt. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "unable to map interrupt\n");
		goto fail;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, kse_intr, sc,
	    device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_chip = PCI_PRODUCT(pa->pa_id);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	i = CSR_READ_2(sc, MARL);
	enaddr[5] = i;
	enaddr[4] = i >> 8;
	i = CSR_READ_2(sc, MARM);
	enaddr[3] = i;
	enaddr[2] = i >> 8;
	i = CSR_READ_2(sc, MARH);
	enaddr[1] = i;
	enaddr[0] = i >> 8;
	aprint_normal_dev(self,
	    "Ethernet address %s\n", ether_sprintf(enaddr));

	/*
	 * Enable chip function.
	 */
	CSR_WRITE_2(sc, SIDER, 1);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
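	/*
	 * This follows the usual bus_dma(9) sequence: bus_dmamem_alloc()
	 * reserves physical memory for the descriptor rings,
	 * bus_dmamem_map() makes it visible in kernel virtual space, and
	 * bus_dmamap_create()/bus_dmamap_load() build the map whose bus
	 * address (sc_cddma) is later handed to the chip through the
	 * TDLB/RDLB registers in kse_init().
	 */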
	error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
	if (error != 0) {
		aprint_error_dev(self,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}
	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct kse_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(self,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct kse_control_data), 1,
	    sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap);
	if (error != 0) {
		aprint_error_dev(self,
		    "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0);
	if (error != 0) {
		aprint_error_dev(self,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    KSE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}
	for (i = 0; i < KSE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	mii->mii_ifp = ifp;
	mii->mii_readreg = kse_mii_readreg;
	mii->mii_writereg = kse_mii_writereg;
	mii->mii_statchg = kse_mii_statchg;

	/* Initialize ifmedia structures. */
	sc->sc_flowflags = 0;
	if (sc->sc_chip == 0x8841) {
		/* use port 1 builtin PHY as index 1 device */
		sc->sc_ethercom.ec_mii = mii;
		ifm = &mii->mii_media;
		ifmedia_init(ifm, 0, kse_ifmedia_upd, kse_ifmedia_sts);
		mii_attach(sc->sc_dev, mii, 0xffffffff, 1 /* PHY1 */,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
		if (LIST_FIRST(&mii->mii_phys) == NULL) {
			ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
			ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
		} else
			ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
	} else {
		/*
		 * Pretend 100FDX with no alternative media selection.
		 * The 8842 MAC is tied to a built-in 3-port switch which can
		 * apply 4-level prioritized rate control to either the Tx or
		 * Rx direction of any port.  This driver, however, leaves
		 * the rate unlimited, intending a 100Mbps maximum.  The 2
		 * external ports run in autonegotiation mode, and this
		 * driver provides no means to manipulate or inspect their
		 * operational details.
		 */
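		/*
		 * The fixed ifmedia entry installed below exists so that
		 * media queries report something sensible; nopifmedia_sts()
		 * always answers 100FDX with flow control active.
		 */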
		sc->sc_ethercom.ec_ifmedia = ifm = &sc->sc_media;
		ifmedia_init(ifm, 0, NULL, nopifmedia_sts);
		ifmedia_add(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX);

		aprint_normal_dev(self,
		    "10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n");
	}
	ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if user has requested */

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kse_ioctl;
	ifp->if_start = kse_start;
	ifp->if_watchdog = kse_watchdog;
	ifp->if_init = kse_init;
	ifp->if_stop = kse_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * capable of 802.1Q VLAN-sized frames and hw assisted tagging.
	 * can do IPv4, TCPv4, and UDPv4 checksums in hardware.
	 */
	sc->sc_ethercom.ec_capabilities = ETHERCAP_VLAN_MTU;
	ifp->if_capabilities =
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, phy_tick, sc);

#ifdef KSE_EVENT_COUNTERS
	int p = (sc->sc_chip == 0x8842) ? 3 : 1;
	for (i = 0; i < p; i++) {
		struct ksext *ee = &sc->sc_ext;
		snprintf(ee->evcntname[i], sizeof(ee->evcntname[i]),
		    "%s.%d", device_xname(sc->sc_dev), i+1);
		evcnt_attach_dynamic(&ee->pev[i][0], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxLoPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][1], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxHiPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][2], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxUndersizePkt");
		evcnt_attach_dynamic(&ee->pev[i][3], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxFragments");
		evcnt_attach_dynamic(&ee->pev[i][4], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxOversize");
		evcnt_attach_dynamic(&ee->pev[i][5], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxJabbers");
		evcnt_attach_dynamic(&ee->pev[i][6], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxSymbolError");
		evcnt_attach_dynamic(&ee->pev[i][7], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxCRCError");
		evcnt_attach_dynamic(&ee->pev[i][8], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxAlignmentError");
		evcnt_attach_dynamic(&ee->pev[i][9], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxControl8808Pkts");
		evcnt_attach_dynamic(&ee->pev[i][10], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxPausePkts");
		evcnt_attach_dynamic(&ee->pev[i][11], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxBroadcast");
		evcnt_attach_dynamic(&ee->pev[i][12], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxMulticast");
		evcnt_attach_dynamic(&ee->pev[i][13], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxUnicast");
		evcnt_attach_dynamic(&ee->pev[i][14], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx64Octets");
		evcnt_attach_dynamic(&ee->pev[i][15], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx65To127Octets");
		evcnt_attach_dynamic(&ee->pev[i][16], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx128To255Octets");
		evcnt_attach_dynamic(&ee->pev[i][17], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx255To511Octets");
		evcnt_attach_dynamic(&ee->pev[i][18], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx512To1023Octets");
		evcnt_attach_dynamic(&ee->pev[i][19], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx1024To1522Octets");
		evcnt_attach_dynamic(&ee->pev[i][20], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxLoPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][21], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxHiPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][22], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxLateCollision");
		evcnt_attach_dynamic(&ee->pev[i][23], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxPausePkts");
		evcnt_attach_dynamic(&ee->pev[i][24], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxBroadcastPkts");
		evcnt_attach_dynamic(&ee->pev[i][25], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxMulticastPkts");
		evcnt_attach_dynamic(&ee->pev[i][26], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxUnicastPkts");
		evcnt_attach_dynamic(&ee->pev[i][27], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxDeferred");
		evcnt_attach_dynamic(&ee->pev[i][28], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxTotalCollision");
		evcnt_attach_dynamic(&ee->pev[i][29], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxExcessiveCollision");
		evcnt_attach_dynamic(&ee->pev[i][30], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxSingleCollision");
		evcnt_attach_dynamic(&ee->pev[i][31], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxMultipleCollision");
		evcnt_attach_dynamic(&ee->pev[i][32], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxDropPkts");
		evcnt_attach_dynamic(&ee->pev[i][33], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxDropPkts");
	}
	callout_init(&sc->sc_stat_ch, 0);
	callout_setfunc(&sc->sc_stat_ch, stat_tick, sc);
#endif
	return;

 fail_5:
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct kse_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_0:
	pci_intr_disestablish(pc, sc->sc_ih);
 fail:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_memsize);
	return;
}

static int
kse_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct kse_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifmedia *ifm;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		ifm = (sc->sc_chip == 0x8841)
		    ? &sc->sc_mii.mii_media : &sc->sc_media;
		error = ifmedia_ioctl(ifp, ifr, ifm, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error != ENETRESET)
			break;
		error = 0;
		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			kse_set_rcvfilt(sc);
		}
		break;
	}

	splx(s);

	return error;
}

static int
kse_init(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	uint32_t paddr;
	int i, error = 0;

	/* cancel pending I/O */
	kse_stop(ifp, 0);

	/* reset all registers but PCI configuration */
	kse_reset(sc);

	/* craft Tx descriptor ring */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) {
		sc->sc_txdescs[i].t3 = paddr;
		paddr += sizeof(struct tdes);
	}
	sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0);
	KSE_CDTXSYNC(sc, 0, KSE_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = KSE_NTXDESC;
	sc->sc_txnext = 0;

	for (i = 0; i < KSE_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = KSE_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/* craft Rx descriptor ring */
	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
	for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) {
		sc->sc_rxdescs[i].r3 = paddr;
		paddr += sizeof(struct rdes);
	}
	sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0);
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
			if ((error = add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    i, error);
				rxdrain(sc);
				goto out;
			}
		}
		else
			KSE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* hand Tx/Rx rings to HW */
	CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0));
	CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0));

	sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC;
	sc->sc_rxc = RXC_REN | RXC_RU | RXC_RB;
	sc->sc_t1csum = sc->sc_mcsum = 0;
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
		sc->sc_rxc |= RXC_ICC;
		sc->sc_mcsum |= M_CSUM_IPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
		sc->sc_txc |= TXC_ICG;
		sc->sc_t1csum |= T1_IPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) {
		sc->sc_rxc |= RXC_TCC;
		sc->sc_mcsum |= M_CSUM_TCPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) {
		sc->sc_txc |= TXC_TCG;
		sc->sc_t1csum |= T1_TCPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) {
		sc->sc_rxc |= RXC_UCC;
		sc->sc_mcsum |= M_CSUM_UDPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) {
		sc->sc_txc |= TXC_UCG;
		sc->sc_t1csum |= T1_UDPCKG;
	}
	sc->sc_txc |= (kse_burstsize << TXC_BS_SFT);
	sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT);

	if (sc->sc_chip == 0x8842) {
		sc->sc_txc |= TXC_FCE;
		sc->sc_rxc |= RXC_FCE;
		CSR_WRITE_2(sc, SGCR3,
		    CSR_READ_2(sc, SGCR3) | CR3_USEFC);
	}

	/* accept multicast frame or run promisc mode */
	kse_set_rcvfilt(sc);

	/* set current media */
	if (sc->sc_chip == 0x8841)
		(void)kse_ifmedia_upd(ifp);

	/* enable transmitter and receiver */
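	/*
	 * Writing MDRSC below kicks the Rx DMA engine so it starts
	 * fetching the descriptors handed over via RDLB above; the Tx
	 * side is kicked per packet with an MDTSC write in kse_start().
	 */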
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
	CSR_WRITE_4(sc, MDRSC, 1);

	/* enable interrupts */
	sc->sc_inten = INT_DMTS | INT_DMRS | INT_DMRBUS;
	if (sc->sc_chip == 0x8841)
		sc->sc_inten |= INT_DMLCS;
	CSR_WRITE_4(sc, INTST, ~0);
	CSR_WRITE_4(sc, INTEN, sc->sc_inten);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	if (sc->sc_chip == 0x8841) {
		/* start one second timer */
		callout_schedule(&sc->sc_tick_ch, hz);
	}
#ifdef KSE_EVENT_COUNTERS
	/* start the 1-minute statistics gathering timer; should be tunable */
	zerostats(sc);
	callout_schedule(&sc->sc_stat_ch, hz * 60);
#endif

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_dev(sc->sc_dev, "interface not running\n");
	}
	return error;
}

static void
kse_stop(struct ifnet *ifp, int disable)
{
	struct kse_softc *sc = ifp->if_softc;
	struct kse_txsoft *txs;
	int i;

	if (sc->sc_chip == 0x8841)
		callout_stop(&sc->sc_tick_ch);
#ifdef KSE_EVENT_COUNTERS
	callout_stop(&sc->sc_stat_ch);
#endif
	sc->sc_txc &= ~TXC_TEN;
	sc->sc_rxc &= ~RXC_REN;
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);

	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		rxdrain(sc);
}

static void
kse_reset(struct kse_softc *sc)
{

	/* software reset */
	CSR_WRITE_2(sc, GRR, 1);
	delay(1000); /* PDF does not mention the delay amount */
	CSR_WRITE_2(sc, GRR, 0);

	/* enable switch function */
	CSR_WRITE_2(sc, SIDER, 1);
}

static void
kse_watchdog(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	txreap(sc);

	if (sc->sc_txfree != KSE_NTXDESC) {
		aprint_error_dev(sc->sc_dev,
		    "device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
		if_statinc(ifp, if_oerrors);

		/* Reset the interface. */
		kse_init(ifp);
	}
	else if (ifp->if_flags & IFF_DEBUG)
		aprint_error_dev(sc->sc_dev, "recovered from device timeout\n");

	/* Try to get more packets going. */
	kse_start(ifp);
}

static void
kse_start(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct kse_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t tdes0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Remember the previous number of free descriptors. */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
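	/*
	 * Each pass of the loop below polls one mbuf chain off the send
	 * queue, loads it with bus_dmamap_load_mbuf(), and fills one tdes
	 * per DMA segment.  The first segment's T0_OWN bit is written
	 * last, so the hardware never sees a partially built chain.
	 */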
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (sc->sc_txsfree < KSE_TXQUEUE_GC) {
			txreap(sc);
			if (sc->sc_txsfree == 0)
				break;
		}
		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				aprint_error_dev(sc->sc_dev,
				    "Tx packet consumes too many "
				    "DMA segments, dropping...\n");
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		tdes0 = 0; /* to postpone 1st segment T0_OWN write */
		lasttx = -1;
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = KSE_NEXTTX(nexttx)) {
			struct tdes *tdes = &sc->sc_txdescs[nexttx];
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			tdes->t2 = dmamap->dm_segs[seg].ds_addr;
			tdes->t1 = sc->sc_t1csum
			    | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK);
			tdes->t0 = tdes0;
			tdes0 = T0_OWN; /* 2nd and other segments */
			lasttx = nexttx;
		}
		/*
		 * An outgoing NFS mbuf must be unloaded as soon as Tx has
		 * completed.  Without T1_IC the NFS mbuf would be left
		 * unack'ed for an excessive time and NFS would stop making
		 * progress until kse_watchdog() calls txreap() to reclaim
		 * the unack'ed mbuf.  It's painful to traverse every mbuf
		 * chain to determine whether someone is waiting for Tx
		 * completion.
		 */
		m = m0;
		do {
			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
				sc->sc_txdescs[lasttx].t1 |= T1_IC;
				break;
			}
		} while ((m = m->m_next) != NULL);

		/* Write deferred 1st segment T0_OWN at the final stage */
		sc->sc_txdescs[lasttx].t1 |= T1_LS;
		sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS;
		sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN;
		KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Tell DMA start transmit */
		CSR_WRITE_4(sc, MDTSC, 1);

		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndesc = dmamap->dm_nsegs;

		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;
		sc->sc_txsfree--;
		sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext);
		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

static void
kse_set_rcvfilt(struct kse_softc *sc)
{
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	uint32_t crc, mchash[2];
	int i;

	sc->sc_rxc &= ~(RXC_MHTE | RXC_RM | RXC_RA);

	/* clear perfect match filter and prepare mcast hash table */
	for (i = 0; i < 16; i++)
		CSR_WRITE_4(sc, MAAH0 + i*8, 0);
	crc = mchash[0] = mchash[1] = 0;

	ETHER_LOCK(ec);
	if (ifp->if_flags & IFF_PROMISC) {
		ec->ec_flags |= ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);
		/* run promisc. mode */
		sc->sc_rxc |= RXC_RA;
		goto update;
	}
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	i = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			/* accept all multicast */
			sc->sc_rxc |= RXC_RM;
			goto update;
		}
#if KSE_MCASTDEBUG == 1
		printf("[%d] %s\n", i, ether_sprintf(enm->enm_addrlo));
#endif
		if (i < 16) {
			/* use 16 additional MAC addr to accept mcast */
			uint32_t addr;
			uint8_t *ep = enm->enm_addrlo;
			addr = (ep[3] << 24) | (ep[2] << 16)
			    | (ep[1] << 8) | ep[0];
			CSR_WRITE_4(sc, MAAL0 + i*8, addr);
			addr = (ep[5] << 8) | ep[4];
			CSR_WRITE_4(sc, MAAH0 + i*8, addr | (1U << 31));
		} else {
			/* use hash table when too many */
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
		}
		ETHER_NEXT_MULTI(step, enm);
		i++;
	}
	ETHER_UNLOCK(ec);

	if (crc)
		sc->sc_rxc |= RXC_MHTE;
	CSR_WRITE_4(sc, MTR0, mchash[0]);
	CSR_WRITE_4(sc, MTR1, mchash[1]);
 update:
	/* With RA or RM, MHTE/MTR0/MTR1 are never consulted. */
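	/*
	 * MTR0/MTR1 together form a 64-bit hash bitmap: the word is
	 * selected by the top bit of the little-endian CRC32 of the
	 * destination address, and the bit within it by CRC bits 30:26,
	 * matching the indexing used above.
	 */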
	return;
}

static int
add_rxbuf(struct kse_softc *sc, int idx)
{
	struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", idx, error);
		panic("kse_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	KSE_INIT_RXDESC(sc, idx);

	return 0;
}

static void
rxdrain(struct kse_softc *sc)
{
	struct kse_rxsoft *rxs;
	int i;

	for (i = 0; i < KSE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

static int
kse_intr(void *arg)
{
	struct kse_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t isr;

	if ((isr = CSR_READ_4(sc, INTST)) == 0)
		return 0;

	if (isr & INT_DMRS)
		rxintr(sc);
	if (isr & INT_DMTS)
		txreap(sc);
	if (isr & INT_DMLCS)
		lnkchg(sc);
	if (isr & INT_DMRBUS)
		aprint_error_dev(sc->sc_dev, "Rx descriptor full\n");

	CSR_WRITE_4(sc, INTST, isr);

	if (ifp->if_flags & IFF_RUNNING)
		if_schedule_deferred_start(ifp);

	return 1;
}

static void
rxintr(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		KSE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].r0;

		if (rxstat & R0_OWN) /* desc is left empty */
			break;

		/* R0_FS | R0_LS must have been marked for this desc */

		if (rxstat & R0_ES) {
			if_statinc(ifp, if_ierrors);
#define PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				aprint_error_dev(sc->sc_dev,		\
				    "%s\n", str)
			PRINTERR(R0_TL, "frame too long");
			PRINTERR(R0_RF, "runt frame");
			PRINTERR(R0_CE, "bad FCS");
#undef PRINTERR
			KSE_INIT_RXDESC(sc, i);
			continue;
		}

		/* HW errata; frame might be too small or too large */

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		len = rxstat & R0_FL_MASK;
		len -= ETHER_CRC_LEN;	/* Trim CRC off */
		m = rxs->rxs_mbuf;

		if (add_rxbuf(sc, i) != 0) {
			if_statinc(ifp, if_ierrors);
			KSE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat,
			    rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			continue;
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		if (sc->sc_mcsum) {
			m->m_pkthdr.csum_flags |= sc->sc_mcsum;
			if (rxstat & R0_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
			if (rxstat & (R0_TCPE | R0_UDPE))
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
		if_percpuq_enqueue(ifp->if_percpuq, m);
#ifdef KSEDIAGNOSTIC
		if (kse_monitor_rxintr > 0) {
			aprint_error_dev(sc->sc_dev,
			    "m stat %x data %p len %d\n",
			    rxstat, m->m_data, m->m_len);
		}
#endif
	}
	sc->sc_rxptr = i;
}

static void
txreap(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_txsoft *txs;
	uint32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN;
	     i = KSE_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;

		if (txstat & T0_OWN) /* desc is still in use */
			break;

		/* There is no way to tell transmission status per frame */

		if_statinc(ifp, if_opackets);

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}
	sc->sc_txsdirty = i;
	if (sc->sc_txsfree == KSE_TXQUEUELEN)
		ifp->if_timer = 0;
}

static void
lnkchg(struct kse_softc *sc)
{
	struct ifmediareq ifmr;

#if KSE_LINKDEBUG == 1
	uint16_t p1sr = CSR_READ_2(sc, P1SR);
	printf("link %s detected\n", (p1sr & PxSR_LINKUP) ? "up" : "down");
#endif
	kse_ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
}

static int
kse_ifmedia_upd(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_mii.mii_media;
	uint16_t p1cr4;

	p1cr4 = 0;
	if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_AUTO) {
		p1cr4 |= PxCR_STARTNEG;	/* restart AN */
		p1cr4 |= PxCR_AUTOEN;	/* enable AN */
		p1cr4 |= PxCR_USEFC;	/* advertise flow control pause */
		p1cr4 |= 0xf;		/* adv. 100FDX,100HDX,10FDX,10HDX */
	} else {
		if (IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_100_TX)
			p1cr4 |= PxCR_SPD100;
		if (ifm->ifm_media & IFM_FDX)
			p1cr4 |= PxCR_USEFDX;
	}
	CSR_WRITE_2(sc, P1CR4, p1cr4);
#if KSE_LINKDEBUG == 1
	printf("P1CR4: %04x\n", p1cr4);
#endif
	return 0;
}

static void
kse_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct kse_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
	    sc->sc_flowflags;
}

static void
nopifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct kse_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;

#if KSE_LINKDEBUG == 2
	printf("p1sr: %04x, p2sr: %04x\n",
	    CSR_READ_2(sc, P1SR), CSR_READ_2(sc, P2SR));
#endif

	/* 8842 MAC pretends 100FDX all the time */
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = ifm->ifm_cur->ifm_media |
	    IFM_FLOW | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
}

static void
phy_tick(void *arg)
{
	struct kse_softc *sc = arg;
	struct mii_data *mii = &sc->sc_mii;
	int s;

	s = splnet();
	mii_tick(mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

static const uint16_t phy1csr[] = {
	/* 0 BMCR */	0x4d0,
	/* 1 BMSR */	0x4d2,
	/* 2 PHYID1 */	0x4d6,	/* 0x0022 - PHY1HR */
	/* 3 PHYID2 */	0x4d4,	/* 0x1430 - PHY1LR */
	/* 4 ANAR */	0x4d8,
	/* 5 ANLPAR */	0x4da,
};

int
kse_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	struct kse_softc *sc = device_private(self);

	if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
		return EINVAL;
	*val = CSR_READ_2(sc, phy1csr[reg]);
	return 0;
}

int
kse_mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
	struct kse_softc *sc = device_private(self);

	if (phy != 1 || reg >= __arraycount(phy1csr) || reg < 0)
		return EINVAL;
	CSR_WRITE_2(sc, phy1csr[reg], val);
	return 0;
}

void
kse_mii_statchg(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

#if KSE_LINKDEBUG == 1
	/* decode P1SR register value */
	uint16_t p1sr = CSR_READ_2(sc, P1SR);
	printf("P1SR %04x, spd%d", p1sr, (p1sr & PxSR_SPD100) ? 100 : 10);
	if (p1sr & PxSR_FDX)
		printf(",full-duplex");
	if (p1sr & PxSR_RXFLOW)
		printf(",rxpause");
	if (p1sr & PxSR_TXFLOW)
		printf(",txpause");
	printf("\n");
	/* show resolved mii(4) parameters to compare against above */
	printf("MII spd%d",
	    (int)(sc->sc_ethercom.ec_if.if_baudrate / IF_Mbps(1)));
	if (mii->mii_media_active & IFM_FDX)
		printf(",full-duplex");
	if (mii->mii_media_active & IFM_FLOW) {
		printf(",flowcontrol");
		if (mii->mii_media_active & IFM_ETH_RXPAUSE)
			printf(",rxpause");
		if (mii->mii_media_active & IFM_ETH_TXPAUSE)
			printf(",txpause");
	}
	printf("\n");
#endif
	/* Get flow control negotiation result. */
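	/*
	 * IFM_ETH_FMASK carries the TXPAUSE/RXPAUSE/FLOW bits resolved by
	 * PHY autonegotiation; the result is cached in sc_flowflags so
	 * kse_ioctl() and kse_ifmedia_sts() can report it, and is applied
	 * to the MAC below by toggling TXC_FCE/RXC_FCE.
	 */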
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags)
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;

	/* Adjust MAC PAUSE flow control. */
	if ((mii->mii_media_active & IFM_FDX)
	    && (sc->sc_flowflags & IFM_ETH_TXPAUSE))
		sc->sc_txc |= TXC_FCE;
	else
		sc->sc_txc &= ~TXC_FCE;
	if ((mii->mii_media_active & IFM_FDX)
	    && (sc->sc_flowflags & IFM_ETH_RXPAUSE))
		sc->sc_rxc |= RXC_FCE;
	else
		sc->sc_rxc &= ~RXC_FCE;
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
#if KSE_LINKDEBUG == 1
	printf("%ctxfce, %crxfce\n",
	    (sc->sc_txc & TXC_FCE) ? '+' : '-',
	    (sc->sc_rxc & RXC_FCE) ? '+' : '-');
#endif
}

#ifdef KSE_EVENT_COUNTERS
static void
stat_tick(void *arg)
{
	struct kse_softc *sc = arg;
	struct ksext *ee = &sc->sc_ext;
	int nport, p, i, val;

	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
	for (p = 0; p < nport; p++) {
		for (i = 0; i < 32; i++) {
			val = 0x1c00 | (p * 0x20 + i);
			CSR_WRITE_2(sc, IACR, val);
			do {
				val = CSR_READ_2(sc, IADR5) << 16;
			} while ((val & (1U << 30)) == 0);
			if (val & (1U << 31)) {
				(void)CSR_READ_2(sc, IADR4);
				val = 0x3fffffff; /* counter has overflowed */
			}
			else {
				val &= 0x3fff0000;		/* 29:16 */
				val |= CSR_READ_2(sc, IADR4);	/* 15:0 */
			}
			ee->pev[p][i].ev_count += val; /* i (0-31) */
		}
		CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p);
		ee->pev[p][32].ev_count = CSR_READ_2(sc, IADR4); /* 32 */
		CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p * 3 + 1);
		ee->pev[p][33].ev_count = CSR_READ_2(sc, IADR4); /* 33 */
	}
	callout_schedule(&sc->sc_stat_ch, hz * 60);
}

static void
zerostats(struct kse_softc *sc)
{
	struct ksext *ee = &sc->sc_ext;
	int nport, p, i, val;

	/* Make sure all the HW counters get zero */
	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
	for (p = 0; p < nport; p++) {
		for (i = 0; i < 31; i++) {
			val = 0x1c00 | (p * 0x20 + i);
			CSR_WRITE_2(sc, IACR, val);
			do {
				val = CSR_READ_2(sc, IADR5) << 16;
			} while ((val & (1U << 30)) == 0);
			(void)CSR_READ_2(sc, IADR4);
			ee->pev[p][i].ev_count = 0;
		}
	}
}
#endif