1 /* $NetBSD: if_kse.c,v 1.15 2008/05/26 10:31:22 nisimura Exp $ */ 2 3 /*- 4 * Copyright (c) 2006 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Tohru Nishimura. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 __KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.15 2008/05/26 10:31:22 nisimura Exp $"); 34 35 #include "bpfilter.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/callout.h> 40 #include <sys/mbuf.h> 41 #include <sys/malloc.h> 42 #include <sys/kernel.h> 43 #include <sys/ioctl.h> 44 #include <sys/errno.h> 45 #include <sys/device.h> 46 #include <sys/queue.h> 47 48 #include <machine/endian.h> 49 #include <sys/bus.h> 50 #include <sys/intr.h> 51 52 #include <net/if.h> 53 #include <net/if_media.h> 54 #include <net/if_dl.h> 55 #include <net/if_ether.h> 56 57 #if NBPFILTER > 0 58 #include <net/bpf.h> 59 #endif 60 61 #include <dev/pci/pcivar.h> 62 #include <dev/pci/pcireg.h> 63 #include <dev/pci/pcidevs.h> 64 65 #define CSR_READ_4(sc, off) \ 66 bus_space_read_4(sc->sc_st, sc->sc_sh, off) 67 #define CSR_WRITE_4(sc, off, val) \ 68 bus_space_write_4(sc->sc_st, sc->sc_sh, off, val) 69 #define CSR_READ_2(sc, off) \ 70 bus_space_read_2(sc->sc_st, sc->sc_sh, off) 71 #define CSR_WRITE_2(sc, off, val) \ 72 bus_space_write_2(sc->sc_st, sc->sc_sh, off, val) 73 74 #define MDTXC 0x000 /* DMA transmit control */ 75 #define MDRXC 0x004 /* DMA receive control */ 76 #define MDTSC 0x008 /* DMA transmit start */ 77 #define MDRSC 0x00c /* DMA receive start */ 78 #define TDLB 0x010 /* transmit descriptor list base */ 79 #define RDLB 0x014 /* receive descriptor list base */ 80 #define MTR0 0x020 /* multicast table 31:0 */ 81 #define MTR1 0x024 /* multicast table 63:32 */ 82 #define INTEN 0x028 /* interrupt enable */ 83 #define INTST 0x02c /* interrupt status */ 84 #define MARL 0x200 /* MAC address low */ 85 #define MARM 0x202 /* MAC address middle */ 86 #define MARH 0x204 /* MAC address high */ 87 #define GRR 0x216 /* global reset */ 88 #define CIDR 0x400 /* chip ID and enable */ 89 #define CGCR 0x40a /* chip global control */ 90 #define IACR 0x4a0 /* indirect access control */ 91 #define IADR1 0x4a2 /* indirect access data 66:63 */ 92 #define IADR2 0x4a4 /* indirect access data 47:32 */ 93 #define IADR3 0x4a6 /* indirect access data 63:48 */ 94 #define IADR4 0x4a8 /* indirect access data 15:0 */ 95 #define IADR5 0x4aa /* indirect access data 31:16 */ 96 #define P1CR4 0x512 /* port 1 control 4 */ 97 #define P1SR 0x514 /* port 1 status */ 98 #define P2CR4 0x532 /* port 2 control 4 */ 99 #define P2SR 0x534 /* port 2 status */ 100 101 #define TXC_BS_MSK 0x3f000000 /* burst size */ 102 #define TXC_BS_SFT (24) /* 1,2,4,8,16,32 or 0 for unlimited */ 103 #define TXC_UCG (1U<<18) /* generate UDP checksum */ 104 #define TXC_TCG (1U<<17) /* generate TCP checksum */ 105 #define TXC_ICG (1U<<16) /* generate IP checksum */ 106 #define TXC_FCE (1U<<9) /* enable flowcontrol */ 107 #define TXC_EP (1U<<2) /* enable automatic padding */ 108 #define TXC_AC (1U<<1) /* add CRC to frame */ 109 #define TXC_TEN (1) /* enable DMA to run */ 110 111 #define RXC_BS_MSK 0x3f000000 /* burst size */ 112 #define RXC_BS_SFT (24) /* 1,2,4,8,16,32 or 0 for unlimited */ 113 #define RXC_IHAE (1U<<19) /* IP header alignment enable */ 114 #define RXC_UCC (1U<<18) /* run UDP checksum */ 115 #define RXC_TCC (1U<<17) /* run TDP checksum */ 116 #define RXC_ICC (1U<<16) /* run IP checksum */ 117 #define RXC_FCE (1U<<9) /* enable flowcontrol */ 118 #define RXC_RB (1U<<6) /* receive broadcast frame */ 119 #define RXC_RM (1U<<5) /* receive multicast frame */ 120 #define RXC_RU (1U<<4) /* receive unicast frame */ 121 #define RXC_RE (1U<<3) /* accept error frame */ 122 #define RXC_RA (1U<<2) /* receive all frame */ 123 #define RXC_MHTE (1U<<1) /* use multicast hash table */ 124 #define RXC_REN (1) /* enable DMA to run */ 125 126 #define INT_DMLCS (1U<<31) /* link status change */ 127 #define INT_DMTS (1U<<30) /* sending desc. has posted Tx done */ 128 #define INT_DMRS (1U<<29) /* frame was received */ 129 #define INT_DMRBUS (1U<<27) /* Rx descriptor pool is full */ 130 131 #define T0_OWN (1U<<31) /* desc is ready to Tx */ 132 133 #define R0_OWN (1U<<31) /* desc is empty */ 134 #define R0_FS (1U<<30) /* first segment of frame */ 135 #define R0_LS (1U<<29) /* last segment of frame */ 136 #define R0_IPE (1U<<28) /* IP checksum error */ 137 #define R0_TCPE (1U<<27) /* TCP checksum error */ 138 #define R0_UDPE (1U<<26) /* UDP checksum error */ 139 #define R0_ES (1U<<25) /* error summary */ 140 #define R0_MF (1U<<24) /* multicast frame */ 141 #define R0_SPN 0x00300000 /* 21:20 switch port 1/2 */ 142 #define R0_ALIGN 0x00300000 /* 21:20 (KSZ8692P) Rx align amount */ 143 #define R0_RE (1U<<19) /* MII reported error */ 144 #define R0_TL (1U<<18) /* frame too long, beyond 1518 */ 145 #define R0_RF (1U<<17) /* damaged runt frame */ 146 #define R0_CE (1U<<16) /* CRC error */ 147 #define R0_FT (1U<<15) /* frame type */ 148 #define R0_FL_MASK 0x7ff /* frame length 10:0 */ 149 150 #define T1_IC (1U<<31) /* post interrupt on complete */ 151 #define T1_FS (1U<<30) /* first segment of frame */ 152 #define T1_LS (1U<<29) /* last segment of frame */ 153 #define T1_IPCKG (1U<<28) /* generate IP checksum */ 154 #define T1_TCPCKG (1U<<27) /* generate TCP checksum */ 155 #define T1_UDPCKG (1U<<26) /* generate UDP checksum */ 156 #define T1_TER (1U<<25) /* end of ring */ 157 #define T1_SPN 0x00300000 /* 21:20 switch port 1/2 */ 158 #define T1_TBS_MASK 0x7ff /* segment size 10:0 */ 159 160 #define R1_RER (1U<<25) /* end of ring */ 161 #define R1_RBS_MASK 0x7fc /* segment size 10:0 */ 162 163 #define KSE_NTXSEGS 16 164 #define KSE_TXQUEUELEN 64 165 #define KSE_TXQUEUELEN_MASK (KSE_TXQUEUELEN - 1) 166 #define KSE_TXQUEUE_GC (KSE_TXQUEUELEN / 4) 167 #define KSE_NTXDESC 256 168 #define KSE_NTXDESC_MASK (KSE_NTXDESC - 1) 169 #define KSE_NEXTTX(x) (((x) + 1) & KSE_NTXDESC_MASK) 170 #define KSE_NEXTTXS(x) (((x) + 1) & KSE_TXQUEUELEN_MASK) 171 172 #define KSE_NRXDESC 64 173 #define KSE_NRXDESC_MASK (KSE_NRXDESC - 1) 174 #define KSE_NEXTRX(x) (((x) + 1) & KSE_NRXDESC_MASK) 175 176 struct tdes { 177 uint32_t t0, t1, t2, t3; 178 }; 179 180 struct rdes { 181 uint32_t r0, r1, r2, r3; 182 }; 183 184 struct kse_control_data { 185 struct tdes kcd_txdescs[KSE_NTXDESC]; 186 struct rdes kcd_rxdescs[KSE_NRXDESC]; 187 }; 188 #define KSE_CDOFF(x) offsetof(struct kse_control_data, x) 189 #define KSE_CDTXOFF(x) KSE_CDOFF(kcd_txdescs[(x)]) 190 #define KSE_CDRXOFF(x) KSE_CDOFF(kcd_rxdescs[(x)]) 191 192 struct kse_txsoft { 193 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 194 bus_dmamap_t txs_dmamap; /* our DMA map */ 195 int txs_firstdesc; /* first descriptor in packet */ 196 int txs_lastdesc; /* last descriptor in packet */ 197 int txs_ndesc; /* # of descriptors used */ 198 }; 199 200 struct kse_rxsoft { 201 struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 202 bus_dmamap_t rxs_dmamap; /* our DMA map */ 203 }; 204 205 struct kse_softc { 206 struct device sc_dev; /* generic device information */ 207 bus_space_tag_t sc_st; /* bus space tag */ 208 bus_space_handle_t sc_sh; /* bus space handle */ 209 bus_dma_tag_t sc_dmat; /* bus DMA tag */ 210 struct ethercom sc_ethercom; /* Ethernet common data */ 211 void *sc_ih; /* interrupt cookie */ 212 213 struct ifmedia sc_media; /* ifmedia information */ 214 int sc_media_status; /* PHY */ 215 int sc_media_active; /* PHY */ 216 callout_t sc_callout; /* MII tick callout */ 217 callout_t sc_stat_ch; /* statistics counter callout */ 218 219 bus_dmamap_t sc_cddmamap; /* control data DMA map */ 220 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr 221 222 struct kse_control_data *sc_control_data; 223 #define sc_txdescs sc_control_data->kcd_txdescs 224 #define sc_rxdescs sc_control_data->kcd_rxdescs 225 226 struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN]; 227 struct kse_rxsoft sc_rxsoft[KSE_NRXDESC]; 228 int sc_txfree; /* number of free Tx descriptors */ 229 int sc_txnext; /* next ready Tx descriptor */ 230 int sc_txsfree; /* number of free Tx jobs */ 231 int sc_txsnext; /* next ready Tx job */ 232 int sc_txsdirty; /* dirty Tx jobs */ 233 int sc_rxptr; /* next ready Rx descriptor/descsoft */ 234 235 uint32_t sc_txc, sc_rxc; 236 uint32_t sc_t1csum; 237 int sc_mcsum; 238 uint32_t sc_inten; 239 240 uint32_t sc_chip; 241 uint8_t sc_altmac[16][ETHER_ADDR_LEN]; 242 uint16_t sc_vlan[16]; 243 244 #ifdef KSE_EVENT_COUNTERS 245 struct ksext { 246 char evcntname[3][8]; 247 struct evcnt pev[3][34]; 248 } sc_ext; /* switch statistics */ 249 #endif 250 }; 251 252 #define KSE_CDTXADDR(sc, x) ((sc)->sc_cddma + KSE_CDTXOFF((x))) 253 #define KSE_CDRXADDR(sc, x) ((sc)->sc_cddma + KSE_CDRXOFF((x))) 254 255 #define KSE_CDTXSYNC(sc, x, n, ops) \ 256 do { \ 257 int __x, __n; \ 258 \ 259 __x = (x); \ 260 __n = (n); \ 261 \ 262 /* If it will wrap around, sync to the end of the ring. */ \ 263 if ((__x + __n) > KSE_NTXDESC) { \ 264 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 265 KSE_CDTXOFF(__x), sizeof(struct tdes) * \ 266 (KSE_NTXDESC - __x), (ops)); \ 267 __n -= (KSE_NTXDESC - __x); \ 268 __x = 0; \ 269 } \ 270 \ 271 /* Now sync whatever is left. */ \ 272 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 273 KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops)); \ 274 } while (/*CONSTCOND*/0) 275 276 #define KSE_CDRXSYNC(sc, x, ops) \ 277 do { \ 278 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 279 KSE_CDRXOFF((x)), sizeof(struct rdes), (ops)); \ 280 } while (/*CONSTCOND*/0) 281 282 #define KSE_INIT_RXDESC(sc, x) \ 283 do { \ 284 struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \ 285 struct rdes *__rxd = &(sc)->sc_rxdescs[(x)]; \ 286 struct mbuf *__m = __rxs->rxs_mbuf; \ 287 \ 288 __m->m_data = __m->m_ext.ext_buf; \ 289 __rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr; \ 290 __rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */; \ 291 __rxd->r0 = R0_OWN; \ 292 KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \ 293 } while (/*CONSTCOND*/0) 294 295 u_int kse_burstsize = 8; /* DMA burst length tuning knob */ 296 297 #ifdef KSEDIAGNOSTIC 298 u_int kse_monitor_rxintr; /* fragmented UDP csum HW bug hook */ 299 #endif 300 301 static int kse_match(struct device *, struct cfdata *, void *); 302 static void kse_attach(struct device *, struct device *, void *); 303 304 CFATTACH_DECL(kse, sizeof(struct kse_softc), 305 kse_match, kse_attach, NULL, NULL); 306 307 static int kse_ioctl(struct ifnet *, u_long, void *); 308 static void kse_start(struct ifnet *); 309 static void kse_watchdog(struct ifnet *); 310 static int kse_init(struct ifnet *); 311 static void kse_stop(struct ifnet *, int); 312 static void kse_reset(struct kse_softc *); 313 static void kse_set_filter(struct kse_softc *); 314 static int add_rxbuf(struct kse_softc *, int); 315 static void rxdrain(struct kse_softc *); 316 static int kse_intr(void *); 317 static void rxintr(struct kse_softc *); 318 static void txreap(struct kse_softc *); 319 static void lnkchg(struct kse_softc *); 320 static int ifmedia_upd(struct ifnet *); 321 static void ifmedia_sts(struct ifnet *, struct ifmediareq *); 322 static void phy_tick(void *); 323 static int ifmedia2_upd(struct ifnet *); 324 static void ifmedia2_sts(struct ifnet *, struct ifmediareq *); 325 #ifdef KSE_EVENT_COUNTERS 326 static void stat_tick(void *); 327 static void zerostats(struct kse_softc *); 328 #endif 329 330 static int 331 kse_match(struct device *parent, struct cfdata *match, void *aux) 332 { 333 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 334 335 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MICREL && 336 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8842 || 337 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8841) && 338 PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK) 339 return 1; 340 341 return 0; 342 } 343 344 static void 345 kse_attach(struct device *parent, struct device *self, void *aux) 346 { 347 struct kse_softc *sc = (struct kse_softc *)self; 348 struct pci_attach_args *pa = aux; 349 pci_chipset_tag_t pc = pa->pa_pc; 350 pci_intr_handle_t ih; 351 const char *intrstr; 352 struct ifnet *ifp; 353 struct ifmedia *ifm; 354 uint8_t enaddr[ETHER_ADDR_LEN]; 355 bus_dma_segment_t seg; 356 int i, p, error, nseg; 357 pcireg_t pmode; 358 int pmreg; 359 360 if (pci_mapreg_map(pa, 0x10, 361 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 362 0, &sc->sc_st, &sc->sc_sh, NULL, NULL) != 0) { 363 printf(": unable to map device registers\n"); 364 return; 365 } 366 367 sc->sc_dmat = pa->pa_dmat; 368 369 /* Make sure bus mastering is enabled. */ 370 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 371 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) | 372 PCI_COMMAND_MASTER_ENABLE); 373 374 /* Get it out of power save mode, if needed. */ 375 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) { 376 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) & 377 PCI_PMCSR_STATE_MASK; 378 if (pmode == PCI_PMCSR_STATE_D3) { 379 /* 380 * The card has lost all configuration data in 381 * this state, so punt. 382 */ 383 printf("%s: unable to wake from power state D3\n", 384 device_xname(&sc->sc_dev)); 385 return; 386 } 387 if (pmode != PCI_PMCSR_STATE_D0) { 388 printf("%s: waking up from power date D%d\n", 389 device_xname(&sc->sc_dev), pmode); 390 pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR, 391 PCI_PMCSR_STATE_D0); 392 } 393 } 394 395 sc->sc_chip = PCI_PRODUCT(pa->pa_id); 396 printf(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n", 397 sc->sc_chip, PCI_REVISION(pa->pa_class)); 398 399 /* 400 * Read the Ethernet address from the EEPROM. 401 */ 402 i = CSR_READ_2(sc, MARL); 403 enaddr[5] = i; enaddr[4] = i >> 8; 404 i = CSR_READ_2(sc, MARM); 405 enaddr[3] = i; enaddr[2] = i >> 8; 406 i = CSR_READ_2(sc, MARH); 407 enaddr[1] = i; enaddr[0] = i >> 8; 408 printf("%s: Ethernet address: %s\n", 409 device_xname(&sc->sc_dev), ether_sprintf(enaddr)); 410 411 /* 412 * Enable chip function. 413 */ 414 CSR_WRITE_2(sc, CIDR, 1); 415 416 /* 417 * Map and establish our interrupt. 418 */ 419 if (pci_intr_map(pa, &ih)) { 420 aprint_error_dev(&sc->sc_dev, "unable to map interrupt\n"); 421 return; 422 } 423 intrstr = pci_intr_string(pc, ih); 424 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, kse_intr, sc); 425 if (sc->sc_ih == NULL) { 426 aprint_error_dev(&sc->sc_dev, "unable to establish interrupt"); 427 if (intrstr != NULL) 428 printf(" at %s", intrstr); 429 printf("\n"); 430 return; 431 } 432 printf("%s: interrupting at %s\n", device_xname(&sc->sc_dev), intrstr); 433 434 /* 435 * Allocate the control data structures, and create and load the 436 * DMA map for it. 437 */ 438 error = bus_dmamem_alloc(sc->sc_dmat, 439 sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0); 440 if (error != 0) { 441 aprint_error_dev(&sc->sc_dev, "unable to allocate control data, error = %d\n", error); 442 goto fail_0; 443 } 444 error = bus_dmamem_map(sc->sc_dmat, &seg, nseg, 445 sizeof(struct kse_control_data), (void **)&sc->sc_control_data, 446 BUS_DMA_COHERENT); 447 if (error != 0) { 448 aprint_error_dev(&sc->sc_dev, "unable to map control data, error = %d\n", error); 449 goto fail_1; 450 } 451 error = bus_dmamap_create(sc->sc_dmat, 452 sizeof(struct kse_control_data), 1, 453 sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap); 454 if (error != 0) { 455 aprint_error_dev(&sc->sc_dev, "unable to create control data DMA map, " 456 "error = %d\n", error); 457 goto fail_2; 458 } 459 error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 460 sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0); 461 if (error != 0) { 462 aprint_error_dev(&sc->sc_dev, "unable to load control data DMA map, error = %d\n", 463 error); 464 goto fail_3; 465 } 466 for (i = 0; i < KSE_TXQUEUELEN; i++) { 467 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 468 KSE_NTXSEGS, MCLBYTES, 0, 0, 469 &sc->sc_txsoft[i].txs_dmamap)) != 0) { 470 aprint_error_dev(&sc->sc_dev, "unable to create tx DMA map %d, " 471 "error = %d\n", i, error); 472 goto fail_4; 473 } 474 } 475 for (i = 0; i < KSE_NRXDESC; i++) { 476 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 477 1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 478 aprint_error_dev(&sc->sc_dev, "unable to create rx DMA map %d, " 479 "error = %d\n", i, error); 480 goto fail_5; 481 } 482 sc->sc_rxsoft[i].rxs_mbuf = NULL; 483 } 484 485 callout_init(&sc->sc_callout, 0); 486 callout_init(&sc->sc_stat_ch, 0); 487 488 ifm = &sc->sc_media; 489 if (sc->sc_chip == 0x8841) { 490 ifmedia_init(ifm, 0, ifmedia_upd, ifmedia_sts); 491 ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL); 492 ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 493 ifmedia_add(ifm, IFM_ETHER|IFM_100_TX, 0, NULL); 494 ifmedia_add(ifm, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); 495 ifmedia_add(ifm, IFM_ETHER|IFM_AUTO, 0, NULL); 496 ifmedia_set(ifm, IFM_ETHER|IFM_AUTO); 497 } 498 else { 499 ifmedia_init(ifm, 0, ifmedia2_upd, ifmedia2_sts); 500 ifmedia_add(ifm, IFM_ETHER|IFM_AUTO, 0, NULL); 501 ifmedia_set(ifm, IFM_ETHER|IFM_AUTO); 502 } 503 504 printf("%s: 10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n", 505 device_xname(&sc->sc_dev)); 506 507 ifp = &sc->sc_ethercom.ec_if; 508 strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ); 509 ifp->if_softc = sc; 510 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 511 ifp->if_ioctl = kse_ioctl; 512 ifp->if_start = kse_start; 513 ifp->if_watchdog = kse_watchdog; 514 ifp->if_init = kse_init; 515 ifp->if_stop = kse_stop; 516 IFQ_SET_READY(&ifp->if_snd); 517 518 /* 519 * KSZ8842 can handle 802.1Q VLAN-sized frames, 520 * can do IPv4, TCPv4, and UDPv4 checksums in hardware. 521 */ 522 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 523 ifp->if_capabilities |= 524 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 525 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 526 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 527 528 if_attach(ifp); 529 ether_ifattach(ifp, enaddr); 530 531 p = (sc->sc_chip == 0x8842) ? 3 : 1; 532 #ifdef KSE_EVENT_COUNTERS 533 for (i = 0; i < p; i++) { 534 struct ksext *ee = &sc->sc_ext; 535 sprintf(ee->evcntname[i], "%s.%d", device_xname(&sc->sc_dev), i+1); 536 evcnt_attach_dynamic(&ee->pev[i][0], EVCNT_TYPE_MISC, 537 NULL, ee->evcntname[i], "RxLoPriotyByte"); 538 evcnt_attach_dynamic(&ee->pev[i][1], EVCNT_TYPE_MISC, 539 NULL, ee->evcntname[i], "RxHiPriotyByte"); 540 evcnt_attach_dynamic(&ee->pev[i][2], EVCNT_TYPE_MISC, 541 NULL, ee->evcntname[i], "RxUndersizePkt"); 542 evcnt_attach_dynamic(&ee->pev[i][3], EVCNT_TYPE_MISC, 543 NULL, ee->evcntname[i], "RxFragments"); 544 evcnt_attach_dynamic(&ee->pev[i][4], EVCNT_TYPE_MISC, 545 NULL, ee->evcntname[i], "RxOversize"); 546 evcnt_attach_dynamic(&ee->pev[i][5], EVCNT_TYPE_MISC, 547 NULL, ee->evcntname[i], "RxJabbers"); 548 evcnt_attach_dynamic(&ee->pev[i][6], EVCNT_TYPE_MISC, 549 NULL, ee->evcntname[i], "RxSymbolError"); 550 evcnt_attach_dynamic(&ee->pev[i][7], EVCNT_TYPE_MISC, 551 NULL, ee->evcntname[i], "RxCRCError"); 552 evcnt_attach_dynamic(&ee->pev[i][8], EVCNT_TYPE_MISC, 553 NULL, ee->evcntname[i], "RxAlignmentError"); 554 evcnt_attach_dynamic(&ee->pev[i][9], EVCNT_TYPE_MISC, 555 NULL, ee->evcntname[i], "RxControl8808Pkts"); 556 evcnt_attach_dynamic(&ee->pev[i][10], EVCNT_TYPE_MISC, 557 NULL, ee->evcntname[i], "RxPausePkts"); 558 evcnt_attach_dynamic(&ee->pev[i][11], EVCNT_TYPE_MISC, 559 NULL, ee->evcntname[i], "RxBroadcast"); 560 evcnt_attach_dynamic(&ee->pev[i][12], EVCNT_TYPE_MISC, 561 NULL, ee->evcntname[i], "RxMulticast"); 562 evcnt_attach_dynamic(&ee->pev[i][13], EVCNT_TYPE_MISC, 563 NULL, ee->evcntname[i], "RxUnicast"); 564 evcnt_attach_dynamic(&ee->pev[i][14], EVCNT_TYPE_MISC, 565 NULL, ee->evcntname[i], "Rx64Octets"); 566 evcnt_attach_dynamic(&ee->pev[i][15], EVCNT_TYPE_MISC, 567 NULL, ee->evcntname[i], "Rx65To127Octets"); 568 evcnt_attach_dynamic(&ee->pev[i][16], EVCNT_TYPE_MISC, 569 NULL, ee->evcntname[i], "Rx128To255Octets"); 570 evcnt_attach_dynamic(&ee->pev[i][17], EVCNT_TYPE_MISC, 571 NULL, ee->evcntname[i], "Rx255To511Octets"); 572 evcnt_attach_dynamic(&ee->pev[i][18], EVCNT_TYPE_MISC, 573 NULL, ee->evcntname[i], "Rx512To1023Octets"); 574 evcnt_attach_dynamic(&ee->pev[i][19], EVCNT_TYPE_MISC, 575 NULL, ee->evcntname[i], "Rx1024To1522Octets"); 576 evcnt_attach_dynamic(&ee->pev[i][20], EVCNT_TYPE_MISC, 577 NULL, ee->evcntname[i], "TxLoPriotyByte"); 578 evcnt_attach_dynamic(&ee->pev[i][21], EVCNT_TYPE_MISC, 579 NULL, ee->evcntname[i], "TxHiPriotyByte"); 580 evcnt_attach_dynamic(&ee->pev[i][22], EVCNT_TYPE_MISC, 581 NULL, ee->evcntname[i], "TxLateCollision"); 582 evcnt_attach_dynamic(&ee->pev[i][23], EVCNT_TYPE_MISC, 583 NULL, ee->evcntname[i], "TxPausePkts"); 584 evcnt_attach_dynamic(&ee->pev[i][24], EVCNT_TYPE_MISC, 585 NULL, ee->evcntname[i], "TxBroadcastPkts"); 586 evcnt_attach_dynamic(&ee->pev[i][25], EVCNT_TYPE_MISC, 587 NULL, ee->evcntname[i], "TxMulticastPkts"); 588 evcnt_attach_dynamic(&ee->pev[i][26], EVCNT_TYPE_MISC, 589 NULL, ee->evcntname[i], "TxUnicastPkts"); 590 evcnt_attach_dynamic(&ee->pev[i][27], EVCNT_TYPE_MISC, 591 NULL, ee->evcntname[i], "TxDeferred"); 592 evcnt_attach_dynamic(&ee->pev[i][28], EVCNT_TYPE_MISC, 593 NULL, ee->evcntname[i], "TxTotalCollision"); 594 evcnt_attach_dynamic(&ee->pev[i][29], EVCNT_TYPE_MISC, 595 NULL, ee->evcntname[i], "TxExcessiveCollision"); 596 evcnt_attach_dynamic(&ee->pev[i][30], EVCNT_TYPE_MISC, 597 NULL, ee->evcntname[i], "TxSingleCollision"); 598 evcnt_attach_dynamic(&ee->pev[i][31], EVCNT_TYPE_MISC, 599 NULL, ee->evcntname[i], "TxMultipleCollision"); 600 evcnt_attach_dynamic(&ee->pev[i][32], EVCNT_TYPE_MISC, 601 NULL, ee->evcntname[i], "TxDropPkts"); 602 evcnt_attach_dynamic(&ee->pev[i][33], EVCNT_TYPE_MISC, 603 NULL, ee->evcntname[i], "RxDropPkts"); 604 } 605 #endif 606 return; 607 608 fail_5: 609 for (i = 0; i < KSE_NRXDESC; i++) { 610 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 611 bus_dmamap_destroy(sc->sc_dmat, 612 sc->sc_rxsoft[i].rxs_dmamap); 613 } 614 fail_4: 615 for (i = 0; i < KSE_TXQUEUELEN; i++) { 616 if (sc->sc_txsoft[i].txs_dmamap != NULL) 617 bus_dmamap_destroy(sc->sc_dmat, 618 sc->sc_txsoft[i].txs_dmamap); 619 } 620 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 621 fail_3: 622 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 623 fail_2: 624 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 625 sizeof(struct kse_control_data)); 626 fail_1: 627 bus_dmamem_free(sc->sc_dmat, &seg, nseg); 628 fail_0: 629 return; 630 } 631 632 static int 633 kse_ioctl(struct ifnet *ifp, u_long cmd, void *data) 634 { 635 struct kse_softc *sc = ifp->if_softc; 636 struct ifreq *ifr = (struct ifreq *)data; 637 int s, error; 638 639 s = splnet(); 640 641 switch (cmd) { 642 case SIOCSIFMEDIA: 643 case SIOCGIFMEDIA: 644 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); 645 break; 646 647 default: 648 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) 649 break; 650 651 error = 0; 652 653 if (cmd == SIOCSIFCAP) 654 error = (*ifp->if_init)(ifp); 655 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) 656 ; 657 else if (ifp->if_flags & IFF_RUNNING) { 658 /* 659 * Multicast list has changed; set the hardware filter 660 * accordingly. 661 */ 662 kse_set_filter(sc); 663 } 664 break; 665 } 666 667 kse_start(ifp); 668 669 splx(s); 670 return error; 671 } 672 673 static int 674 kse_init(struct ifnet *ifp) 675 { 676 struct kse_softc *sc = ifp->if_softc; 677 uint32_t paddr; 678 int i, error = 0; 679 680 /* cancel pending I/O */ 681 kse_stop(ifp, 0); 682 683 /* reset all registers but PCI configuration */ 684 kse_reset(sc); 685 686 /* craft Tx descriptor ring */ 687 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 688 for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) { 689 sc->sc_txdescs[i].t3 = paddr; 690 paddr += sizeof(struct tdes); 691 } 692 sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0); 693 KSE_CDTXSYNC(sc, 0, KSE_NTXDESC, 694 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 695 sc->sc_txfree = KSE_NTXDESC; 696 sc->sc_txnext = 0; 697 698 for (i = 0; i < KSE_TXQUEUELEN; i++) 699 sc->sc_txsoft[i].txs_mbuf = NULL; 700 sc->sc_txsfree = KSE_TXQUEUELEN; 701 sc->sc_txsnext = 0; 702 sc->sc_txsdirty = 0; 703 704 /* craft Rx descriptor ring */ 705 memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs)); 706 for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) { 707 sc->sc_rxdescs[i].r3 = paddr; 708 paddr += sizeof(struct rdes); 709 } 710 sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0); 711 for (i = 0; i < KSE_NRXDESC; i++) { 712 if (sc->sc_rxsoft[i].rxs_mbuf == NULL) { 713 if ((error = add_rxbuf(sc, i)) != 0) { 714 printf("%s: unable to allocate or map rx " 715 "buffer %d, error = %d\n", 716 device_xname(&sc->sc_dev), i, error); 717 rxdrain(sc); 718 goto out; 719 } 720 } 721 else 722 KSE_INIT_RXDESC(sc, i); 723 } 724 sc->sc_rxptr = 0; 725 726 /* hand Tx/Rx rings to HW */ 727 CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0)); 728 CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0)); 729 730 sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC | TXC_FCE; 731 sc->sc_rxc = RXC_REN | RXC_RU | RXC_FCE; 732 if (ifp->if_flags & IFF_PROMISC) 733 sc->sc_rxc |= RXC_RA; 734 if (ifp->if_flags & IFF_BROADCAST) 735 sc->sc_rxc |= RXC_RB; 736 sc->sc_t1csum = sc->sc_mcsum = 0; 737 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) { 738 sc->sc_rxc |= RXC_ICC; 739 sc->sc_mcsum |= M_CSUM_IPv4; 740 } 741 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) { 742 sc->sc_txc |= TXC_ICG; 743 sc->sc_t1csum |= T1_IPCKG; 744 } 745 if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) { 746 sc->sc_rxc |= RXC_TCC; 747 sc->sc_mcsum |= M_CSUM_TCPv4; 748 } 749 if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) { 750 sc->sc_txc |= TXC_TCG; 751 sc->sc_t1csum |= T1_TCPCKG; 752 } 753 if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) { 754 sc->sc_rxc |= RXC_UCC; 755 sc->sc_mcsum |= M_CSUM_UDPv4; 756 } 757 if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) { 758 sc->sc_txc |= TXC_UCG; 759 sc->sc_t1csum |= T1_UDPCKG; 760 } 761 sc->sc_txc |= (kse_burstsize << TXC_BS_SFT); 762 sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT); 763 764 /* build multicast hash filter if necessary */ 765 kse_set_filter(sc); 766 767 /* set current media */ 768 (void)ifmedia_upd(ifp); 769 770 /* enable transmitter and receiver */ 771 CSR_WRITE_4(sc, MDTXC, sc->sc_txc); 772 CSR_WRITE_4(sc, MDRXC, sc->sc_rxc); 773 CSR_WRITE_4(sc, MDRSC, 1); 774 775 /* enable interrupts */ 776 sc->sc_inten = INT_DMTS|INT_DMRS|INT_DMRBUS; 777 if (sc->sc_chip == 0x8841) 778 sc->sc_inten |= INT_DMLCS; 779 CSR_WRITE_4(sc, INTST, ~0); 780 CSR_WRITE_4(sc, INTEN, sc->sc_inten); 781 782 ifp->if_flags |= IFF_RUNNING; 783 ifp->if_flags &= ~IFF_OACTIVE; 784 785 if (sc->sc_chip == 0x8841) { 786 /* start one second timer */ 787 callout_reset(&sc->sc_callout, hz, phy_tick, sc); 788 } 789 #ifdef KSE_EVENT_COUNTERS 790 /* start statistics gather 1 minute timer */ 791 zerostats(sc); 792 callout_reset(&sc->sc_stat_ch, hz * 60, stat_tick, sc); 793 #endif 794 795 out: 796 if (error) { 797 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 798 ifp->if_timer = 0; 799 printf("%s: interface not running\n", device_xname(&sc->sc_dev)); 800 } 801 return error; 802 } 803 804 static void 805 kse_stop(struct ifnet *ifp, int disable) 806 { 807 struct kse_softc *sc = ifp->if_softc; 808 struct kse_txsoft *txs; 809 int i; 810 811 if (sc->sc_chip == 0x8841) 812 callout_stop(&sc->sc_callout); 813 callout_stop(&sc->sc_stat_ch); 814 815 sc->sc_txc &= ~TXC_TEN; 816 sc->sc_rxc &= ~RXC_REN; 817 CSR_WRITE_4(sc, MDTXC, sc->sc_txc); 818 CSR_WRITE_4(sc, MDRXC, sc->sc_rxc); 819 820 for (i = 0; i < KSE_TXQUEUELEN; i++) { 821 txs = &sc->sc_txsoft[i]; 822 if (txs->txs_mbuf != NULL) { 823 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 824 m_freem(txs->txs_mbuf); 825 txs->txs_mbuf = NULL; 826 } 827 } 828 829 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 830 ifp->if_timer = 0; 831 832 if (disable) 833 rxdrain(sc); 834 } 835 836 static void 837 kse_reset(struct kse_softc *sc) 838 { 839 840 CSR_WRITE_2(sc, GRR, 1); 841 delay(1000); /* PDF does not mention the delay amount */ 842 CSR_WRITE_2(sc, GRR, 0); 843 844 CSR_WRITE_2(sc, CIDR, 1); 845 } 846 847 static void 848 kse_watchdog(struct ifnet *ifp) 849 { 850 struct kse_softc *sc = ifp->if_softc; 851 852 /* 853 * Since we're not interrupting every packet, sweep 854 * up before we report an error. 855 */ 856 txreap(sc); 857 858 if (sc->sc_txfree != KSE_NTXDESC) { 859 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n", 860 device_xname(&sc->sc_dev), sc->sc_txfree, sc->sc_txsfree, 861 sc->sc_txnext); 862 ifp->if_oerrors++; 863 864 /* Reset the interface. */ 865 kse_init(ifp); 866 } 867 else if (ifp->if_flags & IFF_DEBUG) 868 printf("%s: recovered from device timeout\n", 869 device_xname(&sc->sc_dev)); 870 871 /* Try to get more packets going. */ 872 kse_start(ifp); 873 } 874 875 static void 876 kse_start(struct ifnet *ifp) 877 { 878 struct kse_softc *sc = ifp->if_softc; 879 struct mbuf *m0, *m; 880 struct kse_txsoft *txs; 881 bus_dmamap_t dmamap; 882 int error, nexttx, lasttx, ofree, seg; 883 uint32_t tdes0; 884 885 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 886 return; 887 888 /* 889 * Remember the previous number of free descriptors. 890 */ 891 ofree = sc->sc_txfree; 892 893 /* 894 * Loop through the send queue, setting up transmit descriptors 895 * until we drain the queue, or use up all available transmit 896 * descriptors. 897 */ 898 for (;;) { 899 IFQ_POLL(&ifp->if_snd, m0); 900 if (m0 == NULL) 901 break; 902 903 if (sc->sc_txsfree < KSE_TXQUEUE_GC) { 904 txreap(sc); 905 if (sc->sc_txsfree == 0) 906 break; 907 } 908 txs = &sc->sc_txsoft[sc->sc_txsnext]; 909 dmamap = txs->txs_dmamap; 910 911 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 912 BUS_DMA_WRITE|BUS_DMA_NOWAIT); 913 if (error) { 914 if (error == EFBIG) { 915 printf("%s: Tx packet consumes too many " 916 "DMA segments, dropping...\n", 917 device_xname(&sc->sc_dev)); 918 IFQ_DEQUEUE(&ifp->if_snd, m0); 919 m_freem(m0); 920 continue; 921 } 922 /* Short on resources, just stop for now. */ 923 break; 924 } 925 926 if (dmamap->dm_nsegs > sc->sc_txfree) { 927 /* 928 * Not enough free descriptors to transmit this 929 * packet. We haven't committed anything yet, 930 * so just unload the DMA map, put the packet 931 * back on the queue, and punt. Notify the upper 932 * layer that there are not more slots left. 933 */ 934 ifp->if_flags |= IFF_OACTIVE; 935 bus_dmamap_unload(sc->sc_dmat, dmamap); 936 break; 937 } 938 939 IFQ_DEQUEUE(&ifp->if_snd, m0); 940 941 /* 942 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 943 */ 944 945 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 946 BUS_DMASYNC_PREWRITE); 947 948 lasttx = -1; tdes0 = 0; 949 for (nexttx = sc->sc_txnext, seg = 0; 950 seg < dmamap->dm_nsegs; 951 seg++, nexttx = KSE_NEXTTX(nexttx)) { 952 struct tdes *tdes = &sc->sc_txdescs[nexttx]; 953 /* 954 * If this is the first descriptor we're 955 * enqueueing, don't set the OWN bit just 956 * yet. That could cause a race condition. 957 * We'll do it below. 958 */ 959 tdes->t2 = dmamap->dm_segs[seg].ds_addr; 960 tdes->t1 = sc->sc_t1csum 961 | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK); 962 tdes->t0 = tdes0; 963 tdes0 |= T0_OWN; 964 lasttx = nexttx; 965 } 966 967 /* 968 * Outgoing NFS mbuf must be unloaded when Tx completed. 969 * Without T1_IC NFS mbuf is left unack'ed for excessive 970 * time and NFS stops to proceed until kse_watchdog() 971 * calls txreap() to reclaim the unack'ed mbuf. 972 * It's painful to traverse every mbuf chain to determine 973 * whether someone is waiting for Tx completion. 974 */ 975 m = m0; 976 do { 977 if ((m->m_flags & M_EXT) && m->m_ext.ext_free) { 978 sc->sc_txdescs[lasttx].t1 |= T1_IC; 979 break; 980 } 981 } while ((m = m->m_next) != NULL); 982 983 /* write last T0_OWN bit of the 1st segment */ 984 sc->sc_txdescs[lasttx].t1 |= T1_LS; 985 sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS; 986 sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN; 987 KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs, 988 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 989 990 /* tell DMA start transmit */ 991 CSR_WRITE_4(sc, MDTSC, 1); 992 993 txs->txs_mbuf = m0; 994 txs->txs_firstdesc = sc->sc_txnext; 995 txs->txs_lastdesc = lasttx; 996 txs->txs_ndesc = dmamap->dm_nsegs; 997 998 sc->sc_txfree -= txs->txs_ndesc; 999 sc->sc_txnext = nexttx; 1000 sc->sc_txsfree--; 1001 sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext); 1002 #if NBPFILTER > 0 1003 /* 1004 * Pass the packet to any BPF listeners. 1005 */ 1006 if (ifp->if_bpf) 1007 bpf_mtap(ifp->if_bpf, m0); 1008 #endif /* NBPFILTER > 0 */ 1009 } 1010 1011 if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) { 1012 /* No more slots left; notify upper layer. */ 1013 ifp->if_flags |= IFF_OACTIVE; 1014 } 1015 if (sc->sc_txfree != ofree) { 1016 /* Set a watchdog timer in case the chip flakes out. */ 1017 ifp->if_timer = 5; 1018 } 1019 } 1020 1021 static void 1022 kse_set_filter(struct kse_softc *sc) 1023 { 1024 struct ether_multistep step; 1025 struct ether_multi *enm; 1026 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1027 uint32_t h, hashes[2]; 1028 1029 sc->sc_rxc &= ~(RXC_MHTE | RXC_RM); 1030 ifp->if_flags &= ~IFF_ALLMULTI; 1031 if (ifp->if_flags & IFF_PROMISC) 1032 return; 1033 1034 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm); 1035 if (enm == NULL) 1036 return; 1037 hashes[0] = hashes[1] = 0; 1038 do { 1039 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1040 /* 1041 * We must listen to a range of multicast addresses. 1042 * For now, just accept all multicasts, rather than 1043 * trying to set only those filter bits needed to match 1044 * the range. (At this time, the only use of address 1045 * ranges is for IP multicast routing, for which the 1046 * range is big enough to require all bits set.) 1047 */ 1048 goto allmulti; 1049 } 1050 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26; 1051 hashes[h >> 5] |= 1 << (h & 0x1f); 1052 ETHER_NEXT_MULTI(step, enm); 1053 } while (enm != NULL); 1054 sc->sc_rxc |= RXC_MHTE; 1055 CSR_WRITE_4(sc, MTR0, hashes[0]); 1056 CSR_WRITE_4(sc, MTR1, hashes[1]); 1057 return; 1058 allmulti: 1059 sc->sc_rxc |= RXC_RM; 1060 ifp->if_flags |= IFF_ALLMULTI; 1061 } 1062 1063 static int 1064 add_rxbuf(struct kse_softc *sc, int idx) 1065 { 1066 struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1067 struct mbuf *m; 1068 int error; 1069 1070 MGETHDR(m, M_DONTWAIT, MT_DATA); 1071 if (m == NULL) 1072 return ENOBUFS; 1073 1074 MCLGET(m, M_DONTWAIT); 1075 if ((m->m_flags & M_EXT) == 0) { 1076 m_freem(m); 1077 return ENOBUFS; 1078 } 1079 1080 if (rxs->rxs_mbuf != NULL) 1081 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1082 1083 rxs->rxs_mbuf = m; 1084 1085 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, 1086 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT); 1087 if (error) { 1088 printf("%s: can't load rx DMA map %d, error = %d\n", 1089 device_xname(&sc->sc_dev), idx, error); 1090 panic("kse_add_rxbuf"); 1091 } 1092 1093 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1094 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1095 1096 KSE_INIT_RXDESC(sc, idx); 1097 1098 return 0; 1099 } 1100 1101 static void 1102 rxdrain(struct kse_softc *sc) 1103 { 1104 struct kse_rxsoft *rxs; 1105 int i; 1106 1107 for (i = 0; i < KSE_NRXDESC; i++) { 1108 rxs = &sc->sc_rxsoft[i]; 1109 if (rxs->rxs_mbuf != NULL) { 1110 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1111 m_freem(rxs->rxs_mbuf); 1112 rxs->rxs_mbuf = NULL; 1113 } 1114 } 1115 } 1116 1117 static int 1118 kse_intr(void *arg) 1119 { 1120 struct kse_softc *sc = arg; 1121 uint32_t isr; 1122 1123 if ((isr = CSR_READ_4(sc, INTST)) == 0) 1124 return 0; 1125 1126 if (isr & INT_DMRS) 1127 rxintr(sc); 1128 if (isr & INT_DMTS) 1129 txreap(sc); 1130 if (isr & INT_DMLCS) 1131 lnkchg(sc); 1132 if (isr & INT_DMRBUS) 1133 printf("%s: Rx descriptor full\n", device_xname(&sc->sc_dev)); 1134 1135 CSR_WRITE_4(sc, INTST, isr); 1136 return 1; 1137 } 1138 1139 static void 1140 rxintr(struct kse_softc *sc) 1141 { 1142 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1143 struct kse_rxsoft *rxs; 1144 struct mbuf *m; 1145 uint32_t rxstat; 1146 int i, len; 1147 1148 for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) { 1149 rxs = &sc->sc_rxsoft[i]; 1150 1151 KSE_CDRXSYNC(sc, i, 1152 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1153 1154 rxstat = sc->sc_rxdescs[i].r0; 1155 1156 if (rxstat & R0_OWN) /* desc is left empty */ 1157 break; 1158 1159 /* R0_FS|R0_LS must have been marked for this desc */ 1160 1161 if (rxstat & R0_ES) { 1162 ifp->if_ierrors++; 1163 #define PRINTERR(bit, str) \ 1164 if (rxstat & (bit)) \ 1165 printf("%s: receive error: %s\n", \ 1166 device_xname(&sc->sc_dev), str) 1167 PRINTERR(R0_TL, "frame too long"); 1168 PRINTERR(R0_RF, "runt frame"); 1169 PRINTERR(R0_CE, "bad FCS"); 1170 #undef PRINTERR 1171 KSE_INIT_RXDESC(sc, i); 1172 continue; 1173 } 1174 1175 /* HW errata; frame might be too small or too large */ 1176 1177 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1178 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1179 1180 len = rxstat & R0_FL_MASK; 1181 len -= ETHER_CRC_LEN; /* trim CRC off */ 1182 m = rxs->rxs_mbuf; 1183 1184 if (add_rxbuf(sc, i) != 0) { 1185 ifp->if_ierrors++; 1186 KSE_INIT_RXDESC(sc, i); 1187 bus_dmamap_sync(sc->sc_dmat, 1188 rxs->rxs_dmamap, 0, 1189 rxs->rxs_dmamap->dm_mapsize, 1190 BUS_DMASYNC_PREREAD); 1191 continue; 1192 } 1193 1194 ifp->if_ipackets++; 1195 m->m_pkthdr.rcvif = ifp; 1196 m->m_pkthdr.len = m->m_len = len; 1197 1198 if (sc->sc_mcsum) { 1199 m->m_pkthdr.csum_flags |= sc->sc_mcsum; 1200 if (rxstat & R0_IPE) 1201 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 1202 if (rxstat & (R0_TCPE | R0_UDPE)) 1203 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 1204 } 1205 #if NBPFILTER > 0 1206 if (ifp->if_bpf) 1207 bpf_mtap(ifp->if_bpf, m); 1208 #endif /* NBPFILTER > 0 */ 1209 (*ifp->if_input)(ifp, m); 1210 #ifdef KSEDIAGNOSTIC 1211 if (kse_monitor_rxintr > 0) { 1212 printf("m stat %x data %p len %d\n", 1213 rxstat, m->m_data, m->m_len); 1214 } 1215 #endif 1216 } 1217 sc->sc_rxptr = i; 1218 } 1219 1220 static void 1221 txreap(struct kse_softc *sc) 1222 { 1223 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1224 struct kse_txsoft *txs; 1225 uint32_t txstat; 1226 int i; 1227 1228 ifp->if_flags &= ~IFF_OACTIVE; 1229 1230 for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN; 1231 i = KSE_NEXTTXS(i), sc->sc_txsfree++) { 1232 txs = &sc->sc_txsoft[i]; 1233 1234 KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc, 1235 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1236 1237 txstat = sc->sc_txdescs[txs->txs_lastdesc].t0; 1238 1239 if (txstat & T0_OWN) /* desc is still in use */ 1240 break; 1241 1242 /* there is no way to tell transmission status per frame */ 1243 1244 ifp->if_opackets++; 1245 1246 sc->sc_txfree += txs->txs_ndesc; 1247 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 1248 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1249 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1250 m_freem(txs->txs_mbuf); 1251 txs->txs_mbuf = NULL; 1252 } 1253 sc->sc_txsdirty = i; 1254 if (sc->sc_txsfree == KSE_TXQUEUELEN) 1255 ifp->if_timer = 0; 1256 } 1257 1258 static void 1259 lnkchg(struct kse_softc *sc) 1260 { 1261 struct ifmediareq ifmr; 1262 1263 #if 0 /* rambling link status */ 1264 printf("%s: link %s\n", device_xname(&sc->sc_dev), 1265 (CSR_READ_2(sc, P1SR) & (1U << 5)) ? "up" : "down"); 1266 #endif 1267 ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr); 1268 } 1269 1270 static int 1271 ifmedia_upd(struct ifnet *ifp) 1272 { 1273 struct kse_softc *sc = ifp->if_softc; 1274 struct ifmedia *ifm = &sc->sc_media; 1275 uint16_t ctl; 1276 1277 ctl = 0; 1278 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 1279 ctl |= (1U << 13); /* restart AN */ 1280 ctl |= (1U << 7); /* enable AN */ 1281 ctl |= (1U << 4); /* advertise flow control pause */ 1282 ctl |= (1U << 3) | (1U << 2) | (1U << 1) | (1U << 0); 1283 } 1284 else { 1285 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) 1286 ctl |= (1U << 6); 1287 if (ifm->ifm_media & IFM_FDX) 1288 ctl |= (1U << 5); 1289 } 1290 CSR_WRITE_2(sc, P1CR4, ctl); 1291 1292 sc->sc_media_active = IFM_NONE; 1293 sc->sc_media_status = IFM_AVALID; 1294 1295 return 0; 1296 } 1297 1298 static void 1299 ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1300 { 1301 struct kse_softc *sc = ifp->if_softc; 1302 struct ifmedia *ifm = &sc->sc_media; 1303 uint16_t ctl, sts, result; 1304 1305 ifmr->ifm_status = IFM_AVALID; 1306 ifmr->ifm_active = IFM_ETHER; 1307 1308 ctl = CSR_READ_2(sc, P1CR4); 1309 sts = CSR_READ_2(sc, P1SR); 1310 if ((sts & (1U << 5)) == 0) { 1311 ifmr->ifm_active |= IFM_NONE; 1312 goto out; /* link is down */ 1313 } 1314 ifmr->ifm_status |= IFM_ACTIVE; 1315 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 1316 if ((sts & (1U << 6)) == 0) { 1317 ifmr->ifm_active |= IFM_NONE; 1318 goto out; /* negotiation in progress */ 1319 } 1320 result = ctl & sts & 017; 1321 if (result & (1U << 3)) 1322 ifmr->ifm_active |= IFM_100_TX|IFM_FDX; 1323 else if (result & (1U << 2)) 1324 ifmr->ifm_active |= IFM_100_TX; 1325 else if (result & (1U << 1)) 1326 ifmr->ifm_active |= IFM_10_T|IFM_FDX; 1327 else if (result & (1U << 0)) 1328 ifmr->ifm_active |= IFM_10_T; 1329 else 1330 ifmr->ifm_active |= IFM_NONE; 1331 if (ctl & (1U << 4)) 1332 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE; 1333 if (sts & (1U << 4)) 1334 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE; 1335 } 1336 else { 1337 ifmr->ifm_active |= (sts & (1U << 10)) ? IFM_100_TX : IFM_10_T; 1338 if (sts & (1U << 9)) 1339 ifmr->ifm_active |= IFM_FDX; 1340 if (sts & (1U << 12)) 1341 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE; 1342 if (sts & (1U << 11)) 1343 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE; 1344 } 1345 1346 out: 1347 sc->sc_media_status = ifmr->ifm_status; 1348 sc->sc_media_active = ifmr->ifm_active; 1349 } 1350 1351 static void 1352 phy_tick(void *arg) 1353 { 1354 struct kse_softc *sc = arg; 1355 struct ifmediareq ifmr; 1356 int s; 1357 1358 s = splnet(); 1359 ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr); 1360 splx(s); 1361 1362 callout_reset(&sc->sc_callout, hz, phy_tick, sc); 1363 } 1364 1365 static int 1366 ifmedia2_upd(struct ifnet *ifp) 1367 { 1368 struct kse_softc *sc = ifp->if_softc; 1369 1370 sc->sc_media_status = IFM_AVALID; 1371 sc->sc_media_active = IFM_NONE; 1372 return 0; 1373 } 1374 1375 static void 1376 ifmedia2_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1377 { 1378 struct kse_softc *sc = ifp->if_softc; 1379 int p1sts, p2sts; 1380 1381 ifmr->ifm_status = IFM_AVALID; 1382 ifmr->ifm_active = IFM_ETHER; 1383 p1sts = CSR_READ_2(sc, P1SR); 1384 p2sts = CSR_READ_2(sc, P2SR); 1385 if (((p1sts | p2sts) & (1U << 5)) == 0) 1386 ifmr->ifm_active |= IFM_NONE; 1387 else { 1388 ifmr->ifm_status |= IFM_ACTIVE; 1389 ifmr->ifm_active |= IFM_100_TX|IFM_FDX; 1390 ifmr->ifm_active |= IFM_FLOW|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE; 1391 } 1392 sc->sc_media_status = ifmr->ifm_status; 1393 sc->sc_media_active = ifmr->ifm_active; 1394 } 1395 1396 #ifdef KSE_EVENT_COUNTERS 1397 static void 1398 stat_tick(arg) 1399 void *arg; 1400 { 1401 struct kse_softc *sc = arg; 1402 struct ksext *ee = &sc->sc_ext; 1403 int nport, p, i, val; 1404 1405 nport = (sc->sc_chip == 0x8842) ? 3 : 1; 1406 for (p = 0; p < nport; p++) { 1407 for (i = 0; i < 32; i++) { 1408 val = 0x1c00 | (p * 0x20 + i); 1409 CSR_WRITE_2(sc, IACR, val); 1410 do { 1411 val = CSR_READ_2(sc, IADR5) << 16; 1412 } while ((val & (1U << 30)) == 0); 1413 if (val & (1U << 31)) { 1414 (void)CSR_READ_2(sc, IADR4); 1415 val = 0x3fffffff; /* has made overflow */ 1416 } 1417 else { 1418 val &= 0x3fff0000; /* 29:16 */ 1419 val |= CSR_READ_2(sc, IADR4); /* 15:0 */ 1420 } 1421 ee->pev[p][i].ev_count += val; /* i (0-31) */ 1422 } 1423 CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p); 1424 ee->pev[p][32].ev_count = CSR_READ_2(sc, IADR4); /* 32 */ 1425 CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p * 3 + 1); 1426 ee->pev[p][33].ev_count = CSR_READ_2(sc, IADR4); /* 33 */ 1427 } 1428 callout_reset(&sc->sc_stat_ch, hz * 60, stat_tick, arg); 1429 } 1430 1431 static void 1432 zerostats(struct kse_softc *sc) 1433 { 1434 struct ksext *ee = &sc->sc_ext; 1435 int nport, p, i, val; 1436 1437 /* make sure all the HW counters get zero */ 1438 nport = (sc->sc_chip == 0x8842) ? 3 : 1; 1439 for (p = 0; p < nport; p++) { 1440 for (i = 0; i < 31; i++) { 1441 val = 0x1c00 | (p * 0x20 + i); 1442 CSR_WRITE_2(sc, IACR, val); 1443 do { 1444 val = CSR_READ_2(sc, IADR5) << 16; 1445 } while ((val & (1U << 30)) == 0); 1446 (void)CSR_READ_2(sc, IADR4); 1447 ee->pev[p][i].ev_count = 0; 1448 } 1449 } 1450 } 1451 #endif 1452