1 /* $NetBSD: if_kse.c,v 1.11 2007/11/16 10:31:57 nisimura Exp $ */ 2 3 /* 4 * Copyright (c) 2006 Tohru Nishimura 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by Tohru Nishimura. 17 * 4. The name of the author may not be used to endorse or promote products 18 * derived from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 #include <sys/cdefs.h> 33 __KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.11 2007/11/16 10:31:57 nisimura Exp $"); 34 35 #include "bpfilter.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/callout.h> 40 #include <sys/mbuf.h> 41 #include <sys/malloc.h> 42 #include <sys/kernel.h> 43 #include <sys/ioctl.h> 44 #include <sys/errno.h> 45 #include <sys/device.h> 46 #include <sys/queue.h> 47 48 #include <machine/endian.h> 49 #include <sys/bus.h> 50 #include <sys/intr.h> 51 52 #include <net/if.h> 53 #include <net/if_media.h> 54 #include <net/if_dl.h> 55 #include <net/if_ether.h> 56 57 #if NBPFILTER > 0 58 #include <net/bpf.h> 59 #endif 60 61 #include <dev/pci/pcivar.h> 62 #include <dev/pci/pcireg.h> 63 #include <dev/pci/pcidevs.h> 64 65 #define CSR_READ_4(sc, off) \ 66 bus_space_read_4(sc->sc_st, sc->sc_sh, off) 67 #define CSR_WRITE_4(sc, off, val) \ 68 bus_space_write_4(sc->sc_st, sc->sc_sh, off, val) 69 #define CSR_READ_2(sc, off) \ 70 bus_space_read_2(sc->sc_st, sc->sc_sh, off) 71 #define CSR_WRITE_2(sc, off, val) \ 72 bus_space_write_2(sc->sc_st, sc->sc_sh, off, val) 73 74 #define MDTXC 0x000 /* DMA transmit control */ 75 #define MDRXC 0x004 /* DMA receive control */ 76 #define MDTSC 0x008 /* DMA transmit start */ 77 #define MDRSC 0x00c /* DMA receive start */ 78 #define TDLB 0x010 /* transmit descriptor list base */ 79 #define RDLB 0x014 /* receive descriptor list base */ 80 #define MTR0 0x020 /* multicast table 31:0 */ 81 #define MTR1 0x024 /* multicast table 63:32 */ 82 #define INTEN 0x028 /* interrupt enable */ 83 #define INTST 0x02c /* interrupt status */ 84 #define MARL 0x200 /* MAC address low */ 85 #define MARM 0x202 /* MAC address middle */ 86 #define MARH 0x204 /* MAC address high */ 87 #define GRR 0x216 /* global reset */ 88 #define CIDR 0x400 /* chip ID and enable */ 89 #define CGCR 0x40a /* chip global control */ 90 #define IACR 0x4a0 /* indirect access control */ 91 #define IADR1 0x4a2 /* indirect access data 66:63 */ 92 #define IADR2 0x4a4 /* indirect access data 47:32 */ 93 #define IADR3 0x4a6 /* indirect access data 63:48 */ 94 #define IADR4 0x4a8 /* indirect access data 15:0 */ 95 #define IADR5 0x4aa /* indirect access data 31:16 */ 96 #define P1CR4 0x512 /* port 1 control 4 */ 97 #define P1SR 0x514 /* port 1 status */ 98 #define P2CR4 0x532 /* port 2 control 4 */ 99 #define P2SR 0x534 /* port 2 status */ 100 101 #define TXC_BS_MSK 0x3f000000 /* burst size */ 102 #define TXC_BS_SFT (24) /* 1,2,4,8,16,32 or 0 for unlimited */ 103 #define TXC_UCG (1U<<18) /* generate UDP checksum */ 104 #define TXC_TCG (1U<<17) /* generate TCP checksum */ 105 #define TXC_ICG (1U<<16) /* generate IP checksum */ 106 #define TXC_FCE (1U<<9) /* enable flowcontrol */ 107 #define TXC_EP (1U<<2) /* enable automatic padding */ 108 #define TXC_AC (1U<<1) /* add CRC to frame */ 109 #define TXC_TEN (1) /* enable DMA to run */ 110 111 #define RXC_BS_MSK 0x3f000000 /* burst size */ 112 #define RXC_BS_SFT (24) /* 1,2,4,8,16,32 or 0 for unlimited */ 113 #define RXC_IHAE (1U<<19) /* IP header alignment enable */ 114 #define RXC_UCC (1U<<18) /* run UDP checksum */ 115 #define RXC_TCC (1U<<17) /* run TDP checksum */ 116 #define RXC_ICC (1U<<16) /* run IP checksum */ 117 #define RXC_FCE (1U<<9) /* enable flowcontrol */ 118 #define RXC_RB (1U<<6) /* receive broadcast frame */ 119 #define RXC_RM (1U<<5) /* receive multicast frame */ 120 #define RXC_RU (1U<<4) /* receive unicast frame */ 121 #define RXC_RE (1U<<3) /* accept error frame */ 122 #define RXC_RA (1U<<2) /* receive all frame */ 123 #define RXC_MHTE (1U<<1) /* use multicast hash table */ 124 #define RXC_REN (1) /* enable DMA to run */ 125 126 #define INT_DMLCS (1U<<31) /* link status change */ 127 #define INT_DMTS (1U<<30) /* sending desc. has posted Tx done */ 128 #define INT_DMRS (1U<<29) /* frame was received */ 129 #define INT_DMRBUS (1U<<27) /* Rx descriptor pool is full */ 130 131 #define T0_OWN (1U<<31) /* desc is ready to Tx */ 132 133 #define R0_OWN (1U<<31) /* desc is empty */ 134 #define R0_FS (1U<<30) /* first segment of frame */ 135 #define R0_LS (1U<<29) /* last segment of frame */ 136 #define R0_IPE (1U<<28) /* IP checksum error */ 137 #define R0_TCPE (1U<<27) /* TCP checksum error */ 138 #define R0_UDPE (1U<<26) /* UDP checksum error */ 139 #define R0_ES (1U<<25) /* error summary */ 140 #define R0_MF (1U<<24) /* multicast frame */ 141 #define R0_SPN 0x00300000 /* 21:20 switch port 1/2 */ 142 #define R0_ALIGN 0x00300000 /* 21:20 (KSZ8692P) Rx align amount */ 143 #define R0_RE (1U<<19) /* MII reported error */ 144 #define R0_TL (1U<<18) /* frame too long, beyond 1518 */ 145 #define R0_RF (1U<<17) /* damaged runt frame */ 146 #define R0_CE (1U<<16) /* CRC error */ 147 #define R0_FT (1U<<15) /* frame type */ 148 #define R0_FL_MASK 0x7ff /* frame length 10:0 */ 149 150 #define T1_IC (1U<<31) /* post interrupt on complete */ 151 #define T1_FS (1U<<30) /* first segment of frame */ 152 #define T1_LS (1U<<29) /* last segment of frame */ 153 #define T1_IPCKG (1U<<28) /* generate IP checksum */ 154 #define T1_TCPCKG (1U<<27) /* generate TCP checksum */ 155 #define T1_UDPCKG (1U<<26) /* generate UDP checksum */ 156 #define T1_TER (1U<<25) /* end of ring */ 157 #define T1_SPN 0x00300000 /* 21:20 switch port 1/2 */ 158 #define T1_TBS_MASK 0x7ff /* segment size 10:0 */ 159 160 #define R1_RER (1U<<25) /* end of ring */ 161 #define R1_RBS_MASK 0x7fc /* segment size 10:0 */ 162 163 #define KSE_NTXSEGS 16 164 #define KSE_TXQUEUELEN 64 165 #define KSE_TXQUEUELEN_MASK (KSE_TXQUEUELEN - 1) 166 #define KSE_TXQUEUE_GC (KSE_TXQUEUELEN / 4) 167 #define KSE_NTXDESC 256 168 #define KSE_NTXDESC_MASK (KSE_NTXDESC - 1) 169 #define KSE_NEXTTX(x) (((x) + 1) & KSE_NTXDESC_MASK) 170 #define KSE_NEXTTXS(x) (((x) + 1) & KSE_TXQUEUELEN_MASK) 171 172 #define KSE_NRXDESC 64 173 #define KSE_NRXDESC_MASK (KSE_NRXDESC - 1) 174 #define KSE_NEXTRX(x) (((x) + 1) & KSE_NRXDESC_MASK) 175 176 struct tdes { 177 uint32_t t0, t1, t2, t3; 178 }; 179 180 struct rdes { 181 uint32_t r0, r1, r2, r3; 182 }; 183 184 struct kse_control_data { 185 struct tdes kcd_txdescs[KSE_NTXDESC]; 186 struct rdes kcd_rxdescs[KSE_NRXDESC]; 187 }; 188 #define KSE_CDOFF(x) offsetof(struct kse_control_data, x) 189 #define KSE_CDTXOFF(x) KSE_CDOFF(kcd_txdescs[(x)]) 190 #define KSE_CDRXOFF(x) KSE_CDOFF(kcd_rxdescs[(x)]) 191 192 struct kse_txsoft { 193 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 194 bus_dmamap_t txs_dmamap; /* our DMA map */ 195 int txs_firstdesc; /* first descriptor in packet */ 196 int txs_lastdesc; /* last descriptor in packet */ 197 int txs_ndesc; /* # of descriptors used */ 198 }; 199 200 struct kse_rxsoft { 201 struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 202 bus_dmamap_t rxs_dmamap; /* our DMA map */ 203 }; 204 205 struct kse_softc { 206 struct device sc_dev; /* generic device information */ 207 bus_space_tag_t sc_st; /* bus space tag */ 208 bus_space_handle_t sc_sh; /* bus space handle */ 209 bus_dma_tag_t sc_dmat; /* bus DMA tag */ 210 struct ethercom sc_ethercom; /* Ethernet common data */ 211 void *sc_ih; /* interrupt cookie */ 212 213 struct ifmedia sc_media; /* ifmedia information */ 214 int sc_media_status; /* PHY */ 215 int sc_media_active; /* PHY */ 216 callout_t sc_callout; /* MII tick callout */ 217 callout_t sc_stat_ch; /* statistics counter callout */ 218 219 bus_dmamap_t sc_cddmamap; /* control data DMA map */ 220 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr 221 222 struct kse_control_data *sc_control_data; 223 #define sc_txdescs sc_control_data->kcd_txdescs 224 #define sc_rxdescs sc_control_data->kcd_rxdescs 225 226 struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN]; 227 struct kse_rxsoft sc_rxsoft[KSE_NRXDESC]; 228 int sc_txfree; /* number of free Tx descriptors */ 229 int sc_txnext; /* next ready Tx descriptor */ 230 int sc_txsfree; /* number of free Tx jobs */ 231 int sc_txsnext; /* next ready Tx job */ 232 int sc_txsdirty; /* dirty Tx jobs */ 233 int sc_rxptr; /* next ready Rx descriptor/descsoft */ 234 235 uint32_t sc_txc, sc_rxc; 236 uint32_t sc_t1csum; 237 int sc_mcsum; 238 uint32_t sc_inten; 239 240 uint32_t sc_chip; 241 uint8_t sc_altmac[16][ETHER_ADDR_LEN]; 242 uint16_t sc_vlan[16]; 243 244 #ifdef KSE_EVENT_COUNTERS 245 struct ksext { 246 char evcntname[3][8]; 247 struct evcnt pev[3][34]; 248 } sc_ext; /* switch statistics */ 249 #endif 250 }; 251 252 #define KSE_CDTXADDR(sc, x) ((sc)->sc_cddma + KSE_CDTXOFF((x))) 253 #define KSE_CDRXADDR(sc, x) ((sc)->sc_cddma + KSE_CDRXOFF((x))) 254 255 #define KSE_CDTXSYNC(sc, x, n, ops) \ 256 do { \ 257 int __x, __n; \ 258 \ 259 __x = (x); \ 260 __n = (n); \ 261 \ 262 /* If it will wrap around, sync to the end of the ring. */ \ 263 if ((__x + __n) > KSE_NTXDESC) { \ 264 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 265 KSE_CDTXOFF(__x), sizeof(struct tdes) * \ 266 (KSE_NTXDESC - __x), (ops)); \ 267 __n -= (KSE_NTXDESC - __x); \ 268 __x = 0; \ 269 } \ 270 \ 271 /* Now sync whatever is left. */ \ 272 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 273 KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops)); \ 274 } while (/*CONSTCOND*/0) 275 276 #define KSE_CDRXSYNC(sc, x, ops) \ 277 do { \ 278 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 279 KSE_CDRXOFF((x)), sizeof(struct rdes), (ops)); \ 280 } while (/*CONSTCOND*/0) 281 282 #define KSE_INIT_RXDESC(sc, x) \ 283 do { \ 284 struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \ 285 struct rdes *__rxd = &(sc)->sc_rxdescs[(x)]; \ 286 struct mbuf *__m = __rxs->rxs_mbuf; \ 287 \ 288 __m->m_data = __m->m_ext.ext_buf; \ 289 __rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr; \ 290 __rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */; \ 291 __rxd->r0 = R0_OWN; \ 292 KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \ 293 } while (/*CONSTCOND*/0) 294 295 u_int kse_burstsize = 8; /* DMA burst length tuning knob */ 296 297 #ifdef KSEDIAGNOSTIC 298 u_int kse_monitor_rxintr; /* fragmented UDP csum HW bug hook */ 299 #endif 300 301 static int kse_match(struct device *, struct cfdata *, void *); 302 static void kse_attach(struct device *, struct device *, void *); 303 304 CFATTACH_DECL(kse, sizeof(struct kse_softc), 305 kse_match, kse_attach, NULL, NULL); 306 307 static int kse_ioctl(struct ifnet *, u_long, void *); 308 static void kse_start(struct ifnet *); 309 static void kse_watchdog(struct ifnet *); 310 static int kse_init(struct ifnet *); 311 static void kse_stop(struct ifnet *, int); 312 static void kse_reset(struct kse_softc *); 313 static void kse_set_filter(struct kse_softc *); 314 static int add_rxbuf(struct kse_softc *, int); 315 static void rxdrain(struct kse_softc *); 316 static int kse_intr(void *); 317 static void rxintr(struct kse_softc *); 318 static void txreap(struct kse_softc *); 319 static void lnkchg(struct kse_softc *); 320 static int ifmedia_upd(struct ifnet *); 321 static void ifmedia_sts(struct ifnet *, struct ifmediareq *); 322 static void phy_tick(void *); 323 static int ifmedia2_upd(struct ifnet *); 324 static void ifmedia2_sts(struct ifnet *, struct ifmediareq *); 325 #ifdef KSE_EVENT_COUNTERS 326 static void stat_tick(void *); 327 static void zerostats(struct kse_softc *); 328 #endif 329 330 static int 331 kse_match(struct device *parent, struct cfdata *match, void *aux) 332 { 333 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 334 335 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MICREL && 336 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8842 || 337 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8841) && 338 PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK) 339 return 1; 340 341 return 0; 342 } 343 344 static void 345 kse_attach(struct device *parent, struct device *self, void *aux) 346 { 347 struct kse_softc *sc = (struct kse_softc *)self; 348 struct pci_attach_args *pa = aux; 349 pci_chipset_tag_t pc = pa->pa_pc; 350 pci_intr_handle_t ih; 351 const char *intrstr; 352 struct ifnet *ifp; 353 struct ifmedia *ifm; 354 uint8_t enaddr[ETHER_ADDR_LEN]; 355 bus_dma_segment_t seg; 356 int i, p, error, nseg; 357 pcireg_t pmode; 358 int pmreg; 359 360 if (pci_mapreg_map(pa, 0x10, 361 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 362 0, &sc->sc_st, &sc->sc_sh, NULL, NULL) != 0) { 363 printf(": unable to map device registers\n"); 364 return; 365 } 366 367 sc->sc_dmat = pa->pa_dmat; 368 369 /* Make sure bus mastering is enabled. */ 370 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 371 pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) | 372 PCI_COMMAND_MASTER_ENABLE); 373 374 /* Get it out of power save mode, if needed. */ 375 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) { 376 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) & 377 PCI_PMCSR_STATE_MASK; 378 if (pmode == PCI_PMCSR_STATE_D3) { 379 /* 380 * The card has lost all configuration data in 381 * this state, so punt. 382 */ 383 printf("%s: unable to wake from power state D3\n", 384 sc->sc_dev.dv_xname); 385 return; 386 } 387 if (pmode != PCI_PMCSR_STATE_D0) { 388 printf("%s: waking up from power date D%d\n", 389 sc->sc_dev.dv_xname, pmode); 390 pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR, 391 PCI_PMCSR_STATE_D0); 392 } 393 } 394 395 sc->sc_chip = PCI_PRODUCT(pa->pa_id); 396 printf(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n", 397 sc->sc_chip, PCI_REVISION(pa->pa_class)); 398 399 /* 400 * Read the Ethernet address from the EEPROM. 401 */ 402 i = CSR_READ_2(sc, MARL); 403 enaddr[5] = i; enaddr[4] = i >> 8; 404 i = CSR_READ_2(sc, MARM); 405 enaddr[3] = i; enaddr[2] = i >> 8; 406 i = CSR_READ_2(sc, MARH); 407 enaddr[1] = i; enaddr[0] = i >> 8; 408 printf("%s: Ethernet address: %s\n", 409 sc->sc_dev.dv_xname, ether_sprintf(enaddr)); 410 411 /* 412 * Enable chip function. 413 */ 414 CSR_WRITE_2(sc, CIDR, 1); 415 416 /* 417 * Map and establish our interrupt. 418 */ 419 if (pci_intr_map(pa, &ih)) { 420 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname); 421 return; 422 } 423 intrstr = pci_intr_string(pc, ih); 424 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, kse_intr, sc); 425 if (sc->sc_ih == NULL) { 426 printf("%s: unable to establish interrupt", 427 sc->sc_dev.dv_xname); 428 if (intrstr != NULL) 429 printf(" at %s", intrstr); 430 printf("\n"); 431 return; 432 } 433 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr); 434 435 /* 436 * Allocate the control data structures, and create and load the 437 * DMA map for it. 438 */ 439 error = bus_dmamem_alloc(sc->sc_dmat, 440 sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0); 441 if (error != 0) { 442 printf("%s: unable to allocate control data, error = %d\n", 443 sc->sc_dev.dv_xname, error); 444 goto fail_0; 445 } 446 error = bus_dmamem_map(sc->sc_dmat, &seg, nseg, 447 sizeof(struct kse_control_data), (void **)&sc->sc_control_data, 448 BUS_DMA_COHERENT); 449 if (error != 0) { 450 printf("%s: unable to map control data, error = %d\n", 451 sc->sc_dev.dv_xname, error); 452 goto fail_1; 453 } 454 error = bus_dmamap_create(sc->sc_dmat, 455 sizeof(struct kse_control_data), 1, 456 sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap); 457 if (error != 0) { 458 printf("%s: unable to create control data DMA map, " 459 "error = %d\n", sc->sc_dev.dv_xname, error); 460 goto fail_2; 461 } 462 error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 463 sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0); 464 if (error != 0) { 465 printf("%s: unable to load control data DMA map, error = %d\n", 466 sc->sc_dev.dv_xname, error); 467 goto fail_3; 468 } 469 for (i = 0; i < KSE_TXQUEUELEN; i++) { 470 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 471 KSE_NTXSEGS, MCLBYTES, 0, 0, 472 &sc->sc_txsoft[i].txs_dmamap)) != 0) { 473 printf("%s: unable to create tx DMA map %d, " 474 "error = %d\n", sc->sc_dev.dv_xname, i, error); 475 goto fail_4; 476 } 477 } 478 for (i = 0; i < KSE_NRXDESC; i++) { 479 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 480 1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 481 printf("%s: unable to create rx DMA map %d, " 482 "error = %d\n", sc->sc_dev.dv_xname, i, error); 483 goto fail_5; 484 } 485 sc->sc_rxsoft[i].rxs_mbuf = NULL; 486 } 487 488 callout_init(&sc->sc_callout, 0); 489 callout_init(&sc->sc_stat_ch, 0); 490 491 ifm = &sc->sc_media; 492 if (sc->sc_chip == 0x8841) { 493 ifmedia_init(ifm, 0, ifmedia_upd, ifmedia_sts); 494 ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL); 495 ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 496 ifmedia_add(ifm, IFM_ETHER|IFM_100_TX, 0, NULL); 497 ifmedia_add(ifm, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); 498 ifmedia_add(ifm, IFM_ETHER|IFM_AUTO, 0, NULL); 499 ifmedia_set(ifm, IFM_ETHER|IFM_AUTO); 500 } 501 else { 502 ifmedia_init(ifm, 0, ifmedia2_upd, ifmedia2_sts); 503 ifmedia_add(ifm, IFM_ETHER|IFM_AUTO, 0, NULL); 504 ifmedia_set(ifm, IFM_ETHER|IFM_AUTO); 505 } 506 507 printf("%s: 10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n", 508 sc->sc_dev.dv_xname); 509 510 ifp = &sc->sc_ethercom.ec_if; 511 strcpy(ifp->if_xname, sc->sc_dev.dv_xname); 512 ifp->if_softc = sc; 513 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 514 ifp->if_ioctl = kse_ioctl; 515 ifp->if_start = kse_start; 516 ifp->if_watchdog = kse_watchdog; 517 ifp->if_init = kse_init; 518 ifp->if_stop = kse_stop; 519 IFQ_SET_READY(&ifp->if_snd); 520 521 /* 522 * KSZ8842 can handle 802.1Q VLAN-sized frames, 523 * can do IPv4, TCPv4, and UDPv4 checksums in hardware. 524 */ 525 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 526 ifp->if_capabilities |= 527 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 528 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 529 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 530 531 if_attach(ifp); 532 ether_ifattach(ifp, enaddr); 533 534 p = (sc->sc_chip == 0x8842) ? 3 : 1; 535 #ifdef KSE_EVENT_COUNTERS 536 for (i = 0; i < p; i++) { 537 struct ksext *ee = &sc->sc_ext; 538 sprintf(ee->evcntname[i], "%s.%d", sc->sc_dev.dv_xname, i+1); 539 evcnt_attach_dynamic(&ee->pev[i][0], EVCNT_TYPE_MISC, 540 NULL, ee->evcntname[i], "RxLoPriotyByte"); 541 evcnt_attach_dynamic(&ee->pev[i][1], EVCNT_TYPE_MISC, 542 NULL, ee->evcntname[i], "RxHiPriotyByte"); 543 evcnt_attach_dynamic(&ee->pev[i][2], EVCNT_TYPE_MISC, 544 NULL, ee->evcntname[i], "RxUndersizePkt"); 545 evcnt_attach_dynamic(&ee->pev[i][3], EVCNT_TYPE_MISC, 546 NULL, ee->evcntname[i], "RxFragments"); 547 evcnt_attach_dynamic(&ee->pev[i][4], EVCNT_TYPE_MISC, 548 NULL, ee->evcntname[i], "RxOversize"); 549 evcnt_attach_dynamic(&ee->pev[i][5], EVCNT_TYPE_MISC, 550 NULL, ee->evcntname[i], "RxJabbers"); 551 evcnt_attach_dynamic(&ee->pev[i][6], EVCNT_TYPE_MISC, 552 NULL, ee->evcntname[i], "RxSymbolError"); 553 evcnt_attach_dynamic(&ee->pev[i][7], EVCNT_TYPE_MISC, 554 NULL, ee->evcntname[i], "RxCRCError"); 555 evcnt_attach_dynamic(&ee->pev[i][8], EVCNT_TYPE_MISC, 556 NULL, ee->evcntname[i], "RxAlignmentError"); 557 evcnt_attach_dynamic(&ee->pev[i][9], EVCNT_TYPE_MISC, 558 NULL, ee->evcntname[i], "RxControl8808Pkts"); 559 evcnt_attach_dynamic(&ee->pev[i][10], EVCNT_TYPE_MISC, 560 NULL, ee->evcntname[i], "RxPausePkts"); 561 evcnt_attach_dynamic(&ee->pev[i][11], EVCNT_TYPE_MISC, 562 NULL, ee->evcntname[i], "RxBroadcast"); 563 evcnt_attach_dynamic(&ee->pev[i][12], EVCNT_TYPE_MISC, 564 NULL, ee->evcntname[i], "RxMulticast"); 565 evcnt_attach_dynamic(&ee->pev[i][13], EVCNT_TYPE_MISC, 566 NULL, ee->evcntname[i], "RxUnicast"); 567 evcnt_attach_dynamic(&ee->pev[i][14], EVCNT_TYPE_MISC, 568 NULL, ee->evcntname[i], "Rx64Octets"); 569 evcnt_attach_dynamic(&ee->pev[i][15], EVCNT_TYPE_MISC, 570 NULL, ee->evcntname[i], "Rx65To127Octets"); 571 evcnt_attach_dynamic(&ee->pev[i][16], EVCNT_TYPE_MISC, 572 NULL, ee->evcntname[i], "Rx128To255Octets"); 573 evcnt_attach_dynamic(&ee->pev[i][17], EVCNT_TYPE_MISC, 574 NULL, ee->evcntname[i], "Rx255To511Octets"); 575 evcnt_attach_dynamic(&ee->pev[i][18], EVCNT_TYPE_MISC, 576 NULL, ee->evcntname[i], "Rx512To1023Octets"); 577 evcnt_attach_dynamic(&ee->pev[i][19], EVCNT_TYPE_MISC, 578 NULL, ee->evcntname[i], "Rx1024To1522Octets"); 579 evcnt_attach_dynamic(&ee->pev[i][20], EVCNT_TYPE_MISC, 580 NULL, ee->evcntname[i], "TxLoPriotyByte"); 581 evcnt_attach_dynamic(&ee->pev[i][21], EVCNT_TYPE_MISC, 582 NULL, ee->evcntname[i], "TxHiPriotyByte"); 583 evcnt_attach_dynamic(&ee->pev[i][22], EVCNT_TYPE_MISC, 584 NULL, ee->evcntname[i], "TxLateCollision"); 585 evcnt_attach_dynamic(&ee->pev[i][23], EVCNT_TYPE_MISC, 586 NULL, ee->evcntname[i], "TxPausePkts"); 587 evcnt_attach_dynamic(&ee->pev[i][24], EVCNT_TYPE_MISC, 588 NULL, ee->evcntname[i], "TxBroadcastPkts"); 589 evcnt_attach_dynamic(&ee->pev[i][25], EVCNT_TYPE_MISC, 590 NULL, ee->evcntname[i], "TxMulticastPkts"); 591 evcnt_attach_dynamic(&ee->pev[i][26], EVCNT_TYPE_MISC, 592 NULL, ee->evcntname[i], "TxUnicastPkts"); 593 evcnt_attach_dynamic(&ee->pev[i][27], EVCNT_TYPE_MISC, 594 NULL, ee->evcntname[i], "TxDeferred"); 595 evcnt_attach_dynamic(&ee->pev[i][28], EVCNT_TYPE_MISC, 596 NULL, ee->evcntname[i], "TxTotalCollision"); 597 evcnt_attach_dynamic(&ee->pev[i][29], EVCNT_TYPE_MISC, 598 NULL, ee->evcntname[i], "TxExcessiveCollision"); 599 evcnt_attach_dynamic(&ee->pev[i][30], EVCNT_TYPE_MISC, 600 NULL, ee->evcntname[i], "TxSingleCollision"); 601 evcnt_attach_dynamic(&ee->pev[i][31], EVCNT_TYPE_MISC, 602 NULL, ee->evcntname[i], "TxMultipleCollision"); 603 evcnt_attach_dynamic(&ee->pev[i][32], EVCNT_TYPE_MISC, 604 NULL, ee->evcntname[i], "TxDropPkts"); 605 evcnt_attach_dynamic(&ee->pev[i][33], EVCNT_TYPE_MISC, 606 NULL, ee->evcntname[i], "RxDropPkts"); 607 } 608 #endif 609 return; 610 611 fail_5: 612 for (i = 0; i < KSE_NRXDESC; i++) { 613 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 614 bus_dmamap_destroy(sc->sc_dmat, 615 sc->sc_rxsoft[i].rxs_dmamap); 616 } 617 fail_4: 618 for (i = 0; i < KSE_TXQUEUELEN; i++) { 619 if (sc->sc_txsoft[i].txs_dmamap != NULL) 620 bus_dmamap_destroy(sc->sc_dmat, 621 sc->sc_txsoft[i].txs_dmamap); 622 } 623 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 624 fail_3: 625 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 626 fail_2: 627 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 628 sizeof(struct kse_control_data)); 629 fail_1: 630 bus_dmamem_free(sc->sc_dmat, &seg, nseg); 631 fail_0: 632 return; 633 } 634 635 static int 636 kse_ioctl(struct ifnet *ifp, u_long cmd, void *data) 637 { 638 struct kse_softc *sc = ifp->if_softc; 639 struct ifreq *ifr = (struct ifreq *)data; 640 int s, error; 641 642 s = splnet(); 643 644 switch (cmd) { 645 case SIOCSIFMEDIA: 646 case SIOCGIFMEDIA: 647 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); 648 break; 649 650 default: 651 error = ether_ioctl(ifp, cmd, data); 652 if (cmd == ENETRESET) { 653 /* 654 * Multicast list has changed; set the hardware filter 655 * accordingly. 656 */ 657 if (ifp->if_flags & IFF_RUNNING) 658 kse_set_filter(sc); 659 error = 0; 660 } 661 break; 662 } 663 664 kse_start(ifp); 665 666 splx(s); 667 return error; 668 } 669 670 static int 671 kse_init(struct ifnet *ifp) 672 { 673 struct kse_softc *sc = ifp->if_softc; 674 uint32_t paddr; 675 int i, error = 0; 676 677 /* cancel pending I/O */ 678 kse_stop(ifp, 0); 679 680 /* reset all registers but PCI configuration */ 681 kse_reset(sc); 682 683 /* craft Tx descriptor ring */ 684 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 685 for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) { 686 sc->sc_txdescs[i].t3 = paddr; 687 paddr += sizeof(struct tdes); 688 } 689 sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0); 690 KSE_CDTXSYNC(sc, 0, KSE_NTXDESC, 691 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 692 sc->sc_txfree = KSE_NTXDESC; 693 sc->sc_txnext = 0; 694 695 for (i = 0; i < KSE_TXQUEUELEN; i++) 696 sc->sc_txsoft[i].txs_mbuf = NULL; 697 sc->sc_txsfree = KSE_TXQUEUELEN; 698 sc->sc_txsnext = 0; 699 sc->sc_txsdirty = 0; 700 701 /* craft Rx descriptor ring */ 702 memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs)); 703 for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) { 704 sc->sc_rxdescs[i].r3 = paddr; 705 paddr += sizeof(struct rdes); 706 } 707 sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0); 708 for (i = 0; i < KSE_NRXDESC; i++) { 709 if (sc->sc_rxsoft[i].rxs_mbuf == NULL) { 710 if ((error = add_rxbuf(sc, i)) != 0) { 711 printf("%s: unable to allocate or map rx " 712 "buffer %d, error = %d\n", 713 sc->sc_dev.dv_xname, i, error); 714 rxdrain(sc); 715 goto out; 716 } 717 } 718 else 719 KSE_INIT_RXDESC(sc, i); 720 } 721 sc->sc_rxptr = 0; 722 723 /* hand Tx/Rx rings to HW */ 724 CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0)); 725 CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0)); 726 727 sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC | TXC_FCE; 728 sc->sc_rxc = RXC_REN | RXC_RU | RXC_FCE; 729 if (ifp->if_flags & IFF_PROMISC) 730 sc->sc_rxc |= RXC_RA; 731 if (ifp->if_flags & IFF_BROADCAST) 732 sc->sc_rxc |= RXC_RB; 733 sc->sc_t1csum = sc->sc_mcsum = 0; 734 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) { 735 sc->sc_rxc |= RXC_ICC; 736 sc->sc_mcsum |= M_CSUM_IPv4; 737 } 738 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) { 739 sc->sc_txc |= TXC_ICG; 740 sc->sc_t1csum |= T1_IPCKG; 741 } 742 if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) { 743 sc->sc_rxc |= RXC_TCC; 744 sc->sc_mcsum |= M_CSUM_TCPv4; 745 } 746 if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) { 747 sc->sc_txc |= TXC_TCG; 748 sc->sc_t1csum |= T1_TCPCKG; 749 } 750 if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) { 751 sc->sc_rxc |= RXC_UCC; 752 sc->sc_mcsum |= M_CSUM_UDPv4; 753 } 754 if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) { 755 sc->sc_txc |= TXC_UCG; 756 sc->sc_t1csum |= T1_UDPCKG; 757 } 758 sc->sc_txc |= (kse_burstsize << TXC_BS_SFT); 759 sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT); 760 761 /* build multicast hash filter if necessary */ 762 kse_set_filter(sc); 763 764 /* set current media */ 765 (void)ifmedia_upd(ifp); 766 767 /* enable transmitter and receiver */ 768 CSR_WRITE_4(sc, MDTXC, sc->sc_txc); 769 CSR_WRITE_4(sc, MDRXC, sc->sc_rxc); 770 CSR_WRITE_4(sc, MDRSC, 1); 771 772 /* enable interrupts */ 773 sc->sc_inten = INT_DMTS|INT_DMRS|INT_DMRBUS; 774 if (sc->sc_chip == 0x8841) 775 sc->sc_inten |= INT_DMLCS; 776 CSR_WRITE_4(sc, INTST, ~0); 777 CSR_WRITE_4(sc, INTEN, sc->sc_inten); 778 779 ifp->if_flags |= IFF_RUNNING; 780 ifp->if_flags &= ~IFF_OACTIVE; 781 782 if (sc->sc_chip == 0x8841) { 783 /* start one second timer */ 784 callout_reset(&sc->sc_callout, hz, phy_tick, sc); 785 } 786 #ifdef KSE_EVENT_COUNTERS 787 /* start statistics gather 1 minute timer */ 788 zerostats(sc); 789 callout_reset(&sc->sc_stat_ch, hz * 60, stat_tick, sc); 790 #endif 791 792 out: 793 if (error) { 794 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 795 ifp->if_timer = 0; 796 printf("%s: interface not running\n", sc->sc_dev.dv_xname); 797 } 798 return error; 799 } 800 801 static void 802 kse_stop(struct ifnet *ifp, int disable) 803 { 804 struct kse_softc *sc = ifp->if_softc; 805 struct kse_txsoft *txs; 806 int i; 807 808 if (sc->sc_chip == 0x8841) 809 callout_stop(&sc->sc_callout); 810 callout_stop(&sc->sc_stat_ch); 811 812 sc->sc_txc &= ~TXC_TEN; 813 sc->sc_rxc &= ~RXC_REN; 814 CSR_WRITE_4(sc, MDTXC, sc->sc_txc); 815 CSR_WRITE_4(sc, MDRXC, sc->sc_rxc); 816 817 for (i = 0; i < KSE_TXQUEUELEN; i++) { 818 txs = &sc->sc_txsoft[i]; 819 if (txs->txs_mbuf != NULL) { 820 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 821 m_freem(txs->txs_mbuf); 822 txs->txs_mbuf = NULL; 823 } 824 } 825 826 if (disable) 827 rxdrain(sc); 828 829 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 830 ifp->if_timer = 0; 831 } 832 833 static void 834 kse_reset(struct kse_softc *sc) 835 { 836 837 CSR_WRITE_2(sc, GRR, 1); 838 delay(1000); /* PDF does not mention the delay amount */ 839 CSR_WRITE_2(sc, GRR, 0); 840 841 CSR_WRITE_2(sc, CIDR, 1); 842 } 843 844 static void 845 kse_watchdog(struct ifnet *ifp) 846 { 847 struct kse_softc *sc = ifp->if_softc; 848 849 /* 850 * Since we're not interrupting every packet, sweep 851 * up before we report an error. 852 */ 853 txreap(sc); 854 855 if (sc->sc_txfree != KSE_NTXDESC) { 856 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n", 857 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree, 858 sc->sc_txnext); 859 ifp->if_oerrors++; 860 861 /* Reset the interface. */ 862 kse_init(ifp); 863 } 864 else if (ifp->if_flags & IFF_DEBUG) 865 printf("%s: recovered from device timeout\n", 866 sc->sc_dev.dv_xname); 867 868 /* Try to get more packets going. */ 869 kse_start(ifp); 870 } 871 872 static void 873 kse_start(struct ifnet *ifp) 874 { 875 struct kse_softc *sc = ifp->if_softc; 876 struct mbuf *m0, *m; 877 struct kse_txsoft *txs; 878 bus_dmamap_t dmamap; 879 int error, nexttx, lasttx, ofree, seg; 880 uint32_t tdes0; 881 882 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 883 return; 884 885 /* 886 * Remember the previous number of free descriptors. 887 */ 888 ofree = sc->sc_txfree; 889 890 /* 891 * Loop through the send queue, setting up transmit descriptors 892 * until we drain the queue, or use up all available transmit 893 * descriptors. 894 */ 895 for (;;) { 896 IFQ_POLL(&ifp->if_snd, m0); 897 if (m0 == NULL) 898 break; 899 900 if (sc->sc_txsfree < KSE_TXQUEUE_GC) { 901 txreap(sc); 902 if (sc->sc_txsfree == 0) 903 break; 904 } 905 txs = &sc->sc_txsoft[sc->sc_txsnext]; 906 dmamap = txs->txs_dmamap; 907 908 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 909 BUS_DMA_WRITE|BUS_DMA_NOWAIT); 910 if (error) { 911 if (error == EFBIG) { 912 printf("%s: Tx packet consumes too many " 913 "DMA segments, dropping...\n", 914 sc->sc_dev.dv_xname); 915 IFQ_DEQUEUE(&ifp->if_snd, m0); 916 m_freem(m0); 917 continue; 918 } 919 /* Short on resources, just stop for now. */ 920 break; 921 } 922 923 if (dmamap->dm_nsegs > sc->sc_txfree) { 924 /* 925 * Not enough free descriptors to transmit this 926 * packet. We haven't committed anything yet, 927 * so just unload the DMA map, put the packet 928 * back on the queue, and punt. Notify the upper 929 * layer that there are not more slots left. 930 */ 931 ifp->if_flags |= IFF_OACTIVE; 932 bus_dmamap_unload(sc->sc_dmat, dmamap); 933 break; 934 } 935 936 IFQ_DEQUEUE(&ifp->if_snd, m0); 937 938 /* 939 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 940 */ 941 942 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 943 BUS_DMASYNC_PREWRITE); 944 945 lasttx = -1; tdes0 = 0; 946 for (nexttx = sc->sc_txnext, seg = 0; 947 seg < dmamap->dm_nsegs; 948 seg++, nexttx = KSE_NEXTTX(nexttx)) { 949 struct tdes *tdes = &sc->sc_txdescs[nexttx]; 950 /* 951 * If this is the first descriptor we're 952 * enqueueing, don't set the OWN bit just 953 * yet. That could cause a race condition. 954 * We'll do it below. 955 */ 956 tdes->t2 = dmamap->dm_segs[seg].ds_addr; 957 tdes->t1 = sc->sc_t1csum 958 | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK); 959 tdes->t0 = tdes0; 960 tdes0 |= T0_OWN; 961 lasttx = nexttx; 962 } 963 964 /* 965 * Outgoing NFS mbuf must be unloaded when Tx completed. 966 * Without T1_IC NFS mbuf is left unack'ed for excessive 967 * time and NFS stops to proceed until kse_watchdog() 968 * calls txreap() to reclaim the unack'ed mbuf. 969 * It's painful to traverse every mbuf chain to determine 970 * whether someone is waiting for Tx completion. 971 */ 972 m = m0; 973 do { 974 if ((m->m_flags & M_EXT) && m->m_ext.ext_free) { 975 sc->sc_txdescs[lasttx].t1 |= T1_IC; 976 break; 977 } 978 } while ((m = m->m_next) != NULL); 979 980 /* write last T0_OWN bit of the 1st segment */ 981 sc->sc_txdescs[lasttx].t1 |= T1_LS; 982 sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS; 983 sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN; 984 KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs, 985 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 986 987 /* tell DMA start transmit */ 988 CSR_WRITE_4(sc, MDTSC, 1); 989 990 txs->txs_mbuf = m0; 991 txs->txs_firstdesc = sc->sc_txnext; 992 txs->txs_lastdesc = lasttx; 993 txs->txs_ndesc = dmamap->dm_nsegs; 994 995 sc->sc_txfree -= txs->txs_ndesc; 996 sc->sc_txnext = nexttx; 997 sc->sc_txsfree--; 998 sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext); 999 #if NBPFILTER > 0 1000 /* 1001 * Pass the packet to any BPF listeners. 1002 */ 1003 if (ifp->if_bpf) 1004 bpf_mtap(ifp->if_bpf, m0); 1005 #endif /* NBPFILTER > 0 */ 1006 } 1007 1008 if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) { 1009 /* No more slots left; notify upper layer. */ 1010 ifp->if_flags |= IFF_OACTIVE; 1011 } 1012 if (sc->sc_txfree != ofree) { 1013 /* Set a watchdog timer in case the chip flakes out. */ 1014 ifp->if_timer = 5; 1015 } 1016 } 1017 1018 static void 1019 kse_set_filter(struct kse_softc *sc) 1020 { 1021 struct ether_multistep step; 1022 struct ether_multi *enm; 1023 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1024 uint32_t h, hashes[2]; 1025 1026 sc->sc_rxc &= ~(RXC_MHTE | RXC_RM); 1027 ifp->if_flags &= ~IFF_ALLMULTI; 1028 if (ifp->if_flags & IFF_PROMISC) 1029 return; 1030 1031 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm); 1032 if (enm == NULL) 1033 return; 1034 hashes[0] = hashes[1] = 0; 1035 do { 1036 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1037 /* 1038 * We must listen to a range of multicast addresses. 1039 * For now, just accept all multicasts, rather than 1040 * trying to set only those filter bits needed to match 1041 * the range. (At this time, the only use of address 1042 * ranges is for IP multicast routing, for which the 1043 * range is big enough to require all bits set.) 1044 */ 1045 goto allmulti; 1046 } 1047 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26; 1048 hashes[h >> 5] |= 1 << (h & 0x1f); 1049 ETHER_NEXT_MULTI(step, enm); 1050 } while (enm != NULL); 1051 sc->sc_rxc |= RXC_MHTE; 1052 CSR_WRITE_4(sc, MTR0, hashes[0]); 1053 CSR_WRITE_4(sc, MTR1, hashes[1]); 1054 return; 1055 allmulti: 1056 sc->sc_rxc |= RXC_RM; 1057 ifp->if_flags |= IFF_ALLMULTI; 1058 } 1059 1060 static int 1061 add_rxbuf(struct kse_softc *sc, int idx) 1062 { 1063 struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx]; 1064 struct mbuf *m; 1065 int error; 1066 1067 MGETHDR(m, M_DONTWAIT, MT_DATA); 1068 if (m == NULL) 1069 return ENOBUFS; 1070 1071 MCLGET(m, M_DONTWAIT); 1072 if ((m->m_flags & M_EXT) == 0) { 1073 m_freem(m); 1074 return ENOBUFS; 1075 } 1076 1077 if (rxs->rxs_mbuf != NULL) 1078 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1079 1080 rxs->rxs_mbuf = m; 1081 1082 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, 1083 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT); 1084 if (error) { 1085 printf("%s: can't load rx DMA map %d, error = %d\n", 1086 sc->sc_dev.dv_xname, idx, error); 1087 panic("kse_add_rxbuf"); 1088 } 1089 1090 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1091 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1092 1093 KSE_INIT_RXDESC(sc, idx); 1094 1095 return 0; 1096 } 1097 1098 static void 1099 rxdrain(struct kse_softc *sc) 1100 { 1101 struct kse_rxsoft *rxs; 1102 int i; 1103 1104 for (i = 0; i < KSE_NRXDESC; i++) { 1105 rxs = &sc->sc_rxsoft[i]; 1106 if (rxs->rxs_mbuf != NULL) { 1107 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1108 m_freem(rxs->rxs_mbuf); 1109 rxs->rxs_mbuf = NULL; 1110 } 1111 } 1112 } 1113 1114 static int 1115 kse_intr(void *arg) 1116 { 1117 struct kse_softc *sc = arg; 1118 uint32_t isr; 1119 1120 if ((isr = CSR_READ_4(sc, INTST)) == 0) 1121 return 0; 1122 1123 if (isr & INT_DMRS) 1124 rxintr(sc); 1125 if (isr & INT_DMTS) 1126 txreap(sc); 1127 if (isr & INT_DMLCS) 1128 lnkchg(sc); 1129 if (isr & INT_DMRBUS) 1130 printf("%s: Rx descriptor full\n", sc->sc_dev.dv_xname); 1131 1132 CSR_WRITE_4(sc, INTST, isr); 1133 return 1; 1134 } 1135 1136 static void 1137 rxintr(struct kse_softc *sc) 1138 { 1139 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1140 struct kse_rxsoft *rxs; 1141 struct mbuf *m; 1142 uint32_t rxstat; 1143 int i, len; 1144 1145 for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) { 1146 rxs = &sc->sc_rxsoft[i]; 1147 1148 KSE_CDRXSYNC(sc, i, 1149 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1150 1151 rxstat = sc->sc_rxdescs[i].r0; 1152 1153 if (rxstat & R0_OWN) /* desc is left empty */ 1154 break; 1155 1156 /* R0_FS|R0_LS must have been marked for this desc */ 1157 1158 if (rxstat & R0_ES) { 1159 ifp->if_ierrors++; 1160 #define PRINTERR(bit, str) \ 1161 if (rxstat & (bit)) \ 1162 printf("%s: receive error: %s\n", \ 1163 sc->sc_dev.dv_xname, str) 1164 PRINTERR(R0_TL, "frame too long"); 1165 PRINTERR(R0_RF, "runt frame"); 1166 PRINTERR(R0_CE, "bad FCS"); 1167 #undef PRINTERR 1168 KSE_INIT_RXDESC(sc, i); 1169 continue; 1170 } 1171 1172 /* HW errata; frame might be too small or too large */ 1173 1174 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1175 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1176 1177 len = rxstat & R0_FL_MASK; 1178 len -= ETHER_CRC_LEN; /* trim CRC off */ 1179 m = rxs->rxs_mbuf; 1180 1181 if (add_rxbuf(sc, i) != 0) { 1182 ifp->if_ierrors++; 1183 KSE_INIT_RXDESC(sc, i); 1184 bus_dmamap_sync(sc->sc_dmat, 1185 rxs->rxs_dmamap, 0, 1186 rxs->rxs_dmamap->dm_mapsize, 1187 BUS_DMASYNC_PREREAD); 1188 continue; 1189 } 1190 1191 ifp->if_ipackets++; 1192 m->m_pkthdr.rcvif = ifp; 1193 m->m_pkthdr.len = m->m_len = len; 1194 1195 if (sc->sc_mcsum) { 1196 m->m_pkthdr.csum_flags |= sc->sc_mcsum; 1197 if (rxstat & R0_IPE) 1198 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 1199 if (rxstat & (R0_TCPE | R0_UDPE)) 1200 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 1201 } 1202 #if NBPFILTER > 0 1203 if (ifp->if_bpf) 1204 bpf_mtap(ifp->if_bpf, m); 1205 #endif /* NBPFILTER > 0 */ 1206 (*ifp->if_input)(ifp, m); 1207 #ifdef KSEDIAGNOSTIC 1208 if (kse_monitor_rxintr > 0) { 1209 printf("m stat %x data %p len %d\n", 1210 rxstat, m->m_data, m->m_len); 1211 } 1212 #endif 1213 } 1214 sc->sc_rxptr = i; 1215 } 1216 1217 static void 1218 txreap(struct kse_softc *sc) 1219 { 1220 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1221 struct kse_txsoft *txs; 1222 uint32_t txstat; 1223 int i; 1224 1225 ifp->if_flags &= ~IFF_OACTIVE; 1226 1227 for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN; 1228 i = KSE_NEXTTXS(i), sc->sc_txsfree++) { 1229 txs = &sc->sc_txsoft[i]; 1230 1231 KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc, 1232 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1233 1234 txstat = sc->sc_txdescs[txs->txs_lastdesc].t0; 1235 1236 if (txstat & T0_OWN) /* desc is still in use */ 1237 break; 1238 1239 /* there is no way to tell transmission status per frame */ 1240 1241 ifp->if_opackets++; 1242 1243 sc->sc_txfree += txs->txs_ndesc; 1244 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 1245 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1246 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1247 m_freem(txs->txs_mbuf); 1248 txs->txs_mbuf = NULL; 1249 } 1250 sc->sc_txsdirty = i; 1251 if (sc->sc_txsfree == KSE_TXQUEUELEN) 1252 ifp->if_timer = 0; 1253 } 1254 1255 static void 1256 lnkchg(struct kse_softc *sc) 1257 { 1258 struct ifmediareq ifmr; 1259 1260 #if 0 /* rambling link status */ 1261 printf("%s: link %s\n", sc->sc_dev.dv_xname, 1262 (CSR_READ_2(sc, P1SR) & (1U << 5)) ? "up" : "down"); 1263 #endif 1264 ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr); 1265 } 1266 1267 static int 1268 ifmedia_upd(struct ifnet *ifp) 1269 { 1270 struct kse_softc *sc = ifp->if_softc; 1271 struct ifmedia *ifm = &sc->sc_media; 1272 uint16_t ctl; 1273 1274 ctl = 0; 1275 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 1276 ctl |= (1U << 13); /* restart AN */ 1277 ctl |= (1U << 7); /* enable AN */ 1278 ctl |= (1U << 4); /* advertise flow control pause */ 1279 ctl |= (1U << 3) | (1U << 2) | (1U << 1) | (1U << 0); 1280 } 1281 else { 1282 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) 1283 ctl |= (1U << 6); 1284 if (ifm->ifm_media & IFM_FDX) 1285 ctl |= (1U << 5); 1286 } 1287 CSR_WRITE_2(sc, P1CR4, ctl); 1288 1289 sc->sc_media_active = IFM_NONE; 1290 sc->sc_media_status = IFM_AVALID; 1291 1292 return 0; 1293 } 1294 1295 static void 1296 ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1297 { 1298 struct kse_softc *sc = ifp->if_softc; 1299 struct ifmedia *ifm = &sc->sc_media; 1300 uint16_t ctl, sts, result; 1301 1302 ifmr->ifm_status = IFM_AVALID; 1303 ifmr->ifm_active = IFM_ETHER; 1304 1305 ctl = CSR_READ_2(sc, P1CR4); 1306 sts = CSR_READ_2(sc, P1SR); 1307 if ((sts & (1U << 5)) == 0) { 1308 ifmr->ifm_active |= IFM_NONE; 1309 goto out; /* link is down */ 1310 } 1311 ifmr->ifm_status |= IFM_ACTIVE; 1312 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 1313 if ((sts & (1U << 6)) == 0) { 1314 ifmr->ifm_active |= IFM_NONE; 1315 goto out; /* negotiation in progress */ 1316 } 1317 result = ctl & sts & 017; 1318 if (result & (1U << 3)) 1319 ifmr->ifm_active |= IFM_100_TX|IFM_FDX; 1320 else if (result & (1U << 2)) 1321 ifmr->ifm_active |= IFM_100_TX; 1322 else if (result & (1U << 1)) 1323 ifmr->ifm_active |= IFM_10_T|IFM_FDX; 1324 else if (result & (1U << 0)) 1325 ifmr->ifm_active |= IFM_10_T; 1326 else 1327 ifmr->ifm_active |= IFM_NONE; 1328 if (ctl & (1U << 4)) 1329 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE; 1330 if (sts & (1U << 4)) 1331 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE; 1332 } 1333 else { 1334 ifmr->ifm_active |= (sts & (1U << 10)) ? IFM_100_TX : IFM_10_T; 1335 if (sts & (1U << 9)) 1336 ifmr->ifm_active |= IFM_FDX; 1337 if (sts & (1U << 12)) 1338 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE; 1339 if (sts & (1U << 11)) 1340 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE; 1341 } 1342 1343 out: 1344 sc->sc_media_status = ifmr->ifm_status; 1345 sc->sc_media_active = ifmr->ifm_active; 1346 } 1347 1348 static void 1349 phy_tick(void *arg) 1350 { 1351 struct kse_softc *sc = arg; 1352 struct ifmediareq ifmr; 1353 int s; 1354 1355 s = splnet(); 1356 ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr); 1357 splx(s); 1358 1359 callout_reset(&sc->sc_callout, hz, phy_tick, sc); 1360 } 1361 1362 static int 1363 ifmedia2_upd(struct ifnet *ifp) 1364 { 1365 struct kse_softc *sc = ifp->if_softc; 1366 1367 sc->sc_media_status = IFM_AVALID; 1368 sc->sc_media_active = IFM_NONE; 1369 return 0; 1370 } 1371 1372 static void 1373 ifmedia2_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1374 { 1375 struct kse_softc *sc = ifp->if_softc; 1376 int p1sts, p2sts; 1377 1378 ifmr->ifm_status = IFM_AVALID; 1379 ifmr->ifm_active = IFM_ETHER; 1380 p1sts = CSR_READ_2(sc, P1SR); 1381 p2sts = CSR_READ_2(sc, P2SR); 1382 if (((p1sts | p2sts) & (1U << 5)) == 0) 1383 ifmr->ifm_active |= IFM_NONE; 1384 else { 1385 ifmr->ifm_status |= IFM_ACTIVE; 1386 ifmr->ifm_active |= IFM_100_TX|IFM_FDX; 1387 ifmr->ifm_active |= IFM_FLOW|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE; 1388 } 1389 sc->sc_media_status = ifmr->ifm_status; 1390 sc->sc_media_active = ifmr->ifm_active; 1391 } 1392 1393 #ifdef KSE_EVENT_COUNTERS 1394 static void 1395 stat_tick(arg) 1396 void *arg; 1397 { 1398 struct kse_softc *sc = arg; 1399 struct ksext *ee = &sc->sc_ext; 1400 int nport, p, i, val; 1401 1402 nport = (sc->sc_chip == 0x8842) ? 3 : 1; 1403 for (p = 0; p < nport; p++) { 1404 for (i = 0; i < 32; i++) { 1405 val = 0x1c00 | (p * 0x20 + i); 1406 CSR_WRITE_2(sc, IACR, val); 1407 do { 1408 val = CSR_READ_2(sc, IADR5) << 16; 1409 } while ((val & (1U << 30)) == 0); 1410 if (val & (1U << 31)) { 1411 (void)CSR_READ_2(sc, IADR4); 1412 val = 0x3fffffff; /* has made overflow */ 1413 } 1414 else { 1415 val &= 0x3fff0000; /* 29:16 */ 1416 val |= CSR_READ_2(sc, IADR4); /* 15:0 */ 1417 } 1418 ee->pev[p][i].ev_count += val; /* i (0-31) */ 1419 } 1420 CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p); 1421 ee->pev[p][32].ev_count = CSR_READ_2(sc, IADR4); /* 32 */ 1422 CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p * 3 + 1); 1423 ee->pev[p][33].ev_count = CSR_READ_2(sc, IADR4); /* 33 */ 1424 } 1425 callout_reset(&sc->sc_stat_ch, hz * 60, stat_tick, arg); 1426 } 1427 1428 static void 1429 zerostats(struct kse_softc *sc) 1430 { 1431 struct ksext *ee = &sc->sc_ext; 1432 int nport, p, i, val; 1433 1434 /* make sure all the HW counters get zero */ 1435 nport = (sc->sc_chip == 0x8842) ? 3 : 1; 1436 for (p = 0; p < nport; p++) { 1437 for (i = 0; i < 31; i++) { 1438 val = 0x1c00 | (p * 0x20 + i); 1439 CSR_WRITE_2(sc, IACR, val); 1440 do { 1441 val = CSR_READ_2(sc, IADR5) << 16; 1442 } while ((val & (1U << 30)) == 0); 1443 (void)CSR_READ_2(sc, IADR4); 1444 ee->pev[p][i].ev_count = 0; 1445 } 1446 } 1447 } 1448 #endif 1449