/* $NetBSD: if_kse.c,v 1.38 2019/05/29 10:07:29 msaitoh Exp $ */

/*-
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Tohru Nishimura.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.38 2019/05/29 10:07:29 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <machine/endian.h>
#include <sys/bus.h>
#include <sys/intr.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/bpf.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#define CSR_READ_4(sc, off) \
	bus_space_read_4(sc->sc_st, sc->sc_sh, off)
#define CSR_WRITE_4(sc, off, val) \
	bus_space_write_4(sc->sc_st, sc->sc_sh, off, val)
#define CSR_READ_2(sc, off) \
	bus_space_read_2(sc->sc_st, sc->sc_sh, off)
#define CSR_WRITE_2(sc, off, val) \
	bus_space_write_2(sc->sc_st, sc->sc_sh, off, val)

#define MDTXC	0x000	/* DMA transmit control */
#define MDRXC	0x004	/* DMA receive control */
#define MDTSC	0x008	/* DMA transmit start */
#define MDRSC	0x00c	/* DMA receive start */
#define TDLB	0x010	/* transmit descriptor list base */
#define RDLB	0x014	/* receive descriptor list base */
#define MTR0	0x020	/* multicast table 31:0 */
#define MTR1	0x024	/* multicast table 63:32 */
#define INTEN	0x028	/* interrupt enable */
#define INTST	0x02c	/* interrupt status */
#define MARL	0x200	/* MAC address low */
#define MARM	0x202	/* MAC address middle */
#define MARH	0x204	/* MAC address high */
#define GRR	0x216	/* global reset */
#define CIDR	0x400	/* chip ID and enable */
#define CGCR	0x40a	/* chip global control */
#define IACR	0x4a0	/* indirect access control */
#define IADR1	0x4a2	/* indirect access data 66:63 */
#define IADR2	0x4a4	/* indirect access data 47:32 */
#define IADR3	0x4a6	/* indirect access data 63:48 */
#define IADR4	0x4a8	/* indirect access data 15:0 */
#define IADR5	0x4aa	/* indirect access data 31:16 */
#define P1CR4	0x512	/* port 1 control 4 */
#define P1SR	0x514	/* port 1 status */
#define P2CR4	0x532	/* port 2 control 4 */
#define P2SR	0x534	/* port 2 status */

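/*
 * Per-port PHY control/status and the switch MIB counters are reached
 * through the indirect access block: a command word written to IACR
 * selects the object, and data moves through IADR1-IADR5.  A MIB
 * counter read looks roughly like the disabled sketch below; see
 * stat_tick() for the real code.  That the two top bits read via
 * IADR5 act as "valid" and "overflow" flags is inferred from that
 * routine rather than stated anywhere in this file.
 */
#if 0
	CSR_WRITE_2(sc, IACR, 0x1c00 | counter);	/* select counter */
	do {
		val = CSR_READ_2(sc, IADR5) << 16;	/* bits 31:16 */
	} while ((val & (1U << 30)) == 0);		/* wait for valid */
	val = (val & 0x3fff0000) | CSR_READ_2(sc, IADR4); /* add bits 15:0 */
#endif
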
#define TXC_BS_MSK	0x3f000000	/* burst size */
#define TXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define TXC_UCG		(1U<<18)	/* generate UDP checksum */
#define TXC_TCG		(1U<<17)	/* generate TCP checksum */
#define TXC_ICG		(1U<<16)	/* generate IP checksum */
#define TXC_FCE		(1U<<9)		/* enable flowcontrol */
#define TXC_EP		(1U<<2)		/* enable automatic padding */
#define TXC_AC		(1U<<1)		/* add CRC to frame */
#define TXC_TEN		(1)		/* enable DMA to run */

#define RXC_BS_MSK	0x3f000000	/* burst size */
#define RXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define RXC_IHAE	(1U<<19)	/* IP header alignment enable */
#define RXC_UCC		(1U<<18)	/* run UDP checksum */
#define RXC_TCC		(1U<<17)	/* run TCP checksum */
#define RXC_ICC		(1U<<16)	/* run IP checksum */
#define RXC_FCE		(1U<<9)		/* enable flowcontrol */
#define RXC_RB		(1U<<6)		/* receive broadcast frame */
#define RXC_RM		(1U<<5)		/* receive multicast frame */
#define RXC_RU		(1U<<4)		/* receive unicast frame */
#define RXC_RE		(1U<<3)		/* accept error frame */
#define RXC_RA		(1U<<2)		/* receive all frame */
#define RXC_MHTE	(1U<<1)		/* use multicast hash table */
#define RXC_REN		(1)		/* enable DMA to run */

#define INT_DMLCS	(1U<<31)	/* link status change */
#define INT_DMTS	(1U<<30)	/* sending desc. has posted Tx done */
#define INT_DMRS	(1U<<29)	/* frame was received */
#define INT_DMRBUS	(1U<<27)	/* Rx descriptor pool is full */

#define T0_OWN		(1U<<31)	/* desc is ready to Tx */

#define R0_OWN		(1U<<31)	/* desc is empty */
#define R0_FS		(1U<<30)	/* first segment of frame */
#define R0_LS		(1U<<29)	/* last segment of frame */
#define R0_IPE		(1U<<28)	/* IP checksum error */
#define R0_TCPE		(1U<<27)	/* TCP checksum error */
#define R0_UDPE		(1U<<26)	/* UDP checksum error */
#define R0_ES		(1U<<25)	/* error summary */
#define R0_MF		(1U<<24)	/* multicast frame */
#define R0_SPN		0x00300000	/* 21:20 switch port 1/2 */
#define R0_ALIGN	0x00300000	/* 21:20 (KSZ8692P) Rx align amount */
#define R0_RE		(1U<<19)	/* MII reported error */
#define R0_TL		(1U<<18)	/* frame too long, beyond 1518 */
#define R0_RF		(1U<<17)	/* damaged runt frame */
#define R0_CE		(1U<<16)	/* CRC error */
#define R0_FT		(1U<<15)	/* frame type */
#define R0_FL_MASK	0x7ff		/* frame length 10:0 */

#define T1_IC		(1U<<31)	/* post interrupt on complete */
#define T1_FS		(1U<<30)	/* first segment of frame */
#define T1_LS		(1U<<29)	/* last segment of frame */
#define T1_IPCKG	(1U<<28)	/* generate IP checksum */
#define T1_TCPCKG	(1U<<27)	/* generate TCP checksum */
#define T1_UDPCKG	(1U<<26)	/* generate UDP checksum */
#define T1_TER		(1U<<25)	/* end of ring */
#define T1_SPN		0x00300000	/* 21:20 switch port 1/2 */
#define T1_TBS_MASK	0x7ff		/* segment size 10:0 */

#define R1_RER		(1U<<25)	/* end of ring */
#define R1_RBS_MASK	0x7fc		/* segment size 10:0 */

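/*
 * Software ring parameters.  Tx jobs (mbuf chains) are accounted
 * separately from Tx descriptors, since a single job may occupy up
 * to KSE_NTXSEGS descriptors.  All ring sizes are powers of two, so
 * advancing an index reduces to an AND with the ring mask.
 */
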
#define KSE_NTXSEGS		16
#define KSE_TXQUEUELEN		64
#define KSE_TXQUEUELEN_MASK	(KSE_TXQUEUELEN - 1)
#define KSE_TXQUEUE_GC		(KSE_TXQUEUELEN / 4)
#define KSE_NTXDESC		256
#define KSE_NTXDESC_MASK	(KSE_NTXDESC - 1)
#define KSE_NEXTTX(x)		(((x) + 1) & KSE_NTXDESC_MASK)
#define KSE_NEXTTXS(x)		(((x) + 1) & KSE_TXQUEUELEN_MASK)

#define KSE_NRXDESC		64
#define KSE_NRXDESC_MASK	(KSE_NRXDESC - 1)
#define KSE_NEXTRX(x)		(((x) + 1) & KSE_NRXDESC_MASK)

struct tdes {
	uint32_t t0, t1, t2, t3;
};

struct rdes {
	uint32_t r0, r1, r2, r3;
};

struct kse_control_data {
	struct tdes kcd_txdescs[KSE_NTXDESC];
	struct rdes kcd_rxdescs[KSE_NRXDESC];
};
#define KSE_CDOFF(x)		offsetof(struct kse_control_data, x)
#define KSE_CDTXOFF(x)		KSE_CDOFF(kcd_txdescs[(x)])
#define KSE_CDRXOFF(x)		KSE_CDOFF(kcd_rxdescs[(x)])

struct kse_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

struct kse_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

struct kse_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* Ethernet common data */
	void *sc_ih;			/* interrupt cookie */

	struct ifmedia sc_media;	/* ifmedia information */
	int sc_media_status;		/* PHY */
	int sc_media_active;		/* PHY */
	callout_t sc_callout;		/* MII tick callout */
	callout_t sc_stat_ch;		/* statistics counter callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	struct kse_control_data *sc_control_data;
#define sc_txdescs	sc_control_data->kcd_txdescs
#define sc_rxdescs	sc_control_data->kcd_rxdescs

	struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN];
	struct kse_rxsoft sc_rxsoft[KSE_NRXDESC];
	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */
	int sc_rxptr;			/* next ready Rx descriptor/descsoft */

	uint32_t sc_txc, sc_rxc;
	uint32_t sc_t1csum;
	int sc_mcsum;
	uint32_t sc_inten;

	uint32_t sc_chip;
	uint8_t sc_altmac[16][ETHER_ADDR_LEN];
	uint16_t sc_vlan[16];

#ifdef KSE_EVENT_COUNTERS
	struct ksext {
		char evcntname[3][8];
		struct evcnt pev[3][34];
	} sc_ext;			/* switch statistics */
#endif
};

#define KSE_CDTXADDR(sc, x)	((sc)->sc_cddma + KSE_CDTXOFF((x)))
#define KSE_CDRXADDR(sc, x)	((sc)->sc_cddma + KSE_CDRXOFF((x)))

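/*
 * Both descriptor rings live in a single DMA-coherent allocation
 * (struct kse_control_data).  KSE_CDTXADDR()/KSE_CDRXADDR() turn a
 * ring index into the bus address the chip sees, and the sync macros
 * below flush or invalidate just the affected slice of the control
 * data DMA map.
 */
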
#define KSE_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > KSE_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    KSE_CDTXOFF(__x), sizeof(struct tdes) *		\
		    (KSE_NTXDESC - __x), (ops));			\
		__n -= (KSE_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define KSE_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
} while (/*CONSTCOND*/0)

#define KSE_INIT_RXDESC(sc, x)						\
do {									\
	struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr;		\
	__rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */;		\
	__rxd->r0 = R0_OWN;						\
	KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)

u_int kse_burstsize = 8;	/* DMA burst length tuning knob */

#ifdef KSEDIAGNOSTIC
u_int kse_monitor_rxintr;	/* fragmented UDP csum HW bug hook */
#endif

static int kse_match(device_t, cfdata_t, void *);
static void kse_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(kse, sizeof(struct kse_softc),
    kse_match, kse_attach, NULL, NULL);

static int kse_ioctl(struct ifnet *, u_long, void *);
static void kse_start(struct ifnet *);
static void kse_watchdog(struct ifnet *);
static int kse_init(struct ifnet *);
static void kse_stop(struct ifnet *, int);
static void kse_reset(struct kse_softc *);
static void kse_set_filter(struct kse_softc *);
static int add_rxbuf(struct kse_softc *, int);
static void rxdrain(struct kse_softc *);
static int kse_intr(void *);
static void rxintr(struct kse_softc *);
static void txreap(struct kse_softc *);
static void lnkchg(struct kse_softc *);
static int ifmedia_upd(struct ifnet *);
static void ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void phy_tick(void *);
static int ifmedia2_upd(struct ifnet *);
static void ifmedia2_sts(struct ifnet *, struct ifmediareq *);
#ifdef KSE_EVENT_COUNTERS
static void stat_tick(void *);
static void zerostats(struct kse_softc *);
#endif

static int
kse_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MICREL &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8842 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8841) &&
	    PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
		return 1;

	return 0;
}

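/*
 * Attach: map the registers, enable bus mastering and wake the chip
 * from power save if needed, read the station address from
 * MARL/MARM/MARH, establish the interrupt, and set up the DMA-able
 * control data before registering the network interface.
 */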
static void
kse_attach(device_t parent, device_t self, void *aux)
{
	struct kse_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	struct ifmedia *ifm;
	uint8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	int i, error, nseg;
	pcireg_t pmode;
	int pmreg;
	char intrbuf[PCI_INTRSTR_LEN];

	if (pci_mapreg_map(pa, 0x10,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
	    0, &sc->sc_st, &sc->sc_sh, NULL, NULL) != 0) {
		printf(": unable to map device registers\n");
		return;
	}

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;

	/* Make sure bus mastering is enabled. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (pmode == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    device_xname(sc->sc_dev));
			return;
		}
		if (pmode != PCI_PMCSR_STATE_D0) {
			printf("%s: waking up from power state D%d\n",
			    device_xname(sc->sc_dev), pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	sc->sc_chip = PCI_PRODUCT(pa->pa_id);
	printf(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n",
	    sc->sc_chip, PCI_REVISION(pa->pa_class));

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	i = CSR_READ_2(sc, MARL);
	enaddr[5] = i; enaddr[4] = i >> 8;
	i = CSR_READ_2(sc, MARM);
	enaddr[3] = i; enaddr[2] = i >> 8;
	i = CSR_READ_2(sc, MARH);
	enaddr[1] = i; enaddr[0] = i >> 8;
	printf("%s: Ethernet address %s\n",
	    device_xname(sc->sc_dev), ether_sprintf(enaddr));

	/*
	 * Enable chip function.
	 */
	CSR_WRITE_2(sc, CIDR, 1);

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, kse_intr, sc,
	    device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}
	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct kse_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct kse_control_data), 1,
	    sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    KSE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}
	for (i = 0; i < KSE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	callout_init(&sc->sc_callout, 0);
	callout_init(&sc->sc_stat_ch, 0);

	/* Initialize ifmedia structures. */
	ifm = &sc->sc_media;
	sc->sc_ethercom.ec_ifmedia = ifm;
	if (sc->sc_chip == 0x8841) {
		ifmedia_init(ifm, 0, ifmedia_upd, ifmedia_sts);
		ifmedia_add(ifm, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
	} else {
		ifmedia_init(ifm, 0, ifmedia2_upd, ifmedia2_sts);
		ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
	}

	printf("%s: 10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n",
	    device_xname(sc->sc_dev));

	ifp = &sc->sc_ethercom.ec_if;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kse_ioctl;
	ifp->if_start = kse_start;
	ifp->if_watchdog = kse_watchdog;
	ifp->if_init = kse_init;
	ifp->if_stop = kse_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * KSZ8842 can handle 802.1Q VLAN-sized frames,
	 * can do IPv4, TCPv4, and UDPv4 checksums in hardware.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef KSE_EVENT_COUNTERS
	int p = (sc->sc_chip == 0x8842) ? 3 : 1;
	for (i = 0; i < p; i++) {
		struct ksext *ee = &sc->sc_ext;
		snprintf(ee->evcntname[i], sizeof(ee->evcntname[i]),
		    "%s.%d", device_xname(sc->sc_dev), i+1);
		evcnt_attach_dynamic(&ee->pev[i][0], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxLoPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][1], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxHiPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][2], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxUndersizePkt");
		evcnt_attach_dynamic(&ee->pev[i][3], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxFragments");
		evcnt_attach_dynamic(&ee->pev[i][4], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxOversize");
		evcnt_attach_dynamic(&ee->pev[i][5], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxJabbers");
		evcnt_attach_dynamic(&ee->pev[i][6], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxSymbolError");
		evcnt_attach_dynamic(&ee->pev[i][7], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxCRCError");
		evcnt_attach_dynamic(&ee->pev[i][8], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxAlignmentError");
		evcnt_attach_dynamic(&ee->pev[i][9], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxControl8808Pkts");
		evcnt_attach_dynamic(&ee->pev[i][10], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxPausePkts");
		evcnt_attach_dynamic(&ee->pev[i][11], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxBroadcast");
		evcnt_attach_dynamic(&ee->pev[i][12], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxMulticast");
		evcnt_attach_dynamic(&ee->pev[i][13], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxUnicast");
		evcnt_attach_dynamic(&ee->pev[i][14], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx64Octets");
		evcnt_attach_dynamic(&ee->pev[i][15], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx65To127Octets");
		evcnt_attach_dynamic(&ee->pev[i][16], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx128To255Octets");
		evcnt_attach_dynamic(&ee->pev[i][17], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx255To511Octets");
		evcnt_attach_dynamic(&ee->pev[i][18], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx512To1023Octets");
		evcnt_attach_dynamic(&ee->pev[i][19], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "Rx1024To1522Octets");
		evcnt_attach_dynamic(&ee->pev[i][20], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxLoPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][21], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxHiPriotyByte");
		evcnt_attach_dynamic(&ee->pev[i][22], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxLateCollision");
		evcnt_attach_dynamic(&ee->pev[i][23], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxPausePkts");
		evcnt_attach_dynamic(&ee->pev[i][24], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxBroadcastPkts");
		evcnt_attach_dynamic(&ee->pev[i][25], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxMulticastPkts");
		evcnt_attach_dynamic(&ee->pev[i][26], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxUnicastPkts");
		evcnt_attach_dynamic(&ee->pev[i][27], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxDeferred");
		evcnt_attach_dynamic(&ee->pev[i][28], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxTotalCollision");
		evcnt_attach_dynamic(&ee->pev[i][29], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxExcessiveCollision");
		evcnt_attach_dynamic(&ee->pev[i][30], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxSingleCollision");
		evcnt_attach_dynamic(&ee->pev[i][31], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxMultipleCollision");
		evcnt_attach_dynamic(&ee->pev[i][32], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "TxDropPkts");
		evcnt_attach_dynamic(&ee->pev[i][33], EVCNT_TYPE_MISC,
		    NULL, ee->evcntname[i], "RxDropPkts");
	}
#endif
	return;

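	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */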
 fail_5:
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct kse_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_0:
	return;
}

static int
kse_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct kse_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	switch (cmd) {
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			kse_set_filter(sc);
		}
		break;
	}

	kse_start(ifp);

	splx(s);
	return error;
}

static int
kse_init(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	uint32_t paddr;
	int i, error = 0;

	/* cancel pending I/O */
	kse_stop(ifp, 0);

	/* reset all registers but PCI configuration */
	kse_reset(sc);

	/* craft Tx descriptor ring */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) {
		sc->sc_txdescs[i].t3 = paddr;
		paddr += sizeof(struct tdes);
	}
	sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0);
	KSE_CDTXSYNC(sc, 0, KSE_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = KSE_NTXDESC;
	sc->sc_txnext = 0;

	for (i = 0; i < KSE_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = KSE_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/* craft Rx descriptor ring */
	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
	for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) {
		sc->sc_rxdescs[i].r3 = paddr;
		paddr += sizeof(struct rdes);
	}
	sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0);
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
			if ((error = add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				rxdrain(sc);
				goto out;
			}
		}
		else
			KSE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* hand Tx/Rx rings to HW */
	CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0));
	CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0));

	sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC | TXC_FCE;
	sc->sc_rxc = RXC_REN | RXC_RU | RXC_FCE;
	if (ifp->if_flags & IFF_PROMISC)
		sc->sc_rxc |= RXC_RA;
	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rxc |= RXC_RB;
	sc->sc_t1csum = sc->sc_mcsum = 0;
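	/*
	 * Map the enabled checksum capabilities onto the engine
	 * controls: sc_txc/sc_rxc program the Tx/Rx DMA controllers,
	 * sc_t1csum is OR'ed into each Tx descriptor's t1 word by
	 * kse_start(), and sc_mcsum marks inbound mbufs so the stack
	 * knows the hardware has already verified them.
	 */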
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
		sc->sc_rxc |= RXC_ICC;
		sc->sc_mcsum |= M_CSUM_IPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
		sc->sc_txc |= TXC_ICG;
		sc->sc_t1csum |= T1_IPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) {
		sc->sc_rxc |= RXC_TCC;
		sc->sc_mcsum |= M_CSUM_TCPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) {
		sc->sc_txc |= TXC_TCG;
		sc->sc_t1csum |= T1_TCPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) {
		sc->sc_rxc |= RXC_UCC;
		sc->sc_mcsum |= M_CSUM_UDPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) {
		sc->sc_txc |= TXC_UCG;
		sc->sc_t1csum |= T1_UDPCKG;
	}
	sc->sc_txc |= (kse_burstsize << TXC_BS_SFT);
	sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT);

	/* build multicast hash filter if necessary */
	kse_set_filter(sc);

	/* set current media */
	(void)ifmedia_upd(ifp);

	/* enable transmitter and receiver */
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
	CSR_WRITE_4(sc, MDRSC, 1);

	/* enable interrupts */
	sc->sc_inten = INT_DMTS | INT_DMRS | INT_DMRBUS;
	if (sc->sc_chip == 0x8841)
		sc->sc_inten |= INT_DMLCS;
	CSR_WRITE_4(sc, INTST, ~0);
	CSR_WRITE_4(sc, INTEN, sc->sc_inten);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	if (sc->sc_chip == 0x8841) {
		/* start one second timer */
		callout_reset(&sc->sc_callout, hz, phy_tick, sc);
	}
#ifdef KSE_EVENT_COUNTERS
	/* start statistics gather 1 minute timer */
	zerostats(sc);
	callout_reset(&sc->sc_stat_ch, hz * 60, stat_tick, sc);
#endif

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	}
	return error;
}

static void
kse_stop(struct ifnet *ifp, int disable)
{
	struct kse_softc *sc = ifp->if_softc;
	struct kse_txsoft *txs;
	int i;

	if (sc->sc_chip == 0x8841)
		callout_stop(&sc->sc_callout);
	callout_stop(&sc->sc_stat_ch);

	sc->sc_txc &= ~TXC_TEN;
	sc->sc_rxc &= ~RXC_REN;
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);

	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		rxdrain(sc);
}

static void
kse_reset(struct kse_softc *sc)
{

	CSR_WRITE_2(sc, GRR, 1);
	delay(1000); /* PDF does not mention the delay amount */
	CSR_WRITE_2(sc, GRR, 0);

	CSR_WRITE_2(sc, CIDR, 1);
}

static void
kse_watchdog(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	txreap(sc);

	if (sc->sc_txfree != KSE_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		kse_init(ifp);
	}
	else if (ifp->if_flags & IFF_DEBUG)
		printf("%s: recovered from device timeout\n",
		    device_xname(sc->sc_dev));

	/* Try to get more packets going. */
	kse_start(ifp);
}

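/*
 * Transmit path.  Descriptors are chained into a ring through t3.
 * Ownership is handed to the chip by setting T0_OWN in the frame's
 * first descriptor only after all the other segments have been set
 * up, so the DMA engine can never chase a half-built chain.
 */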
static void
kse_start(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct kse_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t tdes0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Remember the previous number of free descriptors. */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (sc->sc_txsfree < KSE_TXQUEUE_GC) {
			txreap(sc);
			if (sc->sc_txsfree == 0)
				break;
		}
		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		lasttx = -1; tdes0 = 0;
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = KSE_NEXTTX(nexttx)) {
			struct tdes *tdes = &sc->sc_txdescs[nexttx];
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			tdes->t2 = dmamap->dm_segs[seg].ds_addr;
			tdes->t1 = sc->sc_t1csum
			    | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK);
			tdes->t0 = tdes0;
			tdes0 |= T0_OWN;
			lasttx = nexttx;
		}

		/*
		 * Outgoing NFS mbuf must be unloaded when Tx completed.
		 * Without T1_IC NFS mbuf is left unack'ed for excessive
		 * time and NFS stops to proceed until kse_watchdog()
		 * calls txreap() to reclaim the unack'ed mbuf.
		 * It's painful to traverse every mbuf chain to determine
		 * whether someone is waiting for Tx completion.
		 */
		m = m0;
		do {
			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
				sc->sc_txdescs[lasttx].t1 |= T1_IC;
				break;
			}
		} while ((m = m->m_next) != NULL);

		/* Write last T0_OWN bit of the 1st segment */
		sc->sc_txdescs[lasttx].t1 |= T1_LS;
		sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS;
		sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN;
		KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Tell DMA start transmit */
		CSR_WRITE_4(sc, MDTSC, 1);

		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndesc = dmamap->dm_nsegs;

		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;
		sc->sc_txsfree--;
		sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext);
		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

static void
kse_set_filter(struct kse_softc *sc)
{
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	uint32_t h, hashes[2];

	sc->sc_rxc &= ~(RXC_MHTE | RXC_RM);
	ifp->if_flags &= ~IFF_ALLMULTI;
	if (ifp->if_flags & IFF_PROMISC)
		return;

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	if (enm == NULL) {
		ETHER_UNLOCK(ec);
		return;
	}
	hashes[0] = hashes[1] = 0;
	do {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ETHER_UNLOCK(ec);
			goto allmulti;
		}
		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hashes[h >> 5] |= 1 << (h & 0x1f);
		ETHER_NEXT_MULTI(step, enm);
	} while (enm != NULL);
	ETHER_UNLOCK(ec);
	sc->sc_rxc |= RXC_MHTE;
	CSR_WRITE_4(sc, MTR0, hashes[0]);
	CSR_WRITE_4(sc, MTR1, hashes[1]);
	return;
 allmulti:
	sc->sc_rxc |= RXC_RM;
	ifp->if_flags |= IFF_ALLMULTI;
}

static int
add_rxbuf(struct kse_softc *sc, int idx)
{
	struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("kse_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	KSE_INIT_RXDESC(sc, idx);

	return 0;
}

static void
rxdrain(struct kse_softc *sc)
{
	struct kse_rxsoft *rxs;
	int i;

	for (i = 0; i < KSE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

static int
kse_intr(void *arg)
{
	struct kse_softc *sc = arg;
	uint32_t isr;

	if ((isr = CSR_READ_4(sc, INTST)) == 0)
		return 0;

	if (isr & INT_DMRS)
		rxintr(sc);
	if (isr & INT_DMTS)
		txreap(sc);
	if (isr & INT_DMLCS)
		lnkchg(sc);
	if (isr & INT_DMRBUS)
		printf("%s: Rx descriptor full\n", device_xname(sc->sc_dev));

	CSR_WRITE_4(sc, INTST, isr);
	return 1;
}

static void
rxintr(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		KSE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].r0;

		if (rxstat & R0_OWN) /* desc is left empty */
			break;

		/* R0_FS | R0_LS must have been marked for this desc */

		if (rxstat & R0_ES) {
			ifp->if_ierrors++;
#define PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				printf("%s: receive error: %s\n",	\
				    device_xname(sc->sc_dev), str)
			PRINTERR(R0_TL, "frame too long");
			PRINTERR(R0_RF, "runt frame");
			PRINTERR(R0_CE, "bad FCS");
#undef PRINTERR
			KSE_INIT_RXDESC(sc, i);
			continue;
		}

		/* HW errata; frame might be too small or too large */

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		len = rxstat & R0_FL_MASK;
		len -= ETHER_CRC_LEN;	/* Trim CRC off */
		m = rxs->rxs_mbuf;

		if (add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			KSE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat,
			    rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			continue;
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		if (sc->sc_mcsum) {
			m->m_pkthdr.csum_flags |= sc->sc_mcsum;
			if (rxstat & R0_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
			if (rxstat & (R0_TCPE | R0_UDPE))
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
		if_percpuq_enqueue(ifp->if_percpuq, m);
#ifdef KSEDIAGNOSTIC
		if (kse_monitor_rxintr > 0) {
			printf("m stat %x data %p len %d\n",
			    rxstat, m->m_data, m->m_len);
		}
#endif
	}
	sc->sc_rxptr = i;
}

static void
txreap(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_txsoft *txs;
	uint32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN;
	     i = KSE_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;

		if (txstat & T0_OWN) /* desc is still in use */
			break;

		/* There is no way to tell transmission status per frame */

		ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}
	sc->sc_txsdirty = i;
	if (sc->sc_txsfree == KSE_TXQUEUELEN)
		ifp->if_timer = 0;
}

static void
lnkchg(struct kse_softc *sc)
{
	struct ifmediareq ifmr;

#if 0 /* rambling link status */
	printf("%s: link %s\n", device_xname(sc->sc_dev),
	    (CSR_READ_2(sc, P1SR) & (1U << 5)) ? "up" : "down");
#endif
	ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
}

static int
ifmedia_upd(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	uint16_t ctl;

	ctl = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		ctl |= (1U << 13);	/* Restart AN */
		ctl |= (1U << 7);	/* Enable AN */
		ctl |= (1U << 4);	/* Advertise flow control pause */
		ctl |= (1U << 3) | (1U << 2) | (1U << 1) | (1U << 0);
	}
	else {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX)
			ctl |= (1U << 6);
		if (ifm->ifm_media & IFM_FDX)
			ctl |= (1U << 5);
	}
	CSR_WRITE_2(sc, P1CR4, ctl);

	sc->sc_media_active = IFM_NONE;
	sc->sc_media_status = IFM_AVALID;

	return 0;
}

static void
ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct kse_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	uint16_t ctl, sts, result;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ctl = CSR_READ_2(sc, P1CR4);
	sts = CSR_READ_2(sc, P1SR);
	if ((sts & (1U << 5)) == 0) {
		ifmr->ifm_active |= IFM_NONE;
		goto out; /* Link is down */
	}
	ifmr->ifm_status |= IFM_ACTIVE;
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		if ((sts & (1U << 6)) == 0) {
			ifmr->ifm_active |= IFM_NONE;
			goto out; /* Negotiation in progress */
		}
		result = ctl & sts & 017;
		if (result & (1U << 3))
			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		else if (result & (1U << 2))
			ifmr->ifm_active |= IFM_100_TX | IFM_HDX;
		else if (result & (1U << 1))
			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		else if (result & (1U << 0))
			ifmr->ifm_active |= IFM_10_T | IFM_HDX;
		else
			ifmr->ifm_active |= IFM_NONE;
		if (ctl & (1U << 4))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
		if (sts & (1U << 4))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
	}
	else {
		ifmr->ifm_active |= (sts & (1U << 10)) ? IFM_100_TX : IFM_10_T;
		if (sts & (1U << 9))
			ifmr->ifm_active |= IFM_FDX;
		if (sts & (1U << 12))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
		if (sts & (1U << 11))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
	}

 out:
	sc->sc_media_status = ifmr->ifm_status;
	sc->sc_media_active = ifmr->ifm_active;
}

static void
phy_tick(void *arg)
{
	struct kse_softc *sc = arg;
	struct ifmediareq ifmr;
	int s;

	s = splnet();
	ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
	splx(s);

	callout_reset(&sc->sc_callout, hz, phy_tick, sc);
}

static int
ifmedia2_upd(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;

	sc->sc_media_status = IFM_AVALID;
	sc->sc_media_active = IFM_NONE;
	return 0;
}

static void
ifmedia2_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct kse_softc *sc = ifp->if_softc;
	int p1sts, p2sts;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;
	p1sts = CSR_READ_2(sc, P1SR);
	p2sts = CSR_READ_2(sc, P2SR);
	if (((p1sts | p2sts) & (1U << 5)) == 0)
		ifmr->ifm_active |= IFM_NONE;
	else {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		ifmr->ifm_active |= IFM_FLOW
		    | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE;
	}
	sc->sc_media_status = ifmr->ifm_status;
	sc->sc_media_active = ifmr->ifm_active;
}

#ifdef KSE_EVENT_COUNTERS
static void
stat_tick(void *arg)
{
	struct kse_softc *sc = arg;
	struct ksext *ee = &sc->sc_ext;
	int nport, p, i, val;

	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
	for (p = 0; p < nport; p++) {
		for (i = 0; i < 32; i++) {
			val = 0x1c00 | (p * 0x20 + i);
			CSR_WRITE_2(sc, IACR, val);
			do {
				val = CSR_READ_2(sc, IADR5) << 16;
			} while ((val & (1U << 30)) == 0);
			if (val & (1U << 31)) {
				(void)CSR_READ_2(sc, IADR4);
				val = 0x3fffffff; /* counter has overflowed */
			}
			else {
				val &= 0x3fff0000;		/* 29:16 */
				val |= CSR_READ_2(sc, IADR4);	/* 15:0 */
			}
			ee->pev[p][i].ev_count += val; /* i (0-31) */
		}
		CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p);
		ee->pev[p][32].ev_count = CSR_READ_2(sc, IADR4); /* 32 */
		CSR_WRITE_2(sc, IACR, 0x1c00 + 0x100 + p * 3 + 1);
		ee->pev[p][33].ev_count = CSR_READ_2(sc, IADR4); /* 33 */
	}
	callout_reset(&sc->sc_stat_ch, hz * 60, stat_tick, arg);
}

static void
zerostats(struct kse_softc *sc)
{
	struct ksext *ee = &sc->sc_ext;
	int nport, p, i, val;

	/* Make sure all the HW counters get zero */
	nport = (sc->sc_chip == 0x8842) ? 3 : 1;
	for (p = 0; p < nport; p++) {
		for (i = 0; i < 31; i++) {
			val = 0x1c00 | (p * 0x20 + i);
			CSR_WRITE_2(sc, IACR, val);
			do {
				val = CSR_READ_2(sc, IADR5) << 16;
			} while ((val & (1U << 30)) == 0);
			(void)CSR_READ_2(sc, IADR4);
			ee->pev[p][i].ev_count = 0;
		}
	}
}
#endif