/* $NetBSD: if_kse.c,v 1.2 2006/09/29 08:49:30 tsutsui Exp $ */

/*
 * Copyright (c) 2006 Tohru Nishimura
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Tohru Nishimura.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.2 2006/09/29 08:49:30 tsutsui Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <machine/endian.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#define CSR_READ_4(sc, off) \
	bus_space_read_4(sc->sc_st, sc->sc_sh, off)
#define CSR_WRITE_4(sc, off, val) \
	bus_space_write_4(sc->sc_st, sc->sc_sh, off, val)
#define CSR_READ_2(sc, off) \
	bus_space_read_2(sc->sc_st, sc->sc_sh, off)
#define CSR_WRITE_2(sc, off, val) \
	bus_space_write_2(sc->sc_st, sc->sc_sh, off, val)

#define MDTXC	0x000	/* DMA transmit control */
#define MDRXC	0x004	/* DMA receive control */
#define MDTSC	0x008	/* DMA transmit start */
#define MDRSC	0x00c	/* DMA receive start */
#define TDLB	0x010	/* transmit descriptor list base */
#define RDLB	0x014	/* receive descriptor list base */
#define INTEN	0x028	/* interrupt enable */
#define INTST	0x02c	/* interrupt status */
#define MARL	0x200	/* MAC address low */
#define MARM	0x202	/* MAC address middle */
#define MARH	0x204	/* MAC address high */
#define GRR	0x216	/* global reset */
#define CIDR	0x400	/* chip ID and enable */
#define CGCR	0x40a	/* chip global control */
#define P1CR4	0x512	/* port 1 control 4 */
#define P1SR	0x514	/* port 1 status */

#define TXC_BS_MSK	0x3f000000	/* burst size */
#define TXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define TXC_UCG		(1U<<18)	/* generate UDP checksum */
#define TXC_TCG		(1U<<17)	/* generate TCP checksum */
#define TXC_ICG		(1U<<16)	/* generate IP checksum */
#define TXC_FCE		(1U<<9)		/* enable flowcontrol */
#define TXC_EP		(1U<<2)		/* enable automatic padding */
#define TXC_AC		(1U<<1)		/* add CRC to frame */
#define TXC_TEN		(1)		/* enable DMA to run */

#define RXC_BS_MSK	0x3f000000	/* burst size */
#define RXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define RXC_UCG		(1U<<18)	/* run UDP checksum */
#define RXC_TCG		(1U<<17)	/* run TCP checksum */
#define RXC_ICG		(1U<<16)	/* run IP checksum */
#define RXC_FCE		(1U<<9)		/* enable flowcontrol */
#define RXC_RB		(1U<<6)		/* receive broadcast frame */
#define RXC_RM		(1U<<5)		/* receive multicast frame */
#define RXC_RU		(1U<<4)		/* receive unicast frame */
#define RXC_RE		(1U<<3)		/* accept error frame */
#define RXC_RA		(1U<<2)		/* receive all frame */
#define RXC_MA		(1U<<1)		/* receive through hash filter */
#define RXC_REN		(1)		/* enable DMA to run */

#define INT_DMLCS	(1U<<31)	/* link status change */
#define INT_DMTS	(1U<<30)	/* sending desc. has posted Tx done */
#define INT_DMRS	(1U<<29)	/* frame was received */
#define INT_DMRBUS	(1U<<27)	/* Rx descriptor pool is full */

#define T0_OWN		(1U<<31)	/* desc is ready to Tx */

#define R0_OWN		(1U<<31)	/* desc is empty */
#define R0_FS		(1U<<30)	/* first segment of frame */
#define R0_LS		(1U<<29)	/* last segment of frame */
#define R0_IPE		(1U<<28)	/* IP checksum error */
#define R0_TCPE		(1U<<27)	/* TCP checksum error */
#define R0_UDPE		(1U<<26)	/* UDP checksum error */
#define R0_ES		(1U<<25)	/* error summary */
#define R0_MF		(1U<<24)	/* multicast frame */
#define R0_RE		(1U<<19)	/* framing error */
#define R0_TL		(1U<<18)	/* too long frame */
#define R0_RF		(1U<<17)	/* damaged runt frame */
#define R0_CE		(1U<<16)	/* CRC error */
#define R0_FT		(1U<<15)	/* frame type */
#define R0_FL_MASK	0x7ff		/* frame length 10:0 */

#define T1_IC		(1U<<31)	/* post interrupt on complete */
#define T1_FS		(1U<<30)	/* first segment of frame */
#define T1_LS		(1U<<29)	/* last segment of frame */
#define T1_IPCKG	(1U<<28)	/* generate IP checksum */
#define T1_TCPCKG	(1U<<27)	/* generate TCP checksum */
#define T1_UDPCKG	(1U<<26)	/* generate UDP checksum */
#define T1_TER		(1U<<25)	/* end of ring */
#define T1_TBS_MASK	0x7ff		/* segment size 10:0 */

#define R1_RER		(1U<<25)	/* end of ring */
#define R1_RBS_MASK	0x7ff		/* segment size 10:0 */

#define KSE_NTXSEGS		16
#define KSE_TXQUEUELEN		64
#define KSE_TXQUEUELEN_MASK	(KSE_TXQUEUELEN - 1)
#define KSE_TXQUEUE_GC		(KSE_TXQUEUELEN / 4)
#define KSE_NTXDESC		256
#define KSE_NTXDESC_MASK	(KSE_NTXDESC - 1)
#define KSE_NEXTTX(x)		(((x) + 1) & KSE_NTXDESC_MASK)
#define KSE_NEXTTXS(x)		(((x) + 1) & KSE_TXQUEUELEN_MASK)

#define KSE_NRXDESC	64
#define KSE_NRXDESC_MASK	(KSE_NRXDESC - 1)
#define KSE_NEXTRX(x)		(((x) + 1) & KSE_NRXDESC_MASK)

struct tdes {
	uint32_t t0, t1, t2, t3;
};

struct rdes {
	uint32_t r0, r1, r2, r3;
};

struct kse_control_data {
	struct tdes kcd_txdescs[KSE_NTXDESC];
	struct rdes kcd_rxdescs[KSE_NRXDESC];
};
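
/*
 * Both descriptor rings live in the single DMA-loaded kse_control_data
 * block above.  As used by kse_init(), kse_start() and KSE_INIT_RXDESC()
 * below, t0/r0 hold the status bits including OWN, t1/r1 hold per-segment
 * control bits and the buffer length, t2/r2 hold the buffer bus address,
 * and t3/r3 link each descriptor to the next one in the ring.
 */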

#define KSE_CDOFF(x)		offsetof(struct kse_control_data, x)
#define KSE_CDTXOFF(x)		KSE_CDOFF(kcd_txdescs[(x)])
#define KSE_CDRXOFF(x)		KSE_CDOFF(kcd_rxdescs[(x)])

struct kse_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

struct kse_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

struct kse_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* Ethernet common data */
	void *sc_ih;			/* interrupt cookie */

	struct ifmedia sc_media;	/* ifmedia information */
	int sc_media_status;		/* PHY */
	int sc_media_active;		/* PHY */
	struct callout sc_callout;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	struct kse_control_data *sc_control_data;
#define sc_txdescs	sc_control_data->kcd_txdescs
#define sc_rxdescs	sc_control_data->kcd_rxdescs

	struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN];
	struct kse_rxsoft sc_rxsoft[KSE_NRXDESC];
	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */
	int sc_rxptr;			/* next ready Rx descriptor/descsoft */

	uint32_t sc_txc, sc_rxc;
	uint32_t sc_t1csum;
	int sc_mcsum;
	uint32_t sc_chip;
};

#define KSE_CDTXADDR(sc, x)	((sc)->sc_cddma + KSE_CDTXOFF((x)))
#define KSE_CDRXADDR(sc, x)	((sc)->sc_cddma + KSE_CDRXOFF((x)))

#define KSE_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > KSE_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    KSE_CDTXOFF(__x), sizeof(struct tdes) *		\
		    (KSE_NTXDESC - __x), (ops));			\
		__n -= (KSE_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define KSE_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
} while (/*CONSTCOND*/0)

#define KSE_INIT_RXDESC(sc, x)						\
do {									\
	struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: it may be possible to scoot the packet forward	\
	 * 2 bytes for alignment.  It is unclear whether KS8842 Rx	\
	 * DMA really requires buffers to start on a 32-bit		\
	 * boundary.  Tx DMA has no alignment limitation.		\
	 */								\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr;		\
	__rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */;		\
	__rxd->r0 = R0_OWN;						\
	KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)

u_int kse_burstsize = 16;	/* DMA burst length tuning knob */

#ifdef KSEDIAGNOSTIC
u_int kse_monitor_rxintr;	/* fragmented UDP csum HW bug hook */
#endif

static int kse_match(struct device *, struct cfdata *, void *);
static void kse_attach(struct device *, struct device *, void *);

CFATTACH_DECL(kse, sizeof(struct kse_softc),
    kse_match, kse_attach, NULL, NULL);

static int kse_ioctl(struct ifnet *, u_long, caddr_t);
static void kse_start(struct ifnet *);
static void kse_watchdog(struct ifnet *);
static int kse_init(struct ifnet *);
static void kse_stop(struct ifnet *, int);
static void kse_reset(struct kse_softc *);
static void kse_set_filter(struct kse_softc *);
static int add_rxbuf(struct kse_softc *, int);
static void rxdrain(struct kse_softc *);
static int kse_intr(void *);
static void rxintr(struct kse_softc *);
static void txreap(struct kse_softc *);
static void lnkchg(struct kse_softc *);
static int ifmedia_upd(struct ifnet *);
static void ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void phy_tick(void *);

static int
kse_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MICREL &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8842 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8841) &&
	    PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
		return 1;

	return 0;
}

static void
kse_attach(struct device *parent, struct device *self, void *aux)
{
	struct kse_softc *sc = (struct kse_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	int error, i, nseg;
	pcireg_t pmode;
	int pmreg;

	if (pci_mapreg_map(pa, 0x10,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
	    0, &sc->sc_st, &sc->sc_sh, NULL, NULL) != 0) {
		printf(": unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Make sure bus mastering is enabled. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (pmode == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (pmode != PCI_PMCSR_STATE_D0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	sc->sc_chip = PCI_PRODUCT(pa->pa_id);
	printf(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n",
	    sc->sc_chip, PCI_REVISION(pa->pa_class));

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	i = CSR_READ_2(sc, MARL);
	enaddr[5] = i; enaddr[4] = i >> 8;
	i = CSR_READ_2(sc, MARM);
	enaddr[3] = i; enaddr[2] = i >> 8;
	i = CSR_READ_2(sc, MARH);
	enaddr[1] = i; enaddr[0] = i >> 8;
	printf("%s: Ethernet address: %s\n",
	    sc->sc_dev.dv_xname, ether_sprintf(enaddr));

	/*
	 * Enable chip function.
	 */
	CSR_WRITE_2(sc, CIDR, 1);

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, kse_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
	if (error != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}
	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct kse_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT);
	if (error != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}
	error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct kse_control_data), 1,
	    sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap);
	if (error != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0);
	if (error != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    KSE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}
	for (i = 0; i < KSE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	callout_init(&sc->sc_callout);

	ifmedia_init(&sc->sc_media, 0, ifmedia_upd, ifmedia_sts);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	printf("%s: 10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n",
	    sc->sc_dev.dv_xname);

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kse_ioctl;
	ifp->if_start = kse_start;
	ifp->if_watchdog = kse_watchdog;
	ifp->if_init = kse_init;
	ifp->if_stop = kse_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * KSZ8842 can handle 802.1Q VLAN-sized frames,
	 * and can do IPv4, TCPv4, and UDPv4 checksums in hardware.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	return;

 fail_5:
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct kse_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_0:
	return;
}

static int
kse_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct kse_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			kse_set_filter(sc);
			error = 0;
		}
		break;
	}

	kse_start(ifp);

	splx(s);
	return error;
}

#define KSE_INTRS	(INT_DMLCS|INT_DMTS|INT_DMRS|INT_DMRBUS)

static int
kse_init(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	uint32_t paddr;
	int i, error = 0;

	/* cancel pending I/O */
	kse_stop(ifp, 0);

	/* reset all registers but PCI configuration */
	kse_reset(sc);

	/* craft Tx descriptor ring */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) {
		sc->sc_txdescs[i].t3 = paddr;
		paddr += sizeof(struct tdes);
	}
	sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0);
	KSE_CDTXSYNC(sc, 0, KSE_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = KSE_NTXDESC;
	sc->sc_txnext = 0;

	for (i = 0; i < KSE_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = KSE_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/* craft Rx descriptor ring */
	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
	for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) {
		sc->sc_rxdescs[i].r3 = paddr;
		paddr += sizeof(struct rdes);
	}
	sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0);
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
			if ((error = add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				rxdrain(sc);
				goto out;
			}
		}
		else
			KSE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* hand Tx/Rx rings to HW */
	CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0));
	CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0));

	sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC | TXC_FCE;
	sc->sc_rxc = RXC_REN | RXC_RU | RXC_FCE;
	if (ifp->if_flags & IFF_PROMISC)
		sc->sc_rxc |= RXC_RA;
	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rxc |= RXC_RB;

	sc->sc_t1csum = sc->sc_mcsum = 0;
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
		sc->sc_rxc |= RXC_ICG;
		sc->sc_mcsum |= M_CSUM_IPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
		sc->sc_txc |= TXC_ICG;
		sc->sc_t1csum |= T1_IPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) {
		sc->sc_rxc |= RXC_TCG;
		sc->sc_mcsum |= M_CSUM_TCPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) {
		sc->sc_txc |= TXC_TCG;
		sc->sc_t1csum |= T1_TCPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) {
		sc->sc_rxc |= RXC_UCG;
		sc->sc_mcsum |= M_CSUM_UDPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) {
		sc->sc_txc |= TXC_UCG;
		sc->sc_t1csum |= T1_UDPCKG;
	}
	sc->sc_txc |= (kse_burstsize << TXC_BS_SFT);
	sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT);

	/* set current media */
	(void)ifmedia_upd(ifp);

	/* enable transmitter and receiver */
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
	CSR_WRITE_4(sc, MDRSC, 1);

	/* enable interrupts */
	CSR_WRITE_4(sc, INTST, ~0);
	CSR_WRITE_4(sc, INTEN, KSE_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* start one second timer */
	callout_reset(&sc->sc_callout, hz, phy_tick, sc);

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	}
	return error;
}

static void
kse_stop(struct ifnet *ifp, int disable)
{
	struct kse_softc *sc = ifp->if_softc;
	struct kse_txsoft *txs;
	int i;

	callout_stop(&sc->sc_callout);

	sc->sc_txc &= ~TXC_TEN;
	sc->sc_rxc &= ~RXC_REN;
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);

	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	if (disable)
		rxdrain(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static void
kse_reset(struct kse_softc *sc)
{

	CSR_WRITE_2(sc, GRR, 1);
	delay(1000); /* PDF does not mention the delay amount */
	CSR_WRITE_2(sc, GRR, 0);

	CSR_WRITE_2(sc, CIDR, 1);
}

static void
kse_watchdog(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	txreap(sc);

	if (sc->sc_txfree != KSE_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		kse_init(ifp);
	}
	else if (ifp->if_flags & IFF_DEBUG)
		printf("%s: recovered from device timeout\n",
		    sc->sc_dev.dv_xname);

	/* Try to get more packets going. */
	kse_start(ifp);
}

static void
kse_start(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct kse_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;

	lasttx = -1;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (sc->sc_txsfree < KSE_TXQUEUE_GC) {
			txreap(sc);
			if (sc->sc_txsfree == 0)
				break;
		}
		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = KSE_NEXTTX(nexttx)) {
			struct tdes *tdes = &sc->sc_txdescs[nexttx];
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			tdes->t2 = dmamap->dm_segs[seg].ds_addr;
			tdes->t1 = sc->sc_t1csum
			    | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK);
			if (nexttx != sc->sc_txnext)
				tdes->t0 = T0_OWN;
			lasttx = nexttx;
		}
#if 0
		/*
		 * The T1_IC bit could schedule a Tx completion interrupt
		 * here, but this driver takes a "shoot away" Tx strategy.
		 */
#else
		{
			/*
			 * An outgoing NFS mbuf must be unloaded as soon as
			 * Tx has completed.  Without T1_IC such an mbuf is
			 * left unack'ed for an excessive time and NFS makes
			 * no progress until kse_watchdog() calls txreap()
			 * to reclaim it.  It's painful to traverse every
			 * mbuf chain to determine whether someone is
			 * waiting for Tx completion.
			 */
			struct mbuf *m = m0;
			do {
				if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
					sc->sc_txdescs[lasttx].t1 |= T1_IC;
					break;
				}
			} while ((m = m->m_next) != NULL);
		}
#endif

		/*
		 * Mark the first/last segments and hand the frame to the
		 * chip by setting T0_OWN on the first descriptor last.
		 */
		sc->sc_txdescs[lasttx].t1 |= T1_LS;
		sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS;
		sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN;
		KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* tell DMA to start transmit */
		CSR_WRITE_4(sc, MDTSC, 1);

		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndesc = dmamap->dm_nsegs;

		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;
		sc->sc_txsfree--;
		sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext);
#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
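
/*
 * Hardware multicast filtering is not implemented yet: kse_init() runs
 * the receiver with RXC_RU|RXC_RB (plus RXC_RA in promiscuous mode) and
 * never enables the hash filter (RXC_MA).  The #if 0 walk over the
 * multicast list below is only a placeholder.
 */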

static void
kse_set_filter(struct kse_softc *sc)
{
#if 0 /* later */
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int cnt = 0;

	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo,
		    enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
			;
		}
		ETHER_NEXT_MULTI(step, enm);
		cnt++;
	}
	return;
#endif
}

static int
add_rxbuf(struct kse_softc *sc, int idx)
{
	struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("kse_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	KSE_INIT_RXDESC(sc, idx);

	return 0;
}

static void
rxdrain(struct kse_softc *sc)
{
	struct kse_rxsoft *rxs;
	int i;

	for (i = 0; i < KSE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

static int
kse_intr(void *arg)
{
	struct kse_softc *sc = arg;
	uint32_t isr;

	if ((isr = CSR_READ_4(sc, INTST)) == 0)
		return 0;

	if (isr & INT_DMRS)
		rxintr(sc);
	if (isr & INT_DMTS)
		txreap(sc);
	if (isr & INT_DMLCS)
		lnkchg(sc);
	if (isr & INT_DMRBUS)
		printf("%s: Rx descriptor full\n", sc->sc_dev.dv_xname);

	CSR_WRITE_4(sc, INTST, isr);
	return 1;
}

static void
rxintr(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		KSE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].r0;

		if (rxstat & R0_OWN) /* desc is left empty */
			break;

		/* R0_FS|R0_LS must have been marked for this desc */

		if (rxstat & R0_ES) {
			ifp->if_ierrors++;
#define PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				printf("%s: receive error: %s\n",	\
				    sc->sc_dev.dv_xname, str)
			PRINTERR(R0_TL, "frame too long");
			PRINTERR(R0_RF, "runt frame");
			PRINTERR(R0_CE, "bad FCS");
#undef PRINTERR
			KSE_INIT_RXDESC(sc, i);
			continue;
		}

		/* HW errata; frame might be too small or too large */

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		len = rxstat & R0_FL_MASK;
		len -= ETHER_CRC_LEN;	/* trim CRC off */
		m = rxs->rxs_mbuf;

		if (add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			KSE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat,
			    rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			continue;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		if (sc->sc_mcsum) {
			m->m_pkthdr.csum_flags |= sc->sc_mcsum;
			if (rxstat & R0_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
			if (rxstat & (R0_TCPE | R0_UDPE))
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */
		(*ifp->if_input)(ifp, m);
#ifdef KSEDIAGNOSTIC
		if (kse_monitor_rxintr > 0) {
			printf("m stat %x data %p len %d\n",
			    rxstat, m->m_data, m->m_len);
		}
#endif
	}
	sc->sc_rxptr = i;
}
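
/*
 * Tx reclamation is lazy: kse_start() requests a completion interrupt
 * (T1_IC) only for frames that carry an mbuf with an external free
 * routine, so most finished jobs are swept up when txreap() is called
 * from kse_start(), kse_watchdog() or the INT_DMTS interrupt.
 */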

static void
txreap(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_txsoft *txs;
	uint32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN;
	     i = KSE_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;

		if (txstat & T0_OWN) /* desc is still in use */
			break;

		/* there is no way to tell transmission status per frame */

		ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}
	sc->sc_txsdirty = i;
	if (sc->sc_txsfree == KSE_TXQUEUELEN)
		ifp->if_timer = 0;
}

static void
lnkchg(struct kse_softc *sc)
{
	struct ifmediareq ifmr;

#if 0 /* rambling link status */
	printf("%s: link %s\n", sc->sc_dev.dv_xname,
	    (CSR_READ_2(sc, P1SR) & (1U << 5)) ? "up" : "down");
#endif
	ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
}

/*
 * Media handling pokes the port 1 control/status registers directly
 * rather than going through an MII layer: P1CR4 bit 13 restarts
 * autonegotiation, bit 7 enables it, bit 4 advertises flow-control
 * pause, and bits 3:0 advertise the 100/10 full/half-duplex abilities;
 * P1SR bit 5 reports link up (the other status bits are decoded in
 * ifmedia_sts() below).
 */
static int
ifmedia_upd(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	uint16_t ctl;

	ctl = 0;
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		ctl |= (1U << 13);	/* restart AN */
		ctl |= (1U << 7);	/* enable AN */
		ctl |= (1U << 4);	/* advertise flow control pause */
		ctl |= (1U << 3) | (1U << 2) | (1U << 1) | (1U << 0);
	}
	else {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX)
			ctl |= (1U << 6);
		if (ifm->ifm_media & IFM_FDX)
			ctl |= (1U << 5);
	}
	CSR_WRITE_2(sc, P1CR4, ctl);

	sc->sc_media_active = IFM_NONE;
	sc->sc_media_status = IFM_AVALID;

	return 0;
}

static void
ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct kse_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;
	uint16_t ctl, sts, result;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ctl = CSR_READ_2(sc, P1CR4);
	sts = CSR_READ_2(sc, P1SR);
	if ((sts & (1U << 5)) == 0) {
		ifmr->ifm_active |= IFM_NONE;
		goto out;	/* link is down */
	}
	ifmr->ifm_status |= IFM_ACTIVE;
	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		if ((sts & (1U << 6)) == 0) {
			ifmr->ifm_active |= IFM_NONE;
			goto out;	/* negotiation in progress */
		}
		result = ctl & sts & 017;
		if (result & (1U << 3))
			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
		else if (result & (1U << 2))
			ifmr->ifm_active |= IFM_100_TX;
		else if (result & (1U << 1))
			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
		else if (result & (1U << 0))
			ifmr->ifm_active |= IFM_10_T;
		else
			ifmr->ifm_active |= IFM_NONE;
		if (ctl & (1U << 4))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
		if (sts & (1U << 4))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
	}
	else {
		ifmr->ifm_active |= (sts & (1U << 10)) ? IFM_100_TX : IFM_10_T;
		if (sts & (1U << 9))
			ifmr->ifm_active |= IFM_FDX;
		if (sts & (1U << 12))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
		if (sts & (1U << 11))
			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
	}

 out:
	sc->sc_media_status = ifmr->ifm_status;
	sc->sc_media_active = ifmr->ifm_active;
}

static void
phy_tick(void *arg)
{
	struct kse_softc *sc = arg;
	struct ifmediareq ifmr;
	int s;

	s = splnet();
	ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
	splx(s);

	callout_reset(&sc->sc_callout, hz, phy_tick, sc);
}