/* $NetBSD: if_kse.c,v 1.1 2006/06/30 17:17:09 nisimura Exp $ */

/*
 * Copyright (c) 2006 Tohru Nishimura
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Tohru Nishimura.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_kse.c,v 1.1 2006/06/30 17:17:09 nisimura Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <machine/endian.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#define CSR_READ_4(sc, off) \
	bus_space_read_4(sc->sc_st, sc->sc_sh, off)
#define CSR_WRITE_4(sc, off, val) \
	bus_space_write_4(sc->sc_st, sc->sc_sh, off, val)
#define CSR_READ_2(sc, off) \
	bus_space_read_2(sc->sc_st, sc->sc_sh, off)
#define CSR_WRITE_2(sc, off, val) \
	bus_space_write_2(sc->sc_st, sc->sc_sh, off, val)
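
/*
 * Note, inferred from how the accessors are used below rather than
 * from the datasheet: the DMA engine registers at low offsets
 * (MDTXC .. INTST) are accessed as 32-bit words, while the MAC and
 * switch registers at 0x200 and above (MARL .. P1SR) are 16-bit,
 * hence the two widths of CSR accessor.
 */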

#define MDTXC	0x000	/* DMA transmit control */
#define MDRXC	0x004	/* DMA receive control */
#define MDTSC	0x008	/* DMA transmit start */
#define MDRSC	0x00c	/* DMA receive start */
#define TDLB	0x010	/* transmit descriptor list base */
#define RDLB	0x014	/* receive descriptor list base */
#define INTEN	0x028	/* interrupt enable */
#define INTST	0x02c	/* interrupt status */
#define MARL	0x200	/* MAC address low */
#define MARM	0x202	/* MAC address middle */
#define MARH	0x204	/* MAC address high */
#define GRR	0x216	/* global reset */
#define CIDR	0x400	/* chip ID and enable */
#define CGCR	0x40a	/* chip global control */
#define P1CR4	0x512	/* port 1 control 4 */
#define P1SR	0x514	/* port 1 status */

#define TXC_BS_MSK	0x3f000000	/* burst size */
#define TXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define TXC_UCG		(1U<<18)	/* generate UDP checksum */
#define TXC_TCG		(1U<<17)	/* generate TCP checksum */
#define TXC_ICG		(1U<<16)	/* generate IP checksum */
#define TXC_FCE		(1U<<9)		/* enable flow control */
#define TXC_EP		(1U<<2)		/* enable automatic padding */
#define TXC_AC		(1U<<1)		/* add CRC to frame */
#define TXC_TEN		(1)		/* enable DMA to run */

#define RXC_BS_MSK	0x3f000000	/* burst size */
#define RXC_BS_SFT	(24)		/* 1,2,4,8,16,32 or 0 for unlimited */
#define RXC_UCG		(1U<<18)	/* run UDP checksum */
#define RXC_TCG		(1U<<17)	/* run TCP checksum */
#define RXC_ICG		(1U<<16)	/* run IP checksum */
#define RXC_FCE		(1U<<9)		/* enable flow control */
#define RXC_RB		(1U<<6)		/* receive broadcast frames */
#define RXC_RM		(1U<<5)		/* receive multicast frames */
#define RXC_RU		(1U<<4)		/* receive unicast frames */
#define RXC_RE		(1U<<3)		/* accept error frames */
#define RXC_RA		(1U<<2)		/* receive all frames */
#define RXC_MA		(1U<<1)		/* receive through hash filter */
#define RXC_REN		(1)		/* enable DMA to run */

#define INT_DMLCS	(1U<<31)	/* link status change */
#define INT_DMTS	(1U<<30)	/* sending desc. has posted Tx done */
#define INT_DMRS	(1U<<29)	/* frame was received */
#define INT_DMRBUS	(1U<<27)	/* Rx descriptor pool is full */

#define T0_OWN		(1U<<31)	/* desc is ready to Tx */

#define R0_OWN		(1U<<31)	/* desc is empty */
#define R0_FS		(1U<<30)	/* first segment of frame */
#define R0_LS		(1U<<29)	/* last segment of frame */
#define R0_IPE		(1U<<28)	/* IP checksum error */
#define R0_TCPE		(1U<<27)	/* TCP checksum error */
#define R0_UDPE		(1U<<26)	/* UDP checksum error */
#define R0_ES		(1U<<25)	/* error summary */
#define R0_MF		(1U<<24)	/* multicast frame */
#define R0_RE		(1U<<19)	/* framing error */
#define R0_TL		(1U<<18)	/* frame too long */
#define R0_RF		(1U<<17)	/* damaged runt frame */
#define R0_CE		(1U<<16)	/* CRC error */
#define R0_FT		(1U<<15)	/* frame type */
#define R0_FL_MASK	0x7ff		/* frame length 10:0 */

#define T1_IC		(1U<<31)	/* post interrupt on complete */
#define T1_FS		(1U<<30)	/* first segment of frame */
#define T1_LS		(1U<<29)	/* last segment of frame */
#define T1_IPCKG	(1U<<28)	/* generate IP checksum */
#define T1_TCPCKG	(1U<<27)	/* generate TCP checksum */
#define T1_UDPCKG	(1U<<26)	/* generate UDP checksum */
#define T1_TER		(1U<<25)	/* end of ring */
#define T1_TBS_MASK	0x7ff		/* segment size 10:0 */

#define R1_RER		(1U<<25)	/* end of ring */
#define R1_RBS_MASK	0x7ff		/* segment size 10:0 */

#define KSE_NTXSEGS		16
#define KSE_TXQUEUELEN		64
#define KSE_TXQUEUELEN_MASK	(KSE_TXQUEUELEN - 1)
#define KSE_TXQUEUE_GC		(KSE_TXQUEUELEN / 4)
#define KSE_NTXDESC		256
#define KSE_NTXDESC_MASK	(KSE_NTXDESC - 1)
#define KSE_NEXTTX(x)		(((x) + 1) & KSE_NTXDESC_MASK)
#define KSE_NEXTTXS(x)		(((x) + 1) & KSE_TXQUEUELEN_MASK)

#define KSE_NRXDESC		64
#define KSE_NRXDESC_MASK	(KSE_NRXDESC - 1)
#define KSE_NEXTRX(x)		(((x) + 1) & KSE_NRXDESC_MASK)
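
/*
 * The ring sizes above are powers of two so that "advance to the
 * next index" can be a simple mask instead of a modulo; for example,
 * with KSE_NTXDESC == 256, KSE_NEXTTX(255) == ((255 + 1) & 0xff) == 0,
 * wrapping back to the start of the ring.
 */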

struct tdes {
	unsigned t0;	/* T0_OWN and Tx status */
	unsigned t1;	/* T1_* control bits and segment length */
	unsigned t2;	/* buffer bus address */
	unsigned t3;	/* bus address of next descriptor */
};

struct rdes {
	unsigned r0;	/* R0_OWN and Rx status */
	unsigned r1;	/* R1_* control bits and buffer size */
	unsigned r2;	/* buffer bus address */
	unsigned r3;	/* bus address of next descriptor */
};

struct kse_control_data {
	struct tdes kcd_txdescs[KSE_NTXDESC];
	struct rdes kcd_rxdescs[KSE_NRXDESC];
};
#define KSE_CDOFF(x)	offsetof(struct kse_control_data, x)
#define KSE_CDTXOFF(x)	KSE_CDOFF(kcd_txdescs[(x)])
#define KSE_CDRXOFF(x)	KSE_CDOFF(kcd_rxdescs[(x)])

struct kse_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

struct kse_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

struct kse_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* Ethernet common data */
	void *sc_ih;			/* interrupt cookie */

	struct ifmedia sc_media;	/* ifmedia information */
	int sc_media_status;		/* PHY */
	unsigned sc_media_active;	/* PHY */
	struct callout sc_callout;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	struct kse_control_data *sc_control_data;
#define sc_txdescs	sc_control_data->kcd_txdescs
#define sc_rxdescs	sc_control_data->kcd_rxdescs

	struct kse_txsoft sc_txsoft[KSE_TXQUEUELEN];
	struct kse_rxsoft sc_rxsoft[KSE_NRXDESC];
	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */
	int sc_rxptr;			/* next ready Rx descriptor/descsoft */

	unsigned sc_txc, sc_rxc;
	unsigned sc_t1csum, sc_mcsum;
	unsigned sc_chip;
};

#define KSE_CDTXADDR(sc, x)	((sc)->sc_cddma + KSE_CDTXOFF((x)))
#define KSE_CDRXADDR(sc, x)	((sc)->sc_cddma + KSE_CDRXOFF((x)))

#define KSE_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > KSE_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    KSE_CDTXOFF(__x), sizeof(struct tdes) *		\
		    (KSE_NTXDESC - __x), (ops));			\
		__n -= (KSE_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define KSE_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    KSE_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
} while (/*CONSTCOND*/0)
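
/*
 * A brief illustration of KSE_CDTXSYNC above: the descriptor range
 * being synced may wrap past the end of the ring, but a single
 * bus_dmamap_sync() covers only a contiguous byte range, so the sync
 * is split in two.  E.g. syncing 8 descriptors starting at index 252
 * with KSE_NTXDESC == 256 becomes one sync over descriptors 252-255
 * and another over descriptors 0-3.
 */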

#define KSE_INIT_RXDESC(sc, x)						\
do {									\
	struct kse_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: we may be able to scoot the packet forward 2 bytes	\
	 * for alignment.  It is unclear whether KS8842 Rx DMA really	\
	 * requires buffers on a 32-bit boundary.  Tx DMA has no	\
	 * alignment limitation.					\
	 */								\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->r2 = __rxs->rxs_dmamap->dm_segs[0].ds_addr;		\
	__rxd->r1 = R1_RBS_MASK /* __m->m_ext.ext_size */;		\
	__rxd->r0 = R0_OWN;						\
	KSE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)

unsigned kse_burstsize = 16;	/* DMA burst length tuning knob */

#ifdef KSEDIAGNOSTIC
unsigned kse_monitor_rxintr;	/* fragmented UDP csum HW bug hook */
#endif

static int kse_match(struct device *, struct cfdata *, void *);
static void kse_attach(struct device *, struct device *, void *);

CFATTACH_DECL(kse, sizeof(struct kse_softc),
    kse_match, kse_attach, NULL, NULL);

static int kse_ioctl(struct ifnet *, u_long, caddr_t);
static void kse_start(struct ifnet *);
static void kse_watchdog(struct ifnet *);
static int kse_init(struct ifnet *);
static void kse_stop(struct ifnet *, int);
static void kse_reset(struct kse_softc *);
static void kse_set_filter(struct kse_softc *);
static int add_rxbuf(struct kse_softc *, int);
static void rxdrain(struct kse_softc *);
static int kse_intr(void *);
static void rxintr(struct kse_softc *);
static void txreap(struct kse_softc *);
static void lnkchg(struct kse_softc *);
static int ifmedia_upd(struct ifnet *);
static void ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void phy_tick(void *);

static int
kse_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MICREL &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8842 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_MICREL_KSZ8841) &&
	    PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
		return 1;

	return 0;
}

static void
kse_attach(struct device *parent, struct device *self, void *aux)
{
	struct kse_softc *sc = (struct kse_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	int error, i, nseg;
	pcireg_t pmode;
	int pmreg;

	if (pci_mapreg_map(pa, 0x10,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
	    0, &sc->sc_st, &sc->sc_sh, NULL, NULL) != 0) {
		printf(": unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Make sure bus mastering is enabled. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (pmode == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (pmode != PCI_PMCSR_STATE_D0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	sc->sc_chip = PCI_PRODUCT(pa->pa_id);
	printf(": Micrel KSZ%04x Ethernet (rev. 0x%02x)\n",
	    sc->sc_chip, PCI_REVISION(pa->pa_class));

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	i = CSR_READ_2(sc, MARL);
	enaddr[5] = i; enaddr[4] = i >> 8;
	i = CSR_READ_2(sc, MARM);
	enaddr[3] = i; enaddr[2] = i >> 8;
	i = CSR_READ_2(sc, MARH);
	enaddr[1] = i; enaddr[0] = i >> 8;
	printf("%s: Ethernet address: %s\n",
	    sc->sc_dev.dv_xname, ether_sprintf(enaddr));

	/*
	 * Enable chip function.
	 */
	CSR_WRITE_2(sc, CIDR, 1);

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, kse_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct kse_control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
	if (error != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}
	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct kse_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT);
	if (error != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}
	error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct kse_control_data), 1,
	    sizeof(struct kse_control_data), 0, 0, &sc->sc_cddmamap);
	if (error != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct kse_control_data), NULL, 0);
	if (error != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    KSE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}
	for (i = 0; i < KSE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}
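
	/*
	 * The block above is the usual four-step bus_dma setup for a
	 * shared control structure: bus_dmamem_alloc() reserves
	 * DMA-safe memory, bus_dmamem_map() makes it visible in kernel
	 * virtual address space, and bus_dmamap_create() plus
	 * bus_dmamap_load() yield the bus address the chip will use
	 * (sc_cddma).  BUS_DMA_COHERENT is only a hint; the
	 * KSE_CDTXSYNC/KSE_CDRXSYNC calls remain necessary on
	 * platforms where it cannot be honored.
	 */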

	callout_init(&sc->sc_callout);

	ifmedia_init(&sc->sc_media, 0, ifmedia_upd, ifmedia_sts);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	printf("%s: 10baseT, 10baseT-FDX, 100baseTX, 100baseTX-FDX, auto\n",
	    sc->sc_dev.dv_xname);

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kse_ioctl;
	ifp->if_start = kse_start;
	ifp->if_watchdog = kse_watchdog;
	ifp->if_init = kse_init;
	ifp->if_stop = kse_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * The KSZ8842 can handle 802.1Q VLAN-sized frames and can
	 * compute IPv4, TCPv4, and UDPv4 checksums in hardware.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	return;

 fail_5:
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct kse_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
 fail_0:
	return;
}

static int
kse_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct kse_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			kse_set_filter(sc);
			error = 0;
		}
		break;
	}

	kse_start(ifp);

	splx(s);
	return error;
}

#define KSE_INTRS (INT_DMLCS|INT_DMTS|INT_DMRS|INT_DMRBUS)

static int
kse_init(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	unsigned paddr;
	int i, error = 0;

	/* cancel pending I/O */
	kse_stop(ifp, 0);

	/* reset all registers but PCI configuration */
	kse_reset(sc);

	/* craft Tx descriptor ring */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0, paddr = KSE_CDTXADDR(sc, 1); i < KSE_NTXDESC - 1; i++) {
		sc->sc_txdescs[i].t3 = paddr;
		paddr += sizeof(struct tdes);
	}
	sc->sc_txdescs[KSE_NTXDESC - 1].t3 = KSE_CDTXADDR(sc, 0);
	KSE_CDTXSYNC(sc, 0, KSE_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = KSE_NTXDESC;
	sc->sc_txnext = 0;

	for (i = 0; i < KSE_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = KSE_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/* craft Rx descriptor ring */
	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
	for (i = 0, paddr = KSE_CDRXADDR(sc, 1); i < KSE_NRXDESC - 1; i++) {
		sc->sc_rxdescs[i].r3 = paddr;
		paddr += sizeof(struct rdes);
	}
	sc->sc_rxdescs[KSE_NRXDESC - 1].r3 = KSE_CDRXADDR(sc, 0);
	for (i = 0; i < KSE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
			if ((error = add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				rxdrain(sc);
				goto out;
			}
		}
		else
			KSE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* hand Tx/Rx rings to HW */
	CSR_WRITE_4(sc, TDLB, KSE_CDTXADDR(sc, 0));
	CSR_WRITE_4(sc, RDLB, KSE_CDRXADDR(sc, 0));

	sc->sc_txc = TXC_TEN | TXC_EP | TXC_AC | TXC_FCE;
	sc->sc_rxc = RXC_REN | RXC_RU | RXC_FCE;
	if (ifp->if_flags & IFF_PROMISC)
		sc->sc_rxc |= RXC_RA;
	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rxc |= RXC_RB;

	sc->sc_t1csum = sc->sc_mcsum = 0;
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) {
		sc->sc_rxc |= RXC_ICG;
		sc->sc_mcsum |= M_CSUM_IPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Tx) {
		sc->sc_txc |= TXC_ICG;
		sc->sc_t1csum |= T1_IPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Rx) {
		sc->sc_rxc |= RXC_TCG;
		sc->sc_mcsum |= M_CSUM_TCPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx) {
		sc->sc_txc |= TXC_TCG;
		sc->sc_t1csum |= T1_TCPCKG;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Rx) {
		sc->sc_rxc |= RXC_UCG;
		sc->sc_mcsum |= M_CSUM_UDPv4;
	}
	if (ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx) {
		sc->sc_txc |= TXC_UCG;
		sc->sc_t1csum |= T1_UDPCKG;
	}
	sc->sc_txc |= (kse_burstsize << TXC_BS_SFT);
	sc->sc_rxc |= (kse_burstsize << RXC_BS_SFT);

	/* set current media */
	(void)ifmedia_upd(ifp);

	/* enable transmitter and receiver */
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);
	CSR_WRITE_4(sc, MDRSC, 1);

	/* clear stale status, then enable interrupts */
	CSR_WRITE_4(sc, INTST, ~0);
	CSR_WRITE_4(sc, INTEN, KSE_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* start one second timer */
	callout_reset(&sc->sc_callout, hz, phy_tick, sc);

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	}
	return error;
}
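
/*
 * Resulting descriptor topology (sketch): kse_init() above links the
 * t3/r3 member of each descriptor to the bus address of the one that
 * follows it, and points the last entry back at entry 0, so the chip
 * sees a circular ring anchored at TDLB/RDLB:
 *
 *	[0] -> [1] -> [2] -> ... -> [N-1] --+
 *	 ^----------------------------------+
 */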

static void
kse_stop(struct ifnet *ifp, int disable)
{
	struct kse_softc *sc = ifp->if_softc;
	struct kse_txsoft *txs;
	int i;

	callout_stop(&sc->sc_callout);

	sc->sc_txc &= ~TXC_TEN;
	sc->sc_rxc &= ~RXC_REN;
	CSR_WRITE_4(sc, MDTXC, sc->sc_txc);
	CSR_WRITE_4(sc, MDRXC, sc->sc_rxc);

	for (i = 0; i < KSE_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	if (disable)
		rxdrain(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static void
kse_reset(struct kse_softc *sc)
{

	CSR_WRITE_2(sc, GRR, 1);
	delay(1000); /* PDF does not mention the delay amount */
	CSR_WRITE_2(sc, GRR, 0);

	CSR_WRITE_2(sc, CIDR, 1);
}

static void
kse_watchdog(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting on every packet, sweep
	 * up before we report an error.
	 */
	txreap(sc);

	if (sc->sc_txfree != KSE_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		kse_init(ifp);
	}
	else if (ifp->if_flags & IFF_DEBUG)
		printf("%s: recovered from device timeout\n",
		    sc->sc_dev.dv_xname);

	/* Try to get more packets going. */
	kse_start(ifp);
}

static void
kse_start(struct ifnet *ifp)
{
	struct kse_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct kse_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;

	lasttx = -1;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (sc->sc_txsfree < KSE_TXQUEUE_GC) {
			txreap(sc);
			if (sc->sc_txsfree == 0)
				break;
		}
		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = KSE_NEXTTX(nexttx)) {
			struct tdes *tdes = &sc->sc_txdescs[nexttx];
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			tdes->t2 = dmamap->dm_segs[seg].ds_addr;
			tdes->t1 = sc->sc_t1csum
			    | (dmamap->dm_segs[seg].ds_len & T1_TBS_MASK);
			if (nexttx != sc->sc_txnext)
				tdes->t0 = T0_OWN;
			lasttx = nexttx;
		}
#if 0
		/*
		 * The T1_IC bit could schedule a Tx-done interrupt here,
		 * but this driver takes a "shoot away" Tx strategy.
		 */
#else
		{
		/*
		 * An outgoing NFS mbuf must be unloaded when Tx completes.
		 * Without T1_IC the NFS mbuf is left unack'ed for an
		 * excessive time and NFS stops making progress until
		 * kse_watchdog() calls txreap() to reclaim the unack'ed
		 * mbuf.  It's painful to traverse every mbuf chain to
		 * determine whether someone is waiting for Tx completion.
		 */
		struct mbuf *m = m0;
		do {
			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
				sc->sc_txdescs[lasttx].t1 |= T1_IC;
				break;
			}
		} while ((m = m->m_next) != NULL);
		}
#endif

		/* write last T0_OWN bit of the 1st segment */
		sc->sc_txdescs[lasttx].t1 |= T1_LS;
		sc->sc_txdescs[sc->sc_txnext].t1 |= T1_FS;
		sc->sc_txdescs[sc->sc_txnext].t0 = T0_OWN;
		KSE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* tell DMA to start transmit */
		CSR_WRITE_4(sc, MDTSC, 1);

		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndesc = dmamap->dm_nsegs;

		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;
		sc->sc_txsfree--;
		sc->sc_txsnext = KSE_NEXTTXS(sc->sc_txsnext);
#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

static void
kse_set_filter(struct kse_softc *sc)
{
#if 0 /* later */
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int cnt = 0;

	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo,
		    enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
			;
		}
		ETHER_NEXT_MULTI(step, enm);
		cnt++;
	}
	return;
#endif
}

static int
add_rxbuf(struct kse_softc *sc, int idx)
{
	struct kse_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("kse_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	KSE_INIT_RXDESC(sc, idx);

	return 0;
}

static void
rxdrain(struct kse_softc *sc)
{
	struct kse_rxsoft *rxs;
	int i;

	for (i = 0; i < KSE_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

static int
kse_intr(void *arg)
{
	struct kse_softc *sc = arg;
	unsigned isr;

	if ((isr = CSR_READ_4(sc, INTST)) == 0)
		return 0;

	if (isr & INT_DMRS)
		rxintr(sc);
	if (isr & INT_DMTS)
		txreap(sc);
	if (isr & INT_DMLCS)
		lnkchg(sc);
	if (isr & INT_DMRBUS)
		printf("%s: Rx descriptor full\n", sc->sc_dev.dv_xname);

	CSR_WRITE_4(sc, INTST, isr);
	return 1;
}
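
/*
 * Note on the ack in kse_intr() above: writing the captured INTST
 * value back presumably acknowledges exactly the events that were
 * handled (write-one-to-clear semantics are assumed here, matching
 * the INTST ~0 clear in kse_init()), so an event arriving between
 * the read and the write-back raises a fresh interrupt rather than
 * being lost.
 */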

static void
rxintr(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_rxsoft *rxs;
	struct mbuf *m;
	unsigned rxstat;
	int i, len;

	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = KSE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		KSE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].r0;

		if (rxstat & R0_OWN) /* desc is left empty */
			break;

		/* R0_FS|R0_LS must have been marked for this desc */

		if (rxstat & R0_ES) {
			ifp->if_ierrors++;
#define PRINTERR(bit, str)						\
			if (rxstat & (bit))				\
				printf("%s: receive error: %s\n",	\
				    sc->sc_dev.dv_xname, str)
			PRINTERR(R0_TL, "frame too long");
			PRINTERR(R0_RF, "runt frame");
			PRINTERR(R0_CE, "bad FCS");
#undef PRINTERR
			KSE_INIT_RXDESC(sc, i);
			continue;
		}

		/* HW errata; frame might be too small or too large */

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		len = rxstat & R0_FL_MASK;
		m = rxs->rxs_mbuf;

		if (add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			KSE_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat,
			    rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			continue;
		}

		ifp->if_ipackets++;
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		if (sc->sc_mcsum) {
			m->m_pkthdr.csum_flags |= sc->sc_mcsum;
			if (rxstat & R0_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
			if (rxstat & (R0_TCPE | R0_UDPE))
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */
		(*ifp->if_input)(ifp, m);
#ifdef KSEDIAGNOSTIC
		if (kse_monitor_rxintr > 0) {
			printf("m stat %x data %p len %d\n",
			    rxstat, m->m_data, m->m_len);
		}
#endif
	}
	sc->sc_rxptr = i;
}

static void
txreap(struct kse_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct kse_txsoft *txs;
	unsigned txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txsdirty; sc->sc_txsfree != KSE_TXQUEUELEN;
	     i = KSE_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		KSE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;

		if (txstat & T0_OWN) /* desc is still in use */
			break;

		/* there is no way to tell transmission status per frame */

		ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}
	sc->sc_txsdirty = i;
	if (sc->sc_txsfree == KSE_TXQUEUELEN)
		ifp->if_timer = 0;
}

static void
lnkchg(struct kse_softc *sc)
{
	struct ifmediareq ifmr;

#if 0 /* rambling link status */
	printf("%s: link %s\n", sc->sc_dev.dv_xname,
	    (CSR_READ_2(sc, P1SR) & (1U << 5)) ? "up" : "down");
#endif
	ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr);
}
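
/*
 * Port 1 PHY control/status bits used below, as inferred from the
 * inline comments and from how ifmedia_upd()/ifmedia_sts() use them
 * (the bit positions are assumptions drawn from this code, not
 * verified against the KS8842 manual):
 *
 *	P1CR4: 13 restart AN, 7 enable AN, 6 force 100Mbps,
 *	       5 force full duplex, 4 advertise pause,
 *	       3..0 advertised abilities (100FDX/100/10FDX/10)
 *	P1SR:  6 AN done, 5 link up, 3..0 link partner abilities,
 *	       so "ctl & sts & 017" picks the best common ability.
 */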
"up" : "down"); 1124 #endif 1125 ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr); 1126 } 1127 1128 static int 1129 ifmedia_upd(struct ifnet *ifp) 1130 { 1131 struct kse_softc *sc = ifp->if_softc; 1132 struct ifmedia *ifm = &sc->sc_media; 1133 unsigned ctl; 1134 1135 ctl = 0; 1136 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 1137 ctl |= (1U << 13); /* restart AN */ 1138 ctl |= (1U << 7); /* enable AN */ 1139 ctl |= (1U << 4); /* advertise flow control pause */ 1140 ctl |= (1U << 3) | (1U << 2) | (1U << 1) | (1U << 0); 1141 } 1142 else { 1143 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) 1144 ctl |= (1U << 6); 1145 if (ifm->ifm_media & IFM_FDX) 1146 ctl |= (1U << 5); 1147 } 1148 CSR_WRITE_2(sc, P1CR4, ctl); 1149 1150 sc->sc_media_active = IFM_NONE; 1151 sc->sc_media_status = IFM_AVALID; 1152 1153 return 0; 1154 } 1155 1156 static void 1157 ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1158 { 1159 struct kse_softc *sc = ifp->if_softc; 1160 struct ifmedia *ifm = &sc->sc_media; 1161 unsigned ctl, sts, result; 1162 1163 ifmr->ifm_status = IFM_AVALID; 1164 ifmr->ifm_active = IFM_ETHER; 1165 1166 ctl = CSR_READ_2(sc, P1CR4); 1167 sts = CSR_READ_2(sc, P1SR); 1168 if ((sts & (1U << 5)) == 0) { 1169 ifmr->ifm_active |= IFM_NONE; 1170 goto out; /* link is down */ 1171 } 1172 ifmr->ifm_status |= IFM_ACTIVE; 1173 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 1174 if ((sts & (1U << 6)) == 0) { 1175 ifmr->ifm_active |= IFM_NONE; 1176 goto out; /* negotiation in progress */ 1177 } 1178 result = ctl & sts & 017; 1179 if (result & (1U << 3)) 1180 ifmr->ifm_active |= IFM_100_TX|IFM_FDX; 1181 else if (result & (1U << 2)) 1182 ifmr->ifm_active |= IFM_100_TX; 1183 else if (result & (1U << 1)) 1184 ifmr->ifm_active |= IFM_10_T|IFM_FDX; 1185 else if (result & (1U << 0)) 1186 ifmr->ifm_active |= IFM_10_T; 1187 else 1188 ifmr->ifm_active |= IFM_NONE; 1189 if (ctl & (1U << 4)) 1190 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE; 1191 if (sts & (1U << 4)) 1192 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE; 1193 } 1194 else { 1195 ifmr->ifm_active |= (sts & (1U << 10)) ? IFM_100_TX : IFM_10_T; 1196 if (sts & (1U << 9)) 1197 ifmr->ifm_active |= IFM_FDX; 1198 if (sts & (1U << 12)) 1199 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE; 1200 if (sts & (1U << 11)) 1201 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE; 1202 } 1203 1204 out: 1205 sc->sc_media_status = ifmr->ifm_status; 1206 sc->sc_media_active = ifmr->ifm_active; 1207 } 1208 1209 static void 1210 phy_tick(void *arg) 1211 { 1212 struct kse_softc *sc = arg; 1213 struct ifmediareq ifmr; 1214 int s; 1215 1216 s = splnet(); 1217 ifmedia_sts(&sc->sc_ethercom.ec_if, &ifmr); 1218 splx(s); 1219 1220 callout_reset(&sc->sc_callout, hz, phy_tick, sc); 1221 } 1222