1 /* $NetBSD: sbmac.c,v 1.42 2012/07/22 14:32:52 matt Exp $ */ 2 3 /* 4 * Copyright 2000, 2001, 2004 5 * Broadcom Corporation. All rights reserved. 6 * 7 * This software is furnished under license and may be used and copied only 8 * in accordance with the following terms and conditions. Subject to these 9 * conditions, you may download, copy, install, use, modify and distribute 10 * modified or unmodified copies of this software in source and/or binary 11 * form. No title or ownership is transferred hereby. 12 * 13 * 1) Any source code used, modified or distributed must reproduce and 14 * retain this copyright notice and list of conditions as they appear in 15 * the source file. 16 * 17 * 2) No right is granted to use any trade name, trademark, or logo of 18 * Broadcom Corporation. The "Broadcom Corporation" name may not be 19 * used to endorse or promote products derived from this software 20 * without the prior written permission of Broadcom Corporation. 21 * 22 * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED 23 * WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF 24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR 25 * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE 26 * FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE 27 * LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 32 * OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <sys/cdefs.h> 36 __KERNEL_RCSID(0, "$NetBSD: sbmac.c,v 1.42 2012/07/22 14:32:52 matt Exp $"); 37 38 #include "opt_inet.h" 39 #include "opt_ns.h" 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/sockio.h> 44 #include <sys/mbuf.h> 45 #include <sys/malloc.h> 46 #include <sys/kernel.h> 47 #include <sys/socket.h> 48 #include <sys/queue.h> 49 #include <sys/device.h> 50 51 #include <net/if.h> 52 #include <net/if_arp.h> 53 #include <net/if_ether.h> 54 #include <net/if_dl.h> 55 #include <net/if_media.h> 56 57 #include <net/bpf.h> 58 59 #ifdef INET 60 #include <netinet/in.h> 61 #include <netinet/if_inarp.h> 62 #endif 63 64 #include <mips/locore.h> 65 66 #include "sbobiovar.h" 67 68 #include <dev/mii/mii.h> 69 #include <dev/mii/miivar.h> 70 #include <dev/mii/mii_bitbang.h> 71 72 #include <mips/sibyte/include/sb1250_defs.h> 73 #include <mips/sibyte/include/sb1250_regs.h> 74 #include <mips/sibyte/include/sb1250_mac.h> 75 #include <mips/sibyte/include/sb1250_dma.h> 76 #include <mips/sibyte/include/sb1250_scd.h> 77 78 /* Simple types */ 79 80 typedef u_long sbmac_port_t; 81 typedef uint64_t sbmac_physaddr_t; 82 typedef uint64_t sbmac_enetaddr_t; 83 84 typedef enum { sbmac_speed_auto, sbmac_speed_10, 85 sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t; 86 87 typedef enum { sbmac_duplex_auto, sbmac_duplex_half, 88 sbmac_duplex_full } sbmac_duplex_t; 89 90 typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame, 91 sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t; 92 93 typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on, 94 sbmac_state_broken } sbmac_state_t; 95 96 97 /* Macros */ 98 99 #define SBMAC_EVENT_COUNTERS /* Include counters for various events */ 100 101 #define SBDMA_NEXTBUF(d, f) ((f + 1) & 
(d)->sbdma_dscr_mask) 102 103 #define CACHELINESIZE 32 104 #define NUMCACHEBLKS(x) (((x)+CACHELINESIZE-1)/CACHELINESIZE) 105 #define KMALLOC(x) malloc((x), M_DEVBUF, M_DONTWAIT) 106 #define KVTOPHYS(x) kvtophys((vaddr_t)(x)) 107 108 #ifdef SBMACDEBUG 109 #define dprintf(x) printf x 110 #else 111 #define dprintf(x) 112 #endif 113 114 #define SBMAC_READCSR(t) mips3_ld((volatile uint64_t *) (t)) 115 #define SBMAC_WRITECSR(t, v) mips3_sd((volatile uint64_t *) (t), (v)) 116 117 #define PKSEG1(x) ((sbmac_port_t) MIPS_PHYS_TO_KSEG1(x)) 118 119 /* These are limited to fit within one virtual page, and must be 2**N. */ 120 #define SBMAC_MAX_TXDESCR 256 /* should be 1024 */ 121 #define SBMAC_MAX_RXDESCR 256 /* should be 512 */ 122 123 #define ETHER_ALIGN 2 124 125 /* DMA Descriptor structure */ 126 127 typedef struct sbdmadscr_s { 128 uint64_t dscr_a; 129 uint64_t dscr_b; 130 } sbdmadscr_t; 131 132 133 /* DMA Controller structure */ 134 135 typedef struct sbmacdma_s { 136 137 /* 138 * This stuff is used to identify the channel and the registers 139 * associated with it. 140 */ 141 142 struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */ 143 int sbdma_channel; /* channel number */ 144 int sbdma_txdir; /* direction (1=transmit) */ 145 int sbdma_maxdescr; /* total # of descriptors in ring */ 146 sbmac_port_t sbdma_config0; /* DMA config register 0 */ 147 sbmac_port_t sbdma_config1; /* DMA config register 1 */ 148 sbmac_port_t sbdma_dscrbase; /* Descriptor base address */ 149 sbmac_port_t sbdma_dscrcnt; /* Descriptor count register */ 150 sbmac_port_t sbdma_curdscr; /* current descriptor address */ 151 152 /* 153 * This stuff is for maintenance of the ring 154 */ 155 sbdmadscr_t *sbdma_dscrtable; /* base of descriptor table */ 156 struct mbuf **sbdma_ctxtable; /* context table, one per descr */ 157 unsigned int sbdma_dscr_mask; /* sbdma_maxdescr - 1 */ 158 paddr_t sbdma_dscrtable_phys; /* and also the phys addr */ 159 unsigned int sbdma_add_index; /* next dscr for sw to add */ 160 unsigned int sbdma_rem_index; /* next dscr for sw to remove */ 161 } sbmacdma_t; 162 163 164 /* Ethernet softc structure */ 165 166 struct sbmac_softc { 167 168 /* 169 * NetBSD-specific things 170 */ 171 struct ethercom sc_ethercom; /* Ethernet common part */ 172 struct mii_data sc_mii; 173 struct callout sc_tick_ch; 174 175 device_t sc_dev; /* device */ 176 int sbm_if_flags; 177 void *sbm_intrhand; 178 179 /* 180 * Controller-specific things 181 */ 182 183 sbmac_port_t sbm_base; /* MAC's base address */ 184 sbmac_state_t sbm_state; /* current state */ 185 186 sbmac_port_t sbm_macenable; /* MAC Enable Register */ 187 sbmac_port_t sbm_maccfg; /* MAC Configuration Register */ 188 sbmac_port_t sbm_fifocfg; /* FIFO configuration register */ 189 sbmac_port_t sbm_framecfg; /* Frame configuration register */ 190 sbmac_port_t sbm_rxfilter; /* receive filter register */ 191 sbmac_port_t sbm_isr; /* Interrupt status register */ 192 sbmac_port_t sbm_imr; /* Interrupt mask register */ 193 194 sbmac_speed_t sbm_speed; /* current speed */ 195 sbmac_duplex_t sbm_duplex; /* current duplex */ 196 sbmac_fc_t sbm_fc; /* current flow control setting */ 197 int sbm_rxflags; /* received packet flags */ 198 199 u_char sbm_hwaddr[ETHER_ADDR_LEN]; 200 201 sbmacdma_t sbm_txdma; /* for now, only use channel 0 */ 202 sbmacdma_t sbm_rxdma; 203 204 int sbm_pass3_dma; /* chip has pass3 SOC DMA features */ 205 206 #ifdef SBMAC_EVENT_COUNTERS 207 struct evcnt sbm_ev_rxintr; /* Rx interrupts */ 208 struct evcnt sbm_ev_txintr; /* Tx interrupts */ 
	struct evcnt sbm_ev_txdrop;	/* Tx dropped: mbuf allocation failed */
	struct evcnt sbm_ev_txstall;	/* Tx stalled due to no descriptors free */

	struct evcnt sbm_ev_txsplit;	/* pass3 Tx split mbuf */
	struct evcnt sbm_ev_txkeep;	/* pass3 Tx didn't split mbuf */
#endif
};


#ifdef SBMAC_EVENT_COUNTERS
#define	SBMAC_EVCNT_INCR(ev)	(ev).ev_count++
#else
#define	SBMAC_EVCNT_INCR(ev)	do { /* nothing */ } while (0)
#endif

/* Externs */

extern paddr_t kvtophys(vaddr_t);

/* Prototypes */

static void sbdma_initctx(sbmacdma_t *, struct sbmac_softc *, int, int, int);
static void sbdma_channel_start(sbmacdma_t *);
static int sbdma_add_rcvbuffer(sbmacdma_t *, struct mbuf *);
static int sbdma_add_txbuffer(sbmacdma_t *, struct mbuf *);
static void sbdma_emptyring(sbmacdma_t *);
static void sbdma_fillring(sbmacdma_t *);
static void sbdma_rx_process(struct sbmac_softc *, sbmacdma_t *);
static void sbdma_tx_process(struct sbmac_softc *, sbmacdma_t *);
static void sbmac_initctx(struct sbmac_softc *);
static void sbmac_channel_start(struct sbmac_softc *);
static void sbmac_channel_stop(struct sbmac_softc *);
static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *,
    sbmac_state_t);
static void sbmac_promiscuous_mode(struct sbmac_softc *, bool);
static void sbmac_init_and_start(struct sbmac_softc *);
static uint64_t sbmac_addr2reg(u_char *);
static void sbmac_intr(void *, uint32_t, vaddr_t);
static void sbmac_start(struct ifnet *);
static void sbmac_setmulti(struct sbmac_softc *);
static int sbmac_ether_ioctl(struct ifnet *, u_long, void *);
static int sbmac_ioctl(struct ifnet *, u_long, void *);
static void sbmac_watchdog(struct ifnet *);
static int sbmac_match(device_t, cfdata_t, void *);
static void sbmac_attach(device_t, device_t, void *);
static bool sbmac_set_speed(struct sbmac_softc *, sbmac_speed_t);
static bool sbmac_set_duplex(struct sbmac_softc *, sbmac_duplex_t, sbmac_fc_t);
static void sbmac_tick(void *);


/* Globals */

CFATTACH_DECL_NEW(sbmac, sizeof(struct sbmac_softc),
    sbmac_match, sbmac_attach, NULL, NULL);

static uint32_t sbmac_mii_bitbang_read(device_t self);
static void sbmac_mii_bitbang_write(device_t self, uint32_t val);

static const struct mii_bitbang_ops sbmac_mii_bitbang_ops = {
	sbmac_mii_bitbang_read,
	sbmac_mii_bitbang_write,
	{
		(uint32_t)M_MAC_MDIO_OUT,	/* MII_BIT_MDO */
		(uint32_t)M_MAC_MDIO_IN,	/* MII_BIT_MDI */
		(uint32_t)M_MAC_MDC,		/* MII_BIT_MDC */
		0,				/* MII_BIT_DIR_HOST_PHY */
		(uint32_t)M_MAC_MDIO_DIR	/* MII_BIT_DIR_PHY_HOST */
	}
};

static uint32_t
sbmac_mii_bitbang_read(device_t self)
{
	struct sbmac_softc *sc = device_private(self);
	sbmac_port_t reg;

	reg = PKSEG1(sc->sbm_base + R_MAC_MDIO);
	return (uint32_t) SBMAC_READCSR(reg);
}

static void
sbmac_mii_bitbang_write(device_t self, uint32_t val)
{
	struct sbmac_softc *sc = device_private(self);
	sbmac_port_t reg;

	reg = PKSEG1(sc->sbm_base + R_MAC_MDIO);

	SBMAC_WRITECSR(reg, (val &
	    (M_MAC_MDC|M_MAC_MDIO_DIR|M_MAC_MDIO_OUT|M_MAC_MDIO_IN)));
}

/*
 * Read a PHY register through the MII.
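 * The access is bit-banged on the MAC's MDIO pins via the mii_bitbang(9)
 * helpers, using the sbmac_mii_bitbang_ops table above.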
303 */ 304 static int 305 sbmac_mii_readreg(device_t self, int phy, int reg) 306 { 307 308 return (mii_bitbang_readreg(self, &sbmac_mii_bitbang_ops, phy, reg)); 309 } 310 311 /* 312 * Write to a PHY register through the MII. 313 */ 314 static void 315 sbmac_mii_writereg(device_t self, int phy, int reg, int val) 316 { 317 318 mii_bitbang_writereg(self, &sbmac_mii_bitbang_ops, phy, reg, val); 319 } 320 321 static void 322 sbmac_mii_statchg(struct ifnet *ifp) 323 { 324 struct sbmac_softc *sc = ifp->if_softc; 325 sbmac_state_t oldstate; 326 327 /* Stop the MAC in preparation for changing all of the parameters. */ 328 oldstate = sbmac_set_channel_state(sc, sbmac_state_off); 329 330 switch (sc->sc_ethercom.ec_if.if_baudrate) { 331 default: /* if autonegotiation fails, assume 10Mbit */ 332 case IF_Mbps(10): 333 sbmac_set_speed(sc, sbmac_speed_10); 334 break; 335 336 case IF_Mbps(100): 337 sbmac_set_speed(sc, sbmac_speed_100); 338 break; 339 340 case IF_Mbps(1000): 341 sbmac_set_speed(sc, sbmac_speed_1000); 342 break; 343 } 344 345 if (sc->sc_mii.mii_media_active & IFM_FDX) { 346 /* Configure for full-duplex */ 347 /* XXX: is flow control right for 10, 100? */ 348 sbmac_set_duplex(sc, sbmac_duplex_full, sbmac_fc_frame); 349 } else { 350 /* Configure for half-duplex */ 351 /* XXX: is flow control right? */ 352 sbmac_set_duplex(sc, sbmac_duplex_half, sbmac_fc_disabled); 353 } 354 355 /* And put it back into its former state. */ 356 sbmac_set_channel_state(sc, oldstate); 357 } 358 359 /* 360 * SBDMA_INITCTX(d, sc, chan, txrx, maxdescr) 361 * 362 * Initialize a DMA channel context. Since there are potentially 363 * eight DMA channels per MAC, it's nice to do this in a standard 364 * way. 365 * 366 * Input parameters: 367 * d - sbmacdma_t structure (DMA channel context) 368 * sc - sbmac_softc structure (pointer to a MAC) 369 * chan - channel number (0..1 right now) 370 * txrx - Identifies DMA_TX or DMA_RX for channel direction 371 * maxdescr - number of descriptors 372 * 373 * Return value: 374 * nothing 375 */ 376 377 static void 378 sbdma_initctx(sbmacdma_t *d, struct sbmac_softc *sc, int chan, int txrx, 379 int maxdescr) 380 { 381 /* 382 * Save away interesting stuff in the structure 383 */ 384 385 d->sbdma_eth = sc; 386 d->sbdma_channel = chan; 387 d->sbdma_txdir = txrx; 388 389 /* 390 * initialize register pointers 391 */ 392 393 d->sbdma_config0 = PKSEG1(sc->sbm_base + 394 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CONFIG0)); 395 d->sbdma_config1 = PKSEG1(sc->sbm_base + 396 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CONFIG1)); 397 d->sbdma_dscrbase = PKSEG1(sc->sbm_base + 398 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_DSCR_BASE)); 399 d->sbdma_dscrcnt = PKSEG1(sc->sbm_base + 400 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_DSCR_CNT)); 401 d->sbdma_curdscr = PKSEG1(sc->sbm_base + 402 R_MAC_DMA_REGISTER(txrx, chan, R_MAC_DMA_CUR_DSCRADDR)); 403 404 /* 405 * Allocate memory for the ring 406 */ 407 408 d->sbdma_maxdescr = maxdescr; 409 d->sbdma_dscr_mask = d->sbdma_maxdescr - 1; 410 411 d->sbdma_dscrtable = (sbdmadscr_t *) 412 KMALLOC(d->sbdma_maxdescr * sizeof(sbdmadscr_t)); 413 414 memset(d->sbdma_dscrtable, 0, d->sbdma_maxdescr*sizeof(sbdmadscr_t)); 415 416 d->sbdma_dscrtable_phys = KVTOPHYS(d->sbdma_dscrtable); 417 418 /* 419 * And context table 420 */ 421 422 d->sbdma_ctxtable = (struct mbuf **) 423 KMALLOC(d->sbdma_maxdescr*sizeof(struct mbuf *)); 424 425 memset(d->sbdma_ctxtable, 0, d->sbdma_maxdescr*sizeof(struct mbuf *)); 426 } 427 428 /* 429 * SBDMA_CHANNEL_START(d) 430 * 431 * Initialize the 
hardware registers for a DMA channel.
 *
 * Input parameters:
 *	d - DMA channel to init (context must be previously init'd)
 *
 * Return value:
 *	nothing
 */

static void
sbdma_channel_start(sbmacdma_t *d)
{
	/*
	 * Turn on the DMA channel
	 */

	SBMAC_WRITECSR(d->sbdma_config1, 0);

	SBMAC_WRITECSR(d->sbdma_dscrbase, d->sbdma_dscrtable_phys);

	SBMAC_WRITECSR(d->sbdma_config0, V_DMA_RINGSZ(d->sbdma_maxdescr) | 0);

	/*
	 * Initialize ring pointers
	 */

	d->sbdma_add_index = 0;
	d->sbdma_rem_index = 0;
}

/*
 * SBDMA_ADD_RCVBUFFER(d, m)
 *
 * Add a buffer to the specified DMA channel.  For receive channels,
 * this queues a buffer for inbound packets.
 *
 * Input parameters:
 *	d - DMA channel descriptor
 *	m - mbuf to add, or NULL if we should allocate one.
 *
 * Return value:
 *	0 if the buffer was added successfully
 *	ENOSPC if the ring is full, ENOBUFS if an mbuf could not be allocated
 */

static int
sbdma_add_rcvbuffer(sbmacdma_t *d, struct mbuf *m)
{
	unsigned int dsc, nextdsc;
	struct mbuf *m_new = NULL;

	/* get pointer to our current place in the ring */

	dsc = d->sbdma_add_index;
	nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);

	/*
	 * figure out if the ring is full - if the next descriptor
	 * is the same as the one that we're going to remove from
	 * the ring, the ring is full
	 */

	if (nextdsc == d->sbdma_rem_index)
		return ENOSPC;

	/*
	 * Allocate an mbuf if we don't already have one.
	 * If we do have an mbuf, reset it so that it's empty.
	 */

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			aprint_error_dev(d->sbdma_eth->sc_dev,
			    "mbuf allocation failed\n");
			return ENOBUFS;
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			aprint_error_dev(d->sbdma_eth->sc_dev,
			    "mbuf cluster allocation failed\n");
			m_freem(m_new);
			return ENOBUFS;
		}

		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_adj(m_new, ETHER_ALIGN);
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_adj(m_new, ETHER_ALIGN);
	}

	/*
	 * fill in the descriptor
	 */

	d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m_new, void *)) |
	    V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(ETHER_ALIGN + m_new->m_len)) |
	    M_DMA_DSCRA_INTERRUPT;

	/* receiving: no options */
	d->sbdma_dscrtable[dsc].dscr_b = 0;

	/*
	 * fill in the context
	 */

	d->sbdma_ctxtable[dsc] = m_new;

	/*
	 * point at next packet
	 */

	d->sbdma_add_index = nextdsc;

	/*
	 * Give the buffer to the DMA engine.
	 */

	SBMAC_WRITECSR(d->sbdma_dscrcnt, 1);

	return 0;	/* we did it */
}

/*
 * SBDMA_ADD_TXBUFFER(d, m)
 *
 * Add a transmit buffer to the specified DMA channel, causing a
 * transmit to start.
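 * On pass3 parts the mbuf chain is placed on the ring as-is and is
 * freed later by sbdma_tx_process(); otherwise the data is copied into
 * a fresh cluster and the caller remains responsible for the original.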
 *
 * Input parameters:
 *	d - DMA channel descriptor
 *	m - mbuf to add
 *
 * Return value:
 *	0 transmit queued successfully
 *	otherwise error code
 */

static int
sbdma_add_txbuffer(sbmacdma_t *d, struct mbuf *m)
{
	unsigned int dsc, nextdsc, prevdsc, origdesc;
	int length;
	int num_mbufs = 0;
	struct sbmac_softc *sc = d->sbdma_eth;

	/* get pointer to our current place in the ring */

	dsc = d->sbdma_add_index;
	nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index);

	/*
	 * figure out if the ring is full - if the next descriptor
	 * is the same as the one that we're going to remove from
	 * the ring, the ring is full
	 */

	if (nextdsc == d->sbdma_rem_index) {
		SBMAC_EVCNT_INCR(sc->sbm_ev_txstall);
		return ENOSPC;
	}

	/*
	 * PASS3 parts do not have the buffer alignment restriction, so
	 * there is no need to copy/coalesce into a new mbuf.  They also
	 * use a different descriptor format.
	 */
	if (sc->sbm_pass3_dma) {
		struct mbuf *m_temp = NULL;

		/*
		 * Loop through this mbuf chain.
		 * The head mbuf will have SOP set.
		 */
		d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m, void *)) |
		    M_DMA_ETHTX_SOP;

		/*
		 * transmitting: set outbound options, buffer A size (+ low 5
		 * bits of start addr), and packet length.
		 */
		d->sbdma_dscrtable[dsc].dscr_b =
		    V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
		    V_DMA_DSCRB_A_SIZE((m->m_len +
			(mtod(m, uintptr_t) & 0x0000001F))) |
		    V_DMA_DSCRB_PKT_SIZE_MSB((m->m_pkthdr.len & 0xc000) >> 14) |
		    V_DMA_DSCRB_PKT_SIZE(m->m_pkthdr.len & 0x3fff);

		d->sbdma_add_index = nextdsc;
		origdesc = prevdsc = dsc;
		dsc = d->sbdma_add_index;
		num_mbufs++;

		/* Start with first non-head mbuf */
		for (m_temp = m->m_next; m_temp != 0; m_temp = m_temp->m_next) {
			int len, next_len;
			uint64_t addr;

			if (m_temp->m_len == 0)
				continue;	/* Skip 0-length mbufs */

			len = m_temp->m_len;
			addr = KVTOPHYS(mtod(m_temp, void *));

			/*
			 * Check to see if the mbuf spans a page boundary.  If
			 * it does, and the physical pages behind the virtual
			 * pages are not contiguous, split it so that each
			 * virtual page uses its own Tx descriptor.
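			 * (If the underlying physical pages turn out to be
			 * contiguous the split is skipped; the sbm_ev_txkeep
			 * and sbm_ev_txsplit counters record which case hit.)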
644 */ 645 if (trunc_page(addr) != trunc_page(addr + len - 1)) { 646 next_len = (addr + len) - trunc_page(addr + len); 647 648 len -= next_len; 649 650 if (addr + len == 651 KVTOPHYS(mtod(m_temp, char *) + len)) { 652 SBMAC_EVCNT_INCR(sc->sbm_ev_txkeep); 653 len += next_len; 654 next_len = 0; 655 } else { 656 SBMAC_EVCNT_INCR(sc->sbm_ev_txsplit); 657 } 658 } else { 659 next_len = 0; 660 } 661 662 again: 663 /* 664 * fill in the descriptor 665 */ 666 d->sbdma_dscrtable[dsc].dscr_a = addr; 667 668 /* 669 * transmitting: set outbound options,buffer A 670 * size(+ low 5 bits of start addr) 671 */ 672 d->sbdma_dscrtable[dsc].dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_NOTSOP) | 673 V_DMA_DSCRB_A_SIZE((len + (addr & 0x0000001F))); 674 675 d->sbdma_ctxtable[dsc] = NULL; 676 677 /* 678 * point at next descriptor 679 */ 680 nextdsc = SBDMA_NEXTBUF(d, d->sbdma_add_index); 681 if (nextdsc == d->sbdma_rem_index) { 682 d->sbdma_add_index = origdesc; 683 SBMAC_EVCNT_INCR(sc->sbm_ev_txstall); 684 return ENOSPC; 685 } 686 d->sbdma_add_index = nextdsc; 687 688 prevdsc = dsc; 689 dsc = d->sbdma_add_index; 690 num_mbufs++; 691 692 if (next_len != 0) { 693 addr = KVTOPHYS(mtod(m_temp, char *) + len); 694 len = next_len; 695 696 next_len = 0; 697 goto again; 698 } 699 700 } 701 /* Set head mbuf to last context index */ 702 d->sbdma_ctxtable[prevdsc] = m; 703 704 /* Interrupt on last dscr of packet. */ 705 d->sbdma_dscrtable[prevdsc].dscr_a |= M_DMA_DSCRA_INTERRUPT; 706 } else { 707 struct mbuf *m_new = NULL; 708 /* 709 * [BEGIN XXX] 710 * XXX Copy/coalesce the mbufs into a single mbuf cluster (we 711 * assume it will fit). This is a temporary hack to get us 712 * going. 713 */ 714 715 MGETHDR(m_new,M_DONTWAIT,MT_DATA); 716 if (m_new == NULL) { 717 aprint_error_dev(d->sbdma_eth->sc_dev, 718 "mbuf allocation failed\n"); 719 SBMAC_EVCNT_INCR(sc->sbm_ev_txdrop); 720 return ENOBUFS; 721 } 722 723 MCLGET(m_new,M_DONTWAIT); 724 if (!(m_new->m_flags & M_EXT)) { 725 aprint_error_dev(d->sbdma_eth->sc_dev, 726 "mbuf cluster allocation failed\n"); 727 m_freem(m_new); 728 SBMAC_EVCNT_INCR(sc->sbm_ev_txdrop); 729 return ENOBUFS; 730 } 731 732 m_new->m_len = m_new->m_pkthdr.len= MCLBYTES; 733 /*m_adj(m_new,ETHER_ALIGN);*/ 734 735 /* 736 * XXX Don't forget to include the offset portion in the 737 * XXX cache block calculation when this code is rewritten! 738 */ 739 740 /* 741 * Copy data 742 */ 743 744 m_copydata(m,0,m->m_pkthdr.len,mtod(m_new,void *)); 745 m_new->m_len = m_new->m_pkthdr.len = m->m_pkthdr.len; 746 747 /* Free old mbuf 'm', actual mbuf is now 'm_new' */ 748 749 // XXX: CALLERS WILL FREE, they might have to bpf_mtap() if this 750 // XXX: function succeeds. 751 // m_freem(m); 752 length = m_new->m_len; 753 754 /* [END XXX] */ 755 /* 756 * fill in the descriptor 757 */ 758 759 d->sbdma_dscrtable[dsc].dscr_a = KVTOPHYS(mtod(m_new,void *)) | 760 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(m_new->m_len)) | 761 M_DMA_DSCRA_INTERRUPT | 762 M_DMA_ETHTX_SOP; 763 764 /* transmitting: set outbound options and length */ 765 d->sbdma_dscrtable[dsc].dscr_b = 766 V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) | 767 V_DMA_DSCRB_PKT_SIZE(length); 768 769 num_mbufs++; 770 771 /* 772 * fill in the context 773 */ 774 775 d->sbdma_ctxtable[dsc] = m_new; 776 777 /* 778 * point at next packet 779 */ 780 d->sbdma_add_index = nextdsc; 781 } 782 783 /* 784 * Give the buffer to the DMA engine. 
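Writing the number of newly filled descriptors to the descriptor-count CSR below is what actually hands them to the hardware.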
785 */ 786 787 SBMAC_WRITECSR(d->sbdma_dscrcnt, num_mbufs); 788 789 return 0; /* we did it */ 790 } 791 792 /* 793 * SBDMA_EMPTYRING(d) 794 * 795 * Free all allocated mbufs on the specified DMA channel; 796 * 797 * Input parameters: 798 * d - DMA channel 799 * 800 * Return value: 801 * nothing 802 */ 803 804 static void 805 sbdma_emptyring(sbmacdma_t *d) 806 { 807 int idx; 808 struct mbuf *m; 809 810 for (idx = 0; idx < d->sbdma_maxdescr; idx++) { 811 m = d->sbdma_ctxtable[idx]; 812 if (m) { 813 m_freem(m); 814 d->sbdma_ctxtable[idx] = NULL; 815 } 816 } 817 } 818 819 /* 820 * SBDMA_FILLRING(d) 821 * 822 * Fill the specified DMA channel (must be receive channel) 823 * with mbufs 824 * 825 * Input parameters: 826 * d - DMA channel 827 * 828 * Return value: 829 * nothing 830 */ 831 832 static void 833 sbdma_fillring(sbmacdma_t *d) 834 { 835 int idx; 836 837 for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) 838 if (sbdma_add_rcvbuffer(d, NULL) != 0) 839 break; 840 } 841 842 /* 843 * SBDMA_RX_PROCESS(sc, d) 844 * 845 * Process "completed" receive buffers on the specified DMA channel. 846 * Note that this isn't really ideal for priority channels, since 847 * it processes all of the packets on a given channel before 848 * returning. 849 * 850 * Input parameters: 851 * sc - softc structure 852 * d - DMA channel context 853 * 854 * Return value: 855 * nothing 856 */ 857 858 static void 859 sbdma_rx_process(struct sbmac_softc *sc, sbmacdma_t *d) 860 { 861 int curidx; 862 int hwidx; 863 sbdmadscr_t *dscp; 864 struct mbuf *m; 865 int len; 866 867 struct ifnet *ifp = &(sc->sc_ethercom.ec_if); 868 869 for (;;) { 870 /* 871 * figure out where we are (as an index) and where 872 * the hardware is (also as an index) 873 * 874 * This could be done faster if (for example) the 875 * descriptor table was page-aligned and contiguous in 876 * both virtual and physical memory -- you could then 877 * just compare the low-order bits of the virtual address 878 * (sbdma_rem_index) and the physical address 879 * (sbdma_curdscr CSR). 880 */ 881 882 curidx = d->sbdma_rem_index; 883 hwidx = (int) 884 (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - 885 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t)); 886 887 /* 888 * If they're the same, that means we've processed all 889 * of the descriptors up to (but not including) the one that 890 * the hardware is working on right now. 891 */ 892 893 if (curidx == hwidx) 894 break; 895 896 /* 897 * Otherwise, get the packet's mbuf ptr back 898 */ 899 900 dscp = &(d->sbdma_dscrtable[curidx]); 901 m = d->sbdma_ctxtable[curidx]; 902 d->sbdma_ctxtable[curidx] = NULL; 903 904 len = (int)G_DMA_DSCRB_PKT_SIZE(dscp->dscr_b) - 4; 905 906 /* 907 * Check packet status. If good, process it. 908 * If not, silently drop it and put it back on the 909 * receive ring. 910 */ 911 912 if (! (dscp->dscr_a & M_DMA_ETHRX_BAD)) { 913 914 /* 915 * Set length into the packet 916 * XXX do we remove the CRC here? 917 */ 918 m->m_pkthdr.len = m->m_len = len; 919 920 ifp->if_ipackets++; 921 m->m_pkthdr.rcvif = ifp; 922 923 924 /* 925 * Add a new buffer to replace the old one. 926 */ 927 sbdma_add_rcvbuffer(d, NULL); 928 929 /* 930 * Handle BPF listeners. Let the BPF user see the 931 * packet, but don't pass it up to the ether_input() 932 * layer unless it's a broadcast packet, multicast 933 * packet, matches our ethernet address or the 934 * interface is in promiscuous mode. 
935 */ 936 937 bpf_mtap(ifp, m); 938 /* 939 * Pass the buffer to the kernel 940 */ 941 (*ifp->if_input)(ifp, m); 942 } else { 943 /* 944 * Packet was mangled somehow. Just drop it and 945 * put it back on the receive ring. 946 */ 947 sbdma_add_rcvbuffer(d, m); 948 } 949 950 /* 951 * .. and advance to the next buffer. 952 */ 953 954 d->sbdma_rem_index = SBDMA_NEXTBUF(d, d->sbdma_rem_index); 955 } 956 } 957 958 /* 959 * SBDMA_TX_PROCESS(sc, d) 960 * 961 * Process "completed" transmit buffers on the specified DMA channel. 962 * This is normally called within the interrupt service routine. 963 * Note that this isn't really ideal for priority channels, since 964 * it processes all of the packets on a given channel before 965 * returning. 966 * 967 * Input parameters: 968 * sc - softc structure 969 * d - DMA channel context 970 * 971 * Return value: 972 * nothing 973 */ 974 975 static void 976 sbdma_tx_process(struct sbmac_softc *sc, sbmacdma_t *d) 977 { 978 int curidx; 979 int hwidx; 980 struct mbuf *m; 981 982 struct ifnet *ifp = &(sc->sc_ethercom.ec_if); 983 984 for (;;) { 985 /* 986 * figure out where we are (as an index) and where 987 * the hardware is (also as an index) 988 * 989 * This could be done faster if (for example) the 990 * descriptor table was page-aligned and contiguous in 991 * both virtual and physical memory -- you could then 992 * just compare the low-order bits of the virtual address 993 * (sbdma_rem_index) and the physical address 994 * (sbdma_curdscr CSR). 995 */ 996 997 curidx = d->sbdma_rem_index; 998 hwidx = (int) 999 (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - 1000 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t)); 1001 1002 /* 1003 * If they're the same, that means we've processed all 1004 * of the descriptors up to (but not including) the one that 1005 * the hardware is working on right now. 1006 */ 1007 1008 if (curidx == hwidx) 1009 break; 1010 1011 /* 1012 * Otherwise, get the packet's mbuf ptr back 1013 */ 1014 1015 m = d->sbdma_ctxtable[curidx]; 1016 d->sbdma_ctxtable[curidx] = NULL; 1017 1018 /* 1019 * for transmits we just free buffers and count packets. 1020 */ 1021 ifp->if_opackets++; 1022 m_freem(m); 1023 1024 /* 1025 * .. and advance to the next buffer. 1026 */ 1027 1028 d->sbdma_rem_index = SBDMA_NEXTBUF(d, d->sbdma_rem_index); 1029 } 1030 1031 /* 1032 * Decide what to set the IFF_OACTIVE bit in the interface to. 1033 * It's supposed to reflect if the interface is actively 1034 * transmitting, but that's really hard to do quickly. 1035 */ 1036 1037 ifp->if_flags &= ~IFF_OACTIVE; 1038 } 1039 1040 /* 1041 * SBMAC_INITCTX(s) 1042 * 1043 * Initialize an Ethernet context structure - this is called 1044 * once per MAC on the 1250. 
Memory is allocated here, so don't 1045 * call it again from inside the ioctl routines that bring the 1046 * interface up/down 1047 * 1048 * Input parameters: 1049 * sc - sbmac context structure 1050 * 1051 * Return value: 1052 * 0 1053 */ 1054 1055 static void 1056 sbmac_initctx(struct sbmac_softc *sc) 1057 { 1058 uint64_t sysrev; 1059 1060 /* 1061 * figure out the addresses of some ports 1062 */ 1063 1064 sc->sbm_macenable = PKSEG1(sc->sbm_base + R_MAC_ENABLE); 1065 sc->sbm_maccfg = PKSEG1(sc->sbm_base + R_MAC_CFG); 1066 sc->sbm_fifocfg = PKSEG1(sc->sbm_base + R_MAC_THRSH_CFG); 1067 sc->sbm_framecfg = PKSEG1(sc->sbm_base + R_MAC_FRAMECFG); 1068 sc->sbm_rxfilter = PKSEG1(sc->sbm_base + R_MAC_ADFILTER_CFG); 1069 sc->sbm_isr = PKSEG1(sc->sbm_base + R_MAC_STATUS); 1070 sc->sbm_imr = PKSEG1(sc->sbm_base + R_MAC_INT_MASK); 1071 1072 /* 1073 * Initialize the DMA channels. Right now, only one per MAC is used 1074 * Note: Only do this _once_, as it allocates memory from the kernel! 1075 */ 1076 1077 sbdma_initctx(&(sc->sbm_txdma), sc, 0, DMA_TX, SBMAC_MAX_TXDESCR); 1078 sbdma_initctx(&(sc->sbm_rxdma), sc, 0, DMA_RX, SBMAC_MAX_RXDESCR); 1079 1080 /* 1081 * initial state is OFF 1082 */ 1083 1084 sc->sbm_state = sbmac_state_off; 1085 1086 /* 1087 * Initial speed is (XXX TEMP) 10MBit/s HDX no FC 1088 */ 1089 1090 sc->sbm_speed = sbmac_speed_10; 1091 sc->sbm_duplex = sbmac_duplex_half; 1092 sc->sbm_fc = sbmac_fc_disabled; 1093 1094 /* 1095 * Determine SOC type. 112x has Pass3 SOC features. 1096 */ 1097 sysrev = SBMAC_READCSR( PKSEG1(A_SCD_SYSTEM_REVISION) ); 1098 sc->sbm_pass3_dma = (SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1120 || 1099 SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1125 || 1100 SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1125H || 1101 (SYS_SOC_TYPE(sysrev) == K_SYS_SOC_TYPE_BCM1250 && 1102 G_SYS_REVISION(sysrev) >= K_SYS_REVISION_BCM1250_PASS3)); 1103 #ifdef SBMAC_EVENT_COUNTERS 1104 const char * const xname = device_xname(sc->sc_dev); 1105 evcnt_attach_dynamic(&sc->sbm_ev_rxintr, EVCNT_TYPE_INTR, 1106 NULL, xname, "rxintr"); 1107 evcnt_attach_dynamic(&sc->sbm_ev_txintr, EVCNT_TYPE_INTR, 1108 NULL, xname, "txintr"); 1109 evcnt_attach_dynamic(&sc->sbm_ev_txdrop, EVCNT_TYPE_MISC, 1110 NULL, xname, "txdrop"); 1111 evcnt_attach_dynamic(&sc->sbm_ev_txstall, EVCNT_TYPE_MISC, 1112 NULL, xname, "txstall"); 1113 if (sc->sbm_pass3_dma) { 1114 evcnt_attach_dynamic(&sc->sbm_ev_txsplit, EVCNT_TYPE_MISC, 1115 NULL, xname, "pass3tx-split"); 1116 evcnt_attach_dynamic(&sc->sbm_ev_txkeep, EVCNT_TYPE_MISC, 1117 NULL, xname, "pass3tx-keep"); 1118 } 1119 #endif 1120 } 1121 1122 /* 1123 * SBMAC_CHANNEL_START(s) 1124 * 1125 * Start packet processing on this MAC. 1126 * 1127 * Input parameters: 1128 * sc - sbmac structure 1129 * 1130 * Return value: 1131 * nothing 1132 */ 1133 1134 static void 1135 sbmac_channel_start(struct sbmac_softc *sc) 1136 { 1137 uint64_t reg; 1138 sbmac_port_t port; 1139 uint64_t cfg, fifo, framecfg; 1140 int idx; 1141 uint64_t dma_cfg0, fifo_cfg; 1142 sbmacdma_t *txdma; 1143 1144 /* 1145 * Don't do this if running 1146 */ 1147 1148 if (sc->sbm_state == sbmac_state_on) 1149 return; 1150 1151 /* 1152 * Bring the controller out of reset, but leave it off. 1153 */ 1154 1155 SBMAC_WRITECSR(sc->sbm_macenable, 0); 1156 1157 /* 1158 * Ignore all received packets 1159 */ 1160 1161 SBMAC_WRITECSR(sc->sbm_rxfilter, 0); 1162 1163 /* 1164 * Calculate values for various control registers. 
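These values are written to the MAC configuration, FIFO threshold and frame configuration registers further down, once the address filter tables have been cleared.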
1165 */ 1166 1167 cfg = M_MAC_RETRY_EN | 1168 M_MAC_TX_HOLD_SOP_EN | 1169 V_MAC_TX_PAUSE_CNT_16K | 1170 M_MAC_AP_STAT_EN | 1171 M_MAC_SS_EN | 1172 0; 1173 1174 fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */ 1175 V_MAC_TX_RD_THRSH(4) | 1176 V_MAC_TX_RL_THRSH(4) | 1177 V_MAC_RX_PL_THRSH(4) | 1178 V_MAC_RX_RD_THRSH(4) | /* Must be '4' */ 1179 V_MAC_RX_PL_THRSH(4) | 1180 V_MAC_RX_RL_THRSH(8) | 1181 0; 1182 1183 framecfg = V_MAC_MIN_FRAMESZ_DEFAULT | 1184 V_MAC_MAX_FRAMESZ_DEFAULT | 1185 V_MAC_BACKOFF_SEL(1); 1186 1187 /* 1188 * Clear out the hash address map 1189 */ 1190 1191 port = PKSEG1(sc->sbm_base + R_MAC_HASH_BASE); 1192 for (idx = 0; idx < MAC_HASH_COUNT; idx++) { 1193 SBMAC_WRITECSR(port, 0); 1194 port += sizeof(uint64_t); 1195 } 1196 1197 /* 1198 * Clear out the exact-match table 1199 */ 1200 1201 port = PKSEG1(sc->sbm_base + R_MAC_ADDR_BASE); 1202 for (idx = 0; idx < MAC_ADDR_COUNT; idx++) { 1203 SBMAC_WRITECSR(port, 0); 1204 port += sizeof(uint64_t); 1205 } 1206 1207 /* 1208 * Clear out the DMA Channel mapping table registers 1209 */ 1210 1211 port = PKSEG1(sc->sbm_base + R_MAC_CHUP0_BASE); 1212 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { 1213 SBMAC_WRITECSR(port, 0); 1214 port += sizeof(uint64_t); 1215 } 1216 1217 port = PKSEG1(sc->sbm_base + R_MAC_CHLO0_BASE); 1218 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { 1219 SBMAC_WRITECSR(port, 0); 1220 port += sizeof(uint64_t); 1221 } 1222 1223 /* 1224 * Program the hardware address. It goes into the hardware-address 1225 * register as well as the first filter register. 1226 */ 1227 1228 reg = sbmac_addr2reg(sc->sbm_hwaddr); 1229 1230 port = PKSEG1(sc->sbm_base + R_MAC_ADDR_BASE); 1231 SBMAC_WRITECSR(port, reg); 1232 port = PKSEG1(sc->sbm_base + R_MAC_ETHERNET_ADDR); 1233 SBMAC_WRITECSR(port, 0); // pass1 workaround 1234 1235 /* 1236 * Set the receive filter for no packets, and write values 1237 * to the various config registers 1238 */ 1239 1240 SBMAC_WRITECSR(sc->sbm_rxfilter, 0); 1241 SBMAC_WRITECSR(sc->sbm_imr, 0); 1242 SBMAC_WRITECSR(sc->sbm_framecfg, framecfg); 1243 SBMAC_WRITECSR(sc->sbm_fifocfg, fifo); 1244 SBMAC_WRITECSR(sc->sbm_maccfg, cfg); 1245 1246 /* 1247 * Initialize DMA channels (rings should be ok now) 1248 */ 1249 1250 sbdma_channel_start(&(sc->sbm_rxdma)); 1251 sbdma_channel_start(&(sc->sbm_txdma)); 1252 1253 /* 1254 * Configure the speed, duplex, and flow control 1255 */ 1256 1257 sbmac_set_speed(sc, sc->sbm_speed); 1258 sbmac_set_duplex(sc, sc->sbm_duplex, sc->sbm_fc); 1259 1260 /* 1261 * Fill the receive ring 1262 */ 1263 1264 sbdma_fillring(&(sc->sbm_rxdma)); 1265 1266 /* 1267 * Turn on the rest of the bits in the enable register 1268 */ 1269 1270 SBMAC_WRITECSR(sc->sbm_macenable, M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0 | 1271 M_MAC_RX_ENABLE | M_MAC_TX_ENABLE); 1272 1273 1274 /* 1275 * Accept any kind of interrupt on TX and RX DMA channel 0 1276 */ 1277 SBMAC_WRITECSR(sc->sbm_imr, 1278 (M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | 1279 (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)); 1280 1281 /* 1282 * Enable receiving unicasts and broadcasts 1283 */ 1284 1285 SBMAC_WRITECSR(sc->sbm_rxfilter, M_MAC_UCAST_EN | M_MAC_BCAST_EN); 1286 1287 /* 1288 * On chips which support unaligned DMA features, set the descriptor 1289 * ring for transmit channels to use the unaligned buffer format. 
1290 */ 1291 txdma = &(sc->sbm_txdma); 1292 1293 if (sc->sbm_pass3_dma) { 1294 dma_cfg0 = SBMAC_READCSR(txdma->sbdma_config0); 1295 dma_cfg0 |= V_DMA_DESC_TYPE(K_DMA_DESC_TYPE_RING_UAL_RMW) | 1296 M_DMA_TBX_EN | M_DMA_TDX_EN; 1297 SBMAC_WRITECSR(txdma->sbdma_config0,dma_cfg0); 1298 1299 fifo_cfg = SBMAC_READCSR(sc->sbm_fifocfg); 1300 fifo_cfg |= V_MAC_TX_WR_THRSH(8) | 1301 V_MAC_TX_RD_THRSH(8) | V_MAC_TX_RL_THRSH(8); 1302 SBMAC_WRITECSR(sc->sbm_fifocfg,fifo_cfg); 1303 } 1304 1305 /* 1306 * we're running now. 1307 */ 1308 1309 sc->sbm_state = sbmac_state_on; 1310 sc->sc_ethercom.ec_if.if_flags |= IFF_RUNNING; 1311 1312 /* 1313 * Program multicast addresses 1314 */ 1315 1316 sbmac_setmulti(sc); 1317 1318 /* 1319 * If channel was in promiscuous mode before, turn that on 1320 */ 1321 1322 if (sc->sc_ethercom.ec_if.if_flags & IFF_PROMISC) 1323 sbmac_promiscuous_mode(sc, true); 1324 1325 /* 1326 * Turn on the once-per-second timer 1327 */ 1328 1329 callout_reset(&(sc->sc_tick_ch), hz, sbmac_tick, sc); 1330 } 1331 1332 /* 1333 * SBMAC_CHANNEL_STOP(s) 1334 * 1335 * Stop packet processing on this MAC. 1336 * 1337 * Input parameters: 1338 * sc - sbmac structure 1339 * 1340 * Return value: 1341 * nothing 1342 */ 1343 1344 static void 1345 sbmac_channel_stop(struct sbmac_softc *sc) 1346 { 1347 uint64_t ctl; 1348 1349 /* don't do this if already stopped */ 1350 1351 if (sc->sbm_state == sbmac_state_off) 1352 return; 1353 1354 /* don't accept any packets, disable all interrupts */ 1355 1356 SBMAC_WRITECSR(sc->sbm_rxfilter, 0); 1357 SBMAC_WRITECSR(sc->sbm_imr, 0); 1358 1359 /* Turn off ticker */ 1360 1361 callout_stop(&(sc->sc_tick_ch)); 1362 1363 /* turn off receiver and transmitter */ 1364 1365 ctl = SBMAC_READCSR(sc->sbm_macenable); 1366 ctl &= ~(M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0); 1367 SBMAC_WRITECSR(sc->sbm_macenable, ctl); 1368 1369 /* We're stopped now. 
*/ 1370 1371 sc->sbm_state = sbmac_state_off; 1372 sc->sc_ethercom.ec_if.if_flags &= ~IFF_RUNNING; 1373 1374 /* Empty the receive and transmit rings */ 1375 1376 sbdma_emptyring(&(sc->sbm_rxdma)); 1377 sbdma_emptyring(&(sc->sbm_txdma)); 1378 } 1379 1380 /* 1381 * SBMAC_SET_CHANNEL_STATE(state) 1382 * 1383 * Set the channel's state ON or OFF 1384 * 1385 * Input parameters: 1386 * state - new state 1387 * 1388 * Return value: 1389 * old state 1390 */ 1391 1392 static sbmac_state_t 1393 sbmac_set_channel_state(struct sbmac_softc *sc, sbmac_state_t state) 1394 { 1395 sbmac_state_t oldstate = sc->sbm_state; 1396 1397 /* 1398 * If same as previous state, return 1399 */ 1400 1401 if (state == oldstate) 1402 return oldstate; 1403 1404 /* 1405 * If new state is ON, turn channel on 1406 */ 1407 1408 if (state == sbmac_state_on) 1409 sbmac_channel_start(sc); 1410 else 1411 sbmac_channel_stop(sc); 1412 1413 /* 1414 * Return previous state 1415 */ 1416 1417 return oldstate; 1418 } 1419 1420 /* 1421 * SBMAC_PROMISCUOUS_MODE(sc, enabled) 1422 * 1423 * Turn on or off promiscuous mode 1424 * 1425 * Input parameters: 1426 * sc - softc 1427 * enabled - true to turn on, false to turn off 1428 * 1429 * Return value: 1430 * nothing 1431 */ 1432 1433 static void 1434 sbmac_promiscuous_mode(struct sbmac_softc *sc, bool enabled) 1435 { 1436 uint64_t reg; 1437 1438 if (sc->sbm_state != sbmac_state_on) 1439 return; 1440 1441 if (enabled) { 1442 reg = SBMAC_READCSR(sc->sbm_rxfilter); 1443 reg |= M_MAC_ALLPKT_EN; 1444 SBMAC_WRITECSR(sc->sbm_rxfilter, reg); 1445 } else { 1446 reg = SBMAC_READCSR(sc->sbm_rxfilter); 1447 reg &= ~M_MAC_ALLPKT_EN; 1448 SBMAC_WRITECSR(sc->sbm_rxfilter, reg); 1449 } 1450 } 1451 1452 /* 1453 * SBMAC_INIT_AND_START(sc) 1454 * 1455 * Stop the channel and restart it. This is generally used 1456 * when we have to do something to the channel that requires 1457 * a swift kick. 1458 * 1459 * Input parameters: 1460 * sc - softc 1461 */ 1462 1463 static void 1464 sbmac_init_and_start(struct sbmac_softc *sc) 1465 { 1466 int s; 1467 1468 s = splnet(); 1469 1470 mii_pollstat(&sc->sc_mii); /* poll phy for current speed */ 1471 sbmac_mii_statchg(&sc->sc_ethercom.ec_if); /* set state to new speed */ 1472 sbmac_set_channel_state(sc, sbmac_state_on); 1473 1474 splx(s); 1475 } 1476 1477 /* 1478 * SBMAC_ADDR2REG(ptr) 1479 * 1480 * Convert six bytes into the 64-bit register value that 1481 * we typically write into the SBMAC's address/mcast registers 1482 * 1483 * Input parameters: 1484 * ptr - pointer to 6 bytes 1485 * 1486 * Return value: 1487 * register value 1488 */ 1489 1490 static uint64_t 1491 sbmac_addr2reg(u_char *ptr) 1492 { 1493 uint64_t reg = 0; 1494 1495 ptr += 6; 1496 1497 reg |= (uint64_t) *(--ptr); 1498 reg <<= 8; 1499 reg |= (uint64_t) *(--ptr); 1500 reg <<= 8; 1501 reg |= (uint64_t) *(--ptr); 1502 reg <<= 8; 1503 reg |= (uint64_t) *(--ptr); 1504 reg <<= 8; 1505 reg |= (uint64_t) *(--ptr); 1506 reg <<= 8; 1507 reg |= (uint64_t) *(--ptr); 1508 1509 return reg; 1510 } 1511 1512 /* 1513 * SBMAC_SET_SPEED(sc, speed) 1514 * 1515 * Configure LAN speed for the specified MAC. 1516 * Warning: must be called when MAC is off! 
1517 * 1518 * Input parameters: 1519 * sc - sbmac structure 1520 * speed - speed to set MAC to (see sbmac_speed_t enum) 1521 * 1522 * Return value: 1523 * true if successful 1524 * false indicates invalid parameters 1525 */ 1526 1527 static bool 1528 sbmac_set_speed(struct sbmac_softc *sc, sbmac_speed_t speed) 1529 { 1530 uint64_t cfg; 1531 uint64_t framecfg; 1532 1533 /* 1534 * Save new current values 1535 */ 1536 1537 sc->sbm_speed = speed; 1538 1539 if (sc->sbm_state != sbmac_state_off) 1540 panic("sbmac_set_speed while MAC not off"); 1541 1542 /* 1543 * Read current register values 1544 */ 1545 1546 cfg = SBMAC_READCSR(sc->sbm_maccfg); 1547 framecfg = SBMAC_READCSR(sc->sbm_framecfg); 1548 1549 /* 1550 * Mask out the stuff we want to change 1551 */ 1552 1553 cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL); 1554 framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH | 1555 M_MAC_SLOT_SIZE); 1556 1557 /* 1558 * Now add in the new bits 1559 */ 1560 1561 switch (speed) { 1562 case sbmac_speed_10: 1563 framecfg |= V_MAC_IFG_RX_10 | 1564 V_MAC_IFG_TX_10 | 1565 K_MAC_IFG_THRSH_10 | 1566 V_MAC_SLOT_SIZE_10; 1567 cfg |= V_MAC_SPEED_SEL_10MBPS; 1568 break; 1569 1570 case sbmac_speed_100: 1571 framecfg |= V_MAC_IFG_RX_100 | 1572 V_MAC_IFG_TX_100 | 1573 V_MAC_IFG_THRSH_100 | 1574 V_MAC_SLOT_SIZE_100; 1575 cfg |= V_MAC_SPEED_SEL_100MBPS ; 1576 break; 1577 1578 case sbmac_speed_1000: 1579 framecfg |= V_MAC_IFG_RX_1000 | 1580 V_MAC_IFG_TX_1000 | 1581 V_MAC_IFG_THRSH_1000 | 1582 V_MAC_SLOT_SIZE_1000; 1583 cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN; 1584 break; 1585 1586 case sbmac_speed_auto: /* XXX not implemented */ 1587 /* fall through */ 1588 default: 1589 return false; 1590 } 1591 1592 /* 1593 * Send the bits back to the hardware 1594 */ 1595 1596 SBMAC_WRITECSR(sc->sbm_framecfg, framecfg); 1597 SBMAC_WRITECSR(sc->sbm_maccfg, cfg); 1598 1599 return true; 1600 } 1601 1602 /* 1603 * SBMAC_SET_DUPLEX(sc, duplex, fc) 1604 * 1605 * Set Ethernet duplex and flow control options for this MAC 1606 * Warning: must be called when MAC is off! 
1607 * 1608 * Input parameters: 1609 * sc - sbmac structure 1610 * duplex - duplex setting (see sbmac_duplex_t) 1611 * fc - flow control setting (see sbmac_fc_t) 1612 * 1613 * Return value: 1614 * true if ok 1615 * false if an invalid parameter combination was specified 1616 */ 1617 1618 static bool 1619 sbmac_set_duplex(struct sbmac_softc *sc, sbmac_duplex_t duplex, sbmac_fc_t fc) 1620 { 1621 uint64_t cfg; 1622 1623 /* 1624 * Save new current values 1625 */ 1626 1627 sc->sbm_duplex = duplex; 1628 sc->sbm_fc = fc; 1629 1630 if (sc->sbm_state != sbmac_state_off) 1631 panic("sbmac_set_duplex while MAC not off"); 1632 1633 /* 1634 * Read current register values 1635 */ 1636 1637 cfg = SBMAC_READCSR(sc->sbm_maccfg); 1638 1639 /* 1640 * Mask off the stuff we're about to change 1641 */ 1642 1643 cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN); 1644 1645 switch (duplex) { 1646 case sbmac_duplex_half: 1647 switch (fc) { 1648 case sbmac_fc_disabled: 1649 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED; 1650 break; 1651 1652 case sbmac_fc_collision: 1653 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED; 1654 break; 1655 1656 case sbmac_fc_carrier: 1657 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR; 1658 break; 1659 1660 case sbmac_fc_auto: /* XXX not implemented */ 1661 /* fall through */ 1662 case sbmac_fc_frame: /* not valid in half duplex */ 1663 default: /* invalid selection */ 1664 panic("%s: invalid half duplex fc selection %d", 1665 device_xname(sc->sc_dev), fc); 1666 return false; 1667 } 1668 break; 1669 1670 case sbmac_duplex_full: 1671 switch (fc) { 1672 case sbmac_fc_disabled: 1673 cfg |= V_MAC_FC_CMD_DISABLED; 1674 break; 1675 1676 case sbmac_fc_frame: 1677 cfg |= V_MAC_FC_CMD_ENABLED; 1678 break; 1679 1680 case sbmac_fc_collision: /* not valid in full duplex */ 1681 case sbmac_fc_carrier: /* not valid in full duplex */ 1682 case sbmac_fc_auto: /* XXX not implemented */ 1683 /* fall through */ 1684 default: 1685 panic("%s: invalid full duplex fc selection %d", 1686 device_xname(sc->sc_dev), fc); 1687 return false; 1688 } 1689 break; 1690 1691 default: 1692 /* fall through */ 1693 case sbmac_duplex_auto: 1694 panic("%s: bad duplex %d", device_xname(sc->sc_dev), duplex); 1695 /* XXX not implemented */ 1696 break; 1697 } 1698 1699 /* 1700 * Send the bits back to the hardware 1701 */ 1702 1703 SBMAC_WRITECSR(sc->sbm_maccfg, cfg); 1704 1705 return true; 1706 } 1707 1708 /* 1709 * SBMAC_INTR() 1710 * 1711 * Interrupt handler for MAC interrupts 1712 * 1713 * Input parameters: 1714 * MAC structure 1715 * 1716 * Return value: 1717 * nothing 1718 */ 1719 1720 /* ARGSUSED */ 1721 static void 1722 sbmac_intr(void *xsc, uint32_t status, vaddr_t pc) 1723 { 1724 struct sbmac_softc *sc = xsc; 1725 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1726 uint64_t isr; 1727 1728 for (;;) { 1729 1730 /* 1731 * Read the ISR (this clears the bits in the real register) 1732 */ 1733 1734 isr = SBMAC_READCSR(sc->sbm_isr); 1735 1736 if (isr == 0) 1737 break; 1738 1739 /* 1740 * Transmits on channel 0 1741 */ 1742 1743 if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) { 1744 sbdma_tx_process(sc, &(sc->sbm_txdma)); 1745 SBMAC_EVCNT_INCR(sc->sbm_ev_txintr); 1746 } 1747 1748 /* 1749 * Receives on channel 0 1750 */ 1751 1752 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { 1753 sbdma_rx_process(sc, &(sc->sbm_rxdma)); 1754 SBMAC_EVCNT_INCR(sc->sbm_ev_rxintr); 1755 } 1756 } 1757 1758 /* try to get more packets going */ 1759 sbmac_start(ifp); 1760 } 1761 1762 1763 /* 1764 * SBMAC_START(ifp) 1765 * 1766 * Start output on the 
specified interface.  Basically, we
 * queue as many buffers as we can until the ring fills up, or
 * we run off the end of the queue, whichever comes first.
 *
 * Input parameters:
 *	ifp - interface
 *
 * Return value:
 *	nothing
 */

static void
sbmac_start(struct ifnet *ifp)
{
	struct sbmac_softc *sc;
	struct mbuf *m_head = NULL;
	int rv;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	sc = ifp->if_softc;

	for (;;) {

		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Put the buffer on the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */

		rv = sbdma_add_txbuffer(&(sc->sbm_txdma), m_head);

		if (rv == 0) {
			/*
			 * If there's a BPF listener, bounce a copy of this
			 * frame to it.
			 */
			bpf_mtap(ifp, m_head);
			if (!sc->sbm_pass3_dma) {
				/*
				 * sbdma_add_txbuffer copied the data into a
				 * new mbuf, so the original can be freed now.
				 * On pass3 parts the chain itself is on the
				 * ring and is freed in sbdma_tx_process.
				 */
				m_freem(m_head);
			}
		} else {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}
}

/*
 * SBMAC_SETMULTI(sc)
 *
 * Reprogram the multicast table into the hardware, given
 * the list of multicasts associated with the interface
 * structure.
 *
 * Input parameters:
 *	sc - softc
 *
 * Return value:
 *	nothing
 */

static void
sbmac_setmulti(struct sbmac_softc *sc)
{
	struct ifnet *ifp;
	uint64_t reg;
	sbmac_port_t port;
	int idx;
	struct ether_multi *enm;
	struct ether_multistep step;

	ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Clear out the entire multicast table.  We do this by nuking
	 * the entire hash table and all the direct matches except
	 * the first one, which is used for our station address.
	 */

	for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
		port = PKSEG1(sc->sbm_base +
		    R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)));
		SBMAC_WRITECSR(port, 0);
	}

	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
		port = PKSEG1(sc->sbm_base +
		    R_MAC_HASH_BASE+(idx*sizeof(uint64_t)));
		SBMAC_WRITECSR(port, 0);
	}

	/*
	 * Clear the filter to say we don't want any multicasts.
	 */

	reg = SBMAC_READCSR(sc->sbm_rxfilter);
	reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
	SBMAC_WRITECSR(sc->sbm_rxfilter, reg);

	if (ifp->if_flags & IFF_ALLMULTI) {
		/*
		 * Enable ALL multicasts.  Do this by inverting the
		 * multicast enable bit.
		 */
		reg = SBMAC_READCSR(sc->sbm_rxfilter);
		reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
		SBMAC_WRITECSR(sc->sbm_rxfilter, reg);
		return;
	}

	/*
	 * Program new multicast entries.  For now, only use the
In the future we'll need to use the 1891 * hash filter if the perfect filter overflows 1892 */ 1893 1894 /* 1895 * XXX only using perfect filter for now, need to use hash 1896 * XXX if the table overflows 1897 */ 1898 1899 idx = 1; /* skip station address */ 1900 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm); 1901 while ((enm != NULL) && (idx < MAC_ADDR_COUNT)) { 1902 reg = sbmac_addr2reg(enm->enm_addrlo); 1903 port = PKSEG1(sc->sbm_base + 1904 R_MAC_ADDR_BASE+(idx*sizeof(uint64_t))); 1905 SBMAC_WRITECSR(port, reg); 1906 idx++; 1907 ETHER_NEXT_MULTI(step, enm); 1908 } 1909 1910 /* 1911 * Enable the "accept multicast bits" if we programmed at least one 1912 * multicast. 1913 */ 1914 1915 if (idx > 1) { 1916 reg = SBMAC_READCSR(sc->sbm_rxfilter); 1917 reg |= M_MAC_MCAST_EN; 1918 SBMAC_WRITECSR(sc->sbm_rxfilter, reg); 1919 } 1920 } 1921 1922 /* 1923 * SBMAC_ETHER_IOCTL(ifp, cmd, data) 1924 * 1925 * Generic IOCTL requests for this interface. The basic 1926 * stuff is handled here for bringing the interface up, 1927 * handling multicasts, etc. 1928 * 1929 * Input parameters: 1930 * ifp - interface structure 1931 * cmd - command code 1932 * data - pointer to data 1933 * 1934 * Return value: 1935 * return value (0 is success) 1936 */ 1937 1938 static int 1939 sbmac_ether_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1940 { 1941 struct ifaddr *ifa = (struct ifaddr *) data; 1942 struct sbmac_softc *sc = ifp->if_softc; 1943 1944 switch (cmd) { 1945 case SIOCINITIFADDR: 1946 ifp->if_flags |= IFF_UP; 1947 1948 switch (ifa->ifa_addr->sa_family) { 1949 #ifdef INET 1950 case AF_INET: 1951 sbmac_init_and_start(sc); 1952 arp_ifinit(ifp, ifa); 1953 break; 1954 #endif 1955 default: 1956 sbmac_init_and_start(sc); 1957 break; 1958 } 1959 break; 1960 1961 default: 1962 return ENOTTY; 1963 } 1964 1965 return (0); 1966 } 1967 1968 /* 1969 * SBMAC_IOCTL(ifp, cmd, data) 1970 * 1971 * Main IOCTL handler - dispatches to other IOCTLs for various 1972 * types of requests. 1973 * 1974 * Input parameters: 1975 * ifp - interface pointer 1976 * cmd - command code 1977 * data - pointer to argument data 1978 * 1979 * Return value: 1980 * 0 if ok 1981 * else error code 1982 */ 1983 1984 static int 1985 sbmac_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1986 { 1987 struct sbmac_softc *sc = ifp->if_softc; 1988 struct ifreq *ifr = (struct ifreq *) data; 1989 int s, error = 0; 1990 1991 s = splnet(); 1992 1993 switch (cmd) { 1994 case SIOCINITIFADDR: 1995 error = sbmac_ether_ioctl(ifp, cmd, data); 1996 break; 1997 case SIOCSIFMTU: 1998 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) 1999 error = EINVAL; 2000 else if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET) 2001 /* XXX Program new MTU here */ 2002 error = 0; 2003 break; 2004 case SIOCSIFFLAGS: 2005 if ((error = ifioctl_common(ifp, cmd, data)) != 0) 2006 break; 2007 if (ifp->if_flags & IFF_UP) { 2008 /* 2009 * If only the state of the PROMISC flag changed, 2010 * just tweak the hardware registers. 
2011 */ 2012 if ((ifp->if_flags & IFF_RUNNING) && 2013 (ifp->if_flags & IFF_PROMISC)) { 2014 /* turn on promiscuous mode */ 2015 sbmac_promiscuous_mode(sc, true); 2016 } else if (ifp->if_flags & IFF_RUNNING && 2017 !(ifp->if_flags & IFF_PROMISC)) { 2018 /* turn off promiscuous mode */ 2019 sbmac_promiscuous_mode(sc, false); 2020 } else 2021 sbmac_set_channel_state(sc, sbmac_state_on); 2022 } else { 2023 if (ifp->if_flags & IFF_RUNNING) 2024 sbmac_set_channel_state(sc, sbmac_state_off); 2025 } 2026 2027 sc->sbm_if_flags = ifp->if_flags; 2028 error = 0; 2029 break; 2030 2031 case SIOCADDMULTI: 2032 case SIOCDELMULTI: 2033 case SIOCSIFMEDIA: 2034 case SIOCGIFMEDIA: 2035 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) { 2036 error = 0; 2037 if (ifp->if_flags & IFF_RUNNING) 2038 sbmac_setmulti(sc); 2039 } 2040 break; 2041 default: 2042 error = ether_ioctl(ifp, cmd, data); 2043 break; 2044 } 2045 2046 (void)splx(s); 2047 2048 return(error); 2049 } 2050 2051 /* 2052 * SBMAC_IFMEDIA_UPD(ifp) 2053 * 2054 * Configure an appropriate media type for this interface, 2055 * given the data in the interface structure 2056 * 2057 * Input parameters: 2058 * ifp - interface 2059 * 2060 * Return value: 2061 * 0 if ok 2062 * else error code 2063 */ 2064 2065 /* 2066 * SBMAC_IFMEDIA_STS(ifp, ifmr) 2067 * 2068 * Report current media status (used by ifconfig, for example) 2069 * 2070 * Input parameters: 2071 * ifp - interface structure 2072 * ifmr - media request structure 2073 * 2074 * Return value: 2075 * nothing 2076 */ 2077 2078 /* 2079 * SBMAC_WATCHDOG(ifp) 2080 * 2081 * Called periodically to make sure we're still happy. 2082 * 2083 * Input parameters: 2084 * ifp - interface structure 2085 * 2086 * Return value: 2087 * nothing 2088 */ 2089 2090 static void 2091 sbmac_watchdog(struct ifnet *ifp) 2092 { 2093 2094 /* XXX do something */ 2095 } 2096 2097 /* 2098 * One second timer, used to tick MII. 2099 */ 2100 static void 2101 sbmac_tick(void *arg) 2102 { 2103 struct sbmac_softc *sc = arg; 2104 int s; 2105 2106 s = splnet(); 2107 mii_tick(&sc->sc_mii); 2108 splx(s); 2109 2110 callout_reset(&sc->sc_tick_ch, hz, sbmac_tick, sc); 2111 } 2112 2113 2114 /* 2115 * SBMAC_MATCH(parent, match, aux) 2116 * 2117 * Part of the config process - see if this device matches the 2118 * info about what we expect to find on the bus. 2119 * 2120 * Input parameters: 2121 * parent - parent bus structure 2122 * match - 2123 * aux - bus-specific args 2124 * 2125 * Return value: 2126 * 1 if we match 2127 * 0 if we don't match 2128 */ 2129 2130 static int 2131 sbmac_match(device_t parent, cfdata_t match, void *aux) 2132 { 2133 struct sbobio_attach_args *sa = aux; 2134 2135 /* 2136 * Make sure it's a MAC 2137 */ 2138 if (sa->sa_locs.sa_type != SBOBIO_DEVTYPE_MAC) 2139 return 0; 2140 2141 /* 2142 * Yup, it is. 
 */

	return 1;
}

/*
 * SBMAC_PARSE_XDIGIT(str)
 *
 * Parse a hex digit, returning its value
 *
 * Input parameters:
 *	str - character
 *
 * Return value:
 *	hex value, or -1 if invalid
 */

static int
sbmac_parse_xdigit(char str)
{
	int digit;

	if ((str >= '0') && (str <= '9'))
		digit = str - '0';
	else if ((str >= 'a') && (str <= 'f'))
		digit = str - 'a' + 10;
	else if ((str >= 'A') && (str <= 'F'))
		digit = str - 'A' + 10;
	else
		digit = -1;

	return digit;
}

/*
 * SBMAC_PARSE_HWADDR(str, hwaddr)
 *
 * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
 * Ethernet address.
 *
 * Input parameters:
 *	str - string
 *	hwaddr - pointer to hardware address
 *
 * Return value:
 *	0 if ok, else -1
 */

static int
sbmac_parse_hwaddr(const char *str, u_char *hwaddr)
{
	int digit1, digit2;
	int idx = 6;

	while (*str && (idx > 0)) {
		digit1 = sbmac_parse_xdigit(*str);
		if (digit1 < 0)
			return -1;
		str++;
		if (!*str)
			return -1;

		if ((*str == ':') || (*str == '-')) {
			digit2 = digit1;
			digit1 = 0;
		} else {
			digit2 = sbmac_parse_xdigit(*str);
			if (digit2 < 0)
				return -1;
			str++;
		}

		*hwaddr++ = (digit1 << 4) | digit2;
		idx--;

		if (*str == '-')
			str++;
		if (*str == ':')
			str++;
	}
	return 0;
}

/*
 * SBMAC_ATTACH(parent, self, aux)
 *
 * Attach routine - init hardware and hook ourselves into NetBSD.
 *
 * Input parameters:
 *	parent - parent bus device
 *	self - our device
 *	aux - attach data
 *
 * Return value:
 *	nothing
 */

static void
sbmac_attach(device_t parent, device_t self, void *aux)
{
	struct sbmac_softc * const sc = device_private(self);
	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
	struct sbobio_attach_args * const sa = aux;
	u_char *eaddr;
	static int unit = 0;	/* XXX */
	uint64_t ea_reg;
	int idx;

	sc->sc_dev = self;

	/* Determine controller base address */

	sc->sbm_base = sa->sa_base + sa->sa_locs.sa_offset;

	eaddr = sc->sbm_hwaddr;

	/*
	 * Initialize context (get pointers to registers and stuff), then
	 * allocate the memory for the descriptor tables.
	 */

	sbmac_initctx(sc);

	callout_init(&(sc->sc_tick_ch), 0);

	/*
	 * Read the ethernet address.  The firmware left this programmed
	 * for us in the ethernet address register for each mac.
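	 * If the register reads back as all zeroes, a default address
	 * derived from SBMAC_DEFAULT_HWADDR and the unit number is
	 * substituted below.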
2271 */ 2272 2273 ea_reg = SBMAC_READCSR(PKSEG1(sc->sbm_base + R_MAC_ETHERNET_ADDR)); 2274 for (idx = 0; idx < 6; idx++) { 2275 eaddr[idx] = (uint8_t) (ea_reg & 0xFF); 2276 ea_reg >>= 8; 2277 } 2278 2279 #define SBMAC_DEFAULT_HWADDR "40:00:00:00:01:00" 2280 if (eaddr[0] == 0 && eaddr[1] == 0 && eaddr[2] == 0 && 2281 eaddr[3] == 0 && eaddr[4] == 0 && eaddr[5] == 0) { 2282 sbmac_parse_hwaddr(SBMAC_DEFAULT_HWADDR, eaddr); 2283 eaddr[5] = unit; 2284 } 2285 2286 #ifdef SBMAC_ETH0_HWADDR 2287 if (unit == 0) 2288 sbmac_parse_hwaddr(SBMAC_ETH0_HWADDR, eaddr); 2289 #endif 2290 #ifdef SBMAC_ETH1_HWADDR 2291 if (unit == 1) 2292 sbmac_parse_hwaddr(SBMAC_ETH1_HWADDR, eaddr); 2293 #endif 2294 #ifdef SBMAC_ETH2_HWADDR 2295 if (unit == 2) 2296 sbmac_parse_hwaddr(SBMAC_ETH2_HWADDR, eaddr); 2297 #endif 2298 unit++; 2299 2300 /* 2301 * Display Ethernet address (this is called during the config process 2302 * so we need to finish off the config message that was being displayed) 2303 */ 2304 aprint_normal(": Ethernet%s\n", 2305 sc->sbm_pass3_dma ? ", using unaligned tx DMA" : ""); 2306 aprint_normal_dev(self, "Ethernet address: %s\n", ether_sprintf(eaddr)); 2307 2308 2309 /* 2310 * Set up ifnet structure 2311 */ 2312 2313 ifp->if_softc = sc; 2314 memcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 2315 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | 2316 IFF_NOTRAILERS; 2317 ifp->if_ioctl = sbmac_ioctl; 2318 ifp->if_start = sbmac_start; 2319 ifp->if_watchdog = sbmac_watchdog; 2320 ifp->if_snd.ifq_maxlen = SBMAC_MAX_TXDESCR - 1; 2321 2322 /* 2323 * Set up ifmedia support. 2324 */ 2325 2326 /* 2327 * Initialize MII/media info. 2328 */ 2329 sc->sc_mii.mii_ifp = ifp; 2330 sc->sc_mii.mii_readreg = sbmac_mii_readreg; 2331 sc->sc_mii.mii_writereg = sbmac_mii_writereg; 2332 sc->sc_mii.mii_statchg = sbmac_mii_statchg; 2333 sc->sc_ethercom.ec_mii = &sc->sc_mii; 2334 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, 2335 ether_mediastatus); 2336 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 2337 MII_OFFSET_ANY, 0); 2338 2339 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 2340 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 2341 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 2342 } else { 2343 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 2344 } 2345 2346 2347 /* 2348 * map/route interrupt 2349 */ 2350 2351 sc->sbm_intrhand = cpu_intr_establish(sa->sa_locs.sa_intr[0], IPL_NET, 2352 sbmac_intr, sc); 2353 2354 /* 2355 * Call MI attach routines. 2356 */ 2357 if_attach(ifp); 2358 ether_ifattach(ifp, eaddr); 2359 } 2360