/*	$OpenBSD: if_mc.c,v 1.35 2024/09/06 10:54:08 jsg Exp $	*/
/*	$NetBSD: if_mc.c,v 1.9.16.1 2006/06/21 14:53:13 yamt Exp $	*/

/*-
 * Copyright (c) 1997 David Huang <khym@bga.com>
 * All rights reserved.
 *
 * Portions of this code are based on code by Denton Gentry <denny1@home.com>
 * and Yanagisawa Takeshi <yanagisw@aa.ap.titech.ac.jp>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * AMD AM79C940 (MACE) driver with DBDMA bus attachment and DMA routines
 * for onboard ethernet found on most old world macs.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/ofw/openfirm.h>
#include <machine/pio.h>
#include <machine/bus.h>
#include <machine/autoconf.h>

#include <macppc/dev/dbdma.h>

#define MC_REGSPACING           16
#define MC_REGSIZE              MACE_NREGS * MC_REGSPACING
#define MACE_REG(x)             ((x)*MC_REGSPACING)
#define MACE_BUFLEN             2048
#define MACE_TXBUFS             2
#define MACE_RXBUFS             8

#define MC_RXDMABUFS            4

#define MACE_BUFSZ              ((MACE_RXBUFS + MACE_TXBUFS + 2) * MACE_BUFLEN)

#define NIC_GET(sc, reg)        (in8rb(sc->sc_reg + MACE_REG(reg)))

#define NIC_PUT(sc, reg, val)   (out8rb(sc->sc_reg + MACE_REG(reg), (val)))

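/*
 * Buffer layout (as set up in mc_attach and used by the DMA code below):
 * the transmit and receive buffers share a single bus_dma region of
 * MACE_BUFSZ bytes, with MACE_TXBUFS transmit buffers of MACE_BUFLEN
 * bytes followed by the receive buffers.  The receive DBDMA ring uses
 * MC_RXDMABUFS of those buffers; mc_dmaintr expects each received frame
 * to be followed in its buffer by the four RFS status bytes (byte count,
 * status, runt packet count, receive collision count).
 */
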
/*
 * AMD MACE (Am79C940) register definitions
 */
#define MACE_RCVFIFO    0       /* Receive FIFO [15-00] (read only) */
#define MACE_XMTFIFO    1       /* Transmit FIFO [15-00] (write only) */
#define MACE_XMTFC      2       /* Transmit Frame Control (read/write) */
#define MACE_XMTFS      3       /* Transmit Frame Status (read only) */
#define MACE_XMTRC      4       /* Transmit Retry Count (read only) */
#define MACE_RCVFC      5       /* Receive Frame Control (read/write) */
#define MACE_RCVFS      6       /* Receive Frame Status (4 bytes) (read only) */
#define MACE_FIFOFC     7       /* FIFO Frame Count (read only) */
#define MACE_IR         8       /* Interrupt Register (read only) */
#define MACE_IMR        9       /* Interrupt Mask Register (read/write) */
#define MACE_PR         10      /* Poll Register (read only) */
#define MACE_BIUCC      11      /* BIU Configuration Control (read/write) */
#define MACE_FIFOCC     12      /* FIFO Configuration Control (read/write) */
#define MACE_MACCC      13      /* MAC Configuration Control (read/write) */
#define MACE_PLSCC      14      /* PLS Configuration Control (read/write) */
#define MACE_PHYCC      15      /* PHY Configuration Control (read/write) */
#define MACE_CHIPIDL    16      /* Chip ID Register [07-00] (read only) */
#define MACE_CHIPIDH    17      /* Chip ID Register [15-08] (read only) */
#define MACE_IAC        18      /* Internal Address Configuration (read/write) */
/*      RESERVED        19         Reserved (read/write as 0) */
#define MACE_LADRF      20      /* Logical Address Filter (8 bytes) (read/write) */
#define MACE_PADR       21      /* Physical Address (6 bytes) (read/write) */
/*      RESERVED        22         Reserved (read/write as 0) */
/*      RESERVED        23         Reserved (read/write as 0) */
#define MACE_MPC        24      /* Missed Packet Count (read only) */
/*      RESERVED        25         Reserved (read/write as 0) */
#define MACE_RNTPC      26      /* Runt Packet Count (read only) */
#define MACE_RCVCC      27      /* Receive Collision Count (read only) */
/*      RESERVED        28         Reserved (read/write as 0) */
#define MACE_UTR        29      /* User Test Register (read/write) */
#define MACE_RTR1       30      /* Reserved Test Register 1 (read/write as 0) */
#define MACE_RTR2       31      /* Reserved Test Register 2 (read/write as 0) */

#define MACE_NREGS      32

/* 2: Transmit Frame Control (XMTFC) */
#define DRTRY           0x80    /* Disable Retry */
#define DXMTFCS         0x08    /* Disable Transmit FCS */
#define APADXMT         0x01    /* Auto Pad Transmit */

/* 3: Transmit Frame Status (XMTFS) */
#define XMTSV           0x80    /* Transmit Status Valid */
#define UFLO            0x40    /* Underflow */
#define LCOL            0x20    /* Late Collision */
#define MORE            0x10    /* More than one retry needed */
#define ONE             0x08    /* Exactly one retry needed */
#define DEFER           0x04    /* Transmission deferred */
#define LCAR            0x02    /* Loss of Carrier */
#define RTRY            0x01    /* Retry Error */

/* 4: Transmit Retry Count (XMTRC) */
#define EXDEF           0x80    /* Excessive Defer */
#define XMTRC           0x0f    /* Transmit Retry Count */

/* 5: Receive Frame Control (RCVFC) */
#define LLRCV           0x08    /* Low Latency Receive */
#define MR              0x04    /* Match/Reject */
#define ASTRPRCV        0x01    /* Auto Strip Receive */

/* 6: Receive Frame Status (RCVFS) */
/* 4 byte register; read 4 times to get all of the bytes */
/* Read 1: RFS0 - Receive Message Byte Count [7-0] (RCVCNT) */

/* Read 2: RFS1 - Receive Status (RCVSTS) */
#define OFLO            0x80    /* Overflow flag */
#define CLSN            0x40    /* Collision flag */
#define FRAM            0x20    /* Framing Error flag */
#define FCS             0x10    /* FCS Error flag */
#define RCVCNT          0x0f    /* Receive Message Byte Count [11-8] */

/* Read 3: RFS2 - Runt Packet Count (RNTPC) [7-0] */

/* Read 4: RFS3 - Receive Collision Count (RCVCC) [7-0] */

/* 7: FIFO Frame Count (FIFOFC) */
#define RCVFC           0xf0    /* Receive Frame Count */
#define XMTFC           0x0f    /* Transmit Frame Count */

/* 8: Interrupt Register (IR) */
#define JAB             0x80    /* Jabber Error */
#define BABL            0x40    /* Babble Error */
#define CERR            0x20    /* Collision Error */
#define RCVCCO          0x10    /* Receive Collision Count Overflow */
#define RNTPCO          0x08    /* Runt Packet Count Overflow */
#define MPCO            0x04    /* Missed Packet Count Overflow */
#define RCVINT          0x02    /* Receive Interrupt */
#define XMTINT          0x01    /* Transmit Interrupt */

/* 9: Interrupt Mask Register (IMR) */
#define JABM            0x80    /* Jabber Error Mask */
#define BABLM           0x40    /* Babble Error Mask */
#define CERRM           0x20    /* Collision Error Mask */
#define RCVCCOM         0x10    /* Receive Collision Count Overflow Mask */
#define RNTPCOM         0x08    /* Runt Packet Count Overflow Mask */
#define MPCOM           0x04    /* Missed Packet Count Overflow Mask */
#define RCVINTM         0x02    /* Receive Interrupt Mask */
#define XMTINTM         0x01    /* Transmit Interrupt Mask */

/* 10: Poll Register (PR) */
#define XMTSV           0x80    /* Transmit Status Valid */
#define TDTREQ          0x40    /* Transmit Data Transfer Request */
#define RDTREQ          0x20    /* Receive Data Transfer Request */

/* 11: BIU Configuration Control (BIUCC) */
#define BSWP            0x40    /* Byte Swap */
#define XMTSP           0x30    /* Transmit Start Point */
#define XMTSP_4         0x00    /* 4 bytes */
#define XMTSP_16        0x10    /* 16 bytes */
#define XMTSP_64        0x20    /* 64 bytes */
#define XMTSP_112       0x30    /* 112 bytes */
#define SWRST           0x01    /* Software Reset */

/* 12: FIFO Configuration Control (FIFOCC) */
#define XMTFW           0xc0    /* Transmit FIFO Watermark */
#define XMTFW_8         0x00    /* 8 write cycles */
#define XMTFW_16        0x40    /* 16 write cycles */
#define XMTFW_32        0x80    /* 32 write cycles */
#define RCVFW           0x30    /* Receive FIFO Watermark */
#define RCVFW_16        0x00    /* 16 bytes */
#define RCVFW_32        0x10    /* 32 bytes */
#define RCVFW_64        0x20    /* 64 bytes */
#define XMTFWU          0x08    /* Transmit FIFO Watermark Update */
#define RCVFWU          0x04    /* Receive FIFO Watermark Update */
#define XMTBRST         0x02    /* Transmit Burst */
#define RCVBRST         0x01    /* Receive Burst */

/* 13: MAC Configuration (MACCC) */
#define PROM            0x80    /* Promiscuous */
#define DXMT2PD         0x40    /* Disable Transmit Two Part Deferral */
#define EMBA            0x20    /* Enable Modified Back-off Algorithm */
#define DRCVPA          0x08    /* Disable Receive Physical Address */
#define DRCVBC          0x04    /* Disable Receive Broadcast */
#define ENXMT           0x02    /* Enable Transmit */
#define ENRCV           0x01    /* Enable Receive */

/* 14: PLS Configuration Control (PLSCC) */
#define XMTSEL          0x08    /* Transmit Mode Select */
#define PORTSEL         0x06    /* Port Select */
#define PORTSEL_AUI     0x00    /* Select AUI */
#define PORTSEL_10BT    0x02    /* Select 10BASE-T */
#define PORTSEL_DAI     0x04    /* Select DAI port */
#define PORTSEL_GPSI    0x06    /* Select GPSI */
#define ENPLSIO         0x01    /* Enable PLS I/O */

/* 15: PHY Configuration (PHYCC) */
#define LNKFL           0x80    /* Link Fail */
#define DLNKTST         0x40    /* Disable Link Test */
#define REVPOL          0x20    /* Reversed Polarity */
#define DAPC            0x10    /* Disable Auto Polarity Correction */
#define LRT             0x08    /* Low Receive Threshold */
#define ASEL            0x04    /* Auto Select */
#define RWAKE           0x02    /* Remote Wake */
#define AWAKE           0x01    /* Auto Wake */

/* 18: Internal Address Configuration (IAC) */
#define ADDRCHG         0x80    /* Address Change */
#define PHYADDR         0x04    /* Physical Address Reset */
#define LOGADDR         0x02    /* Logical Address Reset */

/* 29: User Test Register (UTR) */
#define RTRE            0x80    /* Reserved Test Register Enable */
#define RTRD            0x40    /* Reserved Test Register Disable */
#define RPA             0x20    /* Run Packet Accept */
#define FCOLL           0x10    /* Force Collision */
#define RCVFCSE         0x08    /* Receive FCS Enable */
#define LOOP            0x06    /* Loopback Control */
#define LOOP_NONE       0x00    /* No Loopback */
#define LOOP_EXT        0x02    /* External Loopback */
#define LOOP_INT        0x04    /* Internal Loopback, excludes MENDEC */
#define LOOP_INT_MENDEC 0x06    /* Internal Loopback, includes MENDEC */

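/*
 * Per-frame receive state filled in by the DMA completion handler: the
 * four RFS status bytes that follow the frame in the receive buffer
 * (byte count, status, runt packet count, receive collision count) and
 * a pointer to the frame data itself.
 */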
struct mc_rxframe {
        u_int8_t rx_rcvcnt;
        u_int8_t rx_rcvsts;
        u_int8_t rx_rntpc;
        u_int8_t rx_rcvcc;
        u_char *rx_frame;
};

struct mc_softc {
        struct device sc_dev;           /* base device glue */
        struct arpcom sc_arpcom;        /* Ethernet common part */
        struct timeout sc_tick_ch;

        struct mc_rxframe sc_rxframe;
        u_int8_t sc_biucc;
        u_int8_t sc_fifocc;
        u_int8_t sc_plscc;
        u_int8_t sc_enaddr[6];
        u_int8_t sc_pad[2];
        int sc_havecarrier;             /* carrier status */

        char *sc_reg;
        bus_dma_tag_t sc_dmat;
        bus_dmamap_t sc_bufmap;
        bus_dma_segment_t sc_bufseg[1];

        dbdma_regmap_t *sc_txdma;
        dbdma_regmap_t *sc_rxdma;
        dbdma_command_t *sc_txdmacmd;
        dbdma_command_t *sc_rxdmacmd;
        dbdma_t sc_txdbdma;
        dbdma_t sc_rxdbdma;

        caddr_t sc_txbuf;
        caddr_t sc_rxbuf;
        paddr_t sc_txbuf_pa;
        paddr_t sc_rxbuf_pa;
        int sc_tail;
        int sc_rxset;
        int sc_txset;
        int sc_txseti;
};

int mc_match(struct device *, void *, void *);
void mc_attach(struct device *, struct device *, void *);

const struct cfattach mc_ca = {
        sizeof(struct mc_softc), mc_match, mc_attach
};

struct cfdriver mc_cd = {
        NULL, "mc", DV_IFNET
};

void mc_init(struct mc_softc *sc);
int mc_dmaintr(void *arg);
void mc_reset_rxdma(struct mc_softc *sc);
void mc_reset_txdma(struct mc_softc *sc);
int mc_stop(struct mc_softc *sc);
int mc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
void mc_start(struct ifnet *ifp);
void mc_reset(struct mc_softc *sc);
void mc_tint(struct mc_softc *sc);
void mc_rint(struct mc_softc *sc);
int mc_intr(void *);
void mc_watchdog(struct ifnet *ifp);

u_int maceput(struct mc_softc *sc, struct mbuf *);
void mace_read(struct mc_softc *, caddr_t, int);
struct mbuf *mace_get(struct mc_softc *, caddr_t, int);
static void mace_calcladrf(struct mc_softc *, u_int8_t *);
void mc_putpacket(struct mc_softc *, u_int);

int
mc_match(struct device *parent, void *arg, void *aux)
{
        struct confargs *ca = aux;

        if (strcmp(ca->ca_name, "mace") != 0)
                return 0;

        /* requires 6 regs */
        if (ca->ca_nreg / sizeof(int) != 6)
                return 0;

        /* requires 3 intrs */
        if (ca->ca_nintr / sizeof(int) != 3)
                return 0;

        return 1;
}

void
mc_attach(struct device *parent, struct device *self, void *aux)
{
        struct confargs *ca = aux;
        struct mc_softc *sc = (struct mc_softc *)self;
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        u_int8_t lladdr[ETHER_ADDR_LEN];
        int nseg, error;

        if (OF_getprop(ca->ca_node, "local-mac-address", lladdr,
            ETHER_ADDR_LEN) != ETHER_ADDR_LEN) {
                printf(": failed to get MAC address.\n");
                return;
        }

        ca->ca_reg[0] += ca->ca_baseaddr;
        ca->ca_reg[2] += ca->ca_baseaddr;
        ca->ca_reg[4] += ca->ca_baseaddr;

        if ((sc->sc_reg = mapiodev(ca->ca_reg[0], ca->ca_reg[1])) == NULL) {
                printf(": cannot map registers\n");
                return;
        }

        sc->sc_dmat = ca->ca_dmat;
        sc->sc_tail = 0;

        if ((sc->sc_txdma = mapiodev(ca->ca_reg[2], ca->ca_reg[3])) == NULL) {
                printf(": cannot map TX DMA registers\n");
                goto notxdma;
        }
        if ((sc->sc_rxdma = mapiodev(ca->ca_reg[4], ca->ca_reg[5])) == NULL) {
                printf(": cannot map RX DMA registers\n");
                goto norxdma;
        }
        if ((sc->sc_txdbdma = dbdma_alloc(sc->sc_dmat, 2)) == NULL) {
                printf(": cannot alloc TX DMA descriptors\n");
                goto notxdbdma;
        }
        sc->sc_txdmacmd = sc->sc_txdbdma->d_addr;

        if ((sc->sc_rxdbdma = dbdma_alloc(sc->sc_dmat, 8 + 1)) == NULL) {
                printf(": cannot alloc RX DMA descriptors\n");
                goto norxdbdma;
        }
        sc->sc_rxdmacmd = sc->sc_rxdbdma->d_addr;

        if ((error = bus_dmamem_alloc(sc->sc_dmat, MACE_BUFSZ, PAGE_SIZE, 0,
            sc->sc_bufseg, 1, &nseg, BUS_DMA_NOWAIT))) {
                printf(": cannot allocate DMA mem (%d)\n", error);
                goto nodmamem;
        }

        if ((error = bus_dmamem_map(sc->sc_dmat, sc->sc_bufseg, nseg,
            MACE_BUFSZ, &sc->sc_txbuf, BUS_DMA_NOWAIT))) {
                printf(": cannot map DMA mem (%d)\n", error);
                goto nodmamap;
        }

        if ((error = bus_dmamap_create(sc->sc_dmat, MACE_BUFSZ, 1, MACE_BUFSZ,
            0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_bufmap))) {
                printf(": cannot create DMA map (%d)\n", error);
                goto nodmacreate;
        }

        if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_bufmap, sc->sc_txbuf,
            MACE_BUFSZ, NULL, BUS_DMA_NOWAIT))) {
                printf(": cannot load DMA map (%d)\n", error);
                goto nodmaload;
        }

        sc->sc_txbuf_pa = sc->sc_bufmap->dm_segs->ds_addr;
        sc->sc_rxbuf = sc->sc_txbuf + MACE_BUFLEN * MACE_TXBUFS;
        sc->sc_rxbuf_pa = sc->sc_txbuf_pa + MACE_BUFLEN * MACE_TXBUFS;

        printf(": irq %d,%d,%d", ca->ca_intr[0], ca->ca_intr[1],
            ca->ca_intr[2]);

        /* disable receive DMA */
        dbdma_reset(sc->sc_rxdma);

        /* disable transmit DMA */
        dbdma_reset(sc->sc_txdma);

        /* install interrupt handlers */
        mac_intr_establish(parent, ca->ca_intr[2], IST_LEVEL, IPL_NET,
            mc_dmaintr, sc, sc->sc_dev.dv_xname);
        mac_intr_establish(parent, ca->ca_intr[0], IST_LEVEL, IPL_NET,
            mc_intr, sc, sc->sc_dev.dv_xname);

        sc->sc_biucc = XMTSP_64;
        sc->sc_fifocc = XMTFW_16 | RCVFW_64 | XMTFWU | RCVFWU |
            XMTBRST | RCVBRST;
        sc->sc_plscc = PORTSEL_GPSI | ENPLSIO;

        /* reset the chip and disable all interrupts */
        NIC_PUT(sc, MACE_BIUCC, SWRST);
        DELAY(100);

        NIC_PUT(sc, MACE_IMR, ~0);

        bcopy(lladdr, sc->sc_enaddr, ETHER_ADDR_LEN);
        bcopy(sc->sc_enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
        printf(": address %s\n", ether_sprintf(lladdr));

        bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
        ifp->if_softc = sc;
        ifp->if_ioctl = mc_ioctl;
        ifp->if_start = mc_start;
        ifp->if_flags =
            IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_watchdog = mc_watchdog;
        ifp->if_timer = 0;

        if_attach(ifp);
        ether_ifattach(ifp);

        return;
nodmaload:
        bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufmap);
nodmacreate:
        bus_dmamem_unmap(sc->sc_dmat, sc->sc_txbuf, MACE_BUFSZ);
nodmamap:
        bus_dmamem_free(sc->sc_dmat, sc->sc_bufseg, 1);
nodmamem:
        dbdma_free(sc->sc_rxdbdma);
norxdbdma:
        dbdma_free(sc->sc_txdbdma);
notxdbdma:
        unmapiodev((void *)sc->sc_rxdma, ca->ca_reg[5]);
norxdma:
        unmapiodev((void *)sc->sc_txdma, ca->ca_reg[3]);
notxdma:
        unmapiodev(sc->sc_reg, ca->ca_reg[1]);
}

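/*
 * Process interface ioctls.  Up/down transitions start or stop the chip;
 * everything else is handed to ether_ioctl(), and an ENETRESET result is
 * turned into a chip reset so the address filter is reloaded.
 */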
int
mc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct mc_softc *sc = ifp->if_softc;
        int s, err = 0;

        s = splnet();

        switch (cmd) {
        case SIOCSIFADDR:
                ifp->if_flags |= IFF_UP;
                if (!(ifp->if_flags & IFF_RUNNING))
                        mc_init(sc);
                break;

        case SIOCSIFFLAGS:
                if ((ifp->if_flags & IFF_UP) == 0 &&
                    (ifp->if_flags & IFF_RUNNING) != 0) {
                        /*
                         * If interface is marked down and it is running,
                         * then stop it.
                         */
                        mc_stop(sc);
                } else if ((ifp->if_flags & IFF_UP) != 0 &&
                    (ifp->if_flags & IFF_RUNNING) == 0) {
                        /*
                         * If interface is marked up and it is stopped,
                         * then start it.
                         */
                        mc_init(sc);
                } else {
                        /*
                         * reset the interface to pick up any other changes
                         * in flags
                         */
                        mc_reset(sc);
                        mc_start(ifp);
                }
                break;

        default:
                err = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
        }

        if (err == ENETRESET) {
                if (ifp->if_flags & IFF_RUNNING)
                        mc_reset(sc);
                err = 0;
        }

        splx(s);
        return (err);
}

/*
 * Start output: drain the send queue, copying each packet into the
 * transmit buffer and handing it to the transmit DMA channel.
 */
void
mc_start(struct ifnet *ifp)
{
        struct mc_softc *sc = ifp->if_softc;
        struct mbuf *m;

        if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
                return;

        while (1) {
                if (ifq_is_oactive(&ifp->if_snd))
                        return;

                m = ifq_dequeue(&ifp->if_snd);
                if (m == NULL)
                        return;

#if NBPFILTER > 0
                /*
                 * If bpf is listening on this interface, let it
                 * see the packet before we commit it to the wire.
                 */
                if (ifp->if_bpf)
                        bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

                /*
                 * Copy the mbuf chain into the transmit buffer.
                 */
                ifq_set_oactive(&ifp->if_snd);
                maceput(sc, m);
        }
}

/*
 * Reset and restart the MACE.  Called in case of fatal
 * hardware/software errors.
 */
void
mc_reset(struct mc_softc *sc)
{
        mc_stop(sc);
        mc_init(sc);
}

void
mc_init(struct mc_softc *sc)
{
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        u_int8_t maccc, ladrf[8];
        int s, i;

        s = splnet();

        NIC_PUT(sc, MACE_BIUCC, sc->sc_biucc);
        NIC_PUT(sc, MACE_FIFOCC, sc->sc_fifocc);
        NIC_PUT(sc, MACE_IMR, ~0); /* disable all interrupts */
        NIC_PUT(sc, MACE_PLSCC, sc->sc_plscc);

        NIC_PUT(sc, MACE_UTR, RTRD); /* disable reserved test registers */

        /* set MAC address */
        NIC_PUT(sc, MACE_IAC, ADDRCHG);
        while (NIC_GET(sc, MACE_IAC) & ADDRCHG)
                ;
        NIC_PUT(sc, MACE_IAC, PHYADDR);
        for (i = 0; i < ETHER_ADDR_LEN; i++)
                out8rb(sc->sc_reg + MACE_REG(MACE_PADR) + i,
                    sc->sc_enaddr[i]);

        /* set logical address filter */
        mace_calcladrf(sc, ladrf);

        NIC_PUT(sc, MACE_IAC, ADDRCHG);
        while (NIC_GET(sc, MACE_IAC) & ADDRCHG)
                ;
        NIC_PUT(sc, MACE_IAC, LOGADDR);
        for (i = 0; i < 8; i++)
                out8rb(sc->sc_reg + MACE_REG(MACE_LADRF) + i,
                    ladrf[i]);

        NIC_PUT(sc, MACE_XMTFC, APADXMT);
        /*
         * No need to autostrip padding on receive... Ethernet frames
         * don't have a length field, unlike 802.3 frames, so the MACE
         * can't figure out the length of the packet anyways.
         */
        NIC_PUT(sc, MACE_RCVFC, 0);

        maccc = ENXMT | ENRCV;
        if (ifp->if_flags & IFF_PROMISC)
                maccc |= PROM;

        NIC_PUT(sc, MACE_MACCC, maccc);

        mc_reset_rxdma(sc);
        mc_reset_txdma(sc);
        /*
         * Enable all interrupts except receive, since we use the DMA
         * completion interrupt for that.
         */
        NIC_PUT(sc, MACE_IMR, RCVINTM);

        /* flag interface as "running" */
        ifp->if_flags |= IFF_RUNNING;
        ifq_clr_oactive(&ifp->if_snd);

        splx(s);
}

/*
 * Close down an interface and free its buffers.
 * Called on final close of device, or if mc_init() fails
 * part way through.
 */
int
mc_stop(struct mc_softc *sc)
{
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        int s;

        s = splnet();

        NIC_PUT(sc, MACE_BIUCC, SWRST);
        DELAY(100);

        ifp->if_timer = 0;
        ifp->if_flags &= ~IFF_RUNNING;
        ifq_clr_oactive(&ifp->if_snd);

        splx(s);
        return (0);
}

/*
 * Called if any Tx packets remain unsent after 5 seconds.
 * In all cases we just reset the chip, and any retransmission
 * will be handled by higher level protocol timeouts.
 */
void
mc_watchdog(struct ifnet *ifp)
{
        struct mc_softc *sc = ifp->if_softc;

        printf("mcwatchdog: resetting chip\n");
        mc_reset(sc);
}

int
mc_intr(void *arg)
{
        struct mc_softc *sc = arg;
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        u_int8_t ir;

        ir = NIC_GET(sc, MACE_IR) & ~NIC_GET(sc, MACE_IMR);

        if (ir & JAB) {
#ifdef MCDEBUG
                printf("%s: jabber error\n", sc->sc_dev.dv_xname);
#endif
                ifp->if_oerrors++;
        }

        if (ir & BABL) {
#ifdef MCDEBUG
                printf("%s: babble\n", sc->sc_dev.dv_xname);
#endif
                ifp->if_oerrors++;
        }

        if (ir & CERR) {
#ifdef MCDEBUG
                printf("%s: collision error\n", sc->sc_dev.dv_xname);
#endif
                ifp->if_collisions++;
        }

        /*
         * Pretend we have carrier; if we don't, this will be cleared
         * shortly.
         */
        sc->sc_havecarrier = 1;

        if (ir & XMTINT)
                mc_tint(sc);

        if (ir & RCVINT)
                mc_rint(sc);

        return(1);
}

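/*
 * Transmit interrupt: read back the retry count and frame status,
 * account for errors and collisions, then restart output.
 */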
void
mc_tint(struct mc_softc *sc)
{
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        u_int8_t xmtrc, xmtfs;

        xmtrc = NIC_GET(sc, MACE_XMTRC);
        xmtfs = NIC_GET(sc, MACE_XMTFS);

        if ((xmtfs & XMTSV) == 0)
                return;

        if (xmtfs & UFLO) {
                printf("%s: underflow\n", sc->sc_dev.dv_xname);
                mc_reset(sc);
                return;
        }

        if (xmtfs & LCOL) {
                printf("%s: late collision\n", sc->sc_dev.dv_xname);
                ifp->if_oerrors++;
                ifp->if_collisions++;
        }

        if (xmtfs & MORE)
                /* Real number is unknown. */
                ifp->if_collisions += 2;
        else if (xmtfs & ONE)
                ifp->if_collisions++;
        else if (xmtfs & RTRY) {
                printf("%s: excessive collisions\n", sc->sc_dev.dv_xname);
                ifp->if_collisions += 16;
                ifp->if_oerrors++;
        }

        if (xmtfs & LCAR) {
                sc->sc_havecarrier = 0;
                printf("%s: lost carrier\n", sc->sc_dev.dv_xname);
                ifp->if_oerrors++;
        }

        ifq_clr_oactive(&ifp->if_snd);
        ifp->if_timer = 0;
        mc_start(ifp);
}

void
mc_rint(struct mc_softc *sc)
{
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
#define rxf     sc->sc_rxframe
        u_int len;

        len = (rxf.rx_rcvcnt | ((rxf.rx_rcvsts & 0xf) << 8)) - 4;

#ifdef MCDEBUG
        if (rxf.rx_rcvsts & 0xf0)
                printf("%s: rcvcnt %02x rcvsts %02x rntpc 0x%02x rcvcc 0x%02x\n",
                    sc->sc_dev.dv_xname, rxf.rx_rcvcnt, rxf.rx_rcvsts,
                    rxf.rx_rntpc, rxf.rx_rcvcc);
#endif

        if (rxf.rx_rcvsts & OFLO) {
#ifdef MCDEBUG
                printf("%s: receive FIFO overflow\n", sc->sc_dev.dv_xname);
#endif
                ifp->if_ierrors++;
                return;
        }

        if (rxf.rx_rcvsts & CLSN)
                ifp->if_collisions++;

        if (rxf.rx_rcvsts & FRAM) {
#ifdef MCDEBUG
                printf("%s: framing error\n", sc->sc_dev.dv_xname);
#endif
                ifp->if_ierrors++;
                return;
        }

        if (rxf.rx_rcvsts & FCS) {
#ifdef MCDEBUG
                printf("%s: frame control checksum error\n", sc->sc_dev.dv_xname);
#endif
                ifp->if_ierrors++;
                return;
        }

        mace_read(sc, rxf.rx_frame, len);
#undef rxf
}
/*
 * stuff packet into MACE (at splnet)
 */
u_int
maceput(struct mc_softc *sc, struct mbuf *m)
{
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        struct mbuf *n;
        u_int len, totlen = 0;
        u_char *buff;

        buff = sc->sc_txbuf;

        for (; m; m = n) {
                u_char *data = mtod(m, u_char *);
                len = m->m_len;
                totlen += len;
                bcopy(data, buff, len);
                buff += len;
                n = m_free(m);
        }

        if (totlen > PAGE_SIZE)
                panic("%s: maceput: packet overflow", sc->sc_dev.dv_xname);

#if 0
        if (totlen < ETHERMIN + sizeof(struct ether_header)) {
                int pad = ETHERMIN + sizeof(struct ether_header) - totlen;
                bzero(sc->sc_txbuf + totlen, pad);
                totlen = ETHERMIN + sizeof(struct ether_header);
        }
#endif

        /* 5 seconds to watch for failing to transmit */
        ifp->if_timer = 5;
        mc_putpacket(sc, totlen);
        return (totlen);
}

void
mace_read(struct mc_softc *sc, caddr_t pkt, int len)
{
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        struct mbuf_list ml = MBUF_LIST_INITIALIZER();
        struct mbuf *m;

        if (len <= sizeof(struct ether_header) ||
            len > ETHERMTU + sizeof(struct ether_header)) {
#ifdef MCDEBUG
                printf("%s: invalid packet size %d; dropping\n",
                    sc->sc_dev.dv_xname, len);
#endif
                ifp->if_ierrors++;
                return;
        }

        m = mace_get(sc, pkt, len);
        if (m == NULL) {
                ifp->if_ierrors++;
                return;
        }

        ml_enqueue(&ml, m);
        if_input(ifp, &ml);
}

/*
 * Pull data off an interface.
 * Len is length of data, with local net header stripped.
 * We copy the data into mbufs.  When full cluster sized units are present
 * we copy into clusters.
 */
struct mbuf *
mace_get(struct mc_softc *sc, caddr_t pkt, int totlen)
{
        struct mbuf *m;
        struct mbuf *top, **mp;
        int len;

        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == NULL)
                return (NULL);

        m->m_pkthdr.len = totlen;
        len = MHLEN;
        top = 0;
        mp = &top;

        while (totlen > 0) {
                if (top) {
                        MGET(m, M_DONTWAIT, MT_DATA);
                        if (m == NULL) {
                                m_freem(top);
                                return (NULL);
                        }
                        len = MLEN;
                }
                if (totlen >= MINCLSIZE) {
                        MCLGET(m, M_DONTWAIT);
                        if ((m->m_flags & M_EXT) == 0) {
                                m_free(m);
                                m_freem(top);
                                return (NULL);
                        }
                        len = MCLBYTES;
                }
                m->m_len = len = min(totlen, len);
                bcopy(pkt, mtod(m, caddr_t), len);
                pkt += len;
                totlen -= len;
                *mp = m;
                mp = &m->m_next;
        }

        return (top);
}

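/*
 * Queue one frame for transmission: build a two-command DBDMA program
 * (OUTPUT_LAST for the frame in the transmit buffer, then STOP) and
 * start the transmit channel.
 */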
void
mc_putpacket(struct mc_softc *sc, u_int len)
{
        dbdma_command_t *cmd = sc->sc_txdmacmd;

        DBDMA_BUILD(cmd, DBDMA_CMD_OUT_LAST, 0, len, sc->sc_txbuf_pa,
            DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
        cmd++;
        DBDMA_BUILD(cmd, DBDMA_CMD_STOP, 0, 0, 0, DBDMA_INT_ALWAYS,
            DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);

        dbdma_start(sc->sc_txdma, sc->sc_txdbdma);
}

/*
 * Interrupt handler for the MACE DMA completion interrupts
 */
int
mc_dmaintr(void *arg)
{
        struct mc_softc *sc = arg;
        int status, offset, statoff;
        int datalen, resid;
        int i, n, count;
        dbdma_command_t *cmd;

        /* We've received some packets from the MACE */
        /* Loop through, processing each of the packets */
        i = sc->sc_tail;
        for (n = 0; n < MC_RXDMABUFS; n++, i++) {
                if (i == MC_RXDMABUFS)
                        i = 0;

                cmd = &sc->sc_rxdmacmd[i];
                status = dbdma_ld16(&cmd->d_status);
                resid = dbdma_ld16(&cmd->d_resid);

                if ((status & DBDMA_CNTRL_ACTIVE) == 0) {
                        continue;
                }

                count = dbdma_ld16(&cmd->d_count);
                datalen = count - resid;
                datalen -= 4;   /* 4 == status bytes */

                if (datalen < 4 + sizeof(struct ether_header)) {
                        printf("short packet len=%d\n", datalen);
                        /* continue; */
                        goto next;
                }
                DBDMA_BUILD_CMD(cmd, DBDMA_CMD_STOP, 0, 0, 0, 0);

                offset = i * MACE_BUFLEN;
                statoff = offset + datalen;
                sc->sc_rxframe.rx_rcvcnt = sc->sc_rxbuf[statoff + 0];
                sc->sc_rxframe.rx_rcvsts = sc->sc_rxbuf[statoff + 1];
                sc->sc_rxframe.rx_rntpc = sc->sc_rxbuf[statoff + 2];
                sc->sc_rxframe.rx_rcvcc = sc->sc_rxbuf[statoff + 3];
                sc->sc_rxframe.rx_frame = sc->sc_rxbuf + offset;

                mc_rint(sc);

next:
                DBDMA_BUILD_CMD(cmd, DBDMA_CMD_IN_LAST, 0, DBDMA_INT_ALWAYS,
                    DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);

                cmd->d_status = 0;
                cmd->d_resid = 0;
                sc->sc_tail = i + 1;
        }

        dbdma_continue(sc->sc_rxdma);

        return 1;
}

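/*
 * (Re)build the receive DBDMA ring: one INPUT_LAST command per receive
 * buffer, closed by a NOP that branches back to the first command so the
 * channel keeps looping over the ring.  The receiver is disabled while
 * the ring is rebuilt and re-enabled afterwards.
 */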
void
mc_reset_rxdma(struct mc_softc *sc)
{
        dbdma_command_t *cmd = sc->sc_rxdmacmd;
        int i;
        u_int8_t maccc;

        /* Disable receiver, reset the DMA channels */
        maccc = NIC_GET(sc, MACE_MACCC);
        NIC_PUT(sc, MACE_MACCC, maccc & ~ENRCV);

        dbdma_reset(sc->sc_rxdma);

        bzero(sc->sc_rxdmacmd, 8 * sizeof(dbdma_command_t));
        for (i = 0; i < MC_RXDMABUFS; i++) {
                DBDMA_BUILD(cmd, DBDMA_CMD_IN_LAST, 0, MACE_BUFLEN,
                    sc->sc_rxbuf_pa + MACE_BUFLEN * i, DBDMA_INT_ALWAYS,
                    DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
                cmd++;
        }

        DBDMA_BUILD(cmd, DBDMA_CMD_NOP, 0, 0, 0,
            DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_ALWAYS);
        dbdma_st32(&cmd->d_cmddep, sc->sc_rxdbdma->d_paddr);
        cmd++;

        sc->sc_tail = 0;

        dbdma_start(sc->sc_rxdma, sc->sc_rxdbdma);
        /* Reenable receiver, reenable DMA */
        NIC_PUT(sc, MACE_MACCC, maccc);
}

void
mc_reset_txdma(struct mc_softc *sc)
{
        dbdma_command_t *cmd = sc->sc_txdmacmd;
        dbdma_regmap_t *dmareg = sc->sc_txdma;
        u_int8_t maccc;

        /* disable transmitter */
        maccc = NIC_GET(sc, MACE_MACCC);
        NIC_PUT(sc, MACE_MACCC, maccc & ~ENXMT);

        dbdma_reset(sc->sc_txdma);

        bzero(sc->sc_txdmacmd, 2 * sizeof(dbdma_command_t));
        DBDMA_BUILD(cmd, DBDMA_CMD_OUT_LAST, 0, 0, sc->sc_txbuf_pa,
            DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
        cmd++;
        DBDMA_BUILD(cmd, DBDMA_CMD_STOP, 0, 0, 0,
            DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);

        out32rb(&dmareg->d_cmdptrhi, 0);
        out32rb(&dmareg->d_cmdptrlo, sc->sc_txdbdma->d_paddr);

        /* restore old value */
        NIC_PUT(sc, MACE_MACCC, maccc);
}

/*
 * Go through the list of multicast addresses and calculate the logical
 * address filter.
 */
void
mace_calcladrf(struct mc_softc *sc, u_int8_t *af)
{
        struct ether_multi *enm;
        u_int32_t crc;
        struct ifnet *ifp = &sc->sc_arpcom.ac_if;
        struct arpcom *ac = &sc->sc_arpcom;
        struct ether_multistep step;
        /*
         * Set up multicast address filter by passing all multicast addresses
         * through a crc generator, and then using the high order 6 bits as an
         * index into the 64 bit logical address filter.  The high order bit
         * selects the word, while the rest of the bits select the bit within
         * the word.
         */

        if (ac->ac_multirangecnt > 0)
                goto allmulti;

        *((u_int32_t *)af) = *((u_int32_t *)af + 1) = 0;
        ETHER_FIRST_MULTI(step, ac, enm);
        while (enm != NULL) {
                crc = ether_crc32_le(enm->enm_addrlo, sizeof(enm->enm_addrlo));

                /* Just want the 6 most significant bits. */
                crc >>= 26;

                /* Set the corresponding bit in the filter. */
                af[crc >> 3] |= 1 << (crc & 7);

                ETHER_NEXT_MULTI(step, enm);
        }
        ifp->if_flags &= ~IFF_ALLMULTI;
        return;

allmulti:
        ifp->if_flags |= IFF_ALLMULTI;
        *((u_int32_t *)af) = *((u_int32_t *)af + 1) = 0xffffffff;
}