1 /* $NetBSD: mtd803.c,v 1.23 2010/01/19 22:06:24 pooka Exp $ */ 2 3 /*- 4 * 5 * Copyright (c) 2002 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to The NetBSD Foundation 9 * by Peter Bex <Peter.Bex@student.kun.nl>. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * TODO: 35 * - Most importantly, get some bus_dmamap_syncs in the correct places. 36 * I don't have access to a computer with PCI other than i386, and i386 37 * is just such a machine where dmamap_syncs don't do anything. 38 * - Powerhook for when resuming after standby. 39 * - Watchdog stuff doesn't work yet, the system crashes. 40 * - There seems to be a CardBus version of the card. (see datasheet) 41 * Perhaps a detach function is necessary then? (free buffs, stop rx/tx etc) 42 * - When you enable the TXBUN (Tx buffer unavailable) interrupt, it gets 43 * raised every time a packet is sent. Strange, since everything works anyway 44 */ 45 46 #include <sys/cdefs.h> 47 __KERNEL_RCSID(0, "$NetBSD: mtd803.c,v 1.23 2010/01/19 22:06:24 pooka Exp $"); 48 49 50 #include <sys/param.h> 51 #include <sys/mbuf.h> 52 #include <sys/systm.h> 53 #include <sys/device.h> 54 #include <sys/socket.h> 55 #include <sys/ioctl.h> 56 #include <sys/syslog.h> 57 58 #include <net/if.h> 59 #include <net/if_ether.h> 60 #include <net/if_media.h> 61 62 #ifdef INET 63 #include <netinet/in.h> 64 #include <netinet/if_inarp.h> 65 #include <netinet/in_systm.h> 66 #include <netinet/in_var.h> 67 #include <netinet/ip.h> 68 #endif 69 70 #include <net/bpf.h> 71 #include <net/bpfdesc.h> 72 73 #include <sys/bus.h> 74 75 #include <dev/ic/mtd803reg.h> 76 #include <dev/ic/mtd803var.h> 77 #include <dev/mii/mii.h> 78 #include <dev/mii/miivar.h> 79 80 /* 81 * Device driver for the MTD803 3-in-1 Fast Ethernet Controller 82 * Written by Peter Bex (peter.bex@student.kun.nl) 83 * 84 * Datasheet at: http://www.myson.com.tw or http://www.century-semi.com 85 */ 86 87 #define MTD_READ_1(sc, reg) \ 88 bus_space_read_1((sc)->bus_tag, (sc)->bus_handle, (reg)) 89 #define MTD_WRITE_1(sc, reg, data) \ 90 bus_space_write_1((sc)->bus_tag, (sc)->bus_handle, (reg), (data)) 91 92 #define MTD_READ_2(sc, reg) \ 93 bus_space_read_2((sc)->bus_tag, (sc)->bus_handle, (reg)) 94 #define MTD_WRITE_2(sc, reg, data) \ 95 bus_space_write_2((sc)->bus_tag, (sc)->bus_handle, (reg), (data)) 96 97 #define MTD_READ_4(sc, reg) \ 98 bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, (reg)) 99 #define MTD_WRITE_4(sc, reg, data) \ 100 bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, (reg), (data)) 101 102 #define MTD_SETBIT(sc, reg, x) \ 103 MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) | (x)) 104 #define MTD_CLRBIT(sc, reg, x) \ 105 MTD_WRITE_4((sc), (reg), MTD_READ_4((sc), (reg)) & ~(x)) 106 107 #define ETHER_CRC32(buf, len) (ether_crc32_be((buf), (len))) 108 109 int mtd_mii_readreg(device_t, int, int); 110 void mtd_mii_writereg(device_t, int, int, int); 111 void mtd_mii_statchg(device_t); 112 113 void mtd_start(struct ifnet *); 114 void mtd_stop(struct ifnet *, int); 115 int mtd_ioctl(struct ifnet *, u_long, void *); 116 void mtd_setmulti(struct mtd_softc *); 117 void mtd_watchdog(struct ifnet *); 118 119 int mtd_init(struct ifnet *); 120 void mtd_reset(struct mtd_softc *); 121 void mtd_shutdown(void *); 122 int mtd_init_desc(struct mtd_softc *); 123 int mtd_put(struct mtd_softc *, int, struct mbuf *); 124 struct mbuf *mtd_get(struct mtd_softc *, int, int); 125 126 int mtd_rxirq(struct mtd_softc *); 127 int mtd_txirq(struct mtd_softc *); 128 int mtd_bufirq(struct mtd_softc *); 129 130 131 int 132 mtd_config(struct mtd_softc *sc) 133 { 134 struct ifnet *ifp = &sc->ethercom.ec_if; 135 int i; 136 137 /* Read station address */ 138 for (i = 0; i < ETHER_ADDR_LEN; ++i) 139 sc->eaddr[i] = MTD_READ_1(sc, MTD_PAR0 + i); 140 141 /* Initialize ifnet structure */ 142 memcpy(ifp->if_xname, device_xname(&sc->dev), IFNAMSIZ); 143 ifp->if_softc = sc; 144 ifp->if_init = mtd_init; 145 ifp->if_start = mtd_start; 146 ifp->if_stop = mtd_stop; 147 ifp->if_ioctl = mtd_ioctl; 148 ifp->if_watchdog = mtd_watchdog; 149 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 150 IFQ_SET_READY(&ifp->if_snd); 151 152 /* Setup MII interface */ 153 sc->mii.mii_ifp = ifp; 154 sc->mii.mii_readreg = mtd_mii_readreg; 155 sc->mii.mii_writereg = mtd_mii_writereg; 156 sc->mii.mii_statchg = mtd_mii_statchg; 157 158 sc->ethercom.ec_mii = &sc->mii; 159 ifmedia_init(&sc->mii.mii_media, 0, ether_mediachange, 160 ether_mediastatus); 161 162 mii_attach(&sc->dev, &sc->mii, 0xffffffff, MII_PHY_ANY, 0, 0); 163 164 if (LIST_FIRST(&sc->mii.mii_phys) == NULL) { 165 aprint_error_dev(&sc->dev, "Unable to configure MII\n"); 166 return 1; 167 } else { 168 ifmedia_set(&sc->mii.mii_media, IFM_ETHER | IFM_AUTO); 169 } 170 171 if (mtd_init_desc(sc)) 172 return 1; 173 174 /* Attach interface */ 175 if_attach(ifp); 176 ether_ifattach(ifp, sc->eaddr); 177 178 #if NRND > 0 179 /* Initialise random source */ 180 rnd_attach_source(&sc->rnd_src, device_xname(&sc->dev), RND_TYPE_NET, 0); 181 #endif 182 183 /* Add shutdown hook to reset card when we reboot */ 184 sc->sd_hook = shutdownhook_establish(mtd_shutdown, sc); 185 186 return 0; 187 } 188 189 190 /* 191 * mtd_init 192 * Must be called at splnet() 193 */ 194 int 195 mtd_init(struct ifnet *ifp) 196 { 197 struct mtd_softc *sc = ifp->if_softc; 198 199 mtd_reset(sc); 200 201 /* 202 * Set cache alignment and burst length. Don't really know what these 203 * mean, so their values are probably suboptimal. 204 */ 205 MTD_WRITE_4(sc, MTD_BCR, MTD_BCR_BLEN16); 206 207 MTD_WRITE_4(sc, MTD_RXTXR, MTD_TX_STFWD | MTD_TX_FDPLX); 208 209 /* Promiscuous mode? */ 210 if (ifp->if_flags & IFF_PROMISC) 211 MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_PROM); 212 else 213 MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_PROM); 214 215 /* Broadcast mode? */ 216 if (ifp->if_flags & IFF_BROADCAST) 217 MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ABROAD); 218 else 219 MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ABROAD); 220 221 mtd_setmulti(sc); 222 223 /* Enable interrupts */ 224 MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK); 225 MTD_WRITE_4(sc, MTD_ISR, MTD_ISR_ENABLE); 226 227 /* Set descriptor base addresses */ 228 MTD_WRITE_4(sc, MTD_TXLBA, htole32(sc->desc_dma_map->dm_segs[0].ds_addr 229 + sizeof(struct mtd_desc) * MTD_NUM_RXD)); 230 MTD_WRITE_4(sc, MTD_RXLBA, 231 htole32(sc->desc_dma_map->dm_segs[0].ds_addr)); 232 233 /* Enable receiver and transmitter */ 234 MTD_SETBIT(sc, MTD_RXTXR, MTD_RX_ENABLE); 235 MTD_SETBIT(sc, MTD_RXTXR, MTD_TX_ENABLE); 236 237 /* Interface is running */ 238 ifp->if_flags |= IFF_RUNNING; 239 ifp->if_flags &= ~IFF_OACTIVE; 240 241 return 0; 242 } 243 244 245 int 246 mtd_init_desc(struct mtd_softc *sc) 247 { 248 int rseg, err, i; 249 bus_dma_segment_t seg; 250 bus_size_t size; 251 252 /* Allocate memory for descriptors */ 253 size = (MTD_NUM_RXD + MTD_NUM_TXD) * sizeof(struct mtd_desc); 254 255 /* Allocate DMA-safe memory */ 256 if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN, 257 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) { 258 aprint_error_dev(&sc->dev, "unable to allocate DMA buffer, error = %d\n", err); 259 return 1; 260 } 261 262 /* Map memory to kernel addressable space */ 263 if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size, 264 (void **)&sc->desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 265 aprint_error_dev(&sc->dev, "unable to map DMA buffer, error = %d\n", err); 266 bus_dmamem_free(sc->dma_tag, &seg, rseg); 267 return 1; 268 } 269 270 /* Create a DMA map */ 271 if ((err = bus_dmamap_create(sc->dma_tag, size, 1, 272 size, 0, BUS_DMA_NOWAIT, &sc->desc_dma_map)) != 0) { 273 aprint_error_dev(&sc->dev, "unable to create DMA map, error = %d\n", err); 274 bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size); 275 bus_dmamem_free(sc->dma_tag, &seg, rseg); 276 return 1; 277 } 278 279 /* Load the DMA map */ 280 if ((err = bus_dmamap_load(sc->dma_tag, sc->desc_dma_map, sc->desc, 281 size, NULL, BUS_DMA_NOWAIT)) != 0) { 282 aprint_error_dev(&sc->dev, "unable to load DMA map, error = %d\n", 283 err); 284 bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map); 285 bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size); 286 bus_dmamem_free(sc->dma_tag, &seg, rseg); 287 return 1; 288 } 289 290 /* Allocate memory for the buffers */ 291 size = MTD_NUM_RXD * MTD_RXBUF_SIZE + MTD_NUM_TXD * MTD_TXBUF_SIZE; 292 293 /* Allocate DMA-safe memory */ 294 if ((err = bus_dmamem_alloc(sc->dma_tag, size, MTD_DMA_ALIGN, 295 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) { 296 aprint_error_dev(&sc->dev, "unable to allocate DMA buffer, error = %d\n", 297 err); 298 299 /* Undo DMA map for descriptors */ 300 bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map); 301 bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map); 302 bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size); 303 bus_dmamem_free(sc->dma_tag, &seg, rseg); 304 return 1; 305 } 306 307 /* Map memory to kernel addressable space */ 308 if ((err = bus_dmamem_map(sc->dma_tag, &seg, 1, size, 309 &sc->buf, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 310 aprint_error_dev(&sc->dev, "unable to map DMA buffer, error = %d\n", 311 err); 312 bus_dmamem_free(sc->dma_tag, &seg, rseg); 313 314 /* Undo DMA map for descriptors */ 315 bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map); 316 bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map); 317 bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size); 318 bus_dmamem_free(sc->dma_tag, &seg, rseg); 319 return 1; 320 } 321 322 /* Create a DMA map */ 323 if ((err = bus_dmamap_create(sc->dma_tag, size, 1, 324 size, 0, BUS_DMA_NOWAIT, &sc->buf_dma_map)) != 0) { 325 aprint_error_dev(&sc->dev, "unable to create DMA map, error = %d\n", 326 err); 327 bus_dmamem_unmap(sc->dma_tag, sc->buf, size); 328 bus_dmamem_free(sc->dma_tag, &seg, rseg); 329 330 /* Undo DMA map for descriptors */ 331 bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map); 332 bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map); 333 bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size); 334 bus_dmamem_free(sc->dma_tag, &seg, rseg); 335 return 1; 336 } 337 338 /* Load the DMA map */ 339 if ((err = bus_dmamap_load(sc->dma_tag, sc->buf_dma_map, sc->buf, 340 size, NULL, BUS_DMA_NOWAIT)) != 0) { 341 aprint_error_dev(&sc->dev, "unable to load DMA map, error = %d\n", 342 err); 343 bus_dmamap_destroy(sc->dma_tag, sc->buf_dma_map); 344 bus_dmamem_unmap(sc->dma_tag, sc->buf, size); 345 bus_dmamem_free(sc->dma_tag, &seg, rseg); 346 347 /* Undo DMA map for descriptors */ 348 bus_dmamap_unload(sc->dma_tag, sc->desc_dma_map); 349 bus_dmamap_destroy(sc->dma_tag, sc->desc_dma_map); 350 bus_dmamem_unmap(sc->dma_tag, (void *)sc->desc, size); 351 bus_dmamem_free(sc->dma_tag, &seg, rseg); 352 return 1; 353 } 354 355 /* Descriptors are stored as a circular linked list */ 356 /* Fill in rx descriptors */ 357 for (i = 0; i < MTD_NUM_RXD; ++i) { 358 sc->desc[i].stat = MTD_RXD_OWNER; 359 if (i == MTD_NUM_RXD - 1) { /* Last descriptor */ 360 /* Link back to first rx descriptor */ 361 sc->desc[i].next = 362 htole32(sc->desc_dma_map->dm_segs[0].ds_addr); 363 } else { 364 /* Link forward to next rx descriptor */ 365 sc->desc[i].next = 366 htole32(sc->desc_dma_map->dm_segs[0].ds_addr 367 + (i + 1) * sizeof(struct mtd_desc)); 368 } 369 sc->desc[i].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS; 370 /* Set buffer's address */ 371 sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr 372 + i * MTD_RXBUF_SIZE); 373 } 374 375 /* Fill in tx descriptors */ 376 for (/* i = MTD_NUM_RXD */; i < (MTD_NUM_TXD + MTD_NUM_RXD); ++i) { 377 sc->desc[i].stat = 0; /* At least, NOT MTD_TXD_OWNER! */ 378 if (i == (MTD_NUM_RXD + MTD_NUM_TXD - 1)) { /* Last descr */ 379 /* Link back to first tx descriptor */ 380 sc->desc[i].next = 381 htole32(sc->desc_dma_map->dm_segs[0].ds_addr 382 +MTD_NUM_RXD * sizeof(struct mtd_desc)); 383 } else { 384 /* Link forward to next tx descriptor */ 385 sc->desc[i].next = 386 htole32(sc->desc_dma_map->dm_segs[0].ds_addr 387 + (i + 1) * sizeof(struct mtd_desc)); 388 } 389 /* sc->desc[i].conf = MTD_TXBUF_SIZE & MTD_TXD_CONF_BUFS; */ 390 /* Set buffer's address */ 391 sc->desc[i].data = htole32(sc->buf_dma_map->dm_segs[0].ds_addr 392 + MTD_NUM_RXD * MTD_RXBUF_SIZE 393 + (i - MTD_NUM_RXD) * MTD_TXBUF_SIZE); 394 } 395 396 return 0; 397 } 398 399 400 void 401 mtd_mii_statchg(device_t self) 402 { 403 /* Should we do something here? :) */ 404 } 405 406 407 int 408 mtd_mii_readreg(device_t self, int phy, int reg) 409 { 410 struct mtd_softc *sc = device_private(self); 411 412 return (MTD_READ_2(sc, MTD_PHYBASE + reg * 2)); 413 } 414 415 416 void 417 mtd_mii_writereg(device_t self, int phy, int reg, int val) 418 { 419 struct mtd_softc *sc = device_private(self); 420 421 MTD_WRITE_2(sc, MTD_PHYBASE + reg * 2, val); 422 } 423 424 425 int 426 mtd_put(struct mtd_softc *sc, int index, struct mbuf *m) 427 { 428 int len, tlen; 429 char *buf = (char *)sc->buf + MTD_NUM_RXD * MTD_RXBUF_SIZE 430 + index * MTD_TXBUF_SIZE; 431 struct mbuf *n; 432 433 for (tlen = 0; m != NULL; m = n) { 434 len = m->m_len; 435 if (len == 0) { 436 MFREE(m, n); 437 continue; 438 } else if (tlen > MTD_TXBUF_SIZE) { 439 /* XXX FIXME: No idea what to do here. */ 440 aprint_error_dev(&sc->dev, "packet too large! Size = %i\n", 441 tlen); 442 MFREE(m, n); 443 continue; 444 } 445 memcpy(buf, mtod(m, void *), len); 446 buf += len; 447 tlen += len; 448 MFREE(m, n); 449 } 450 sc->desc[MTD_NUM_RXD + index].conf = MTD_TXD_CONF_PAD | MTD_TXD_CONF_CRC 451 | MTD_TXD_CONF_IRQC 452 | ((tlen << MTD_TXD_PKTS_SHIFT) & MTD_TXD_CONF_PKTS) 453 | (tlen & MTD_TXD_CONF_BUFS); 454 455 return tlen; 456 } 457 458 459 void 460 mtd_start(struct ifnet *ifp) 461 { 462 struct mtd_softc *sc = ifp->if_softc; 463 struct mbuf *m; 464 int len; 465 int first_tx = sc->cur_tx; 466 467 /* Don't transmit when the interface is busy or inactive */ 468 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 469 return; 470 471 for (;;) { 472 IF_DEQUEUE(&ifp->if_snd, m); 473 474 if (m == NULL) 475 break; 476 477 if (ifp->if_bpf) 478 bpf_ops->bpf_mtap(ifp->if_bpf, m); 479 480 /* Copy mbuf chain into tx buffer */ 481 len = mtd_put(sc, sc->cur_tx, m); 482 483 if (sc->cur_tx != first_tx) 484 sc->desc[MTD_NUM_RXD + sc->cur_tx].stat = MTD_TXD_OWNER; 485 486 if (++sc->cur_tx >= MTD_NUM_TXD) 487 sc->cur_tx = 0; 488 } 489 /* Mark first & last descriptor */ 490 sc->desc[MTD_NUM_RXD + first_tx].conf |= MTD_TXD_CONF_FSD; 491 492 if (sc->cur_tx == 0) { 493 sc->desc[MTD_NUM_RXD + MTD_NUM_TXD - 1].conf |=MTD_TXD_CONF_LSD; 494 } else { 495 sc->desc[MTD_NUM_RXD + sc->cur_tx - 1].conf |= MTD_TXD_CONF_LSD; 496 } 497 498 /* Give first descriptor to chip to complete transaction */ 499 sc->desc[MTD_NUM_RXD + first_tx].stat = MTD_TXD_OWNER; 500 501 /* Transmit polling demand */ 502 MTD_WRITE_4(sc, MTD_TXPDR, MTD_TXPDR_DEMAND); 503 504 /* XXX FIXME: Set up a watchdog timer */ 505 /* ifp->if_timer = 5; */ 506 } 507 508 509 void 510 mtd_stop(struct ifnet *ifp, int disable) 511 { 512 struct mtd_softc *sc = ifp->if_softc; 513 514 /* Disable transmitter and receiver */ 515 MTD_CLRBIT(sc, MTD_RXTXR, MTD_TX_ENABLE); 516 MTD_CLRBIT(sc, MTD_RXTXR, MTD_RX_ENABLE); 517 518 /* Disable interrupts */ 519 MTD_WRITE_4(sc, MTD_IMR, 0x00000000); 520 521 /* Must do more at disable??... */ 522 if (disable) { 523 /* Delete tx and rx descriptor base addresses */ 524 MTD_WRITE_4(sc, MTD_RXLBA, 0x00000000); 525 MTD_WRITE_4(sc, MTD_TXLBA, 0x00000000); 526 } 527 528 ifp->if_timer = 0; 529 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 530 } 531 532 533 void 534 mtd_watchdog(struct ifnet *ifp) 535 { 536 struct mtd_softc *sc = ifp->if_softc; 537 int s; 538 539 log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->dev)); 540 ++sc->ethercom.ec_if.if_oerrors; 541 542 mtd_stop(ifp, 0); 543 544 s = splnet(); 545 mtd_init(ifp); 546 splx(s); 547 548 return; 549 } 550 551 552 int 553 mtd_ioctl(struct ifnet *ifp, u_long cmd, void *data) 554 { 555 struct mtd_softc *sc = ifp->if_softc; 556 int s, error = 0; 557 558 s = splnet(); 559 560 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) { 561 /* 562 * Multicast list has changed; set the hardware 563 * filter accordingly. 564 */ 565 if (ifp->if_flags & IFF_RUNNING) 566 mtd_setmulti(sc); 567 error = 0; 568 } 569 570 splx(s); 571 return error; 572 } 573 574 575 struct mbuf * 576 mtd_get(struct mtd_softc *sc, int index, int totlen) 577 { 578 struct ifnet *ifp = &sc->ethercom.ec_if; 579 struct mbuf *m, *m0, *newm; 580 int len; 581 char *buf = (char *)sc->buf + index * MTD_RXBUF_SIZE; 582 583 MGETHDR(m0, M_DONTWAIT, MT_DATA); 584 if (m0 == NULL) 585 return NULL; 586 587 m0->m_pkthdr.rcvif = ifp; 588 m0->m_pkthdr.len = totlen; 589 m = m0; 590 len = MHLEN; 591 592 while (totlen > 0) { 593 if (totlen >= MINCLSIZE) { 594 MCLGET(m, M_DONTWAIT); 595 if (!(m->m_flags & M_EXT)) { 596 m_freem(m0); 597 return NULL; 598 } 599 len = MCLBYTES; 600 } 601 602 if (m == m0) { 603 char *newdata = (char *) 604 ALIGN(m->m_data + sizeof(struct ether_header)) - 605 sizeof(struct ether_header); 606 len -= newdata - m->m_data; 607 m->m_data = newdata; 608 } 609 610 m->m_len = len = min(totlen, len); 611 memcpy(mtod(m, void *), buf, len); 612 buf += len; 613 614 totlen -= len; 615 if (totlen > 0) { 616 MGET(newm, M_DONTWAIT, MT_DATA); 617 if (newm == NULL) { 618 m_freem(m0); 619 return NULL; 620 } 621 len = MLEN; 622 m = m->m_next = newm; 623 } 624 } 625 626 return m0; 627 } 628 629 630 int 631 mtd_rxirq(struct mtd_softc *sc) 632 { 633 struct ifnet *ifp = &sc->ethercom.ec_if; 634 int len; 635 struct mbuf *m; 636 637 for (; !(sc->desc[sc->cur_rx].stat & MTD_RXD_OWNER);) { 638 /* Error summary set? */ 639 if (sc->desc[sc->cur_rx].stat & MTD_RXD_ERRSUM) { 640 aprint_error_dev(&sc->dev, "received packet with errors\n"); 641 /* Give up packet, since an error occurred */ 642 sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER; 643 sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & 644 MTD_RXD_CONF_BUFS; 645 ++ifp->if_ierrors; 646 if (++sc->cur_rx >= MTD_NUM_RXD) 647 sc->cur_rx = 0; 648 continue; 649 } 650 /* Get buffer length */ 651 len = (sc->desc[sc->cur_rx].stat & MTD_RXD_FLEN) 652 >> MTD_RXD_FLEN_SHIFT; 653 len -= ETHER_CRC_LEN; 654 655 /* Check packet size */ 656 if (len <= sizeof(struct ether_header)) { 657 aprint_error_dev(&sc->dev, "invalid packet size %d; dropping\n", 658 len); 659 sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER; 660 sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & 661 MTD_RXD_CONF_BUFS; 662 ++ifp->if_ierrors; 663 if (++sc->cur_rx >= MTD_NUM_RXD) 664 sc->cur_rx = 0; 665 continue; 666 } 667 668 m = mtd_get(sc, (sc->cur_rx), len); 669 670 /* Give descriptor back to card */ 671 sc->desc[sc->cur_rx].conf = MTD_RXBUF_SIZE & MTD_RXD_CONF_BUFS; 672 sc->desc[sc->cur_rx].stat = MTD_RXD_OWNER; 673 674 if (++sc->cur_rx >= MTD_NUM_RXD) 675 sc->cur_rx = 0; 676 677 if (m == NULL) { 678 aprint_error_dev(&sc->dev, "error pulling packet off interface\n"); 679 ++ifp->if_ierrors; 680 continue; 681 } 682 683 ++ifp->if_ipackets; 684 685 if (ifp->if_bpf) 686 bpf_ops->bpf_mtap(ifp->if_bpf, m); 687 /* Pass the packet up */ 688 (*ifp->if_input)(ifp, m); 689 } 690 691 return 1; 692 } 693 694 695 int 696 mtd_txirq(struct mtd_softc *sc) 697 { 698 struct ifnet *ifp = &sc->ethercom.ec_if; 699 700 /* Clear timeout */ 701 ifp->if_timer = 0; 702 703 ifp->if_flags &= ~IFF_OACTIVE; 704 ++ifp->if_opackets; 705 706 /* XXX FIXME If there is some queued, do an mtd_start? */ 707 708 return 1; 709 } 710 711 712 int 713 mtd_bufirq(struct mtd_softc *sc) 714 { 715 struct ifnet *ifp = &sc->ethercom.ec_if; 716 717 /* Clear timeout */ 718 ifp->if_timer = 0; 719 720 /* XXX FIXME: Do something here to make sure we get some buffers! */ 721 722 return 1; 723 } 724 725 726 int 727 mtd_irq_h(void *args) 728 { 729 struct mtd_softc *sc = args; 730 struct ifnet *ifp = &sc->ethercom.ec_if; 731 u_int32_t status; 732 int r = 0; 733 734 if (!(ifp->if_flags & IFF_RUNNING) || !device_is_active(&sc->dev)) 735 return 0; 736 737 /* Disable interrupts */ 738 MTD_WRITE_4(sc, MTD_IMR, 0x00000000); 739 740 for(;;) { 741 status = MTD_READ_4(sc, MTD_ISR); 742 #if NRND > 0 743 /* Add random seed before masking out bits */ 744 if (status) 745 rnd_add_uint32(&sc->rnd_src, status); 746 #endif 747 status &= MTD_ISR_MASK; 748 if (!status) /* We didn't ask for this */ 749 break; 750 751 MTD_WRITE_4(sc, MTD_ISR, status); 752 753 /* NOTE: Perhaps we should reset with some of these errors? */ 754 755 if (status & MTD_ISR_RXBUN) { 756 aprint_error_dev(&sc->dev, "receive buffer unavailable\n"); 757 ++ifp->if_ierrors; 758 } 759 760 if (status & MTD_ISR_RXERR) { 761 aprint_error_dev(&sc->dev, "receive error\n"); 762 ++ifp->if_ierrors; 763 } 764 765 if (status & MTD_ISR_TXBUN) { 766 aprint_error_dev(&sc->dev, "transmit buffer unavailable\n"); 767 ++ifp->if_ierrors; 768 } 769 770 if ((status & MTD_ISR_PDF)) { 771 aprint_error_dev(&sc->dev, "parallel detection fault\n"); 772 ++ifp->if_ierrors; 773 } 774 775 if (status & MTD_ISR_FBUSERR) { 776 aprint_error_dev(&sc->dev, "fatal bus error\n"); 777 ++ifp->if_ierrors; 778 } 779 780 if (status & MTD_ISR_TARERR) { 781 aprint_error_dev(&sc->dev, "target error\n"); 782 ++ifp->if_ierrors; 783 } 784 785 if (status & MTD_ISR_MASTERR) { 786 aprint_error_dev(&sc->dev, "master error\n"); 787 ++ifp->if_ierrors; 788 } 789 790 if (status & MTD_ISR_PARERR) { 791 aprint_error_dev(&sc->dev, "parity error\n"); 792 ++ifp->if_ierrors; 793 } 794 795 if (status & MTD_ISR_RXIRQ) /* Receive interrupt */ 796 r |= mtd_rxirq(sc); 797 798 if (status & MTD_ISR_TXIRQ) /* Transmit interrupt */ 799 r |= mtd_txirq(sc); 800 801 if (status & MTD_ISR_TXEARLY) /* Transmit early */ 802 r |= mtd_txirq(sc); 803 804 if (status & MTD_ISR_TXBUN) /* Transmit buffer n/a */ 805 r |= mtd_bufirq(sc); 806 807 } 808 809 /* Enable interrupts */ 810 MTD_WRITE_4(sc, MTD_IMR, MTD_IMR_MASK); 811 812 return r; 813 } 814 815 816 void 817 mtd_setmulti(struct mtd_softc *sc) 818 { 819 struct ifnet *ifp = &sc->ethercom.ec_if; 820 u_int32_t rxtx_stat; 821 u_int32_t hash[2] = {0, 0}; 822 u_int32_t crc; 823 struct ether_multi *enm; 824 struct ether_multistep step; 825 int mcnt = 0; 826 827 /* Get old status */ 828 rxtx_stat = MTD_READ_4(sc, MTD_RXTXR); 829 830 if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) { 831 rxtx_stat |= MTD_RX_AMULTI; 832 MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat); 833 MTD_WRITE_4(sc, MTD_MAR0, MTD_ALL_ADDR); 834 MTD_WRITE_4(sc, MTD_MAR1, MTD_ALL_ADDR); 835 return; 836 } 837 838 ETHER_FIRST_MULTI(step, &sc->ethercom, enm); 839 while (enm != NULL) { 840 /* We need the 6 most significant bits of the CRC */ 841 crc = ETHER_CRC32(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26; 842 843 hash[crc >> 5] |= 1 << (crc & 0xf); 844 845 ++mcnt; 846 ETHER_NEXT_MULTI(step, enm); 847 } 848 849 /* Accept multicast bit needs to be on? */ 850 if (mcnt) 851 rxtx_stat |= MTD_RX_AMULTI; 852 else 853 rxtx_stat &= ~MTD_RX_AMULTI; 854 855 /* Write out the hash */ 856 MTD_WRITE_4(sc, MTD_MAR0, hash[0]); 857 MTD_WRITE_4(sc, MTD_MAR1, hash[1]); 858 MTD_WRITE_4(sc, MTD_RXTXR, rxtx_stat); 859 } 860 861 862 void 863 mtd_reset(struct mtd_softc *sc) 864 { 865 int i; 866 867 MTD_SETBIT(sc, MTD_BCR, MTD_BCR_RESET); 868 869 /* Reset descriptor status */ 870 sc->cur_tx = 0; 871 sc->cur_rx = 0; 872 873 /* Wait until done with reset */ 874 for (i = 0; i < MTD_TIMEOUT; ++i) { 875 DELAY(10); 876 if (!(MTD_READ_4(sc, MTD_BCR) & MTD_BCR_RESET)) 877 break; 878 } 879 880 if (i == MTD_TIMEOUT) { 881 aprint_error_dev(&sc->dev, "reset timed out\n"); 882 } 883 884 /* Wait a little so chip can stabilize */ 885 DELAY(1000); 886 } 887 888 889 void 890 mtd_shutdown (void *arg) 891 { 892 struct mtd_softc *sc = arg; 893 struct ifnet *ifp = &sc->ethercom.ec_if; 894 895 #if NRND > 0 896 rnd_detach_source(&sc->rnd_src); 897 #endif 898 mtd_stop(ifp, 1); 899 } 900