1 /* $NetBSD: if_mec.c,v 1.33 2008/08/23 18:44:51 tsutsui Exp $ */ 2 3 /*- 4 * Copyright (c) 2004, 2008 Izumi Tsutsui. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 /* 28 * Copyright (c) 2003 Christopher SEKIYA 29 * All rights reserved. 30 * 31 * Redistribution and use in source and binary forms, with or without 32 * modification, are permitted provided that the following conditions 33 * are met: 34 * 1. Redistributions of source code must retain the above copyright 35 * notice, this list of conditions and the following disclaimer. 36 * 2. Redistributions in binary form must reproduce the above copyright 37 * notice, this list of conditions and the following disclaimer in the 38 * documentation and/or other materials provided with the distribution. 39 * 3. All advertising materials mentioning features or use of this software 40 * must display the following acknowledgement: 41 * This product includes software developed for the 42 * NetBSD Project. See http://www.NetBSD.org/ for 43 * information about NetBSD. 44 * 4. The name of the author may not be used to endorse or promote products 45 * derived from this software without specific prior written permission. 46 * 47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 50 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 51 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 56 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
57 */ 58 59 /* 60 * MACE MAC-110 Ethernet driver 61 */ 62 63 #include <sys/cdefs.h> 64 __KERNEL_RCSID(0, "$NetBSD: if_mec.c,v 1.33 2008/08/23 18:44:51 tsutsui Exp $"); 65 66 #include "opt_ddb.h" 67 #include "bpfilter.h" 68 #include "rnd.h" 69 70 #include <sys/param.h> 71 #include <sys/systm.h> 72 #include <sys/device.h> 73 #include <sys/callout.h> 74 #include <sys/mbuf.h> 75 #include <sys/malloc.h> 76 #include <sys/kernel.h> 77 #include <sys/socket.h> 78 #include <sys/ioctl.h> 79 #include <sys/errno.h> 80 81 #if NRND > 0 82 #include <sys/rnd.h> 83 #endif 84 85 #include <net/if.h> 86 #include <net/if_dl.h> 87 #include <net/if_media.h> 88 #include <net/if_ether.h> 89 90 #if NBPFILTER > 0 91 #include <net/bpf.h> 92 #endif 93 94 #include <machine/bus.h> 95 #include <machine/intr.h> 96 #include <machine/machtype.h> 97 98 #include <dev/mii/mii.h> 99 #include <dev/mii/miivar.h> 100 101 #include <sgimips/mace/macevar.h> 102 #include <sgimips/mace/if_mecreg.h> 103 104 #include <dev/arcbios/arcbios.h> 105 #include <dev/arcbios/arcbiosvar.h> 106 107 /* #define MEC_DEBUG */ 108 109 #ifdef MEC_DEBUG 110 #define MEC_DEBUG_RESET 0x01 111 #define MEC_DEBUG_START 0x02 112 #define MEC_DEBUG_STOP 0x04 113 #define MEC_DEBUG_INTR 0x08 114 #define MEC_DEBUG_RXINTR 0x10 115 #define MEC_DEBUG_TXINTR 0x20 116 #define MEC_DEBUG_TXSEGS 0x40 117 uint32_t mec_debug = 0; 118 #define DPRINTF(x, y) if (mec_debug & (x)) printf y 119 #else 120 #define DPRINTF(x, y) /* nothing */ 121 #endif 122 123 /* #define MEC_EVENT_COUNTERS */ 124 125 #ifdef MEC_EVENT_COUNTERS 126 #define MEC_EVCNT_INCR(ev) (ev)->ev_count++ 127 #else 128 #define MEC_EVCNT_INCR(ev) do {} while (/* CONSTCOND */ 0) 129 #endif 130 131 /* 132 * Transmit descriptor list size 133 */ 134 #define MEC_NTXDESC 64 135 #define MEC_NTXDESC_MASK (MEC_NTXDESC - 1) 136 #define MEC_NEXTTX(x) (((x) + 1) & MEC_NTXDESC_MASK) 137 #define MEC_NTXDESC_RSVD 4 138 #define MEC_NTXDESC_INTR 8 139 140 /* 141 * software state for TX 142 */ 143 struct mec_txsoft { 144 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 145 bus_dmamap_t txs_dmamap; /* our DMA map */ 146 uint32_t txs_flags; 147 #define MEC_TXS_BUFLEN_MASK 0x0000007f /* data len in txd_buf */ 148 #define MEC_TXS_TXDPTR 0x00000080 /* concat txd_ptr is used */ 149 }; 150 151 /* 152 * Transmit buffer descriptor 153 */ 154 #define MEC_TXDESCSIZE 128 155 #define MEC_NTXPTR 3 156 #define MEC_TXD_BUFOFFSET sizeof(uint64_t) 157 #define MEC_TXD_BUFOFFSET1 \ 158 (sizeof(uint64_t) + sizeof(uint64_t) * MEC_NTXPTR) 159 #define MEC_TXD_BUFSIZE (MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET) 160 #define MEC_TXD_BUFSIZE1 (MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET1) 161 #define MEC_TXD_BUFSTART(len) (MEC_TXD_BUFSIZE - (len)) 162 #define MEC_TXD_ALIGN 8 163 #define MEC_TXD_ALIGNMASK (MEC_TXD_ALIGN - 1) 164 #define MEC_TXD_ROUNDUP(addr) \ 165 (((addr) + MEC_TXD_ALIGNMASK) & ~(uint64_t)MEC_TXD_ALIGNMASK) 166 #define MEC_NTXSEG 16 167 168 struct mec_txdesc { 169 volatile uint64_t txd_cmd; 170 #define MEC_TXCMD_DATALEN 0x000000000000ffff /* data length */ 171 #define MEC_TXCMD_BUFSTART 0x00000000007f0000 /* start byte offset */ 172 #define TXCMD_BUFSTART(x) ((x) << 16) 173 #define MEC_TXCMD_TERMDMA 0x0000000000800000 /* stop DMA on abort */ 174 #define MEC_TXCMD_TXINT 0x0000000001000000 /* INT after TX done */ 175 #define MEC_TXCMD_PTR1 0x0000000002000000 /* valid 1st txd_ptr */ 176 #define MEC_TXCMD_PTR2 0x0000000004000000 /* valid 2nd txd_ptr */ 177 #define MEC_TXCMD_PTR3 0x0000000008000000 /* valid 3rd txd_ptr */ 178 #define MEC_TXCMD_UNUSED 
0xfffffffff0000000ULL /* should be zero */ 179 180 #define txd_stat txd_cmd 181 #define MEC_TXSTAT_LEN 0x000000000000ffff /* TX length */ 182 #define MEC_TXSTAT_COLCNT 0x00000000000f0000 /* collision count */ 183 #define MEC_TXSTAT_COLCNT_SHIFT 16 184 #define MEC_TXSTAT_LATE_COL 0x0000000000100000 /* late collision */ 185 #define MEC_TXSTAT_CRCERROR 0x0000000000200000 /* */ 186 #define MEC_TXSTAT_DEFERRED 0x0000000000400000 /* */ 187 #define MEC_TXSTAT_SUCCESS 0x0000000000800000 /* TX complete */ 188 #define MEC_TXSTAT_TOOBIG 0x0000000001000000 /* */ 189 #define MEC_TXSTAT_UNDERRUN 0x0000000002000000 /* */ 190 #define MEC_TXSTAT_COLLISIONS 0x0000000004000000 /* */ 191 #define MEC_TXSTAT_EXDEFERRAL 0x0000000008000000 /* */ 192 #define MEC_TXSTAT_COLLIDED 0x0000000010000000 /* */ 193 #define MEC_TXSTAT_UNUSED 0x7fffffffe0000000ULL /* should be zero */ 194 #define MEC_TXSTAT_SENT 0x8000000000000000ULL /* packet sent */ 195 196 union { 197 uint64_t txptr[MEC_NTXPTR]; 198 #define MEC_TXPTR_UNUSED2 0x0000000000000007 /* should be zero */ 199 #define MEC_TXPTR_DMAADDR 0x00000000fffffff8 /* TX DMA address */ 200 #define MEC_TXPTR_LEN 0x0000ffff00000000ULL /* buffer length */ 201 #define TXPTR_LEN(x) ((uint64_t)(x) << 32) 202 #define MEC_TXPTR_UNUSED1 0xffff000000000000ULL /* should be zero */ 203 204 uint8_t txbuf[MEC_TXD_BUFSIZE]; 205 } txd_data; 206 #define txd_ptr txd_data.txptr 207 #define txd_buf txd_data.txbuf 208 }; 209 210 /* 211 * Receive buffer size 212 */ 213 #define MEC_NRXDESC 16 214 #define MEC_NRXDESC_MASK (MEC_NRXDESC - 1) 215 #define MEC_NEXTRX(x) (((x) + 1) & MEC_NRXDESC_MASK) 216 217 /* 218 * Receive buffer description 219 */ 220 #define MEC_RXDESCSIZE 4096 /* umm, should be 4kbyte aligned */ 221 #define MEC_RXD_NRXPAD 3 222 #define MEC_RXD_DMAOFFSET (1 + MEC_RXD_NRXPAD) 223 #define MEC_RXD_BUFOFFSET (MEC_RXD_DMAOFFSET * sizeof(uint64_t)) 224 #define MEC_RXD_BUFSIZE (MEC_RXDESCSIZE - MEC_RXD_BUFOFFSET) 225 226 struct mec_rxdesc { 227 volatile uint64_t rxd_stat; 228 #define MEC_RXSTAT_LEN 0x000000000000ffff /* data length */ 229 #define MEC_RXSTAT_VIOLATION 0x0000000000010000 /* code violation (?) */ 230 #define MEC_RXSTAT_UNUSED2 0x0000000000020000 /* unknown (?) 
 */
#define MEC_RXSTAT_CRCERROR	0x0000000000040000	/* CRC error */
#define MEC_RXSTAT_MULTICAST	0x0000000000080000	/* multicast packet */
#define MEC_RXSTAT_BROADCAST	0x0000000000100000	/* broadcast packet */
#define MEC_RXSTAT_INVALID	0x0000000000200000	/* invalid preamble */
#define MEC_RXSTAT_LONGEVENT	0x0000000000400000	/* long packet */
#define MEC_RXSTAT_BADPACKET	0x0000000000800000	/* bad packet */
#define MEC_RXSTAT_CAREVENT	0x0000000001000000	/* carrier event */
#define MEC_RXSTAT_MATCHMCAST	0x0000000002000000	/* match multicast */
#define MEC_RXSTAT_MATCHMAC	0x0000000004000000	/* match MAC */
#define MEC_RXSTAT_SEQNUM	0x00000000f8000000	/* sequence number */
#define MEC_RXSTAT_CKSUM	0x0000ffff00000000ULL	/* IP checksum */
#define MEC_RXSTAT_UNUSED1	0x7fff000000000000ULL	/* should be zero */
#define MEC_RXSTAT_RECEIVED	0x8000000000000000ULL	/* set to 1 on RX */
	uint64_t rxd_pad1[MEC_RXD_NRXPAD];
	uint8_t  rxd_buf[MEC_RXD_BUFSIZE];
};

/*
 * control structures for DMA ops
 */
struct mec_control_data {
	/*
	 * TX descriptors and buffers
	 */
	struct mec_txdesc mcd_txdesc[MEC_NTXDESC];

	/*
	 * RX descriptors and buffers
	 */
	struct mec_rxdesc mcd_rxdesc[MEC_NRXDESC];
};

/*
 * It _seems_ there are some restrictions on descriptor addresses:
 *
 * - Base address of txdescs should be 8kbyte aligned
 * - Each txdesc should be 128byte aligned
 * - Each rxdesc should be 4kbyte aligned
 *
 * So we should request 8kbyte alignment to allocate the txdescs.
 * In this case, sizeof(struct mec_txdesc) * MEC_NTXDESC is 8192,
 * so the rxdescs are also allocated 4kbyte aligned.
 */
#define MEC_CONTROL_DATA_ALIGN	(8 * 1024)

#define MEC_CDOFF(x)	offsetof(struct mec_control_data, x)
#define MEC_CDTXOFF(x)	MEC_CDOFF(mcd_txdesc[(x)])
#define MEC_CDRXOFF(x)	MEC_CDOFF(mcd_rxdesc[(x)])

/*
 * software state per device
 */
struct mec_softc {
	device_t sc_dev;		/* generic device structures */

	bus_space_tag_t sc_st;		/* bus_space tag */
	bus_space_handle_t sc_sh;	/* bus_space handle */
	bus_dma_tag_t sc_dmat;		/* bus_dma tag */
	void *sc_sdhook;		/* shutdown hook */

	struct ethercom sc_ethercom;	/* Ethernet common part */

	struct mii_data sc_mii;		/* MII/media information */
	int sc_phyaddr;			/* MII address */
	struct callout sc_tick_ch;	/* tick callout */

	uint8_t sc_enaddr[ETHER_ADDR_LEN]; /* MAC address */

	bus_dmamap_t sc_cddmamap;	/* bus_dma map for control data */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* pointer to allocated control data */
	struct mec_control_data *sc_control_data;
#define sc_txdesc	sc_control_data->mcd_txdesc
#define sc_rxdesc	sc_control_data->mcd_rxdesc

	/* software state for TX descs */
	struct mec_txsoft sc_txsoft[MEC_NTXDESC];

	int sc_txpending;		/* number of TX requests pending */
	int sc_txdirty;			/* first dirty TX descriptor */
	int sc_txlast;			/* last used TX descriptor */

	int sc_rxptr;			/* next ready RX buffer */

#if NRND > 0
	rndsource_element_t sc_rnd_source; /* random source */
#endif
#ifdef MEC_EVENT_COUNTERS
	struct evcnt sc_ev_txpkts;	/* TX packets queued total */
	struct evcnt sc_ev_txdpad;	/* TX packets padded in txdesc buf */
	struct evcnt sc_ev_txdbuf;	/* TX packets copied to txdesc buf */
	struct evcnt sc_ev_txptr1;	/* TX packets using concat ptr1 */
	struct
evcnt sc_ev_txptr1a; /* TX packets w/ptr1 ~160bytes */ 325 struct evcnt sc_ev_txptr1b; /* TX packets w/ptr1 ~256bytes */ 326 struct evcnt sc_ev_txptr1c; /* TX packets w/ptr1 ~512bytes */ 327 struct evcnt sc_ev_txptr1d; /* TX packets w/ptr1 ~1024bytes */ 328 struct evcnt sc_ev_txptr1e; /* TX packets w/ptr1 >1024bytes */ 329 struct evcnt sc_ev_txptr2; /* TX packets using concat ptr1,2 */ 330 struct evcnt sc_ev_txptr2a; /* TX packets w/ptr2 ~160bytes */ 331 struct evcnt sc_ev_txptr2b; /* TX packets w/ptr2 ~256bytes */ 332 struct evcnt sc_ev_txptr2c; /* TX packets w/ptr2 ~512bytes */ 333 struct evcnt sc_ev_txptr2d; /* TX packets w/ptr2 ~1024bytes */ 334 struct evcnt sc_ev_txptr2e; /* TX packets w/ptr2 >1024bytes */ 335 struct evcnt sc_ev_txptr3; /* TX packets using concat ptr1,2,3 */ 336 struct evcnt sc_ev_txptr3a; /* TX packets w/ptr3 ~160bytes */ 337 struct evcnt sc_ev_txptr3b; /* TX packets w/ptr3 ~256bytes */ 338 struct evcnt sc_ev_txptr3c; /* TX packets w/ptr3 ~512bytes */ 339 struct evcnt sc_ev_txptr3d; /* TX packets w/ptr3 ~1024bytes */ 340 struct evcnt sc_ev_txptr3e; /* TX packets w/ptr3 >1024bytes */ 341 struct evcnt sc_ev_txmbuf; /* TX packets copied to new mbufs */ 342 struct evcnt sc_ev_txmbufa; /* TX packets w/mbuf ~160bytes */ 343 struct evcnt sc_ev_txmbufb; /* TX packets w/mbuf ~256bytes */ 344 struct evcnt sc_ev_txmbufc; /* TX packets w/mbuf ~512bytes */ 345 struct evcnt sc_ev_txmbufd; /* TX packets w/mbuf ~1024bytes */ 346 struct evcnt sc_ev_txmbufe; /* TX packets w/mbuf >1024bytes */ 347 struct evcnt sc_ev_txptrs; /* TX packets using ptrs total */ 348 struct evcnt sc_ev_txptrc0; /* TX packets w/ptrs no hdr chain */ 349 struct evcnt sc_ev_txptrc1; /* TX packets w/ptrs 1 hdr chain */ 350 struct evcnt sc_ev_txptrc2; /* TX packets w/ptrs 2 hdr chains */ 351 struct evcnt sc_ev_txptrc3; /* TX packets w/ptrs 3 hdr chains */ 352 struct evcnt sc_ev_txptrc4; /* TX packets w/ptrs 4 hdr chains */ 353 struct evcnt sc_ev_txptrc5; /* TX packets w/ptrs 5 hdr chains */ 354 struct evcnt sc_ev_txptrc6; /* TX packets w/ptrs >5 hdr chains */ 355 struct evcnt sc_ev_txptrh0; /* TX packets w/ptrs ~8bytes hdr */ 356 struct evcnt sc_ev_txptrh1; /* TX packets w/ptrs ~16bytes hdr */ 357 struct evcnt sc_ev_txptrh2; /* TX packets w/ptrs ~32bytes hdr */ 358 struct evcnt sc_ev_txptrh3; /* TX packets w/ptrs ~64bytes hdr */ 359 struct evcnt sc_ev_txptrh4; /* TX packets w/ptrs ~80bytes hdr */ 360 struct evcnt sc_ev_txptrh5; /* TX packets w/ptrs ~96bytes hdr */ 361 struct evcnt sc_ev_txdstall; /* TX stalled due to no txdesc */ 362 struct evcnt sc_ev_txempty; /* TX empty interrupts */ 363 struct evcnt sc_ev_txsent; /* TX sent interrupts */ 364 #endif 365 }; 366 367 #define MEC_CDTXADDR(sc, x) ((sc)->sc_cddma + MEC_CDTXOFF(x)) 368 #define MEC_CDRXADDR(sc, x) ((sc)->sc_cddma + MEC_CDRXOFF(x)) 369 370 #define MEC_TXDESCSYNC(sc, x, ops) \ 371 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 372 MEC_CDTXOFF(x), MEC_TXDESCSIZE, (ops)) 373 #define MEC_TXCMDSYNC(sc, x, ops) \ 374 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 375 MEC_CDTXOFF(x), sizeof(uint64_t), (ops)) 376 377 #define MEC_RXSTATSYNC(sc, x, ops) \ 378 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 379 MEC_CDRXOFF(x), sizeof(uint64_t), (ops)) 380 #define MEC_RXBUFSYNC(sc, x, len, ops) \ 381 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 382 MEC_CDRXOFF(x) + MEC_RXD_BUFOFFSET, \ 383 MEC_ETHER_ALIGN + (len), (ops)) 384 385 /* XXX these values should be moved to <net/if_ether.h> ? 
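 */

/*
 * Illustrative sketch, not part of the original driver: how mec_start()
 * below places a short frame in the 128-byte TX descriptor using the
 * macros defined above.  The function name mec_txd_layout_example is
 * hypothetical and the block is disabled; it only spells out the
 * arithmetic.
 */
#if 0
static void
mec_txd_layout_example(void)
{
	int len, bufoff, start;

	/* a minimum-size frame, padded to 60 bytes */
	len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/* offset into txd_buf[]: MEC_TXD_BUFSIZE - len = 120 - 60 = 60 */
	bufoff = MEC_TXD_BUFSTART(len);

	/*
	 * Start byte offset within the whole descriptor, as encoded by
	 * TXCMD_BUFSTART() in mec_start(): 128 - 60 = 68, so the copied
	 * data ends exactly at the 128-byte descriptor boundary.
	 */
	start = MEC_TXDESCSIZE - len;

	(void)bufoff;
	(void)start;
}
#endif

/*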
*/ 386 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN) 387 #define MEC_ETHER_ALIGN 2 388 389 static int mec_match(device_t, cfdata_t, void *); 390 static void mec_attach(device_t, device_t, void *); 391 392 static int mec_mii_readreg(device_t, int, int); 393 static void mec_mii_writereg(device_t, int, int, int); 394 static int mec_mii_wait(struct mec_softc *); 395 static void mec_statchg(device_t); 396 397 static void enaddr_aton(const char *, uint8_t *); 398 399 static int mec_init(struct ifnet * ifp); 400 static void mec_start(struct ifnet *); 401 static void mec_watchdog(struct ifnet *); 402 static void mec_tick(void *); 403 static int mec_ioctl(struct ifnet *, u_long, void *); 404 static void mec_reset(struct mec_softc *); 405 static void mec_setfilter(struct mec_softc *); 406 static int mec_intr(void *arg); 407 static void mec_stop(struct ifnet *, int); 408 static void mec_rxintr(struct mec_softc *); 409 static void mec_txintr(struct mec_softc *, uint32_t); 410 static void mec_shutdown(void *); 411 412 CFATTACH_DECL_NEW(mec, sizeof(struct mec_softc), 413 mec_match, mec_attach, NULL, NULL); 414 415 static int mec_matched = 0; 416 417 static int 418 mec_match(device_t parent, cfdata_t cf, void *aux) 419 { 420 421 /* allow only one device */ 422 if (mec_matched) 423 return 0; 424 425 mec_matched = 1; 426 return 1; 427 } 428 429 static void 430 mec_attach(device_t parent, device_t self, void *aux) 431 { 432 struct mec_softc *sc = device_private(self); 433 struct mace_attach_args *maa = aux; 434 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 435 uint64_t address, command; 436 const char *macaddr; 437 struct mii_softc *child; 438 bus_dma_segment_t seg; 439 int i, err, rseg; 440 bool mac_is_fake; 441 442 sc->sc_dev = self; 443 sc->sc_st = maa->maa_st; 444 if (bus_space_subregion(sc->sc_st, maa->maa_sh, 445 maa->maa_offset, 0, &sc->sc_sh) != 0) { 446 aprint_error(": can't map i/o space\n"); 447 return; 448 } 449 450 /* set up DMA structures */ 451 sc->sc_dmat = maa->maa_dmat; 452 453 /* 454 * Allocate the control data structures, and create and load the 455 * DMA map for it. 456 */ 457 if ((err = bus_dmamem_alloc(sc->sc_dmat, 458 sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0, 459 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) { 460 aprint_error(": unable to allocate control data, error = %d\n", 461 err); 462 goto fail_0; 463 } 464 /* 465 * XXX needs re-think... 466 * control data structures contain whole RX data buffer, so 467 * BUS_DMA_COHERENT (which disables cache) may cause some performance 468 * issue on copying data from the RX buffer to mbuf on normal memory, 469 * though we have to make sure all bus_dmamap_sync(9) ops are called 470 * properly in that case. 
471 */ 472 if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 473 sizeof(struct mec_control_data), 474 (void **)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) { 475 aprint_error(": unable to map control data, error = %d\n", err); 476 goto fail_1; 477 } 478 memset(sc->sc_control_data, 0, sizeof(struct mec_control_data)); 479 480 if ((err = bus_dmamap_create(sc->sc_dmat, 481 sizeof(struct mec_control_data), 1, 482 sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) { 483 aprint_error(": unable to create control data DMA map," 484 " error = %d\n", err); 485 goto fail_2; 486 } 487 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 488 sc->sc_control_data, sizeof(struct mec_control_data), NULL, 489 BUS_DMA_NOWAIT)) != 0) { 490 aprint_error(": unable to load control data DMA map," 491 " error = %d\n", err); 492 goto fail_3; 493 } 494 495 /* create TX buffer DMA maps */ 496 for (i = 0; i < MEC_NTXDESC; i++) { 497 if ((err = bus_dmamap_create(sc->sc_dmat, 498 MCLBYTES, MEC_NTXSEG, MCLBYTES, PAGE_SIZE, 0, 499 &sc->sc_txsoft[i].txs_dmamap)) != 0) { 500 aprint_error(": unable to create tx DMA map %d," 501 " error = %d\n", i, err); 502 goto fail_4; 503 } 504 } 505 506 callout_init(&sc->sc_tick_ch, 0); 507 508 /* get Ethernet address from ARCBIOS */ 509 if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) { 510 aprint_error(": unable to get MAC address!\n"); 511 goto fail_4; 512 } 513 /* 514 * On some machines the DS2502 chip storing the serial number/ 515 * mac address is on the pci riser board - if this board is 516 * missing, ARCBIOS will not know a good ethernet address (but 517 * otherwise the machine will work fine). 518 */ 519 mac_is_fake = false; 520 if (strcmp(macaddr, "ff:ff:ff:ff:ff:ff") == 0) { 521 uint32_t ui = 0; 522 const char * netaddr = 523 ARCBIOS->GetEnvironmentVariable("netaddr"); 524 525 /* 526 * Create a MAC address by abusing the "netaddr" env var 527 */ 528 sc->sc_enaddr[0] = 0xf2; 529 sc->sc_enaddr[1] = 0x0b; 530 sc->sc_enaddr[2] = 0xa4; 531 if (netaddr) { 532 mac_is_fake = true; 533 while (*netaddr) { 534 int v = 0; 535 while (*netaddr && *netaddr != '.') { 536 if (*netaddr >= '0' && *netaddr <= '9') 537 v = v*10 + (*netaddr - '0'); 538 netaddr++; 539 } 540 ui <<= 8; 541 ui |= v; 542 if (*netaddr == '.') 543 netaddr++; 544 } 545 } 546 memcpy(sc->sc_enaddr+3, ((uint8_t *)&ui)+1, 3); 547 } 548 if (!mac_is_fake) 549 enaddr_aton(macaddr, sc->sc_enaddr); 550 551 /* set the Ethernet address */ 552 address = 0; 553 for (i = 0; i < ETHER_ADDR_LEN; i++) { 554 address = address << 8; 555 address |= sc->sc_enaddr[i]; 556 } 557 bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_STATION, address); 558 559 /* reset device */ 560 mec_reset(sc); 561 562 command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL); 563 564 aprint_normal(": MAC-110 Ethernet, rev %u\n", 565 (u_int)((command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT)); 566 567 if (mac_is_fake) 568 aprint_normal_dev(self, 569 "could not get ethernet address from firmware" 570 " - generated one from the \"netaddr\" environment" 571 " variable\n"); 572 aprint_normal_dev(self, "Ethernet address %s\n", 573 ether_sprintf(sc->sc_enaddr)); 574 575 /* Done, now attach everything */ 576 577 sc->sc_mii.mii_ifp = ifp; 578 sc->sc_mii.mii_readreg = mec_mii_readreg; 579 sc->sc_mii.mii_writereg = mec_mii_writereg; 580 sc->sc_mii.mii_statchg = mec_statchg; 581 582 /* Set up PHY properties */ 583 sc->sc_ethercom.ec_mii = &sc->sc_mii; 584 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, 585 
ether_mediastatus); 586 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 587 MII_OFFSET_ANY, 0); 588 589 child = LIST_FIRST(&sc->sc_mii.mii_phys); 590 if (child == NULL) { 591 /* No PHY attached */ 592 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL, 593 0, NULL); 594 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL); 595 } else { 596 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); 597 sc->sc_phyaddr = child->mii_phy; 598 } 599 600 strcpy(ifp->if_xname, device_xname(self)); 601 ifp->if_softc = sc; 602 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 603 ifp->if_ioctl = mec_ioctl; 604 ifp->if_start = mec_start; 605 ifp->if_watchdog = mec_watchdog; 606 ifp->if_init = mec_init; 607 ifp->if_stop = mec_stop; 608 ifp->if_mtu = ETHERMTU; 609 IFQ_SET_READY(&ifp->if_snd); 610 611 /* We can support 802.1Q VLAN-sized frames. */ 612 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 613 614 /* attach the interface */ 615 if_attach(ifp); 616 ether_ifattach(ifp, sc->sc_enaddr); 617 618 /* establish interrupt */ 619 cpu_intr_establish(maa->maa_intr, maa->maa_intrmask, mec_intr, sc); 620 621 #if NRND > 0 622 rnd_attach_source(&sc->sc_rnd_source, device_xname(self), 623 RND_TYPE_NET, 0); 624 #endif 625 626 #ifdef MEC_EVENT_COUNTERS 627 evcnt_attach_dynamic(&sc->sc_ev_txpkts , EVCNT_TYPE_MISC, 628 NULL, device_xname(self), "TX pkts queued total"); 629 evcnt_attach_dynamic(&sc->sc_ev_txdpad , EVCNT_TYPE_MISC, 630 NULL, device_xname(self), "TX pkts padded in txdesc buf"); 631 evcnt_attach_dynamic(&sc->sc_ev_txdbuf , EVCNT_TYPE_MISC, 632 NULL, device_xname(self), "TX pkts copied to txdesc buf"); 633 evcnt_attach_dynamic(&sc->sc_ev_txptr1 , EVCNT_TYPE_MISC, 634 NULL, device_xname(self), "TX pkts using concat ptr1"); 635 evcnt_attach_dynamic(&sc->sc_ev_txptr1a , EVCNT_TYPE_MISC, 636 NULL, device_xname(self), "TX pkts w/ptr1 ~160bytes"); 637 evcnt_attach_dynamic(&sc->sc_ev_txptr1b , EVCNT_TYPE_MISC, 638 NULL, device_xname(self), "TX pkts w/ptr1 ~256bytes"); 639 evcnt_attach_dynamic(&sc->sc_ev_txptr1c , EVCNT_TYPE_MISC, 640 NULL, device_xname(self), "TX pkts w/ptr1 ~512bytes"); 641 evcnt_attach_dynamic(&sc->sc_ev_txptr1d , EVCNT_TYPE_MISC, 642 NULL, device_xname(self), "TX pkts w/ptr1 ~1024bytes"); 643 evcnt_attach_dynamic(&sc->sc_ev_txptr1e , EVCNT_TYPE_MISC, 644 NULL, device_xname(self), "TX pkts w/ptr1 >1024bytes"); 645 evcnt_attach_dynamic(&sc->sc_ev_txptr2 , EVCNT_TYPE_MISC, 646 NULL, device_xname(self), "TX pkts using concat ptr1,2"); 647 evcnt_attach_dynamic(&sc->sc_ev_txptr2a , EVCNT_TYPE_MISC, 648 NULL, device_xname(self), "TX pkts w/ptr2 ~160bytes"); 649 evcnt_attach_dynamic(&sc->sc_ev_txptr2b , EVCNT_TYPE_MISC, 650 NULL, device_xname(self), "TX pkts w/ptr2 ~256bytes"); 651 evcnt_attach_dynamic(&sc->sc_ev_txptr2c , EVCNT_TYPE_MISC, 652 NULL, device_xname(self), "TX pkts w/ptr2 ~512bytes"); 653 evcnt_attach_dynamic(&sc->sc_ev_txptr2d , EVCNT_TYPE_MISC, 654 NULL, device_xname(self), "TX pkts w/ptr2 ~1024bytes"); 655 evcnt_attach_dynamic(&sc->sc_ev_txptr2e , EVCNT_TYPE_MISC, 656 NULL, device_xname(self), "TX pkts w/ptr2 >1024bytes"); 657 evcnt_attach_dynamic(&sc->sc_ev_txptr3 , EVCNT_TYPE_MISC, 658 NULL, device_xname(self), "TX pkts using concat ptr1,2,3"); 659 evcnt_attach_dynamic(&sc->sc_ev_txptr3a , EVCNT_TYPE_MISC, 660 NULL, device_xname(self), "TX pkts w/ptr3 ~160bytes"); 661 evcnt_attach_dynamic(&sc->sc_ev_txptr3b , EVCNT_TYPE_MISC, 662 NULL, device_xname(self), "TX pkts w/ptr3 ~256bytes"); 663 evcnt_attach_dynamic(&sc->sc_ev_txptr3c , EVCNT_TYPE_MISC, 
664 NULL, device_xname(self), "TX pkts w/ptr3 ~512bytes"); 665 evcnt_attach_dynamic(&sc->sc_ev_txptr3d , EVCNT_TYPE_MISC, 666 NULL, device_xname(self), "TX pkts w/ptr3 ~1024bytes"); 667 evcnt_attach_dynamic(&sc->sc_ev_txptr3e , EVCNT_TYPE_MISC, 668 NULL, device_xname(self), "TX pkts w/ptr3 >1024bytes"); 669 evcnt_attach_dynamic(&sc->sc_ev_txmbuf , EVCNT_TYPE_MISC, 670 NULL, device_xname(self), "TX pkts copied to new mbufs"); 671 evcnt_attach_dynamic(&sc->sc_ev_txmbufa , EVCNT_TYPE_MISC, 672 NULL, device_xname(self), "TX pkts w/mbuf ~160bytes"); 673 evcnt_attach_dynamic(&sc->sc_ev_txmbufb , EVCNT_TYPE_MISC, 674 NULL, device_xname(self), "TX pkts w/mbuf ~256bytes"); 675 evcnt_attach_dynamic(&sc->sc_ev_txmbufc , EVCNT_TYPE_MISC, 676 NULL, device_xname(self), "TX pkts w/mbuf ~512bytes"); 677 evcnt_attach_dynamic(&sc->sc_ev_txmbufd , EVCNT_TYPE_MISC, 678 NULL, device_xname(self), "TX pkts w/mbuf ~1024bytes"); 679 evcnt_attach_dynamic(&sc->sc_ev_txmbufe , EVCNT_TYPE_MISC, 680 NULL, device_xname(self), "TX pkts w/mbuf >1024bytes"); 681 evcnt_attach_dynamic(&sc->sc_ev_txptrs , EVCNT_TYPE_MISC, 682 NULL, device_xname(self), "TX pkts using ptrs total"); 683 evcnt_attach_dynamic(&sc->sc_ev_txptrc0 , EVCNT_TYPE_MISC, 684 NULL, device_xname(self), "TX pkts w/ptrs no hdr chain"); 685 evcnt_attach_dynamic(&sc->sc_ev_txptrc1 , EVCNT_TYPE_MISC, 686 NULL, device_xname(self), "TX pkts w/ptrs 1 hdr chain"); 687 evcnt_attach_dynamic(&sc->sc_ev_txptrc2 , EVCNT_TYPE_MISC, 688 NULL, device_xname(self), "TX pkts w/ptrs 2 hdr chains"); 689 evcnt_attach_dynamic(&sc->sc_ev_txptrc3 , EVCNT_TYPE_MISC, 690 NULL, device_xname(self), "TX pkts w/ptrs 3 hdr chains"); 691 evcnt_attach_dynamic(&sc->sc_ev_txptrc4 , EVCNT_TYPE_MISC, 692 NULL, device_xname(self), "TX pkts w/ptrs 4 hdr chains"); 693 evcnt_attach_dynamic(&sc->sc_ev_txptrc5 , EVCNT_TYPE_MISC, 694 NULL, device_xname(self), "TX pkts w/ptrs 5 hdr chains"); 695 evcnt_attach_dynamic(&sc->sc_ev_txptrc6 , EVCNT_TYPE_MISC, 696 NULL, device_xname(self), "TX pkts w/ptrs >5 hdr chains"); 697 evcnt_attach_dynamic(&sc->sc_ev_txptrh0 , EVCNT_TYPE_MISC, 698 NULL, device_xname(self), "TX pkts w/ptrs ~8bytes hdr"); 699 evcnt_attach_dynamic(&sc->sc_ev_txptrh1 , EVCNT_TYPE_MISC, 700 NULL, device_xname(self), "TX pkts w/ptrs ~16bytes hdr"); 701 evcnt_attach_dynamic(&sc->sc_ev_txptrh2 , EVCNT_TYPE_MISC, 702 NULL, device_xname(self), "TX pkts w/ptrs ~32bytes hdr"); 703 evcnt_attach_dynamic(&sc->sc_ev_txptrh3 , EVCNT_TYPE_MISC, 704 NULL, device_xname(self), "TX pkts w/ptrs ~64bytes hdr"); 705 evcnt_attach_dynamic(&sc->sc_ev_txptrh4 , EVCNT_TYPE_MISC, 706 NULL, device_xname(self), "TX pkts w/ptrs ~80bytes hdr"); 707 evcnt_attach_dynamic(&sc->sc_ev_txptrh5 , EVCNT_TYPE_MISC, 708 NULL, device_xname(self), "TX pkts w/ptrs ~96bytes hdr"); 709 evcnt_attach_dynamic(&sc->sc_ev_txdstall , EVCNT_TYPE_MISC, 710 NULL, device_xname(self), "TX stalled due to no txdesc"); 711 evcnt_attach_dynamic(&sc->sc_ev_txempty , EVCNT_TYPE_MISC, 712 NULL, device_xname(self), "TX empty interrupts"); 713 evcnt_attach_dynamic(&sc->sc_ev_txsent , EVCNT_TYPE_MISC, 714 NULL, device_xname(self), "TX sent interrupts"); 715 #endif 716 717 /* set shutdown hook to reset interface on powerdown */ 718 sc->sc_sdhook = shutdownhook_establish(mec_shutdown, sc); 719 720 return; 721 722 /* 723 * Free any resources we've allocated during the failed attach 724 * attempt. Do this in reverse order and fall though. 
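	 *
	 * The unwind mirrors the setup order above: fail_4 destroys any
	 * TX DMA maps created so far and unloads the control-data map,
	 * fail_3 destroys the control-data map, fail_2 unmaps the control
	 * data, and fail_1 frees the DMA memory from bus_dmamem_alloc().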
725 */ 726 fail_4: 727 for (i = 0; i < MEC_NTXDESC; i++) { 728 if (sc->sc_txsoft[i].txs_dmamap != NULL) 729 bus_dmamap_destroy(sc->sc_dmat, 730 sc->sc_txsoft[i].txs_dmamap); 731 } 732 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 733 fail_3: 734 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 735 fail_2: 736 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 737 sizeof(struct mec_control_data)); 738 fail_1: 739 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 740 fail_0: 741 return; 742 } 743 744 static int 745 mec_mii_readreg(device_t self, int phy, int reg) 746 { 747 struct mec_softc *sc = device_private(self); 748 bus_space_tag_t st = sc->sc_st; 749 bus_space_handle_t sh = sc->sc_sh; 750 uint64_t val; 751 int i; 752 753 if (mec_mii_wait(sc) != 0) 754 return 0; 755 756 bus_space_write_8(st, sh, MEC_PHY_ADDRESS, 757 (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER)); 758 delay(25); 759 bus_space_write_8(st, sh, MEC_PHY_READ_INITIATE, 1); 760 delay(25); 761 mec_mii_wait(sc); 762 763 for (i = 0; i < 20; i++) { 764 delay(30); 765 766 val = bus_space_read_8(st, sh, MEC_PHY_DATA); 767 768 if ((val & MEC_PHY_DATA_BUSY) == 0) 769 return val & MEC_PHY_DATA_VALUE; 770 } 771 return 0; 772 } 773 774 static void 775 mec_mii_writereg(device_t self, int phy, int reg, int val) 776 { 777 struct mec_softc *sc = device_private(self); 778 bus_space_tag_t st = sc->sc_st; 779 bus_space_handle_t sh = sc->sc_sh; 780 781 if (mec_mii_wait(sc) != 0) { 782 printf("timed out writing %x: %x\n", reg, val); 783 return; 784 } 785 786 bus_space_write_8(st, sh, MEC_PHY_ADDRESS, 787 (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER)); 788 789 delay(60); 790 791 bus_space_write_8(st, sh, MEC_PHY_DATA, val & MEC_PHY_DATA_VALUE); 792 793 delay(60); 794 795 mec_mii_wait(sc); 796 } 797 798 static int 799 mec_mii_wait(struct mec_softc *sc) 800 { 801 uint32_t busy; 802 int i, s; 803 804 for (i = 0; i < 100; i++) { 805 delay(30); 806 807 s = splhigh(); 808 busy = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_PHY_DATA); 809 splx(s); 810 811 if ((busy & MEC_PHY_DATA_BUSY) == 0) 812 return 0; 813 #if 0 814 if (busy == 0xffff) /* XXX ? */ 815 return 0; 816 #endif 817 } 818 819 printf("%s: MII timed out\n", device_xname(sc->sc_dev)); 820 return 1; 821 } 822 823 static void 824 mec_statchg(device_t self) 825 { 826 struct mec_softc *sc = device_private(self); 827 bus_space_tag_t st = sc->sc_st; 828 bus_space_handle_t sh = sc->sc_sh; 829 uint32_t control; 830 831 control = bus_space_read_8(st, sh, MEC_MAC_CONTROL); 832 control &= ~(MEC_MAC_IPGT | MEC_MAC_IPGR1 | MEC_MAC_IPGR2 | 833 MEC_MAC_FULL_DUPLEX | MEC_MAC_SPEED_SELECT); 834 835 /* must also set IPG here for duplex stuff ... */ 836 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) { 837 control |= MEC_MAC_FULL_DUPLEX; 838 } else { 839 /* set IPG */ 840 control |= MEC_MAC_IPG_DEFAULT; 841 } 842 843 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control); 844 } 845 846 /* 847 * XXX 848 * maybe this function should be moved to common part 849 * (sgimips/machdep.c or elsewhere) for all on-board network devices. 
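 */

/*
 * A minimal usage sketch, not part of the original driver: enaddr_aton()
 * below walks an "xx:xx:xx:xx:xx:xx" string such as the ARCBIOS "eaddr"
 * value, converting two hex digits per byte and skipping the ':'
 * separators.  The function name enaddr_aton_example and the sample
 * address are made up for illustration; the block is disabled.
 */
#if 0
static void
enaddr_aton_example(void)
{
	uint8_t ea[ETHER_ADDR_LEN];

	/* yields ea[] = { 0x08, 0x00, 0x69, 0x0a, 0x1b, 0x2c } */
	enaddr_aton("08:00:69:0a:1b:2c", ea);
}
#endif

/*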
850 */ 851 static void 852 enaddr_aton(const char *str, uint8_t *eaddr) 853 { 854 int i; 855 char c; 856 857 for (i = 0; i < ETHER_ADDR_LEN; i++) { 858 if (*str == ':') 859 str++; 860 861 c = *str++; 862 if (isdigit(c)) { 863 eaddr[i] = (c - '0'); 864 } else if (isxdigit(c)) { 865 eaddr[i] = (toupper(c) + 10 - 'A'); 866 } 867 c = *str++; 868 if (isdigit(c)) { 869 eaddr[i] = (eaddr[i] << 4) | (c - '0'); 870 } else if (isxdigit(c)) { 871 eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A'); 872 } 873 } 874 } 875 876 static int 877 mec_init(struct ifnet *ifp) 878 { 879 struct mec_softc *sc = ifp->if_softc; 880 bus_space_tag_t st = sc->sc_st; 881 bus_space_handle_t sh = sc->sc_sh; 882 struct mec_rxdesc *rxd; 883 int i, rc; 884 885 /* cancel any pending I/O */ 886 mec_stop(ifp, 0); 887 888 /* reset device */ 889 mec_reset(sc); 890 891 /* setup filter for multicast or promisc mode */ 892 mec_setfilter(sc); 893 894 /* set the TX ring pointer to the base address */ 895 bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0)); 896 897 sc->sc_txpending = 0; 898 sc->sc_txdirty = 0; 899 sc->sc_txlast = MEC_NTXDESC - 1; 900 901 /* put RX buffers into FIFO */ 902 for (i = 0; i < MEC_NRXDESC; i++) { 903 rxd = &sc->sc_rxdesc[i]; 904 rxd->rxd_stat = 0; 905 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD); 906 MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD); 907 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i)); 908 } 909 sc->sc_rxptr = 0; 910 911 #if 0 /* XXX no info */ 912 bus_space_write_8(st, sh, MEC_TIMER, 0); 913 #endif 914 915 /* 916 * MEC_DMA_TX_INT_ENABLE will be set later otherwise it causes 917 * spurious interrupts when TX buffers are empty 918 */ 919 bus_space_write_8(st, sh, MEC_DMA_CONTROL, 920 (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) | 921 (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) | 922 MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */ 923 MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE); 924 925 callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc); 926 927 if ((rc = ether_mediachange(ifp)) != 0) 928 return rc; 929 930 ifp->if_flags |= IFF_RUNNING; 931 ifp->if_flags &= ~IFF_OACTIVE; 932 mec_start(ifp); 933 934 return 0; 935 } 936 937 static void 938 mec_reset(struct mec_softc *sc) 939 { 940 bus_space_tag_t st = sc->sc_st; 941 bus_space_handle_t sh = sc->sc_sh; 942 uint64_t control; 943 944 /* stop DMA first */ 945 bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0); 946 947 /* reset chip */ 948 bus_space_write_8(st, sh, MEC_MAC_CONTROL, MEC_MAC_CORE_RESET); 949 delay(1000); 950 bus_space_write_8(st, sh, MEC_MAC_CONTROL, 0); 951 delay(1000); 952 953 /* Default to 100/half and let auto-negotiation work its magic */ 954 control = MEC_MAC_SPEED_SELECT | MEC_MAC_FILTER_MATCHMULTI | 955 MEC_MAC_IPG_DEFAULT; 956 957 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control); 958 /* stop DMA again for sanity */ 959 bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0); 960 961 DPRINTF(MEC_DEBUG_RESET, ("mec: control now %llx\n", 962 bus_space_read_8(st, sh, MEC_MAC_CONTROL))); 963 } 964 965 static void 966 mec_start(struct ifnet *ifp) 967 { 968 struct mec_softc *sc = ifp->if_softc; 969 struct mbuf *m0, *m; 970 struct mec_txdesc *txd; 971 struct mec_txsoft *txs; 972 bus_dmamap_t dmamap; 973 bus_space_tag_t st = sc->sc_st; 974 bus_space_handle_t sh = sc->sc_sh; 975 int error, firsttx, nexttx, opending; 976 int len, bufoff, buflen, nsegs, align, resid, pseg, nptr, slen, i; 977 uint32_t txdcmd; 978 979 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 980 return; 981 982 /* 
	 * Remember the previous txpending and the first transmit descriptor.
	 */
	opending = sc->sc_txpending;
	firsttx = MEC_NEXTTX(sc->sc_txlast);

	DPRINTF(MEC_DEBUG_START,
	    ("mec_start: opending = %d, firsttx = %d\n", opending, firsttx));

	while (sc->sc_txpending < MEC_NTXDESC - 1) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = MEC_NEXTTX(sc->sc_txlast);
		txd = &sc->sc_txdesc[nexttx];
		txs = &sc->sc_txsoft[nexttx];
		dmamap = txs->txs_dmamap;
		txs->txs_flags = 0;

		buflen = 0;
		bufoff = 0;
		resid = 0;
		nptr = 0;	/* XXX gcc */
		pseg = 0;	/* XXX gcc */

		len = m0->m_pkthdr.len;

		DPRINTF(MEC_DEBUG_START,
		    ("mec_start: len = %d, nexttx = %d, txpending = %d\n",
		    len, nexttx, sc->sc_txpending));

		if (len <= MEC_TXD_BUFSIZE) {
			/*
			 * If a TX packet will fit into the small txdesc
			 * buffer, just copy it there.  Maybe it's faster
			 * than checking alignment and calling bus_dma(9) etc.
			 */
			DPRINTF(MEC_DEBUG_START, ("mec_start: short packet\n"));
			IFQ_DEQUEUE(&ifp->if_snd, m0);

			/*
			 * I don't know if the MEC chip does auto padding,
			 * but do it manually for safety.
			 */
			if (len < ETHER_PAD_LEN) {
				MEC_EVCNT_INCR(&sc->sc_ev_txdpad);
				bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
				m_copydata(m0, 0, len, txd->txd_buf + bufoff);
				memset(txd->txd_buf + bufoff + len, 0,
				    ETHER_PAD_LEN - len);
				len = buflen = ETHER_PAD_LEN;
			} else {
				MEC_EVCNT_INCR(&sc->sc_ev_txdbuf);
				bufoff = MEC_TXD_BUFSTART(len);
				m_copydata(m0, 0, len, txd->txd_buf + bufoff);
				buflen = len;
			}
		} else {
			/*
			 * If the packet won't fit in the static buffer in
			 * the txdesc, we have to use the concatenate
			 * pointers to handle it.
			 */
			DPRINTF(MEC_DEBUG_START, ("mec_start: long packet\n"));
			txs->txs_flags = MEC_TXS_TXDPTR;

			/*
			 * Call bus_dmamap_load_mbuf(9) first to see
			 * how many chains the TX mbuf has.
			 */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
			    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
			if (error == 0) {
				/*
				 * Check the chains which might contain
				 * headers.  They might be heavily fragmented,
				 * and since they should be small enough it's
				 * better to copy them into the txdesc buffer.
				 */
				nsegs = dmamap->dm_nsegs;
				for (pseg = 0; pseg < nsegs; pseg++) {
					slen = dmamap->dm_segs[pseg].ds_len;
					if (buflen + slen >
					    MEC_TXD_BUFSIZE1 - MEC_TXD_ALIGN)
						break;
					buflen += slen;
				}
				/*
				 * Check if the remaining chains can fit into
				 * the concatenate pointers.
				 */
				align = dmamap->dm_segs[pseg].ds_addr &
				    MEC_TXD_ALIGNMASK;
				if (align > 0) {
					/*
					 * If the first chain isn't uint64_t
					 * aligned, append the unaligned part
					 * to the txdesc buffer too.
					 */
					resid = MEC_TXD_ALIGN - align;
					buflen += resid;
					for (; pseg < nsegs; pseg++) {
						slen =
						    dmamap->dm_segs[pseg].ds_len;
						if (slen > resid)
							break;
						resid -= slen;
					}
				} else if (pseg == 0) {
					/*
					 * In this case, the first chain is
					 * uint64_t aligned but it's too long
					 * to put into the txdesc buf.
					 * We have to put some data into the
					 * txdesc buf even in this case,
					 * so put MEC_TXD_ALIGN bytes there.
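					 *
					 * Concretely, buflen = resid =
					 * MEC_TXD_ALIGN (8) here, so 8 bytes
					 * go into txd_buf and the concatenate
					 * pointer set up below starts at
					 * ds_addr + 8, which stays uint64_t
					 * aligned.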
					 */
					buflen = resid = MEC_TXD_ALIGN;
				}
				nptr = nsegs - pseg;
				if (nptr <= MEC_NTXPTR) {
					bufoff = MEC_TXD_BUFSTART(buflen);

					/*
					 * Check if all the remaining chains
					 * are uint64_t aligned.
					 */
					align = 0;
					for (i = pseg + 1; i < nsegs; i++)
						align |=
						    dmamap->dm_segs[i].ds_addr
						    & MEC_TXD_ALIGNMASK;
					if (align != 0) {
						/* chains are not aligned */
						error = -1;
					}
				} else {
					/* The TX mbuf chains don't fit. */
					error = -1;
				}
				if (error == -1)
					bus_dmamap_unload(sc->sc_dmat, dmamap);
			}
			if (error != 0) {
				/*
				 * The TX mbuf chains can't be put into
				 * the concatenate buffers.  In this case,
				 * we have to allocate a new contiguous mbuf
				 * and copy data into it.
				 *
				 * Even in this case, the Ethernet header in
				 * the TX mbuf might be unaligned and trailing
				 * data might be word aligned, so put 2 byte
				 * (MEC_ETHER_ALIGN) padding at the top of the
				 * allocated mbuf and copy TX packets.
				 * 6 bytes (MEC_TXD_ALIGN - MEC_ETHER_ALIGN)
				 * at the top of the new mbuf won't be uint64_t
				 * aligned, but we have to put some data into
				 * the txdesc buffer anyway even if the buffer
				 * is uint64_t aligned.
				 */
				DPRINTF(MEC_DEBUG_START|MEC_DEBUG_TXSEGS,
				    ("mec_start: re-allocating mbuf\n"));

				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					printf("%s: unable to allocate "
					    "TX mbuf\n",
					    device_xname(sc->sc_dev));
					break;
				}
				if (len > (MHLEN - MEC_ETHER_ALIGN)) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						printf("%s: unable to allocate "
						    "TX cluster\n",
						    device_xname(sc->sc_dev));
						m_freem(m);
						break;
					}
				}
				m->m_data += MEC_ETHER_ALIGN;

				/*
				 * Copy the whole data (including the
				 * unaligned part) for the following
				 * bpf_mtap().
				 */
				m_copydata(m0, 0, len, mtod(m, void *));
				m->m_pkthdr.len = m->m_len = len;
				error = bus_dmamap_load_mbuf(sc->sc_dmat,
				    dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
				if (dmamap->dm_nsegs > 1) {
					/* should not happen, but for sanity */
					bus_dmamap_unload(sc->sc_dmat, dmamap);
					error = -1;
				}
				if (error != 0) {
					printf("%s: unable to load TX buffer, "
					    "error = %d\n",
					    device_xname(sc->sc_dev), error);
					m_freem(m);
					break;
				}
				/*
				 * Only the first segment should be put into
				 * the concatenate pointer in this case.
				 */
				pseg = 0;
				nptr = 1;

				/*
				 * Set the length of the unaligned part which
				 * will be copied into the txdesc buffer.
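				 *
				 * With MEC_TXD_ALIGN (8) and MEC_ETHER_ALIGN
				 * (2) this is 6 bytes: the frame starts 2
				 * bytes into the new mbuf, so after copying
				 * those 6 bytes into txd_buf the rest of the
				 * data falls on an 8-byte boundary and is
				 * described by concatenate pointer 1 below.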
1200 */ 1201 buflen = MEC_TXD_ALIGN - MEC_ETHER_ALIGN; 1202 bufoff = MEC_TXD_BUFSTART(buflen); 1203 resid = buflen; 1204 #ifdef MEC_EVENT_COUNTERS 1205 MEC_EVCNT_INCR(&sc->sc_ev_txmbuf); 1206 if (len <= 160) 1207 MEC_EVCNT_INCR(&sc->sc_ev_txmbufa); 1208 else if (len <= 256) 1209 MEC_EVCNT_INCR(&sc->sc_ev_txmbufb); 1210 else if (len <= 512) 1211 MEC_EVCNT_INCR(&sc->sc_ev_txmbufc); 1212 else if (len <= 1024) 1213 MEC_EVCNT_INCR(&sc->sc_ev_txmbufd); 1214 else 1215 MEC_EVCNT_INCR(&sc->sc_ev_txmbufe); 1216 #endif 1217 } 1218 #ifdef MEC_EVENT_COUNTERS 1219 else { 1220 MEC_EVCNT_INCR(&sc->sc_ev_txptrs); 1221 if (nptr == 1) { 1222 MEC_EVCNT_INCR(&sc->sc_ev_txptr1); 1223 if (len <= 160) 1224 MEC_EVCNT_INCR( 1225 &sc->sc_ev_txptr1a); 1226 else if (len <= 256) 1227 MEC_EVCNT_INCR( 1228 &sc->sc_ev_txptr1b); 1229 else if (len <= 512) 1230 MEC_EVCNT_INCR( 1231 &sc->sc_ev_txptr1c); 1232 else if (len <= 1024) 1233 MEC_EVCNT_INCR( 1234 &sc->sc_ev_txptr1d); 1235 else 1236 MEC_EVCNT_INCR( 1237 &sc->sc_ev_txptr1e); 1238 } else if (nptr == 2) { 1239 MEC_EVCNT_INCR(&sc->sc_ev_txptr2); 1240 if (len <= 160) 1241 MEC_EVCNT_INCR( 1242 &sc->sc_ev_txptr2a); 1243 else if (len <= 256) 1244 MEC_EVCNT_INCR( 1245 &sc->sc_ev_txptr2b); 1246 else if (len <= 512) 1247 MEC_EVCNT_INCR( 1248 &sc->sc_ev_txptr2c); 1249 else if (len <= 1024) 1250 MEC_EVCNT_INCR( 1251 &sc->sc_ev_txptr2d); 1252 else 1253 MEC_EVCNT_INCR( 1254 &sc->sc_ev_txptr2e); 1255 } else if (nptr == 3) { 1256 MEC_EVCNT_INCR(&sc->sc_ev_txptr3); 1257 if (len <= 160) 1258 MEC_EVCNT_INCR( 1259 &sc->sc_ev_txptr3a); 1260 else if (len <= 256) 1261 MEC_EVCNT_INCR( 1262 &sc->sc_ev_txptr3b); 1263 else if (len <= 512) 1264 MEC_EVCNT_INCR( 1265 &sc->sc_ev_txptr3c); 1266 else if (len <= 1024) 1267 MEC_EVCNT_INCR( 1268 &sc->sc_ev_txptr3d); 1269 else 1270 MEC_EVCNT_INCR( 1271 &sc->sc_ev_txptr3e); 1272 } 1273 if (pseg == 0) 1274 MEC_EVCNT_INCR(&sc->sc_ev_txptrc0); 1275 else if (pseg == 1) 1276 MEC_EVCNT_INCR(&sc->sc_ev_txptrc1); 1277 else if (pseg == 2) 1278 MEC_EVCNT_INCR(&sc->sc_ev_txptrc2); 1279 else if (pseg == 3) 1280 MEC_EVCNT_INCR(&sc->sc_ev_txptrc3); 1281 else if (pseg == 4) 1282 MEC_EVCNT_INCR(&sc->sc_ev_txptrc4); 1283 else if (pseg == 5) 1284 MEC_EVCNT_INCR(&sc->sc_ev_txptrc5); 1285 else 1286 MEC_EVCNT_INCR(&sc->sc_ev_txptrc6); 1287 if (buflen <= 8) 1288 MEC_EVCNT_INCR(&sc->sc_ev_txptrh0); 1289 else if (buflen <= 16) 1290 MEC_EVCNT_INCR(&sc->sc_ev_txptrh1); 1291 else if (buflen <= 32) 1292 MEC_EVCNT_INCR(&sc->sc_ev_txptrh2); 1293 else if (buflen <= 64) 1294 MEC_EVCNT_INCR(&sc->sc_ev_txptrh3); 1295 else if (buflen <= 80) 1296 MEC_EVCNT_INCR(&sc->sc_ev_txptrh4); 1297 else 1298 MEC_EVCNT_INCR(&sc->sc_ev_txptrh5); 1299 } 1300 #endif 1301 m_copydata(m0, 0, buflen, txd->txd_buf + bufoff); 1302 1303 IFQ_DEQUEUE(&ifp->if_snd, m0); 1304 if (m != NULL) { 1305 m_freem(m0); 1306 m0 = m; 1307 } 1308 1309 /* 1310 * sync the DMA map for TX mbuf 1311 */ 1312 bus_dmamap_sync(sc->sc_dmat, dmamap, buflen, 1313 len - buflen, BUS_DMASYNC_PREWRITE); 1314 } 1315 1316 #if NBPFILTER > 0 1317 /* 1318 * Pass packet to bpf if there is a listener. 1319 */ 1320 if (ifp->if_bpf) 1321 bpf_mtap(ifp->if_bpf, m0); 1322 #endif 1323 MEC_EVCNT_INCR(&sc->sc_ev_txpkts); 1324 1325 /* 1326 * setup the transmit descriptor. 1327 */ 1328 txdcmd = TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen) | (len - 1); 1329 1330 /* 1331 * Set MEC_TXCMD_TXINT every MEC_NTXDESC_INTR packets 1332 * if more than half txdescs have been queued 1333 * because TX_EMPTY interrupts will rarely happen 1334 * if TX queue is so stacked. 
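		 *
		 * Concretely: once more than MEC_NTXDESC / 2 (32) descriptors
		 * are pending, MEC_TXCMD_TXINT is requested on every
		 * descriptor whose index is a multiple of MEC_NTXDESC_INTR
		 * (8), so completions are still reaped regularly.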
1335 */ 1336 if (sc->sc_txpending > (MEC_NTXDESC / 2) && 1337 (nexttx & (MEC_NTXDESC_INTR - 1)) == 0) 1338 txdcmd |= MEC_TXCMD_TXINT; 1339 1340 if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) { 1341 bus_dma_segment_t *segs = dmamap->dm_segs; 1342 1343 DPRINTF(MEC_DEBUG_TXSEGS, 1344 ("mec_start: nsegs = %d, pseg = %d, nptr = %d\n", 1345 dmamap->dm_nsegs, pseg, nptr)); 1346 1347 switch (nptr) { 1348 case 3: 1349 KASSERT((segs[pseg + 2].ds_addr & 1350 MEC_TXD_ALIGNMASK) == 0); 1351 txdcmd |= MEC_TXCMD_PTR3; 1352 txd->txd_ptr[2] = 1353 TXPTR_LEN(segs[pseg + 2].ds_len - 1) | 1354 segs[pseg + 2].ds_addr; 1355 /* FALLTHROUGH */ 1356 case 2: 1357 KASSERT((segs[pseg + 1].ds_addr & 1358 MEC_TXD_ALIGNMASK) == 0); 1359 txdcmd |= MEC_TXCMD_PTR2; 1360 txd->txd_ptr[1] = 1361 TXPTR_LEN(segs[pseg + 1].ds_len - 1) | 1362 segs[pseg + 1].ds_addr; 1363 /* FALLTHROUGH */ 1364 case 1: 1365 txdcmd |= MEC_TXCMD_PTR1; 1366 txd->txd_ptr[0] = 1367 TXPTR_LEN(segs[pseg].ds_len - resid - 1) | 1368 (segs[pseg].ds_addr + resid); 1369 break; 1370 default: 1371 panic("%s: impossible nptr in %s", 1372 device_xname(sc->sc_dev), __func__); 1373 /* NOTREACHED */ 1374 } 1375 /* 1376 * Store a pointer to the packet so we can 1377 * free it later. 1378 */ 1379 txs->txs_mbuf = m0; 1380 } else { 1381 /* 1382 * In this case all data are copied to buffer in txdesc, 1383 * we can free TX mbuf here. 1384 */ 1385 m_freem(m0); 1386 } 1387 txd->txd_cmd = txdcmd; 1388 1389 DPRINTF(MEC_DEBUG_START, 1390 ("mec_start: txd_cmd = 0x%016llx\n", txd->txd_cmd)); 1391 DPRINTF(MEC_DEBUG_START, 1392 ("mec_start: txd_ptr[0] = 0x%016llx\n", txd->txd_ptr[0])); 1393 DPRINTF(MEC_DEBUG_START, 1394 ("mec_start: txd_ptr[1] = 0x%016llx\n", txd->txd_ptr[1])); 1395 DPRINTF(MEC_DEBUG_START, 1396 ("mec_start: txd_ptr[2] = 0x%016llx\n", txd->txd_ptr[2])); 1397 DPRINTF(MEC_DEBUG_START, 1398 ("mec_start: len = %d (0x%04x), buflen = %d (0x%02x)\n", 1399 len, len, buflen, buflen)); 1400 1401 /* sync TX descriptor */ 1402 MEC_TXDESCSYNC(sc, nexttx, 1403 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1404 1405 /* start TX */ 1406 bus_space_write_8(st, sh, MEC_TX_RING_PTR, MEC_NEXTTX(nexttx)); 1407 1408 /* advance the TX pointer. */ 1409 sc->sc_txpending++; 1410 sc->sc_txlast = nexttx; 1411 } 1412 1413 if (sc->sc_txpending == MEC_NTXDESC - 1) { 1414 /* No more slots; notify upper layer. */ 1415 MEC_EVCNT_INCR(&sc->sc_ev_txdstall); 1416 ifp->if_flags |= IFF_OACTIVE; 1417 } 1418 1419 if (sc->sc_txpending != opending) { 1420 /* 1421 * If the transmitter was idle, 1422 * reset the txdirty pointer and re-enable TX interrupt. 1423 */ 1424 if (opending == 0) { 1425 sc->sc_txdirty = firsttx; 1426 bus_space_write_8(st, sh, MEC_TX_ALIAS, 1427 MEC_TX_ALIAS_INT_ENABLE); 1428 } 1429 1430 /* Set a watchdog timer in case the chip flakes out. 
*/ 1431 ifp->if_timer = 5; 1432 } 1433 } 1434 1435 static void 1436 mec_stop(struct ifnet *ifp, int disable) 1437 { 1438 struct mec_softc *sc = ifp->if_softc; 1439 struct mec_txsoft *txs; 1440 int i; 1441 1442 DPRINTF(MEC_DEBUG_STOP, ("mec_stop\n")); 1443 1444 ifp->if_timer = 0; 1445 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1446 1447 callout_stop(&sc->sc_tick_ch); 1448 mii_down(&sc->sc_mii); 1449 1450 /* release any TX buffers */ 1451 for (i = 0; i < MEC_NTXDESC; i++) { 1452 txs = &sc->sc_txsoft[i]; 1453 if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) { 1454 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1455 m_freem(txs->txs_mbuf); 1456 txs->txs_mbuf = NULL; 1457 } 1458 } 1459 } 1460 1461 static int 1462 mec_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1463 { 1464 int s, error; 1465 1466 s = splnet(); 1467 1468 error = ether_ioctl(ifp, cmd, data); 1469 if (error == ENETRESET) { 1470 /* 1471 * Multicast list has changed; set the hardware filter 1472 * accordingly. 1473 */ 1474 if (ifp->if_flags & IFF_RUNNING) 1475 error = mec_init(ifp); 1476 else 1477 error = 0; 1478 } 1479 1480 /* Try to get more packets going. */ 1481 mec_start(ifp); 1482 1483 splx(s); 1484 return error; 1485 } 1486 1487 static void 1488 mec_watchdog(struct ifnet *ifp) 1489 { 1490 struct mec_softc *sc = ifp->if_softc; 1491 1492 printf("%s: device timeout\n", device_xname(sc->sc_dev)); 1493 ifp->if_oerrors++; 1494 1495 mec_init(ifp); 1496 } 1497 1498 static void 1499 mec_tick(void *arg) 1500 { 1501 struct mec_softc *sc = arg; 1502 int s; 1503 1504 s = splnet(); 1505 mii_tick(&sc->sc_mii); 1506 splx(s); 1507 1508 callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc); 1509 } 1510 1511 static void 1512 mec_setfilter(struct mec_softc *sc) 1513 { 1514 struct ethercom *ec = &sc->sc_ethercom; 1515 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1516 struct ether_multi *enm; 1517 struct ether_multistep step; 1518 bus_space_tag_t st = sc->sc_st; 1519 bus_space_handle_t sh = sc->sc_sh; 1520 uint64_t mchash; 1521 uint32_t control, hash; 1522 int mcnt; 1523 1524 control = bus_space_read_8(st, sh, MEC_MAC_CONTROL); 1525 control &= ~MEC_MAC_FILTER_MASK; 1526 1527 if (ifp->if_flags & IFF_PROMISC) { 1528 control |= MEC_MAC_FILTER_PROMISC; 1529 bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL); 1530 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control); 1531 return; 1532 } 1533 1534 mcnt = 0; 1535 mchash = 0; 1536 ETHER_FIRST_MULTI(step, ec, enm); 1537 while (enm != NULL) { 1538 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1539 /* set allmulti for a range of multicast addresses */ 1540 control |= MEC_MAC_FILTER_ALLMULTI; 1541 bus_space_write_8(st, sh, MEC_MULTICAST, 1542 0xffffffffffffffffULL); 1543 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control); 1544 return; 1545 } 1546 1547 #define mec_calchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26) 1548 1549 hash = mec_calchash(enm->enm_addrlo); 1550 mchash |= 1 << hash; 1551 mcnt++; 1552 ETHER_NEXT_MULTI(step, enm); 1553 } 1554 1555 ifp->if_flags &= ~IFF_ALLMULTI; 1556 1557 if (mcnt > 0) 1558 control |= MEC_MAC_FILTER_MATCHMULTI; 1559 1560 bus_space_write_8(st, sh, MEC_MULTICAST, mchash); 1561 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control); 1562 } 1563 1564 static int 1565 mec_intr(void *arg) 1566 { 1567 struct mec_softc *sc = arg; 1568 bus_space_tag_t st = sc->sc_st; 1569 bus_space_handle_t sh = sc->sc_sh; 1570 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1571 uint32_t statreg, statack, txptr; 1572 int handled, sent; 1573 1574 
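	/*
	 * Interrupt handling overview (descriptive note): read
	 * MEC_INT_STATUS, acknowledge the asserted bits, then dispatch RX
	 * work (RX threshold / RX FIFO underflow) and TX completions
	 * (TX empty / packet sent / abort).  Fatal conditions such as link
	 * failure or DMA underflow fall through to a full reinitialization
	 * via mec_init().
	 */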
	DPRINTF(MEC_DEBUG_INTR, ("mec_intr: called\n"));

	handled = sent = 0;

	for (;;) {
		statreg = bus_space_read_8(st, sh, MEC_INT_STATUS);

		DPRINTF(MEC_DEBUG_INTR,
		    ("mec_intr: INT_STAT = 0x%08x\n", statreg));

		statack = statreg & MEC_INT_STATUS_MASK;
		if (statack == 0)
			break;
		bus_space_write_8(st, sh, MEC_INT_STATUS, statack);

		handled = 1;

		if (statack &
		    (MEC_INT_RX_THRESHOLD |
		     MEC_INT_RX_FIFO_UNDERFLOW)) {
			mec_rxintr(sc);
		}

		if (statack &
		    (MEC_INT_TX_EMPTY |
		     MEC_INT_TX_PACKET_SENT |
		     MEC_INT_TX_ABORT)) {
			txptr = (statreg & MEC_INT_TX_RING_BUFFER_ALIAS)
			    >> MEC_INT_TX_RING_BUFFER_SHIFT;
			mec_txintr(sc, txptr);
			sent = 1;
			if ((statack & MEC_INT_TX_EMPTY) != 0) {
				/*
				 * Disable the TX interrupt to stop further
				 * TX empty interrupts.
				 */
				bus_space_write_8(st, sh, MEC_TX_ALIAS, 0);
				DPRINTF(MEC_DEBUG_INTR,
				    ("mec_intr: disable TX_INT\n"));
			}
#ifdef MEC_EVENT_COUNTERS
			if ((statack & MEC_INT_TX_EMPTY) != 0)
				MEC_EVCNT_INCR(&sc->sc_ev_txempty);
			if ((statack & MEC_INT_TX_PACKET_SENT) != 0)
				MEC_EVCNT_INCR(&sc->sc_ev_txsent);
#endif
		}

		if (statack &
		    (MEC_INT_TX_LINK_FAIL |
		     MEC_INT_TX_MEM_ERROR |
		     MEC_INT_TX_ABORT |
		     MEC_INT_RX_FIFO_UNDERFLOW |
		     MEC_INT_RX_DMA_UNDERFLOW)) {
			printf("%s: mec_intr: interrupt status = 0x%08x\n",
			    device_xname(sc->sc_dev), statreg);
			mec_init(ifp);
			break;
		}
	}

	if (sent && !IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* try to get more packets going */
		mec_start(ifp);
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->sc_rnd_source, statreg);
#endif

	return handled;
}

static void
mec_rxintr(struct mec_softc *sc)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	struct mec_rxdesc *rxd;
	uint64_t rxstat;
	u_int len;
	int i;

	DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: called\n"));

	for (i = sc->sc_rxptr;; i = MEC_NEXTRX(i)) {
		rxd = &sc->sc_rxdesc[i];

		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD);
		rxstat = rxd->rxd_stat;

		DPRINTF(MEC_DEBUG_RXINTR,
		    ("mec_rxintr: rxstat = 0x%016llx, rxptr = %d\n",
		    rxstat, i));
		DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: rxfifo = 0x%08x\n",
		    (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO)));

		if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) {
			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		len = rxstat & MEC_RXSTAT_LEN;

		if (len < ETHER_MIN_LEN ||
		    len > (MCLBYTES - MEC_ETHER_ALIGN)) {
			/* invalid length packet; drop it. */
			DPRINTF(MEC_DEBUG_RXINTR,
			    ("mec_rxintr: wrong packet\n"));
 dropit:
			ifp->if_ierrors++;
			rxd->rxd_stat = 0;
			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
			bus_space_write_8(st, sh, MEC_MCL_RX_FIFO,
			    MEC_CDRXADDR(sc, i));
			continue;
		}

		/*
		 * If 802.1Q VLAN MTU is enabled, ignore the bad packet error.
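		 * (A VLAN-tagged frame can legitimately exceed the normal
		 * maximum frame length, which the chip appears to flag as
		 * MEC_RXSTAT_BADPACKET, so that bit is masked off before
		 * the error checks below.)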
1697 */ 1698 if ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) != 0) 1699 rxstat &= ~MEC_RXSTAT_BADPACKET; 1700 1701 if (rxstat & 1702 (MEC_RXSTAT_BADPACKET | 1703 MEC_RXSTAT_LONGEVENT | 1704 MEC_RXSTAT_INVALID | 1705 MEC_RXSTAT_CRCERROR | 1706 MEC_RXSTAT_VIOLATION)) { 1707 printf("%s: mec_rxintr: status = 0x%016llx\n", 1708 device_xname(sc->sc_dev), rxstat); 1709 goto dropit; 1710 } 1711 1712 /* 1713 * The MEC includes the CRC with every packet. Trim 1714 * it off here. 1715 */ 1716 len -= ETHER_CRC_LEN; 1717 1718 /* 1719 * now allocate an mbuf (and possibly a cluster) to hold 1720 * the received packet. 1721 */ 1722 MGETHDR(m, M_DONTWAIT, MT_DATA); 1723 if (m == NULL) { 1724 printf("%s: unable to allocate RX mbuf\n", 1725 device_xname(sc->sc_dev)); 1726 goto dropit; 1727 } 1728 if (len > (MHLEN - MEC_ETHER_ALIGN)) { 1729 MCLGET(m, M_DONTWAIT); 1730 if ((m->m_flags & M_EXT) == 0) { 1731 printf("%s: unable to allocate RX cluster\n", 1732 device_xname(sc->sc_dev)); 1733 m_freem(m); 1734 m = NULL; 1735 goto dropit; 1736 } 1737 } 1738 1739 /* 1740 * Note MEC chip seems to insert 2 byte padding at the top of 1741 * RX buffer, but we copy whole buffer to avoid unaligned copy. 1742 */ 1743 MEC_RXBUFSYNC(sc, i, len, BUS_DMASYNC_POSTREAD); 1744 memcpy(mtod(m, void *), rxd->rxd_buf, MEC_ETHER_ALIGN + len); 1745 MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD); 1746 m->m_data += MEC_ETHER_ALIGN; 1747 1748 /* put RX buffer into FIFO again */ 1749 rxd->rxd_stat = 0; 1750 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD); 1751 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i)); 1752 1753 m->m_pkthdr.rcvif = ifp; 1754 m->m_pkthdr.len = m->m_len = len; 1755 1756 ifp->if_ipackets++; 1757 1758 #if NBPFILTER > 0 1759 /* 1760 * Pass this up to any BPF listeners, but only 1761 * pass it up the stack if it's for us. 1762 */ 1763 if (ifp->if_bpf) 1764 bpf_mtap(ifp->if_bpf, m); 1765 #endif 1766 1767 /* Pass it on. 
*/ 1768 (*ifp->if_input)(ifp, m); 1769 } 1770 1771 /* update RX pointer */ 1772 sc->sc_rxptr = i; 1773 } 1774 1775 static void 1776 mec_txintr(struct mec_softc *sc, uint32_t txptr) 1777 { 1778 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1779 struct mec_txdesc *txd; 1780 struct mec_txsoft *txs; 1781 bus_dmamap_t dmamap; 1782 uint64_t txstat; 1783 int i; 1784 u_int col; 1785 1786 DPRINTF(MEC_DEBUG_TXINTR, ("mec_txintr: called\n")); 1787 1788 for (i = sc->sc_txdirty; i != txptr && sc->sc_txpending != 0; 1789 i = MEC_NEXTTX(i), sc->sc_txpending--) { 1790 txd = &sc->sc_txdesc[i]; 1791 1792 MEC_TXCMDSYNC(sc, i, 1793 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1794 1795 txstat = txd->txd_stat; 1796 DPRINTF(MEC_DEBUG_TXINTR, 1797 ("mec_txintr: dirty = %d, txstat = 0x%016llx\n", 1798 i, txstat)); 1799 if ((txstat & MEC_TXSTAT_SENT) == 0) { 1800 MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD); 1801 break; 1802 } 1803 1804 txs = &sc->sc_txsoft[i]; 1805 if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) { 1806 dmamap = txs->txs_dmamap; 1807 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, 1808 dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1809 bus_dmamap_unload(sc->sc_dmat, dmamap); 1810 m_freem(txs->txs_mbuf); 1811 txs->txs_mbuf = NULL; 1812 } 1813 1814 col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT; 1815 ifp->if_collisions += col; 1816 1817 if ((txstat & MEC_TXSTAT_SUCCESS) == 0) { 1818 printf("%s: TX error: txstat = 0x%016llx\n", 1819 device_xname(sc->sc_dev), txstat); 1820 ifp->if_oerrors++; 1821 } else 1822 ifp->if_opackets++; 1823 } 1824 1825 /* update the dirty TX buffer pointer */ 1826 sc->sc_txdirty = i; 1827 DPRINTF(MEC_DEBUG_INTR, 1828 ("mec_txintr: sc_txdirty = %2d, sc_txpending = %2d\n", 1829 sc->sc_txdirty, sc->sc_txpending)); 1830 1831 /* cancel the watchdog timer if there are no pending TX packets */ 1832 if (sc->sc_txpending == 0) 1833 ifp->if_timer = 0; 1834 if (sc->sc_txpending < MEC_NTXDESC - MEC_NTXDESC_RSVD) 1835 ifp->if_flags &= ~IFF_OACTIVE; 1836 } 1837 1838 static void 1839 mec_shutdown(void *arg) 1840 { 1841 struct mec_softc *sc = arg; 1842 1843 mec_stop(&sc->sc_ethercom.ec_if, 1); 1844 /* make sure to stop DMA etc. */ 1845 mec_reset(sc); 1846 } 1847
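
/*
 * Illustrative sketch, not part of the original driver: mec_setfilter()
 * above hashes each multicast address with mec_calchash() (the top 6 bits
 * of the big-endian CRC32 of the address) and sets the corresponding bit
 * in the 64-bit MEC_MULTICAST filter.  The function name
 * mec_mchash_example and the sample group address are made up for
 * illustration; the block is disabled.
 */
#if 0
static uint64_t
mec_mchash_example(void)
{
	/* Ethernet mapping of the all-hosts group 224.0.0.1 */
	static const uint8_t allhosts[ETHER_ADDR_LEN] =
	    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint64_t mchash = 0;

	/* same computation as the loop in mec_setfilter(), as a 64-bit shift */
	mchash |= (uint64_t)1 << mec_calchash(allhosts);

	return mchash;
}
#endif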