1 /* $NetBSD: if_mec.c,v 1.42 2010/01/19 22:06:22 pooka Exp $ */ 2 3 /*- 4 * Copyright (c) 2004, 2008 Izumi Tsutsui. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 /* 28 * Copyright (c) 2003 Christopher SEKIYA 29 * All rights reserved. 30 * 31 * Redistribution and use in source and binary forms, with or without 32 * modification, are permitted provided that the following conditions 33 * are met: 34 * 1. Redistributions of source code must retain the above copyright 35 * notice, this list of conditions and the following disclaimer. 36 * 2. Redistributions in binary form must reproduce the above copyright 37 * notice, this list of conditions and the following disclaimer in the 38 * documentation and/or other materials provided with the distribution. 39 * 3. All advertising materials mentioning features or use of this software 40 * must display the following acknowledgement: 41 * This product includes software developed for the 42 * NetBSD Project. See http://www.NetBSD.org/ for 43 * information about NetBSD. 44 * 4. The name of the author may not be used to endorse or promote products 45 * derived from this software without specific prior written permission. 46 * 47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 50 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 51 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 56 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
57 */ 58 59 /* 60 * MACE MAC-110 Ethernet driver 61 */ 62 63 #include <sys/cdefs.h> 64 __KERNEL_RCSID(0, "$NetBSD: if_mec.c,v 1.42 2010/01/19 22:06:22 pooka Exp $"); 65 66 #include "opt_ddb.h" 67 #include "rnd.h" 68 69 #include <sys/param.h> 70 #include <sys/systm.h> 71 #include <sys/device.h> 72 #include <sys/callout.h> 73 #include <sys/mbuf.h> 74 #include <sys/malloc.h> 75 #include <sys/kernel.h> 76 #include <sys/socket.h> 77 #include <sys/ioctl.h> 78 #include <sys/errno.h> 79 80 #if NRND > 0 81 #include <sys/rnd.h> 82 #endif 83 84 #include <net/if.h> 85 #include <net/if_dl.h> 86 #include <net/if_media.h> 87 #include <net/if_ether.h> 88 89 #include <netinet/in.h> 90 #include <netinet/in_systm.h> 91 #include <netinet/ip.h> 92 #include <netinet/tcp.h> 93 #include <netinet/udp.h> 94 95 #include <net/bpf.h> 96 97 #include <machine/bus.h> 98 #include <machine/intr.h> 99 #include <machine/machtype.h> 100 101 #include <dev/mii/mii.h> 102 #include <dev/mii/miivar.h> 103 104 #include <sgimips/mace/macevar.h> 105 #include <sgimips/mace/if_mecreg.h> 106 107 #include <dev/arcbios/arcbios.h> 108 #include <dev/arcbios/arcbiosvar.h> 109 110 /* #define MEC_DEBUG */ 111 112 #ifdef MEC_DEBUG 113 #define MEC_DEBUG_RESET 0x01 114 #define MEC_DEBUG_START 0x02 115 #define MEC_DEBUG_STOP 0x04 116 #define MEC_DEBUG_INTR 0x08 117 #define MEC_DEBUG_RXINTR 0x10 118 #define MEC_DEBUG_TXINTR 0x20 119 #define MEC_DEBUG_TXSEGS 0x40 120 uint32_t mec_debug = 0; 121 #define DPRINTF(x, y) if (mec_debug & (x)) printf y 122 #else 123 #define DPRINTF(x, y) /* nothing */ 124 #endif 125 126 /* #define MEC_EVENT_COUNTERS */ 127 128 #ifdef MEC_EVENT_COUNTERS 129 #define MEC_EVCNT_INCR(ev) (ev)->ev_count++ 130 #else 131 #define MEC_EVCNT_INCR(ev) do {} while (/* CONSTCOND */ 0) 132 #endif 133 134 /* 135 * Transmit descriptor list size 136 */ 137 #define MEC_NTXDESC 64 138 #define MEC_NTXDESC_MASK (MEC_NTXDESC - 1) 139 #define MEC_NEXTTX(x) (((x) + 1) & MEC_NTXDESC_MASK) 140 #define MEC_NTXDESC_RSVD 4 141 #define MEC_NTXDESC_INTR 8 142 143 /* 144 * software state for TX 145 */ 146 struct mec_txsoft { 147 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 148 bus_dmamap_t txs_dmamap; /* our DMA map */ 149 uint32_t txs_flags; 150 #define MEC_TXS_BUFLEN_MASK 0x0000007f /* data len in txd_buf */ 151 #define MEC_TXS_TXDPTR 0x00000080 /* concat txd_ptr is used */ 152 }; 153 154 /* 155 * Transmit buffer descriptor 156 */ 157 #define MEC_TXDESCSIZE 128 158 #define MEC_NTXPTR 3 159 #define MEC_TXD_BUFOFFSET sizeof(uint64_t) 160 #define MEC_TXD_BUFOFFSET1 \ 161 (sizeof(uint64_t) + sizeof(uint64_t) * MEC_NTXPTR) 162 #define MEC_TXD_BUFSIZE (MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET) 163 #define MEC_TXD_BUFSIZE1 (MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET1) 164 #define MEC_TXD_BUFSTART(len) (MEC_TXD_BUFSIZE - (len)) 165 #define MEC_TXD_ALIGN 8 166 #define MEC_TXD_ALIGNMASK (MEC_TXD_ALIGN - 1) 167 #define MEC_TXD_ROUNDUP(addr) \ 168 (((addr) + MEC_TXD_ALIGNMASK) & ~(uint64_t)MEC_TXD_ALIGNMASK) 169 #define MEC_NTXSEG 16 170 171 struct mec_txdesc { 172 volatile uint64_t txd_cmd; 173 #define MEC_TXCMD_DATALEN 0x000000000000ffff /* data length */ 174 #define MEC_TXCMD_BUFSTART 0x00000000007f0000 /* start byte offset */ 175 #define TXCMD_BUFSTART(x) ((x) << 16) 176 #define MEC_TXCMD_TERMDMA 0x0000000000800000 /* stop DMA on abort */ 177 #define MEC_TXCMD_TXINT 0x0000000001000000 /* INT after TX done */ 178 #define MEC_TXCMD_PTR1 0x0000000002000000 /* valid 1st txd_ptr */ 179 #define MEC_TXCMD_PTR2 0x0000000004000000 /* valid 2nd txd_ptr */ 180 #define 
MEC_TXCMD_PTR3 0x0000000008000000 /* valid 3rd txd_ptr */ 181 #define MEC_TXCMD_UNUSED 0xfffffffff0000000ULL /* should be zero */ 182 183 #define txd_stat txd_cmd 184 #define MEC_TXSTAT_LEN 0x000000000000ffff /* TX length */ 185 #define MEC_TXSTAT_COLCNT 0x00000000000f0000 /* collision count */ 186 #define MEC_TXSTAT_COLCNT_SHIFT 16 187 #define MEC_TXSTAT_LATE_COL 0x0000000000100000 /* late collision */ 188 #define MEC_TXSTAT_CRCERROR 0x0000000000200000 /* */ 189 #define MEC_TXSTAT_DEFERRED 0x0000000000400000 /* */ 190 #define MEC_TXSTAT_SUCCESS 0x0000000000800000 /* TX complete */ 191 #define MEC_TXSTAT_TOOBIG 0x0000000001000000 /* */ 192 #define MEC_TXSTAT_UNDERRUN 0x0000000002000000 /* */ 193 #define MEC_TXSTAT_COLLISIONS 0x0000000004000000 /* */ 194 #define MEC_TXSTAT_EXDEFERRAL 0x0000000008000000 /* */ 195 #define MEC_TXSTAT_COLLIDED 0x0000000010000000 /* */ 196 #define MEC_TXSTAT_UNUSED 0x7fffffffe0000000ULL /* should be zero */ 197 #define MEC_TXSTAT_SENT 0x8000000000000000ULL /* packet sent */ 198 199 union { 200 uint64_t txptr[MEC_NTXPTR]; 201 #define MEC_TXPTR_UNUSED2 0x0000000000000007 /* should be zero */ 202 #define MEC_TXPTR_DMAADDR 0x00000000fffffff8 /* TX DMA address */ 203 #define MEC_TXPTR_LEN 0x0000ffff00000000ULL /* buffer length */ 204 #define TXPTR_LEN(x) ((uint64_t)(x) << 32) 205 #define MEC_TXPTR_UNUSED1 0xffff000000000000ULL /* should be zero */ 206 207 uint8_t txbuf[MEC_TXD_BUFSIZE]; 208 } txd_data; 209 #define txd_ptr txd_data.txptr 210 #define txd_buf txd_data.txbuf 211 }; 212 213 /* 214 * Receive buffer size 215 */ 216 #define MEC_NRXDESC 16 217 #define MEC_NRXDESC_MASK (MEC_NRXDESC - 1) 218 #define MEC_NEXTRX(x) (((x) + 1) & MEC_NRXDESC_MASK) 219 220 /* 221 * Receive buffer description 222 */ 223 #define MEC_RXDESCSIZE 4096 /* umm, should be 4kbyte aligned */ 224 #define MEC_RXD_NRXPAD 3 225 #define MEC_RXD_DMAOFFSET (1 + MEC_RXD_NRXPAD) 226 #define MEC_RXD_BUFOFFSET (MEC_RXD_DMAOFFSET * sizeof(uint64_t)) 227 #define MEC_RXD_BUFSIZE (MEC_RXDESCSIZE - MEC_RXD_BUFOFFSET) 228 229 struct mec_rxdesc { 230 volatile uint64_t rxd_stat; 231 #define MEC_RXSTAT_LEN 0x000000000000ffff /* data length */ 232 #define MEC_RXSTAT_VIOLATION 0x0000000000010000 /* code violation (?) */ 233 #define MEC_RXSTAT_UNUSED2 0x0000000000020000 /* unknown (?) 
 */
#define MEC_RXSTAT_CRCERROR	0x0000000000040000	/* CRC error */
#define MEC_RXSTAT_MULTICAST	0x0000000000080000	/* multicast packet */
#define MEC_RXSTAT_BROADCAST	0x0000000000100000	/* broadcast packet */
#define MEC_RXSTAT_INVALID	0x0000000000200000	/* invalid preamble */
#define MEC_RXSTAT_LONGEVENT	0x0000000000400000	/* long packet */
#define MEC_RXSTAT_BADPACKET	0x0000000000800000	/* bad packet */
#define MEC_RXSTAT_CAREVENT	0x0000000001000000	/* carrier event */
#define MEC_RXSTAT_MATCHMCAST	0x0000000002000000	/* match multicast */
#define MEC_RXSTAT_MATCHMAC	0x0000000004000000	/* match MAC */
#define MEC_RXSTAT_SEQNUM	0x00000000f8000000	/* sequence number */
#define MEC_RXSTAT_CKSUM	0x0000ffff00000000ULL	/* IP checksum */
#define RXSTAT_CKSUM(x)	(((uint64_t)(x) & MEC_RXSTAT_CKSUM) >> 32)
#define MEC_RXSTAT_UNUSED1	0x7fff000000000000ULL	/* should be zero */
#define MEC_RXSTAT_RECEIVED	0x8000000000000000ULL	/* set to 1 on RX */
	uint64_t rxd_pad1[MEC_RXD_NRXPAD];
	uint8_t rxd_buf[MEC_RXD_BUFSIZE];
};

/*
 * control structures for DMA ops
 */
struct mec_control_data {
	/*
	 * TX descriptors and buffers
	 */
	struct mec_txdesc mcd_txdesc[MEC_NTXDESC];

	/*
	 * RX descriptors and buffers
	 */
	struct mec_rxdesc mcd_rxdesc[MEC_NRXDESC];
};

/*
 * It _seems_ there are some restrictions on descriptor address:
 *
 *  - Base address of txdescs should be 8kbyte aligned
 *  - Each txdesc should be 128byte aligned
 *  - Each rxdesc should be 4kbyte aligned
 *
 * So we should specify 8kbyte alignment to allocate txdescs.
 * In this case, sizeof(struct mec_txdesc) * MEC_NTXDESC is 8192,
 * so rxdescs are also allocated 4kbyte aligned.
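 *
 * A worked example of the resulting layout (derived from the sizes
 * defined above, not from any hardware documentation):
 *
 *	offset 0x0000:  mcd_txdesc[0]	(64 * 128 bytes = 8192 bytes total)
 *	offset 0x1f80:  mcd_txdesc[63]
 *	offset 0x2000:  mcd_rxdesc[0]	(16 * 4096 bytes)
 *	offset 0x3000:  mcd_rxdesc[1]
 *		:
 *	offset 0x11000: mcd_rxdesc[15]
 *
 * i.e. as long as the base is 8kbyte aligned, every txdesc falls on a
 * 128byte boundary and every rxdesc on a 4kbyte boundary.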
277 */ 278 #define MEC_CONTROL_DATA_ALIGN (8 * 1024) 279 280 #define MEC_CDOFF(x) offsetof(struct mec_control_data, x) 281 #define MEC_CDTXOFF(x) MEC_CDOFF(mcd_txdesc[(x)]) 282 #define MEC_CDRXOFF(x) MEC_CDOFF(mcd_rxdesc[(x)]) 283 284 /* 285 * software state per device 286 */ 287 struct mec_softc { 288 device_t sc_dev; /* generic device structures */ 289 290 bus_space_tag_t sc_st; /* bus_space tag */ 291 bus_space_handle_t sc_sh; /* bus_space handle */ 292 bus_dma_tag_t sc_dmat; /* bus_dma tag */ 293 294 struct ethercom sc_ethercom; /* Ethernet common part */ 295 296 struct mii_data sc_mii; /* MII/media information */ 297 int sc_phyaddr; /* MII address */ 298 struct callout sc_tick_ch; /* tick callout */ 299 300 uint8_t sc_enaddr[ETHER_ADDR_LEN]; /* MAC address */ 301 302 bus_dmamap_t sc_cddmamap; /* bus_dma map for control data */ 303 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr 304 305 /* pointer to allocated control data */ 306 struct mec_control_data *sc_control_data; 307 #define sc_txdesc sc_control_data->mcd_txdesc 308 #define sc_rxdesc sc_control_data->mcd_rxdesc 309 310 /* software state for TX descs */ 311 struct mec_txsoft sc_txsoft[MEC_NTXDESC]; 312 313 int sc_txpending; /* number of TX requests pending */ 314 int sc_txdirty; /* first dirty TX descriptor */ 315 int sc_txlast; /* last used TX descriptor */ 316 317 int sc_rxptr; /* next ready RX buffer */ 318 319 #if NRND > 0 320 rndsource_element_t sc_rnd_source; /* random source */ 321 #endif 322 #ifdef MEC_EVENT_COUNTERS 323 struct evcnt sc_ev_txpkts; /* TX packets queued total */ 324 struct evcnt sc_ev_txdpad; /* TX packets padded in txdesc buf */ 325 struct evcnt sc_ev_txdbuf; /* TX packets copied to txdesc buf */ 326 struct evcnt sc_ev_txptr1; /* TX packets using concat ptr1 */ 327 struct evcnt sc_ev_txptr1a; /* TX packets w/ptr1 ~160bytes */ 328 struct evcnt sc_ev_txptr1b; /* TX packets w/ptr1 ~256bytes */ 329 struct evcnt sc_ev_txptr1c; /* TX packets w/ptr1 ~512bytes */ 330 struct evcnt sc_ev_txptr1d; /* TX packets w/ptr1 ~1024bytes */ 331 struct evcnt sc_ev_txptr1e; /* TX packets w/ptr1 >1024bytes */ 332 struct evcnt sc_ev_txptr2; /* TX packets using concat ptr1,2 */ 333 struct evcnt sc_ev_txptr2a; /* TX packets w/ptr2 ~160bytes */ 334 struct evcnt sc_ev_txptr2b; /* TX packets w/ptr2 ~256bytes */ 335 struct evcnt sc_ev_txptr2c; /* TX packets w/ptr2 ~512bytes */ 336 struct evcnt sc_ev_txptr2d; /* TX packets w/ptr2 ~1024bytes */ 337 struct evcnt sc_ev_txptr2e; /* TX packets w/ptr2 >1024bytes */ 338 struct evcnt sc_ev_txptr3; /* TX packets using concat ptr1,2,3 */ 339 struct evcnt sc_ev_txptr3a; /* TX packets w/ptr3 ~160bytes */ 340 struct evcnt sc_ev_txptr3b; /* TX packets w/ptr3 ~256bytes */ 341 struct evcnt sc_ev_txptr3c; /* TX packets w/ptr3 ~512bytes */ 342 struct evcnt sc_ev_txptr3d; /* TX packets w/ptr3 ~1024bytes */ 343 struct evcnt sc_ev_txptr3e; /* TX packets w/ptr3 >1024bytes */ 344 struct evcnt sc_ev_txmbuf; /* TX packets copied to new mbufs */ 345 struct evcnt sc_ev_txmbufa; /* TX packets w/mbuf ~160bytes */ 346 struct evcnt sc_ev_txmbufb; /* TX packets w/mbuf ~256bytes */ 347 struct evcnt sc_ev_txmbufc; /* TX packets w/mbuf ~512bytes */ 348 struct evcnt sc_ev_txmbufd; /* TX packets w/mbuf ~1024bytes */ 349 struct evcnt sc_ev_txmbufe; /* TX packets w/mbuf >1024bytes */ 350 struct evcnt sc_ev_txptrs; /* TX packets using ptrs total */ 351 struct evcnt sc_ev_txptrc0; /* TX packets w/ptrs no hdr chain */ 352 struct evcnt sc_ev_txptrc1; /* TX packets w/ptrs 1 hdr chain */ 353 struct evcnt sc_ev_txptrc2; /* TX 
packets w/ptrs 2 hdr chains */ 354 struct evcnt sc_ev_txptrc3; /* TX packets w/ptrs 3 hdr chains */ 355 struct evcnt sc_ev_txptrc4; /* TX packets w/ptrs 4 hdr chains */ 356 struct evcnt sc_ev_txptrc5; /* TX packets w/ptrs 5 hdr chains */ 357 struct evcnt sc_ev_txptrc6; /* TX packets w/ptrs >5 hdr chains */ 358 struct evcnt sc_ev_txptrh0; /* TX packets w/ptrs ~8bytes hdr */ 359 struct evcnt sc_ev_txptrh1; /* TX packets w/ptrs ~16bytes hdr */ 360 struct evcnt sc_ev_txptrh2; /* TX packets w/ptrs ~32bytes hdr */ 361 struct evcnt sc_ev_txptrh3; /* TX packets w/ptrs ~64bytes hdr */ 362 struct evcnt sc_ev_txptrh4; /* TX packets w/ptrs ~80bytes hdr */ 363 struct evcnt sc_ev_txptrh5; /* TX packets w/ptrs ~96bytes hdr */ 364 struct evcnt sc_ev_txdstall; /* TX stalled due to no txdesc */ 365 struct evcnt sc_ev_txempty; /* TX empty interrupts */ 366 struct evcnt sc_ev_txsent; /* TX sent interrupts */ 367 #endif 368 }; 369 370 #define MEC_CDTXADDR(sc, x) ((sc)->sc_cddma + MEC_CDTXOFF(x)) 371 #define MEC_CDRXADDR(sc, x) ((sc)->sc_cddma + MEC_CDRXOFF(x)) 372 373 #define MEC_TXDESCSYNC(sc, x, ops) \ 374 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 375 MEC_CDTXOFF(x), MEC_TXDESCSIZE, (ops)) 376 #define MEC_TXCMDSYNC(sc, x, ops) \ 377 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 378 MEC_CDTXOFF(x), sizeof(uint64_t), (ops)) 379 380 #define MEC_RXSTATSYNC(sc, x, ops) \ 381 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 382 MEC_CDRXOFF(x), sizeof(uint64_t), (ops)) 383 #define MEC_RXBUFSYNC(sc, x, len, ops) \ 384 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 385 MEC_CDRXOFF(x) + MEC_RXD_BUFOFFSET, \ 386 MEC_ETHER_ALIGN + (len), (ops)) 387 388 /* XXX these values should be moved to <net/if_ether.h> ? */ 389 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN) 390 #define MEC_ETHER_ALIGN 2 391 392 static int mec_match(device_t, cfdata_t, void *); 393 static void mec_attach(device_t, device_t, void *); 394 395 static int mec_mii_readreg(device_t, int, int); 396 static void mec_mii_writereg(device_t, int, int, int); 397 static int mec_mii_wait(struct mec_softc *); 398 static void mec_statchg(device_t); 399 400 static void enaddr_aton(const char *, uint8_t *); 401 402 static int mec_init(struct ifnet * ifp); 403 static void mec_start(struct ifnet *); 404 static void mec_watchdog(struct ifnet *); 405 static void mec_tick(void *); 406 static int mec_ioctl(struct ifnet *, u_long, void *); 407 static void mec_reset(struct mec_softc *); 408 static void mec_setfilter(struct mec_softc *); 409 static int mec_intr(void *arg); 410 static void mec_stop(struct ifnet *, int); 411 static void mec_rxintr(struct mec_softc *); 412 static void mec_rxcsum(struct mec_softc *, struct mbuf *, uint16_t, 413 uint32_t); 414 static void mec_txintr(struct mec_softc *, uint32_t); 415 static bool mec_shutdown(device_t, int); 416 417 CFATTACH_DECL_NEW(mec, sizeof(struct mec_softc), 418 mec_match, mec_attach, NULL, NULL); 419 420 static int mec_matched = 0; 421 422 static int 423 mec_match(device_t parent, cfdata_t cf, void *aux) 424 { 425 426 /* allow only one device */ 427 if (mec_matched) 428 return 0; 429 430 mec_matched = 1; 431 return 1; 432 } 433 434 static void 435 mec_attach(device_t parent, device_t self, void *aux) 436 { 437 struct mec_softc *sc = device_private(self); 438 struct mace_attach_args *maa = aux; 439 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 440 uint64_t address, command; 441 const char *macaddr; 442 struct mii_softc *child; 443 bus_dma_segment_t seg; 444 int i, err, rseg; 445 bool mac_is_fake; 
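	/*
	 * Attach outline (a summary of the code below): map the MEC
	 * register window, allocate and DMA-map the shared control data
	 * holding the TX/RX descriptor rings, create per-packet TX DMA
	 * maps, fetch the MAC address from ARCBIOS (or synthesize one
	 * from "netaddr" if the firmware doesn't know it), reset the
	 * chip, then attach the MII PHY, the network interface and the
	 * interrupt handler.
	 */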
446 447 sc->sc_dev = self; 448 sc->sc_st = maa->maa_st; 449 if (bus_space_subregion(sc->sc_st, maa->maa_sh, 450 maa->maa_offset, 0, &sc->sc_sh) != 0) { 451 aprint_error(": can't map i/o space\n"); 452 return; 453 } 454 455 /* set up DMA structures */ 456 sc->sc_dmat = maa->maa_dmat; 457 458 /* 459 * Allocate the control data structures, and create and load the 460 * DMA map for it. 461 */ 462 if ((err = bus_dmamem_alloc(sc->sc_dmat, 463 sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0, 464 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) { 465 aprint_error(": unable to allocate control data, error = %d\n", 466 err); 467 goto fail_0; 468 } 469 /* 470 * XXX needs re-think... 471 * control data structures contain whole RX data buffer, so 472 * BUS_DMA_COHERENT (which disables cache) may cause some performance 473 * issue on copying data from the RX buffer to mbuf on normal memory, 474 * though we have to make sure all bus_dmamap_sync(9) ops are called 475 * properly in that case. 476 */ 477 if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 478 sizeof(struct mec_control_data), 479 (void **)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) { 480 aprint_error(": unable to map control data, error = %d\n", err); 481 goto fail_1; 482 } 483 memset(sc->sc_control_data, 0, sizeof(struct mec_control_data)); 484 485 if ((err = bus_dmamap_create(sc->sc_dmat, 486 sizeof(struct mec_control_data), 1, 487 sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) { 488 aprint_error(": unable to create control data DMA map," 489 " error = %d\n", err); 490 goto fail_2; 491 } 492 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 493 sc->sc_control_data, sizeof(struct mec_control_data), NULL, 494 BUS_DMA_NOWAIT)) != 0) { 495 aprint_error(": unable to load control data DMA map," 496 " error = %d\n", err); 497 goto fail_3; 498 } 499 500 /* create TX buffer DMA maps */ 501 for (i = 0; i < MEC_NTXDESC; i++) { 502 if ((err = bus_dmamap_create(sc->sc_dmat, 503 MCLBYTES, MEC_NTXSEG, MCLBYTES, PAGE_SIZE, 0, 504 &sc->sc_txsoft[i].txs_dmamap)) != 0) { 505 aprint_error(": unable to create tx DMA map %d," 506 " error = %d\n", i, err); 507 goto fail_4; 508 } 509 } 510 511 callout_init(&sc->sc_tick_ch, 0); 512 513 /* get Ethernet address from ARCBIOS */ 514 if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) { 515 aprint_error(": unable to get MAC address!\n"); 516 goto fail_4; 517 } 518 /* 519 * On some machines the DS2502 chip storing the serial number/ 520 * mac address is on the pci riser board - if this board is 521 * missing, ARCBIOS will not know a good ethernet address (but 522 * otherwise the machine will work fine). 
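 *
 * In that case the code below falls back to a locally administered
 * address built from the "netaddr" environment variable, e.g.
 * (assuming the usual big-endian sgimips byte order):
 *
 *	netaddr=192.168.1.5  ->  ui = 0xc0a80105
 *	                     ->  enaddr = f2:0b:a4:a8:01:05
 *
 * i.e. the fixed f2:0b:a4 prefix followed by the low three octets of
 * the configured IPv4 address.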
523 */ 524 mac_is_fake = false; 525 if (strcmp(macaddr, "ff:ff:ff:ff:ff:ff") == 0) { 526 uint32_t ui = 0; 527 const char * netaddr = 528 ARCBIOS->GetEnvironmentVariable("netaddr"); 529 530 /* 531 * Create a MAC address by abusing the "netaddr" env var 532 */ 533 sc->sc_enaddr[0] = 0xf2; 534 sc->sc_enaddr[1] = 0x0b; 535 sc->sc_enaddr[2] = 0xa4; 536 if (netaddr) { 537 mac_is_fake = true; 538 while (*netaddr) { 539 int v = 0; 540 while (*netaddr && *netaddr != '.') { 541 if (*netaddr >= '0' && *netaddr <= '9') 542 v = v*10 + (*netaddr - '0'); 543 netaddr++; 544 } 545 ui <<= 8; 546 ui |= v; 547 if (*netaddr == '.') 548 netaddr++; 549 } 550 } 551 memcpy(sc->sc_enaddr+3, ((uint8_t *)&ui)+1, 3); 552 } 553 if (!mac_is_fake) 554 enaddr_aton(macaddr, sc->sc_enaddr); 555 556 /* set the Ethernet address */ 557 address = 0; 558 for (i = 0; i < ETHER_ADDR_LEN; i++) { 559 address = address << 8; 560 address |= sc->sc_enaddr[i]; 561 } 562 bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_STATION, address); 563 564 /* reset device */ 565 mec_reset(sc); 566 567 command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL); 568 569 aprint_normal(": MAC-110 Ethernet, rev %u\n", 570 (u_int)((command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT)); 571 572 if (mac_is_fake) 573 aprint_normal_dev(self, 574 "could not get ethernet address from firmware" 575 " - generated one from the \"netaddr\" environment" 576 " variable\n"); 577 aprint_normal_dev(self, "Ethernet address %s\n", 578 ether_sprintf(sc->sc_enaddr)); 579 580 /* Done, now attach everything */ 581 582 sc->sc_mii.mii_ifp = ifp; 583 sc->sc_mii.mii_readreg = mec_mii_readreg; 584 sc->sc_mii.mii_writereg = mec_mii_writereg; 585 sc->sc_mii.mii_statchg = mec_statchg; 586 587 /* Set up PHY properties */ 588 sc->sc_ethercom.ec_mii = &sc->sc_mii; 589 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, 590 ether_mediastatus); 591 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 592 MII_OFFSET_ANY, 0); 593 594 child = LIST_FIRST(&sc->sc_mii.mii_phys); 595 if (child == NULL) { 596 /* No PHY attached */ 597 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL, 598 0, NULL); 599 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL); 600 } else { 601 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); 602 sc->sc_phyaddr = child->mii_phy; 603 } 604 605 strcpy(ifp->if_xname, device_xname(self)); 606 ifp->if_softc = sc; 607 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 608 ifp->if_ioctl = mec_ioctl; 609 ifp->if_start = mec_start; 610 ifp->if_watchdog = mec_watchdog; 611 ifp->if_init = mec_init; 612 ifp->if_stop = mec_stop; 613 ifp->if_mtu = ETHERMTU; 614 IFQ_SET_READY(&ifp->if_snd); 615 616 /* mec has dumb RX cksum support */ 617 ifp->if_capabilities = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx; 618 619 /* We can support 802.1Q VLAN-sized frames. 
*/ 620 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 621 622 /* attach the interface */ 623 if_attach(ifp); 624 ether_ifattach(ifp, sc->sc_enaddr); 625 626 /* establish interrupt */ 627 cpu_intr_establish(maa->maa_intr, maa->maa_intrmask, mec_intr, sc); 628 629 #if NRND > 0 630 rnd_attach_source(&sc->sc_rnd_source, device_xname(self), 631 RND_TYPE_NET, 0); 632 #endif 633 634 #ifdef MEC_EVENT_COUNTERS 635 evcnt_attach_dynamic(&sc->sc_ev_txpkts , EVCNT_TYPE_MISC, 636 NULL, device_xname(self), "TX pkts queued total"); 637 evcnt_attach_dynamic(&sc->sc_ev_txdpad , EVCNT_TYPE_MISC, 638 NULL, device_xname(self), "TX pkts padded in txdesc buf"); 639 evcnt_attach_dynamic(&sc->sc_ev_txdbuf , EVCNT_TYPE_MISC, 640 NULL, device_xname(self), "TX pkts copied to txdesc buf"); 641 evcnt_attach_dynamic(&sc->sc_ev_txptr1 , EVCNT_TYPE_MISC, 642 NULL, device_xname(self), "TX pkts using concat ptr1"); 643 evcnt_attach_dynamic(&sc->sc_ev_txptr1a , EVCNT_TYPE_MISC, 644 NULL, device_xname(self), "TX pkts w/ptr1 ~160bytes"); 645 evcnt_attach_dynamic(&sc->sc_ev_txptr1b , EVCNT_TYPE_MISC, 646 NULL, device_xname(self), "TX pkts w/ptr1 ~256bytes"); 647 evcnt_attach_dynamic(&sc->sc_ev_txptr1c , EVCNT_TYPE_MISC, 648 NULL, device_xname(self), "TX pkts w/ptr1 ~512bytes"); 649 evcnt_attach_dynamic(&sc->sc_ev_txptr1d , EVCNT_TYPE_MISC, 650 NULL, device_xname(self), "TX pkts w/ptr1 ~1024bytes"); 651 evcnt_attach_dynamic(&sc->sc_ev_txptr1e , EVCNT_TYPE_MISC, 652 NULL, device_xname(self), "TX pkts w/ptr1 >1024bytes"); 653 evcnt_attach_dynamic(&sc->sc_ev_txptr2 , EVCNT_TYPE_MISC, 654 NULL, device_xname(self), "TX pkts using concat ptr1,2"); 655 evcnt_attach_dynamic(&sc->sc_ev_txptr2a , EVCNT_TYPE_MISC, 656 NULL, device_xname(self), "TX pkts w/ptr2 ~160bytes"); 657 evcnt_attach_dynamic(&sc->sc_ev_txptr2b , EVCNT_TYPE_MISC, 658 NULL, device_xname(self), "TX pkts w/ptr2 ~256bytes"); 659 evcnt_attach_dynamic(&sc->sc_ev_txptr2c , EVCNT_TYPE_MISC, 660 NULL, device_xname(self), "TX pkts w/ptr2 ~512bytes"); 661 evcnt_attach_dynamic(&sc->sc_ev_txptr2d , EVCNT_TYPE_MISC, 662 NULL, device_xname(self), "TX pkts w/ptr2 ~1024bytes"); 663 evcnt_attach_dynamic(&sc->sc_ev_txptr2e , EVCNT_TYPE_MISC, 664 NULL, device_xname(self), "TX pkts w/ptr2 >1024bytes"); 665 evcnt_attach_dynamic(&sc->sc_ev_txptr3 , EVCNT_TYPE_MISC, 666 NULL, device_xname(self), "TX pkts using concat ptr1,2,3"); 667 evcnt_attach_dynamic(&sc->sc_ev_txptr3a , EVCNT_TYPE_MISC, 668 NULL, device_xname(self), "TX pkts w/ptr3 ~160bytes"); 669 evcnt_attach_dynamic(&sc->sc_ev_txptr3b , EVCNT_TYPE_MISC, 670 NULL, device_xname(self), "TX pkts w/ptr3 ~256bytes"); 671 evcnt_attach_dynamic(&sc->sc_ev_txptr3c , EVCNT_TYPE_MISC, 672 NULL, device_xname(self), "TX pkts w/ptr3 ~512bytes"); 673 evcnt_attach_dynamic(&sc->sc_ev_txptr3d , EVCNT_TYPE_MISC, 674 NULL, device_xname(self), "TX pkts w/ptr3 ~1024bytes"); 675 evcnt_attach_dynamic(&sc->sc_ev_txptr3e , EVCNT_TYPE_MISC, 676 NULL, device_xname(self), "TX pkts w/ptr3 >1024bytes"); 677 evcnt_attach_dynamic(&sc->sc_ev_txmbuf , EVCNT_TYPE_MISC, 678 NULL, device_xname(self), "TX pkts copied to new mbufs"); 679 evcnt_attach_dynamic(&sc->sc_ev_txmbufa , EVCNT_TYPE_MISC, 680 NULL, device_xname(self), "TX pkts w/mbuf ~160bytes"); 681 evcnt_attach_dynamic(&sc->sc_ev_txmbufb , EVCNT_TYPE_MISC, 682 NULL, device_xname(self), "TX pkts w/mbuf ~256bytes"); 683 evcnt_attach_dynamic(&sc->sc_ev_txmbufc , EVCNT_TYPE_MISC, 684 NULL, device_xname(self), "TX pkts w/mbuf ~512bytes"); 685 evcnt_attach_dynamic(&sc->sc_ev_txmbufd , EVCNT_TYPE_MISC, 686 NULL, 
device_xname(self), "TX pkts w/mbuf ~1024bytes"); 687 evcnt_attach_dynamic(&sc->sc_ev_txmbufe , EVCNT_TYPE_MISC, 688 NULL, device_xname(self), "TX pkts w/mbuf >1024bytes"); 689 evcnt_attach_dynamic(&sc->sc_ev_txptrs , EVCNT_TYPE_MISC, 690 NULL, device_xname(self), "TX pkts using ptrs total"); 691 evcnt_attach_dynamic(&sc->sc_ev_txptrc0 , EVCNT_TYPE_MISC, 692 NULL, device_xname(self), "TX pkts w/ptrs no hdr chain"); 693 evcnt_attach_dynamic(&sc->sc_ev_txptrc1 , EVCNT_TYPE_MISC, 694 NULL, device_xname(self), "TX pkts w/ptrs 1 hdr chain"); 695 evcnt_attach_dynamic(&sc->sc_ev_txptrc2 , EVCNT_TYPE_MISC, 696 NULL, device_xname(self), "TX pkts w/ptrs 2 hdr chains"); 697 evcnt_attach_dynamic(&sc->sc_ev_txptrc3 , EVCNT_TYPE_MISC, 698 NULL, device_xname(self), "TX pkts w/ptrs 3 hdr chains"); 699 evcnt_attach_dynamic(&sc->sc_ev_txptrc4 , EVCNT_TYPE_MISC, 700 NULL, device_xname(self), "TX pkts w/ptrs 4 hdr chains"); 701 evcnt_attach_dynamic(&sc->sc_ev_txptrc5 , EVCNT_TYPE_MISC, 702 NULL, device_xname(self), "TX pkts w/ptrs 5 hdr chains"); 703 evcnt_attach_dynamic(&sc->sc_ev_txptrc6 , EVCNT_TYPE_MISC, 704 NULL, device_xname(self), "TX pkts w/ptrs >5 hdr chains"); 705 evcnt_attach_dynamic(&sc->sc_ev_txptrh0 , EVCNT_TYPE_MISC, 706 NULL, device_xname(self), "TX pkts w/ptrs ~8bytes hdr"); 707 evcnt_attach_dynamic(&sc->sc_ev_txptrh1 , EVCNT_TYPE_MISC, 708 NULL, device_xname(self), "TX pkts w/ptrs ~16bytes hdr"); 709 evcnt_attach_dynamic(&sc->sc_ev_txptrh2 , EVCNT_TYPE_MISC, 710 NULL, device_xname(self), "TX pkts w/ptrs ~32bytes hdr"); 711 evcnt_attach_dynamic(&sc->sc_ev_txptrh3 , EVCNT_TYPE_MISC, 712 NULL, device_xname(self), "TX pkts w/ptrs ~64bytes hdr"); 713 evcnt_attach_dynamic(&sc->sc_ev_txptrh4 , EVCNT_TYPE_MISC, 714 NULL, device_xname(self), "TX pkts w/ptrs ~80bytes hdr"); 715 evcnt_attach_dynamic(&sc->sc_ev_txptrh5 , EVCNT_TYPE_MISC, 716 NULL, device_xname(self), "TX pkts w/ptrs ~96bytes hdr"); 717 evcnt_attach_dynamic(&sc->sc_ev_txdstall , EVCNT_TYPE_MISC, 718 NULL, device_xname(self), "TX stalled due to no txdesc"); 719 evcnt_attach_dynamic(&sc->sc_ev_txempty , EVCNT_TYPE_MISC, 720 NULL, device_xname(self), "TX empty interrupts"); 721 evcnt_attach_dynamic(&sc->sc_ev_txsent , EVCNT_TYPE_MISC, 722 NULL, device_xname(self), "TX sent interrupts"); 723 #endif 724 725 /* set shutdown hook to reset interface on powerdown */ 726 if (pmf_device_register1(self, NULL, NULL, mec_shutdown)) 727 pmf_class_network_register(self, ifp); 728 else 729 aprint_error_dev(self, "couldn't establish power handler\n"); 730 731 return; 732 733 /* 734 * Free any resources we've allocated during the failed attach 735 * attempt. Do this in reverse order and fall though. 
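 * (fail_4 through fail_0 below undo, in reverse order, the TX DMA map
 * creation, the control data map load, the control data map creation,
 * the memory mapping and the memory allocation done earlier in this
 * function.)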
736 */ 737 fail_4: 738 for (i = 0; i < MEC_NTXDESC; i++) { 739 if (sc->sc_txsoft[i].txs_dmamap != NULL) 740 bus_dmamap_destroy(sc->sc_dmat, 741 sc->sc_txsoft[i].txs_dmamap); 742 } 743 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 744 fail_3: 745 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 746 fail_2: 747 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 748 sizeof(struct mec_control_data)); 749 fail_1: 750 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 751 fail_0: 752 return; 753 } 754 755 static int 756 mec_mii_readreg(device_t self, int phy, int reg) 757 { 758 struct mec_softc *sc = device_private(self); 759 bus_space_tag_t st = sc->sc_st; 760 bus_space_handle_t sh = sc->sc_sh; 761 uint64_t val; 762 int i; 763 764 if (mec_mii_wait(sc) != 0) 765 return 0; 766 767 bus_space_write_8(st, sh, MEC_PHY_ADDRESS, 768 (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER)); 769 delay(25); 770 bus_space_write_8(st, sh, MEC_PHY_READ_INITIATE, 1); 771 delay(25); 772 mec_mii_wait(sc); 773 774 for (i = 0; i < 20; i++) { 775 delay(30); 776 777 val = bus_space_read_8(st, sh, MEC_PHY_DATA); 778 779 if ((val & MEC_PHY_DATA_BUSY) == 0) 780 return val & MEC_PHY_DATA_VALUE; 781 } 782 return 0; 783 } 784 785 static void 786 mec_mii_writereg(device_t self, int phy, int reg, int val) 787 { 788 struct mec_softc *sc = device_private(self); 789 bus_space_tag_t st = sc->sc_st; 790 bus_space_handle_t sh = sc->sc_sh; 791 792 if (mec_mii_wait(sc) != 0) { 793 printf("timed out writing %x: %x\n", reg, val); 794 return; 795 } 796 797 bus_space_write_8(st, sh, MEC_PHY_ADDRESS, 798 (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER)); 799 800 delay(60); 801 802 bus_space_write_8(st, sh, MEC_PHY_DATA, val & MEC_PHY_DATA_VALUE); 803 804 delay(60); 805 806 mec_mii_wait(sc); 807 } 808 809 static int 810 mec_mii_wait(struct mec_softc *sc) 811 { 812 uint32_t busy; 813 int i, s; 814 815 for (i = 0; i < 100; i++) { 816 delay(30); 817 818 s = splhigh(); 819 busy = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_PHY_DATA); 820 splx(s); 821 822 if ((busy & MEC_PHY_DATA_BUSY) == 0) 823 return 0; 824 #if 0 825 if (busy == 0xffff) /* XXX ? */ 826 return 0; 827 #endif 828 } 829 830 printf("%s: MII timed out\n", device_xname(sc->sc_dev)); 831 return 1; 832 } 833 834 static void 835 mec_statchg(device_t self) 836 { 837 struct mec_softc *sc = device_private(self); 838 bus_space_tag_t st = sc->sc_st; 839 bus_space_handle_t sh = sc->sc_sh; 840 uint32_t control; 841 842 control = bus_space_read_8(st, sh, MEC_MAC_CONTROL); 843 control &= ~(MEC_MAC_IPGT | MEC_MAC_IPGR1 | MEC_MAC_IPGR2 | 844 MEC_MAC_FULL_DUPLEX | MEC_MAC_SPEED_SELECT); 845 846 /* must also set IPG here for duplex stuff ... */ 847 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) { 848 control |= MEC_MAC_FULL_DUPLEX; 849 } else { 850 /* set IPG */ 851 control |= MEC_MAC_IPG_DEFAULT; 852 } 853 854 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control); 855 } 856 857 /* 858 * XXX 859 * maybe this function should be moved to common part 860 * (sgimips/machdep.c or elsewhere) for all on-board network devices. 
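 *
 * e.g. ARCBIOS typically returns the address as a string such as
 * "08:00:69:0a:1b:2c" (08:00:69 being SGI's OUI); enaddr_aton() below
 * turns it into the six binary octets { 0x08, 0x00, 0x69, 0x0a, 0x1b,
 * 0x2c } used by the rest of the driver.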
861 */ 862 static void 863 enaddr_aton(const char *str, uint8_t *eaddr) 864 { 865 int i; 866 char c; 867 868 for (i = 0; i < ETHER_ADDR_LEN; i++) { 869 if (*str == ':') 870 str++; 871 872 c = *str++; 873 if (isdigit(c)) { 874 eaddr[i] = (c - '0'); 875 } else if (isxdigit(c)) { 876 eaddr[i] = (toupper(c) + 10 - 'A'); 877 } 878 c = *str++; 879 if (isdigit(c)) { 880 eaddr[i] = (eaddr[i] << 4) | (c - '0'); 881 } else if (isxdigit(c)) { 882 eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A'); 883 } 884 } 885 } 886 887 static int 888 mec_init(struct ifnet *ifp) 889 { 890 struct mec_softc *sc = ifp->if_softc; 891 bus_space_tag_t st = sc->sc_st; 892 bus_space_handle_t sh = sc->sc_sh; 893 struct mec_rxdesc *rxd; 894 int i, rc; 895 896 /* cancel any pending I/O */ 897 mec_stop(ifp, 0); 898 899 /* reset device */ 900 mec_reset(sc); 901 902 /* setup filter for multicast or promisc mode */ 903 mec_setfilter(sc); 904 905 /* set the TX ring pointer to the base address */ 906 bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0)); 907 908 sc->sc_txpending = 0; 909 sc->sc_txdirty = 0; 910 sc->sc_txlast = MEC_NTXDESC - 1; 911 912 /* put RX buffers into FIFO */ 913 for (i = 0; i < MEC_NRXDESC; i++) { 914 rxd = &sc->sc_rxdesc[i]; 915 rxd->rxd_stat = 0; 916 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD); 917 MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD); 918 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i)); 919 } 920 sc->sc_rxptr = 0; 921 922 #if 0 /* XXX no info */ 923 bus_space_write_8(st, sh, MEC_TIMER, 0); 924 #endif 925 926 /* 927 * MEC_DMA_TX_INT_ENABLE will be set later otherwise it causes 928 * spurious interrupts when TX buffers are empty 929 */ 930 bus_space_write_8(st, sh, MEC_DMA_CONTROL, 931 (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) | 932 (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) | 933 MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */ 934 MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE); 935 936 callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc); 937 938 if ((rc = ether_mediachange(ifp)) != 0) 939 return rc; 940 941 ifp->if_flags |= IFF_RUNNING; 942 ifp->if_flags &= ~IFF_OACTIVE; 943 mec_start(ifp); 944 945 return 0; 946 } 947 948 static void 949 mec_reset(struct mec_softc *sc) 950 { 951 bus_space_tag_t st = sc->sc_st; 952 bus_space_handle_t sh = sc->sc_sh; 953 uint64_t control; 954 955 /* stop DMA first */ 956 bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0); 957 958 /* reset chip */ 959 bus_space_write_8(st, sh, MEC_MAC_CONTROL, MEC_MAC_CORE_RESET); 960 delay(1000); 961 bus_space_write_8(st, sh, MEC_MAC_CONTROL, 0); 962 delay(1000); 963 964 /* Default to 100/half and let auto-negotiation work its magic */ 965 control = MEC_MAC_SPEED_SELECT | MEC_MAC_FILTER_MATCHMULTI | 966 MEC_MAC_IPG_DEFAULT; 967 968 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control); 969 /* stop DMA again for sanity */ 970 bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0); 971 972 DPRINTF(MEC_DEBUG_RESET, ("mec: control now %llx\n", 973 bus_space_read_8(st, sh, MEC_MAC_CONTROL))); 974 } 975 976 static void 977 mec_start(struct ifnet *ifp) 978 { 979 struct mec_softc *sc = ifp->if_softc; 980 struct mbuf *m0, *m; 981 struct mec_txdesc *txd; 982 struct mec_txsoft *txs; 983 bus_dmamap_t dmamap; 984 bus_space_tag_t st = sc->sc_st; 985 bus_space_handle_t sh = sc->sc_sh; 986 int error, firsttx, nexttx, opending; 987 int len, bufoff, buflen, nsegs, align, resid, pseg, nptr, slen, i; 988 uint32_t txdcmd; 989 990 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 991 return; 992 993 /* 
994 * Remember the previous txpending and the first transmit descriptor. 995 */ 996 opending = sc->sc_txpending; 997 firsttx = MEC_NEXTTX(sc->sc_txlast); 998 999 DPRINTF(MEC_DEBUG_START, 1000 ("%s: opending = %d, firsttx = %d\n", __func__, opending, firsttx)); 1001 1002 while (sc->sc_txpending < MEC_NTXDESC - 1) { 1003 /* Grab a packet off the queue. */ 1004 IFQ_POLL(&ifp->if_snd, m0); 1005 if (m0 == NULL) 1006 break; 1007 m = NULL; 1008 1009 /* 1010 * Get the next available transmit descriptor. 1011 */ 1012 nexttx = MEC_NEXTTX(sc->sc_txlast); 1013 txd = &sc->sc_txdesc[nexttx]; 1014 txs = &sc->sc_txsoft[nexttx]; 1015 dmamap = txs->txs_dmamap; 1016 txs->txs_flags = 0; 1017 1018 buflen = 0; 1019 bufoff = 0; 1020 resid = 0; 1021 nptr = 0; /* XXX gcc */ 1022 pseg = 0; /* XXX gcc */ 1023 1024 len = m0->m_pkthdr.len; 1025 1026 DPRINTF(MEC_DEBUG_START, 1027 ("%s: len = %d, nexttx = %d, txpending = %d\n", 1028 __func__, len, nexttx, sc->sc_txpending)); 1029 1030 if (len <= MEC_TXD_BUFSIZE) { 1031 /* 1032 * If a TX packet will fit into small txdesc buffer, 1033 * just copy it into there. Maybe it's faster than 1034 * checking alignment and calling bus_dma(9) etc. 1035 */ 1036 DPRINTF(MEC_DEBUG_START, ("%s: short packet\n", 1037 __func__)); 1038 IFQ_DEQUEUE(&ifp->if_snd, m0); 1039 1040 /* 1041 * I don't know if MEC chip does auto padding, 1042 * but do it manually for safety. 1043 */ 1044 if (len < ETHER_PAD_LEN) { 1045 MEC_EVCNT_INCR(&sc->sc_ev_txdpad); 1046 bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN); 1047 m_copydata(m0, 0, len, txd->txd_buf + bufoff); 1048 memset(txd->txd_buf + bufoff + len, 0, 1049 ETHER_PAD_LEN - len); 1050 len = buflen = ETHER_PAD_LEN; 1051 } else { 1052 MEC_EVCNT_INCR(&sc->sc_ev_txdbuf); 1053 bufoff = MEC_TXD_BUFSTART(len); 1054 m_copydata(m0, 0, len, txd->txd_buf + bufoff); 1055 buflen = len; 1056 } 1057 } else { 1058 /* 1059 * If the packet won't fit the static buffer in txdesc, 1060 * we have to use the concatenate pointers to handle it. 1061 */ 1062 DPRINTF(MEC_DEBUG_START, ("%s: long packet\n", 1063 __func__)); 1064 txs->txs_flags = MEC_TXS_TXDPTR; 1065 1066 /* 1067 * Call bus_dmamap_load_mbuf(9) first to see 1068 * how many chains the TX mbuf has. 1069 */ 1070 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 1071 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1072 if (error == 0) { 1073 /* 1074 * Check chains which might contain headers. 1075 * They might be so much fragmented and 1076 * it's better to copy them into txdesc buffer 1077 * since they would be small enough. 1078 */ 1079 nsegs = dmamap->dm_nsegs; 1080 for (pseg = 0; pseg < nsegs; pseg++) { 1081 slen = dmamap->dm_segs[pseg].ds_len; 1082 if (buflen + slen > 1083 MEC_TXD_BUFSIZE1 - MEC_TXD_ALIGN) 1084 break; 1085 buflen += slen; 1086 } 1087 /* 1088 * Check if the rest chains can be fit into 1089 * the concatinate pointers. 1090 */ 1091 align = dmamap->dm_segs[pseg].ds_addr & 1092 MEC_TXD_ALIGNMASK; 1093 if (align > 0) { 1094 /* 1095 * If the first chain isn't uint64_t 1096 * aligned, append the unaligned part 1097 * into txdesc buffer too. 1098 */ 1099 resid = MEC_TXD_ALIGN - align; 1100 buflen += resid; 1101 for (; pseg < nsegs; pseg++) { 1102 slen = 1103 dmamap->dm_segs[pseg].ds_len; 1104 if (slen > resid) 1105 break; 1106 resid -= slen; 1107 } 1108 } else if (pseg == 0) { 1109 /* 1110 * In this case, the first chain is 1111 * uint64_t aligned but it's too long 1112 * to put into txdesc buf. 1113 * We have to put some data into 1114 * txdesc buf even in this case, 1115 * so put MEC_TXD_ALIGN bytes there. 
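				 *
				 * e.g. a 1500 byte packet sitting in one
				 * 8 byte aligned cluster ends up with its
				 * first MEC_TXD_ALIGN (8) bytes copied into
				 * the txdesc buffer and txd_ptr[0] covering
				 * the remaining 1492 bytes starting at
				 * ds_addr + 8 (illustrative numbers only).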
1116 */ 1117 buflen = resid = MEC_TXD_ALIGN; 1118 } 1119 nptr = nsegs - pseg; 1120 if (nptr <= MEC_NTXPTR) { 1121 bufoff = MEC_TXD_BUFSTART(buflen); 1122 1123 /* 1124 * Check if all the rest chains are 1125 * uint64_t aligned. 1126 */ 1127 align = 0; 1128 for (i = pseg + 1; i < nsegs; i++) 1129 align |= 1130 dmamap->dm_segs[i].ds_addr 1131 & MEC_TXD_ALIGNMASK; 1132 if (align != 0) { 1133 /* chains are not aligned */ 1134 error = -1; 1135 } 1136 } else { 1137 /* The TX mbuf chains doesn't fit. */ 1138 error = -1; 1139 } 1140 if (error == -1) 1141 bus_dmamap_unload(sc->sc_dmat, dmamap); 1142 } 1143 if (error != 0) { 1144 /* 1145 * The TX mbuf chains can't be put into 1146 * the concatinate buffers. In this case, 1147 * we have to allocate a new contiguous mbuf 1148 * and copy data into it. 1149 * 1150 * Even in this case, the Ethernet header in 1151 * the TX mbuf might be unaligned and trailing 1152 * data might be word aligned, so put 2 byte 1153 * (MEC_ETHER_ALIGN) padding at the top of the 1154 * allocated mbuf and copy TX packets. 1155 * 6 bytes (MEC_ALIGN_BYTES - MEC_ETHER_ALIGN) 1156 * at the top of the new mbuf won't be uint64_t 1157 * alignd, but we have to put some data into 1158 * txdesc buffer anyway even if the buffer 1159 * is uint64_t aligned. 1160 */ 1161 DPRINTF(MEC_DEBUG_START|MEC_DEBUG_TXSEGS, 1162 ("%s: re-allocating mbuf\n", __func__)); 1163 1164 MGETHDR(m, M_DONTWAIT, MT_DATA); 1165 if (m == NULL) { 1166 printf("%s: unable to allocate " 1167 "TX mbuf\n", 1168 device_xname(sc->sc_dev)); 1169 break; 1170 } 1171 if (len > (MHLEN - MEC_ETHER_ALIGN)) { 1172 MCLGET(m, M_DONTWAIT); 1173 if ((m->m_flags & M_EXT) == 0) { 1174 printf("%s: unable to allocate " 1175 "TX cluster\n", 1176 device_xname(sc->sc_dev)); 1177 m_freem(m); 1178 break; 1179 } 1180 } 1181 m->m_data += MEC_ETHER_ALIGN; 1182 1183 /* 1184 * Copy whole data (including unaligned part) 1185 * for following bpf_mtap(). 1186 */ 1187 m_copydata(m0, 0, len, mtod(m, void *)); 1188 m->m_pkthdr.len = m->m_len = len; 1189 error = bus_dmamap_load_mbuf(sc->sc_dmat, 1190 dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1191 if (dmamap->dm_nsegs > 1) { 1192 /* should not happen, but for sanity */ 1193 bus_dmamap_unload(sc->sc_dmat, dmamap); 1194 error = -1; 1195 } 1196 if (error != 0) { 1197 printf("%s: unable to load TX buffer, " 1198 "error = %d\n", 1199 device_xname(sc->sc_dev), error); 1200 m_freem(m); 1201 break; 1202 } 1203 /* 1204 * Only the first segment should be put into 1205 * the concatinate pointer in this case. 1206 */ 1207 pseg = 0; 1208 nptr = 1; 1209 1210 /* 1211 * Set lenght of unaligned part which will be 1212 * copied into txdesc buffer. 
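			 *
			 * Here that is MEC_TXD_ALIGN - MEC_ETHER_ALIGN =
			 * 8 - 2 = 6 bytes: the copy starts 2 bytes into the
			 * (normally 8 byte aligned) mbuf storage, so its
			 * first 6 bytes sit in front of the next 8 byte
			 * boundary and get copied into the txdesc buffer;
			 * txd_ptr[0] then points at the aligned remainder.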
1213 */ 1214 buflen = MEC_TXD_ALIGN - MEC_ETHER_ALIGN; 1215 bufoff = MEC_TXD_BUFSTART(buflen); 1216 resid = buflen; 1217 #ifdef MEC_EVENT_COUNTERS 1218 MEC_EVCNT_INCR(&sc->sc_ev_txmbuf); 1219 if (len <= 160) 1220 MEC_EVCNT_INCR(&sc->sc_ev_txmbufa); 1221 else if (len <= 256) 1222 MEC_EVCNT_INCR(&sc->sc_ev_txmbufb); 1223 else if (len <= 512) 1224 MEC_EVCNT_INCR(&sc->sc_ev_txmbufc); 1225 else if (len <= 1024) 1226 MEC_EVCNT_INCR(&sc->sc_ev_txmbufd); 1227 else 1228 MEC_EVCNT_INCR(&sc->sc_ev_txmbufe); 1229 #endif 1230 } 1231 #ifdef MEC_EVENT_COUNTERS 1232 else { 1233 MEC_EVCNT_INCR(&sc->sc_ev_txptrs); 1234 if (nptr == 1) { 1235 MEC_EVCNT_INCR(&sc->sc_ev_txptr1); 1236 if (len <= 160) 1237 MEC_EVCNT_INCR( 1238 &sc->sc_ev_txptr1a); 1239 else if (len <= 256) 1240 MEC_EVCNT_INCR( 1241 &sc->sc_ev_txptr1b); 1242 else if (len <= 512) 1243 MEC_EVCNT_INCR( 1244 &sc->sc_ev_txptr1c); 1245 else if (len <= 1024) 1246 MEC_EVCNT_INCR( 1247 &sc->sc_ev_txptr1d); 1248 else 1249 MEC_EVCNT_INCR( 1250 &sc->sc_ev_txptr1e); 1251 } else if (nptr == 2) { 1252 MEC_EVCNT_INCR(&sc->sc_ev_txptr2); 1253 if (len <= 160) 1254 MEC_EVCNT_INCR( 1255 &sc->sc_ev_txptr2a); 1256 else if (len <= 256) 1257 MEC_EVCNT_INCR( 1258 &sc->sc_ev_txptr2b); 1259 else if (len <= 512) 1260 MEC_EVCNT_INCR( 1261 &sc->sc_ev_txptr2c); 1262 else if (len <= 1024) 1263 MEC_EVCNT_INCR( 1264 &sc->sc_ev_txptr2d); 1265 else 1266 MEC_EVCNT_INCR( 1267 &sc->sc_ev_txptr2e); 1268 } else if (nptr == 3) { 1269 MEC_EVCNT_INCR(&sc->sc_ev_txptr3); 1270 if (len <= 160) 1271 MEC_EVCNT_INCR( 1272 &sc->sc_ev_txptr3a); 1273 else if (len <= 256) 1274 MEC_EVCNT_INCR( 1275 &sc->sc_ev_txptr3b); 1276 else if (len <= 512) 1277 MEC_EVCNT_INCR( 1278 &sc->sc_ev_txptr3c); 1279 else if (len <= 1024) 1280 MEC_EVCNT_INCR( 1281 &sc->sc_ev_txptr3d); 1282 else 1283 MEC_EVCNT_INCR( 1284 &sc->sc_ev_txptr3e); 1285 } 1286 if (pseg == 0) 1287 MEC_EVCNT_INCR(&sc->sc_ev_txptrc0); 1288 else if (pseg == 1) 1289 MEC_EVCNT_INCR(&sc->sc_ev_txptrc1); 1290 else if (pseg == 2) 1291 MEC_EVCNT_INCR(&sc->sc_ev_txptrc2); 1292 else if (pseg == 3) 1293 MEC_EVCNT_INCR(&sc->sc_ev_txptrc3); 1294 else if (pseg == 4) 1295 MEC_EVCNT_INCR(&sc->sc_ev_txptrc4); 1296 else if (pseg == 5) 1297 MEC_EVCNT_INCR(&sc->sc_ev_txptrc5); 1298 else 1299 MEC_EVCNT_INCR(&sc->sc_ev_txptrc6); 1300 if (buflen <= 8) 1301 MEC_EVCNT_INCR(&sc->sc_ev_txptrh0); 1302 else if (buflen <= 16) 1303 MEC_EVCNT_INCR(&sc->sc_ev_txptrh1); 1304 else if (buflen <= 32) 1305 MEC_EVCNT_INCR(&sc->sc_ev_txptrh2); 1306 else if (buflen <= 64) 1307 MEC_EVCNT_INCR(&sc->sc_ev_txptrh3); 1308 else if (buflen <= 80) 1309 MEC_EVCNT_INCR(&sc->sc_ev_txptrh4); 1310 else 1311 MEC_EVCNT_INCR(&sc->sc_ev_txptrh5); 1312 } 1313 #endif 1314 m_copydata(m0, 0, buflen, txd->txd_buf + bufoff); 1315 1316 IFQ_DEQUEUE(&ifp->if_snd, m0); 1317 if (m != NULL) { 1318 m_freem(m0); 1319 m0 = m; 1320 } 1321 1322 /* 1323 * sync the DMA map for TX mbuf 1324 */ 1325 bus_dmamap_sync(sc->sc_dmat, dmamap, buflen, 1326 len - buflen, BUS_DMASYNC_PREWRITE); 1327 } 1328 1329 /* 1330 * Pass packet to bpf if there is a listener. 1331 */ 1332 if (ifp->if_bpf) 1333 bpf_ops->bpf_mtap(ifp->if_bpf, m0); 1334 MEC_EVCNT_INCR(&sc->sc_ev_txpkts); 1335 1336 /* 1337 * setup the transmit descriptor. 1338 */ 1339 txdcmd = TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen) | (len - 1); 1340 1341 /* 1342 * Set MEC_TXCMD_TXINT every MEC_NTXDESC_INTR packets 1343 * if more than half txdescs have been queued 1344 * because TX_EMPTY interrupts will rarely happen 1345 * if TX queue is so stacked. 
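		 *
		 * Concretely: with MEC_NTXDESC = 64 and MEC_NTXDESC_INTR = 8,
		 * once more than 32 descriptors are pending every descriptor
		 * whose index is a multiple of 8 requests an interrupt, so
		 * completions are reported at least every 8 packets instead
		 * of only when the ring drains.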
1346 */ 1347 if (sc->sc_txpending > (MEC_NTXDESC / 2) && 1348 (nexttx & (MEC_NTXDESC_INTR - 1)) == 0) 1349 txdcmd |= MEC_TXCMD_TXINT; 1350 1351 if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) { 1352 bus_dma_segment_t *segs = dmamap->dm_segs; 1353 1354 DPRINTF(MEC_DEBUG_TXSEGS, 1355 ("%s: nsegs = %d, pseg = %d, nptr = %d\n", 1356 __func__, dmamap->dm_nsegs, pseg, nptr)); 1357 1358 switch (nptr) { 1359 case 3: 1360 KASSERT((segs[pseg + 2].ds_addr & 1361 MEC_TXD_ALIGNMASK) == 0); 1362 txdcmd |= MEC_TXCMD_PTR3; 1363 txd->txd_ptr[2] = 1364 TXPTR_LEN(segs[pseg + 2].ds_len - 1) | 1365 segs[pseg + 2].ds_addr; 1366 /* FALLTHROUGH */ 1367 case 2: 1368 KASSERT((segs[pseg + 1].ds_addr & 1369 MEC_TXD_ALIGNMASK) == 0); 1370 txdcmd |= MEC_TXCMD_PTR2; 1371 txd->txd_ptr[1] = 1372 TXPTR_LEN(segs[pseg + 1].ds_len - 1) | 1373 segs[pseg + 1].ds_addr; 1374 /* FALLTHROUGH */ 1375 case 1: 1376 txdcmd |= MEC_TXCMD_PTR1; 1377 txd->txd_ptr[0] = 1378 TXPTR_LEN(segs[pseg].ds_len - resid - 1) | 1379 (segs[pseg].ds_addr + resid); 1380 break; 1381 default: 1382 panic("%s: impossible nptr in %s", 1383 device_xname(sc->sc_dev), __func__); 1384 /* NOTREACHED */ 1385 } 1386 /* 1387 * Store a pointer to the packet so we can 1388 * free it later. 1389 */ 1390 txs->txs_mbuf = m0; 1391 } else { 1392 /* 1393 * In this case all data are copied to buffer in txdesc, 1394 * we can free TX mbuf here. 1395 */ 1396 m_freem(m0); 1397 } 1398 txd->txd_cmd = txdcmd; 1399 1400 DPRINTF(MEC_DEBUG_START, 1401 ("%s: txd_cmd = 0x%016llx\n", 1402 __func__, txd->txd_cmd)); 1403 DPRINTF(MEC_DEBUG_START, 1404 ("%s: txd_ptr[0] = 0x%016llx\n", 1405 __func__, txd->txd_ptr[0])); 1406 DPRINTF(MEC_DEBUG_START, 1407 ("%s: txd_ptr[1] = 0x%016llx\n", 1408 __func__, txd->txd_ptr[1])); 1409 DPRINTF(MEC_DEBUG_START, 1410 ("%s: txd_ptr[2] = 0x%016llx\n", 1411 __func__, txd->txd_ptr[2])); 1412 DPRINTF(MEC_DEBUG_START, 1413 ("%s: len = %d (0x%04x), buflen = %d (0x%02x)\n", 1414 __func__, len, len, buflen, buflen)); 1415 1416 /* sync TX descriptor */ 1417 MEC_TXDESCSYNC(sc, nexttx, 1418 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1419 1420 /* start TX */ 1421 bus_space_write_8(st, sh, MEC_TX_RING_PTR, MEC_NEXTTX(nexttx)); 1422 1423 /* advance the TX pointer. */ 1424 sc->sc_txpending++; 1425 sc->sc_txlast = nexttx; 1426 } 1427 1428 if (sc->sc_txpending == MEC_NTXDESC - 1) { 1429 /* No more slots; notify upper layer. */ 1430 MEC_EVCNT_INCR(&sc->sc_ev_txdstall); 1431 ifp->if_flags |= IFF_OACTIVE; 1432 } 1433 1434 if (sc->sc_txpending != opending) { 1435 /* 1436 * If the transmitter was idle, 1437 * reset the txdirty pointer and re-enable TX interrupt. 1438 */ 1439 if (opending == 0) { 1440 sc->sc_txdirty = firsttx; 1441 bus_space_write_8(st, sh, MEC_TX_ALIAS, 1442 MEC_TX_ALIAS_INT_ENABLE); 1443 } 1444 1445 /* Set a watchdog timer in case the chip flakes out. 
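		 * The timer is re-armed to 5 seconds whenever new
		 * descriptors are queued and cleared in mec_txintr() once
		 * everything has been transmitted; if it ever expires,
		 * mec_watchdog() resets the interface via mec_init().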
*/ 1446 ifp->if_timer = 5; 1447 } 1448 } 1449 1450 static void 1451 mec_stop(struct ifnet *ifp, int disable) 1452 { 1453 struct mec_softc *sc = ifp->if_softc; 1454 struct mec_txsoft *txs; 1455 int i; 1456 1457 DPRINTF(MEC_DEBUG_STOP, ("%s\n", __func__)); 1458 1459 ifp->if_timer = 0; 1460 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1461 1462 callout_stop(&sc->sc_tick_ch); 1463 mii_down(&sc->sc_mii); 1464 1465 /* release any TX buffers */ 1466 for (i = 0; i < MEC_NTXDESC; i++) { 1467 txs = &sc->sc_txsoft[i]; 1468 if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) { 1469 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1470 m_freem(txs->txs_mbuf); 1471 txs->txs_mbuf = NULL; 1472 } 1473 } 1474 } 1475 1476 static int 1477 mec_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1478 { 1479 int s, error; 1480 1481 s = splnet(); 1482 1483 error = ether_ioctl(ifp, cmd, data); 1484 if (error == ENETRESET) { 1485 /* 1486 * Multicast list has changed; set the hardware filter 1487 * accordingly. 1488 */ 1489 if (ifp->if_flags & IFF_RUNNING) 1490 error = mec_init(ifp); 1491 else 1492 error = 0; 1493 } 1494 1495 /* Try to get more packets going. */ 1496 mec_start(ifp); 1497 1498 splx(s); 1499 return error; 1500 } 1501 1502 static void 1503 mec_watchdog(struct ifnet *ifp) 1504 { 1505 struct mec_softc *sc = ifp->if_softc; 1506 1507 printf("%s: device timeout\n", device_xname(sc->sc_dev)); 1508 ifp->if_oerrors++; 1509 1510 mec_init(ifp); 1511 } 1512 1513 static void 1514 mec_tick(void *arg) 1515 { 1516 struct mec_softc *sc = arg; 1517 int s; 1518 1519 s = splnet(); 1520 mii_tick(&sc->sc_mii); 1521 splx(s); 1522 1523 callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc); 1524 } 1525 1526 static void 1527 mec_setfilter(struct mec_softc *sc) 1528 { 1529 struct ethercom *ec = &sc->sc_ethercom; 1530 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1531 struct ether_multi *enm; 1532 struct ether_multistep step; 1533 bus_space_tag_t st = sc->sc_st; 1534 bus_space_handle_t sh = sc->sc_sh; 1535 uint64_t mchash; 1536 uint32_t control, hash; 1537 int mcnt; 1538 1539 control = bus_space_read_8(st, sh, MEC_MAC_CONTROL); 1540 control &= ~MEC_MAC_FILTER_MASK; 1541 1542 if (ifp->if_flags & IFF_PROMISC) { 1543 control |= MEC_MAC_FILTER_PROMISC; 1544 bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL); 1545 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control); 1546 return; 1547 } 1548 1549 mcnt = 0; 1550 mchash = 0; 1551 ETHER_FIRST_MULTI(step, ec, enm); 1552 while (enm != NULL) { 1553 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1554 /* set allmulti for a range of multicast addresses */ 1555 control |= MEC_MAC_FILTER_ALLMULTI; 1556 bus_space_write_8(st, sh, MEC_MULTICAST, 1557 0xffffffffffffffffULL); 1558 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control); 1559 return; 1560 } 1561 1562 #define mec_calchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26) 1563 1564 hash = mec_calchash(enm->enm_addrlo); 1565 mchash |= 1 << hash; 1566 mcnt++; 1567 ETHER_NEXT_MULTI(step, enm); 1568 } 1569 1570 ifp->if_flags &= ~IFF_ALLMULTI; 1571 1572 if (mcnt > 0) 1573 control |= MEC_MAC_FILTER_MATCHMULTI; 1574 1575 bus_space_write_8(st, sh, MEC_MULTICAST, mchash); 1576 bus_space_write_8(st, sh, MEC_MAC_CONTROL, control); 1577 } 1578 1579 static int 1580 mec_intr(void *arg) 1581 { 1582 struct mec_softc *sc = arg; 1583 bus_space_tag_t st = sc->sc_st; 1584 bus_space_handle_t sh = sc->sc_sh; 1585 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1586 uint32_t statreg, statack, txptr; 1587 int handled, sent; 1588 1589 
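	/*
	 * Interrupt service outline (a summary of the loop below): read
	 * MEC_INT_STATUS, write the handled bits back to acknowledge them,
	 * then dispatch: RX threshold/FIFO events go to mec_rxintr(), TX
	 * completion events go to mec_txintr() with the TX ring read
	 * pointer snapshot taken from the status register, and any fatal
	 * error bit causes a full reinitialization via mec_init().
	 */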
DPRINTF(MEC_DEBUG_INTR, ("%s: called\n", __func__)); 1590 1591 handled = sent = 0; 1592 1593 for (;;) { 1594 statreg = bus_space_read_8(st, sh, MEC_INT_STATUS); 1595 1596 DPRINTF(MEC_DEBUG_INTR, 1597 ("%s: INT_STAT = 0x%08x\n", __func__, statreg)); 1598 1599 statack = statreg & MEC_INT_STATUS_MASK; 1600 if (statack == 0) 1601 break; 1602 bus_space_write_8(st, sh, MEC_INT_STATUS, statack); 1603 1604 handled = 1; 1605 1606 if (statack & 1607 (MEC_INT_RX_THRESHOLD | 1608 MEC_INT_RX_FIFO_UNDERFLOW)) { 1609 mec_rxintr(sc); 1610 } 1611 1612 if (statack & 1613 (MEC_INT_TX_EMPTY | 1614 MEC_INT_TX_PACKET_SENT | 1615 MEC_INT_TX_ABORT)) { 1616 txptr = (statreg & MEC_INT_TX_RING_BUFFER_ALIAS) 1617 >> MEC_INT_TX_RING_BUFFER_SHIFT; 1618 mec_txintr(sc, txptr); 1619 sent = 1; 1620 if ((statack & MEC_INT_TX_EMPTY) != 0) { 1621 /* 1622 * disable TX interrupt to stop 1623 * TX empty interrupt 1624 */ 1625 bus_space_write_8(st, sh, MEC_TX_ALIAS, 0); 1626 DPRINTF(MEC_DEBUG_INTR, 1627 ("%s: disable TX_INT\n", __func__)); 1628 } 1629 #ifdef MEC_EVENT_COUNTERS 1630 if ((statack & MEC_INT_TX_EMPTY) != 0) 1631 MEC_EVCNT_INCR(&sc->sc_ev_txempty); 1632 if ((statack & MEC_INT_TX_PACKET_SENT) != 0) 1633 MEC_EVCNT_INCR(&sc->sc_ev_txsent); 1634 #endif 1635 } 1636 1637 if (statack & 1638 (MEC_INT_TX_LINK_FAIL | 1639 MEC_INT_TX_MEM_ERROR | 1640 MEC_INT_TX_ABORT | 1641 MEC_INT_RX_FIFO_UNDERFLOW | 1642 MEC_INT_RX_DMA_UNDERFLOW)) { 1643 printf("%s: %s: interrupt status = 0x%08x\n", 1644 device_xname(sc->sc_dev), __func__, statreg); 1645 mec_init(ifp); 1646 break; 1647 } 1648 } 1649 1650 if (sent && !IFQ_IS_EMPTY(&ifp->if_snd)) { 1651 /* try to get more packets going */ 1652 mec_start(ifp); 1653 } 1654 1655 #if NRND > 0 1656 if (handled) 1657 rnd_add_uint32(&sc->sc_rnd_source, statreg); 1658 #endif 1659 1660 return handled; 1661 } 1662 1663 static void 1664 mec_rxintr(struct mec_softc *sc) 1665 { 1666 bus_space_tag_t st = sc->sc_st; 1667 bus_space_handle_t sh = sc->sc_sh; 1668 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1669 struct mbuf *m; 1670 struct mec_rxdesc *rxd; 1671 uint64_t rxstat; 1672 u_int len; 1673 int i; 1674 uint32_t crc; 1675 1676 DPRINTF(MEC_DEBUG_RXINTR, ("%s: called\n", __func__)); 1677 1678 for (i = sc->sc_rxptr;; i = MEC_NEXTRX(i)) { 1679 rxd = &sc->sc_rxdesc[i]; 1680 1681 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD); 1682 rxstat = rxd->rxd_stat; 1683 1684 DPRINTF(MEC_DEBUG_RXINTR, 1685 ("%s: rxstat = 0x%016llx, rxptr = %d\n", 1686 __func__, rxstat, i)); 1687 DPRINTF(MEC_DEBUG_RXINTR, ("%s: rxfifo = 0x%08x\n", 1688 __func__, (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO))); 1689 1690 if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) { 1691 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD); 1692 break; 1693 } 1694 1695 len = rxstat & MEC_RXSTAT_LEN; 1696 1697 if (len < ETHER_MIN_LEN || 1698 len > (MCLBYTES - MEC_ETHER_ALIGN)) { 1699 /* invalid length packet; drop it. */ 1700 DPRINTF(MEC_DEBUG_RXINTR, 1701 ("%s: wrong packet\n", __func__)); 1702 dropit: 1703 ifp->if_ierrors++; 1704 rxd->rxd_stat = 0; 1705 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD); 1706 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, 1707 MEC_CDRXADDR(sc, i)); 1708 continue; 1709 } 1710 1711 /* 1712 * If 802.1Q VLAN MTU is enabled, ignore the bad packet error. 
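		 * (Presumably the chip flags VLAN-tagged frames, which may
		 * be 4 bytes longer than the plain 1518 byte maximum, as bad
		 * packets even though they are valid once the tag is taken
		 * into account.)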
1713 */ 1714 if ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) != 0) 1715 rxstat &= ~MEC_RXSTAT_BADPACKET; 1716 1717 if (rxstat & 1718 (MEC_RXSTAT_BADPACKET | 1719 MEC_RXSTAT_LONGEVENT | 1720 MEC_RXSTAT_INVALID | 1721 MEC_RXSTAT_CRCERROR | 1722 MEC_RXSTAT_VIOLATION)) { 1723 printf("%s: mec_rxintr: status = 0x%016"PRIx64"\n", 1724 device_xname(sc->sc_dev), rxstat); 1725 goto dropit; 1726 } 1727 1728 /* 1729 * The MEC includes the CRC with every packet. Trim 1730 * it off here. 1731 */ 1732 len -= ETHER_CRC_LEN; 1733 1734 /* 1735 * now allocate an mbuf (and possibly a cluster) to hold 1736 * the received packet. 1737 */ 1738 MGETHDR(m, M_DONTWAIT, MT_DATA); 1739 if (m == NULL) { 1740 printf("%s: unable to allocate RX mbuf\n", 1741 device_xname(sc->sc_dev)); 1742 goto dropit; 1743 } 1744 if (len > (MHLEN - MEC_ETHER_ALIGN)) { 1745 MCLGET(m, M_DONTWAIT); 1746 if ((m->m_flags & M_EXT) == 0) { 1747 printf("%s: unable to allocate RX cluster\n", 1748 device_xname(sc->sc_dev)); 1749 m_freem(m); 1750 m = NULL; 1751 goto dropit; 1752 } 1753 } 1754 1755 /* 1756 * Note MEC chip seems to insert 2 byte padding at the top of 1757 * RX buffer, but we copy whole buffer to avoid unaligned copy. 1758 */ 1759 MEC_RXBUFSYNC(sc, i, len + ETHER_CRC_LEN, BUS_DMASYNC_POSTREAD); 1760 memcpy(mtod(m, void *), rxd->rxd_buf, MEC_ETHER_ALIGN + len); 1761 crc = be32dec(rxd->rxd_buf + MEC_ETHER_ALIGN + len); 1762 MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD); 1763 m->m_data += MEC_ETHER_ALIGN; 1764 1765 /* put RX buffer into FIFO again */ 1766 rxd->rxd_stat = 0; 1767 MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD); 1768 bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i)); 1769 1770 m->m_pkthdr.rcvif = ifp; 1771 m->m_pkthdr.len = m->m_len = len; 1772 if ((ifp->if_csum_flags_rx & (M_CSUM_TCPv4|M_CSUM_UDPv4)) != 0) 1773 mec_rxcsum(sc, m, RXSTAT_CKSUM(rxstat), crc); 1774 1775 ifp->if_ipackets++; 1776 1777 /* 1778 * Pass this up to any BPF listeners, but only 1779 * pass it up the stack if it's for us. 1780 */ 1781 if (ifp->if_bpf) 1782 bpf_ops->bpf_mtap(ifp->if_bpf, m); 1783 1784 /* Pass it on. */ 1785 (*ifp->if_input)(ifp, m); 1786 } 1787 1788 /* update RX pointer */ 1789 sc->sc_rxptr = i; 1790 } 1791 1792 static void 1793 mec_rxcsum(struct mec_softc *sc, struct mbuf *m, uint16_t rxcsum, uint32_t crc) 1794 { 1795 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1796 struct ether_header *eh; 1797 struct ip *ip; 1798 struct udphdr *uh; 1799 u_int len, pktlen, hlen; 1800 uint32_t csum_data, dsum; 1801 int csum_flags; 1802 const uint16_t *dp; 1803 1804 csum_data = 0; 1805 csum_flags = 0; 1806 1807 len = m->m_len; 1808 if (len < ETHER_HDR_LEN + sizeof(struct ip)) 1809 goto out; 1810 pktlen = len - ETHER_HDR_LEN; 1811 eh = mtod(m, struct ether_header *); 1812 if (ntohs(eh->ether_type) != ETHERTYPE_IP) 1813 goto out; 1814 ip = (struct ip *)((uint8_t *)eh + ETHER_HDR_LEN); 1815 if (ip->ip_v != IPVERSION) 1816 goto out; 1817 1818 hlen = ip->ip_hl << 2; 1819 if (hlen < sizeof(struct ip)) 1820 goto out; 1821 1822 /* 1823 * Bail if too short, has random trailing garbage, truncated, 1824 * fragment, or has ethernet pad. 
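	 *
	 * e.g. a minimum-size 60 byte frame carrying a 40 byte TCP datagram
	 * has pktlen = 46 but ip_len = 40, so the padding bytes make the
	 * hardware sum useless and we bail; fragments are skipped as well
	 * since the TCP/UDP checksum can only be verified over the complete
	 * reassembled datagram.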
1825 */ 1826 if (ntohs(ip->ip_len) < hlen || 1827 ntohs(ip->ip_len) != pktlen || 1828 (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) != 0) 1829 goto out; 1830 1831 switch (ip->ip_p) { 1832 case IPPROTO_TCP: 1833 if ((ifp->if_csum_flags_rx & M_CSUM_TCPv4) == 0 || 1834 pktlen < (hlen + sizeof(struct tcphdr))) 1835 goto out; 1836 csum_flags = M_CSUM_TCPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR; 1837 break; 1838 case IPPROTO_UDP: 1839 if ((ifp->if_csum_flags_rx & M_CSUM_UDPv4) == 0 || 1840 pktlen < (hlen + sizeof(struct udphdr))) 1841 goto out; 1842 uh = (struct udphdr *)((uint8_t *)ip + hlen); 1843 if (uh->uh_sum == 0) 1844 goto out; /* no checksum */ 1845 csum_flags = M_CSUM_UDPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR; 1846 break; 1847 default: 1848 goto out; 1849 } 1850 1851 /* 1852 * The computed checksum includes Ethernet header, IP headers, 1853 * and CRC, so we have to deduct them. 1854 * Note IP header cksum should be 0xffff so we don't have to 1855 * dedecut them. 1856 */ 1857 dsum = 0; 1858 1859 /* deduct Ethernet header */ 1860 dp = (const uint16_t *)eh; 1861 for (hlen = 0; hlen < (ETHER_HDR_LEN / sizeof(uint16_t)); hlen++) 1862 dsum += ntohs(*dp++); 1863 1864 /* deduct CRC */ 1865 if (len & 1) { 1866 dsum += (crc >> 24) & 0x00ff; 1867 dsum += (crc >> 8) & 0xffff; 1868 dsum += (crc << 8) & 0xff00; 1869 } else { 1870 dsum += (crc >> 16) & 0xffff; 1871 dsum += (crc >> 0) & 0xffff; 1872 } 1873 while (dsum >> 16) 1874 dsum = (dsum >> 16) + (dsum & 0xffff); 1875 1876 csum_data = rxcsum; 1877 csum_data += (uint16_t)~dsum; 1878 1879 while (csum_data >> 16) 1880 csum_data = (csum_data >> 16) + (csum_data & 0xffff); 1881 1882 out: 1883 m->m_pkthdr.csum_flags = csum_flags; 1884 m->m_pkthdr.csum_data = csum_data; 1885 } 1886 1887 static void 1888 mec_txintr(struct mec_softc *sc, uint32_t txptr) 1889 { 1890 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1891 struct mec_txdesc *txd; 1892 struct mec_txsoft *txs; 1893 bus_dmamap_t dmamap; 1894 uint64_t txstat; 1895 int i; 1896 u_int col; 1897 1898 DPRINTF(MEC_DEBUG_TXINTR, ("%s: called\n", __func__)); 1899 1900 for (i = sc->sc_txdirty; i != txptr && sc->sc_txpending != 0; 1901 i = MEC_NEXTTX(i), sc->sc_txpending--) { 1902 txd = &sc->sc_txdesc[i]; 1903 1904 MEC_TXCMDSYNC(sc, i, 1905 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1906 1907 txstat = txd->txd_stat; 1908 DPRINTF(MEC_DEBUG_TXINTR, 1909 ("%s: dirty = %d, txstat = 0x%016llx\n", 1910 __func__, i, txstat)); 1911 if ((txstat & MEC_TXSTAT_SENT) == 0) { 1912 MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD); 1913 break; 1914 } 1915 1916 txs = &sc->sc_txsoft[i]; 1917 if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) { 1918 dmamap = txs->txs_dmamap; 1919 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, 1920 dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1921 bus_dmamap_unload(sc->sc_dmat, dmamap); 1922 m_freem(txs->txs_mbuf); 1923 txs->txs_mbuf = NULL; 1924 } 1925 1926 col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT; 1927 ifp->if_collisions += col; 1928 1929 if ((txstat & MEC_TXSTAT_SUCCESS) == 0) { 1930 printf("%s: TX error: txstat = 0x%016"PRIx64"\n", 1931 device_xname(sc->sc_dev), txstat); 1932 ifp->if_oerrors++; 1933 } else 1934 ifp->if_opackets++; 1935 } 1936 1937 /* update the dirty TX buffer pointer */ 1938 sc->sc_txdirty = i; 1939 DPRINTF(MEC_DEBUG_INTR, 1940 ("%s: sc_txdirty = %2d, sc_txpending = %2d\n", 1941 __func__, sc->sc_txdirty, sc->sc_txpending)); 1942 1943 /* cancel the watchdog timer if there are no pending TX packets */ 1944 if (sc->sc_txpending == 0) 1945 ifp->if_timer = 0; 1946 if (sc->sc_txpending < 
    MEC_NTXDESC - MEC_NTXDESC_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
}

static bool
mec_shutdown(device_t self, int howto)
{
	struct mec_softc *sc = device_private(self);

	mec_stop(&sc->sc_ethercom.ec_if, 1);
	/* make sure to stop DMA etc. */
	mec_reset(sc);

	return true;
}