1 /* $NetBSD: ralink_eth.c,v 1.6 2012/07/22 14:32:52 matt Exp $ */ 2 /*- 3 * Copyright (c) 2011 CradlePoint Technology, Inc. 4 * All rights reserved. 5 * 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY CRADLEPOINT TECHNOLOGY, INC. AND CONTRIBUTORS 17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS 20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 * POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* ralink_eth.c -- Ralink Ethernet Driver */ 30 31 #include <sys/cdefs.h> 32 __KERNEL_RCSID(0, "$NetBSD: ralink_eth.c,v 1.6 2012/07/22 14:32:52 matt Exp $"); 33 34 #include <sys/param.h> 35 #include <sys/bus.h> 36 #include <sys/callout.h> 37 #include <sys/device.h> 38 #include <sys/endian.h> 39 #include <sys/errno.h> 40 #include <sys/ioctl.h> 41 #include <sys/intr.h> 42 #include <sys/kernel.h> 43 #include <sys/malloc.h> 44 #include <sys/mbuf.h> 45 #include <sys/socket.h> 46 #include <sys/systm.h> 47 48 #include <uvm/uvm_extern.h> 49 50 #include <net/if.h> 51 #include <net/if_dl.h> 52 #include <net/if_media.h> 53 #include <net/if_ether.h> 54 #include <net/if_vlanvar.h> 55 56 #include <net/bpf.h> 57 58 #include <dev/mii/mii.h> 59 #include <dev/mii/miivar.h> 60 #include <dev/mii/mii_bitbang.h> 61 62 #include <mips/ralink/ralink_var.h> 63 #include <mips/ralink/ralink_reg.h> 64 #if 0 65 #define CPDEBUG /* XXX TMP DEBUG FIXME */ 66 #define RALINK_ETH_DEBUG /* XXX TMP DEBUG FIXME */ 67 #define ENABLE_RALINK_DEBUG_ERROR 1 68 #define ENABLE_RALINK_DEBUG_MISC 1 69 #define ENABLE_RALINK_DEBUG_INFO 1 70 #define ENABLE_RALINK_DEBUG_FORCE 1 71 #define ENABLE_RALINK_DEBUG_REG 1 72 #endif 73 #include <mips/ralink/ralink_debug.h> 74 75 76 /* PDMA RX Descriptor Format */ 77 struct ralink_rx_desc { 78 uint32_t data_ptr; 79 uint32_t rxd_info1; 80 #define RXD_LEN1(x) (((x) >> 0) & 0x3fff) 81 #define RXD_LAST1 (1 << 14) 82 #define RXD_LEN0(x) (((x) >> 16) & 0x3fff) 83 #define RXD_LAST0 (1 << 30) 84 #define RXD_DDONE (1 << 31) 85 uint32_t unused; 86 uint32_t rxd_info2; 87 #define RXD_FOE(x) (((x) >> 0) & 0x3fff) 88 #define RXD_FVLD (1 << 14) 89 #define RXD_INFO(x) (((x) >> 16) & 0xff) 90 #define RXD_PORT(x) (((x) >> 24) & 0x7) 91 #define RXD_INFO_CPU (1 << 27) 92 #define RXD_L4_FAIL (1 << 28) 93 #define RXD_IP_FAIL (1 << 29) 94 #define RXD_L4_VLD (1 << 30) 95 #define RXD_IP_VLD (1 << 31) 96 }; 97 98 /* PDMA RX Descriptor Format */ 99 struct ralink_tx_desc { 100 uint32_t data_ptr0; 101 uint32_t txd_info1; 102 #define TXD_LEN1(x) (((x) & 0x3fff) << 0) 103 #define TXD_LAST1 (1 << 14) 104 #define TXD_BURST (1 << 15) 105 #define TXD_LEN0(x) (((x) & 0x3fff) << 16) 106 #define TXD_LAST0 (1 << 30) 107 #define TXD_DDONE (1 << 31) 108 uint32_t data_ptr1; 109 uint32_t txd_info2; 110 #define TXD_VIDX(x) (((x) & 0xf) << 0) 111 #define TXD_VPRI(x) (((x) & 0x7) << 4) 112 #define TXD_VEN (1 << 7) 113 #define TXD_SIDX(x) (((x) & 0xf) << 8) 114 #define TXD_SEN(x) (1 << 13) 115 #define TXD_QN(x) (((x) & 0x7) << 16) 116 #define TXD_PN(x) (((x) & 0x7) << 24) 117 #define TXD_PN_CPU 0 118 #define TXD_PN_GDMA1 1 119 #define TXD_PN_GDMA2 2 120 #define TXD_TCP_EN (1 << 29) 121 #define TXD_UDP_EN (1 << 30) 122 #define TXD_IP_EN (1 << 31) 123 }; 124 125 /* TODO: 126 * try to scale number of descriptors swith size of memory 127 * these numbers may have a significant impact on performance/memory/mbuf usage 128 */ 129 #if RTMEMSIZE >= 64 130 #define RALINK_ETH_NUM_RX_DESC 256 131 #define RALINK_ETH_NUM_TX_DESC 256 132 #else 133 #define RALINK_ETH_NUM_RX_DESC 64 134 #define RALINK_ETH_NUM_TX_DESC 64 135 #endif 136 /* maximum segments per packet */ 137 #define RALINK_ETH_MAX_TX_SEGS 1 138 139 /* define a struct for ease of dma memory allocation */ 140 struct ralink_descs { 141 struct ralink_rx_desc rxdesc[RALINK_ETH_NUM_RX_DESC]; 142 struct ralink_tx_desc txdesc[RALINK_ETH_NUM_TX_DESC]; 143 }; 144 145 /* Software state for transmit jobs. */ 146 struct ralink_eth_txstate { 147 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 148 bus_dmamap_t txs_dmamap; /* our DMA map */ 149 int txs_idx; /* the index in txdesc ring that */ 150 /* this state is tracking */ 151 SIMPLEQ_ENTRY(ralink_eth_txstate) txs_q; 152 }; 153 154 SIMPLEQ_HEAD(ralink_eth_txsq, ralink_eth_txstate); 155 156 /* 157 * Software state for receive jobs. 158 */ 159 struct ralink_eth_rxstate { 160 struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 161 bus_dmamap_t rxs_dmamap; /* our DMA map */ 162 }; 163 164 typedef struct ralink_eth_softc { 165 device_t sc_dev; /* generic device information */ 166 bus_space_tag_t sc_memt; /* bus space tag */ 167 bus_space_handle_t sc_sy_memh; /* handle at SYSCTL_BASE */ 168 bus_space_handle_t sc_fe_memh; /* handle at FRAME_ENGINE_BASE */ 169 bus_space_handle_t sc_sw_memh; /* handle at ETH_SW_BASE */ 170 int sc_sy_size; /* size of Sysctl regs space */ 171 int sc_fe_size; /* size of Frame Engine regs space */ 172 int sc_sw_size; /* size of Ether Switch regs space */ 173 bus_dma_tag_t sc_dmat; /* bus DMA tag */ 174 void *sc_ih; /* interrupt handle */ 175 176 /* tx/rx dma mapping */ 177 bus_dma_segment_t sc_dseg; 178 int sc_ndseg; 179 bus_dmamap_t sc_pdmamap; /* PDMA DMA map */ 180 #define sc_pdma sc_pdmamap->dm_segs[0].ds_addr 181 182 struct ralink_descs *sc_descs; 183 #define sc_rxdesc sc_descs->rxdesc 184 #define sc_txdesc sc_descs->txdesc 185 186 #define RALINK_MIN_BUF 64 187 char ralink_zero_buf[RALINK_MIN_BUF]; 188 189 struct ralink_eth_txstate sc_txstate[RALINK_ETH_NUM_TX_DESC]; 190 struct ralink_eth_rxstate sc_rxstate[RALINK_ETH_NUM_RX_DESC]; 191 192 struct ralink_eth_txsq sc_txfreeq; /* free Tx descsofts */ 193 struct ralink_eth_txsq sc_txdirtyq; /* dirty Tx descsofts */ 194 195 struct ethercom sc_ethercom; /* ethernet common data */ 196 u_int sc_pending_tx; 197 198 /* mii */ 199 struct mii_data sc_mii; 200 struct callout sc_tick_callout; 201 202 struct evcnt sc_evcnt_spurious_intr; 203 struct evcnt sc_evcnt_rxintr; 204 struct evcnt sc_evcnt_rxintr_skip_len; 205 struct evcnt sc_evcnt_rxintr_skip_tag_none; 206 struct evcnt sc_evcnt_rxintr_skip_tag_inval; 207 struct evcnt sc_evcnt_rxintr_skip_inact; 208 struct evcnt sc_evcnt_txintr; 209 struct evcnt sc_evcnt_input; 210 struct evcnt sc_evcnt_output; 211 struct evcnt sc_evcnt_watchdog; 212 struct evcnt sc_evcnt_wd_reactivate; 213 struct evcnt sc_evcnt_wd_tx; 214 struct evcnt sc_evcnt_wd_spurious; 215 struct evcnt sc_evcnt_add_rxbuf_hdr_fail; 216 struct evcnt sc_evcnt_add_rxbuf_mcl_fail; 217 } ralink_eth_softc_t; 218 219 /* alignment so the IP header is aligned */ 220 #define RALINK_ETHER_ALIGN 2 221 222 /* device functions */ 223 static int ralink_eth_match(device_t, cfdata_t, void *); 224 static void ralink_eth_attach(device_t, device_t, void *); 225 static int ralink_eth_detach(device_t, int); 226 static int ralink_eth_activate(device_t, enum devact); 227 228 /* local driver functions */ 229 static void ralink_eth_hw_init(ralink_eth_softc_t *); 230 static int ralink_eth_intr(void *); 231 static void ralink_eth_reset(ralink_eth_softc_t *); 232 static void ralink_eth_rxintr(ralink_eth_softc_t *); 233 static void ralink_eth_txintr(ralink_eth_softc_t *); 234 235 /* partition functions */ 236 static int ralink_eth_enable(ralink_eth_softc_t *); 237 static void ralink_eth_disable(ralink_eth_softc_t *); 238 239 /* ifnet functions */ 240 static int ralink_eth_init(struct ifnet *); 241 static void ralink_eth_rxdrain(ralink_eth_softc_t *); 242 static void ralink_eth_stop(struct ifnet *, int); 243 static int ralink_eth_add_rxbuf(ralink_eth_softc_t *, int); 244 static void ralink_eth_start(struct ifnet *); 245 static void ralink_eth_watchdog(struct ifnet *); 246 static int ralink_eth_ioctl(struct ifnet *, u_long, void *); 247 248 /* mii functions */ 249 #if defined(RT3050) || defined(RT3052) 250 static void ralink_eth_mdio_enable(ralink_eth_softc_t *, bool); 251 #endif 252 static void ralink_eth_mii_statchg(struct ifnet *); 253 static void ralink_eth_mii_tick(void *); 254 static int ralink_eth_mii_read(device_t, int, int); 255 static void ralink_eth_mii_write(device_t, int, int, int); 256 257 CFATTACH_DECL_NEW(reth, sizeof(struct ralink_eth_softc), 258 ralink_eth_match, ralink_eth_attach, ralink_eth_detach, ralink_eth_activate); 259 260 static inline uint32_t 261 sy_read(const ralink_eth_softc_t *sc, const bus_size_t off) 262 { 263 return bus_space_read_4(sc->sc_memt, sc->sc_sy_memh, off); 264 } 265 266 static inline void 267 sy_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val) 268 { 269 bus_space_write_4(sc->sc_memt, sc->sc_sy_memh, off, val); 270 } 271 272 static inline uint32_t 273 fe_read(const ralink_eth_softc_t *sc, const bus_size_t off) 274 { 275 return bus_space_read_4(sc->sc_memt, sc->sc_fe_memh, off); 276 } 277 278 static inline void 279 fe_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val) 280 { 281 bus_space_write_4(sc->sc_memt, sc->sc_fe_memh, off, val); 282 } 283 284 static inline uint32_t 285 sw_read(const ralink_eth_softc_t *sc, const bus_size_t off) 286 { 287 return bus_space_read_4(sc->sc_memt, sc->sc_sw_memh, off); 288 } 289 290 static inline void 291 sw_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val) 292 { 293 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, off, val); 294 } 295 296 /* 297 * ralink_eth_match 298 */ 299 int 300 ralink_eth_match(device_t parent, cfdata_t cf, void *aux) 301 { 302 return 1; 303 } 304 305 /* 306 * ralink_eth_attach 307 */ 308 void 309 ralink_eth_attach(device_t parent, device_t self, void *aux) 310 { 311 ralink_eth_softc_t * const sc = device_private(self); 312 const struct mainbus_attach_args *ma = aux; 313 int error; 314 int i; 315 316 aprint_naive(": Ralink Ethernet\n"); 317 aprint_normal(": Ralink Ethernet\n"); 318 319 evcnt_attach_dynamic(&sc->sc_evcnt_spurious_intr, EVCNT_TYPE_INTR, NULL, 320 device_xname(self), "spurious intr"); 321 evcnt_attach_dynamic(&sc->sc_evcnt_rxintr, EVCNT_TYPE_INTR, NULL, 322 device_xname(self), "rxintr"); 323 evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_len, 324 EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, 325 device_xname(self), "rxintr skip: no room for VLAN header"); 326 evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_tag_none, 327 EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, 328 device_xname(self), "rxintr skip: no VLAN tag"); 329 evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_tag_inval, 330 EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, 331 device_xname(self), "rxintr skip: invalid VLAN tag"); 332 evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_inact, 333 EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr, 334 device_xname(self), "rxintr skip: partition inactive"); 335 evcnt_attach_dynamic(&sc->sc_evcnt_txintr, EVCNT_TYPE_INTR, NULL, 336 device_xname(self), "txintr"); 337 evcnt_attach_dynamic(&sc->sc_evcnt_input, EVCNT_TYPE_INTR, NULL, 338 device_xname(self), "input"); 339 evcnt_attach_dynamic(&sc->sc_evcnt_output, EVCNT_TYPE_INTR, NULL, 340 device_xname(self), "output"); 341 evcnt_attach_dynamic(&sc->sc_evcnt_watchdog, EVCNT_TYPE_INTR, NULL, 342 device_xname(self), "watchdog"); 343 evcnt_attach_dynamic(&sc->sc_evcnt_wd_tx, 344 EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog, 345 device_xname(self), "watchdog TX timeout"); 346 evcnt_attach_dynamic(&sc->sc_evcnt_wd_spurious, 347 EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog, 348 device_xname(self), "watchdog spurious"); 349 evcnt_attach_dynamic(&sc->sc_evcnt_wd_reactivate, 350 EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog, 351 device_xname(self), "watchdog reactivate"); 352 evcnt_attach_dynamic(&sc->sc_evcnt_add_rxbuf_hdr_fail, 353 EVCNT_TYPE_INTR, NULL, 354 device_xname(self), "add rxbuf hdr fail"); 355 evcnt_attach_dynamic(&sc->sc_evcnt_add_rxbuf_mcl_fail, 356 EVCNT_TYPE_INTR, NULL, 357 device_xname(self), "add rxbuf mcl fail"); 358 359 /* 360 * In order to obtain unique initial Ethernet address on a host, 361 * do some randomisation using the current uptime. It's not meant 362 * for anything but avoiding hard-coding an address. 363 */ 364 uint8_t enaddr[ETHER_ADDR_LEN] = { 0x00, 0x30, 0x44, 0x00, 0x00, 0x00 }; 365 366 sc->sc_dev = self; 367 sc->sc_dmat = ma->ma_dmat; 368 sc->sc_memt = ma->ma_memt; 369 sc->sc_sy_size = 0x10000; 370 sc->sc_fe_size = 0x10000; 371 sc->sc_sw_size = 0x08000; 372 373 /* 374 * map the registers 375 * 376 * we map the Sysctl, Frame Engine and Ether Switch registers 377 * seperately so we can use the defined register offsets sanely 378 */ 379 if ((error = bus_space_map(sc->sc_memt, RA_SYSCTL_BASE, 380 sc->sc_sy_size, 0, &sc->sc_sy_memh)) != 0) { 381 aprint_error_dev(self, "unable to map Sysctl registers, " 382 "error=%d\n", error); 383 goto fail_0a; 384 } 385 if ((error = bus_space_map(sc->sc_memt, RA_FRAME_ENGINE_BASE, 386 sc->sc_fe_size, 0, &sc->sc_fe_memh)) != 0) { 387 aprint_error_dev(self, "unable to map Frame Engine registers, " 388 "error=%d\n", error); 389 goto fail_0b; 390 } 391 if ((error = bus_space_map(sc->sc_memt, RA_ETH_SW_BASE, 392 sc->sc_sw_size, 0, &sc->sc_sw_memh)) != 0) { 393 aprint_error_dev(self, "unable to map Ether Switch registers, " 394 "error=%d\n", error); 395 goto fail_0c; 396 } 397 398 /* Allocate desc structures, and create & load the DMA map for them */ 399 if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ralink_descs), 400 PAGE_SIZE, 0, &sc->sc_dseg, 1, &sc->sc_ndseg, 0)) != 0) { 401 aprint_error_dev(self, "unable to allocate transmit descs, " 402 "error=%d\n", error); 403 goto fail_1; 404 } 405 406 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg, 407 sizeof(struct ralink_descs), (void **)&sc->sc_descs, BUS_DMA_COHERENT)) 408 != 0) { 409 aprint_error_dev(self, "unable to map control data, " 410 "error=%d\n", error); 411 goto fail_2; 412 } 413 414 if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ralink_descs), 1, 415 sizeof(struct ralink_descs), 0, 0, &sc->sc_pdmamap)) != 0) { 416 aprint_error_dev(self, "unable to create control data DMA map, " 417 "error=%d\n", error); 418 goto fail_3; 419 } 420 421 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_pdmamap, sc->sc_descs, 422 sizeof(struct ralink_descs), NULL, 0)) != 0) { 423 aprint_error_dev(self, "unable to load control data DMA map, " 424 "error=%d\n", error); 425 goto fail_4; 426 } 427 428 /* Create the transmit buffer DMA maps. */ 429 for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { 430 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 431 RALINK_ETH_MAX_TX_SEGS, MCLBYTES, 0, 0, 432 &sc->sc_txstate[i].txs_dmamap)) != 0) { 433 aprint_error_dev(self, "unable to create tx DMA map %d, " 434 "error=%d\n", i, error); 435 goto fail_5; 436 } 437 } 438 439 /* Create the receive buffer DMA maps. */ 440 for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { 441 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 442 MCLBYTES, 0, 0, &sc->sc_rxstate[i].rxs_dmamap)) != 0) { 443 aprint_error_dev(self, "unable to create rx DMA map %d, " 444 "error=%d\n", i, error); 445 goto fail_6; 446 } 447 sc->sc_rxstate[i].rxs_mbuf = NULL; 448 } 449 450 /* this is a zero buffer used for zero'ing out short packets */ 451 memset(sc->ralink_zero_buf, 0, RALINK_MIN_BUF); 452 453 /* setup some address in hardware */ 454 fe_write(sc, RA_FE_GDMA1_MAC_LSB, 455 (enaddr[5] | (enaddr[4] << 8) | 456 (enaddr[3] << 16) | (enaddr[2] << 24))); 457 fe_write(sc, RA_FE_GDMA1_MAC_MSB, 458 (enaddr[1] | (enaddr[0] << 8))); 459 460 /* 461 * iterate through ports 462 * slickrock must use specific non-linear sequence 463 * others are linear 464 */ 465 struct ifnet * const ifp = &sc->sc_ethercom.ec_if; 466 467 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 468 469 /* 470 * Initialize our media structures. 471 * This may probe the PHY, if present. 472 */ 473 sc->sc_mii.mii_ifp = ifp; 474 sc->sc_mii.mii_readreg = ralink_eth_mii_read; 475 sc->sc_mii.mii_writereg = ralink_eth_mii_write; 476 sc->sc_mii.mii_statchg = ralink_eth_mii_statchg; 477 sc->sc_ethercom.ec_mii = &sc->sc_mii; 478 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange, 479 ether_mediastatus); 480 mii_attach(sc->sc_dev, &sc->sc_mii, ~0, i, MII_OFFSET_ANY, 481 MIIF_FORCEANEG|MIIF_DOPAUSE|MIIF_NOISOLATE); 482 483 if (LIST_EMPTY(&sc->sc_mii.mii_phys)) { 484 #if 1 485 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T| 486 IFM_FDX|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE, 0, NULL); 487 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_1000_T| 488 IFM_FDX|IFM_ETH_RXPAUSE|IFM_ETH_TXPAUSE); 489 #else 490 ifmedia_add(&sc->sc_mii.mii_media, 491 IFM_ETHER|IFM_MANUAL, 0, NULL); 492 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL); 493 #endif 494 } else { 495 /* Ensure we mask ok for the switch multiple phy's */ 496 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 497 } 498 499 ifp->if_softc = sc; 500 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 501 ifp->if_init = ralink_eth_init; 502 ifp->if_start = ralink_eth_start; 503 ifp->if_ioctl = ralink_eth_ioctl; 504 ifp->if_stop = ralink_eth_stop; 505 ifp->if_watchdog = ralink_eth_watchdog; 506 IFQ_SET_READY(&ifp->if_snd); 507 508 /* We can support 802.1Q VLAN-sized frames. */ 509 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 510 511 /* We support IPV4 CRC Offload */ 512 ifp->if_capabilities |= 513 (IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 514 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 515 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx); 516 517 /* Attach the interface. */ 518 if_attach(ifp); 519 ether_ifattach(ifp, enaddr); 520 521 /* init our mii ticker */ 522 callout_init(&sc->sc_tick_callout, 0); 523 callout_reset(&sc->sc_tick_callout, hz, ralink_eth_mii_tick, sc); 524 525 return; 526 527 /* 528 * Free any resources we've allocated during the failed attach 529 * attempt. Do this in reverse order and fall through. 530 */ 531 fail_6: 532 for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { 533 if (sc->sc_rxstate[i].rxs_dmamap != NULL) 534 bus_dmamap_destroy(sc->sc_dmat, 535 sc->sc_rxstate[i].rxs_dmamap); 536 } 537 fail_5: 538 for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { 539 if (sc->sc_txstate[i].txs_dmamap != NULL) 540 bus_dmamap_destroy(sc->sc_dmat, 541 sc->sc_txstate[i].txs_dmamap); 542 } 543 bus_dmamap_unload(sc->sc_dmat, sc->sc_pdmamap); 544 fail_4: 545 bus_dmamap_destroy(sc->sc_dmat, sc->sc_pdmamap); 546 fail_3: 547 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_descs, 548 sizeof(struct ralink_descs)); 549 fail_2: 550 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg); 551 fail_1: 552 bus_space_unmap(sc->sc_memt, sc->sc_sw_memh, sc->sc_sw_size); 553 fail_0c: 554 bus_space_unmap(sc->sc_memt, sc->sc_fe_memh, sc->sc_fe_size); 555 fail_0b: 556 bus_space_unmap(sc->sc_memt, sc->sc_sy_memh, sc->sc_fe_size); 557 fail_0a: 558 return; 559 } 560 561 /* 562 * ralink_eth_activate: 563 * 564 * Handle device activation/deactivation requests. 565 */ 566 int 567 ralink_eth_activate(device_t self, enum devact act) 568 { 569 ralink_eth_softc_t * const sc = device_private(self); 570 int error = 0; 571 int s; 572 573 s = splnet(); 574 switch (act) { 575 case DVACT_DEACTIVATE: 576 if_deactivate(&sc->sc_ethercom.ec_if); 577 break; 578 } 579 splx(s); 580 581 return error; 582 } 583 584 /* 585 * ralink_eth_partition_enable 586 */ 587 static int 588 ralink_eth_enable(ralink_eth_softc_t *sc) 589 { 590 RALINK_DEBUG_FUNC_ENTRY(); 591 592 if (sc->sc_ih != NULL) { 593 RALINK_DEBUG(RALINK_DEBUG_MISC, "%s() already active", 594 __func__); 595 return EALREADY; 596 } 597 598 sc->sc_pending_tx = 0; 599 600 int s = splnet(); 601 ralink_eth_hw_init(sc); 602 sc->sc_ih = ra_intr_establish(RA_IRQ_FENGINE, 603 ralink_eth_intr, sc, 1); 604 splx(s); 605 if (sc->sc_ih == NULL) { 606 RALINK_DEBUG(RALINK_DEBUG_ERROR, 607 "%s: unable to establish interrupt\n", 608 device_xname(sc->sc_dev)); 609 return EIO; 610 } 611 612 return 0; 613 } 614 615 /* 616 * ralink_eth_partition_disable 617 */ 618 static void 619 ralink_eth_disable(ralink_eth_softc_t *sc) 620 { 621 RALINK_DEBUG_FUNC_ENTRY(); 622 623 int s = splnet(); 624 ralink_eth_rxdrain(sc); 625 ra_intr_disestablish(sc->sc_ih); 626 sc->sc_ih = NULL; 627 628 /* stop the mii ticker */ 629 callout_stop(&sc->sc_tick_callout); 630 631 /* quiesce the block */ 632 ralink_eth_reset(sc); 633 splx(s); 634 } 635 636 /* 637 * ralink_eth_detach 638 */ 639 static int 640 ralink_eth_detach(device_t self, int flags) 641 { 642 RALINK_DEBUG_FUNC_ENTRY(); 643 ralink_eth_softc_t * const sc = device_private(self); 644 struct ifnet * const ifp = &sc->sc_ethercom.ec_if; 645 struct ralink_eth_rxstate *rxs; 646 struct ralink_eth_txstate *txs; 647 int i; 648 649 ralink_eth_disable(sc); 650 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 651 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); 652 ether_ifdetach(ifp); 653 if_detach(ifp); 654 655 for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { 656 rxs = &sc->sc_rxstate[i]; 657 if (rxs->rxs_mbuf != NULL) { 658 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 659 m_freem(rxs->rxs_mbuf); 660 rxs->rxs_mbuf = NULL; 661 } 662 bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap); 663 } 664 665 for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { 666 txs = &sc->sc_txstate[i]; 667 if (txs->txs_mbuf != NULL) { 668 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 669 m_freem(txs->txs_mbuf); 670 txs->txs_mbuf = NULL; 671 } 672 bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap); 673 } 674 675 bus_dmamap_unload(sc->sc_dmat, sc->sc_pdmamap); 676 bus_dmamap_destroy(sc->sc_dmat, sc->sc_pdmamap); 677 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_descs, 678 sizeof(struct ralink_descs)); 679 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg); 680 681 bus_space_unmap(sc->sc_memt, sc->sc_sw_memh, sc->sc_sw_size); 682 bus_space_unmap(sc->sc_memt, sc->sc_fe_memh, sc->sc_fe_size); 683 684 return 0; 685 } 686 687 /* 688 * ralink_eth_reset 689 */ 690 static void 691 ralink_eth_reset(ralink_eth_softc_t *sc) 692 { 693 RALINK_DEBUG_FUNC_ENTRY(); 694 uint32_t r; 695 696 /* Reset the frame engine */ 697 r = sy_read(sc, RA_SYSCTL_RST); 698 r |= RST_FE; 699 sy_write(sc, RA_SYSCTL_RST, r); 700 r ^= RST_FE; 701 sy_write(sc, RA_SYSCTL_RST, r); 702 703 /* Wait until the PDMA is quiscent */ 704 for (;;) { 705 r = fe_read(sc, RA_FE_PDMA_GLOBAL_CFG); 706 if (r & FE_PDMA_GLOBAL_CFG_RX_DMA_BUSY) { 707 aprint_normal_dev(sc->sc_dev, "RX DMA BUSY\n"); 708 continue; 709 } 710 if (r & FE_PDMA_GLOBAL_CFG_TX_DMA_BUSY) { 711 aprint_normal_dev(sc->sc_dev, "TX DMA BUSY\n"); 712 continue; 713 } 714 break; 715 } 716 } 717 718 /* 719 * ralink_eth_hw_init 720 */ 721 static void 722 ralink_eth_hw_init(ralink_eth_softc_t *sc) 723 { 724 RALINK_DEBUG_FUNC_ENTRY(); 725 struct ralink_eth_txstate *txs; 726 uint32_t r; 727 int i; 728 729 /* reset to a known good state */ 730 ralink_eth_reset(sc); 731 732 #if defined(RT3050) || defined(RT3052) 733 /* Bring the switch to a sane default state (from linux driver) */ 734 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SGC2, 735 0x00000000); 736 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PFC1, 737 0x00405555); /* check VLAN tag on port forward */ 738 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VLANI0, 739 0x00002001); 740 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC0, 741 0x00001002); 742 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC1, 743 0x00001001); 744 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC2, 745 0x00001001); 746 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VMSC0, 747 0xffff417e); 748 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC0, 749 0x00007f7f); 750 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC2, 751 0x00007f3f); 752 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FTC2, 753 0x00d6500c); 754 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SWGC, 755 0x0008a301); /* hashing algorithm=XOR48 */ 756 /* aging interval=300sec */ 757 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SOCPC, 758 0x02404040); 759 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FPORT, 760 0x3f502b28); /* Change polling Ext PHY Addr=0x0 */ 761 bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FPA, 762 0x00000000); 763 764 /* do some mii magic TODO: define these registers/bits */ 765 /* lower down PHY 10Mbps mode power */ 766 /* select local register */ 767 ralink_eth_mii_write(sc->sc_dev, 0, 31, 0x8000); 768 769 for (i=0;i<5;i++){ 770 /* set TX10 waveform coefficient */ 771 ralink_eth_mii_write(sc->sc_dev, i, 26, 0x1601); 772 773 /* set TX100/TX10 AD/DA current bias */ 774 ralink_eth_mii_write(sc->sc_dev, i, 29, 0x7058); 775 776 /* set TX100 slew rate control */ 777 ralink_eth_mii_write(sc->sc_dev, i, 30, 0x0018); 778 } 779 780 /* PHY IOT */ 781 782 /* select global register */ 783 ralink_eth_mii_write(sc->sc_dev, 0, 31, 0x0); 784 785 /* tune TP_IDL tail and head waveform */ 786 ralink_eth_mii_write(sc->sc_dev, 0, 22, 0x052f); 787 788 /* set TX10 signal amplitude threshold to minimum */ 789 ralink_eth_mii_write(sc->sc_dev, 0, 17, 0x0fe0); 790 791 /* set squelch amplitude to higher threshold */ 792 ralink_eth_mii_write(sc->sc_dev, 0, 18, 0x40ba); 793 794 /* longer TP_IDL tail length */ 795 ralink_eth_mii_write(sc->sc_dev, 0, 14, 0x65); 796 797 /* select local register */ 798 ralink_eth_mii_write(sc->sc_dev, 0, 31, 0x8000); 799 #else 800 /* GE1 + GigSW */ 801 fe_write(sc, RA_FE_MDIO_CFG1, 802 MDIO_CFG_PHY_ADDR(0x1f) | 803 MDIO_CFG_BP_EN | 804 MDIO_CFG_FORCE_CFG | 805 MDIO_CFG_SPEED(MDIO_CFG_SPEED_1000M) | 806 MDIO_CFG_FULL_DUPLEX | 807 MDIO_CFG_FC_TX | 808 MDIO_CFG_FC_RX | 809 MDIO_CFG_TX_CLK_MODE(MDIO_CFG_TX_CLK_MODE_3COM)); 810 #endif 811 812 /* 813 * TODO: QOS - RT3052 has 4 TX queues for QOS, 814 * forgoing for 1 for simplicity 815 */ 816 817 /* 818 * Allocate DMA accessible memory for TX/RX descriptor rings 819 */ 820 821 /* Initialize the TX queues. */ 822 SIMPLEQ_INIT(&sc->sc_txfreeq); 823 SIMPLEQ_INIT(&sc->sc_txdirtyq); 824 825 /* Initialize the TX descriptor ring. */ 826 memset(sc->sc_txdesc, 0, sizeof(sc->sc_txdesc)); 827 for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) { 828 829 sc->sc_txdesc[i].txd_info1 = TXD_LAST0 | TXD_DDONE; 830 831 /* setup the freeq as well */ 832 txs = &sc->sc_txstate[i]; 833 txs->txs_mbuf = NULL; 834 txs->txs_idx = i; 835 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 836 } 837 838 /* 839 * Flush the TX descriptors 840 * - TODO: can we just access descriptors via KSEG1 841 * to avoid the flush? 842 */ 843 bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, 844 (int)&sc->sc_txdesc - (int)sc->sc_descs, sizeof(sc->sc_txdesc), 845 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 846 847 /* Initialize the RX descriptor ring */ 848 memset(sc->sc_rxdesc, 0, sizeof(sc->sc_rxdesc)); 849 for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { 850 if (ralink_eth_add_rxbuf(sc, i)) { 851 panic("Can't allocate rx mbuf\n"); 852 } 853 } 854 855 /* 856 * Flush the RX descriptors 857 * - TODO: can we just access descriptors via KSEG1 858 * to avoid the flush? 859 */ 860 bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, 861 (int)&sc->sc_rxdesc - (int)sc->sc_descs, sizeof(sc->sc_rxdesc), 862 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 863 864 /* Clear the PDMA state */ 865 r = fe_read(sc, RA_FE_PDMA_GLOBAL_CFG); 866 r &= 0xff; 867 fe_write(sc, RA_FE_PDMA_GLOBAL_CFG, r); 868 (void) fe_read(sc, RA_FE_PDMA_GLOBAL_CFG); 869 870 /* Setup the PDMA VLAN ID's */ 871 fe_write(sc, RA_FE_VLAN_ID_0001, 0x00010000); 872 fe_write(sc, RA_FE_VLAN_ID_0203, 0x00030002); 873 fe_write(sc, RA_FE_VLAN_ID_0405, 0x00050004); 874 fe_write(sc, RA_FE_VLAN_ID_0607, 0x00070006); 875 fe_write(sc, RA_FE_VLAN_ID_0809, 0x00090008); 876 fe_write(sc, RA_FE_VLAN_ID_1011, 0x000b000a); 877 fe_write(sc, RA_FE_VLAN_ID_1213, 0x000d000c); 878 fe_write(sc, RA_FE_VLAN_ID_1415, 0x000f000e); 879 880 /* Give the TX and TX rings to the chip. */ 881 fe_write(sc, RA_FE_PDMA_TX0_PTR, 882 htole32(MIPS_KSEG0_TO_PHYS(&sc->sc_txdesc))); 883 fe_write(sc, RA_FE_PDMA_TX0_COUNT, htole32(RALINK_ETH_NUM_TX_DESC)); 884 fe_write(sc, RA_FE_PDMA_TX0_CPU_IDX, 0); 885 fe_write(sc, RA_FE_PDMA_RESET_IDX, PDMA_RST_TX0); 886 887 fe_write(sc, RA_FE_PDMA_RX0_PTR, 888 htole32(MIPS_KSEG0_TO_PHYS(&sc->sc_rxdesc))); 889 fe_write(sc, RA_FE_PDMA_RX0_COUNT, htole32(RALINK_ETH_NUM_RX_DESC)); 890 fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX, 891 htole32(RALINK_ETH_NUM_RX_DESC - 1)); 892 fe_write(sc, RA_FE_PDMA_RESET_IDX, PDMA_RST_RX0); 893 fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX, 894 htole32(RALINK_ETH_NUM_RX_DESC - 1)); 895 896 /* Start PDMA */ 897 fe_write(sc, RA_FE_PDMA_GLOBAL_CFG, 898 FE_PDMA_GLOBAL_CFG_TX_WB_DDONE | 899 FE_PDMA_GLOBAL_CFG_RX_DMA_EN | 900 FE_PDMA_GLOBAL_CFG_TX_DMA_EN | 901 FE_PDMA_GLOBAL_CFG_BURST_SZ_4); 902 903 /* Setup the clock for the Frame Engine */ 904 fe_write(sc, RA_FE_GLOBAL_CFG, 905 FE_GLOBAL_CFG_EXT_VLAN(0x8100) | 906 FE_GLOBAL_CFG_US_CLK(RA_BUS_FREQ / 1000000) | 907 FE_GLOBAL_CFG_L2_SPACE(0x8)); 908 909 /* Turn on all interrupts */ 910 fe_write(sc, RA_FE_INT_ENABLE, 911 FE_INT_RX | FE_INT_TX3 | FE_INT_TX2 | FE_INT_TX1 | FE_INT_TX0); 912 913 /* 914 * Configure GDMA forwarding 915 * - default all packets to CPU 916 * - Turn on auto-CRC 917 */ 918 #if 0 919 fe_write(sc, RA_FE_GDMA1_FWD_CFG, 920 (FE_GDMA_FWD_CFG_DIS_TX_CRC | FE_GDMA_FWD_CFG_DIS_TX_PAD)); 921 #endif 922 fe_write(sc, RA_FE_GDMA1_FWD_CFG, 923 FE_GDMA_FWD_CFG_JUMBO_LEN(MCLBYTES/1024) | 924 FE_GDMA_FWD_CFG_STRIP_RX_CRC | 925 FE_GDMA_FWD_CFG_IP4_CRC_EN | 926 FE_GDMA_FWD_CFG_TCP_CRC_EN | 927 FE_GDMA_FWD_CFG_UDP_CRC_EN); 928 929 /* CDMA also needs CRCs turned on */ 930 r = fe_read(sc, RA_FE_CDMA_CSG_CFG); 931 r |= (FE_CDMA_CSG_CFG_IP4_CRC_EN | FE_CDMA_CSG_CFG_UDP_CRC_EN | 932 FE_CDMA_CSG_CFG_TCP_CRC_EN); 933 fe_write(sc, RA_FE_CDMA_CSG_CFG, r); 934 935 /* Configure Flow Control Thresholds */ 936 #ifdef RT3883 937 fe_write(sc, RA_FE_PSE_FQ_CFG, 938 FE_PSE_FQ_MAX_COUNT(0xff) | 939 FE_PSE_FQ_FC_RELEASE(0x90) | 940 FE_PSE_FQ_FC_ASSERT(0x80)); 941 #else 942 fe_write(sc, RA_FE_PSE_FQ_CFG, 943 FE_PSE_FQ_MAX_COUNT(0x80) | 944 FE_PSE_FQ_FC_RELEASE(0x50) | 945 FE_PSE_FQ_FC_ASSERT(0x40)); 946 #endif 947 948 #ifdef RALINK_ETH_DEBUG 949 printf("FE_MDIO_CFG1: 0x%08x\n", fe_read(sc, RA_FE_MDIO_CFG1)); 950 printf("FE_MDIO_CFG2: 0x%08x\n", fe_read(sc, RA_FE_MDIO_CFG2)); 951 printf("FE_PDMA_TX0_PTR: %08x\n", fe_read(sc, RA_FE_PDMA_TX0_PTR)); 952 printf("FE_PDMA_TX0_COUNT: %08x\n", 953 fe_read(sc, RA_FE_PDMA_TX0_COUNT)); 954 printf("FE_PDMA_TX0_CPU_IDX: %08x\n", 955 fe_read(sc, RA_FE_PDMA_TX0_CPU_IDX)); 956 printf("FE_PDMA_TX0_DMA_IDX: %08x\n", 957 fe_read(sc, RA_FE_PDMA_TX0_DMA_IDX)); 958 printf("FE_PDMA_RX0_PTR: %08x\n", fe_read(sc, RA_FE_PDMA_RX0_PTR)); 959 printf("FE_PDMA_RX0_COUNT: %08x\n", 960 fe_read(sc, RA_FE_PDMA_RX0_COUNT)); 961 printf("FE_PDMA_RX0_CPU_IDX: %08x\n", 962 fe_read(sc, RA_FE_PDMA_RX0_CPU_IDX)); 963 printf("FE_PDMA_RX0_DMA_IDX: %08x\n", 964 fe_read(sc, RA_FE_PDMA_RX0_DMA_IDX)); 965 printf("FE_PDMA_GLOBAL_CFG: %08x\n", 966 fe_read(sc, RA_FE_PDMA_GLOBAL_CFG)); 967 printf("FE_GLOBAL_CFG: %08x\n", fe_read(sc, RA_FE_GLOBAL_CFG)); 968 printf("FE_GDMA1_FWD_CFG: %08x\n", 969 fe_read(sc, RA_FE_GDMA1_FWD_CFG)); 970 printf("FE_CDMA_CSG_CFG: %08x\n", fe_read(sc, RA_FE_CDMA_CSG_CFG)); 971 printf("FE_PSE_FQ_CFG: %08x\n", fe_read(sc, RA_FE_PSE_FQ_CFG)); 972 #endif 973 974 /* Force PSE Reset to get everything finalized */ 975 fe_write(sc, RA_FE_GLOBAL_RESET, FE_GLOBAL_RESET_PSE); 976 fe_write(sc, RA_FE_GLOBAL_RESET, 0); 977 } 978 979 /* 980 * ralink_eth_init 981 */ 982 static int 983 ralink_eth_init(struct ifnet *ifp) 984 { 985 RALINK_DEBUG_FUNC_ENTRY(); 986 ralink_eth_softc_t * const sc = ifp->if_softc; 987 int error; 988 989 error = ralink_eth_enable(sc); 990 if (!error) { 991 /* Note that the interface is now running. */ 992 ifp->if_flags |= IFF_RUNNING; 993 ifp->if_flags &= ~IFF_OACTIVE; 994 } 995 996 return error; 997 } 998 999 /* 1000 * ralink_eth_rxdrain 1001 * 1002 * Drain the receive queue. 1003 */ 1004 static void 1005 ralink_eth_rxdrain(ralink_eth_softc_t *sc) 1006 { 1007 RALINK_DEBUG_FUNC_ENTRY(); 1008 1009 for (int i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) { 1010 struct ralink_eth_rxstate *rxs = &sc->sc_rxstate[i]; 1011 if (rxs->rxs_mbuf != NULL) { 1012 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1013 m_freem(rxs->rxs_mbuf); 1014 rxs->rxs_mbuf = NULL; 1015 } 1016 } 1017 } 1018 1019 /* 1020 * ralink_eth_stop 1021 */ 1022 static void 1023 ralink_eth_stop(struct ifnet *ifp, int disable) 1024 { 1025 RALINK_DEBUG_FUNC_ENTRY(); 1026 ralink_eth_softc_t * const sc = ifp->if_softc; 1027 1028 ralink_eth_disable(sc); 1029 1030 /* Mark the interface down and cancel the watchdog timer. */ 1031 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1032 ifp->if_timer = 0; 1033 } 1034 1035 /* 1036 * ralink_eth_add_rxbuf 1037 */ 1038 static int 1039 ralink_eth_add_rxbuf(ralink_eth_softc_t *sc, int idx) 1040 { 1041 RALINK_DEBUG_FUNC_ENTRY(); 1042 struct ralink_eth_rxstate * const rxs = &sc->sc_rxstate[idx]; 1043 struct mbuf *m; 1044 int error; 1045 1046 MGETHDR(m, M_DONTWAIT, MT_DATA); 1047 if (m == NULL) { 1048 printf("MGETHDR failed\n"); 1049 sc->sc_evcnt_add_rxbuf_hdr_fail.ev_count++; 1050 return ENOBUFS; 1051 } 1052 1053 MCLGET(m, M_DONTWAIT); 1054 if ((m->m_flags & M_EXT) == 0) { 1055 m_freem(m); 1056 printf("MCLGET failed\n"); 1057 sc->sc_evcnt_add_rxbuf_mcl_fail.ev_count++; 1058 return ENOBUFS; 1059 } 1060 1061 m->m_data = m->m_ext.ext_buf; 1062 rxs->rxs_mbuf = m; 1063 1064 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf, 1065 m->m_ext.ext_size, NULL, BUS_DMA_READ|BUS_DMA_NOWAIT); 1066 if (error) { 1067 aprint_error_dev(sc->sc_dev, "can't load rx DMA map %d, " 1068 "error=%d\n", idx, error); 1069 panic(__func__); /* XXX */ 1070 } 1071 1072 sc->sc_rxdesc[idx].data_ptr = MIPS_KSEG0_TO_PHYS( 1073 rxs->rxs_dmamap->dm_segs[0].ds_addr + RALINK_ETHER_ALIGN); 1074 sc->sc_rxdesc[idx].rxd_info1 = RXD_LAST0; 1075 1076 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1077 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1078 1079 return 0; 1080 } 1081 1082 1083 /* 1084 * ralink_eth_start 1085 */ 1086 static void 1087 ralink_eth_start(struct ifnet *ifp) 1088 { 1089 RALINK_DEBUG_FUNC_ENTRY(); 1090 ralink_eth_softc_t * const sc = ifp->if_softc; 1091 struct mbuf *m0, *m = NULL; 1092 struct ralink_eth_txstate *txs; 1093 bus_dmamap_t dmamap; 1094 int tx_cpu_idx; 1095 int error; 1096 int s; 1097 1098 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 1099 return; 1100 1101 s = splnet(); 1102 1103 tx_cpu_idx = fe_read(sc, RA_FE_PDMA_TX0_CPU_IDX); 1104 1105 /* 1106 * Loop through the send queue, setting up transmit descriptors 1107 * until we drain the queue, or use up all available 1108 * transmit descriptors. 1109 */ 1110 while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL) { 1111 /* Grab a packet off the queue. */ 1112 IFQ_POLL(&ifp->if_snd, m0); 1113 if (m0 == NULL) 1114 break; 1115 1116 dmamap = txs->txs_dmamap; 1117 1118 if (m0->m_pkthdr.len < RALINK_MIN_BUF) { 1119 int padlen = 64 - m0->m_pkthdr.len; 1120 m_copyback(m0, m0->m_pkthdr.len, padlen, 1121 sc->ralink_zero_buf); 1122 /* TODO : need some checking here */ 1123 } 1124 1125 /* 1126 * Do we need to align the buffer 1127 * or does the DMA map load fail? 1128 */ 1129 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 1130 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) { 1131 1132 /* Allocate a new mbuf for re-alignment */ 1133 MGETHDR(m, M_DONTWAIT, MT_DATA); 1134 if (m == NULL) { 1135 aprint_error_dev(sc->sc_dev, 1136 "unable to allocate aligned Tx mbuf\n"); 1137 break; 1138 } 1139 MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner); 1140 if (m0->m_pkthdr.len > MHLEN) { 1141 MCLGET(m, M_DONTWAIT); 1142 if ((m->m_flags & M_EXT) == 0) { 1143 aprint_error_dev(sc->sc_dev, 1144 "unable to allocate Tx cluster\n"); 1145 m_freem(m); 1146 break; 1147 } 1148 } 1149 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); 1150 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; 1151 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m, 1152 BUS_DMA_WRITE|BUS_DMA_NOWAIT); 1153 if (error) { 1154 aprint_error_dev(sc->sc_dev, 1155 "unable to load Tx buffer error=%d\n", 1156 error); 1157 m_freem(m); 1158 break; 1159 } 1160 } 1161 1162 IFQ_DEQUEUE(&ifp->if_snd, m0); 1163 /* did we copy the buffer out already? */ 1164 if (m != NULL) { 1165 m_freem(m0); 1166 m0 = m; 1167 } 1168 1169 /* Sync the DMA map. */ 1170 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 1171 BUS_DMASYNC_PREWRITE); 1172 1173 /* Initialize the transmit descriptor */ 1174 sc->sc_txdesc[tx_cpu_idx].data_ptr0 = 1175 MIPS_KSEG0_TO_PHYS(dmamap->dm_segs[0].ds_addr); 1176 sc->sc_txdesc[tx_cpu_idx].txd_info1 = 1177 TXD_LEN0(dmamap->dm_segs[0].ds_len) | TXD_LAST0; 1178 sc->sc_txdesc[tx_cpu_idx].txd_info2 = 1179 TXD_QN(3) | TXD_PN(TXD_PN_GDMA1); 1180 sc->sc_txdesc[tx_cpu_idx].txd_info2 = TXD_QN(3) | 1181 TXD_PN(TXD_PN_GDMA1) | TXD_VEN | 1182 // TXD_VIDX(pt->vlan_id) | 1183 TXD_TCP_EN | TXD_UDP_EN | TXD_IP_EN; 1184 1185 RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n", 1186 tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].data_ptr0, 1187 sc->sc_txdesc[tx_cpu_idx].data_ptr0); 1188 RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n", 1189 tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].txd_info1, 1190 sc->sc_txdesc[tx_cpu_idx].txd_info1); 1191 RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n", 1192 tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].data_ptr1, 1193 sc->sc_txdesc[tx_cpu_idx].data_ptr1); 1194 RALINK_DEBUG(RALINK_DEBUG_REG,"+tx(%d) 0x%08x: 0x%08x\n", tx_cpu_idx, 1195 (int)&sc->sc_txdesc[tx_cpu_idx].txd_info2, 1196 sc->sc_txdesc[tx_cpu_idx].txd_info2); 1197 1198 /* sync the descriptor we're using. */ 1199 bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, 1200 (int)&sc->sc_txdesc[tx_cpu_idx] - (int)sc->sc_descs, 1201 sizeof(struct ralink_tx_desc), 1202 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1203 1204 /* 1205 * Store a pointer to the packet so we can free it later, 1206 * and remember what txdirty will be once the packet is 1207 * done. 1208 */ 1209 txs->txs_mbuf = m0; 1210 sc->sc_pending_tx++; 1211 if (txs->txs_idx != tx_cpu_idx) { 1212 panic("txs_idx doesn't match %d != %d\n", 1213 txs->txs_idx, tx_cpu_idx); 1214 } 1215 1216 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 1217 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 1218 1219 /* Pass the packet to any BPF listeners. */ 1220 bpf_mtap(ifp, m0); 1221 1222 /* Set a watchdog timer in case the chip flakes out. */ 1223 ifp->if_timer = 5; 1224 1225 tx_cpu_idx = (tx_cpu_idx + 1) % RALINK_ETH_NUM_TX_DESC; 1226 1227 /* Write back the tx_cpu_idx */ 1228 fe_write(sc, RA_FE_PDMA_TX0_CPU_IDX, tx_cpu_idx); 1229 } 1230 1231 if (txs == NULL) { 1232 /* No more slots left; notify upper layer. */ 1233 ifp->if_flags |= IFF_OACTIVE; 1234 } 1235 1236 splx(s); 1237 } 1238 1239 /* 1240 * ralink_eth_watchdog 1241 * 1242 * Watchdog timer handler. 1243 */ 1244 static void 1245 ralink_eth_watchdog(struct ifnet *ifp) 1246 { 1247 RALINK_DEBUG_FUNC_ENTRY(); 1248 ralink_eth_softc_t * const sc = ifp->if_softc; 1249 bool doing_transmit; 1250 1251 sc->sc_evcnt_watchdog.ev_count++; 1252 doing_transmit = !SIMPLEQ_EMPTY(&sc->sc_txdirtyq); 1253 1254 if (doing_transmit) { 1255 RALINK_DEBUG(RALINK_DEBUG_ERROR, "%s: transmit timeout\n", 1256 ifp->if_xname); 1257 ifp->if_oerrors++; 1258 sc->sc_evcnt_wd_tx.ev_count++; 1259 } else { 1260 RALINK_DEBUG(RALINK_DEBUG_ERROR, "%s: spurious watchog timeout\n", 1261 ifp->if_xname); 1262 sc->sc_evcnt_wd_spurious.ev_count++; 1263 return; 1264 } 1265 1266 sc->sc_evcnt_wd_reactivate.ev_count++; 1267 const int s = splnet(); 1268 /* deactive the active partitions, retaining the active information */ 1269 ralink_eth_disable(sc); 1270 ralink_eth_enable(sc); 1271 splx(s); 1272 1273 /* Try to get more packets going. */ 1274 ralink_eth_start(ifp); 1275 } 1276 1277 /* 1278 * ralink_eth_ioctl 1279 * 1280 * Handle control requests from the operator. 1281 */ 1282 static int 1283 ralink_eth_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1284 { 1285 RALINK_DEBUG_FUNC_ENTRY(); 1286 struct ifdrv * const ifd = (struct ifdrv *) data; 1287 ralink_eth_softc_t * const sc = ifp->if_softc; 1288 int s, error = 0; 1289 1290 RALINK_DEBUG(RALINK_DEBUG_INFO, "ifp: %p cmd: %lu data: %p\n", 1291 ifp, cmd, data); 1292 1293 s = splnet(); 1294 1295 switch (cmd) { 1296 case SIOCSDRVSPEC: 1297 switch (ifd->ifd_cmd) { 1298 #if 0 1299 case ETH_SWITCH_CMD_PORT_MODE: 1300 /* len parameter is the mode */ 1301 pt->mode = (int) ifd->ifd_len; 1302 ralink_eth_configure_switch(pt->sc_reth); 1303 break; 1304 #endif 1305 default: 1306 error = EINVAL; 1307 } 1308 break; 1309 default: 1310 error = ether_ioctl(ifp, cmd, data); 1311 if (error == ENETRESET) { 1312 if (ifp->if_flags & IFF_RUNNING) { 1313 /* 1314 * Multicast list has changed. Set the 1315 * hardware filter accordingly. 1316 */ 1317 RALINK_DEBUG(RALINK_DEBUG_INFO, "TODO!!!"); 1318 #if 0 1319 ralink_eth_filter_setup(sc); 1320 #endif 1321 } 1322 error = 0; 1323 } 1324 break; 1325 } 1326 1327 splx(s); 1328 1329 /* Try to get more packets going. */ 1330 if (sc->sc_ih != NULL) 1331 ralink_eth_start(ifp); 1332 1333 return error; 1334 } 1335 1336 /* 1337 * ralink_eth_intr 1338 * 1339 */ 1340 static int 1341 ralink_eth_intr(void *arg) 1342 { 1343 RALINK_DEBUG_FUNC_ENTRY(); 1344 ralink_eth_softc_t * const sc = arg; 1345 1346 for (u_int n=0;; n = 1) { 1347 u_int32_t status = fe_read(sc, RA_FE_INT_STATUS); 1348 fe_write(sc, RA_FE_INT_STATUS, ~0); 1349 RALINK_DEBUG(RALINK_DEBUG_REG,"%s() status: 0x%08x\n", 1350 __func__, status); 1351 1352 if ((status & (FE_INT_RX | FE_INT_TX0)) == 0) { 1353 if (n == 0) 1354 sc->sc_evcnt_spurious_intr.ev_count++; 1355 return (n != 0); 1356 } 1357 1358 if (status & FE_INT_RX) 1359 ralink_eth_rxintr(sc); 1360 1361 if (status & FE_INT_TX0) 1362 ralink_eth_txintr(sc); 1363 } 1364 1365 /* Try to get more packets going. */ 1366 ralink_eth_start(&sc->sc_ethercom.ec_if); 1367 1368 return 1; 1369 } 1370 1371 /* 1372 * ralink_eth_rxintr 1373 */ 1374 static void 1375 ralink_eth_rxintr(ralink_eth_softc_t *sc) 1376 { 1377 RALINK_DEBUG_FUNC_ENTRY(); 1378 struct ifnet * const ifp = &sc->sc_ethercom.ec_if; 1379 struct ralink_eth_rxstate *rxs; 1380 struct mbuf *m; 1381 int len; 1382 int rx_cpu_idx; 1383 1384 KASSERT(curcpu()->ci_cpl >= IPL_NET); 1385 sc->sc_evcnt_rxintr.ev_count++; 1386 rx_cpu_idx = fe_read(sc, RA_FE_PDMA_RX0_CPU_IDX); 1387 1388 for (;;) { 1389 rx_cpu_idx = (rx_cpu_idx + 1) % RALINK_ETH_NUM_RX_DESC; 1390 1391 rxs = &sc->sc_rxstate[rx_cpu_idx]; 1392 1393 bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, 1394 (int)&sc->sc_rxdesc[rx_cpu_idx] - (int)sc->sc_descs, 1395 sizeof(struct ralink_rx_desc), 1396 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1397 1398 RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n", 1399 rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].data_ptr, 1400 sc->sc_rxdesc[rx_cpu_idx].data_ptr); 1401 RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n", 1402 rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].rxd_info1, 1403 sc->sc_rxdesc[rx_cpu_idx].rxd_info1); 1404 RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n", rx_cpu_idx, 1405 (int)&sc->sc_rxdesc[rx_cpu_idx].unused, 1406 sc->sc_rxdesc[rx_cpu_idx].unused); 1407 RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) 0x%08x: 0x%08x\n", 1408 rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].rxd_info2, 1409 sc->sc_rxdesc[rx_cpu_idx].rxd_info2); 1410 1411 if (!(sc->sc_rxdesc[rx_cpu_idx].rxd_info1 & RXD_DDONE)) 1412 break; 1413 1414 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1415 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1416 1417 /* 1418 * No errors; receive the packet. 1419 * Note the chip includes the CRC with every packet. 1420 */ 1421 len = RXD_LEN0(sc->sc_rxdesc[rx_cpu_idx].rxd_info1); 1422 1423 RALINK_DEBUG(RALINK_DEBUG_REG,"rx(%d) packet rx %d bytes\n", 1424 rx_cpu_idx, len); 1425 1426 /* 1427 * Allocate a new mbuf cluster. If that fails, we are 1428 * out of memory, and must drop the packet and recycle 1429 * the buffer that's already attached to this descriptor. 1430 */ 1431 m = rxs->rxs_mbuf; 1432 if (ralink_eth_add_rxbuf(sc, rx_cpu_idx) != 0) 1433 break; 1434 m->m_data += RALINK_ETHER_ALIGN; 1435 m->m_pkthdr.len = m->m_len = len; 1436 1437 #ifdef RALINK_ETH_DEBUG 1438 { 1439 struct ether_header *eh = mtod(m, struct ether_header *); 1440 printf("rx: eth_dst: %s ", ether_sprintf(eh->ether_dhost)); 1441 printf("rx: eth_src: %s type: 0x%04x \n", 1442 ether_sprintf(eh->ether_shost), ntohs(eh->ether_type)); 1443 printf("0x14: %08x\n", *(volatile unsigned int *)(0xb0110014)); 1444 printf("0x98: %08x\n", *(volatile unsigned int *)(0xb0110098)); 1445 1446 unsigned char * s = mtod(m, unsigned char *); 1447 for (int j = 0; j < 32; j++) 1448 printf("%02x%c", *(s + j), 1449 (j == 15 || j == 31) ? '\n' : ' '); 1450 } 1451 #endif 1452 1453 /* 1454 * claim the buffer here since we can't do it at 1455 * allocation time due to the SW partitions 1456 */ 1457 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); 1458 1459 /* push it up the inteface */ 1460 ifp->if_ipackets++; 1461 m->m_pkthdr.rcvif = ifp; 1462 1463 #ifdef RALINK_ETH_DEBUG 1464 { 1465 struct ether_header *eh = mtod(m, struct ether_header *); 1466 printf("rx: eth_dst: %s ", ether_sprintf(eh->ether_dhost)); 1467 printf("rx: eth_src: %s type: 0x%04x\n", 1468 ether_sprintf(eh->ether_shost), ntohs(eh->ether_type)); 1469 printf("0x14: %08x\n", *(volatile unsigned int *)(0xb0110014)); 1470 printf("0x98: %08x\n", *(volatile unsigned int *)(0xb0110098)); 1471 1472 unsigned char * s = mtod(m, unsigned char *); 1473 for (int j = 0; j < 32; j++) 1474 printf("%02x%c", *(s + j), 1475 (j == 15 || j == 31) ? '\n' : ' '); 1476 } 1477 #endif 1478 1479 /* 1480 * XXX: M_CSUM_TCPv4 and M_CSUM_UDPv4 do not currently work when 1481 * using PF's ROUTETO option for load balancing. 1482 */ 1483 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 1484 1485 /* 1486 * Pass this up to any BPF listeners, but only 1487 * pass it up the stack if its for us. 1488 */ 1489 bpf_mtap(ifp, m); 1490 1491 /* Pass it on. */ 1492 sc->sc_evcnt_input.ev_count++; 1493 (*ifp->if_input)(ifp, m); 1494 1495 fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX, rx_cpu_idx); 1496 } 1497 } 1498 1499 /* 1500 * ralink_eth_txintr 1501 */ 1502 static void 1503 ralink_eth_txintr(ralink_eth_softc_t *sc) 1504 { 1505 RALINK_DEBUG_FUNC_ENTRY(); 1506 struct ralink_eth_txstate *txs; 1507 1508 KASSERT(curcpu()->ci_cpl >= IPL_NET); 1509 sc->sc_evcnt_txintr.ev_count++; 1510 1511 /* 1512 * Go through our Tx list and free mbufs for those 1513 * frames that have been transmitted. 1514 */ 1515 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1516 bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap, 1517 (int)&sc->sc_txdesc[txs->txs_idx] - (int)sc->sc_descs, 1518 sizeof(struct ralink_tx_desc), 1519 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1520 1521 RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n", txs->txs_idx, 1522 (int)&sc->sc_txdesc[txs->txs_idx].data_ptr0, 1523 sc->sc_txdesc[txs->txs_idx].data_ptr0); 1524 RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n", txs->txs_idx, 1525 (int)&sc->sc_txdesc[txs->txs_idx].txd_info1, 1526 sc->sc_txdesc[txs->txs_idx].txd_info1); 1527 RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n", txs->txs_idx, 1528 (int)&sc->sc_txdesc[txs->txs_idx].data_ptr1, 1529 sc->sc_txdesc[txs->txs_idx].data_ptr1); 1530 RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) 0x%08x: 0x%08x\n", txs->txs_idx, 1531 (int)&sc->sc_txdesc[txs->txs_idx].txd_info2, 1532 sc->sc_txdesc[txs->txs_idx].txd_info2); 1533 1534 /* we're finished if the current tx isn't done */ 1535 if (!(sc->sc_txdesc[txs->txs_idx].txd_info1 & TXD_DDONE)) 1536 break; 1537 1538 RALINK_DEBUG(RALINK_DEBUG_REG,"-tx(%d) transmitted\n", txs->txs_idx); 1539 1540 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1541 1542 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 0, 1543 txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1544 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1545 m_freem(txs->txs_mbuf); 1546 txs->txs_mbuf = NULL; 1547 1548 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1549 1550 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1551 ifp->if_flags &= ~IFF_OACTIVE; 1552 ifp->if_opackets++; 1553 sc->sc_evcnt_output.ev_count++; 1554 1555 if (--sc->sc_pending_tx == 0) 1556 ifp->if_timer = 0; 1557 } 1558 } 1559 1560 /* 1561 * ralink_eth_mdio_enable 1562 */ 1563 #if defined (RT3050) || defined(RT3052) 1564 static void 1565 ralink_eth_mdio_enable(ralink_eth_softc_t *sc, bool enable) 1566 { 1567 uint32_t data = sy_read(sc, RA_SYSCTL_GPIOMODE); 1568 1569 if (enable) 1570 data &= ~GPIOMODE_MDIO; 1571 else 1572 data |= GPIOMODE_MDIO; 1573 1574 sy_write(sc, RA_SYSCTL_GPIOMODE, data); 1575 } 1576 #else 1577 #define ralink_eth_mdio_enable(sc, enable) 1578 #endif 1579 1580 /* 1581 * ralink_eth_mii_statchg 1582 */ 1583 static void 1584 ralink_eth_mii_statchg(struct ifnet *ifp) 1585 { 1586 #if 0 1587 ralink_eth_softc_t * const sc = ifp->if_softc; 1588 1589 #endif 1590 } 1591 1592 /* 1593 * ralink_eth_mii_tick 1594 * 1595 * One second timer, used to tick the MIIs. 1596 */ 1597 static void 1598 ralink_eth_mii_tick(void *arg) 1599 { 1600 ralink_eth_softc_t * const sc = arg; 1601 1602 const int s = splnet(); 1603 mii_tick(&sc->sc_mii); 1604 splx(s); 1605 1606 callout_reset(&sc->sc_tick_callout, hz, ralink_eth_mii_tick, sc); 1607 } 1608 1609 /* 1610 * ralink_eth_mii_read 1611 */ 1612 static int 1613 ralink_eth_mii_read(device_t self, int phy_addr, int phy_reg) 1614 { 1615 ralink_eth_softc_t *sc = device_private(self); 1616 KASSERT(sc != NULL); 1617 #if 0 1618 printf("%s() phy_addr: %d phy_reg: %d\n", __func__, phy_addr, phy_reg); 1619 #endif 1620 #if defined(RT3050) || defined(RT3052) 1621 if (phy_addr > 5) 1622 return 0; 1623 #endif 1624 1625 /* We enable mdio gpio purpose register, and disable it when exit. */ 1626 ralink_eth_mdio_enable(sc, true); 1627 1628 /* 1629 * make sure previous read operation is complete 1630 * TODO: timeout (linux uses jiffies to measure 5 seconds) 1631 */ 1632 for (;;) { 1633 /* rd_rdy: read operation is complete */ 1634 #if defined(RT3050) || defined(RT3052) 1635 if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_RD_DONE) == 0) 1636 break; 1637 #else 1638 if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0) 1639 break; 1640 #endif 1641 } 1642 1643 #if defined(RT3050) || defined(RT3052) 1644 sw_write(sc, RA_ETH_SW_PCTL0, 1645 PCTL0_RD_CMD | PCTL0_REG(phy_reg) | PCTL0_ADDR(phy_addr)); 1646 #else 1647 fe_write(sc, RA_FE_MDIO_ACCESS, 1648 MDIO_ACCESS_PHY_ADDR(phy_addr) | MDIO_ACCESS_REG(phy_reg)); 1649 fe_write(sc, RA_FE_MDIO_ACCESS, 1650 MDIO_ACCESS_PHY_ADDR(phy_addr) | MDIO_ACCESS_REG(phy_reg) | 1651 MDIO_ACCESS_TRG); 1652 #endif 1653 1654 /* 1655 * make sure read operation is complete 1656 * TODO: timeout (linux uses jiffies to measure 5 seconds) 1657 */ 1658 for (;;) { 1659 #if defined(RT3050) || defined(RT3052) 1660 if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_RD_DONE) != 0) { 1661 int data = PCTL1_RD_VAL( 1662 sw_read(sc, RA_ETH_SW_PCTL1)); 1663 ralink_eth_mdio_enable(sc, false); 1664 return data; 1665 } 1666 #else 1667 if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0) { 1668 int data = MDIO_ACCESS_DATA( 1669 fe_read(sc, RA_FE_MDIO_ACCESS)); 1670 ralink_eth_mdio_enable(sc, false); 1671 return data; 1672 } 1673 #endif 1674 } 1675 } 1676 1677 /* 1678 * ralink_eth_mii_write 1679 */ 1680 static void 1681 ralink_eth_mii_write(device_t self, int phy_addr, int phy_reg, int val) 1682 { 1683 ralink_eth_softc_t *sc = device_private(self); 1684 KASSERT(sc != NULL); 1685 #if 0 1686 printf("%s() phy_addr: %d phy_reg: %d val: 0x%04x\n", 1687 __func__, phy_addr, phy_reg, val); 1688 #endif 1689 ralink_eth_mdio_enable(sc, true); 1690 1691 /* 1692 * make sure previous write operation is complete 1693 * TODO: timeout (linux uses jiffies to measure 5 seconds) 1694 */ 1695 for (;;) { 1696 #if defined(RT3050) || defined(RT3052) 1697 if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_RD_DONE) == 0) 1698 break; 1699 #else 1700 if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0) 1701 break; 1702 #endif 1703 } 1704 1705 #if defined(RT3050) || defined(RT3052) 1706 sw_write(sc, RA_ETH_SW_PCTL0, 1707 PCTL0_WR_CMD | PCTL0_WR_VAL(val) | PCTL0_REG(phy_reg) | 1708 PCTL0_ADDR(phy_addr)); 1709 #else 1710 fe_write(sc, RA_FE_MDIO_ACCESS, 1711 MDIO_ACCESS_WR | MDIO_ACCESS_PHY_ADDR(phy_addr) | 1712 MDIO_ACCESS_REG(phy_reg) | MDIO_ACCESS_DATA(val)); 1713 fe_write(sc, RA_FE_MDIO_ACCESS, 1714 MDIO_ACCESS_WR | MDIO_ACCESS_PHY_ADDR(phy_addr) | 1715 MDIO_ACCESS_REG(phy_reg) | MDIO_ACCESS_DATA(val) | 1716 MDIO_ACCESS_TRG); 1717 #endif 1718 1719 1720 /* make sure write operation is complete */ 1721 for (;;) { 1722 #if defined(RT3050) || defined(RT3052) 1723 if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_WR_DONE) != 0) { 1724 ralink_eth_mdio_enable(sc, false); 1725 return; 1726 } 1727 #else 1728 if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0){ 1729 ralink_eth_mdio_enable(sc, false); 1730 return; 1731 } 1732 #endif 1733 } 1734 } 1735