/*	$NetBSD: if_mvgbe.c,v 1.34 2012/12/28 08:16:53 msaitoh Exp $	*/
/*
 * Copyright (c) 2007, 2008 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_mvgbe.c,v 1.34 2012/12/28 08:16:53 msaitoh Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>
#include <dev/marvell/mvgbereg.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <net/bpf.h>
#include <sys/rnd.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include "locators.h"

/* #define MVGBE_DEBUG 3 */
#ifdef MVGBE_DEBUG
#define DPRINTF(x)	if (mvgbe_debug) printf x
#define DPRINTFN(n,x)	if (mvgbe_debug >= (n)) printf x
int mvgbe_debug = MVGBE_DEBUG;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif


#define MVGBE_READ(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define MVGBE_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define MVGBE_READ_FILTER(sc, reg, val, c) \
	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c))
#define MVGBE_WRITE_FILTER(sc, reg, val, c) \
	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_dafh, (reg), (val), (c))

#define MVGBE_TX_RING_CNT	256
#define MVGBE_TX_RING_MSK	(MVGBE_TX_RING_CNT - 1)
#define MVGBE_TX_RING_NEXT(x)	(((x) + 1) & MVGBE_TX_RING_MSK)
#define MVGBE_RX_RING_CNT	256
#define MVGBE_RX_RING_MSK	(MVGBE_RX_RING_CNT - 1)
#define MVGBE_RX_RING_NEXT(x)	(((x) + 1) & MVGBE_RX_RING_MSK)

CTASSERT(MVGBE_TX_RING_CNT > 1 && MVGBE_TX_RING_NEXT(MVGBE_TX_RING_CNT) ==
	(MVGBE_TX_RING_CNT + 1) % MVGBE_TX_RING_CNT);
CTASSERT(MVGBE_RX_RING_CNT > 1 && MVGBE_RX_RING_NEXT(MVGBE_RX_RING_CNT) ==
	(MVGBE_RX_RING_CNT + 1) % MVGBE_RX_RING_CNT);
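/*
 * Editor's note (illustrative, not in the original source): the _NEXT()
 * macros assume the ring sizes are powers of two, so that ANDing with
 * the mask is equivalent to a modulo.  E.g. with MVGBE_TX_RING_CNT == 256:
 *
 *	MVGBE_TX_RING_NEXT(254) == 255
 *	MVGBE_TX_RING_NEXT(255) == (256 & 0xff) == 0	(wraps to start)
 *
 * which is exactly the mask-vs-modulo equivalence the CTASSERTs above
 * verify at compile time.
 */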
#define MVGBE_JSLOTS		384	/* XXXX */
#define MVGBE_JLEN \
    ((MVGBE_MRU + MVGBE_HWHEADER_SIZE + MVGBE_RXBUF_ALIGN - 1) & \
    ~MVGBE_RXBUF_MASK)
#define MVGBE_NTXSEG		30
#define MVGBE_JPAGESZ		PAGE_SIZE
#define MVGBE_RESID \
    (MVGBE_JPAGESZ - (MVGBE_JLEN * MVGBE_JSLOTS) % MVGBE_JPAGESZ)
#define MVGBE_JMEM \
    ((MVGBE_JLEN * MVGBE_JSLOTS) + MVGBE_RESID)

#define MVGBE_TX_RING_ADDR(sc, i) \
    ((sc)->sc_ring_map->dm_segs[0].ds_addr + \
    offsetof(struct mvgbe_ring_data, mvgbe_tx_ring[(i)]))

#define MVGBE_RX_RING_ADDR(sc, i) \
    ((sc)->sc_ring_map->dm_segs[0].ds_addr + \
    offsetof(struct mvgbe_ring_data, mvgbe_rx_ring[(i)]))

#define MVGBE_CDOFF(x)		offsetof(struct mvgbe_ring_data, x)
#define MVGBE_CDTXOFF(x)	MVGBE_CDOFF(mvgbe_tx_ring[(x)])
#define MVGBE_CDRXOFF(x)	MVGBE_CDOFF(mvgbe_rx_ring[(x)])

#define MVGBE_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
	const int __descsize = sizeof(struct mvgbe_tx_desc);		\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > MVGBE_TX_RING_CNT) {				\
		bus_dmamap_sync((sc)->sc_dmat,				\
		    (sc)->sc_ring_map, MVGBE_CDTXOFF(__x),		\
		    __descsize * (MVGBE_TX_RING_CNT - __x), (ops));	\
		__n -= (MVGBE_TX_RING_CNT - __x);			\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_ring_map,		\
	    MVGBE_CDTXOFF((__x)), __descsize * __n, (ops));		\
} while (0 /*CONSTCOND*/)

#define MVGBE_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_ring_map,		\
	    MVGBE_CDRXOFF((x)), sizeof(struct mvgbe_rx_desc), (ops));	\
} while (/*CONSTCOND*/0)
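/*
 * Editor's note (worked example, not in the original source): when a
 * sync crosses the end of the ring, MVGBE_CDTXSYNC() issues two
 * bus_dmamap_sync() calls.  E.g. MVGBE_CDTXSYNC(sc, 250, 10, ops) with
 * MVGBE_TX_RING_CNT == 256 first syncs descriptors 250..255 (six of
 * them), then wraps and syncs descriptors 0..3 (the remaining four).
 */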
#define MVGBE_IPGINTTX_DEFAULT	768
#define MVGBE_IPGINTRX_DEFAULT	768

#ifdef MVGBE_EVENT_COUNTERS
#define	MVGBE_EVCNT_INCR(ev)		(ev)->ev_count++
#define	MVGBE_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	MVGBE_EVCNT_INCR(ev)		/* nothing */
#define	MVGBE_EVCNT_ADD(ev, val)	/* nothing */
#endif

struct mvgbe_jpool_entry {
	int slot;
	LIST_ENTRY(mvgbe_jpool_entry) jpool_entries;
};

struct mvgbe_chain {
	void *mvgbe_desc;
	struct mbuf *mvgbe_mbuf;
	struct mvgbe_chain *mvgbe_next;
};

struct mvgbe_txmap_entry {
	bus_dmamap_t dmamap;
	SIMPLEQ_ENTRY(mvgbe_txmap_entry) link;
};

struct mvgbe_chain_data {
	struct mvgbe_chain mvgbe_tx_chain[MVGBE_TX_RING_CNT];
	struct mvgbe_txmap_entry *mvgbe_tx_map[MVGBE_TX_RING_CNT];
	int mvgbe_tx_prod;
	int mvgbe_tx_cons;
	int mvgbe_tx_cnt;

	struct mvgbe_chain mvgbe_rx_chain[MVGBE_RX_RING_CNT];
	bus_dmamap_t mvgbe_rx_map[MVGBE_RX_RING_CNT];
	bus_dmamap_t mvgbe_rx_jumbo_map;
	int mvgbe_rx_prod;
	int mvgbe_rx_cons;
	int mvgbe_rx_cnt;

	/* Stick the jumbo mem management stuff here too. */
	void *mvgbe_jslots[MVGBE_JSLOTS];
	void *mvgbe_jumbo_buf;
};

struct mvgbe_ring_data {
	struct mvgbe_tx_desc mvgbe_tx_ring[MVGBE_TX_RING_CNT];
	struct mvgbe_rx_desc mvgbe_rx_ring[MVGBE_RX_RING_CNT];
};

struct mvgbec_softc {
	device_t sc_dev;

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;

	kmutex_t sc_mtx;

	int sc_flags;
};

struct mvgbe_softc {
	device_t sc_dev;
	int sc_port;

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_space_handle_t sc_dafh;	/* dest address filter handle */
	bus_dma_tag_t sc_dmat;

	struct ethercom sc_ethercom;
	struct mii_data sc_mii;
	u_int8_t sc_enaddr[ETHER_ADDR_LEN];	/* station addr */

	callout_t sc_tick_ch;		/* tick callout */

	struct mvgbe_chain_data sc_cdata;
	struct mvgbe_ring_data *sc_rdata;
	bus_dmamap_t sc_ring_map;
	int sc_if_flags;
	unsigned int sc_ipginttx;
	unsigned int sc_ipgintrx;
	int sc_wdogsoft;

	LIST_HEAD(__mvgbe_jfreehead, mvgbe_jpool_entry) sc_jfree_listhead;
	LIST_HEAD(__mvgbe_jinusehead, mvgbe_jpool_entry) sc_jinuse_listhead;
	SIMPLEQ_HEAD(__mvgbe_txmaphead, mvgbe_txmap_entry) sc_txmap_head;

	krndsource_t sc_rnd_source;
	struct sysctllog *mvgbe_clog;
#ifdef MVGBE_EVENT_COUNTERS
	struct evcnt sc_ev_rxoverrun;
	struct evcnt sc_ev_wdogsoft;
#endif
};


/* Gigabit Ethernet Unit Global part functions */

static int mvgbec_match(device_t, struct cfdata *, void *);
static void mvgbec_attach(device_t, device_t, void *);

static int mvgbec_print(void *, const char *);
static int mvgbec_search(device_t, cfdata_t, const int *, void *);

/* MII functions */
static int mvgbec_miibus_readreg(device_t, int, int);
static void mvgbec_miibus_writereg(device_t, int, int, int);
static void mvgbec_miibus_statchg(struct ifnet *);

static void mvgbec_wininit(struct mvgbec_softc *);

/* Gigabit Ethernet Port part functions */

static int mvgbe_match(device_t, struct cfdata *, void *);
static void mvgbe_attach(device_t, device_t, void *);

static void mvgbe_tick(void *);
static int mvgbe_intr(void *);

static void mvgbe_start(struct ifnet *);
static int mvgbe_ioctl(struct ifnet *, u_long, void *);
static int mvgbe_init(struct ifnet *);
static void mvgbe_stop(struct ifnet *, int);
static void mvgbe_watchdog(struct ifnet *);

static int mvgbe_ifflags_cb(struct ethercom *);

static int mvgbe_mediachange(struct ifnet *);
static void mvgbe_mediastatus(struct ifnet *, struct ifmediareq *);

static int mvgbe_init_rx_ring(struct mvgbe_softc *);
static int mvgbe_init_tx_ring(struct mvgbe_softc *);
static int mvgbe_newbuf(struct mvgbe_softc *, int, struct mbuf *, bus_dmamap_t);
static int mvgbe_alloc_jumbo_mem(struct mvgbe_softc *);
static void *mvgbe_jalloc(struct mvgbe_softc *);
static void mvgbe_jfree(struct mbuf *, void *, size_t, void *);
static int mvgbe_encap(struct mvgbe_softc *, struct mbuf *, uint32_t *);
static void mvgbe_rxeof(struct mvgbe_softc *);
static void mvgbe_txeof(struct mvgbe_softc *);
static uint8_t mvgbe_crc8(const uint8_t *, size_t);
static void mvgbe_filter_setup(struct mvgbe_softc *);
#ifdef MVGBE_DEBUG
static void mvgbe_dump_txdesc(struct mvgbe_tx_desc *, int);
#endif
static int mvgbe_ipginttx(struct mvgbec_softc *, struct mvgbe_softc *,
    unsigned int);
static int mvgbe_ipgintrx(struct mvgbec_softc *, struct mvgbe_softc *,
    unsigned int);
static void sysctl_mvgbe_init(struct mvgbe_softc *);
static int mvgbe_sysctl_ipginttx(SYSCTLFN_PROTO);
static int mvgbe_sysctl_ipgintrx(SYSCTLFN_PROTO);
CFATTACH_DECL_NEW(mvgbec_gt, sizeof(struct mvgbec_softc),
    mvgbec_match, mvgbec_attach, NULL, NULL);
CFATTACH_DECL_NEW(mvgbec_mbus, sizeof(struct mvgbec_softc),
    mvgbec_match, mvgbec_attach, NULL, NULL);

CFATTACH_DECL_NEW(mvgbe, sizeof(struct mvgbe_softc),
    mvgbe_match, mvgbe_attach, NULL, NULL);

device_t mvgbec0 = NULL;
static int mvgbe_root_num;

struct mvgbe_port {
	int model;
	int unit;
	int ports;
	int irqs[3];
	int flags;
#define FLAGS_FIX_TQTB	(1 << 0)
#define FLAGS_FIX_MTU	(1 << 1)
#define	FLAGS_IPG1	(1 << 2)
#define	FLAGS_IPG2	(1 << 3)
} mvgbe_ports[] = {
	{ MARVELL_DISCOVERY_II,		0, 3, { 32, 33, 34 }, 0 },
	{ MARVELL_DISCOVERY_III,	0, 3, { 32, 33, 34 }, 0 },
#if 0
	{ MARVELL_DISCOVERY_LT,		0, ?, { }, 0 },
	{ MARVELL_DISCOVERY_V,		0, ?, { }, 0 },
	{ MARVELL_DISCOVERY_VI,		0, ?, { }, 0 },
#endif
	{ MARVELL_ORION_1_88F5082,	0, 1, { 21 }, FLAGS_FIX_MTU },
	{ MARVELL_ORION_1_88F5180N,	0, 1, { 21 }, FLAGS_FIX_MTU },
	{ MARVELL_ORION_1_88F5181,	0, 1, { 21 }, FLAGS_FIX_MTU | FLAGS_IPG1 },
	{ MARVELL_ORION_1_88F5182,	0, 1, { 21 }, FLAGS_FIX_MTU | FLAGS_IPG1 },
	{ MARVELL_ORION_2_88F5281,	0, 1, { 21 }, FLAGS_FIX_MTU | FLAGS_IPG1 },
	{ MARVELL_ORION_1_88F6082,	0, 1, { 21 }, FLAGS_FIX_MTU },
	{ MARVELL_ORION_1_88W8660,	0, 1, { 21 }, FLAGS_FIX_MTU },

	{ MARVELL_KIRKWOOD_88F6180,	0, 1, { 11 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_KIRKWOOD_88F6192,	0, 1, { 11 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_KIRKWOOD_88F6192,	1, 1, { 15 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_KIRKWOOD_88F6281,	0, 1, { 11 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_KIRKWOOD_88F6281,	1, 1, { 15 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_KIRKWOOD_88F6282,	0, 1, { 11 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_KIRKWOOD_88F6282,	1, 1, { 15 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },

	{ MARVELL_MV78XX0_MV78100,	0, 1, { 40 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_MV78XX0_MV78100,	1, 1, { 44 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_MV78XX0_MV78200,	0, 1, { 40 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_MV78XX0_MV78200,	1, 1, { 44 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_MV78XX0_MV78200,	2, 1, { 48 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
	{ MARVELL_MV78XX0_MV78200,	3, 1, { 52 }, FLAGS_FIX_TQTB | FLAGS_IPG2 },
};
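/*
 * Editor's note (illustrative, not in the original source): each
 * mvgbe_ports[] entry keys on (model, unit) and supplies the per-port
 * IRQ list plus workaround flags.  For example, port 1 of a Kirkwood
 * 88F6281 matches { MARVELL_KIRKWOOD_88F6281, 1, 1, { 15 },
 * FLAGS_FIX_TQTB | FLAGS_IPG2 }: one port, interrupt 15, and the
 * TQTB and v2 IPG workarounds applied in mvgbe_init()/mvgbe_ipgint*().
 */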
/* ARGSUSED */
static int
mvgbec_match(device_t parent, cfdata_t match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	int i;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;
	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
		return 0;

	for (i = 0; i < __arraycount(mvgbe_ports); i++)
		if (mva->mva_model == mvgbe_ports[i].model) {
			mva->mva_size = MVGBE_SIZE;
			return 1;
		}
	return 0;
}

/* ARGSUSED */
static void
mvgbec_attach(device_t parent, device_t self, void *aux)
{
	struct mvgbec_softc *csc = device_private(self);
	struct marvell_attach_args *mva = aux, gbea;
	struct mvgbe_softc *port;
	struct mii_softc *mii;
	device_t child;
	uint32_t phyaddr;
	int i, j;

	aprint_naive("\n");
	aprint_normal(": Marvell Gigabit Ethernet Controller\n");

	csc->sc_dev = self;
	csc->sc_iot = mva->mva_iot;
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
	    mva->mva_size, &csc->sc_ioh)) {
		aprint_error_dev(self, "Cannot map registers\n");
		return;
	}

	if (mvgbec0 == NULL)
		mvgbec0 = self;

	phyaddr = 0;
	MVGBE_WRITE(csc, MVGBE_PHYADDR, phyaddr);

	mutex_init(&csc->sc_mtx, MUTEX_DEFAULT, IPL_NET);

	/* Disable and clear Gigabit Ethernet Unit interrupts */
	MVGBE_WRITE(csc, MVGBE_EUIM, 0);
	MVGBE_WRITE(csc, MVGBE_EUIC, 0);

	mvgbec_wininit(csc);

	memset(&gbea, 0, sizeof(gbea));
	for (i = 0; i < __arraycount(mvgbe_ports); i++) {
		if (mvgbe_ports[i].model != mva->mva_model ||
		    mvgbe_ports[i].unit != mva->mva_unit)
			continue;

		csc->sc_flags = mvgbe_ports[i].flags;

		for (j = 0; j < mvgbe_ports[i].ports; j++) {
			gbea.mva_name = "mvgbe";
			gbea.mva_model = mva->mva_model;
			gbea.mva_iot = csc->sc_iot;
			gbea.mva_ioh = csc->sc_ioh;
			gbea.mva_unit = j;
			gbea.mva_dmat = mva->mva_dmat;
			gbea.mva_irq = mvgbe_ports[i].irqs[j];
			child = config_found_sm_loc(csc->sc_dev, "mvgbec", NULL,
			    &gbea, mvgbec_print, mvgbec_search);
			if (child) {
				port = device_private(child);
				mii = LIST_FIRST(&port->sc_mii.mii_phys);
				if (mii != NULL)
					phyaddr |= MVGBE_PHYADDR_PHYAD(j,
					    mii->mii_phy);
			}
		}
		break;
	}
	MVGBE_WRITE(csc, MVGBE_PHYADDR, phyaddr);
}

static int
mvgbec_print(void *aux, const char *pnp)
{
	struct marvell_attach_args *gbea = aux;

	if (pnp)
		aprint_normal("%s at %s port %d",
		    gbea->mva_name, pnp, gbea->mva_unit);
	else {
		if (gbea->mva_unit != MVGBECCF_PORT_DEFAULT)
			aprint_normal(" port %d", gbea->mva_unit);
		if (gbea->mva_irq != MVGBECCF_IRQ_DEFAULT)
			aprint_normal(" irq %d", gbea->mva_irq);
	}
	return UNCONF;
}

/* ARGSUSED */
static int
mvgbec_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
{
	struct marvell_attach_args *gbea = aux;

	if (cf->cf_loc[MVGBECCF_PORT] == gbea->mva_unit &&
	    cf->cf_loc[MVGBECCF_IRQ] != MVGBECCF_IRQ_DEFAULT)
		gbea->mva_irq = cf->cf_loc[MVGBECCF_IRQ];

	return config_match(parent, cf, aux);
}

static int
mvgbec_miibus_readreg(device_t dev, int phy, int reg)
{
	struct mvgbe_softc *sc = device_private(dev);
	struct mvgbec_softc *csc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t smi, val;
	int i;

	if (mvgbec0 == NULL) {
		aprint_error_ifnet(ifp, "SMI mvgbec0 not found\n");
		return -1;
	}
	csc = device_private(mvgbec0);

	mutex_enter(&csc->sc_mtx);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY))
			break;
	}
	if (i == MVGBE_PHY_TIMEOUT) {
		aprint_error_ifnet(ifp, "SMI busy timeout\n");
		mutex_exit(&csc->sc_mtx);
		return -1;
	}

	smi =
	    MVGBE_SMI_PHYAD(phy) | MVGBE_SMI_REGAD(reg) | MVGBE_SMI_OPCODE_READ;
	MVGBE_WRITE(csc, MVGBE_SMI, smi);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		smi = MVGBE_READ(csc, MVGBE_SMI);
		if (smi & MVGBE_SMI_READVALID)
			break;
	}

	mutex_exit(&csc->sc_mtx);

	DPRINTFN(9, ("mvgbec_miibus_readreg: i=%d, timeout=%d\n",
	    i, MVGBE_PHY_TIMEOUT));

	val = smi & MVGBE_SMI_DATA_MASK;

	DPRINTFN(9, ("mvgbec_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
	    phy, reg, val));

	return val;
}
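/*
 * Editor's note (usage sketch, not in the original source): all ports
 * share the SMI registers of mvgbec0, so accesses are serialized with
 * csc->sc_mtx.  A caller such as the MII layer ends up doing e.g.
 *
 *	val = mvgbec_miibus_readreg(sc->sc_dev, phy, MII_BMSR);
 *
 * which busy-waits for MVGBE_SMI_BUSY to clear, issues
 * MVGBE_SMI_OPCODE_READ, then polls for MVGBE_SMI_READVALID.
 */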
static void
mvgbec_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct mvgbe_softc *sc = device_private(dev);
	struct mvgbec_softc *csc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t smi;
	int i;

	if (mvgbec0 == NULL) {
		aprint_error_ifnet(ifp, "SMI mvgbec0 not found\n");
		return;
	}
	csc = device_private(mvgbec0);

	DPRINTFN(9, ("mvgbec_miibus_writereg phy=%d reg=%#x val=%#x\n",
	    phy, reg, val));

	mutex_enter(&csc->sc_mtx);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY))
			break;
	}
	if (i == MVGBE_PHY_TIMEOUT) {
		aprint_error_ifnet(ifp, "SMI busy timeout\n");
		mutex_exit(&csc->sc_mtx);
		return;
	}

	smi = MVGBE_SMI_PHYAD(phy) | MVGBE_SMI_REGAD(reg) |
	    MVGBE_SMI_OPCODE_WRITE | (val & MVGBE_SMI_DATA_MASK);
	MVGBE_WRITE(csc, MVGBE_SMI, smi);

	for (i = 0; i < MVGBE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVGBE_READ(csc, MVGBE_SMI) & MVGBE_SMI_BUSY))
			break;
	}

	mutex_exit(&csc->sc_mtx);

	if (i == MVGBE_PHY_TIMEOUT)
		aprint_error_ifnet(ifp, "phy write timed out\n");
}

static void
mvgbec_miibus_statchg(struct ifnet *ifp)
{

	/* nothing to do */
}


static void
mvgbec_wininit(struct mvgbec_softc *sc)
{
	device_t pdev = device_parent(sc->sc_dev);
	uint64_t base;
	uint32_t en, ac, size;
	int window, target, attr, rv, i;
	static int tags[] = {
		MARVELL_TAG_SDRAM_CS0,
		MARVELL_TAG_SDRAM_CS1,
		MARVELL_TAG_SDRAM_CS2,
		MARVELL_TAG_SDRAM_CS3,

		MARVELL_TAG_UNDEFINED,
	};

	/* First disable all address decode windows */
	en = MVGBE_BARE_EN_MASK;
	MVGBE_WRITE(sc, MVGBE_BARE, en);

	ac = 0;
	for (window = 0, i = 0;
	    tags[i] != MARVELL_TAG_UNDEFINED && window < MVGBE_NWINDOW; i++) {
		rv = marvell_winparams_by_tag(pdev, tags[i],
		    &target, &attr, &base, &size);
		if (rv != 0 || size == 0)
			continue;

		if (base > 0xffffffffULL) {
			if (window >= MVGBE_NREMAP) {
				aprint_error_dev(sc->sc_dev,
				    "can't remap window %d\n", window);
				continue;
			}
			MVGBE_WRITE(sc, MVGBE_HA(window),
			    (base >> 32) & 0xffffffff);
		}

		MVGBE_WRITE(sc, MVGBE_BASEADDR(window),
		    MVGBE_BASEADDR_TARGET(target) |
		    MVGBE_BASEADDR_ATTR(attr) |
		    MVGBE_BASEADDR_BASE(base));
		MVGBE_WRITE(sc, MVGBE_S(window), MVGBE_S_SIZE(size));

		en &= ~(1 << window);
		/* set full access (r/w) */
		ac |= MVGBE_EPAP_EPAR(window, MVGBE_EPAP_AC_FA);
		window++;
	}
	/* allow to access decode window */
	MVGBE_WRITE(sc, MVGBE_EPAP, ac);

	MVGBE_WRITE(sc, MVGBE_BARE, en);
}
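/*
 * Editor's note (worked example, not in the original source): with two
 * DRAM banks reported by marvell_winparams_by_tag(), windows 0 and 1
 * get programmed and their bits are cleared from the enable mask, so
 * the final MVGBE_BARE write leaves only the remaining windows
 * disabled.  Bases above 4GB additionally program the high-address
 * remap register MVGBE_HA(window), limited to MVGBE_NREMAP windows.
 */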
/* ARGSUSED */
static int
mvgbe_match(device_t parent, cfdata_t match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	uint32_t pbase, maddrh, maddrl;

	pbase = MVGBE_PORTR_BASE + mva->mva_unit * MVGBE_PORTR_SIZE;
	maddrh =
	    bus_space_read_4(mva->mva_iot, mva->mva_ioh, pbase + MVGBE_MACAH);
	maddrl =
	    bus_space_read_4(mva->mva_iot, mva->mva_ioh, pbase + MVGBE_MACAL);
	if ((maddrh | maddrl) == 0)
		return 0;

	return 1;
}

/* ARGSUSED */
static void
mvgbe_attach(device_t parent, device_t self, void *aux)
{
	struct mvgbe_softc *sc = device_private(self);
	struct marvell_attach_args *mva = aux;
	struct mvgbe_txmap_entry *entry;
	struct ifnet *ifp;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg, i;
	uint32_t maddrh, maddrl;
	void *kva;

	aprint_naive("\n");
	aprint_normal("\n");

	sc->sc_dev = self;
	sc->sc_port = mva->mva_unit;
	sc->sc_iot = mva->mva_iot;
	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, mvgbe_tick, sc);
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    MVGBE_PORTR_BASE + mva->mva_unit * MVGBE_PORTR_SIZE,
	    MVGBE_PORTR_SIZE, &sc->sc_ioh)) {
		aprint_error_dev(self, "Cannot map registers\n");
		return;
	}
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    MVGBE_PORTDAFR_BASE + mva->mva_unit * MVGBE_PORTDAFR_SIZE,
	    MVGBE_PORTDAFR_SIZE, &sc->sc_dafh)) {
		aprint_error_dev(self,
		    "Cannot map destination address filter registers\n");
		return;
	}
	sc->sc_dmat = mva->mva_dmat;

	maddrh = MVGBE_READ(sc, MVGBE_MACAH);
	maddrl = MVGBE_READ(sc, MVGBE_MACAL);
	sc->sc_enaddr[0] = maddrh >> 24;
	sc->sc_enaddr[1] = maddrh >> 16;
	sc->sc_enaddr[2] = maddrh >> 8;
	sc->sc_enaddr[3] = maddrh >> 0;
	sc->sc_enaddr[4] = maddrl >> 8;
	sc->sc_enaddr[5] = maddrl >> 0;
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));
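	/*
	 * Editor's note (illustrative, not in the original source): the
	 * station address is packed big-endian into two registers.  For
	 * 00:11:22:33:44:55, MVGBE_MACAH reads 0x00112233 and MVGBE_MACAL
	 * holds 0x4455 in its low 16 bits, which is what the shifts above
	 * unpack byte by byte.
	 */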
	/* clear all ethernet port interrupts */
	MVGBE_WRITE(sc, MVGBE_IC, 0);
	MVGBE_WRITE(sc, MVGBE_ICE, 0);

	marvell_intr_establish(mva->mva_irq, IPL_NET, mvgbe_intr, sc);

	/* Allocate the descriptor queues. */
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct mvgbe_ring_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't alloc rx buffers\n");
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct mvgbe_ring_data), &kva, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't map dma buffers (%lu bytes)\n",
		    (u_long)sizeof(struct mvgbe_ring_data));
		goto fail1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct mvgbe_ring_data), 1,
	    sizeof(struct mvgbe_ring_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_ring_map)) {
		aprint_error_dev(self, "can't create dma map\n");
		goto fail2;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_ring_map, kva,
	    sizeof(struct mvgbe_ring_data), NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't load dma map\n");
		goto fail3;
	}
	for (i = 0; i < MVGBE_RX_RING_CNT; i++)
		sc->sc_cdata.mvgbe_rx_chain[i].mvgbe_mbuf = NULL;

	SIMPLEQ_INIT(&sc->sc_txmap_head);
	for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
		sc->sc_cdata.mvgbe_tx_chain[i].mvgbe_mbuf = NULL;

		if (bus_dmamap_create(sc->sc_dmat,
		    MVGBE_JLEN, MVGBE_NTXSEG, MVGBE_JLEN, 0,
		    BUS_DMA_NOWAIT, &dmamap)) {
			aprint_error_dev(self, "Can't create TX dmamap\n");
			goto fail4;
		}

		entry = kmem_alloc(sizeof(*entry), KM_SLEEP);
		if (!entry) {
			aprint_error_dev(self, "Can't alloc txmap entry\n");
			bus_dmamap_destroy(sc->sc_dmat, dmamap);
			goto fail4;
		}
		entry->dmamap = dmamap;
		SIMPLEQ_INSERT_HEAD(&sc->sc_txmap_head, entry, link);
	}

	sc->sc_rdata = (struct mvgbe_ring_data *)kva;
	memset(sc->sc_rdata, 0, sizeof(struct mvgbe_ring_data));

	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * Ethernet frames.
	 */
	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	/* Try to allocate memory for jumbo buffers. */
	if (mvgbe_alloc_jumbo_mem(sc)) {
		aprint_error_dev(self, "jumbo buffer allocation failed\n");
		goto fail4;
	}

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = mvgbe_start;
	ifp->if_ioctl = mvgbe_ioctl;
	ifp->if_init = mvgbe_init;
	ifp->if_stop = mvgbe_stop;
	ifp->if_watchdog = mvgbe_watchdog;
	/*
	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
	 */
	sc->sc_ethercom.ec_if.if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	/*
	 * But, IPv6 packets in the stream can cause incorrect TCPv4 Tx sums.
	 */
	sc->sc_ethercom.ec_if.if_capabilities &= ~IFCAP_CSUM_TCPv4_Tx;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(MVGBE_TX_RING_CNT - 1, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);
	strcpy(ifp->if_xname, device_xname(sc->sc_dev));

	mvgbe_stop(ifp, 0);

	/*
	 * Do MII setup.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mvgbec_miibus_readreg;
	sc->sc_mii.mii_writereg = mvgbec_miibus_writereg;
	sc->sc_mii.mii_statchg = mvgbec_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    mvgbe_mediachange, mvgbe_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff,
	    MII_PHY_ANY, parent == mvgbec0 ? 0 : 1, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);

	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, mvgbe_ifflags_cb);

	sysctl_mvgbe_init(sc);
#ifdef MVGBE_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_rxoverrun, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rxoverrun");
	evcnt_attach_dynamic(&sc->sc_ev_wdogsoft, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "wdogsoft");
#endif
	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, 0);

	return;

fail4:
	while ((entry = SIMPLEQ_FIRST(&sc->sc_txmap_head)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txmap_head, link);
		bus_dmamap_destroy(sc->sc_dmat, entry->dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_ring_map);
fail3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_ring_map);
fail2:
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct mvgbe_ring_data));
fail1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return;
}
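/*
 * Editor's note (usage sketch, not in the original source): with the
 * capabilities advertised above, the remaining checksum offloads can
 * be toggled from userland in the usual NetBSD way, something like
 *
 *	# ifconfig mvgbe0 ip4csum tcp4csum-rx udp4csum
 *
 * (TCPv4 Tx checksumming stays withdrawn because of the IPv6
 * interaction noted above.)
 */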
static int
mvgbe_ipginttx(struct mvgbec_softc *csc, struct mvgbe_softc *sc,
    unsigned int ipginttx)
{
	uint32_t reg;
	reg = MVGBE_READ(sc, MVGBE_PTFUT);

	if (csc->sc_flags & FLAGS_IPG2) {
		if (ipginttx > MVGBE_PTFUT_IPGINTTX_V2_MAX)
			return -1;
		reg &= ~MVGBE_PTFUT_IPGINTTX_V2_MASK;
		reg |= MVGBE_PTFUT_IPGINTTX_V2(ipginttx);
	} else if (csc->sc_flags & FLAGS_IPG1) {
		if (ipginttx > MVGBE_PTFUT_IPGINTTX_V1_MAX)
			return -1;
		reg &= ~MVGBE_PTFUT_IPGINTTX_V1_MASK;
		reg |= MVGBE_PTFUT_IPGINTTX_V1(ipginttx);
	}
	MVGBE_WRITE(sc, MVGBE_PTFUT, reg);

	return 0;
}

static int
mvgbe_ipgintrx(struct mvgbec_softc *csc, struct mvgbe_softc *sc,
    unsigned int ipgintrx)
{
	uint32_t reg;
	reg = MVGBE_READ(sc, MVGBE_SDC);

	if (csc->sc_flags & FLAGS_IPG2) {
		if (ipgintrx > MVGBE_SDC_IPGINTRX_V2_MAX)
			return -1;
		reg &= ~MVGBE_SDC_IPGINTRX_V2_MASK;
		reg |= MVGBE_SDC_IPGINTRX_V2(ipgintrx);
	} else if (csc->sc_flags & FLAGS_IPG1) {
		if (ipgintrx > MVGBE_SDC_IPGINTRX_V1_MAX)
			return -1;
		reg &= ~MVGBE_SDC_IPGINTRX_V1_MASK;
		reg |= MVGBE_SDC_IPGINTRX_V1(ipgintrx);
	}
	MVGBE_WRITE(sc, MVGBE_SDC, reg);

	return 0;
}
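/*
 * Editor's note (usage sketch, not in the original source): these two
 * helpers back the per-device sysctl nodes created in
 * sysctl_mvgbe_init(), so on an IPG-capable chip the interrupt
 * moderation timers can be tuned at run time, e.g.
 *
 *	# sysctl -w hw.mvgbe.mvgbe0.ipginttx=768
 *	# sysctl -w hw.mvgbe.mvgbe0.ipgintrx=768
 *
 * Values above the chip's V1/V2 maximum are rejected with EINVAL.
 */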
static void
mvgbe_tick(void *arg)
{
	struct mvgbe_softc *sc = arg;
	struct mii_data *mii = &sc->sc_mii;
	int s;

	s = splnet();
	mii_tick(mii);
	/* Need more work */
	MVGBE_EVCNT_ADD(&sc->sc_ev_rxoverrun, MVGBE_READ(sc, MVGBE_POFC));
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

static int
mvgbe_intr(void *arg)
{
	struct mvgbe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t ic, ice, datum = 0;
	int claimed = 0;

	for (;;) {
		ice = MVGBE_READ(sc, MVGBE_ICE);
		ic = MVGBE_READ(sc, MVGBE_IC);

		DPRINTFN(3, ("mvgbe_intr: ic=%#x, ice=%#x\n", ic, ice));
		if (ic == 0 && ice == 0)
			break;

		datum = datum ^ ic ^ ice;

		MVGBE_WRITE(sc, MVGBE_IC, ~ic);
		MVGBE_WRITE(sc, MVGBE_ICE, ~ice);

		claimed = 1;

		if (!(ifp->if_flags & IFF_RUNNING))
			break;

		if (ice & MVGBE_ICE_LINKCHG) {
			if (MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP) {
				/* Enable port RX and TX. */
				MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_ENQ(0));
				MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ);
			} else {
				MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_DISQ(0));
				MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_DISQ);
			}

			/* Notify link change event to mii layer */
			mii_pollstat(&sc->sc_mii);
		}

		if (ic & (MVGBE_IC_RXBUF | MVGBE_IC_RXERROR))
			mvgbe_rxeof(sc);

		if (ice & (MVGBE_ICE_TXBUF | MVGBE_ICE_TXERR))
			mvgbe_txeof(sc);
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		mvgbe_start(ifp);

	rnd_add_uint32(&sc->sc_rnd_source, datum);

	return claimed;
}

static void
mvgbe_start(struct ifnet *ifp)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	uint32_t idx = sc->sc_cdata.mvgbe_tx_prod;
	int pkts = 0;

	DPRINTFN(3, ("mvgbe_start (idx %d, tx_chain[idx] %p)\n", idx,
	    sc->sc_cdata.mvgbe_tx_chain[idx].mvgbe_mbuf));

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;
	/* If Link is DOWN, can't start TX */
	if (!(MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP))
		return;

	while (sc->sc_cdata.mvgbe_tx_chain[idx].mvgbe_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (mvgbe_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m_head);
	}
	if (pkts == 0)
		return;

	/* Transmit at Queue 0 */
	if (idx != sc->sc_cdata.mvgbe_tx_prod) {
		sc->sc_cdata.mvgbe_tx_prod = idx;
		MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 1;
		sc->sc_wdogsoft = 1;
	}
}
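/*
 * Editor's note (not in the original source): the one-second timeout
 * armed above is deliberately short.  sc_wdogsoft tells
 * mvgbe_watchdog() that the first expiry may just be the DMA engine
 * having cleared MVGBE_TQC_ENQ on reaching the queue end, in which
 * case the watchdog re-kicks the queue instead of reinitializing the
 * whole chip.
 */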
static int
mvgbe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		DPRINTFN(2, ("mvgbe_ioctl MEDIA\n"));
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		DPRINTFN(2, ("mvgbe_ioctl ETHER\n"));
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING) {
				mvgbe_filter_setup(sc);
			}
			error = 0;
		}
		break;
	}

	splx(s);

	return error;
}

static int
mvgbe_init(struct ifnet *ifp)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct mvgbec_softc *csc = device_private(device_parent(sc->sc_dev));
	struct mii_data *mii = &sc->sc_mii;
	uint32_t reg;
	int i;

	DPRINTFN(2, ("mvgbe_init\n"));

	/* Cancel pending I/O and free all RX/TX buffers. */
	mvgbe_stop(ifp, 0);

	/* clear all ethernet port interrupts */
	MVGBE_WRITE(sc, MVGBE_IC, 0);
	MVGBE_WRITE(sc, MVGBE_ICE, 0);

	/* Init TX/RX descriptors */
	if (mvgbe_init_tx_ring(sc) == ENOBUFS) {
		aprint_error_ifnet(ifp,
		    "initialization failed: no memory for tx buffers\n");
		return ENOBUFS;
	}
	if (mvgbe_init_rx_ring(sc) == ENOBUFS) {
		aprint_error_ifnet(ifp,
		    "initialization failed: no memory for rx buffers\n");
		return ENOBUFS;
	}

	if ((csc->sc_flags & FLAGS_IPG1) || (csc->sc_flags & FLAGS_IPG2)) {
		sc->sc_ipginttx = MVGBE_IPGINTTX_DEFAULT;
		sc->sc_ipgintrx = MVGBE_IPGINTRX_DEFAULT;
	}
	if (csc->sc_flags & FLAGS_FIX_MTU)
		MVGBE_WRITE(sc, MVGBE_MTU, 0);	/* hw reset value is wrong */
	MVGBE_WRITE(sc, MVGBE_PSC,
	    MVGBE_PSC_ANFC |			/* Enable Auto-Neg Flow Ctrl */
	    MVGBE_PSC_RESERVED |		/* Must be set to 1 */
	    MVGBE_PSC_FLFAIL |			/* Do NOT Force Link Fail */
	    MVGBE_PSC_MRU(MVGBE_PSC_MRU_9022) |	/* we want 9k */
	    MVGBE_PSC_SETFULLDX);		/* Set_FullDx */
	/* XXXX: mvgbe(4) always use RGMII. */
	MVGBE_WRITE(sc, MVGBE_PSC1,
	    MVGBE_READ(sc, MVGBE_PSC1) | MVGBE_PSC1_RGMIIEN);
	/* XXXX: Also always Weighted Round-Robin Priority Mode */
	MVGBE_WRITE(sc, MVGBE_TQFPC, MVGBE_TQFPC_EN(0));

	MVGBE_WRITE(sc, MVGBE_CRDP(0), MVGBE_RX_RING_ADDR(sc, 0));
	MVGBE_WRITE(sc, MVGBE_TCQDP, MVGBE_TX_RING_ADDR(sc, 0));

	if (csc->sc_flags & FLAGS_FIX_TQTB) {
		/*
		 * Queue 0 (offset 0x72700) must be programmed to 0x3fffffff.
		 * And offset 0x72704 must be programmed to 0x03ffffff.
		 * Queue 1 through 7 must be programmed to 0x0.
		 */
		MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(0), 0x3fffffff);
		MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(0), 0x03ffffff);
		for (i = 1; i < 8; i++) {
			MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(i), 0x0);
			MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(i), 0x0);
		}
	} else
		for (i = 1; i < 8; i++) {
			MVGBE_WRITE(sc, MVGBE_TQTBCOUNT(i), 0x3fffffff);
			MVGBE_WRITE(sc, MVGBE_TQTBCONFIG(i), 0xffff7fff);
			MVGBE_WRITE(sc, MVGBE_TQAC(i), 0xfc0000ff);
		}

	MVGBE_WRITE(sc, MVGBE_PXC, MVGBE_PXC_RXCS);
	MVGBE_WRITE(sc, MVGBE_PXCX, 0);

	/* Set SDC register except IPGINT bits */
	MVGBE_WRITE(sc, MVGBE_SDC,
	    MVGBE_SDC_RXBSZ_16_64BITWORDS |
#if BYTE_ORDER == LITTLE_ENDIAN
	    MVGBE_SDC_BLMR |	/* Big/Little Endian Receive Mode: No swap */
	    MVGBE_SDC_BLMT |	/* Big/Little Endian Transmit Mode: No swap */
#endif
	    MVGBE_SDC_TXBSZ_16_64BITWORDS);
	/* And then set IPGINT bits */
	mvgbe_ipgintrx(csc, sc, sc->sc_ipgintrx);

	/* Tx side */
	MVGBE_WRITE(sc, MVGBE_PTFUT, 0);
	mvgbe_ipginttx(csc, sc, sc->sc_ipginttx);

	mvgbe_filter_setup(sc);

	mii_mediachg(mii);

	/* Enable port */
	reg = MVGBE_READ(sc, MVGBE_PSC);
	MVGBE_WRITE(sc, MVGBE_PSC, reg | MVGBE_PSC_PORTEN);

	/* If Link is UP, Start RX and TX traffic */
	if (MVGBE_READ(sc, MVGBE_PS) & MVGBE_PS_LINKUP) {
		/* Enable port RX/TX. */
		MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_ENQ(0));
		MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ);
	}

	/* Enable interrupt masks */
	MVGBE_WRITE(sc, MVGBE_PIM,
	    MVGBE_IC_RXBUF |
	    MVGBE_IC_EXTEND |
	    MVGBE_IC_RXBUFQ_MASK |
	    MVGBE_IC_RXERROR |
	    MVGBE_IC_RXERRQ_MASK);
	MVGBE_WRITE(sc, MVGBE_PEIM,
	    MVGBE_ICE_TXBUF |
	    MVGBE_ICE_TXERR |
	    MVGBE_ICE_LINKCHG);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
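/*
 * Editor's note (usage sketch, not in the original source): the MRU
 * programmed above (MVGBE_PSC_MRU_9022), together with the
 * ETHERCAP_JUMBO_MTU capability advertised at attach time, means a
 * jumbo MTU can simply be configured from userland, e.g.
 *
 *	# ifconfig mvgbe0 mtu 9000
 *
 * Receive buffers always come from the driver's private jumbo pool
 * (mvgbe_alloc_jumbo_mem() below), so no buffer reconfiguration is
 * needed when the MTU changes.
 */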
/* ARGSUSED */
static void
mvgbe_stop(struct ifnet *ifp, int disable)
{
	struct mvgbe_softc *sc = ifp->if_softc;
	struct mvgbec_softc *csc = device_private(device_parent(sc->sc_dev));
	struct mvgbe_chain_data *cdata = &sc->sc_cdata;
	uint32_t reg;
	int i, cnt;

	DPRINTFN(2, ("mvgbe_stop\n"));

	callout_stop(&sc->sc_tick_ch);

	/* Stop Rx port activity. Check port Rx activity. */
	reg = MVGBE_READ(sc, MVGBE_RQC);
	if (reg & MVGBE_RQC_ENQ_MASK)
		/* Issue stop command for active channels only */
		MVGBE_WRITE(sc, MVGBE_RQC, MVGBE_RQC_DISQ_DISABLE(reg));

	/* Stop Tx port activity. Check port Tx activity. */
	if (MVGBE_READ(sc, MVGBE_TQC) & MVGBE_TQC_ENQ)
		MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_DISQ);

	/* Force link down */
	reg = MVGBE_READ(sc, MVGBE_PSC);
	MVGBE_WRITE(sc, MVGBE_PSC, reg & ~MVGBE_PSC_FLFAIL);

#define RX_DISABLE_TIMEOUT		0x1000000
#define TX_FIFO_EMPTY_TIMEOUT		0x1000000
	/* Wait for all Rx activity to terminate. */
	cnt = 0;
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			aprint_error_ifnet(ifp,
			    "timeout for RX stopped. rqc 0x%x\n", reg);
			break;
		}
		cnt++;

		/*
		 * Check Receive Queue Command register that all Rx queues
		 * are stopped
		 */
		reg = MVGBE_READ(sc, MVGBE_RQC);
	} while (reg & 0xff);

	/* Double check to verify that TX FIFO is empty */
	cnt = 0;
	while (1) {
		do {
			if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
				aprint_error_ifnet(ifp,
				    "timeout for TX FIFO empty. status 0x%x\n",
				    reg);
				break;
			}
			cnt++;

			reg = MVGBE_READ(sc, MVGBE_PS);
		} while
		    (!(reg & MVGBE_PS_TXFIFOEMP) || reg & MVGBE_PS_TXINPROG);

		if (cnt >= TX_FIFO_EMPTY_TIMEOUT)
			break;

		/* Double check */
		reg = MVGBE_READ(sc, MVGBE_PS);
		if (reg & MVGBE_PS_TXFIFOEMP && !(reg & MVGBE_PS_TXINPROG))
			break;
		else
			aprint_error_ifnet(ifp,
			    "TX FIFO empty double check failed."
			    " %d loops, status 0x%x\n", cnt, reg);
	}

	/* Reset the Enable bit in the Port Serial Control Register */
	reg = MVGBE_READ(sc, MVGBE_PSC);
	MVGBE_WRITE(sc, MVGBE_PSC, reg & ~MVGBE_PSC_PORTEN);

	/*
	 * Disable and clear interrupts
	 * 0) controller interrupt
	 * 1) port interrupt cause
	 * 2) port interrupt mask
	 */
	MVGBE_WRITE(csc, MVGBE_EUIM, 0);
	MVGBE_WRITE(csc, MVGBE_EUIC, 0);
	MVGBE_WRITE(sc, MVGBE_IC, 0);
	MVGBE_WRITE(sc, MVGBE_ICE, 0);
	MVGBE_WRITE(sc, MVGBE_PIM, 0);
	MVGBE_WRITE(sc, MVGBE_PEIM, 0);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
		if (cdata->mvgbe_rx_chain[i].mvgbe_mbuf != NULL) {
			m_freem(cdata->mvgbe_rx_chain[i].mvgbe_mbuf);
			cdata->mvgbe_rx_chain[i].mvgbe_mbuf = NULL;
		}
	}
	for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
		if (cdata->mvgbe_tx_chain[i].mvgbe_mbuf != NULL) {
			m_freem(cdata->mvgbe_tx_chain[i].mvgbe_mbuf);
			cdata->mvgbe_tx_chain[i].mvgbe_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
static void
mvgbe_watchdog(struct ifnet *ifp)
{
	struct mvgbe_softc *sc = ifp->if_softc;

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	mvgbe_txeof(sc);
	if (sc->sc_cdata.mvgbe_tx_cnt != 0) {
		if (sc->sc_wdogsoft) {
			/*
			 * There is a race condition between the CPU and the
			 * DMA engine.  When the DMA engine encounters the
			 * queue end, it clears the MVGBE_TQC_ENQ bit.
			 */
			MVGBE_WRITE(sc, MVGBE_TQC, MVGBE_TQC_ENQ);
			ifp->if_timer = 5;
			sc->sc_wdogsoft = 0;
			MVGBE_EVCNT_INCR(&sc->sc_ev_wdogsoft);
		} else {
			aprint_error_ifnet(ifp, "watchdog timeout\n");

			ifp->if_oerrors++;

			mvgbe_init(ifp);
		}
	}
}

static int
mvgbe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct mvgbe_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if (change != 0)
		sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;

	if ((change & IFF_PROMISC) != 0)
		mvgbe_filter_setup(sc);

	return 0;
}

/*
 * Set media options.
 */
static int
mvgbe_mediachange(struct ifnet *ifp)
{
	return ether_mediachange(ifp);
}

/*
 * Report current media status.
 */
static void
mvgbe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ether_mediastatus(ifp, ifmr);
}
static int
mvgbe_init_rx_ring(struct mvgbe_softc *sc)
{
	struct mvgbe_chain_data *cd = &sc->sc_cdata;
	struct mvgbe_ring_data *rd = sc->sc_rdata;
	int i;

	memset(rd->mvgbe_rx_ring, 0,
	    sizeof(struct mvgbe_rx_desc) * MVGBE_RX_RING_CNT);

	for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
		cd->mvgbe_rx_chain[i].mvgbe_desc =
		    &rd->mvgbe_rx_ring[i];
		if (i == MVGBE_RX_RING_CNT - 1) {
			cd->mvgbe_rx_chain[i].mvgbe_next =
			    &cd->mvgbe_rx_chain[0];
			rd->mvgbe_rx_ring[i].nextdescptr =
			    MVGBE_RX_RING_ADDR(sc, 0);
		} else {
			cd->mvgbe_rx_chain[i].mvgbe_next =
			    &cd->mvgbe_rx_chain[i + 1];
			rd->mvgbe_rx_ring[i].nextdescptr =
			    MVGBE_RX_RING_ADDR(sc, i + 1);
		}
	}

	for (i = 0; i < MVGBE_RX_RING_CNT; i++) {
		if (mvgbe_newbuf(sc, i, NULL,
		    sc->sc_cdata.mvgbe_rx_jumbo_map) == ENOBUFS) {
			aprint_error_ifnet(&sc->sc_ethercom.ec_if,
			    "failed alloc of %dth mbuf\n", i);
			return ENOBUFS;
		}
	}
	sc->sc_cdata.mvgbe_rx_prod = 0;
	sc->sc_cdata.mvgbe_rx_cons = 0;

	return 0;
}

static int
mvgbe_init_tx_ring(struct mvgbe_softc *sc)
{
	struct mvgbe_chain_data *cd = &sc->sc_cdata;
	struct mvgbe_ring_data *rd = sc->sc_rdata;
	int i;

	memset(sc->sc_rdata->mvgbe_tx_ring, 0,
	    sizeof(struct mvgbe_tx_desc) * MVGBE_TX_RING_CNT);

	for (i = 0; i < MVGBE_TX_RING_CNT; i++) {
		cd->mvgbe_tx_chain[i].mvgbe_desc =
		    &rd->mvgbe_tx_ring[i];
		if (i == MVGBE_TX_RING_CNT - 1) {
			cd->mvgbe_tx_chain[i].mvgbe_next =
			    &cd->mvgbe_tx_chain[0];
			rd->mvgbe_tx_ring[i].nextdescptr =
			    MVGBE_TX_RING_ADDR(sc, 0);
		} else {
			cd->mvgbe_tx_chain[i].mvgbe_next =
			    &cd->mvgbe_tx_chain[i + 1];
			rd->mvgbe_tx_ring[i].nextdescptr =
			    MVGBE_TX_RING_ADDR(sc, i + 1);
		}
		rd->mvgbe_tx_ring[i].cmdsts = MVGBE_BUFFER_OWNED_BY_HOST;
	}

	sc->sc_cdata.mvgbe_tx_prod = 0;
	sc->sc_cdata.mvgbe_tx_cons = 0;
	sc->sc_cdata.mvgbe_tx_cnt = 0;

	MVGBE_CDTXSYNC(sc, 0, MVGBE_TX_RING_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}
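/*
 * Editor's note (illustrative, not in the original source): both rings
 * are linked into a hardware-visible circle via nextdescptr.  E.g. with
 * MVGBE_RX_RING_CNT == 256, descriptor 255 points back at
 * MVGBE_RX_RING_ADDR(sc, 0), while the software-side mvgbe_next
 * pointers mirror the same circle for the chain walkers.
 */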
static int
mvgbe_newbuf(struct mvgbe_softc *sc, int i, struct mbuf *m,
    bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct mvgbe_chain *c;
	struct mvgbe_rx_desc *r;
	int align;
	vaddr_t offset;

	if (m == NULL) {
		void *buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			aprint_error_ifnet(&sc->sc_ethercom.ec_if,
			    "no memory for rx list -- packet dropped!\n");
			return ENOBUFS;
		}

		/* Allocate the jumbo buffer */
		buf = mvgbe_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			DPRINTFN(1, ("%s jumbo allocation failed -- packet "
			    "dropped!\n", sc->sc_ethercom.ec_if.if_xname));
			return ENOBUFS;
		}

		/* Attach the buffer to the mbuf */
		m_new->m_len = m_new->m_pkthdr.len = MVGBE_JLEN;
		MEXTADD(m_new, buf, MVGBE_JLEN, 0, mvgbe_jfree, sc);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MVGBE_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	align = (u_long)m_new->m_data & MVGBE_RXBUF_MASK;
	if (align != 0) {
		DPRINTFN(1,("align = %d\n", align));
		m_adj(m_new, MVGBE_RXBUF_ALIGN - align);
	}

	c = &sc->sc_cdata.mvgbe_rx_chain[i];
	r = c->mvgbe_desc;
	c->mvgbe_mbuf = m_new;
	offset = (vaddr_t)m_new->m_data - (vaddr_t)sc->sc_cdata.mvgbe_jumbo_buf;
	r->bufptr = dmamap->dm_segs[0].ds_addr + offset;
	r->bufsize = MVGBE_JLEN & ~MVGBE_RXBUF_MASK;
	r->cmdsts = MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_ENABLE_INTERRUPT;

	/* Invalidate RX buffer */
	bus_dmamap_sync(sc->sc_dmat, dmamap, offset, r->bufsize,
	    BUS_DMASYNC_PREREAD);

	MVGBE_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}

/*
 * Memory management for jumbo frames.
 */

static int
mvgbe_alloc_jumbo_mem(struct mvgbe_softc *sc)
{
	char *ptr, *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct mvgbe_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->sc_dmat, MVGBE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, MVGBE_JMEM,
	    (void **)&kva, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev,
		    "can't map dma buffers (%d bytes)\n", MVGBE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmat, MVGBE_JMEM, 1, MVGBE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->sc_cdata.mvgbe_rx_jumbo_map)) {
		aprint_error_dev(sc->sc_dev, "can't create dma map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_cdata.mvgbe_rx_jumbo_map,
	    kva, MVGBE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't load dma map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->sc_cdata.mvgbe_jumbo_buf = (void *)kva;
	DPRINTFN(1,("mvgbe_jumbo_buf = %p\n", sc->sc_cdata.mvgbe_jumbo_buf));

	LIST_INIT(&sc->sc_jfree_listhead);
	LIST_INIT(&sc->sc_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->sc_cdata.mvgbe_jumbo_buf;
	for (i = 0; i < MVGBE_JSLOTS; i++) {
		sc->sc_cdata.mvgbe_jslots[i] = ptr;
		ptr += MVGBE_JLEN;
		entry = kmem_alloc(sizeof(struct mvgbe_jpool_entry), KM_SLEEP);
		if (entry == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "no memory for jumbo buffer queue!\n");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		if (i)
			LIST_INSERT_HEAD(&sc->sc_jfree_listhead, entry,
			    jpool_entries);
		else
			LIST_INSERT_HEAD(&sc->sc_jinuse_listhead, entry,
			    jpool_entries);
	}
out:
	if (error != 0) {
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmat,
			    sc->sc_cdata.mvgbe_rx_jumbo_map);
		case 3:
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_cdata.mvgbe_rx_jumbo_map);
		case 2:
			bus_dmamem_unmap(sc->sc_dmat, kva, MVGBE_JMEM);
		case 1:
			bus_dmamem_free(sc->sc_dmat, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}
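/*
 * Editor's note (worked example, not in the original source): the pool
 * above is one bus_dmamem chunk carved into MVGBE_JSLOTS fixed slots,
 * so slot i lives at mvgbe_jumbo_buf + i * MVGBE_JLEN.  mvgbe_jfree()
 * inverts that with integer division:
 *
 *	i = ((vaddr_t)buf - (vaddr_t)sc->sc_cdata.mvgbe_jumbo_buf)
 *	    / MVGBE_JLEN;
 *
 * which is why a buffer outside the pool triggers the panic there.
 */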
/*
 * Allocate a jumbo buffer.
 */
static void *
mvgbe_jalloc(struct mvgbe_softc *sc)
{
	struct mvgbe_jpool_entry *entry;

	entry = LIST_FIRST(&sc->sc_jfree_listhead);

	if (entry == NULL)
		return NULL;

	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->sc_jinuse_listhead, entry, jpool_entries);
	return sc->sc_cdata.mvgbe_jslots[entry->slot];
}

/*
 * Release a jumbo buffer.
 */
static void
mvgbe_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct mvgbe_jpool_entry *entry;
	struct mvgbe_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct mvgbe_softc *)arg;

	if (sc == NULL)
		panic("%s: can't find softc pointer!", __func__);

	/* calculate the slot this buffer belongs to */
	i = ((vaddr_t)buf - (vaddr_t)sc->sc_cdata.mvgbe_jumbo_buf) / MVGBE_JLEN;

	if ((i < 0) || (i >= MVGBE_JSLOTS))
		panic("%s: asked to free buffer that we don't manage!",
		    __func__);

	s = splvm();
	entry = LIST_FIRST(&sc->sc_jinuse_listhead);
	if (entry == NULL)
		panic("%s: buffer not in use!", __func__);
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->sc_jfree_listhead, entry, jpool_entries);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}
static int
mvgbe_encap(struct mvgbe_softc *sc, struct mbuf *m_head,
    uint32_t *txidx)
{
	struct mvgbe_tx_desc *f = NULL;
	struct mvgbe_txmap_entry *entry;
	bus_dma_segment_t *txseg;
	bus_dmamap_t txmap;
	uint32_t first, current, last, cmdsts = 0;
	int m_csumflags, i;
	bool needs_defrag = false;

	DPRINTFN(3, ("mvgbe_encap\n"));

	entry = SIMPLEQ_FIRST(&sc->sc_txmap_head);
	if (entry == NULL) {
		DPRINTFN(2, ("mvgbe_encap: no txmap available\n"));
		return ENOBUFS;
	}
	txmap = entry->dmamap;

	first = current = last = *txidx;

	/*
	 * Preserve m_pkthdr.csum_flags here since m_head might be
	 * updated by m_defrag()
	 */
	m_csumflags = m_head->m_pkthdr.csum_flags;

do_defrag:
	if (__predict_false(needs_defrag == true)) {
		/* A small unaligned segment was detected. */
		struct mbuf *m_new;
		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL)
			return EFBIG;
		m_head = m_new;
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m_head, BUS_DMA_NOWAIT)) {
		DPRINTFN(1, ("mvgbe_encap: dmamap failed\n"));
		return ENOBUFS;
	}

	txseg = txmap->dm_segs;

	if (__predict_true(needs_defrag == false)) {
		/*
		 * Detect rarely encountered DMA limitation.
		 */
		for (i = 0; i < txmap->dm_nsegs; i++) {
			if (((txseg[i].ds_addr & 7) != 0) &&
			    (txseg[i].ds_len <= 8) &&
			    (txseg[i].ds_len >= 1)) {
				txseg = NULL;
				bus_dmamap_unload(sc->sc_dmat, txmap);
				needs_defrag = true;
				goto do_defrag;
			}
		}
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (sc->sc_cdata.mvgbe_tx_cnt + txmap->dm_nsegs >=
	    MVGBE_TX_RING_CNT) {
		DPRINTFN(2, ("mvgbe_encap: too few descriptors free\n"));
		bus_dmamap_unload(sc->sc_dmat, txmap);
		return ENOBUFS;
	}

	DPRINTFN(2, ("mvgbe_encap: dm_nsegs=%d\n", txmap->dm_nsegs));

	for (i = 0; i < txmap->dm_nsegs; i++) {
		f = &sc->sc_rdata->mvgbe_tx_ring[current];
		f->bufptr = txseg[i].ds_addr;
		f->bytecnt = txseg[i].ds_len;
		if (i != 0)
			f->cmdsts = MVGBE_BUFFER_OWNED_BY_DMA;
		last = current;
		current = MVGBE_TX_RING_NEXT(current);
	}

	if (m_csumflags & M_CSUM_IPv4)
		cmdsts |= MVGBE_TX_GENERATE_IP_CHKSUM;
	if (m_csumflags & M_CSUM_TCPv4)
		cmdsts |=
		    MVGBE_TX_GENERATE_L4_CHKSUM | MVGBE_TX_L4_TYPE_TCP;
	if (m_csumflags & M_CSUM_UDPv4)
		cmdsts |=
		    MVGBE_TX_GENERATE_L4_CHKSUM | MVGBE_TX_L4_TYPE_UDP;
	if (m_csumflags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		const int iphdr_unitlen = sizeof(struct ip) / sizeof(uint32_t);

		cmdsts |= MVGBE_TX_IP_NO_FRAG |
		    MVGBE_TX_IP_HEADER_LEN(iphdr_unitlen);	/* unit is 4B */
	}
	if (txmap->dm_nsegs == 1)
		f->cmdsts = cmdsts |
		    MVGBE_TX_GENERATE_CRC |
		    MVGBE_TX_ENABLE_INTERRUPT |
		    MVGBE_TX_ZERO_PADDING |
		    MVGBE_TX_FIRST_DESC |
		    MVGBE_TX_LAST_DESC;
	else {
		f = &sc->sc_rdata->mvgbe_tx_ring[first];
		f->cmdsts = cmdsts |
		    MVGBE_TX_GENERATE_CRC |
		    MVGBE_TX_FIRST_DESC;

		f = &sc->sc_rdata->mvgbe_tx_ring[last];
		f->cmdsts =
		    MVGBE_BUFFER_OWNED_BY_DMA |
		    MVGBE_TX_ENABLE_INTERRUPT |
		    MVGBE_TX_ZERO_PADDING |
		    MVGBE_TX_LAST_DESC;

		/* Sync descriptors except first */
		MVGBE_CDTXSYNC(sc,
		    (MVGBE_TX_RING_CNT - 1 == *txidx) ? 0 : (*txidx) + 1,
		    txmap->dm_nsegs - 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	sc->sc_cdata.mvgbe_tx_chain[last].mvgbe_mbuf = m_head;
	SIMPLEQ_REMOVE_HEAD(&sc->sc_txmap_head, link);
	sc->sc_cdata.mvgbe_tx_map[last] = entry;

	/* Finally, sync first descriptor */
	sc->sc_rdata->mvgbe_tx_ring[first].cmdsts |=
	    MVGBE_BUFFER_OWNED_BY_DMA;
	MVGBE_CDTXSYNC(sc, *txidx, 1,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_cdata.mvgbe_tx_cnt += i;
	*txidx = current;

	DPRINTFN(3, ("mvgbe_encap: completed successfully\n"));

	return 0;
}
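/*
 * Editor's note (not in the original source): the ordering above
 * matters for descriptor ownership.  For a 3-segment packet at
 * *txidx == 10, descriptors 11 and 12 are filled and synced first,
 * and only then does descriptor 10 get MVGBE_BUFFER_OWNED_BY_DMA set
 * and synced, so the hardware can never observe a half-built chain.
 */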
static void
mvgbe_rxeof(struct mvgbe_softc *sc)
{
	struct mvgbe_chain_data *cdata = &sc->sc_cdata;
	struct mvgbe_rx_desc *cur_rx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	bus_dmamap_t dmamap;
	uint32_t rxstat;
	uint16_t bufsize;
	int idx, cur, total_len;

	idx = sc->sc_cdata.mvgbe_rx_prod;

	DPRINTFN(3, ("mvgbe_rxeof %d\n", idx));

	for (;;) {
		cur = idx;

		/* Sync the descriptor */
		MVGBE_CDRXSYNC(sc, idx,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->sc_rdata->mvgbe_rx_ring[idx];

		if ((cur_rx->cmdsts & MVGBE_BUFFER_OWNED_MASK) ==
		    MVGBE_BUFFER_OWNED_BY_DMA) {
			/* Invalidate the descriptor -- it's not ready yet */
			MVGBE_CDRXSYNC(sc, idx, BUS_DMASYNC_PREREAD);
			sc->sc_cdata.mvgbe_rx_prod = idx;
			break;
		}
#ifdef DIAGNOSTIC
		if ((cur_rx->cmdsts &
		    (MVGBE_RX_LAST_DESC | MVGBE_RX_FIRST_DESC)) !=
		    (MVGBE_RX_LAST_DESC | MVGBE_RX_FIRST_DESC))
			panic(
			    "mvgbe_rxeof: buffer size is smaller than packet");
#endif

		dmamap = sc->sc_cdata.mvgbe_rx_jumbo_map;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		m = cdata->mvgbe_rx_chain[idx].mvgbe_mbuf;
		cdata->mvgbe_rx_chain[idx].mvgbe_mbuf = NULL;
		total_len = cur_rx->bytecnt - ETHER_CRC_LEN;
		rxstat = cur_rx->cmdsts;
		bufsize = cur_rx->bufsize;

		cdata->mvgbe_rx_map[idx] = NULL;

		idx = MVGBE_RX_RING_NEXT(idx);

		if (rxstat & MVGBE_ERROR_SUMMARY) {
#if 0
			int err = rxstat & MVGBE_RX_ERROR_CODE_MASK;

			if (err == MVGBE_RX_CRC_ERROR)
				ifp->if_ierrors++;
			if (err == MVGBE_RX_OVERRUN_ERROR)
				ifp->if_ierrors++;
			if (err == MVGBE_RX_MAX_FRAME_LEN_ERROR)
				ifp->if_ierrors++;
			if (err == MVGBE_RX_RESOURCE_ERROR)
				ifp->if_ierrors++;
#else
			ifp->if_ierrors++;
#endif
			mvgbe_newbuf(sc, cur, m, dmamap);
			continue;
		}

		if (rxstat & MVGBE_RX_IP_FRAME_TYPE) {
			int flgs = 0;

			/* Check IPv4 header checksum */
			flgs |= M_CSUM_IPv4;
			if (!(rxstat & MVGBE_RX_IP_HEADER_OK))
				flgs |= M_CSUM_IPv4_BAD;
			else if ((bufsize & MVGBE_RX_IP_FRAGMENT) == 0) {
				/*
				 * Check TCPv4/UDPv4 checksum for
				 * non-fragmented packet only.
				 *
				 * It seemed that sometimes the
				 * MVGBE_RX_L4_CHECKSUM_OK bit was set to 0
				 * even if the checksum was correct and the
				 * packet was not fragmented.  So we don't set
				 * M_CSUM_TCP_UDP_BAD even if the csum bit is 0.
				 */

				if (((rxstat & MVGBE_RX_L4_TYPE_MASK) ==
				    MVGBE_RX_L4_TYPE_TCP) &&
				    ((rxstat & MVGBE_RX_L4_CHECKSUM_OK) != 0))
					flgs |= M_CSUM_TCPv4;
				else if (((rxstat & MVGBE_RX_L4_TYPE_MASK) ==
				    MVGBE_RX_L4_TYPE_UDP) &&
				    ((rxstat & MVGBE_RX_L4_CHECKSUM_OK) != 0))
					flgs |= M_CSUM_UDPv4;
			}
			m->m_pkthdr.csum_flags = flgs;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (mvgbe_newbuf(sc, cur, NULL, dmamap) == ENOBUFS) {
			struct mbuf *m0;

			m0 = m_devget(mtod(m, char *), total_len, 0, ifp, NULL);
			mvgbe_newbuf(sc, cur, m, dmamap);
			if (m0 == NULL) {
				aprint_error_ifnet(ifp,
				    "no receive buffers available --"
				    " packet dropped!\n");
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		/* Skip the first 2 bytes (HW header) */
		m_adj(m, MVGBE_HWHEADER_SIZE);

		ifp->if_ipackets++;

		bpf_mtap(ifp, m);

		/* pass it on. */
		(*ifp->if_input)(ifp, m);
	}
}
static void
mvgbe_txeof(struct mvgbe_softc *sc)
{
	struct mvgbe_chain_data *cdata = &sc->sc_cdata;
	struct mvgbe_tx_desc *cur_tx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mvgbe_txmap_entry *entry;
	int idx;

	DPRINTFN(3, ("mvgbe_txeof\n"));

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = cdata->mvgbe_tx_cons;
	while (idx != cdata->mvgbe_tx_prod) {
		MVGBE_CDTXSYNC(sc, idx, 1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->sc_rdata->mvgbe_tx_ring[idx];
#ifdef MVGBE_DEBUG
		if (mvgbe_debug >= 3)
			mvgbe_dump_txdesc(cur_tx, idx);
#endif
		if ((cur_tx->cmdsts & MVGBE_BUFFER_OWNED_MASK) ==
		    MVGBE_BUFFER_OWNED_BY_DMA) {
			MVGBE_CDTXSYNC(sc, idx, 1, BUS_DMASYNC_PREREAD);
			break;
		}
		if (cur_tx->cmdsts & MVGBE_TX_LAST_DESC)
			ifp->if_opackets++;
		if (cur_tx->cmdsts & MVGBE_ERROR_SUMMARY) {
			int err = cur_tx->cmdsts & MVGBE_TX_ERROR_CODE_MASK;

			if (err == MVGBE_TX_LATE_COLLISION_ERROR)
				ifp->if_collisions++;
			if (err == MVGBE_TX_UNDERRUN_ERROR)
				ifp->if_oerrors++;
			if (err == MVGBE_TX_EXCESSIVE_COLLISION_ERRO)
				ifp->if_collisions++;
		}
		if (cdata->mvgbe_tx_chain[idx].mvgbe_mbuf != NULL) {
			entry = cdata->mvgbe_tx_map[idx];

			m_freem(cdata->mvgbe_tx_chain[idx].mvgbe_mbuf);
			cdata->mvgbe_tx_chain[idx].mvgbe_mbuf = NULL;

			bus_dmamap_sync(sc->sc_dmat, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->sc_dmat, entry->dmamap);
			SIMPLEQ_INSERT_TAIL(&sc->sc_txmap_head, entry, link);
			cdata->mvgbe_tx_map[idx] = NULL;
		}
		cdata->mvgbe_tx_cnt--;
		idx = MVGBE_TX_RING_NEXT(idx);
	}
	if (cdata->mvgbe_tx_cnt == 0)
		ifp->if_timer = 0;

	if (cdata->mvgbe_tx_cnt < MVGBE_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	cdata->mvgbe_tx_cons = idx;
}

static uint8_t
mvgbe_crc8(const uint8_t *data, size_t size)
{
	int bit;
	uint8_t byte;
	uint8_t crc = 0;
	const uint8_t poly = 0x07;

	while (size--)
		for (byte = *data++, bit = NBBY - 1; bit >= 0; bit--)
			crc = (crc << 1) ^
			    ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);

	return crc;
}
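/*
 * Editor's note (illustrative, not in the original source): this is a
 * bitwise CRC-8 with polynomial 0x07 (x^8 + x^2 + x + 1), MSB first.
 * mvgbe_filter_setup() below uses the 8-bit result i to index the
 * "other" multicast table: e.g. i == 0x9c selects dfomt[0x9c >> 2] ==
 * dfomt[0x27], byte lane 0x9c & 3 == 0.
 */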
static uint8_t
mvgbe_crc8(const uint8_t *data, size_t size)
{
	int bit;
	uint8_t byte;
	uint8_t crc = 0;
	const uint8_t poly = 0x07;

	while (size--)
		for (byte = *data++, bit = NBBY - 1; bit >= 0; bit--)
			crc = (crc << 1) ^
			    ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);

	return crc;
}

CTASSERT(MVGBE_NDFSMT == MVGBE_NDFOMT);

static void
mvgbe_filter_setup(struct mvgbe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t dfut[MVGBE_NDFUT], dfsmt[MVGBE_NDFSMT], dfomt[MVGBE_NDFOMT];
	uint32_t pxc;
	int i;
	const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};

	memset(dfut, 0, sizeof(dfut));
	memset(dfsmt, 0, sizeof(dfsmt));
	memset(dfomt, 0, sizeof(dfomt));

	if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
		goto allmulti;
	}

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/* ranges are complex and somewhat rare */
			goto allmulti;
		}
		/* chip handles some IPv4 multicast specially */
		if (memcmp(enm->enm_addrlo, special, 5) == 0) {
			i = enm->enm_addrlo[5];
			dfsmt[i>>2] |=
			    MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
		} else {
			i = mvgbe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
			dfomt[i>>2] |=
			    MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
		}

		ETHER_NEXT_MULTI(step, enm);
	}
	goto set;

allmulti:
	if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
		for (i = 0; i < MVGBE_NDFSMT; i++) {
			dfsmt[i] = dfomt[i] =
			    MVGBE_DF(0, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
			    MVGBE_DF(1, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
			    MVGBE_DF(2, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS) |
			    MVGBE_DF(3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
		}
	}

set:
	pxc = MVGBE_READ(sc, MVGBE_PXC);
	pxc &= ~MVGBE_PXC_UPM;
	pxc |= MVGBE_PXC_RB | MVGBE_PXC_RBIP | MVGBE_PXC_RBARP;
	if (ifp->if_flags & IFF_BROADCAST) {
		pxc &= ~(MVGBE_PXC_RB | MVGBE_PXC_RBIP | MVGBE_PXC_RBARP);
	}
	if (ifp->if_flags & IFF_PROMISC) {
		pxc |= MVGBE_PXC_UPM;
	}
	MVGBE_WRITE(sc, MVGBE_PXC, pxc);

	/* Set Destination Address Filter Unicast Table */
	i = sc->sc_enaddr[5] & 0xf;		/* last nibble */
	dfut[i>>2] = MVGBE_DF(i&3, MVGBE_DF_QUEUE(0) | MVGBE_DF_PASS);
	MVGBE_WRITE_FILTER(sc, MVGBE_DFUT, dfut, MVGBE_NDFUT);

	/* Set Destination Address Filter Multicast Tables */
	MVGBE_WRITE_FILTER(sc, MVGBE_DFSMT, dfsmt, MVGBE_NDFSMT);
	MVGBE_WRITE_FILTER(sc, MVGBE_DFOMT, dfomt, MVGBE_NDFOMT);
}
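
/*
 * Filter table layout, for reference: each 32-bit table word appears
 * to pack four one-byte entries, so an 8-bit index i selects word
 * i >> 2 and byte lane i & 3, with MVGBE_DF() shifting the queue/pass
 * bits into that lane.  For example, an address hashing to i = 0x2b
 * lands in dfomt[10] (0x2b >> 2), lane 3 (0x2b & 3); the special and
 * unicast tables are indexed the same way by the last address byte
 * and last address nibble, respectively.
 */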

#ifdef MVGBE_DEBUG
static void
mvgbe_dump_txdesc(struct mvgbe_tx_desc *desc, int idx)
{
#define DESC_PRINT(X)						\
	if (X)							\
		printf("txdesc[%d]." #X "=%#x\n", idx, X);

#if BYTE_ORDER == BIG_ENDIAN
	DESC_PRINT(desc->bytecnt);
	DESC_PRINT(desc->l4ichk);
	DESC_PRINT(desc->cmdsts);
	DESC_PRINT(desc->nextdescptr);
	DESC_PRINT(desc->bufptr);
#else	/* LITTLE_ENDIAN */
	DESC_PRINT(desc->cmdsts);
	DESC_PRINT(desc->l4ichk);
	DESC_PRINT(desc->bytecnt);
	DESC_PRINT(desc->bufptr);
	DESC_PRINT(desc->nextdescptr);
#endif
#undef DESC_PRINT
}
#endif

SYSCTL_SETUP(sysctl_mvgbe, "sysctl mvgbe subtree setup")
{
	int rc;
	const struct sysctlnode *node;

	if ((rc = sysctl_createv(clog, 0, NULL, NULL,
	    0, CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
		goto err;
	}

	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, "mvgbe",
	    SYSCTL_DESCR("mvgbe interface controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
		goto err;
	}

	mvgbe_root_num = node->sysctl_num;
	return;

err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}

static void
sysctl_mvgbe_init(struct mvgbe_softc *sc)
{
	const struct sysctlnode *node;
	int mvgbe_nodenum;

	if (sysctl_createv(&sc->mvgbe_clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
	    SYSCTL_DESCR("mvgbe per-controller controls"),
	    NULL, 0, NULL, 0, CTL_HW, mvgbe_root_num, CTL_CREATE,
	    CTL_EOL) != 0) {
		aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
		return;
	}
	mvgbe_nodenum = node->sysctl_num;

	/* interrupt moderation sysctls */
	if (sysctl_createv(&sc->mvgbe_clog, 0, NULL, &node,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "ipginttx",
	    SYSCTL_DESCR("mvgbe TX interrupt moderation timer"),
	    mvgbe_sysctl_ipginttx, 0, (void *)sc,
	    0, CTL_HW, mvgbe_root_num, mvgbe_nodenum, CTL_CREATE,
	    CTL_EOL) != 0) {
		aprint_normal_dev(sc->sc_dev,
		    "couldn't create ipginttx sysctl node\n");
	}
	if (sysctl_createv(&sc->mvgbe_clog, 0, NULL, &node,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "ipgintrx",
	    SYSCTL_DESCR("mvgbe RX interrupt moderation timer"),
	    mvgbe_sysctl_ipgintrx, 0, (void *)sc,
	    0, CTL_HW, mvgbe_root_num, mvgbe_nodenum, CTL_CREATE,
	    CTL_EOL) != 0) {
		aprint_normal_dev(sc->sc_dev,
		    "couldn't create ipgintrx sysctl node\n");
	}
}

static int
mvgbe_sysctl_ipginttx(SYSCTLFN_ARGS)
{
	int error;
	unsigned int t;
	struct sysctlnode node;
	struct mvgbec_softc *csc;
	struct mvgbe_softc *sc;

	node = *rnode;
	sc = node.sysctl_data;
	csc = device_private(device_parent(sc->sc_dev));
	t = sc->sc_ipginttx;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (mvgbe_ipginttx(csc, sc, t) < 0)
		return EINVAL;
	/*
	 * Update the softc with the sysctl-changed value, and mark
	 * it for hardware update.
	 */
	sc->sc_ipginttx = t;

	return 0;
}
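
/*
 * mvgbe_sysctl_ipgintrx below mirrors mvgbe_sysctl_ipginttx above:
 * copy the current softc value into a local, let sysctl_lookup()
 * handle the copy-out of the old value and the copy-in of any new
 * one, then validate the new setting against the hardware before
 * committing it to the softc.  Plain reads (newp == NULL) return
 * early without touching the device.
 */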

static int
mvgbe_sysctl_ipgintrx(SYSCTLFN_ARGS)
{
	int error;
	unsigned int t;
	struct sysctlnode node;
	struct mvgbec_softc *csc;
	struct mvgbe_softc *sc;

	node = *rnode;
	sc = node.sysctl_data;
	csc = device_private(device_parent(sc->sc_dev));
	t = sc->sc_ipgintrx;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (mvgbe_ipgintrx(csc, sc, t) < 0)
		return EINVAL;
	/*
	 * Update the softc with the sysctl-changed value, and mark
	 * it for hardware update.
	 */
	sc->sc_ipgintrx = t;

	return 0;
}
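
/*
 * Usage sketch (illustrative, not from the original source): given the
 * nodes created in sysctl_mvgbe_init(), the moderation timers should
 * appear per device under hw.mvgbe, e.g. something like
 *
 *	sysctl -w hw.mvgbe.mvgbe0.ipginttx=<ticks>
 *
 * where "mvgbe0" is the name the device attached with.
 */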