1 /* $NetBSD: if_admsw.c,v 1.5 2008/12/16 22:35:24 christos Exp $ */ 2 3 /*- 4 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or 8 * without modification, are permitted provided that the following 9 * conditions are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above 13 * copyright notice, this list of conditions and the following 14 * disclaimer in the documentation and/or other materials provided 15 * with the distribution. 16 * 3. The names of the authors may not be used to endorse or promote 17 * products derived from this software without specific prior 18 * written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY 21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 23 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, 25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, 27 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 29 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY 31 * OF SUCH DAMAGE. 32 */ 33 /* 34 * Copyright (c) 2001 Wasabi Systems, Inc. 35 * All rights reserved. 36 * 37 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 1. 
Redistributions of source code must retain the above copyright 43 * notice, this list of conditions and the following disclaimer. 44 * 2. Redistributions in binary form must reproduce the above copyright 45 * notice, this list of conditions and the following disclaimer in the 46 * documentation and/or other materials provided with the distribution. 47 * 3. All advertising materials mentioning features or use of this software 48 * must display the following acknowledgement: 49 * This product includes software developed for the NetBSD Project by 50 * Wasabi Systems, Inc. 51 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 52 * or promote products derived from this software without specific prior 53 * written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 58 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 59 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 60 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 61 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 62 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 63 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 64 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 65 * POSSIBILITY OF SUCH DAMAGE. 66 */ 67 68 /* 69 * Device driver for Alchemy Semiconductor Au1x00 Ethernet Media 70 * Access Controller. 71 * 72 * TODO: 73 * 74 * Better Rx buffer management; we want to get new Rx buffers 75 * to the chip more quickly than we currently do. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_admsw.c,v 1.5 2008/12/16 22:35:24 christos Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <prop/proplib.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <sys/gpio.h>
#include <dev/gpio/gpiovar.h>

#include <mips/adm5120/include/adm5120reg.h>
#include <mips/adm5120/include/adm5120var.h>
#include <mips/adm5120/include/adm5120_obiovar.h>
#include <mips/adm5120/dev/if_admswreg.h>
#include <mips/adm5120/dev/if_admswvar.h>

/*
 * Per-VLAN port membership masks.  Bit N selects switch port N;
 * bit 6 is the CPU port.  The default maps each of the SW_DEVS
 * logical interfaces to exactly one front-panel port plus the CPU
 * port.  This table is also updated at runtime via SIOCSDRVSPEC
 * (see admsw_ioctl) and pushed to the hardware by admsw_setvlan().
 */
static uint8_t vlan_matrix[SW_DEVS] = {
	(1 << 6) | (1 << 0),		/* CPU + port0 */
	(1 << 6) | (1 << 1),		/* CPU + port1 */
	(1 << 6) | (1 << 2),		/* CPU + port2 */
	(1 << 6) | (1 << 3),		/* CPU + port3 */
	(1 << 6) | (1 << 4),		/* CPU + port4 */
	(1 << 6) | (1 << 5),		/* CPU + port5 */
};

/* Event-counter bump; compiles away when counters are disabled. */
#ifdef ADMSW_EVENT_COUNTERS
#define	ADMSW_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	ADMSW_EVCNT_INCR(ev)	/* nothing */
#endif

static void	admsw_start(struct ifnet *);
static void	admsw_watchdog(struct ifnet *);
static int	admsw_ioctl(struct ifnet *, u_long, void *);
static int	admsw_init(struct ifnet *);
static void	admsw_stop(struct ifnet *, int);

static void	admsw_shutdown(void *);

static void	admsw_reset(struct admsw_softc *);
static void	admsw_set_filter(struct admsw_softc *);

static int	admsw_intr(void *);
static void	admsw_txintr(struct admsw_softc *, int);
static void	admsw_rxintr(struct admsw_softc *, int);
static int	admsw_add_rxbuf(struct admsw_softc *, int, int);
/* Convenience wrappers: third argument selects high (1) or low (0)
 * priority receive ring. */
#define	admsw_add_rxhbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 1)
#define	admsw_add_rxlbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 0)

static int	admsw_mediachange(struct ifnet *);
static void	admsw_mediastatus(struct ifnet *, struct ifmediareq *);

static int	admsw_match(struct device *, struct cfdata *, void *);
static void	admsw_attach(struct device *, struct device *, void *);

CFATTACH_DECL(admsw, sizeof(struct admsw_softc),
    admsw_match, admsw_attach, NULL, NULL);

/*
 * admsw_match:
 *
 *	Autoconfiguration match routine; accept the device when the
 *	obio attach-args name matches this driver's cfdata name.
 */
static int
admsw_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct obio_attach_args *aa = aux;

	return strcmp(aa->oba_name, cf->cf_name) == 0;
}

/* Register accessors; both expect a local `sc' in scope. */
#define	REG_READ(o)	bus_space_read_4(sc->sc_st, sc->sc_ioh, (o))
#define	REG_WRITE(o,v)	bus_space_write_4(sc->sc_st, sc->sc_ioh, (o),(v))


/*
 * admsw_init_bufs:
 *
 *	(Re-)initialize all four descriptor rings (high/low priority
 *	Tx and Rx): free any mbufs still attached to Tx slots, zero
 *	the Tx descriptors, replenish or reset Rx buffers, point the
 *	chip at the ring bases, and reset the software ring indices.
 */
static void
admsw_init_bufs(struct admsw_softc *sc)
{
	int i;
	struct admsw_desc *desc;

	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if (sc->sc_txhsoft[i].ds_mbuf != NULL) {
			m_freem(sc->sc_txhsoft[i].ds_mbuf);
			sc->sc_txhsoft[i].ds_mbuf = NULL;
		}
		desc = &sc->sc_txhdescs[i];
		desc->data = 0;
		desc->cntl = 0;
		desc->len = MAC_BUFLEN;
		desc->status = 0;
		ADMSW_CDTXHSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	/* Mark the last descriptor so the chip wraps to the start. */
	sc->sc_txhdescs[ADMSW_NTXHDESC - 1].data |= ADM5120_DMA_RINGEND;
	ADMSW_CDTXHSYNC(sc, ADMSW_NTXHDESC - 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if (sc->sc_rxhsoft[i].ds_mbuf == NULL) {
			if (admsw_add_rxhbuf(sc, i) != 0)
				panic("admsw_init_bufs\n");
		} else
			ADMSW_INIT_RXHDESC(sc, i);
	}

	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if (sc->sc_txlsoft[i].ds_mbuf != NULL) {
			m_freem(sc->sc_txlsoft[i].ds_mbuf);
			sc->sc_txlsoft[i].ds_mbuf = NULL;
		}
		desc = &sc->sc_txldescs[i];
		desc->data = 0;
		desc->cntl = 0;
		desc->len = MAC_BUFLEN;
		desc->status = 0;
		ADMSW_CDTXLSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txldescs[ADMSW_NTXLDESC - 1].data |= ADM5120_DMA_RINGEND;
	ADMSW_CDTXLSYNC(sc, ADMSW_NTXLDESC - 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if (sc->sc_rxlsoft[i].ds_mbuf == NULL) {
			if (admsw_add_rxlbuf(sc, i) != 0)
				panic("admsw_init_bufs\n");
		} else
			ADMSW_INIT_RXLDESC(sc, i);
	}

	/* Tell the hardware where each ring starts. */
	REG_WRITE(SEND_HBADDR_REG, ADMSW_CDTXHADDR(sc, 0));
	REG_WRITE(SEND_LBADDR_REG, ADMSW_CDTXLADDR(sc, 0));
	REG_WRITE(RECV_HBADDR_REG, ADMSW_CDRXHADDR(sc, 0));
	REG_WRITE(RECV_LBADDR_REG, ADMSW_CDRXLADDR(sc, 0));

	sc->sc_txfree = ADMSW_NTXLDESC;
	sc->sc_txnext = 0;
	sc->sc_txdirty = 0;
	sc->sc_rxptr = 0;
}

/*
 * admsw_setvlan:
 *
 *	Program the six per-VLAN port-membership bytes into the two
 *	VLAN group registers (four bytes in G1, two in G2).
 */
static void
admsw_setvlan(struct admsw_softc *sc, char matrix[6])
{
	uint32_t i;

	i = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16) + (matrix[3] << 24);
	REG_WRITE(VLAN_G1_REG, i);
	i = matrix[4] + (matrix[5] << 8);
	REG_WRITE(VLAN_G2_REG, i);
}

/*
 * admsw_reset:
 *
 *	Full hardware reset of the switch engine: quiesce DMA, reset
 *	the PHYs and the switch core, then reprogram CPU-port policy,
 *	PHY control, interrupt masks, flow-control thresholds, the
 *	VLAN matrix, and one unicast MAC address per VLAN.
 */
static void
admsw_reset(struct admsw_softc *sc)
{
	uint32_t wdog1;
	int i;

	/* Disable all ports and the CPU port before resetting. */
	REG_WRITE(PORT_CONF0_REG,
	    REG_READ(PORT_CONF0_REG) | PORT_CONF0_DP_MASK);
	REG_WRITE(CPUP_CONF_REG,
	    REG_READ(CPUP_CONF_REG) | CPUP_CONF_DCPUP);

	/* Wait for DMA to complete.  Overkill.  In 3ms, we can
	 * send at least two entire 1500-byte packets at 10 Mb/s.
	 */
	DELAY(3000);

	/* The datasheet recommends that we move all PHYs to reset
	 * state prior to software reset.
	 */
	REG_WRITE(PHY_CNTL2_REG,
	    REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK);

	/* Reset the switch. */
	REG_WRITE(ADMSW_SW_RES, 0x1);

	DELAY(100 * 1000);

	REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO);

	/* begin old code */
	REG_WRITE(CPUP_CONF_REG,
	    CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
	    CPUP_CONF_DMCP_MASK);

	REG_WRITE(PORT_CONF0_REG, PORT_CONF0_EMCP_MASK | PORT_CONF0_EMBP_MASK);

	/* Re-enable PHYs with autonegotiation and auto-MDIX. */
	REG_WRITE(PHY_CNTL2_REG,
	    REG_READ(PHY_CNTL2_REG) | PHY_CNTL2_ANE_MASK | PHY_CNTL2_PHYR_MASK |
	    PHY_CNTL2_AMDIX_MASK);

	REG_WRITE(PHY_CNTL3_REG, REG_READ(PHY_CNTL3_REG) | PHY_CNTL3_RNT);

	/* Mask, then acknowledge, all interrupts. */
	REG_WRITE(ADMSW_INT_MASK, INT_MASK);
	REG_WRITE(ADMSW_INT_ST, INT_MASK);

	/*
	 * While in DDB, we stop servicing interrupts, RX ring
	 * fills up and when free block counter falls behind FC
	 * threshold, the switch starts to emit 802.3x PAUSE
	 * frames.  This can upset peer switches.
	 *
	 * Stop this from happening by disabling FC and D2
	 * thresholds.
	 */
	REG_WRITE(FC_TH_REG,
	    REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK));

	admsw_setvlan(sc, vlan_matrix);

	/*
	 * Give each VLAN its own unicast MAC address: the base
	 * address with the last octet incremented per VLAN id.
	 */
	for (i = 0; i < SW_DEVS; i++) {
		REG_WRITE(MAC_WT1_REG,
		    sc->sc_enaddr[2] |
		    (sc->sc_enaddr[3]<<8) |
		    (sc->sc_enaddr[4]<<16) |
		    ((sc->sc_enaddr[5]+i)<<24));
		REG_WRITE(MAC_WT0_REG, (i<<MAC_WT0_VLANID_SHIFT) |
		    (sc->sc_enaddr[0]<<16) | (sc->sc_enaddr[1]<<24) |
		    MAC_WT0_WRITE | MAC_WT0_VLANID_EN);

		/* NOTE(review): busy-wait with no timeout — hangs the
		 * system if the write never completes; confirm against
		 * the ADM5120 datasheet whether a bound is needed. */
		while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE));
	}
	/* Make sure the hardware watchdog is left disabled. */
	wdog1 = REG_READ(ADM5120_WDOG1);
	REG_WRITE(ADM5120_WDOG1, wdog1 & ~ADM5120_WDOG1_WDE);
}

/*
 * admsw_attach:
 *
 *	Autoconfiguration attach routine: map registers, establish
 *	the interrupt, allocate/map/load the DMA control block and
 *	the per-descriptor DMA maps, reset the switch, then create
 *	and attach one Ethernet interface per switch port.
 */
static void
admsw_attach(struct device *parent, struct device *self, void *aux)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	struct admsw_softc *sc = (void *) self;
	struct obio_attach_args *aa = aux;
	struct ifnet *ifp;
	bus_dma_segment_t seg;
	int error, i, rseg;
	prop_data_t pd;

	printf(": ADM5120 Switch Engine, %d ports\n", SW_DEVS);

	sc->sc_dmat = aa->oba_dt;
	sc->sc_st = aa->oba_st;

	/* Take the MAC address from device properties; fall back to
	 * a fixed locally-administered address when none is set. */
	pd = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");

	if (pd == NULL) {
		enaddr[0] = 0x02;
		enaddr[1] = 0xaa;
		enaddr[2] = 0xbb;
		enaddr[3] = 0xcc;
		enaddr[4] = 0xdd;
		enaddr[5] = 0xee;
	} else
		memcpy(enaddr, prop_data_data_nocopy(pd), sizeof(enaddr));

	memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));

	printf("%s: base Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/* Map the device. */
	if (bus_space_map(sc->sc_st, aa->oba_addr, 512, 0, &sc->sc_ioh) != 0) {
		printf("%s: unable to map device\n", device_xname(&sc->sc_dev));
		return;
	}

	/* Hook up the interrupt handler. */
	sc->sc_ih = adm5120_intr_establish(aa->oba_irq, INTR_IRQ, admsw_intr, sc);

	if (sc->sc_ih == NULL) {
		printf("%s: unable to register interrupt handler\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct admsw_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct admsw_control_data), (void *)&sc->sc_control_data,
	    0)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct admsw_control_data), 1,
	    sizeof(struct admsw_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		return;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct admsw_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    2, MCLBYTES, 0, 0,
		    &sc->sc_txhsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create txh DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_txhsoft[i].ds_mbuf = NULL;
	}
	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    2, MCLBYTES, 0, 0,
		    &sc->sc_txlsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create txl DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_txlsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxhsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rxh DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_rxhsoft[i].ds_mbuf = NULL;
	}
	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxlsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rxl DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_rxlsoft[i].ds_mbuf = NULL;
	}

	admsw_init_bufs(sc);

	admsw_reset(sc);

	for (i = 0; i < SW_DEVS; i++) {
		ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange, admsw_mediastatus);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO);

		ifp = &sc->sc_ethercom[i].ec_if;
		strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
		/* NOTE(review): bumps the digit at offset 5 of the
		 * device name ("admsw0" -> "admsw1", ...); assumes the
		 * name is exactly 6 characters and i <= 9 — confirm. */
		ifp->if_xname[5] += i;
		ifp->if_softc = sc;
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		ifp->if_ioctl = admsw_ioctl;
		ifp->if_start = admsw_start;
		ifp->if_watchdog = admsw_watchdog;
		ifp->if_init = admsw_init;
		ifp->if_stop = admsw_stop;
		ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
		IFQ_SET_MAXLEN(&ifp->if_snd, max(ADMSW_NTXLDESC, IFQ_MAXLEN));
		IFQ_SET_READY(&ifp->if_snd);

		/* Attach the interface. */
		if_attach(ifp);
		ether_ifattach(ifp, enaddr);
		/* Each successive port gets the next MAC address. */
		enaddr[5]++;
	}

#ifdef ADMSW_EVENT_COUNTERS
	evcnt_attach_dynamic(&sc->sc_ev_txstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txstall");
	evcnt_attach_dynamic(&sc->sc_ev_rxstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxstall");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
#if 1
	evcnt_attach_dynamic(&sc->sc_ev_rxsync, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxsync");
#endif
#endif

	admwdog_attach(sc);

	/* Make sure the interface is shutdown during reboot. */
	sc->sc_sdhook = shutdownhook_establish(admsw_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	/* leave interrupts and cpu port disabled */
	return;
}


/*
 * admsw_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
admsw_shutdown(void *arg)
{
	struct admsw_softc *sc = arg;
	int i;

	for (i = 0; i < SW_DEVS; i++)
		admsw_stop(&sc->sc_ethercom[i].ec_if, 1);
}

/*
 * admsw_start: [ifnet interface function]
 *
 *	Start packet transmission on the interface.  All SW_DEVS
 *	interfaces share one low-priority Tx ring; a static rotor
 *	(`vlan') round-robins among their send queues for fairness.
 */
static void
admsw_start(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct admsw_descsoft *ds;
	struct admsw_desc *desc;
	bus_dmamap_t dmamap;
	struct ether_header *eh;
	int error, nexttx, len, i;
	static int vlan = 0;

	/*
	 * Loop through the send queues, setting up transmit descriptors
	 * until we drain the queues, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Advance the rotor, then scan for a queue with work. */
		vlan++;
		if (vlan == SW_DEVS)
			vlan = 0;
		i = vlan;
		for (;;) {
			ifp = &sc->sc_ethercom[i].ec_if;
			if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) ==
			    IFF_RUNNING) {
				/* Grab a packet off the queue. */
				IFQ_POLL(&ifp->if_snd, m0);
				if (m0 != NULL)
					break;
			}
			i++;
			if (i == SW_DEVS)
				i = 0;
			/* Full circle without finding work: done. */
			if (i == vlan)
				return;
		}
		vlan = i;
		m = NULL;

		/* Get a spare descriptor. */
		if (sc->sc_txfree == 0) {
			/* No more slots left; notify upper layer. */
			ifp->if_flags |= IFF_OACTIVE;
			ADMSW_EVCNT_INCR(&sc->sc_ev_txstall);
			break;
		}
		nexttx = sc->sc_txnext;
		desc = &sc->sc_txldescs[nexttx];
		ds = &sc->sc_txlsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.  Runts are also copied so they can be
		 * padded up to the minimum frame size.
		 */
		if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			if (m->m_pkthdr.len < ETHER_MIN_LEN) {
				if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len)
					panic("admsw_start: M_TRAILINGSPACE\n");
				/* NOTE(review): only ETHER_MIN_LEN -
				 * ETHER_CRC_LEN - len bytes are zeroed
				 * but the length is set to ETHER_MIN_LEN,
				 * so the final ETHER_CRC_LEN bytes go out
				 * uninitialized — confirm whether the
				 * hardware overwrites them with the CRC. */
				memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
			}
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* Transmit the copy, free the original. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* The descriptor carries at most two buffer segments. */
		if (dmamap->dm_nsegs != 1 && dmamap->dm_nsegs != 2)
			panic("admsw_start: dm_nsegs == %d\n", dmamap->dm_nsegs);
		desc->data = dmamap->dm_segs[0].ds_addr;
		desc->len = len = dmamap->dm_segs[0].ds_len;
		if (dmamap->dm_nsegs > 1) {
			len += dmamap->dm_segs[1].ds_len;
			desc->cntl = dmamap->dm_segs[1].ds_addr | ADM5120_DMA_BUF2ENABLE;
		} else
			desc->cntl = 0;
		/* Low 6 status bits select the destination VLAN. */
		desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
		eh = mtod(m0, struct ether_header *);
		if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
		    m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			desc->status |= ADM5120_DMA_CSUM;
		if (nexttx == ADMSW_NTXLDESC - 1)
			desc->data |= ADM5120_DMA_RINGEND;
		/* Hand the descriptor to the chip last. */
		desc->data |= ADM5120_DMA_OWN;

		/* Sync the descriptor. */
		ADMSW_CDTXLSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Kick the transmitter. */
		REG_WRITE(SEND_TRIG_REG, 1);
		/* printf("send slot %d\n",nexttx); */

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txfree--;
		sc->sc_txnext = ADMSW_NEXTTXL(nexttx);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER */

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_ethercom[0].ec_if.if_timer = 5;
	}
}

/*
 * admsw_watchdog: [ifnet interface function]
 *
 *	Watchdog timer handler.
697 */ 698 static void 699 admsw_watchdog(struct ifnet *ifp) 700 { 701 struct admsw_softc *sc = ifp->if_softc; 702 int vlan; 703 704 #if 1 705 /* Check if an interrupt was lost. */ 706 if (sc->sc_txfree == ADMSW_NTXLDESC) { 707 printf("%s: watchdog false alarm\n", sc->sc_dev.dv_xname); 708 return; 709 } 710 if (sc->sc_ethercom[0].ec_if.if_timer != 0) 711 printf("%s: watchdog timer is %d!\n", sc->sc_dev.dv_xname, sc->sc_ethercom[0].ec_if.if_timer); 712 admsw_txintr(sc, 0); 713 if (sc->sc_txfree == ADMSW_NTXLDESC) { 714 printf("%s: tx IRQ lost (queue empty)\n", sc->sc_dev.dv_xname); 715 return; 716 } 717 if (sc->sc_ethercom[0].ec_if.if_timer != 0) { 718 printf("%s: tx IRQ lost (timer recharged)\n", sc->sc_dev.dv_xname); 719 return; 720 } 721 #endif 722 723 printf("%s: device timeout, txfree = %d\n", sc->sc_dev.dv_xname, sc->sc_txfree); 724 for (vlan = 0; vlan < SW_DEVS; vlan++) 725 admsw_stop(&sc->sc_ethercom[vlan].ec_if, 0); 726 for (vlan = 0; vlan < SW_DEVS; vlan++) 727 (void) admsw_init(&sc->sc_ethercom[vlan].ec_if); 728 729 /* Try to get more packets going. */ 730 admsw_start(ifp); 731 } 732 733 /* 734 * admsw_ioctl: [ifnet interface function] 735 * 736 * Handle control requests from the operator. 
737 */ 738 static int 739 admsw_ioctl(struct ifnet *ifp, u_long cmd, void *data) 740 { 741 struct admsw_softc *sc = ifp->if_softc; 742 struct ifdrv *ifd; 743 int s, error, port; 744 745 s = splnet(); 746 747 switch (cmd) { 748 case SIOCSIFCAP: 749 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) 750 error = 0; 751 break; 752 case SIOCSIFMEDIA: 753 case SIOCGIFMEDIA: 754 port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 755 if (port >= SW_DEVS) 756 error = EOPNOTSUPP; 757 else 758 error = ifmedia_ioctl(ifp, (struct ifreq *)data, 759 &sc->sc_ifmedia[port], cmd); 760 break; 761 762 case SIOCGDRVSPEC: 763 case SIOCSDRVSPEC: 764 ifd = (struct ifdrv *) data; 765 if (ifd->ifd_cmd != 0 || ifd->ifd_len != sizeof(vlan_matrix)) { 766 error = EINVAL; 767 break; 768 } 769 if (cmd == SIOCGDRVSPEC) { 770 error = copyout(vlan_matrix, ifd->ifd_data, 771 sizeof(vlan_matrix)); 772 } else { 773 error = copyin(ifd->ifd_data, vlan_matrix, 774 sizeof(vlan_matrix)); 775 admsw_setvlan(sc, vlan_matrix); 776 } 777 break; 778 779 default: 780 error = ether_ioctl(ifp, cmd, data); 781 if (error == ENETRESET) { 782 /* 783 * Multicast list has changed; set the hardware filter 784 * accordingly. 785 */ 786 admsw_set_filter(sc); 787 error = 0; 788 } 789 break; 790 } 791 792 /* Try to get more packets going. */ 793 admsw_start(ifp); 794 795 splx(s); 796 return (error); 797 } 798 799 800 /* 801 * admsw_intr: 802 * 803 * Interrupt service routine. 
 */
static int
admsw_intr(void *arg)
{
	struct admsw_softc *sc = arg;
	uint32_t pending;
	char buf[64];

	pending = REG_READ(ADMSW_INT_ST);

	/* Log any interrupt cause beyond the ones we expect. */
	if ((pending & ~(ADMSW_INTR_RHD|ADMSW_INTR_RLD|ADMSW_INTR_SHD|ADMSW_INTR_SLD|ADMSW_INTR_W1TE|ADMSW_INTR_W0TE)) != 0) {
		snprintb(buf, sizeof(buf), ADMSW_INT_FMT, pending);
		printf("%s: pending=%s\n", __func__, buf);
	}
	/* Acknowledge everything we saw. */
	REG_WRITE(ADMSW_INT_ST, pending);

	/* No interface is up; nothing further to do. */
	if (sc->ndevs == 0)
		return (0);

	if ((pending & ADMSW_INTR_RHD) != 0)
		admsw_rxintr(sc, 1);

	if ((pending & ADMSW_INTR_RLD) != 0)
		admsw_rxintr(sc, 0);

	if ((pending & ADMSW_INTR_SHD) != 0)
		admsw_txintr(sc, 1);

	if ((pending & ADMSW_INTR_SLD) != 0)
		admsw_txintr(sc, 0);

	return (1);
}

/*
 * admsw_txintr:
 *
 *	Helper; handle transmit interrupts.  Reaps completed Tx
 *	descriptors, frees their mbufs, and restarts transmission.
 */
static void
admsw_txintr(struct admsw_softc *sc, int prio)
{
	struct ifnet *ifp;
	struct admsw_desc *desc;
	struct admsw_descsoft *ds;
	int i, vlan;
	int gotone = 0;

	/* printf("txintr: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
	for (i = sc->sc_txdirty; sc->sc_txfree != ADMSW_NTXLDESC;
	    i = ADMSW_NEXTTXL(i)) {

		ADMSW_CDTXLSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txldescs[i];
		ds = &sc->sc_txlsoft[i];
		/* Still owned by the chip: stop reaping here. */
		if (desc->data & ADM5120_DMA_OWN) {
			ADMSW_CDTXLSYNC(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		/* The VLAN bit set at send time tells us whose packet
		 * this was, so the right interface gets the credit. */
		vlan = ffs(desc->status & 0x3f) - 1;
		if (vlan < 0 || vlan >= SW_DEVS)
			panic("admsw_txintr: bad vlan\n");
		ifp = &sc->sc_ethercom[vlan].ec_if;
		gotone = 1;
		/* printf("clear tx slot %d\n",i); */

		ifp->if_opackets++;

		sc->sc_txfree++;
	}

	if (gotone) {
		sc->sc_txdirty = i;
#ifdef ADMSW_EVENT_COUNTERS
		ADMSW_EVCNT_INCR(&sc->sc_ev_txintr);
#endif
		/* Descriptors freed up: clear OACTIVE everywhere. */
		for (vlan = 0; vlan < SW_DEVS; vlan++)
			sc->sc_ethercom[vlan].ec_if.if_flags &= ~IFF_OACTIVE;

		ifp = &sc->sc_ethercom[0].ec_if;

		/* Try to queue more packets. */
		admsw_start(ifp);

		/*
		 * If there are no more pending transmissions,
		 * cancel the watchdog timer.
		 */
		if (sc->sc_txfree == ADMSW_NTXLDESC)
			ifp->if_timer = 0;

	}

	/* printf("txintr end: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
}

/*
 * admsw_rxintr:
 *
 *	Helper; handle receive interrupts.  Only the low-priority
 *	ring is serviced; a high-priority packet is unexpected.
 */
static void
admsw_rxintr(struct admsw_softc *sc, int high)
{
	struct ifnet *ifp;
	struct admsw_descsoft *ds;
	struct mbuf *m;
	uint32_t stat;
	int i, len, port, vlan;

	/* printf("rxintr\n"); */
	if (high)
		panic("admsw_rxintr: high priority packet\n");

#ifdef ADMSW_EVENT_COUNTERS
	int pkts = 0;
#endif

#if 1
	/*
	 * Sanity pass: if the descriptor at sc_rxptr is still owned
	 * by the chip, we may have fallen behind (e.g. after sitting
	 * in DDB).  Walk the ring looking for a descriptor we own
	 * and, if the head really is stale, resynchronize sc_rxptr
	 * with the chip's working position.
	 */
	ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
		ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	else {
		i = sc->sc_rxptr;
		do {
			ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			i = ADMSW_NEXTRXL(i);
			/* the ring is empty, just return. */
			if (i == sc->sc_rxptr)
				return;
			ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		} while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN);
		ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Re-check the head before committing to a resync. */
		ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
			ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		else {
			ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			/* We've fallen behind the chip: catch it. */
			printf("%s: RX ring resync, base=%x, work=%x, %d -> %d\n",
			    sc->sc_dev.dv_xname, REG_READ(RECV_LBADDR_REG),
			    REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i);
			sc->sc_rxptr = i;
			ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync);
		}
	}
#endif
	for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) {
		ds = &sc->sc_rxlsoft[i];

		ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Chip still owns this one: no more packets. */
		if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) {
			ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			break;
		}

		/* printf("process slot %d\n",i); */

#ifdef ADMSW_EVENT_COUNTERS
		pkts++;
#endif

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		stat = sc->sc_rxldescs[i].status;
		len = (stat & ADM5120_DMA_LEN) >> ADM5120_DMA_LENSHIFT;
		len -= ETHER_CRC_LEN;
		port = (stat & ADM5120_DMA_PORTID) >> ADM5120_DMA_PORTSHIFT;
		/* Map the ingress port back to its VLAN/interface;
		 * fall back to interface 0 if no VLAN claims it. */
		for (vlan = 0; vlan < SW_DEVS; vlan++)
			if ((1 << port) & vlan_matrix[vlan])
				break;
		if (vlan == SW_DEVS)
			vlan = 0;
		ifp = &sc->sc_ethercom[vlan].ec_if;

		m = ds->ds_mbuf;
		/* Replenish the slot first; drop the packet if we
		 * cannot get a replacement buffer. */
		if (admsw_add_rxlbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			ADMSW_INIT_RXLDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		if ((stat & ADM5120_DMA_TYPE) == ADM5120_DMA_TYPE_IP) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (stat & ADM5120_DMA_CSUMFAIL)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
		ifp->if_ipackets++;
	}
#ifdef ADMSW_EVENT_COUNTERS
	if (pkts)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxintr);

	/* A full ring's worth in one pass suggests we stalled. */
	if (pkts == ADMSW_NRXLDESC)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxstall);
#endif

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * admsw_init: [ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 *	The hardware is (re)initialized only when the first of the
 *	SW_DEVS interfaces comes up; sc->ndevs counts how many are up.
 */
static int
admsw_init(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;

	/* printf("admsw_init called\n"); */

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		if (sc->ndevs == 0) {
			admsw_init_bufs(sc);
			admsw_reset(sc);
			REG_WRITE(CPUP_CONF_REG,
			    CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
			    CPUP_CONF_DMCP_MASK);
			/* clear all pending interrupts */
			REG_WRITE(ADMSW_INT_ST, INT_MASK);

			/* enable needed interrupts */
			REG_WRITE(ADMSW_INT_MASK, REG_READ(ADMSW_INT_MASK) &
			    ~(ADMSW_INTR_SHD | ADMSW_INTR_SLD | ADMSW_INTR_RHD |
			    ADMSW_INTR_RLD | ADMSW_INTR_HDF | ADMSW_INTR_LDF));
		}
		sc->ndevs++;
	}

	/* Set the receive filter. */
	admsw_set_filter(sc);

	/* mark iface as running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

/*
 * admsw_stop: [ifnet interface function]
 *
 *	Stop transmission on the interface.
1074 */ 1075 static void 1076 admsw_stop(struct ifnet *ifp, int disable) 1077 { 1078 struct admsw_softc *sc = ifp->if_softc; 1079 1080 /* printf("admsw_stop: %d\n",disable); */ 1081 1082 if (!(ifp->if_flags & IFF_RUNNING)) 1083 return; 1084 1085 if (--sc->ndevs == 0) { 1086 /* printf("debug: de-initializing hardware\n"); */ 1087 1088 /* disable cpu port */ 1089 REG_WRITE(CPUP_CONF_REG, 1090 CPUP_CONF_DCPUP | CPUP_CONF_CRCP | 1091 CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK); 1092 1093 /* XXX We should disable, then clear? --dyoung */ 1094 /* clear all pending interrupts */ 1095 REG_WRITE(ADMSW_INT_ST, INT_MASK); 1096 1097 /* disable interrupts */ 1098 REG_WRITE(ADMSW_INT_MASK, INT_MASK); 1099 } 1100 1101 /* Mark the interface as down and cancel the watchdog timer. */ 1102 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1103 ifp->if_timer = 0; 1104 1105 return; 1106 } 1107 1108 /* 1109 * admsw_set_filter: 1110 * 1111 * Set up the receive filter. 1112 */ 1113 static void 1114 admsw_set_filter(struct admsw_softc *sc) 1115 { 1116 int i; 1117 uint32_t allmc, anymc, conf, promisc; 1118 struct ether_multi *enm; 1119 struct ethercom *ec; 1120 struct ifnet *ifp; 1121 struct ether_multistep step; 1122 1123 /* Find which ports should be operated in promisc mode. 
*/ 1124 allmc = anymc = promisc = 0; 1125 for (i = 0; i < SW_DEVS; i++) { 1126 ec = &sc->sc_ethercom[i]; 1127 ifp = &ec->ec_if; 1128 if (ifp->if_flags & IFF_PROMISC) 1129 promisc |= vlan_matrix[i]; 1130 1131 ifp->if_flags &= ~IFF_ALLMULTI; 1132 1133 ETHER_FIRST_MULTI(step, ec, enm); 1134 while (enm != NULL) { 1135 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1136 ETHER_ADDR_LEN) != 0) { 1137 printf("%s: punting on mcast range\n", 1138 __func__); 1139 ifp->if_flags |= IFF_ALLMULTI; 1140 allmc |= vlan_matrix[i]; 1141 break; 1142 } 1143 1144 anymc |= vlan_matrix[i]; 1145 1146 #if 0 1147 /* XXX extract subroutine --dyoung */ 1148 REG_WRITE(MAC_WT1_REG, 1149 enm->enm_addrlo[2] | 1150 (enm->enm_addrlo[3] << 8) | 1151 (enm->enm_addrlo[4] << 16) | 1152 (enm->enm_addrlo[5] << 24)); 1153 REG_WRITE(MAC_WT0_REG, 1154 (i << MAC_WT0_VLANID_SHIFT) | 1155 (enm->enm_addrlo[0] << 16) | 1156 (enm->enm_addrlo[1] << 24) | 1157 MAC_WT0_WRITE | MAC_WT0_VLANID_EN); 1158 /* timeout? */ 1159 while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE)); 1160 #endif 1161 1162 /* load h/w with mcast address, port = CPU */ 1163 ETHER_NEXT_MULTI(step, enm); 1164 } 1165 } 1166 1167 conf = REG_READ(CPUP_CONF_REG); 1168 /* 1 Disable forwarding of unknown & multicast packets to 1169 * CPU on all ports. 1170 * 2 Enable forwarding of unknown & multicast packets to 1171 * CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set. 1172 */ 1173 conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK; 1174 /* Enable forwarding of unknown packets to CPU on selected ports. */ 1175 conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK); 1176 conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK); 1177 conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK); 1178 REG_WRITE(CPUP_CONF_REG, conf); 1179 } 1180 1181 /* 1182 * admsw_add_rxbuf: 1183 * 1184 * Add a receive buffer to the indicated descriptor. 
1185 */ 1186 int 1187 admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high) 1188 { 1189 struct admsw_descsoft *ds; 1190 struct mbuf *m; 1191 int error; 1192 1193 if (high) 1194 ds = &sc->sc_rxhsoft[idx]; 1195 else 1196 ds = &sc->sc_rxlsoft[idx]; 1197 1198 MGETHDR(m, M_DONTWAIT, MT_DATA); 1199 if (m == NULL) 1200 return (ENOBUFS); 1201 1202 MCLGET(m, M_DONTWAIT); 1203 if ((m->m_flags & M_EXT) == 0) { 1204 m_freem(m); 1205 return (ENOBUFS); 1206 } 1207 1208 if (ds->ds_mbuf != NULL) 1209 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); 1210 1211 ds->ds_mbuf = m; 1212 1213 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap, 1214 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1215 BUS_DMA_READ | BUS_DMA_NOWAIT); 1216 if (error) { 1217 printf("%s: can't load rx DMA map %d, error = %d\n", 1218 sc->sc_dev.dv_xname, idx, error); 1219 panic("admsw_add_rxbuf"); /* XXX */ 1220 } 1221 1222 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, 1223 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1224 1225 if (high) 1226 ADMSW_INIT_RXHDESC(sc, idx); 1227 else 1228 ADMSW_INIT_RXLDESC(sc, idx); 1229 1230 return (0); 1231 } 1232 1233 int 1234 admsw_mediachange(struct ifnet *ifp) 1235 { 1236 struct admsw_softc *sc = ifp->if_softc; 1237 int port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 1238 struct ifmedia *ifm = &sc->sc_ifmedia[port]; 1239 int old, new, val; 1240 1241 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1242 return (EINVAL); 1243 1244 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 1245 val = PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX; 1246 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { 1247 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1248 val = PHY_CNTL2_100M|PHY_CNTL2_FDX; 1249 else 1250 val = PHY_CNTL2_100M; 1251 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) { 1252 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1253 val = PHY_CNTL2_FDX; 1254 else 1255 val = 0; 1256 } else 1257 return (EINVAL); 1258 1259 old = REG_READ(PHY_CNTL2_REG); 1260 new = old & 
~((PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX) << port); 1261 new |= (val << port); 1262 1263 if (new != old) 1264 REG_WRITE(PHY_CNTL2_REG, new); 1265 1266 return (0); 1267 } 1268 1269 void 1270 admsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1271 { 1272 struct admsw_softc *sc = ifp->if_softc; 1273 int port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 1274 int status; 1275 1276 ifmr->ifm_status = IFM_AVALID; 1277 ifmr->ifm_active = IFM_ETHER; 1278 1279 status = REG_READ(PHY_ST_REG) >> port; 1280 1281 if ((status & PHY_ST_LINKUP) == 0) { 1282 ifmr->ifm_active |= IFM_NONE; 1283 return; 1284 } 1285 1286 ifmr->ifm_status |= IFM_ACTIVE; 1287 ifmr->ifm_active |= (status & PHY_ST_100M) ? IFM_100_TX : IFM_10_T; 1288 if (status & PHY_ST_FDX) 1289 ifmr->ifm_active |= IFM_FDX; 1290 } 1291