1 /* $NetBSD: if_admsw.c,v 1.7 2010/01/22 08:56:05 martin Exp $ */ 2 3 /*- 4 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or 8 * without modification, are permitted provided that the following 9 * conditions are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above 13 * copyright notice, this list of conditions and the following 14 * disclaimer in the documentation and/or other materials provided 15 * with the distribution. 16 * 3. The names of the authors may not be used to endorse or promote 17 * products derived from this software without specific prior 18 * written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY 21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 23 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, 25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, 27 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 29 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY 31 * OF SUCH DAMAGE. 32 */ 33 /* 34 * Copyright (c) 2001 Wasabi Systems, Inc. 35 * All rights reserved. 36 * 37 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 1. 
Redistributions of source code must retain the above copyright 43 * notice, this list of conditions and the following disclaimer. 44 * 2. Redistributions in binary form must reproduce the above copyright 45 * notice, this list of conditions and the following disclaimer in the 46 * documentation and/or other materials provided with the distribution. 47 * 3. All advertising materials mentioning features or use of this software 48 * must display the following acknowledgement: 49 * This product includes software developed for the NetBSD Project by 50 * Wasabi Systems, Inc. 51 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 52 * or promote products derived from this software without specific prior 53 * written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 58 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 59 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 60 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 61 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 62 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 63 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 64 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 65 * POSSIBILITY OF SUCH DAMAGE. 66 */ 67 68 /* 69 * Device driver for Alchemy Semiconductor Au1x00 Ethernet Media 70 * Access Controller. 71 * 72 * TODO: 73 * 74 * Better Rx buffer management; we want to get new Rx buffers 75 * to the chip more quickly than we currently do. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_admsw.c,v 1.7 2010/01/22 08:56:05 martin Exp $");


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <prop/proplib.h>

#include <uvm/uvm_extern.h>	/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <sys/gpio.h>
#include <dev/gpio/gpiovar.h>

#include <mips/adm5120/include/adm5120reg.h>
#include <mips/adm5120/include/adm5120var.h>
#include <mips/adm5120/include/adm5120_obiovar.h>
#include <mips/adm5120/dev/if_admswreg.h>
#include <mips/adm5120/dev/if_admswvar.h>

/*
 * Default port/VLAN matrix: one entry per pseudo-interface, each a
 * bitmask of member switch ports.  Bit 6 is the CPU port, so every
 * VLAN contains the CPU port plus exactly one external port.
 */
static uint8_t vlan_matrix[SW_DEVS] = {
	(1 << 6) | (1 << 0),		/* CPU + port0 */
	(1 << 6) | (1 << 1),		/* CPU + port1 */
	(1 << 6) | (1 << 2),		/* CPU + port2 */
	(1 << 6) | (1 << 3),		/* CPU + port3 */
	(1 << 6) | (1 << 4),		/* CPU + port4 */
	(1 << 6) | (1 << 5),		/* CPU + port5 */
};

/* Event counters are optional; the increment compiles away when disabled. */
#ifdef ADMSW_EVENT_COUNTERS
#define ADMSW_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define ADMSW_EVCNT_INCR(ev)	/* nothing */
#endif

/* ifnet interface functions */
static void admsw_start(struct ifnet *);
static void admsw_watchdog(struct ifnet *);
static int admsw_ioctl(struct ifnet *, u_long, void *);
static int admsw_init(struct ifnet *);
static void admsw_stop(struct ifnet *, int);

static void admsw_shutdown(void *);

static void admsw_reset(struct admsw_softc *);
static void admsw_set_filter(struct admsw_softc *);

static int admsw_intr(void *);
static void admsw_txintr(struct admsw_softc *, int);
static void admsw_rxintr(struct admsw_softc *, int);
static int admsw_add_rxbuf(struct admsw_softc *, int, int);
/* Convenience wrappers: populate a high- or low-priority Rx slot. */
#define admsw_add_rxhbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 1)
#define admsw_add_rxlbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 0)

static int admsw_mediachange(struct ifnet *);
static void admsw_mediastatus(struct ifnet *, struct ifmediareq *);

static int admsw_match(struct device *, struct cfdata *, void *);
static void admsw_attach(struct device *, struct device *, void *);

CFATTACH_DECL(admsw, sizeof(struct admsw_softc),
    admsw_match, admsw_attach, NULL, NULL);

/*
 * admsw_match:
 *
 *	Autoconfiguration match function: accept when the obio attach
 *	args name matches this driver's configuration name.
 */
static int
admsw_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct obio_attach_args *aa = aux;

	return strcmp(aa->oba_name, cf->cf_name) == 0;
}

/* Register access shorthands; both expect a local `sc' in scope. */
#define REG_READ(o)	bus_space_read_4(sc->sc_st, sc->sc_ioh, (o))
#define REG_WRITE(o,v)	bus_space_write_4(sc->sc_st, sc->sc_ioh, (o),(v))


/*
 * admsw_init_bufs:
 *
 *	Reset both Tx rings (freeing any mbufs still attached), make
 *	sure every Rx slot has a buffer, point the chip at the ring
 *	bases, and reset the software ring state.
 */
static void
admsw_init_bufs(struct admsw_softc *sc)
{
	int i;
	struct admsw_desc *desc;

	/* High-priority Tx ring: clear every descriptor. */
	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if (sc->sc_txhsoft[i].ds_mbuf != NULL) {
			m_freem(sc->sc_txhsoft[i].ds_mbuf);
			sc->sc_txhsoft[i].ds_mbuf = NULL;
		}
		desc = &sc->sc_txhdescs[i];
		desc->data = 0;
		desc->cntl = 0;
		desc->len = MAC_BUFLEN;
		desc->status = 0;
		ADMSW_CDTXHSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	/* Mark the last descriptor so the chip wraps the ring. */
	sc->sc_txhdescs[ADMSW_NTXHDESC - 1].data |= ADM5120_DMA_RINGEND;
	ADMSW_CDTXHSYNC(sc, ADMSW_NTXHDESC - 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* High-priority Rx ring: ensure every slot has an mbuf. */
	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if (sc->sc_rxhsoft[i].ds_mbuf == NULL) {
			if (admsw_add_rxhbuf(sc, i) != 0)
				panic("admsw_init_bufs\n");
		} else
			ADMSW_INIT_RXHDESC(sc, i);
	}

	/* Low-priority Tx ring: same treatment as the high ring. */
	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if (sc->sc_txlsoft[i].ds_mbuf != NULL) {
			m_freem(sc->sc_txlsoft[i].ds_mbuf);
			sc->sc_txlsoft[i].ds_mbuf = NULL;
		}
		desc = &sc->sc_txldescs[i];
		desc->data = 0;
		desc->cntl = 0;
		desc->len = MAC_BUFLEN;
		desc->status = 0;
		ADMSW_CDTXLSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txldescs[ADMSW_NTXLDESC - 1].data |= ADM5120_DMA_RINGEND;
	ADMSW_CDTXLSYNC(sc, ADMSW_NTXLDESC - 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Low-priority Rx ring. */
	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if (sc->sc_rxlsoft[i].ds_mbuf == NULL) {
			if (admsw_add_rxlbuf(sc, i) != 0)
				panic("admsw_init_bufs\n");
		} else
			ADMSW_INIT_RXLDESC(sc, i);
	}

	/* Give the chip the physical ring base addresses. */
	REG_WRITE(SEND_HBADDR_REG, ADMSW_CDTXHADDR(sc, 0));
	REG_WRITE(SEND_LBADDR_REG, ADMSW_CDTXLADDR(sc, 0));
	REG_WRITE(RECV_HBADDR_REG, ADMSW_CDRXHADDR(sc, 0));
	REG_WRITE(RECV_LBADDR_REG, ADMSW_CDRXLADDR(sc, 0));

	/* Reset software ring bookkeeping. */
	sc->sc_txfree = ADMSW_NTXLDESC;
	sc->sc_txnext = 0;
	sc->sc_txdirty = 0;
	sc->sc_rxptr = 0;
}

/*
 * admsw_setvlan:
 *
 *	Load the 6-byte port/VLAN membership matrix into the two VLAN
 *	group registers (entries 0-3 into G1, entries 4-5 into G2).
 *
 *	NOTE(review): callers pass the uint8_t vlan_matrix[] for the
 *	char matrix[6] parameter — relies on char being byte-compatible
 *	with uint8_t here.
 */
static void
admsw_setvlan(struct admsw_softc *sc, char matrix[6])
{
	uint32_t i;

	i = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16) + (matrix[3] << 24);
	REG_WRITE(VLAN_G1_REG, i);
	i = matrix[4] + (matrix[5] << 8);
	REG_WRITE(VLAN_G2_REG, i);
}

/*
 * admsw_reset:
 *
 *	Full software reset of the switch: quiesce DMA, reset PHYs and
 *	the switch core, reprogram the CPU port, PHY control, interrupt
 *	masks, flow-control thresholds, the VLAN matrix, and one unicast
 *	MAC address per port, then disable the hardware watchdog.
 */
static void
admsw_reset(struct admsw_softc *sc)
{
	uint32_t wdog1;
	int i;

	/* Disable all ports and the CPU port before resetting. */
	REG_WRITE(PORT_CONF0_REG,
	    REG_READ(PORT_CONF0_REG) | PORT_CONF0_DP_MASK);
	REG_WRITE(CPUP_CONF_REG,
	    REG_READ(CPUP_CONF_REG) | CPUP_CONF_DCPUP);

	/* Wait for DMA to complete.  Overkill.  In 3ms, we can
	 * send at least two entire 1500-byte packets at 10 Mb/s.
	 */
	DELAY(3000);

	/* The datasheet recommends that we move all PHYs to reset
	 * state prior to software reset.
	 */
	REG_WRITE(PHY_CNTL2_REG,
	    REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK);

	/* Reset the switch. */
	REG_WRITE(ADMSW_SW_RES, 0x1);

	DELAY(100 * 1000);

	REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO);

	/* begin old code */
	REG_WRITE(CPUP_CONF_REG,
	    CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
	    CPUP_CONF_DMCP_MASK);

	REG_WRITE(PORT_CONF0_REG, PORT_CONF0_EMCP_MASK | PORT_CONF0_EMBP_MASK);

	/* Re-enable PHYs: autonegotiation, out of reset, auto-MDIX. */
	REG_WRITE(PHY_CNTL2_REG,
	    REG_READ(PHY_CNTL2_REG) | PHY_CNTL2_ANE_MASK | PHY_CNTL2_PHYR_MASK |
	    PHY_CNTL2_AMDIX_MASK);

	REG_WRITE(PHY_CNTL3_REG, REG_READ(PHY_CNTL3_REG) | PHY_CNTL3_RNT);

	/* Mask and acknowledge all interrupts. */
	REG_WRITE(ADMSW_INT_MASK, INT_MASK);
	REG_WRITE(ADMSW_INT_ST, INT_MASK);

	/*
	 * While in DDB, we stop servicing interrupts, RX ring
	 * fills up and when free block counter falls behind FC
	 * threshold, the switch starts to emit 802.3x PAUSE
	 * frames.  This can upset peer switches.
	 *
	 * Stop this from happening by disabling FC and D2
	 * thresholds.
	 */
	REG_WRITE(FC_TH_REG,
	    REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK));

	admsw_setvlan(sc, vlan_matrix);

	/* Program one unicast MAC address per VLAN/port; the last
	 * address byte is offset by the port index. */
	for (i = 0; i < SW_DEVS; i++) {
		REG_WRITE(MAC_WT1_REG,
		    sc->sc_enaddr[2] |
		    (sc->sc_enaddr[3]<<8) |
		    (sc->sc_enaddr[4]<<16) |
		    ((sc->sc_enaddr[5]+i)<<24));
		REG_WRITE(MAC_WT0_REG, (i<<MAC_WT0_VLANID_SHIFT) |
		    (sc->sc_enaddr[0]<<16) | (sc->sc_enaddr[1]<<24) |
		    MAC_WT0_WRITE | MAC_WT0_VLANID_EN);

		/* Busy-wait for the MAC table write to complete. */
		while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE));
	}
	/* Disable the hardware watchdog. */
	wdog1 = REG_READ(ADM5120_WDOG1);
	REG_WRITE(ADM5120_WDOG1, wdog1 & ~ADM5120_WDOG1_WDE);
}

/*
 * admsw_attach:
 *
 *	Autoconfiguration attach: map the device, hook the interrupt,
 *	allocate and load descriptor/control DMA memory, create per-slot
 *	Tx/Rx DMA maps, reset the switch, and attach one Ethernet
 *	pseudo-interface per switch port.
 */
static void
admsw_attach(struct device *parent, struct device *self, void *aux)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	struct admsw_softc *sc = (void *) self;
	struct obio_attach_args *aa = aux;
	struct ifnet *ifp;
	bus_dma_segment_t seg;
	int error, i, rseg;
	prop_data_t pd;

	printf(": ADM5120 Switch Engine, %d ports\n", SW_DEVS);

	sc->sc_dmat = aa->oba_dt;
	sc->sc_st = aa->oba_st;

	/* Base MAC address from device properties, else a fixed
	 * locally-administered fallback. */
	pd = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-address");

	if (pd == NULL) {
		enaddr[0] = 0x02;
		enaddr[1] = 0xaa;
		enaddr[2] = 0xbb;
		enaddr[3] = 0xcc;
		enaddr[4] = 0xdd;
		enaddr[5] = 0xee;
	} else
		memcpy(enaddr, prop_data_data_nocopy(pd), sizeof(enaddr));

	memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));

	printf("%s: base Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/* Map the device. */
	if (bus_space_map(sc->sc_st, aa->oba_addr, 512, 0, &sc->sc_ioh) != 0) {
		printf("%s: unable to map device\n", device_xname(&sc->sc_dev));
		return;
	}

	/* Hook up the interrupt handler. */
	sc->sc_ih = adm5120_intr_establish(aa->oba_irq, INTR_IRQ, admsw_intr, sc);

	if (sc->sc_ih == NULL) {
		printf("%s: unable to register interrupt handler\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct admsw_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct admsw_control_data), (void *)&sc->sc_control_data,
	    0)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct admsw_control_data), 1,
	    sizeof(struct admsw_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		return;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct admsw_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    2, MCLBYTES, 0, 0,
		    &sc->sc_txhsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create txh DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_txhsoft[i].ds_mbuf = NULL;
	}
	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    2, MCLBYTES, 0, 0,
		    &sc->sc_txlsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create txl DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_txlsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxhsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rxh DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_rxhsoft[i].ds_mbuf = NULL;
	}
	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxlsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rxl DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_rxlsoft[i].ds_mbuf = NULL;
	}

	admsw_init_bufs(sc);

	admsw_reset(sc);

	/* One pseudo Ethernet interface per switch port. */
	for (i = 0; i < SW_DEVS; i++) {
		ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange, admsw_mediastatus);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO);

		ifp = &sc->sc_ethercom[i].ec_if;
		strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
		/* Derive "admsw0".."admswN" by bumping the digit in the
		 * device name; assumes the name is 6 chars ("admswX"). */
		ifp->if_xname[5] += i;
		ifp->if_softc = sc;
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		ifp->if_ioctl = admsw_ioctl;
		ifp->if_start = admsw_start;
		ifp->if_watchdog = admsw_watchdog;
		ifp->if_init = admsw_init;
		ifp->if_stop = admsw_stop;
		ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
		IFQ_SET_MAXLEN(&ifp->if_snd, max(ADMSW_NTXLDESC, IFQ_MAXLEN));
		IFQ_SET_READY(&ifp->if_snd);

		/* Attach the interface. */
		if_attach(ifp);
		ether_ifattach(ifp, enaddr);
		/* Each port gets the next consecutive MAC address. */
		enaddr[5]++;
	}

#ifdef ADMSW_EVENT_COUNTERS
	evcnt_attach_dynamic(&sc->sc_ev_txstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txstall");
	evcnt_attach_dynamic(&sc->sc_ev_rxstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxstall");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
#if 1
	evcnt_attach_dynamic(&sc->sc_ev_rxsync, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxsync");
#endif
#endif

	admwdog_attach(sc);

	/* Make sure the interface is shutdown during reboot. */
	sc->sc_sdhook = shutdownhook_establish(admsw_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	/* leave interrupts and cpu port disabled */
	return;
}


/*
 * admsw_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
admsw_shutdown(void *arg)
{
	struct admsw_softc *sc = arg;
	int i;

	for (i = 0; i < SW_DEVS; i++)
		admsw_stop(&sc->sc_ethercom[i].ec_if, 1);
}

/*
 * admsw_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Round-robins over
 *	all port queues (the static `vlan' remembers where the last
 *	call left off) and fills the low-priority Tx ring.
 */
static void
admsw_start(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct admsw_descsoft *ds;
	struct admsw_desc *desc;
	bus_dmamap_t dmamap;
	struct ether_header *eh;
	int error, nexttx, len, i;
	static int vlan = 0;

	/*
	 * Loop through the send queues, setting up transmit descriptors
	 * until we drain the queues, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Pick the next port queue, round-robin. */
		vlan++;
		if (vlan == SW_DEVS)
			vlan = 0;
		i = vlan;
		for (;;) {
			ifp = &sc->sc_ethercom[i].ec_if;
			if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) ==
			    IFF_RUNNING) {
				/* Grab a packet off the queue. */
				IFQ_POLL(&ifp->if_snd, m0);
				if (m0 != NULL)
					break;
			}
			i++;
			if (i == SW_DEVS)
				i = 0;
			/* All queues checked and empty: done. */
			if (i == vlan)
				return;
		}
		vlan = i;
		m = NULL;

		/* Get a spare descriptor. */
		if (sc->sc_txfree == 0) {
			/* No more slots left; notify upper layer. */
			ifp->if_flags |= IFF_OACTIVE;
			ADMSW_EVCNT_INCR(&sc->sc_ev_txstall);
			break;
		}
		nexttx = sc->sc_txnext;
		desc = &sc->sc_txldescs[nexttx];
		ds = &sc->sc_txlsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.  Short packets are also copied so they
		 * can be padded to the Ethernet minimum.
		 */
		if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			if (m->m_pkthdr.len < ETHER_MIN_LEN) {
				if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len)
					panic("admsw_start: M_TRAILINGSPACE\n");
				/* Zero-pad up to the minimum frame size.
				 * NOTE(review): only ETHER_MIN_LEN -
				 * ETHER_CRC_LEN - len bytes are cleared,
				 * but the length is set to ETHER_MIN_LEN;
				 * the final ETHER_CRC_LEN bytes are sent
				 * uninitialized — verify intent. */
				memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
			}
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* Transmit the copy, free the original. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* The descriptor supports at most two buffer segments. */
		if (dmamap->dm_nsegs != 1 && dmamap->dm_nsegs != 2)
			panic("admsw_start: dm_nsegs == %d\n", dmamap->dm_nsegs);
		desc->data = dmamap->dm_segs[0].ds_addr;
		desc->len = len = dmamap->dm_segs[0].ds_len;
		if (dmamap->dm_nsegs > 1) {
			len += dmamap->dm_segs[1].ds_len;
			desc->cntl = dmamap->dm_segs[1].ds_addr | ADM5120_DMA_BUF2ENABLE;
		} else
			desc->cntl = 0;
		/* Length plus destination VLAN bit. */
		desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
		eh = mtod(m0, struct ether_header *);
		if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
		    m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			desc->status |= ADM5120_DMA_CSUM;
		if (nexttx == ADMSW_NTXLDESC - 1)
			desc->data |= ADM5120_DMA_RINGEND;
		/* Hand the descriptor to the chip last. */
		desc->data |= ADM5120_DMA_OWN;

		/* Sync the descriptor. */
		ADMSW_CDTXLSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Kick the transmitter. */
		REG_WRITE(SEND_TRIG_REG, 1);
		/* printf("send slot %d\n",nexttx); */

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txfree--;
		sc->sc_txnext = ADMSW_NEXTTXL(nexttx);

		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, m0);

		/* Set a watchdog timer in case the chip flakes out.
		 * The timer lives on port 0's ifnet for all ports. */
		sc->sc_ethercom[0].ec_if.if_timer = 5;
	}
}

/*
 * admsw_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
692 */ 693 static void 694 admsw_watchdog(struct ifnet *ifp) 695 { 696 struct admsw_softc *sc = ifp->if_softc; 697 int vlan; 698 699 #if 1 700 /* Check if an interrupt was lost. */ 701 if (sc->sc_txfree == ADMSW_NTXLDESC) { 702 printf("%s: watchdog false alarm\n", sc->sc_dev.dv_xname); 703 return; 704 } 705 if (sc->sc_ethercom[0].ec_if.if_timer != 0) 706 printf("%s: watchdog timer is %d!\n", sc->sc_dev.dv_xname, sc->sc_ethercom[0].ec_if.if_timer); 707 admsw_txintr(sc, 0); 708 if (sc->sc_txfree == ADMSW_NTXLDESC) { 709 printf("%s: tx IRQ lost (queue empty)\n", sc->sc_dev.dv_xname); 710 return; 711 } 712 if (sc->sc_ethercom[0].ec_if.if_timer != 0) { 713 printf("%s: tx IRQ lost (timer recharged)\n", sc->sc_dev.dv_xname); 714 return; 715 } 716 #endif 717 718 printf("%s: device timeout, txfree = %d\n", sc->sc_dev.dv_xname, sc->sc_txfree); 719 for (vlan = 0; vlan < SW_DEVS; vlan++) 720 admsw_stop(&sc->sc_ethercom[vlan].ec_if, 0); 721 for (vlan = 0; vlan < SW_DEVS; vlan++) 722 (void) admsw_init(&sc->sc_ethercom[vlan].ec_if); 723 724 /* Try to get more packets going. */ 725 admsw_start(ifp); 726 } 727 728 /* 729 * admsw_ioctl: [ifnet interface function] 730 * 731 * Handle control requests from the operator. 
732 */ 733 static int 734 admsw_ioctl(struct ifnet *ifp, u_long cmd, void *data) 735 { 736 struct admsw_softc *sc = ifp->if_softc; 737 struct ifdrv *ifd; 738 int s, error, port; 739 740 s = splnet(); 741 742 switch (cmd) { 743 case SIOCSIFCAP: 744 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) 745 error = 0; 746 break; 747 case SIOCSIFMEDIA: 748 case SIOCGIFMEDIA: 749 port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 750 if (port >= SW_DEVS) 751 error = EOPNOTSUPP; 752 else 753 error = ifmedia_ioctl(ifp, (struct ifreq *)data, 754 &sc->sc_ifmedia[port], cmd); 755 break; 756 757 case SIOCGDRVSPEC: 758 case SIOCSDRVSPEC: 759 ifd = (struct ifdrv *) data; 760 if (ifd->ifd_cmd != 0 || ifd->ifd_len != sizeof(vlan_matrix)) { 761 error = EINVAL; 762 break; 763 } 764 if (cmd == SIOCGDRVSPEC) { 765 error = copyout(vlan_matrix, ifd->ifd_data, 766 sizeof(vlan_matrix)); 767 } else { 768 error = copyin(ifd->ifd_data, vlan_matrix, 769 sizeof(vlan_matrix)); 770 admsw_setvlan(sc, vlan_matrix); 771 } 772 break; 773 774 default: 775 error = ether_ioctl(ifp, cmd, data); 776 if (error == ENETRESET) { 777 /* 778 * Multicast list has changed; set the hardware filter 779 * accordingly. 780 */ 781 admsw_set_filter(sc); 782 error = 0; 783 } 784 break; 785 } 786 787 /* Try to get more packets going. */ 788 admsw_start(ifp); 789 790 splx(s); 791 return (error); 792 } 793 794 795 /* 796 * admsw_intr: 797 * 798 * Interrupt service routine. 
 */
static int
admsw_intr(void *arg)
{
	struct admsw_softc *sc = arg;
	uint32_t pending;
	char buf[64];

	pending = REG_READ(ADMSW_INT_ST);

	/* Log any interrupt bits we do not expect to see. */
	if ((pending & ~(ADMSW_INTR_RHD|ADMSW_INTR_RLD|ADMSW_INTR_SHD|ADMSW_INTR_SLD|ADMSW_INTR_W1TE|ADMSW_INTR_W0TE)) != 0) {
		snprintb(buf, sizeof(buf), ADMSW_INT_FMT, pending);
		printf("%s: pending=%s\n", __func__, buf);
	}
	/* Acknowledge everything that was pending. */
	REG_WRITE(ADMSW_INT_ST, pending);

	/* No interface is up yet: nothing to service. */
	if (sc->ndevs == 0)
		return (0);

	if ((pending & ADMSW_INTR_RHD) != 0)
		admsw_rxintr(sc, 1);

	if ((pending & ADMSW_INTR_RLD) != 0)
		admsw_rxintr(sc, 0);

	if ((pending & ADMSW_INTR_SHD) != 0)
		admsw_txintr(sc, 1);

	if ((pending & ADMSW_INTR_SLD) != 0)
		admsw_txintr(sc, 0);

	return (1);
}

/*
 * admsw_txintr:
 *
 *	Helper; handle transmit interrupts.  Walks the dirty portion of
 *	the low-priority Tx ring reclaiming completed descriptors.
 *	NOTE(review): the `prio' argument is currently unused; only the
 *	low-priority ring is serviced.
 */
static void
admsw_txintr(struct admsw_softc *sc, int prio)
{
	struct ifnet *ifp;
	struct admsw_desc *desc;
	struct admsw_descsoft *ds;
	int i, vlan;
	int gotone = 0;

	/* printf("txintr: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
	for (i = sc->sc_txdirty; sc->sc_txfree != ADMSW_NTXLDESC;
	    i = ADMSW_NEXTTXL(i)) {

		ADMSW_CDTXLSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txldescs[i];
		ds = &sc->sc_txlsoft[i];
		/* Still owned by the chip: stop reclaiming here. */
		if (desc->data & ADM5120_DMA_OWN) {
			ADMSW_CDTXLSYNC(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		/* The VLAN bit in the status says which port sent it. */
		vlan = ffs(desc->status & 0x3f) - 1;
		if (vlan < 0 || vlan >= SW_DEVS)
			panic("admsw_txintr: bad vlan\n");
		ifp = &sc->sc_ethercom[vlan].ec_if;
		gotone = 1;
		/* printf("clear tx slot %d\n",i); */

		ifp->if_opackets++;

		sc->sc_txfree++;
	}

	if (gotone) {
		sc->sc_txdirty = i;
#ifdef ADMSW_EVENT_COUNTERS
		ADMSW_EVCNT_INCR(&sc->sc_ev_txintr);
#endif
		/* Descriptors freed: every port may transmit again. */
		for (vlan = 0; vlan < SW_DEVS; vlan++)
			sc->sc_ethercom[vlan].ec_if.if_flags &= ~IFF_OACTIVE;

		ifp = &sc->sc_ethercom[0].ec_if;

		/* Try to queue more packets. */
		admsw_start(ifp);

		/*
		 * If there are no more pending transmissions,
		 * cancel the watchdog timer.
		 */
		if (sc->sc_txfree == ADMSW_NTXLDESC)
			ifp->if_timer = 0;

	}

	/* printf("txintr end: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
}

/*
 * admsw_rxintr:
 *
 *	Helper; handle receive interrupts.  Only the low-priority ring
 *	is supported; a high-priority packet panics.
 */
static void
admsw_rxintr(struct admsw_softc *sc, int high)
{
	struct ifnet *ifp;
	struct admsw_descsoft *ds;
	struct mbuf *m;
	uint32_t stat;
	int i, len, port, vlan;

	/* printf("rxintr\n"); */
	if (high)
		panic("admsw_rxintr: high priority packet\n");

#ifdef ADMSW_EVENT_COUNTERS
	int pkts = 0;
#endif

#if 1
	/*
	 * Resync check: if the descriptor at sc_rxptr is still owned by
	 * the chip, scan the ring for the first completed descriptor;
	 * if sc_rxptr is still chip-owned afterwards we have fallen
	 * behind the hardware and jump our pointer forward.
	 */
	ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
		ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	else {
		i = sc->sc_rxptr;
		do {
			ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			i = ADMSW_NEXTRXL(i);
			/* the ring is empty, just return. */
			if (i == sc->sc_rxptr)
				return;
			ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		} while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN);
		ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
			ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		else {
			ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			/* We've fallen behind the chip: catch it. */
			printf("%s: RX ring resync, base=%x, work=%x, %d -> %d\n",
			    sc->sc_dev.dv_xname, REG_READ(RECV_LBADDR_REG),
			    REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i);
			sc->sc_rxptr = i;
			ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync);
		}
	}
#endif
	for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) {
		ds = &sc->sc_rxlsoft[i];

		ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Chip still owns this descriptor: we are caught up. */
		if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) {
			ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			break;
		}

		/* printf("process slot %d\n",i); */

#ifdef ADMSW_EVENT_COUNTERS
		pkts++;
#endif

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		stat = sc->sc_rxldescs[i].status;
		len = (stat & ADM5120_DMA_LEN) >> ADM5120_DMA_LENSHIFT;
		/* The hardware length includes the FCS; strip it. */
		len -= ETHER_CRC_LEN;
		port = (stat & ADM5120_DMA_PORTID) >> ADM5120_DMA_PORTSHIFT;
		/* Map the source port to its VLAN/interface. */
		for (vlan = 0; vlan < SW_DEVS; vlan++)
			if ((1 << port) & vlan_matrix[vlan])
				break;
		if (vlan == SW_DEVS)
			vlan = 0;
		ifp = &sc->sc_ethercom[vlan].ec_if;

		m = ds->ds_mbuf;
		/* Replenish the slot first; drop the packet if we can't. */
		if (admsw_add_rxlbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			ADMSW_INIT_RXLDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		/* Record the hardware's IPv4 checksum verdict. */
		if ((stat & ADM5120_DMA_TYPE) == ADM5120_DMA_TYPE_IP) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (stat & ADM5120_DMA_CSUMFAIL)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
		ifp->if_ipackets++;
	}
#ifdef ADMSW_EVENT_COUNTERS
	if (pkts)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxintr);

	/* A completely full sweep suggests the ring stalled. */
	if (pkts == ADMSW_NRXLDESC)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxstall);
#endif

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * admsw_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().  The
 *	hardware is (re)initialized only when the first of the per-port
 *	interfaces comes up (sc->ndevs counts running interfaces).
 */
static int
admsw_init(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;

	/* printf("admsw_init called\n"); */

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		if (sc->ndevs == 0) {
			/* First interface up: bring up the hardware. */
			admsw_init_bufs(sc);
			admsw_reset(sc);
			REG_WRITE(CPUP_CONF_REG,
			    CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
			    CPUP_CONF_DMCP_MASK);
			/* clear all pending interrupts */
			REG_WRITE(ADMSW_INT_ST, INT_MASK);

			/* enable needed interrupts */
			REG_WRITE(ADMSW_INT_MASK, REG_READ(ADMSW_INT_MASK) &
			    ~(ADMSW_INTR_SHD | ADMSW_INTR_SLD | ADMSW_INTR_RHD |
			    ADMSW_INTR_RLD | ADMSW_INTR_HDF | ADMSW_INTR_LDF));
		}
		sc->ndevs++;
	}

	/* Set the receive filter. */
	admsw_set_filter(sc);

	/* mark iface as running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

/*
 * admsw_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
1067 */ 1068 static void 1069 admsw_stop(struct ifnet *ifp, int disable) 1070 { 1071 struct admsw_softc *sc = ifp->if_softc; 1072 1073 /* printf("admsw_stop: %d\n",disable); */ 1074 1075 if (!(ifp->if_flags & IFF_RUNNING)) 1076 return; 1077 1078 if (--sc->ndevs == 0) { 1079 /* printf("debug: de-initializing hardware\n"); */ 1080 1081 /* disable cpu port */ 1082 REG_WRITE(CPUP_CONF_REG, 1083 CPUP_CONF_DCPUP | CPUP_CONF_CRCP | 1084 CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK); 1085 1086 /* XXX We should disable, then clear? --dyoung */ 1087 /* clear all pending interrupts */ 1088 REG_WRITE(ADMSW_INT_ST, INT_MASK); 1089 1090 /* disable interrupts */ 1091 REG_WRITE(ADMSW_INT_MASK, INT_MASK); 1092 } 1093 1094 /* Mark the interface as down and cancel the watchdog timer. */ 1095 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1096 ifp->if_timer = 0; 1097 1098 return; 1099 } 1100 1101 /* 1102 * admsw_set_filter: 1103 * 1104 * Set up the receive filter. 1105 */ 1106 static void 1107 admsw_set_filter(struct admsw_softc *sc) 1108 { 1109 int i; 1110 uint32_t allmc, anymc, conf, promisc; 1111 struct ether_multi *enm; 1112 struct ethercom *ec; 1113 struct ifnet *ifp; 1114 struct ether_multistep step; 1115 1116 /* Find which ports should be operated in promisc mode. 
*/ 1117 allmc = anymc = promisc = 0; 1118 for (i = 0; i < SW_DEVS; i++) { 1119 ec = &sc->sc_ethercom[i]; 1120 ifp = &ec->ec_if; 1121 if (ifp->if_flags & IFF_PROMISC) 1122 promisc |= vlan_matrix[i]; 1123 1124 ifp->if_flags &= ~IFF_ALLMULTI; 1125 1126 ETHER_FIRST_MULTI(step, ec, enm); 1127 while (enm != NULL) { 1128 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1129 ETHER_ADDR_LEN) != 0) { 1130 printf("%s: punting on mcast range\n", 1131 __func__); 1132 ifp->if_flags |= IFF_ALLMULTI; 1133 allmc |= vlan_matrix[i]; 1134 break; 1135 } 1136 1137 anymc |= vlan_matrix[i]; 1138 1139 #if 0 1140 /* XXX extract subroutine --dyoung */ 1141 REG_WRITE(MAC_WT1_REG, 1142 enm->enm_addrlo[2] | 1143 (enm->enm_addrlo[3] << 8) | 1144 (enm->enm_addrlo[4] << 16) | 1145 (enm->enm_addrlo[5] << 24)); 1146 REG_WRITE(MAC_WT0_REG, 1147 (i << MAC_WT0_VLANID_SHIFT) | 1148 (enm->enm_addrlo[0] << 16) | 1149 (enm->enm_addrlo[1] << 24) | 1150 MAC_WT0_WRITE | MAC_WT0_VLANID_EN); 1151 /* timeout? */ 1152 while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE)); 1153 #endif 1154 1155 /* load h/w with mcast address, port = CPU */ 1156 ETHER_NEXT_MULTI(step, enm); 1157 } 1158 } 1159 1160 conf = REG_READ(CPUP_CONF_REG); 1161 /* 1 Disable forwarding of unknown & multicast packets to 1162 * CPU on all ports. 1163 * 2 Enable forwarding of unknown & multicast packets to 1164 * CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set. 1165 */ 1166 conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK; 1167 /* Enable forwarding of unknown packets to CPU on selected ports. */ 1168 conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK); 1169 conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK); 1170 conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK); 1171 REG_WRITE(CPUP_CONF_REG, conf); 1172 } 1173 1174 /* 1175 * admsw_add_rxbuf: 1176 * 1177 * Add a receive buffer to the indicated descriptor. 
1178 */ 1179 int 1180 admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high) 1181 { 1182 struct admsw_descsoft *ds; 1183 struct mbuf *m; 1184 int error; 1185 1186 if (high) 1187 ds = &sc->sc_rxhsoft[idx]; 1188 else 1189 ds = &sc->sc_rxlsoft[idx]; 1190 1191 MGETHDR(m, M_DONTWAIT, MT_DATA); 1192 if (m == NULL) 1193 return (ENOBUFS); 1194 1195 MCLGET(m, M_DONTWAIT); 1196 if ((m->m_flags & M_EXT) == 0) { 1197 m_freem(m); 1198 return (ENOBUFS); 1199 } 1200 1201 if (ds->ds_mbuf != NULL) 1202 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); 1203 1204 ds->ds_mbuf = m; 1205 1206 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap, 1207 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1208 BUS_DMA_READ | BUS_DMA_NOWAIT); 1209 if (error) { 1210 printf("%s: can't load rx DMA map %d, error = %d\n", 1211 sc->sc_dev.dv_xname, idx, error); 1212 panic("admsw_add_rxbuf"); /* XXX */ 1213 } 1214 1215 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, 1216 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1217 1218 if (high) 1219 ADMSW_INIT_RXHDESC(sc, idx); 1220 else 1221 ADMSW_INIT_RXLDESC(sc, idx); 1222 1223 return (0); 1224 } 1225 1226 int 1227 admsw_mediachange(struct ifnet *ifp) 1228 { 1229 struct admsw_softc *sc = ifp->if_softc; 1230 int port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 1231 struct ifmedia *ifm = &sc->sc_ifmedia[port]; 1232 int old, new, val; 1233 1234 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1235 return (EINVAL); 1236 1237 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 1238 val = PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX; 1239 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { 1240 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1241 val = PHY_CNTL2_100M|PHY_CNTL2_FDX; 1242 else 1243 val = PHY_CNTL2_100M; 1244 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) { 1245 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1246 val = PHY_CNTL2_FDX; 1247 else 1248 val = 0; 1249 } else 1250 return (EINVAL); 1251 1252 old = REG_READ(PHY_CNTL2_REG); 1253 new = old & 
~((PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX) << port); 1254 new |= (val << port); 1255 1256 if (new != old) 1257 REG_WRITE(PHY_CNTL2_REG, new); 1258 1259 return (0); 1260 } 1261 1262 void 1263 admsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1264 { 1265 struct admsw_softc *sc = ifp->if_softc; 1266 int port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 1267 int status; 1268 1269 ifmr->ifm_status = IFM_AVALID; 1270 ifmr->ifm_active = IFM_ETHER; 1271 1272 status = REG_READ(PHY_ST_REG) >> port; 1273 1274 if ((status & PHY_ST_LINKUP) == 0) { 1275 ifmr->ifm_active |= IFM_NONE; 1276 return; 1277 } 1278 1279 ifmr->ifm_status |= IFM_ACTIVE; 1280 ifmr->ifm_active |= (status & PHY_ST_100M) ? IFM_100_TX : IFM_10_T; 1281 if (status & PHY_ST_FDX) 1282 ifmr->ifm_active |= IFM_FDX; 1283 } 1284