1 /* $NetBSD: if_admsw.c,v 1.10 2011/07/10 23:13:23 matt Exp $ */ 2 3 /*- 4 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or 8 * without modification, are permitted provided that the following 9 * conditions are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above 13 * copyright notice, this list of conditions and the following 14 * disclaimer in the documentation and/or other materials provided 15 * with the distribution. 16 * 3. The names of the authors may not be used to endorse or promote 17 * products derived from this software without specific prior 18 * written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY 21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 23 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, 25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, 27 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 29 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY 31 * OF SUCH DAMAGE. 32 */ 33 /* 34 * Copyright (c) 2001 Wasabi Systems, Inc. 35 * All rights reserved. 36 * 37 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 1. 
Redistributions of source code must retain the above copyright 43 * notice, this list of conditions and the following disclaimer. 44 * 2. Redistributions in binary form must reproduce the above copyright 45 * notice, this list of conditions and the following disclaimer in the 46 * documentation and/or other materials provided with the distribution. 47 * 3. All advertising materials mentioning features or use of this software 48 * must display the following acknowledgement: 49 * This product includes software developed for the NetBSD Project by 50 * Wasabi Systems, Inc. 51 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 52 * or promote products derived from this software without specific prior 53 * written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 58 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 59 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 60 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 61 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 62 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 63 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 64 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 65 * POSSIBILITY OF SUCH DAMAGE. 66 */ 67 68 /* 69 * Device driver for Alchemy Semiconductor Au1x00 Ethernet Media 70 * Access Controller. 71 * 72 * TODO: 73 * 74 * Better Rx buffer management; we want to get new Rx buffers 75 * to the chip more quickly than we currently do. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_admsw.c,v 1.10 2011/07/10 23:13:23 matt Exp $");


#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>

#include <prop/proplib.h>

#include <uvm/uvm_extern.h>	/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <sys/gpio.h>
#include <dev/gpio/gpiovar.h>

#include <mips/adm5120/include/adm5120reg.h>
#include <mips/adm5120/include/adm5120var.h>
#include <mips/adm5120/include/adm5120_obiovar.h>
#include <mips/adm5120/dev/if_admswreg.h>
#include <mips/adm5120/dev/if_admswvar.h>

/*
 * Port membership map, one byte per software interface (VLAN).
 * Each byte is a bitmask of switch ports in that VLAN: bit 6 is the
 * CPU port, bits 0-5 are the physical ports.  The default gives each
 * interface one physical port plus the CPU port.  Userland may
 * replace this table at runtime via SIOCSDRVSPEC (see admsw_ioctl).
 */
static uint8_t vlan_matrix[SW_DEVS] = {
	(1 << 6) | (1 << 0),		/* CPU + port0 */
	(1 << 6) | (1 << 1),		/* CPU + port1 */
	(1 << 6) | (1 << 2),		/* CPU + port2 */
	(1 << 6) | (1 << 3),		/* CPU + port3 */
	(1 << 6) | (1 << 4),		/* CPU + port4 */
	(1 << 6) | (1 << 5),		/* CPU + port5 */
};

/* Event counters compile to nothing unless ADMSW_EVENT_COUNTERS is set. */
#ifdef ADMSW_EVENT_COUNTERS
#define ADMSW_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define ADMSW_EVCNT_INCR(ev)	/* nothing */
#endif

static void admsw_start(struct ifnet *);
static void admsw_watchdog(struct ifnet *);
static int admsw_ioctl(struct ifnet *, u_long, void *);
static int admsw_init(struct ifnet *);
static void admsw_stop(struct ifnet *, int);

static void admsw_shutdown(void *);

static void admsw_reset(struct admsw_softc *);
static void admsw_set_filter(struct admsw_softc *);

static int admsw_intr(void *);
static void
admsw_txintr(struct admsw_softc *, int);
static void admsw_rxintr(struct admsw_softc *, int);
static int admsw_add_rxbuf(struct admsw_softc *, int, int);
/* Convenience wrappers: high- vs. low-priority receive rings. */
#define admsw_add_rxhbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 1)
#define admsw_add_rxlbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 0)

static int admsw_mediachange(struct ifnet *);
static void admsw_mediastatus(struct ifnet *, struct ifmediareq *);

static int admsw_match(struct device *, struct cfdata *, void *);
static void admsw_attach(struct device *, struct device *, void *);

CFATTACH_DECL(admsw, sizeof(struct admsw_softc),
    admsw_match, admsw_attach, NULL, NULL);

/*
 * admsw_match:
 *
 *	Autoconfiguration match: accept when the obio attach-args name
 *	matches our driver name.
 */
static int
admsw_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct obio_attach_args *aa = aux;

	return strcmp(aa->oba_name, cf->cf_name) == 0;
}

/* Register accessors; both assume a local `sc' is in scope. */
#define REG_READ(o)	bus_space_read_4(sc->sc_st, sc->sc_ioh, (o))
#define REG_WRITE(o,v)	bus_space_write_4(sc->sc_st, sc->sc_ioh, (o),(v))


/*
 * admsw_init_bufs:
 *
 *	Reset all four descriptor rings (high/low priority Tx and Rx)
 *	to a clean state: free any pending Tx mbufs, clear the Tx
 *	descriptors, (re)load Rx buffers, point the chip at the ring
 *	bases and reset the software ring indices.
 */
static void
admsw_init_bufs(struct admsw_softc *sc)
{
	int i;
	struct admsw_desc *desc;

	/* High-priority Tx ring: drop stale mbufs, zero descriptors. */
	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if (sc->sc_txhsoft[i].ds_mbuf != NULL) {
			m_freem(sc->sc_txhsoft[i].ds_mbuf);
			sc->sc_txhsoft[i].ds_mbuf = NULL;
		}
		desc = &sc->sc_txhdescs[i];
		desc->data = 0;
		desc->cntl = 0;
		desc->len = MAC_BUFLEN;
		desc->status = 0;
		ADMSW_CDTXHSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	/* Mark the last descriptor so the chip wraps around. */
	sc->sc_txhdescs[ADMSW_NTXHDESC - 1].data |= ADM5120_DMA_RINGEND;
	ADMSW_CDTXHSYNC(sc, ADMSW_NTXHDESC - 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* High-priority Rx ring: ensure every slot has a buffer. */
	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if (sc->sc_rxhsoft[i].ds_mbuf == NULL) {
			if (admsw_add_rxhbuf(sc, i) != 0)
				panic("admsw_init_bufs\n");
		} else
			ADMSW_INIT_RXHDESC(sc, i);
	}

	/* Low-priority Tx ring: same treatment as the high ring. */
	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if (sc->sc_txlsoft[i].ds_mbuf != NULL) {
			m_freem(sc->sc_txlsoft[i].ds_mbuf);
			sc->sc_txlsoft[i].ds_mbuf = NULL;
		}
		desc = &sc->sc_txldescs[i];
		desc->data = 0;
		desc->cntl = 0;
		desc->len = MAC_BUFLEN;
		desc->status = 0;
		ADMSW_CDTXLSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txldescs[ADMSW_NTXLDESC - 1].data |= ADM5120_DMA_RINGEND;
	ADMSW_CDTXLSYNC(sc, ADMSW_NTXLDESC - 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Low-priority Rx ring. */
	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if (sc->sc_rxlsoft[i].ds_mbuf == NULL) {
			if (admsw_add_rxlbuf(sc, i) != 0)
				panic("admsw_init_bufs\n");
		} else
			ADMSW_INIT_RXLDESC(sc, i);
	}

	/* Tell the chip where each ring starts. */
	REG_WRITE(SEND_HBADDR_REG, ADMSW_CDTXHADDR(sc, 0));
	REG_WRITE(SEND_LBADDR_REG, ADMSW_CDTXLADDR(sc, 0));
	REG_WRITE(RECV_HBADDR_REG, ADMSW_CDRXHADDR(sc, 0));
	REG_WRITE(RECV_LBADDR_REG, ADMSW_CDRXLADDR(sc, 0));

	/* Reset the software ring state. */
	sc->sc_txfree = ADMSW_NTXLDESC;
	sc->sc_txnext = 0;
	sc->sc_txdirty = 0;
	sc->sc_rxptr = 0;
}

/*
 * admsw_setvlan:
 *
 *	Program the hardware VLAN group registers from a 6-byte port
 *	membership table (one byte per VLAN, see vlan_matrix).
 *
 *	NOTE(review): the bytes are read through (plain, possibly
 *	signed) char; a value with bit 7 set would sign-extend into
 *	the upper bits of the register word.  Harmless for the default
 *	table, but user-supplied tables arrive here via SIOCSDRVSPEC —
 *	TODO confirm intended behavior for bit 7.
 */
static void
admsw_setvlan(struct admsw_softc *sc, char matrix[6])
{
	uint32_t i;

	i = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16) + (matrix[3] << 24);
	REG_WRITE(VLAN_G1_REG, i);
	i = matrix[4] + (matrix[5] << 8);
	REG_WRITE(VLAN_G2_REG, i);
}

/*
 * admsw_reset:
 *
 *	Full hardware reset and base configuration of the switch:
 *	quiesce DMA, reset PHYs and the switch core, reprogram the
 *	CPU port, PHY control, interrupt, flow-control and VLAN
 *	registers, and install the per-VLAN unicast MAC addresses.
 */
static void
admsw_reset(struct admsw_softc *sc)
{
	uint32_t wdog1;
	int i;

	/* Disable all ports and the CPU port before resetting. */
	REG_WRITE(PORT_CONF0_REG,
	    REG_READ(PORT_CONF0_REG) | PORT_CONF0_DP_MASK);
	REG_WRITE(CPUP_CONF_REG,
	    REG_READ(CPUP_CONF_REG) | CPUP_CONF_DCPUP);

	/* Wait for DMA to complete.  Overkill.  In 3ms, we can
	 * send at least two entire 1500-byte packets at 10 Mb/s.
	 */
	DELAY(3000);

	/* The datasheet recommends that we move all PHYs to reset
	 * state prior to software reset.
	 */
	REG_WRITE(PHY_CNTL2_REG,
	    REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK);

	/* Reset the switch.
 */
	REG_WRITE(ADMSW_SW_RES, 0x1);

	/* Give the software reset time to take hold. */
	DELAY(100 * 1000);

	REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO);

	/* begin old code */
	REG_WRITE(CPUP_CONF_REG,
	    CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
	    CPUP_CONF_DMCP_MASK);

	REG_WRITE(PORT_CONF0_REG, PORT_CONF0_EMCP_MASK | PORT_CONF0_EMBP_MASK);

	/* Re-enable autonegotiation, take PHYs out of reset, auto-MDIX. */
	REG_WRITE(PHY_CNTL2_REG,
	    REG_READ(PHY_CNTL2_REG) | PHY_CNTL2_ANE_MASK | PHY_CNTL2_PHYR_MASK |
	    PHY_CNTL2_AMDIX_MASK);

	REG_WRITE(PHY_CNTL3_REG, REG_READ(PHY_CNTL3_REG) | PHY_CNTL3_RNT);

	/* Mask all interrupt sources, then acknowledge any pending. */
	REG_WRITE(ADMSW_INT_MASK, INT_MASK);
	REG_WRITE(ADMSW_INT_ST, INT_MASK);

	/*
	 * While in DDB, we stop servicing interrupts, RX ring
	 * fills up and when free block counter falls behind FC
	 * threshold, the switch starts to emit 802.3x PAUSE
	 * frames.  This can upset peer switches.
	 *
	 * Stop this from happening by disabling FC and D2
	 * thresholds.
	 */
	REG_WRITE(FC_TH_REG,
	    REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK));

	admsw_setvlan(sc, vlan_matrix);

	/*
	 * Install one unicast MAC address per VLAN: the base address
	 * with (base + i) in the final octet for VLAN i.
	 */
	for (i = 0; i < SW_DEVS; i++) {
		REG_WRITE(MAC_WT1_REG,
		    sc->sc_enaddr[2] |
		    (sc->sc_enaddr[3]<<8) |
		    (sc->sc_enaddr[4]<<16) |
		    ((sc->sc_enaddr[5]+i)<<24));
		REG_WRITE(MAC_WT0_REG, (i<<MAC_WT0_VLANID_SHIFT) |
		    (sc->sc_enaddr[0]<<16) | (sc->sc_enaddr[1]<<24) |
		    MAC_WT0_WRITE | MAC_WT0_VLANID_EN);

		/* NOTE(review): busy-wait with no timeout; this hangs
		 * hard if the write-done bit never sets. */
		while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE));
	}
	/* Disable the SoC watchdog timer. */
	wdog1 = REG_READ(ADM5120_WDOG1);
	REG_WRITE(ADM5120_WDOG1, wdog1 & ~ADM5120_WDOG1_WDE);
}

/*
 * admsw_attach:
 *
 *	Autoconfiguration attach: map the registers, hook up the
 *	interrupt, allocate descriptor/control DMA resources and
 *	attach one Ethernet interface per switch port.
 */
static void
admsw_attach(struct device *parent, struct device *self, void *aux)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	struct admsw_softc *sc = (void *) self;
	struct obio_attach_args *aa = aux;
	struct ifnet *ifp;
	bus_dma_segment_t seg;
	int error, i, rseg;
	prop_data_t pd;

	printf(": ADM5120 Switch Engine, %d ports\n", SW_DEVS);

	sc->sc_dmat = aa->oba_dt;
	sc->sc_st
 = aa->oba_st;

	/* The MAC address may arrive as a device property; otherwise
	 * fall back to a fixed, locally-administered address. */
	pd = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-address");

	if (pd == NULL) {
		enaddr[0] = 0x02;
		enaddr[1] = 0xaa;
		enaddr[2] = 0xbb;
		enaddr[3] = 0xcc;
		enaddr[4] = 0xdd;
		enaddr[5] = 0xee;
	} else
		memcpy(enaddr, prop_data_data_nocopy(pd), sizeof(enaddr));

	memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));

	printf("%s: base Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/* Map the device. */
	if (bus_space_map(sc->sc_st, aa->oba_addr, 512, 0, &sc->sc_ioh) != 0) {
		printf("%s: unable to map device\n", device_xname(&sc->sc_dev));
		return;
	}

	/* Hook up the interrupt handler. */
	sc->sc_ih = adm5120_intr_establish(aa->oba_irq, INTR_IRQ, admsw_intr, sc);

	if (sc->sc_ih == NULL) {
		printf("%s: unable to register interrupt handler\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct admsw_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}
	/* NOTE(review): the failure paths below return without releasing
	 * resources acquired earlier in attach (DMA memory, maps). */
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct admsw_control_data), (void *)&sc->sc_control_data,
	    0)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct admsw_control_data), 1,
	    sizeof(struct admsw_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		return;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct admsw_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    2, MCLBYTES, 0, 0,
		    &sc->sc_txhsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create txh DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_txhsoft[i].ds_mbuf = NULL;
	}
	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    2, MCLBYTES, 0, 0,
		    &sc->sc_txlsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create txl DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_txlsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Create the receive buffer DMA maps.
 */
	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxhsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rxh DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_rxhsoft[i].ds_mbuf = NULL;
	}
	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxlsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rxl DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_rxlsoft[i].ds_mbuf = NULL;
	}

	admsw_init_bufs(sc);

	admsw_reset(sc);

	/* One ifnet (with its own media instance) per switch port. */
	for (i = 0; i < SW_DEVS; i++) {
		ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange, admsw_mediastatus);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO);

		ifp = &sc->sc_ethercom[i].ec_if;
		strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
		/* NOTE(review): derives "admsw0", "admsw1", ... by bumping
		 * the sixth character of the device name; assumes the name
		 * fits that pattern — TODO confirm. */
		ifp->if_xname[5] += i;
		ifp->if_softc = sc;
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		ifp->if_ioctl = admsw_ioctl;
		ifp->if_start = admsw_start;
		ifp->if_watchdog = admsw_watchdog;
		ifp->if_init = admsw_init;
		ifp->if_stop = admsw_stop;
		ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
		IFQ_SET_MAXLEN(&ifp->if_snd, max(ADMSW_NTXLDESC, IFQ_MAXLEN));
		IFQ_SET_READY(&ifp->if_snd);

		/* Attach the interface.
 */
		if_attach(ifp);
		ether_ifattach(ifp, enaddr);
		/* Each port advertises a distinct MAC: base + port index. */
		enaddr[5]++;
	}

#ifdef ADMSW_EVENT_COUNTERS
	evcnt_attach_dynamic(&sc->sc_ev_txstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txstall");
	evcnt_attach_dynamic(&sc->sc_ev_rxstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxstall");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
#if 1
	evcnt_attach_dynamic(&sc->sc_ev_rxsync, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxsync");
#endif
#endif

	admwdog_attach(sc);

	/* Make sure the interface is shutdown during reboot. */
	sc->sc_sdhook = shutdownhook_establish(admsw_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	/* leave interrupts and cpu port disabled */
	return;
}


/*
 * admsw_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
admsw_shutdown(void *arg)
{
	struct admsw_softc *sc = arg;
	int i;

	for (i = 0; i < SW_DEVS; i++)
		admsw_stop(&sc->sc_ethercom[i].ec_if, 1);
}

/*
 * admsw_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  All SW_DEVS
 *	interfaces share one start routine and one low-priority Tx
 *	ring; the static `vlan' index round-robins between the
 *	per-interface send queues.
 */
static void
admsw_start(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct admsw_descsoft *ds;
	struct admsw_desc *desc;
	bus_dmamap_t dmamap;
	struct ether_header *eh;
	int error, nexttx, len, i;
	static int vlan = 0;

	/*
	 * Loop through the send queues, setting up transmit descriptors
	 * until we drain the queues, or use up all available transmit
	 * descriptors.
550 */ 551 for (;;) { 552 vlan++; 553 if (vlan == SW_DEVS) 554 vlan = 0; 555 i = vlan; 556 for (;;) { 557 ifp = &sc->sc_ethercom[i].ec_if; 558 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) == 559 IFF_RUNNING) { 560 /* Grab a packet off the queue. */ 561 IFQ_POLL(&ifp->if_snd, m0); 562 if (m0 != NULL) 563 break; 564 } 565 i++; 566 if (i == SW_DEVS) 567 i = 0; 568 if (i == vlan) 569 return; 570 } 571 vlan = i; 572 m = NULL; 573 574 /* Get a spare descriptor. */ 575 if (sc->sc_txfree == 0) { 576 /* No more slots left; notify upper layer. */ 577 ifp->if_flags |= IFF_OACTIVE; 578 ADMSW_EVCNT_INCR(&sc->sc_ev_txstall); 579 break; 580 } 581 nexttx = sc->sc_txnext; 582 desc = &sc->sc_txldescs[nexttx]; 583 ds = &sc->sc_txlsoft[nexttx]; 584 dmamap = ds->ds_dmamap; 585 586 /* 587 * Load the DMA map. If this fails, the packet either 588 * didn't fit in the alloted number of segments, or we 589 * were short on resources. In this case, we'll copy 590 * and try again. 591 */ 592 if (m0->m_pkthdr.len < ETHER_MIN_LEN || 593 bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 594 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) { 595 MGETHDR(m, M_DONTWAIT, MT_DATA); 596 if (m == NULL) { 597 printf("%s: unable to allocate Tx mbuf\n", 598 sc->sc_dev.dv_xname); 599 break; 600 } 601 if (m0->m_pkthdr.len > MHLEN) { 602 MCLGET(m, M_DONTWAIT); 603 if ((m->m_flags & M_EXT) == 0) { 604 printf("%s: unable to allocate Tx " 605 "cluster\n", sc->sc_dev.dv_xname); 606 m_freem(m); 607 break; 608 } 609 } 610 m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags; 611 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); 612 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; 613 if (m->m_pkthdr.len < ETHER_MIN_LEN) { 614 if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len) 615 panic("admsw_start: M_TRAILINGSPACE\n"); 616 memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0, 617 ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len); 618 m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN; 619 } 620 error = bus_dmamap_load_mbuf(sc->sc_dmat, 
dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				break;
			}
		}

		/* Commit: remove from the queue; if we copied, transmit
		 * the copy and free the original. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* A descriptor carries at most two buffer segments. */
		if (dmamap->dm_nsegs != 1 && dmamap->dm_nsegs != 2)
			panic("admsw_start: dm_nsegs == %d\n", dmamap->dm_nsegs);
		desc->data = dmamap->dm_segs[0].ds_addr;
		desc->len = len = dmamap->dm_segs[0].ds_len;
		if (dmamap->dm_nsegs > 1) {
			len += dmamap->dm_segs[1].ds_len;
			desc->cntl = dmamap->dm_segs[1].ds_addr | ADM5120_DMA_BUF2ENABLE;
		} else
			desc->cntl = 0;
		/* Total length plus the owning VLAN's bit. */
		desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
		eh = mtod(m0, struct ether_header *);
		/* Request hardware IPv4 header checksum when asked for. */
		if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
		    m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			desc->status |= ADM5120_DMA_CSUM;
		if (nexttx == ADMSW_NTXLDESC - 1)
			desc->data |= ADM5120_DMA_RINGEND;
		/* Hand the descriptor to the chip. */
		desc->data |= ADM5120_DMA_OWN;

		/* Sync the descriptor. */
		ADMSW_CDTXLSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Kick the transmitter. */
		REG_WRITE(SEND_TRIG_REG, 1);
		/* printf("send slot %d\n",nexttx); */

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txfree--;
		sc->sc_txnext = ADMSW_NEXTTXL(nexttx);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0);

		/* Set a watchdog timer in case the chip flakes out.
		 * The shared timer lives on interface 0. */
		sc->sc_ethercom[0].ec_if.if_timer = 5;
	}
}

/*
 * admsw_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
689 */ 690 static void 691 admsw_watchdog(struct ifnet *ifp) 692 { 693 struct admsw_softc *sc = ifp->if_softc; 694 int vlan; 695 696 #if 1 697 /* Check if an interrupt was lost. */ 698 if (sc->sc_txfree == ADMSW_NTXLDESC) { 699 printf("%s: watchdog false alarm\n", sc->sc_dev.dv_xname); 700 return; 701 } 702 if (sc->sc_ethercom[0].ec_if.if_timer != 0) 703 printf("%s: watchdog timer is %d!\n", sc->sc_dev.dv_xname, sc->sc_ethercom[0].ec_if.if_timer); 704 admsw_txintr(sc, 0); 705 if (sc->sc_txfree == ADMSW_NTXLDESC) { 706 printf("%s: tx IRQ lost (queue empty)\n", sc->sc_dev.dv_xname); 707 return; 708 } 709 if (sc->sc_ethercom[0].ec_if.if_timer != 0) { 710 printf("%s: tx IRQ lost (timer recharged)\n", sc->sc_dev.dv_xname); 711 return; 712 } 713 #endif 714 715 printf("%s: device timeout, txfree = %d\n", sc->sc_dev.dv_xname, sc->sc_txfree); 716 for (vlan = 0; vlan < SW_DEVS; vlan++) 717 admsw_stop(&sc->sc_ethercom[vlan].ec_if, 0); 718 for (vlan = 0; vlan < SW_DEVS; vlan++) 719 (void) admsw_init(&sc->sc_ethercom[vlan].ec_if); 720 721 /* Try to get more packets going. */ 722 admsw_start(ifp); 723 } 724 725 /* 726 * admsw_ioctl: [ifnet interface function] 727 * 728 * Handle control requests from the operator. 
729 */ 730 static int 731 admsw_ioctl(struct ifnet *ifp, u_long cmd, void *data) 732 { 733 struct admsw_softc *sc = ifp->if_softc; 734 struct ifdrv *ifd; 735 int s, error, port; 736 737 s = splnet(); 738 739 switch (cmd) { 740 case SIOCSIFCAP: 741 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) 742 error = 0; 743 break; 744 case SIOCSIFMEDIA: 745 case SIOCGIFMEDIA: 746 port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 747 if (port >= SW_DEVS) 748 error = EOPNOTSUPP; 749 else 750 error = ifmedia_ioctl(ifp, (struct ifreq *)data, 751 &sc->sc_ifmedia[port], cmd); 752 break; 753 754 case SIOCGDRVSPEC: 755 case SIOCSDRVSPEC: 756 ifd = (struct ifdrv *) data; 757 if (ifd->ifd_cmd != 0 || ifd->ifd_len != sizeof(vlan_matrix)) { 758 error = EINVAL; 759 break; 760 } 761 if (cmd == SIOCGDRVSPEC) { 762 error = copyout(vlan_matrix, ifd->ifd_data, 763 sizeof(vlan_matrix)); 764 } else { 765 error = copyin(ifd->ifd_data, vlan_matrix, 766 sizeof(vlan_matrix)); 767 admsw_setvlan(sc, vlan_matrix); 768 } 769 break; 770 771 default: 772 error = ether_ioctl(ifp, cmd, data); 773 if (error == ENETRESET) { 774 /* 775 * Multicast list has changed; set the hardware filter 776 * accordingly. 777 */ 778 admsw_set_filter(sc); 779 error = 0; 780 } 781 break; 782 } 783 784 /* Try to get more packets going. */ 785 admsw_start(ifp); 786 787 splx(s); 788 return (error); 789 } 790 791 792 /* 793 * admsw_intr: 794 * 795 * Interrupt service routine. 
 */
static int
admsw_intr(void *arg)
{
	struct admsw_softc *sc = arg;
	uint32_t pending;
	char buf[64];

	pending = REG_READ(ADMSW_INT_ST);

	/* Log any interrupt causes we do not service below. */
	if ((pending & ~(ADMSW_INTR_RHD|ADMSW_INTR_RLD|ADMSW_INTR_SHD|ADMSW_INTR_SLD|ADMSW_INTR_W1TE|ADMSW_INTR_W0TE)) != 0) {
		snprintb(buf, sizeof(buf), ADMSW_INT_FMT, pending);
		printf("%s: pending=%s\n", __func__, buf);
	}
	/* Acknowledge everything we saw. */
	REG_WRITE(ADMSW_INT_ST, pending);

	/* No interface is up yet; nothing to service. */
	if (sc->ndevs == 0)
		return (0);

	if ((pending & ADMSW_INTR_RHD) != 0)
		admsw_rxintr(sc, 1);

	if ((pending & ADMSW_INTR_RLD) != 0)
		admsw_rxintr(sc, 0);

	if ((pending & ADMSW_INTR_SHD) != 0)
		admsw_txintr(sc, 1);

	if ((pending & ADMSW_INTR_SLD) != 0)
		admsw_txintr(sc, 0);

	return (1);
}

/*
 * admsw_txintr:
 *
 *	Helper; handle transmit interrupts.
 *
 *	NOTE(review): the `prio' argument is never referenced; only the
 *	low-priority Tx ring is reaped regardless of which send
 *	interrupt fired.
 */
static void
admsw_txintr(struct admsw_softc *sc, int prio)
{
	struct ifnet *ifp;
	struct admsw_desc *desc;
	struct admsw_descsoft *ds;
	int i, vlan;
	int gotone = 0;

	/* printf("txintr: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
	/* Reap completed descriptors in ring order, starting at the
	 * oldest outstanding slot. */
	for (i = sc->sc_txdirty; sc->sc_txfree != ADMSW_NTXLDESC;
	    i = ADMSW_NEXTTXL(i)) {

		ADMSW_CDTXLSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txldescs[i];
		ds = &sc->sc_txlsoft[i];
		/* Still owned by the chip: stop reaping. */
		if (desc->data & ADM5120_DMA_OWN) {
			ADMSW_CDTXLSYNC(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		/* Recover the owning interface from the VLAN bit set
		 * in the descriptor status at transmit time. */
		vlan = ffs(desc->status & 0x3f) - 1;
		if (vlan < 0 || vlan >= SW_DEVS)
			panic("admsw_txintr: bad vlan\n");
		ifp = &sc->sc_ethercom[vlan].ec_if;
		gotone = 1;
		/* printf("clear tx slot %d\n",i); */

		ifp->if_opackets++;

		sc->sc_txfree++;
	}

	if (gotone) {
		sc->sc_txdirty = i;
#ifdef ADMSW_EVENT_COUNTERS
		ADMSW_EVCNT_INCR(&sc->sc_ev_txintr);
#endif
		/* Descriptors were freed: let every interface queue again. */
		for (vlan = 0; vlan < SW_DEVS; vlan++)
			sc->sc_ethercom[vlan].ec_if.if_flags &= ~IFF_OACTIVE;

		ifp = &sc->sc_ethercom[0].ec_if;

		/* Try to queue more packets. */
		admsw_start(ifp);

		/*
		 * If there are no more pending transmissions,
		 * cancel the watchdog timer.
		 */
		if (sc->sc_txfree == ADMSW_NTXLDESC)
			ifp->if_timer = 0;

	}

	/* printf("txintr end: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
}

/*
 * admsw_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
admsw_rxintr(struct admsw_softc *sc, int high)
{
	struct ifnet *ifp;
	struct admsw_descsoft *ds;
	struct mbuf *m;
	uint32_t stat;
	int i, len, port, vlan;

	/* printf("rxintr\n"); */
	/* High-priority receive is not handled; treat it as fatal. */
	if (high)
		panic("admsw_rxintr: high priority packet\n");

#ifdef ADMSW_EVENT_COUNTERS
	int pkts = 0;
#endif

#if 1
	/*
	 * Sanity check: sc_rxptr should reference a descriptor the
	 * chip has handed back.  If it does not, scan forward for the
	 * first chip-owned slot to resynchronize with the hardware.
	 */
	ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
		ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	else {
		i = sc->sc_rxptr;
		do {
			ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			i = ADMSW_NEXTRXL(i);
			/* the ring is empty, just return.
 */
			if (i == sc->sc_rxptr)
				return;
			ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		} while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN);
		ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Re-check the original pointer before adopting `i'. */
		ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
			ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		else {
			ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			/* We've fallen behind the chip: catch it. */
			printf("%s: RX ring resync, base=%x, work=%x, %d -> %d\n",
			    sc->sc_dev.dv_xname, REG_READ(RECV_LBADDR_REG),
			    REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i);
			sc->sc_rxptr = i;
			ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync);
		}
	}
#endif
	/* Process every descriptor the chip has handed back. */
	for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) {
		ds = &sc->sc_rxlsoft[i];

		ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Chip still owns this slot: we are done. */
		if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) {
			ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			break;
		}

		/* printf("process slot %d\n",i); */

#ifdef ADMSW_EVENT_COUNTERS
		pkts++;
#endif

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* Strip the CRC and map the source port to the first
		 * interface whose VLAN contains that port. */
		stat = sc->sc_rxldescs[i].status;
		len = (stat & ADM5120_DMA_LEN) >> ADM5120_DMA_LENSHIFT;
		len -= ETHER_CRC_LEN;
		port = (stat & ADM5120_DMA_PORTID) >> ADM5120_DMA_PORTSHIFT;
		for (vlan = 0; vlan < SW_DEVS; vlan++)
			if ((1 << port) & vlan_matrix[vlan])
				break;
		if (vlan == SW_DEVS)
			vlan = 0;
		ifp = &sc->sc_ethercom[vlan].ec_if;

		m = ds->ds_mbuf;
		/* Re-arm the slot first; on failure, recycle the old
		 * mbuf in place and drop this packet. */
		if (admsw_add_rxlbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			ADMSW_INIT_RXLDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		/* Propagate the hardware IPv4 checksum verdict. */
		if ((stat & ADM5120_DMA_TYPE) == ADM5120_DMA_TYPE_IP) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (stat & ADM5120_DMA_CSUMFAIL)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		/* Pass this up to any BPF listeners. */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
		ifp->if_ipackets++;
	}
#ifdef ADMSW_EVENT_COUNTERS
	if (pkts)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxintr);

	/* A full ring's worth in one pass suggests the chip stalled
	 * waiting for us. */
	if (pkts == ADMSW_NRXLDESC)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxstall);
#endif

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * admsw_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
admsw_init(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;

	/* printf("admsw_init called\n"); */

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		/* The shared hardware is (re)initialized only when the
		 * first of the SW_DEVS interfaces comes up. */
		if (sc->ndevs == 0) {
			admsw_init_bufs(sc);
			admsw_reset(sc);
			REG_WRITE(CPUP_CONF_REG,
			    CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
			    CPUP_CONF_DMCP_MASK);
			/* clear all pending interrupts */
			REG_WRITE(ADMSW_INT_ST, INT_MASK);

			/* enable needed interrupts */
			REG_WRITE(ADMSW_INT_MASK, REG_READ(ADMSW_INT_MASK) &
			    ~(ADMSW_INTR_SHD | ADMSW_INTR_SLD | ADMSW_INTR_RHD |
			    ADMSW_INTR_RLD | ADMSW_INTR_HDF | ADMSW_INTR_LDF));
		}
		sc->ndevs++;
	}

	/* Set the receive filter. */
	admsw_set_filter(sc);

	/* mark iface as running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

/*
 * admsw_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
admsw_stop(struct ifnet *ifp, int disable)
{
	struct admsw_softc *sc = ifp->if_softc;

	/* printf("admsw_stop: %d\n",disable); */

	/* Note: the "disable" argument is not used by this implementation. */

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	/*
	 * The hardware is shared; only quiesce it when the last
	 * running pseudo-interface goes down.
	 */
	if (--sc->ndevs == 0) {
		/* printf("debug: de-initializing hardware\n"); */

		/* disable cpu port */
		REG_WRITE(CPUP_CONF_REG,
		    CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
		    CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK);

		/* XXX We should disable, then clear? --dyoung */
		/* clear all pending interrupts */
		REG_WRITE(ADMSW_INT_ST, INT_MASK);

		/* disable interrupts */
		REG_WRITE(ADMSW_INT_MASK, INT_MASK);
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	return;
}

/*
 * admsw_set_filter:
 *
 *	Set up the receive filter: decide, per switch port, whether
 *	unknown-unicast and multicast frames are forwarded to the CPU.
 */
static void
admsw_set_filter(struct admsw_softc *sc)
{
	int i;
	uint32_t allmc, anymc, conf, promisc;
	struct ether_multi *enm;
	struct ethercom *ec;
	struct ifnet *ifp;
	struct ether_multistep step;

	/* Find which ports should be operated in promisc mode.
 */
	allmc = anymc = promisc = 0;
	for (i = 0; i < SW_DEVS; i++) {
		ec = &sc->sc_ethercom[i];
		ifp = &ec->ec_if;
		if (ifp->if_flags & IFF_PROMISC)
			promisc |= vlan_matrix[i];

		ifp->if_flags &= ~IFF_ALLMULTI;

		/* Walk this VLAN's multicast address list. */
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
				/*
				 * Address ranges cannot be filtered exactly;
				 * fall back to accepting all multicast on
				 * this VLAN's ports.
				 */
				printf("%s: punting on mcast range\n",
				    __func__);
				ifp->if_flags |= IFF_ALLMULTI;
				allmc |= vlan_matrix[i];
				break;
			}

			anymc |= vlan_matrix[i];

#if 0
			/* XXX extract subroutine --dyoung */
			REG_WRITE(MAC_WT1_REG,
			    enm->enm_addrlo[2] |
			    (enm->enm_addrlo[3] << 8) |
			    (enm->enm_addrlo[4] << 16) |
			    (enm->enm_addrlo[5] << 24));
			REG_WRITE(MAC_WT0_REG,
			    (i << MAC_WT0_VLANID_SHIFT) |
			    (enm->enm_addrlo[0] << 16) |
			    (enm->enm_addrlo[1] << 24) |
			    MAC_WT0_WRITE | MAC_WT0_VLANID_EN);
			/* timeout? */
			while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE));
#endif

			/* load h/w with mcast address, port = CPU */
			ETHER_NEXT_MULTI(step, enm);
		}
	}

	conf = REG_READ(CPUP_CONF_REG);
	/* 1 Disable forwarding of unknown & multicast packets to
	 *   CPU on all ports.
	 * 2 Enable forwarding of unknown & multicast packets to
	 *   CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set.
	 */
	conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK;
	/* Enable forwarding of unknown packets to CPU on selected ports. */
	conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK);
	/*
	 * NOTE(review): allmc and anymc may share port bits (a VLAN can
	 * contribute to anymc from exact entries before a range entry
	 * sets allmc and breaks out).  XOR-ing both masks below toggles
	 * such shared bits twice, leaving multicast forwarding to the
	 * CPU disabled for those ports -- confirm this is intended.
	 */
	conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
	conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
	REG_WRITE(CPUP_CONF_REG, conf);
}

/*
 * admsw_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
1174 */ 1175 int 1176 admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high) 1177 { 1178 struct admsw_descsoft *ds; 1179 struct mbuf *m; 1180 int error; 1181 1182 if (high) 1183 ds = &sc->sc_rxhsoft[idx]; 1184 else 1185 ds = &sc->sc_rxlsoft[idx]; 1186 1187 MGETHDR(m, M_DONTWAIT, MT_DATA); 1188 if (m == NULL) 1189 return (ENOBUFS); 1190 1191 MCLGET(m, M_DONTWAIT); 1192 if ((m->m_flags & M_EXT) == 0) { 1193 m_freem(m); 1194 return (ENOBUFS); 1195 } 1196 1197 if (ds->ds_mbuf != NULL) 1198 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); 1199 1200 ds->ds_mbuf = m; 1201 1202 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap, 1203 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1204 BUS_DMA_READ | BUS_DMA_NOWAIT); 1205 if (error) { 1206 printf("%s: can't load rx DMA map %d, error = %d\n", 1207 sc->sc_dev.dv_xname, idx, error); 1208 panic("admsw_add_rxbuf"); /* XXX */ 1209 } 1210 1211 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, 1212 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1213 1214 if (high) 1215 ADMSW_INIT_RXHDESC(sc, idx); 1216 else 1217 ADMSW_INIT_RXLDESC(sc, idx); 1218 1219 return (0); 1220 } 1221 1222 int 1223 admsw_mediachange(struct ifnet *ifp) 1224 { 1225 struct admsw_softc *sc = ifp->if_softc; 1226 int port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 1227 struct ifmedia *ifm = &sc->sc_ifmedia[port]; 1228 int old, new, val; 1229 1230 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1231 return (EINVAL); 1232 1233 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 1234 val = PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX; 1235 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { 1236 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1237 val = PHY_CNTL2_100M|PHY_CNTL2_FDX; 1238 else 1239 val = PHY_CNTL2_100M; 1240 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) { 1241 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1242 val = PHY_CNTL2_FDX; 1243 else 1244 val = 0; 1245 } else 1246 return (EINVAL); 1247 1248 old = REG_READ(PHY_CNTL2_REG); 1249 new = old & 
	    ~((PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX) << port);
	new |= (val << port);

	/* Skip the register write when the settings are unchanged. */
	if (new != old)
		REG_WRITE(PHY_CNTL2_REG, new);

	return (0);
}

/*
 * admsw_mediastatus:	[ifmedia interface function]
 *
 *	Report this port's link state and active media, derived from
 *	the PHY status register.
 */
void
admsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct admsw_softc *sc = ifp->if_softc;
	/* Recover the port number from the ifnet's position in sc_ethercom. */
	int port = (struct ethercom *)ifp - sc->sc_ethercom;	/* XXX */
	int status;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Each port's status bits are shifted down by the port number. */
	status = REG_READ(PHY_ST_REG) >> port;

	if ((status & PHY_ST_LINKUP) == 0) {
		/* No link on this port. */
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	/* Link is up: report speed and duplex from the status bits. */
	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= (status & PHY_ST_100M) ? IFM_100_TX : IFM_10_T;
	if (status & PHY_ST_FDX)
		ifmr->ifm_active |= IFM_FDX;
}