1 /* $NetBSD: if_admsw.c,v 1.3 2007/04/22 19:26:25 dyoung Exp $ */ 2 3 /*- 4 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or 8 * without modification, are permitted provided that the following 9 * conditions are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above 13 * copyright notice, this list of conditions and the following 14 * disclaimer in the documentation and/or other materials provided 15 * with the distribution. 16 * 3. The names of the authors may not be used to endorse or promote 17 * products derived from this software without specific prior 18 * written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY 21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 23 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, 25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, 27 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 29 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY 31 * OF SUCH DAMAGE. 32 */ 33 /* 34 * Copyright (c) 2001 Wasabi Systems, Inc. 35 * All rights reserved. 36 * 37 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 1. 
Redistributions of source code must retain the above copyright 43 * notice, this list of conditions and the following disclaimer. 44 * 2. Redistributions in binary form must reproduce the above copyright 45 * notice, this list of conditions and the following disclaimer in the 46 * documentation and/or other materials provided with the distribution. 47 * 3. All advertising materials mentioning features or use of this software 48 * must display the following acknowledgement: 49 * This product includes software developed for the NetBSD Project by 50 * Wasabi Systems, Inc. 51 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 52 * or promote products derived from this software without specific prior 53 * written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 58 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 59 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 60 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 61 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 62 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 63 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 64 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 65 * POSSIBILITY OF SUCH DAMAGE. 66 */ 67 68 /* 69 * Device driver for Alchemy Semiconductor Au1x00 Ethernet Media 70 * Access Controller. 71 * 72 * TODO: 73 * 74 * Better Rx buffer management; we want to get new Rx buffers 75 * to the chip more quickly than we currently do. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_admsw.c,v 1.3 2007/04/22 19:26:25 dyoung Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <prop/proplib.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <sys/gpio.h>
#include <dev/gpio/gpiovar.h>

#include <mips/adm5120/include/adm5120reg.h>
#include <mips/adm5120/include/adm5120var.h>
#include <mips/adm5120/include/adm5120_obiovar.h>
#include <mips/adm5120/dev/if_admswreg.h>
#include <mips/adm5120/dev/if_admswvar.h>

/*
 * VLAN-to-port membership matrix, one byte per logical interface
 * (VLAN).  Each byte is a bitmask of switch ports belonging to that
 * VLAN; bit 6 is the CPU port, bits 0-5 are the external ports.  The
 * default maps VLAN n to the CPU port plus external port n.  The
 * matrix is read/written at runtime through SIOCGDRVSPEC/SIOCSDRVSPEC
 * and programmed into the switch by admsw_setvlan().
 */
static uint8_t vlan_matrix[SW_DEVS] = {
	(1 << 6) | (1 << 0),		/* CPU + port0 */
	(1 << 6) | (1 << 1),		/* CPU + port1 */
	(1 << 6) | (1 << 2),		/* CPU + port2 */
	(1 << 6) | (1 << 3),		/* CPU + port3 */
	(1 << 6) | (1 << 4),		/* CPU + port4 */
	(1 << 6) | (1 << 5),		/* CPU + port5 */
};

/* Event-counter bump; compiles away when ADMSW_EVENT_COUNTERS is unset. */
#ifdef ADMSW_EVENT_COUNTERS
#define ADMSW_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define ADMSW_EVCNT_INCR(ev)	/* nothing */
#endif

static void admsw_start(struct ifnet *);
static void admsw_watchdog(struct ifnet *);
static int admsw_ioctl(struct ifnet *, u_long, void *);
static int admsw_init(struct ifnet *);
static void admsw_stop(struct ifnet *, int);

static void admsw_shutdown(void *);

static void admsw_reset(struct admsw_softc *);
static
void admsw_set_filter(struct admsw_softc *);

static int admsw_intr(void *);
static void admsw_txintr(struct admsw_softc *, int);
static void admsw_rxintr(struct admsw_softc *, int);
static int admsw_add_rxbuf(struct admsw_softc *, int, int);
/* Convenience wrappers: third argument selects high (1) or low (0) ring. */
#define admsw_add_rxhbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 1)
#define admsw_add_rxlbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 0)

static int admsw_mediachange(struct ifnet *);
static void admsw_mediastatus(struct ifnet *, struct ifmediareq *);

static int admsw_match(struct device *, struct cfdata *, void *);
static void admsw_attach(struct device *, struct device *, void *);

CFATTACH_DECL(admsw, sizeof(struct admsw_softc),
    admsw_match, admsw_attach, NULL, NULL);

/*
 * admsw_match:
 *
 *	Autoconfiguration match routine: accept the device when the
 *	obio attach arguments carry our driver name.
 */
static int
admsw_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct obio_attach_args *aa = aux;

	return strcmp(aa->oba_name, cf->cf_name) == 0;
}

/* Register accessors; both expect a local `sc' (softc) to be in scope. */
#define REG_READ(o)	bus_space_read_4(sc->sc_st, sc->sc_ioh, (o))
#define REG_WRITE(o,v)	bus_space_write_4(sc->sc_st, sc->sc_ioh, (o),(v))


/*
 * admsw_init_bufs:
 *
 *	Reset all four DMA descriptor rings (high/low priority TX and
 *	RX): free any mbufs still held on the TX rings, re-initialize
 *	every descriptor, mark the ring ends, point the chip at the
 *	ring bases, and reset the software ring indices.
 */
static void
admsw_init_bufs(struct admsw_softc *sc)
{
	int i;
	struct admsw_desc *desc;

	/* High-priority TX ring: drop held packets, clear descriptors. */
	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if (sc->sc_txhsoft[i].ds_mbuf != NULL) {
			m_freem(sc->sc_txhsoft[i].ds_mbuf);
			sc->sc_txhsoft[i].ds_mbuf = NULL;
		}
		desc = &sc->sc_txhdescs[i];
		desc->data = 0;
		desc->cntl = 0;
		desc->len = MAC_BUFLEN;
		desc->status = 0;
		ADMSW_CDTXHSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	/* Mark the last descriptor so the chip wraps to the ring base. */
	sc->sc_txhdescs[ADMSW_NTXHDESC - 1].data |= ADM5120_DMA_RINGEND;
	ADMSW_CDTXHSYNC(sc, ADMSW_NTXHDESC - 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* High-priority RX ring: ensure every slot has a buffer. */
	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if (sc->sc_rxhsoft[i].ds_mbuf == NULL) {
			if (admsw_add_rxhbuf(sc, i) != 0)
				panic("admsw_init_bufs\n");
		} else
			ADMSW_INIT_RXHDESC(sc, i);
	}

	/* Low-priority TX ring: same treatment as the high ring. */
	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if (sc->sc_txlsoft[i].ds_mbuf != NULL) {
			m_freem(sc->sc_txlsoft[i].ds_mbuf);
			sc->sc_txlsoft[i].ds_mbuf = NULL;
		}
		desc = &sc->sc_txldescs[i];
		desc->data = 0;
		desc->cntl = 0;
		desc->len = MAC_BUFLEN;
		desc->status = 0;
		ADMSW_CDTXLSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txldescs[ADMSW_NTXLDESC - 1].data |= ADM5120_DMA_RINGEND;
	ADMSW_CDTXLSYNC(sc, ADMSW_NTXLDESC - 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Low-priority RX ring: ensure every slot has a buffer. */
	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if (sc->sc_rxlsoft[i].ds_mbuf == NULL) {
			if (admsw_add_rxlbuf(sc, i) != 0)
				panic("admsw_init_bufs\n");
		} else
			ADMSW_INIT_RXLDESC(sc, i);
	}

	/* Tell the chip where the four rings start. */
	REG_WRITE(SEND_HBADDR_REG, ADMSW_CDTXHADDR(sc, 0));
	REG_WRITE(SEND_LBADDR_REG, ADMSW_CDTXLADDR(sc, 0));
	REG_WRITE(RECV_HBADDR_REG, ADMSW_CDRXHADDR(sc, 0));
	REG_WRITE(RECV_LBADDR_REG, ADMSW_CDRXLADDR(sc, 0));

	/* Reset software ring state. */
	sc->sc_txfree = ADMSW_NTXLDESC;
	sc->sc_txnext = 0;
	sc->sc_txdirty = 0;
	sc->sc_rxptr = 0;
}

/*
 * admsw_setvlan:
 *
 *	Program the 6-byte VLAN/port membership matrix into the
 *	switch: bytes 0-3 are packed into VLAN_G1, bytes 4-5 into
 *	VLAN_G2.
 */
static void
admsw_setvlan(struct admsw_softc *sc, char matrix[6])
{
	uint32_t i;

	i = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16) + (matrix[3] << 24);
	REG_WRITE(VLAN_G1_REG, i);
	i = matrix[4] + (matrix[5] << 8);
	REG_WRITE(VLAN_G2_REG, i);
}

/*
 * admsw_reset:
 *
 *	Full software reset of the switch: quiesce DMA, reset the
 *	PHYs, reset the core, then reprogram base configuration, the
 *	VLAN matrix, and the per-VLAN station MAC addresses, and
 *	finally disable the on-chip watchdog timer.
 */
static void
admsw_reset(struct admsw_softc *sc)
{
	uint32_t wdog1;
	int i;

	/* Disable all ports and the CPU port before resetting. */
	REG_WRITE(PORT_CONF0_REG,
	    REG_READ(PORT_CONF0_REG) | PORT_CONF0_DP_MASK);
	REG_WRITE(CPUP_CONF_REG,
	    REG_READ(CPUP_CONF_REG) | CPUP_CONF_DCPUP);

	/* Wait for DMA to complete.  Overkill.	 In 3ms, we can
	 * send at least two entire 1500-byte packets at 10 Mb/s.
	 */
	DELAY(3000);

	/* The datasheet recommends that we move all PHYs to reset
	 * state prior to software reset.
	 */
	REG_WRITE(PHY_CNTL2_REG,
	    REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK);

	/* Reset the switch. */
	REG_WRITE(ADMSW_SW_RES, 0x1);

	DELAY(100 * 1000);

	REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO);

	/* begin old code */
	REG_WRITE(CPUP_CONF_REG,
	    CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
	    CPUP_CONF_DMCP_MASK);

	REG_WRITE(PORT_CONF0_REG, PORT_CONF0_EMCP_MASK | PORT_CONF0_EMBP_MASK);

	/* Take PHYs out of reset, enable autonegotiation and auto-MDIX. */
	REG_WRITE(PHY_CNTL2_REG,
	    REG_READ(PHY_CNTL2_REG) | PHY_CNTL2_ANE_MASK | PHY_CNTL2_PHYR_MASK |
	    PHY_CNTL2_AMDIX_MASK);

	REG_WRITE(PHY_CNTL3_REG, REG_READ(PHY_CNTL3_REG) | PHY_CNTL3_RNT);

	/* Mask and acknowledge all interrupts. */
	REG_WRITE(ADMSW_INT_MASK, INT_MASK);
	REG_WRITE(ADMSW_INT_ST, INT_MASK);

	/*
	 * While in DDB, we stop servicing interrupts, RX ring
	 * fills up and when free block counter falls behind FC
	 * threshold, the switch starts to emit 802.3x PAUSE
	 * frames.  This can upset peer switches.
	 *
	 * Stop this from happening by disabling FC and D2
	 * thresholds.
	 */
	REG_WRITE(FC_TH_REG,
	    REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK));

	admsw_setvlan(sc, vlan_matrix);

	/*
	 * Program one unicast MAC address per VLAN; the last address
	 * byte is offset by the VLAN index (one address per port).
	 */
	for (i = 0; i < SW_DEVS; i++) {
		REG_WRITE(MAC_WT1_REG,
		    sc->sc_enaddr[2] |
		    (sc->sc_enaddr[3]<<8) |
		    (sc->sc_enaddr[4]<<16) |
		    ((sc->sc_enaddr[5]+i)<<24));
		REG_WRITE(MAC_WT0_REG, (i<<MAC_WT0_VLANID_SHIFT) |
		    (sc->sc_enaddr[0]<<16) | (sc->sc_enaddr[1]<<24) |
		    MAC_WT0_WRITE | MAC_WT0_VLANID_EN);

		/* NOTE(review): busy-wait with no timeout — hangs if the
		 * hardware never sets WRITE_DONE. */
		while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE));
	}
	/* Disable the hardware watchdog. */
	wdog1 = REG_READ(ADM5120_WDOG1);
	REG_WRITE(ADM5120_WDOG1, wdog1 & ~ADM5120_WDOG1_WDE);
}

/*
 * admsw_attach:
 *
 *	Autoconfiguration attach routine: map the device, hook up the
 *	interrupt, allocate and load the DMA control structures,
 *	create per-descriptor DMA maps, reset the switch, and attach
 *	one Ethernet interface per switch port.
 *
 *	NOTE(review): the error paths below simply return without
 *	releasing resources acquired earlier (mappings, DMA memory,
 *	interrupt handler) — acceptable only because attach failure
 *	here is fatal for the port; worth confirming.
 */
static void
admsw_attach(struct device *parent, struct device *self, void *aux)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	struct admsw_softc *sc = (void *) self;
	struct obio_attach_args *aa = aux;
	struct ifnet *ifp;
	bus_dma_segment_t seg;
	int error, i, rseg;
	prop_data_t pd;

	printf(": ADM5120 Switch Engine, %d ports\n", SW_DEVS);

	sc->sc_dmat = aa->oba_dt;
	sc->sc_st = aa->oba_st;

	/* Base MAC address from device properties, else a fixed
	 * locally-administered fallback (02:aa:bb:cc:dd:ee). */
	pd = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");

	if (pd == NULL) {
		enaddr[0] = 0x02;
		enaddr[1] = 0xaa;
		enaddr[2] = 0xbb;
		enaddr[3] = 0xcc;
		enaddr[4] = 0xdd;
		enaddr[5] = 0xee;
	} else
		memcpy(enaddr, prop_data_data_nocopy(pd), sizeof(enaddr));

	memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));

	printf("%s: base Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/* Map the device. */
	if (bus_space_map(sc->sc_st, aa->oba_addr, 512, 0, &sc->sc_ioh) != 0) {
		printf("%s: unable to map device\n", device_xname(&sc->sc_dev));
		return;
	}

	/* Hook up the interrupt handler. */
	sc->sc_ih = adm5120_intr_establish(aa->oba_irq, INTR_IRQ, admsw_intr, sc);

	if (sc->sc_ih == NULL) {
		printf("%s: unable to register interrupt handler\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct admsw_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct admsw_control_data), (void *)&sc->sc_control_data,
	    0)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct admsw_control_data), 1,
	    sizeof(struct admsw_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		return;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct admsw_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    2, MCLBYTES, 0, 0,
		    &sc->sc_txhsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create txh DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_txhsoft[i].ds_mbuf = NULL;
	}
	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    2, MCLBYTES, 0, 0,
		    &sc->sc_txlsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create txl DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_txlsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxhsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rxh DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_rxhsoft[i].ds_mbuf = NULL;
	}
	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxlsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rxl DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			return;
		}
		sc->sc_rxlsoft[i].ds_mbuf = NULL;
	}

	admsw_init_bufs(sc);

	admsw_reset(sc);

	/* One ifnet per switch port, each with its own media set. */
	for (i = 0; i < SW_DEVS; i++) {
		ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange, admsw_mediastatus);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO);

		ifp = &sc->sc_ethercom[i].ec_if;
		strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
		/* Derive per-port names by bumping the unit digit —
		 * assumes xname is "admswN" with the digit at index 5;
		 * TODO confirm for >9 units. */
		ifp->if_xname[5] += i;
		ifp->if_softc = sc;
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		ifp->if_ioctl = admsw_ioctl;
		ifp->if_start = admsw_start;
		ifp->if_watchdog = admsw_watchdog;
		ifp->if_init = admsw_init;
		ifp->if_stop = admsw_stop;
		ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
		IFQ_SET_MAXLEN(&ifp->if_snd, max(ADMSW_NTXLDESC, IFQ_MAXLEN));
		IFQ_SET_READY(&ifp->if_snd);

		/* Attach the interface. */
		if_attach(ifp);
		ether_ifattach(ifp, enaddr);
		enaddr[5]++;	/* next port gets the next MAC address */
	}

#ifdef ADMSW_EVENT_COUNTERS
	evcnt_attach_dynamic(&sc->sc_ev_txstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txstall");
	evcnt_attach_dynamic(&sc->sc_ev_rxstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxstall");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
#if 1
	evcnt_attach_dynamic(&sc->sc_ev_rxsync, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxsync");
#endif
#endif

	admwdog_attach(sc);

	/* Make sure the interface is shutdown during reboot. */
	sc->sc_sdhook = shutdownhook_establish(admsw_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	/* leave interrupts and cpu port disabled */
	return;
}


/*
 * admsw_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
admsw_shutdown(void *arg)
{
	struct admsw_softc *sc = arg;
	int i;

	for (i = 0; i < SW_DEVS; i++)
		admsw_stop(&sc->sc_ethercom[i].ec_if, 1);
}

/*
 * admsw_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Round-robins over
 *	all port queues (static `vlan' remembers where the last pass
 *	stopped) and fills the low-priority TX ring.
 */
static void
admsw_start(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct admsw_descsoft *ds;
	struct admsw_desc *desc;
	bus_dmamap_t dmamap;
	struct ether_header *eh;
	int error, nexttx, len, i;
	static int vlan = 0;

	/*
	 * Loop through the send queues, setting up transmit descriptors
	 * until we drain the queues, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Pick the next running, non-OACTIVE port with work
		 * queued; give up when we come full circle. */
		vlan++;
		if (vlan == SW_DEVS)
			vlan = 0;
		i = vlan;
		for (;;) {
			ifp = &sc->sc_ethercom[i].ec_if;
			if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) ==
			    IFF_RUNNING) {
				/* Grab a packet off the queue. */
				IFQ_POLL(&ifp->if_snd, m0);
				if (m0 != NULL)
					break;
			}
			i++;
			if (i == SW_DEVS)
				i = 0;
			if (i == vlan)
				return;
		}
		vlan = i;
		m = NULL;

		/* Get a spare descriptor. */
		if (sc->sc_txfree == 0) {
			/* No more slots left; notify upper layer. */
			ifp->if_flags |= IFF_OACTIVE;
			ADMSW_EVCNT_INCR(&sc->sc_ev_txstall);
			break;
		}
		nexttx = sc->sc_txnext;
		desc = &sc->sc_txldescs[nexttx];
		ds = &sc->sc_txlsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.  Short frames are always copied so
		 * they can be padded to the minimum Ethernet length.
		 */
		if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			if (m->m_pkthdr.len < ETHER_MIN_LEN) {
				if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len)
					panic("admsw_start: M_TRAILINGSPACE\n");
				/* NOTE(review): only ETHER_MIN_LEN -
				 * ETHER_CRC_LEN - len bytes are zeroed
				 * but the frame length is set to
				 * ETHER_MIN_LEN — presumably the final
				 * 4 bytes are replaced by the hardware
				 * CRC; confirm against the datasheet. */
				memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
			}
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		if (dmamap->dm_nsegs != 1 && dmamap->dm_nsegs != 2)
			panic("admsw_start: dm_nsegs == %d\n", dmamap->dm_nsegs);
		desc->data = dmamap->dm_segs[0].ds_addr;
		desc->len = len = dmamap->dm_segs[0].ds_len;
		if (dmamap->dm_nsegs > 1) {
			len += dmamap->dm_segs[1].ds_len;
			desc->cntl = dmamap->dm_segs[1].ds_addr | ADM5120_DMA_BUF2ENABLE;
		} else
			desc->cntl = 0;
		/* Status carries total length and the destination VLAN bit. */
		desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
		eh = mtod(m0, struct ether_header *);
		if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
		    m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			desc->status |= ADM5120_DMA_CSUM;
		if (nexttx == ADMSW_NTXLDESC - 1)
			desc->data |= ADM5120_DMA_RINGEND;
		/* Hand the descriptor to the chip last. */
		desc->data |= ADM5120_DMA_OWN;

		/* Sync the descriptor. */
		ADMSW_CDTXLSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		REG_WRITE(SEND_TRIG_REG, 1);
		/* printf("send slot %d\n",nexttx); */

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txfree--;
		sc->sc_txnext = ADMSW_NEXTTXL(nexttx);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER */

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_ethercom[0].ec_if.if_timer = 5;
	}
}

/*
 * admsw_watchdog: [ifnet interface function]
 *
 *	Watchdog timer handler.
697 */ 698 static void 699 admsw_watchdog(struct ifnet *ifp) 700 { 701 struct admsw_softc *sc = ifp->if_softc; 702 int vlan; 703 704 #if 1 705 /* Check if an interrupt was lost. */ 706 if (sc->sc_txfree == ADMSW_NTXLDESC) { 707 printf("%s: watchdog false alarm\n", sc->sc_dev.dv_xname); 708 return; 709 } 710 if (sc->sc_ethercom[0].ec_if.if_timer != 0) 711 printf("%s: watchdog timer is %d!\n", sc->sc_dev.dv_xname, sc->sc_ethercom[0].ec_if.if_timer); 712 admsw_txintr(sc, 0); 713 if (sc->sc_txfree == ADMSW_NTXLDESC) { 714 printf("%s: tx IRQ lost (queue empty)\n", sc->sc_dev.dv_xname); 715 return; 716 } 717 if (sc->sc_ethercom[0].ec_if.if_timer != 0) { 718 printf("%s: tx IRQ lost (timer recharged)\n", sc->sc_dev.dv_xname); 719 return; 720 } 721 #endif 722 723 printf("%s: device timeout, txfree = %d\n", sc->sc_dev.dv_xname, sc->sc_txfree); 724 for (vlan = 0; vlan < SW_DEVS; vlan++) 725 admsw_stop(&sc->sc_ethercom[vlan].ec_if, 0); 726 for (vlan = 0; vlan < SW_DEVS; vlan++) 727 (void) admsw_init(&sc->sc_ethercom[vlan].ec_if); 728 729 /* Try to get more packets going. */ 730 admsw_start(ifp); 731 } 732 733 /* 734 * admsw_ioctl: [ifnet interface function] 735 * 736 * Handle control requests from the operator. 
737 */ 738 static int 739 admsw_ioctl(struct ifnet *ifp, u_long cmd, void *data) 740 { 741 struct admsw_softc *sc = ifp->if_softc; 742 struct ifdrv *ifd; 743 int s, error, port; 744 745 s = splnet(); 746 747 switch (cmd) { 748 case SIOCSIFMEDIA: 749 case SIOCGIFMEDIA: 750 port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 751 if (port >= SW_DEVS) 752 error = EOPNOTSUPP; 753 else 754 error = ifmedia_ioctl(ifp, (struct ifreq *)data, 755 &sc->sc_ifmedia[port], cmd); 756 break; 757 758 case SIOCGDRVSPEC: 759 case SIOCSDRVSPEC: 760 ifd = (struct ifdrv *) data; 761 if (ifd->ifd_cmd != 0 || ifd->ifd_len != sizeof(vlan_matrix)) { 762 error = EINVAL; 763 break; 764 } 765 if (cmd == SIOCGDRVSPEC) { 766 error = copyout(vlan_matrix, ifd->ifd_data, 767 sizeof(vlan_matrix)); 768 } else { 769 error = copyin(ifd->ifd_data, vlan_matrix, 770 sizeof(vlan_matrix)); 771 admsw_setvlan(sc, vlan_matrix); 772 } 773 break; 774 775 default: 776 error = ether_ioctl(ifp, cmd, data); 777 if (error == ENETRESET) { 778 /* 779 * Multicast list has changed; set the hardware filter 780 * accordingly. 781 */ 782 admsw_set_filter(sc); 783 error = 0; 784 } 785 break; 786 } 787 788 /* Try to get more packets going. */ 789 admsw_start(ifp); 790 791 splx(s); 792 return (error); 793 } 794 795 796 /* 797 * admsw_intr: 798 * 799 * Interrupt service routine. 
 */
static int
admsw_intr(void *arg)
{
	struct admsw_softc *sc = arg;
	uint32_t pending;
	char buf[64];

	pending = REG_READ(ADMSW_INT_ST);

	/* Log any interrupt cause outside the set we expect. */
	if ((pending & ~(ADMSW_INTR_RHD|ADMSW_INTR_RLD|ADMSW_INTR_SHD|ADMSW_INTR_SLD|ADMSW_INTR_W1TE|ADMSW_INTR_W0TE)) != 0) {
		printf("%s: pending=%s\n", __func__,
		    bitmask_snprintf(pending, ADMSW_INT_FMT, buf, sizeof(buf)));
	}
	/* Acknowledge everything we saw. */
	REG_WRITE(ADMSW_INT_ST, pending);

	/* No interfaces are up; nothing to service. */
	if (sc->ndevs == 0)
		return (0);

	if ((pending & ADMSW_INTR_RHD) != 0)
		admsw_rxintr(sc, 1);

	if ((pending & ADMSW_INTR_RLD) != 0)
		admsw_rxintr(sc, 0);

	if ((pending & ADMSW_INTR_SHD) != 0)
		admsw_txintr(sc, 1);

	if ((pending & ADMSW_INTR_SLD) != 0)
		admsw_txintr(sc, 0);

	return (1);
}

/*
 * admsw_txintr:
 *
 *	Helper; handle transmit interrupts.  Walks the low-priority TX
 *	ring from sc_txdirty, reclaiming every descriptor the chip has
 *	released, then restarts transmission.
 */
static void
admsw_txintr(struct admsw_softc *sc, int prio)
{
	struct ifnet *ifp;
	struct admsw_desc *desc;
	struct admsw_descsoft *ds;
	int i, vlan;
	int gotone = 0;

	/* printf("txintr: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
	for (i = sc->sc_txdirty; sc->sc_txfree != ADMSW_NTXLDESC;
	    i = ADMSW_NEXTTXL(i)) {

		ADMSW_CDTXLSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txldescs[i];
		ds = &sc->sc_txlsoft[i];
		/* Chip still owns this one: done reclaiming. */
		if (desc->data & ADM5120_DMA_OWN) {
			ADMSW_CDTXLSYNC(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		/* Recover the originating VLAN from the status bits
		 * set in admsw_start(). */
		vlan = ffs(desc->status & 0x3f) - 1;
		if (vlan < 0 || vlan >= SW_DEVS)
			panic("admsw_txintr: bad vlan\n");
		ifp = &sc->sc_ethercom[vlan].ec_if;
		gotone = 1;
		/* printf("clear tx slot %d\n",i); */

		ifp->if_opackets++;

		sc->sc_txfree++;
	}

	if (gotone) {
		sc->sc_txdirty = i;
#ifdef ADMSW_EVENT_COUNTERS
		ADMSW_EVCNT_INCR(&sc->sc_ev_txintr);
#endif
		/* Descriptors freed: let every port queue again. */
		for (vlan = 0; vlan < SW_DEVS; vlan++)
			sc->sc_ethercom[vlan].ec_if.if_flags &= ~IFF_OACTIVE;

		ifp = &sc->sc_ethercom[0].ec_if;

		/* Try to queue more packets. */
		admsw_start(ifp);

		/*
		 * If there are no more pending transmissions,
		 * cancel the watchdog timer.
		 */
		if (sc->sc_txfree == ADMSW_NTXLDESC)
			ifp->if_timer = 0;

	}

	/* printf("txintr end: txdirty: %d, txfree: %d\n",sc->sc_txdirty, sc->sc_txfree); */
}

/*
 * admsw_rxintr:
 *
 *	Helper; handle receive interrupts.  Only the low-priority ring
 *	is supported; a high-priority packet is a panic.  Each received
 *	frame is demultiplexed to a port interface via the source-port
 *	bits and vlan_matrix, given a fresh buffer, and passed up.
 */
static void
admsw_rxintr(struct admsw_softc *sc, int high)
{
	struct ifnet *ifp;
	struct admsw_descsoft *ds;
	struct mbuf *m;
	uint32_t stat;
	int i, len, port, vlan;

	/* printf("rxintr\n"); */
	if (high)
		panic("admsw_rxintr: high priority packet\n");

#ifdef ADMSW_EVENT_COUNTERS
	int pkts = 0;
#endif

#if 1
	/*
	 * Defensive resync: if the descriptor at sc_rxptr is still
	 * chip-owned, scan the ring for the chip's actual working
	 * position and, if we have fallen behind, jump to it.
	 */
	ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
		ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	else {
		i = sc->sc_rxptr;
		do {
			ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			i = ADMSW_NEXTRXL(i);
			/* the ring is empty, just return. */
			if (i == sc->sc_rxptr)
				return;
			ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		} while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN);
		ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
			ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		else {
			ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			/* We've fallen behind the chip: catch it. */
			printf("%s: RX ring resync, base=%x, work=%x, %d -> %d\n",
			    sc->sc_dev.dv_xname, REG_READ(RECV_LBADDR_REG),
			    REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i);
			sc->sc_rxptr = i;
			ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync);
		}
	}
#endif
	for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) {
		ds = &sc->sc_rxlsoft[i];

		ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) {
			ADMSW_CDRXLSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			break;
		}

		/* printf("process slot %d\n",i); */

#ifdef ADMSW_EVENT_COUNTERS
		pkts++;
#endif

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* Frame length excludes the hardware-appended CRC. */
		stat = sc->sc_rxldescs[i].status;
		len = (stat & ADM5120_DMA_LEN) >> ADM5120_DMA_LENSHIFT;
		len -= ETHER_CRC_LEN;
		/* Map the source port to the first VLAN containing it;
		 * unmatched ports default to VLAN 0. */
		port = (stat & ADM5120_DMA_PORTID) >> ADM5120_DMA_PORTSHIFT;
		for (vlan = 0; vlan < SW_DEVS; vlan++)
			if ((1 << port) & vlan_matrix[vlan])
				break;
		if (vlan == SW_DEVS)
			vlan = 0;
		ifp = &sc->sc_ethercom[vlan].ec_if;

		/* Swap in a fresh buffer; on failure, recycle the old
		 * one and drop the frame. */
		m = ds->ds_mbuf;
		if (admsw_add_rxlbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			ADMSW_INIT_RXLDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		if ((stat & ADM5120_DMA_TYPE) == ADM5120_DMA_TYPE_IP) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (stat & ADM5120_DMA_CSUMFAIL)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
		ifp->if_ipackets++;
	}
#ifdef ADMSW_EVENT_COUNTERS
	if (pkts)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxintr);

	/* A full ring's worth in one pass suggests we stalled the chip. */
	if (pkts == ADMSW_NRXLDESC)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxstall);
#endif

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * admsw_init: [ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().  The
 *	first interface to come up (sc->ndevs == 0) also initializes
 *	the shared hardware state and enables interrupts.
 */
static int
admsw_init(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;

	/* printf("admsw_init called\n"); */

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		if (sc->ndevs == 0) {
			admsw_init_bufs(sc);
			admsw_reset(sc);
			REG_WRITE(CPUP_CONF_REG,
			    CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
			    CPUP_CONF_DMCP_MASK);
			/* clear all pending interrupts */
			REG_WRITE(ADMSW_INT_ST, INT_MASK);

			/* enable needed interrupts */
			REG_WRITE(ADMSW_INT_MASK, REG_READ(ADMSW_INT_MASK) &
			    ~(ADMSW_INTR_SHD | ADMSW_INTR_SLD | ADMSW_INTR_RHD |
			    ADMSW_INTR_RLD | ADMSW_INTR_HDF | ADMSW_INTR_LDF));
		}
		sc->ndevs++;
	}

	/* Set the receive filter. */
	admsw_set_filter(sc);

	/* mark iface as running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

/*
 * admsw_stop: [ifnet interface function]
 *
 *	Stop transmission on the interface.
1070 */ 1071 static void 1072 admsw_stop(struct ifnet *ifp, int disable) 1073 { 1074 struct admsw_softc *sc = ifp->if_softc; 1075 1076 /* printf("admsw_stop: %d\n",disable); */ 1077 1078 if (!(ifp->if_flags & IFF_RUNNING)) 1079 return; 1080 1081 if (--sc->ndevs == 0) { 1082 /* printf("debug: de-initializing hardware\n"); */ 1083 1084 /* disable cpu port */ 1085 REG_WRITE(CPUP_CONF_REG, 1086 CPUP_CONF_DCPUP | CPUP_CONF_CRCP | 1087 CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK); 1088 1089 /* XXX We should disable, then clear? --dyoung */ 1090 /* clear all pending interrupts */ 1091 REG_WRITE(ADMSW_INT_ST, INT_MASK); 1092 1093 /* disable interrupts */ 1094 REG_WRITE(ADMSW_INT_MASK, INT_MASK); 1095 } 1096 1097 /* Mark the interface as down and cancel the watchdog timer. */ 1098 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1099 ifp->if_timer = 0; 1100 1101 return; 1102 } 1103 1104 /* 1105 * admsw_set_filter: 1106 * 1107 * Set up the receive filter. 1108 */ 1109 static void 1110 admsw_set_filter(struct admsw_softc *sc) 1111 { 1112 int i; 1113 uint32_t allmc, anymc, conf, promisc; 1114 struct ether_multi *enm; 1115 struct ethercom *ec; 1116 struct ifnet *ifp; 1117 struct ether_multistep step; 1118 1119 /* Find which ports should be operated in promisc mode. 
*/ 1120 allmc = anymc = promisc = 0; 1121 for (i = 0; i < SW_DEVS; i++) { 1122 ec = &sc->sc_ethercom[i]; 1123 ifp = &ec->ec_if; 1124 if (ifp->if_flags & IFF_PROMISC) 1125 promisc |= vlan_matrix[i]; 1126 1127 ifp->if_flags &= ~IFF_ALLMULTI; 1128 1129 ETHER_FIRST_MULTI(step, ec, enm); 1130 while (enm != NULL) { 1131 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1132 ETHER_ADDR_LEN) != 0) { 1133 printf("%s: punting on mcast range\n", 1134 __func__); 1135 ifp->if_flags |= IFF_ALLMULTI; 1136 allmc |= vlan_matrix[i]; 1137 break; 1138 } 1139 1140 anymc |= vlan_matrix[i]; 1141 1142 #if 0 1143 /* XXX extract subroutine --dyoung */ 1144 REG_WRITE(MAC_WT1_REG, 1145 enm->enm_addrlo[2] | 1146 (enm->enm_addrlo[3] << 8) | 1147 (enm->enm_addrlo[4] << 16) | 1148 (enm->enm_addrlo[5] << 24)); 1149 REG_WRITE(MAC_WT0_REG, 1150 (i << MAC_WT0_VLANID_SHIFT) | 1151 (enm->enm_addrlo[0] << 16) | 1152 (enm->enm_addrlo[1] << 24) | 1153 MAC_WT0_WRITE | MAC_WT0_VLANID_EN); 1154 /* timeout? */ 1155 while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE)); 1156 #endif 1157 1158 /* load h/w with mcast address, port = CPU */ 1159 ETHER_NEXT_MULTI(step, enm); 1160 } 1161 } 1162 1163 conf = REG_READ(CPUP_CONF_REG); 1164 /* 1 Disable forwarding of unknown & multicast packets to 1165 * CPU on all ports. 1166 * 2 Enable forwarding of unknown & multicast packets to 1167 * CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set. 1168 */ 1169 conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK; 1170 /* Enable forwarding of unknown packets to CPU on selected ports. */ 1171 conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK); 1172 conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK); 1173 conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK); 1174 REG_WRITE(CPUP_CONF_REG, conf); 1175 } 1176 1177 /* 1178 * admsw_add_rxbuf: 1179 * 1180 * Add a receive buffer to the indicated descriptor. 
1181 */ 1182 int 1183 admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high) 1184 { 1185 struct admsw_descsoft *ds; 1186 struct mbuf *m; 1187 int error; 1188 1189 if (high) 1190 ds = &sc->sc_rxhsoft[idx]; 1191 else 1192 ds = &sc->sc_rxlsoft[idx]; 1193 1194 MGETHDR(m, M_DONTWAIT, MT_DATA); 1195 if (m == NULL) 1196 return (ENOBUFS); 1197 1198 MCLGET(m, M_DONTWAIT); 1199 if ((m->m_flags & M_EXT) == 0) { 1200 m_freem(m); 1201 return (ENOBUFS); 1202 } 1203 1204 if (ds->ds_mbuf != NULL) 1205 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); 1206 1207 ds->ds_mbuf = m; 1208 1209 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap, 1210 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1211 BUS_DMA_READ | BUS_DMA_NOWAIT); 1212 if (error) { 1213 printf("%s: can't load rx DMA map %d, error = %d\n", 1214 sc->sc_dev.dv_xname, idx, error); 1215 panic("admsw_add_rxbuf"); /* XXX */ 1216 } 1217 1218 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, 1219 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1220 1221 if (high) 1222 ADMSW_INIT_RXHDESC(sc, idx); 1223 else 1224 ADMSW_INIT_RXLDESC(sc, idx); 1225 1226 return (0); 1227 } 1228 1229 int 1230 admsw_mediachange(struct ifnet *ifp) 1231 { 1232 struct admsw_softc *sc = ifp->if_softc; 1233 int port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 1234 struct ifmedia *ifm = &sc->sc_ifmedia[port]; 1235 int old, new, val; 1236 1237 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1238 return (EINVAL); 1239 1240 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 1241 val = PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX; 1242 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { 1243 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1244 val = PHY_CNTL2_100M|PHY_CNTL2_FDX; 1245 else 1246 val = PHY_CNTL2_100M; 1247 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) { 1248 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1249 val = PHY_CNTL2_FDX; 1250 else 1251 val = 0; 1252 } else 1253 return (EINVAL); 1254 1255 old = REG_READ(PHY_CNTL2_REG); 1256 new = old & 
~((PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX) << port); 1257 new |= (val << port); 1258 1259 if (new != old) 1260 REG_WRITE(PHY_CNTL2_REG, new); 1261 1262 return (0); 1263 } 1264 1265 void 1266 admsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1267 { 1268 struct admsw_softc *sc = ifp->if_softc; 1269 int port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 1270 int status; 1271 1272 ifmr->ifm_status = IFM_AVALID; 1273 ifmr->ifm_active = IFM_ETHER; 1274 1275 status = REG_READ(PHY_ST_REG) >> port; 1276 1277 if ((status & PHY_ST_LINKUP) == 0) { 1278 ifmr->ifm_active |= IFM_NONE; 1279 return; 1280 } 1281 1282 ifmr->ifm_status |= IFM_ACTIVE; 1283 ifmr->ifm_active |= (status & PHY_ST_100M) ? IFM_100_TX : IFM_10_T; 1284 if (status & PHY_ST_FDX) 1285 ifmr->ifm_active |= IFM_FDX; 1286 } 1287