/* $NetBSD: if_admsw.c,v 1.12 2014/06/16 16:48:16 msaitoh Exp $ */

/*-
 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the ADM5120 Switch Engine.
 *
 * TODO:
 *
 *	Better Rx buffer management; we want to get new Rx buffers
 *	to the chip more quickly than we currently do.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_admsw.c,v 1.12 2014/06/16 16:48:16 msaitoh Exp $");


#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>

#include <prop/proplib.h>

#include <uvm/uvm_extern.h>	/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <sys/gpio.h>
#include <dev/gpio/gpiovar.h>

#include <mips/adm5120/include/adm5120reg.h>
#include <mips/adm5120/include/adm5120var.h>
#include <mips/adm5120/include/adm5120_obiovar.h>
#include <mips/adm5120/dev/if_admswreg.h>
#include <mips/adm5120/dev/if_admswvar.h>

static uint8_t vlan_matrix[SW_DEVS] = {
	(1 << 6) | (1 << 0),		/* CPU + port0 */
	(1 << 6) | (1 << 1),		/* CPU + port1 */
	(1 << 6) | (1 << 2),		/* CPU + port2 */
	(1 << 6) | (1 << 3),		/* CPU + port3 */
	(1 << 6) | (1 << 4),		/* CPU + port4 */
	(1 << 6) | (1 << 5),		/* CPU + port5 */
};

#ifdef ADMSW_EVENT_COUNTERS
#define	ADMSW_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	ADMSW_EVCNT_INCR(ev)	/* nothing */
#endif

static void	admsw_start(struct ifnet *);
static void	admsw_watchdog(struct ifnet *);
static int	admsw_ioctl(struct ifnet *, u_long, void *);
static int	admsw_init(struct ifnet *);
static void	admsw_stop(struct ifnet *, int);

static void	admsw_shutdown(void *);

static void	admsw_reset(struct admsw_softc *);
static void	admsw_set_filter(struct admsw_softc *);

static int	admsw_intr(void *);
static void	admsw_txintr(struct admsw_softc *, int);
static void	admsw_rxintr(struct admsw_softc *, int);
static int	admsw_add_rxbuf(struct admsw_softc *, int, int);
#define	admsw_add_rxhbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 1)
#define	admsw_add_rxlbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 0)

static int	admsw_mediachange(struct ifnet *);
static void	admsw_mediastatus(struct ifnet *, struct ifmediareq *);

static int	admsw_match(device_t, cfdata_t, void *);
static void	admsw_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(admsw, sizeof(struct admsw_softc),
    admsw_match, admsw_attach, NULL, NULL);

static int
admsw_match(device_t parent, cfdata_t cf, void *aux)
{
	struct obio_attach_args *aa = aux;

	return strcmp(aa->oba_name, cf->cf_name) == 0;
}

#define	REG_READ(o)	bus_space_read_4(sc->sc_st, sc->sc_ioh, (o))
#define	REG_WRITE(o, v)	bus_space_write_4(sc->sc_st, sc->sc_ioh, (o), (v))


static void
admsw_init_bufs(struct admsw_softc *sc)
{
	int i;
	struct admsw_desc *desc;

	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if (sc->sc_txhsoft[i].ds_mbuf != NULL) {
			m_freem(sc->sc_txhsoft[i].ds_mbuf);
			sc->sc_txhsoft[i].ds_mbuf = NULL;
		}
		desc = &sc->sc_txhdescs[i];
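		/*
		 * Put the descriptor into a known-idle state: no buffer
		 * attached, maximum buffer length, and the OWN bit clear,
		 * so the switch will not DMA from it until admsw_start()
		 * hands it over.
		 */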
		desc->data = 0;
		desc->cntl = 0;
		desc->len = MAC_BUFLEN;
		desc->status = 0;
		ADMSW_CDTXHSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txhdescs[ADMSW_NTXHDESC - 1].data |= ADM5120_DMA_RINGEND;
	ADMSW_CDTXHSYNC(sc, ADMSW_NTXHDESC - 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if (sc->sc_rxhsoft[i].ds_mbuf == NULL) {
			if (admsw_add_rxhbuf(sc, i) != 0)
				panic("admsw_init_bufs");
		} else
			ADMSW_INIT_RXHDESC(sc, i);
	}

	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if (sc->sc_txlsoft[i].ds_mbuf != NULL) {
			m_freem(sc->sc_txlsoft[i].ds_mbuf);
			sc->sc_txlsoft[i].ds_mbuf = NULL;
		}
		desc = &sc->sc_txldescs[i];
		desc->data = 0;
		desc->cntl = 0;
		desc->len = MAC_BUFLEN;
		desc->status = 0;
		ADMSW_CDTXLSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txldescs[ADMSW_NTXLDESC - 1].data |= ADM5120_DMA_RINGEND;
	ADMSW_CDTXLSYNC(sc, ADMSW_NTXLDESC - 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if (sc->sc_rxlsoft[i].ds_mbuf == NULL) {
			if (admsw_add_rxlbuf(sc, i) != 0)
				panic("admsw_init_bufs");
		} else
			ADMSW_INIT_RXLDESC(sc, i);
	}

	REG_WRITE(SEND_HBADDR_REG, ADMSW_CDTXHADDR(sc, 0));
	REG_WRITE(SEND_LBADDR_REG, ADMSW_CDTXLADDR(sc, 0));
	REG_WRITE(RECV_HBADDR_REG, ADMSW_CDRXHADDR(sc, 0));
	REG_WRITE(RECV_LBADDR_REG, ADMSW_CDRXLADDR(sc, 0));

	sc->sc_txfree = ADMSW_NTXLDESC;
	sc->sc_txnext = 0;
	sc->sc_txdirty = 0;
	sc->sc_rxptr = 0;
}

static void
admsw_setvlan(struct admsw_softc *sc, char matrix[6])
{
	uint32_t i;

	i = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16) +
	    (matrix[3] << 24);
	REG_WRITE(VLAN_G1_REG, i);
	i = matrix[4] + (matrix[5] << 8);
	REG_WRITE(VLAN_G2_REG, i);
}

static void
admsw_reset(struct admsw_softc *sc)
{
	uint32_t wdog1;
	int i;

	REG_WRITE(PORT_CONF0_REG,
	    REG_READ(PORT_CONF0_REG) | PORT_CONF0_DP_MASK);
	REG_WRITE(CPUP_CONF_REG,
	    REG_READ(CPUP_CONF_REG) | CPUP_CONF_DCPUP);

	/* Wait for DMA to complete.  Overkill.  In 3ms, we can
	 * send at least two entire 1500-byte packets at 10 Mb/s.
	 */
	DELAY(3000);

	/* The datasheet recommends that we move all PHYs to reset
	 * state prior to software reset.
	 */
	REG_WRITE(PHY_CNTL2_REG,
	    REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK);

	/* Reset the switch. */
	REG_WRITE(ADMSW_SW_RES, 0x1);

	DELAY(100 * 1000);

	REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO);

	/* begin old code */
	REG_WRITE(CPUP_CONF_REG,
	    CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
	    CPUP_CONF_DMCP_MASK);

	REG_WRITE(PORT_CONF0_REG, PORT_CONF0_EMCP_MASK | PORT_CONF0_EMBP_MASK);

	REG_WRITE(PHY_CNTL2_REG,
	    REG_READ(PHY_CNTL2_REG) | PHY_CNTL2_ANE_MASK |
	    PHY_CNTL2_PHYR_MASK | PHY_CNTL2_AMDIX_MASK);

	REG_WRITE(PHY_CNTL3_REG, REG_READ(PHY_CNTL3_REG) | PHY_CNTL3_RNT);

	REG_WRITE(ADMSW_INT_MASK, INT_MASK);
	REG_WRITE(ADMSW_INT_ST, INT_MASK);

	/*
	 * While in DDB, we stop servicing interrupts, the RX ring
	 * fills up, and when the free-block counter falls behind the
	 * FC threshold, the switch starts to emit 802.3x PAUSE
	 * frames.  This can upset peer switches.
	 *
	 * Stop this from happening by disabling the FC and D2
	 * thresholds.
	 */
	REG_WRITE(FC_TH_REG,
	    REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK));

	admsw_setvlan(sc, vlan_matrix);

	for (i = 0; i < SW_DEVS; i++) {
		REG_WRITE(MAC_WT1_REG,
		    sc->sc_enaddr[2] |
		    (sc->sc_enaddr[3] << 8) |
		    (sc->sc_enaddr[4] << 16) |
		    ((sc->sc_enaddr[5] + i) << 24));
		REG_WRITE(MAC_WT0_REG, (i << MAC_WT0_VLANID_SHIFT) |
		    (sc->sc_enaddr[0] << 16) | (sc->sc_enaddr[1] << 24) |
		    MAC_WT0_WRITE | MAC_WT0_VLANID_EN);

		while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE))
			continue;
	}
	wdog1 = REG_READ(ADM5120_WDOG1);
	REG_WRITE(ADM5120_WDOG1, wdog1 & ~ADM5120_WDOG1_WDE);
}

static void
admsw_attach(device_t parent, device_t self, void *aux)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	struct admsw_softc *sc = device_private(self);
	struct obio_attach_args *aa = aux;
	struct ifnet *ifp;
	bus_dma_segment_t seg;
	int error, i, rseg;
	prop_data_t pd;

	printf(": ADM5120 Switch Engine, %d ports\n", SW_DEVS);

	sc->sc_dev = self;
	sc->sc_dmat = aa->oba_dt;
	sc->sc_st = aa->oba_st;

	pd = prop_dictionary_get(device_properties(self), "mac-address");

	if (pd == NULL) {
		enaddr[0] = 0x02;
		enaddr[1] = 0xaa;
		enaddr[2] = 0xbb;
		enaddr[3] = 0xcc;
		enaddr[4] = 0xdd;
		enaddr[5] = 0xee;
	} else
		memcpy(enaddr, prop_data_data_nocopy(pd), sizeof(enaddr));

	memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));

	printf("%s: base Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	/* Map the device. */
	if (bus_space_map(sc->sc_st, aa->oba_addr, 512, 0,
	    &sc->sc_ioh) != 0) {
		printf("%s: unable to map device\n", device_xname(sc->sc_dev));
		return;
	}

	/* Hook up the interrupt handler. */
	sc->sc_ih = adm5120_intr_establish(aa->oba_irq, INTR_IRQ,
	    admsw_intr, sc);

	if (sc->sc_ih == NULL) {
		printf("%s: unable to register interrupt handler\n",
		    device_xname(sc->sc_dev));
		return;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct admsw_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		return;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct admsw_control_data), (void *)&sc->sc_control_data,
	    0)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		return;
	}
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct admsw_control_data), 1,
	    sizeof(struct admsw_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", device_xname(sc->sc_dev), error);
		return;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct admsw_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    device_xname(sc->sc_dev), error);
		return;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
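	/*
	 * Each transmit map allows two DMA segments because a transmit
	 * descriptor can address two buffers; admsw_start() stores the
	 * second segment in desc->cntl with ADM5120_DMA_BUF2ENABLE set.
	 */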
	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    2, MCLBYTES, 0, 0,
		    &sc->sc_txhsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create txh DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i,
			    error);
			return;
		}
		sc->sc_txhsoft[i].ds_mbuf = NULL;
	}
	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    2, MCLBYTES, 0, 0,
		    &sc->sc_txlsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create txl DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i,
			    error);
			return;
		}
		sc->sc_txlsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxhsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rxh DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i,
			    error);
			return;
		}
		sc->sc_rxhsoft[i].ds_mbuf = NULL;
	}
	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxlsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rxl DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i,
			    error);
			return;
		}
		sc->sc_rxlsoft[i].ds_mbuf = NULL;
	}

	admsw_init_bufs(sc);

	admsw_reset(sc);

	for (i = 0; i < SW_DEVS; i++) {
		ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange,
		    admsw_mediastatus);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i],
		    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i],
		    IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO);

		ifp = &sc->sc_ethercom[i].ec_if;
		strcpy(ifp->if_xname, device_xname(sc->sc_dev));
		ifp->if_xname[5] += i;
		ifp->if_softc = sc;
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		ifp->if_ioctl = admsw_ioctl;
		ifp->if_start = admsw_start;
		ifp->if_watchdog = admsw_watchdog;
		ifp->if_init = admsw_init;
		ifp->if_stop = admsw_stop;
		ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
		IFQ_SET_MAXLEN(&ifp->if_snd, max(ADMSW_NTXLDESC, IFQ_MAXLEN));
		IFQ_SET_READY(&ifp->if_snd);

		/* Attach the interface. */
		if_attach(ifp);
		ether_ifattach(ifp, enaddr);
		enaddr[5]++;
	}

#ifdef ADMSW_EVENT_COUNTERS
	evcnt_attach_dynamic(&sc->sc_ev_txstall, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txstall");
	evcnt_attach_dynamic(&sc->sc_ev_rxstall, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rxstall");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rxintr");
#if 1
	evcnt_attach_dynamic(&sc->sc_ev_rxsync, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rxsync");
#endif
#endif

	admwdog_attach(sc);

	/* Make sure the interface is shutdown during reboot. */
	sc->sc_sdhook = shutdownhook_establish(admsw_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    device_xname(sc->sc_dev));

	/* leave interrupts and cpu port disabled */
	return;
}


/*
 * admsw_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
admsw_shutdown(void *arg)
{
	struct admsw_softc *sc = arg;
	int i;

	for (i = 0; i < SW_DEVS; i++)
		admsw_stop(&sc->sc_ethercom[i].ec_if, 1);
}

/*
 * admsw_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
admsw_start(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct admsw_descsoft *ds;
	struct admsw_desc *desc;
	bus_dmamap_t dmamap;
	struct ether_header *eh;
	int error, nexttx, len, i;
	static int vlan = 0;

	/*
	 * Loop through the send queues, setting up transmit descriptors
	 * until we drain the queues, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		vlan++;
		if (vlan == SW_DEVS)
			vlan = 0;
		i = vlan;
		for (;;) {
			ifp = &sc->sc_ethercom[i].ec_if;
			if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) ==
			    IFF_RUNNING) {
				/* Grab a packet off the queue. */
				IFQ_POLL(&ifp->if_snd, m0);
				if (m0 != NULL)
					break;
			}
			i++;
			if (i == SW_DEVS)
				i = 0;
			if (i == vlan)
				return;
		}
		vlan = i;
		m = NULL;

		/* Get a spare descriptor. */
		if (sc->sc_txfree == 0) {
			/* No more slots left; notify upper layer. */
			ifp->if_flags |= IFF_OACTIVE;
			ADMSW_EVCNT_INCR(&sc->sc_ev_txstall);
			break;
		}
		nexttx = sc->sc_txnext;
		desc = &sc->sc_txldescs[nexttx];
		ds = &sc->sc_txlsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			if (m->m_pkthdr.len < ETHER_MIN_LEN) {
				if (M_TRAILINGSPACE(m) <
				    ETHER_MIN_LEN - m->m_pkthdr.len)
					panic("admsw_start: M_TRAILINGSPACE");
				memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN -
				    m->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
			}
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n",
				    device_xname(sc->sc_dev), error);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
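
		/*
		 * From here on we fill in the descriptor, set the OWN
		 * bit last so the switch never sees a half-built
		 * descriptor, and finally poke SEND_TRIG_REG to kick
		 * the transmit DMA engine.
		 */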
		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		if (dmamap->dm_nsegs != 1 && dmamap->dm_nsegs != 2)
			panic("admsw_start: dm_nsegs == %d",
			    dmamap->dm_nsegs);
		desc->data = dmamap->dm_segs[0].ds_addr;
		desc->len = len = dmamap->dm_segs[0].ds_len;
		if (dmamap->dm_nsegs > 1) {
			len += dmamap->dm_segs[1].ds_len;
			desc->cntl = dmamap->dm_segs[1].ds_addr |
			    ADM5120_DMA_BUF2ENABLE;
		} else
			desc->cntl = 0;
		desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
		eh = mtod(m0, struct ether_header *);
		if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
		    m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			desc->status |= ADM5120_DMA_CSUM;
		if (nexttx == ADMSW_NTXLDESC - 1)
			desc->data |= ADM5120_DMA_RINGEND;
		desc->data |= ADM5120_DMA_OWN;

		/* Sync the descriptor. */
		ADMSW_CDTXLSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		REG_WRITE(SEND_TRIG_REG, 1);
		/* printf("send slot %d\n", nexttx); */

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txfree--;
		sc->sc_txnext = ADMSW_NEXTTXL(nexttx);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0);

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_ethercom[0].ec_if.if_timer = 5;
	}
}

/*
 * admsw_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
admsw_watchdog(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	int vlan;

#if 1
	/* Check if an interrupt was lost. */
	if (sc->sc_txfree == ADMSW_NTXLDESC) {
		printf("%s: watchdog false alarm\n",
		    device_xname(sc->sc_dev));
		return;
	}
	if (sc->sc_ethercom[0].ec_if.if_timer != 0)
		printf("%s: watchdog timer is %d!\n",
		    device_xname(sc->sc_dev),
		    sc->sc_ethercom[0].ec_if.if_timer);
	admsw_txintr(sc, 0);
	if (sc->sc_txfree == ADMSW_NTXLDESC) {
		printf("%s: tx IRQ lost (queue empty)\n",
		    device_xname(sc->sc_dev));
		return;
	}
	if (sc->sc_ethercom[0].ec_if.if_timer != 0) {
		printf("%s: tx IRQ lost (timer recharged)\n",
		    device_xname(sc->sc_dev));
		return;
	}
#endif

	printf("%s: device timeout, txfree = %d\n",
	    device_xname(sc->sc_dev), sc->sc_txfree);
	for (vlan = 0; vlan < SW_DEVS; vlan++)
		admsw_stop(&sc->sc_ethercom[vlan].ec_if, 0);
	for (vlan = 0; vlan < SW_DEVS; vlan++)
		(void)admsw_init(&sc->sc_ethercom[vlan].ec_if);

	/* Try to get more packets going. */
	admsw_start(ifp);
}

/*
 * admsw_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
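/*
 * Besides the usual Ethernet ioctls, SIOCGDRVSPEC and SIOCSDRVSPEC
 * (with ifd_cmd 0) read and replace the 6-byte port/VLAN matrix; a
 * new matrix is pushed to the switch with admsw_setvlan().
 */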
static int
admsw_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct admsw_softc *sc = ifp->if_softc;
	struct ifdrv *ifd;
	int s, error, port;

	s = splnet();

	switch (cmd) {
	case SIOCSIFCAP:
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET)
			error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		port = (struct ethercom *)ifp - sc->sc_ethercom;	/* XXX */
		if (port >= SW_DEVS)
			error = EOPNOTSUPP;
		else
			error = ifmedia_ioctl(ifp, (struct ifreq *)data,
			    &sc->sc_ifmedia[port], cmd);
		break;

	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		ifd = (struct ifdrv *)data;
		if (ifd->ifd_cmd != 0 || ifd->ifd_len != sizeof(vlan_matrix)) {
			error = EINVAL;
			break;
		}
		if (cmd == SIOCGDRVSPEC) {
			error = copyout(vlan_matrix, ifd->ifd_data,
			    sizeof(vlan_matrix));
		} else {
			error = copyin(ifd->ifd_data, vlan_matrix,
			    sizeof(vlan_matrix));
			if (error == 0)
				admsw_setvlan(sc, vlan_matrix);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			admsw_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	admsw_start(ifp);

	splx(s);
	return (error);
}


/*
 * admsw_intr:
 *
 *	Interrupt service routine.
 */
static int
admsw_intr(void *arg)
{
	struct admsw_softc *sc = arg;
	uint32_t pending;
	char buf[64];

	pending = REG_READ(ADMSW_INT_ST);

	if ((pending & ~(ADMSW_INTR_RHD|ADMSW_INTR_RLD|ADMSW_INTR_SHD|
	    ADMSW_INTR_SLD|ADMSW_INTR_W1TE|ADMSW_INTR_W0TE)) != 0) {
		snprintb(buf, sizeof(buf), ADMSW_INT_FMT, pending);
		printf("%s: pending=%s\n", __func__, buf);
	}
	REG_WRITE(ADMSW_INT_ST, pending);

	if (sc->ndevs == 0)
		return (0);

	if ((pending & ADMSW_INTR_RHD) != 0)
		admsw_rxintr(sc, 1);

	if ((pending & ADMSW_INTR_RLD) != 0)
		admsw_rxintr(sc, 0);

	if ((pending & ADMSW_INTR_SHD) != 0)
		admsw_txintr(sc, 1);

	if ((pending & ADMSW_INTR_SLD) != 0)
		admsw_txintr(sc, 0);

	return (1);
}

/*
 * admsw_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
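/*
 * Note that admsw_start() only ever queues packets on the low-priority
 * ring, so the "prio" argument does not change which ring is serviced
 * here.
 */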
static void
admsw_txintr(struct admsw_softc *sc, int prio)
{
	struct ifnet *ifp;
	struct admsw_desc *desc;
	struct admsw_descsoft *ds;
	int i, vlan;
	int gotone = 0;

	/* printf("txintr: txdirty: %d, txfree: %d\n", sc->sc_txdirty, sc->sc_txfree); */
	for (i = sc->sc_txdirty; sc->sc_txfree != ADMSW_NTXLDESC;
	    i = ADMSW_NEXTTXL(i)) {

		ADMSW_CDTXLSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txldescs[i];
		ds = &sc->sc_txlsoft[i];
		if (desc->data & ADM5120_DMA_OWN) {
			ADMSW_CDTXLSYNC(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		vlan = ffs(desc->status & 0x3f) - 1;
		if (vlan < 0 || vlan >= SW_DEVS)
			panic("admsw_txintr: bad vlan");
		ifp = &sc->sc_ethercom[vlan].ec_if;
		gotone = 1;
		/* printf("clear tx slot %d\n", i); */

		ifp->if_opackets++;

		sc->sc_txfree++;
	}

	if (gotone) {
		sc->sc_txdirty = i;
#ifdef ADMSW_EVENT_COUNTERS
		ADMSW_EVCNT_INCR(&sc->sc_ev_txintr);
#endif
		for (vlan = 0; vlan < SW_DEVS; vlan++)
			sc->sc_ethercom[vlan].ec_if.if_flags &= ~IFF_OACTIVE;

		ifp = &sc->sc_ethercom[0].ec_if;

		/* Try to queue more packets. */
		admsw_start(ifp);

		/*
		 * If there are no more pending transmissions,
		 * cancel the watchdog timer.
		 */
		if (sc->sc_txfree == ADMSW_NTXLDESC)
			ifp->if_timer = 0;

	}

	/* printf("txintr end: txdirty: %d, txfree: %d\n", sc->sc_txdirty, sc->sc_txfree); */
}

/*
 * admsw_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
admsw_rxintr(struct admsw_softc *sc, int high)
{
	struct ifnet *ifp;
	struct admsw_descsoft *ds;
	struct mbuf *m;
	uint32_t stat;
	int i, len, port, vlan;

	/* printf("rxintr\n"); */
	if (high)
		panic("admsw_rxintr: high priority packet");

#ifdef ADMSW_EVENT_COUNTERS
	int pkts = 0;
#endif

#if 1
	ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
		ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	else {
		i = sc->sc_rxptr;
		do {
			ADMSW_CDRXLSYNC(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			i = ADMSW_NEXTRXL(i);
			/* the ring is empty, just return. */
			if (i == sc->sc_rxptr)
				return;
			ADMSW_CDRXLSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		} while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN);
		ADMSW_CDRXLSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN)
		    == 0)
			ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		else {
			ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			/* We've fallen behind the chip: catch it. */
			printf("%s: RX ring resync, base=%x, work=%x, "
			    "%d -> %d\n",
			    device_xname(sc->sc_dev),
			    REG_READ(RECV_LBADDR_REG),
			    REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i);
			sc->sc_rxptr = i;
			ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync);
		}
	}
#endif
	for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) {
		ds = &sc->sc_rxlsoft[i];

		ADMSW_CDRXLSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) {
			ADMSW_CDRXLSYNC(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			break;
		}

		/* printf("process slot %d\n", i); */

#ifdef ADMSW_EVENT_COUNTERS
		pkts++;
#endif

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		stat = sc->sc_rxldescs[i].status;
		len = (stat & ADM5120_DMA_LEN) >> ADM5120_DMA_LENSHIFT;
		len -= ETHER_CRC_LEN;
		port = (stat & ADM5120_DMA_PORTID) >> ADM5120_DMA_PORTSHIFT;
		for (vlan = 0; vlan < SW_DEVS; vlan++)
			if ((1 << port) & vlan_matrix[vlan])
				break;
		if (vlan == SW_DEVS)
			vlan = 0;
		ifp = &sc->sc_ethercom[vlan].ec_if;

		m = ds->ds_mbuf;
		if (admsw_add_rxlbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			ADMSW_INIT_RXLDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		if ((stat & ADM5120_DMA_TYPE) == ADM5120_DMA_TYPE_IP) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (stat & ADM5120_DMA_CSUMFAIL)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		/* Pass this up to any BPF listeners. */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
		ifp->if_ipackets++;
	}
#ifdef ADMSW_EVENT_COUNTERS
	if (pkts)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxintr);

	if (pkts == ADMSW_NRXLDESC)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxstall);
#endif

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * admsw_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
admsw_init(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;

	/* printf("admsw_init called\n"); */

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		if (sc->ndevs == 0) {
			admsw_init_bufs(sc);
			admsw_reset(sc);
			REG_WRITE(CPUP_CONF_REG,
			    CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
			    CPUP_CONF_DMCP_MASK);
			/* clear all pending interrupts */
			REG_WRITE(ADMSW_INT_ST, INT_MASK);

			/* enable needed interrupts */
			REG_WRITE(ADMSW_INT_MASK, REG_READ(ADMSW_INT_MASK) &
			    ~(ADMSW_INTR_SHD | ADMSW_INTR_SLD |
			    ADMSW_INTR_RHD | ADMSW_INTR_RLD |
			    ADMSW_INTR_HDF | ADMSW_INTR_LDF));
		}
		sc->ndevs++;
	}

	/* Set the receive filter. */
	admsw_set_filter(sc);

	/* mark iface as running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

/*
 * admsw_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
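/*
 * All SW_DEVS interfaces share one set of rings; sc->ndevs counts how
 * many of them are running, and the hardware is only quiesced when the
 * last one is stopped.
 */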
static void
admsw_stop(struct ifnet *ifp, int disable)
{
	struct admsw_softc *sc = ifp->if_softc;

	/* printf("admsw_stop: %d\n", disable); */

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	if (--sc->ndevs == 0) {
		/* printf("debug: de-initializing hardware\n"); */

		/* disable cpu port */
		REG_WRITE(CPUP_CONF_REG,
		    CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
		    CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK);

		/* XXX We should disable, then clear? --dyoung */
		/* clear all pending interrupts */
		REG_WRITE(ADMSW_INT_ST, INT_MASK);

		/* disable interrupts */
		REG_WRITE(ADMSW_INT_MASK, INT_MASK);
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	return;
}

/*
 * admsw_set_filter:
 *
 *	Set up the receive filter.
 */
static void
admsw_set_filter(struct admsw_softc *sc)
{
	int i;
	uint32_t allmc, anymc, conf, promisc;
	struct ether_multi *enm;
	struct ethercom *ec;
	struct ifnet *ifp;
	struct ether_multistep step;

	/* Find which ports should be operated in promisc mode. */
	allmc = anymc = promisc = 0;
	for (i = 0; i < SW_DEVS; i++) {
		ec = &sc->sc_ethercom[i];
		ifp = &ec->ec_if;
		if (ifp->if_flags & IFF_PROMISC)
			promisc |= vlan_matrix[i];

		ifp->if_flags &= ~IFF_ALLMULTI;

		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
				printf("%s: punting on mcast range\n",
				    __func__);
				ifp->if_flags |= IFF_ALLMULTI;
				allmc |= vlan_matrix[i];
				break;
			}

			anymc |= vlan_matrix[i];

#if 0
			/* XXX extract subroutine --dyoung */
			REG_WRITE(MAC_WT1_REG,
			    enm->enm_addrlo[2] |
			    (enm->enm_addrlo[3] << 8) |
			    (enm->enm_addrlo[4] << 16) |
			    (enm->enm_addrlo[5] << 24));
			REG_WRITE(MAC_WT0_REG,
			    (i << MAC_WT0_VLANID_SHIFT) |
			    (enm->enm_addrlo[0] << 16) |
			    (enm->enm_addrlo[1] << 24) |
			    MAC_WT0_WRITE | MAC_WT0_VLANID_EN);
			/* timeout? */
			while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE))
				continue;
#endif

			/* load h/w with mcast address, port = CPU */
			ETHER_NEXT_MULTI(step, enm);
		}
	}

	conf = REG_READ(CPUP_CONF_REG);
	/*
	 * 1. Disable forwarding of unknown & multicast packets to
	 *    the CPU on all ports.
	 * 2. Enable forwarding of unknown & multicast packets to
	 *    the CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set.
	 */
	conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK;
	/* Enable forwarding of unknown packets to CPU on selected ports. */
	conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK);
	conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
	conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
	REG_WRITE(CPUP_CONF_REG, conf);
}
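
/*
 * A minimal sketch of the MAC-table write helper suggested by the
 * "XXX extract subroutine" comment in admsw_set_filter() above; the
 * same register sequence already appears in admsw_reset().  The name
 * admsw_write_mac() and the bounded busy-wait are assumptions, not
 * existing code, so the sketch is left disabled.
 */
#if 0
static void
admsw_write_mac(struct admsw_softc *sc, int vlan, const uint8_t *ea)
{
	int timo = 1000;

	REG_WRITE(MAC_WT1_REG,
	    ea[2] | (ea[3] << 8) | (ea[4] << 16) | (ea[5] << 24));
	REG_WRITE(MAC_WT0_REG, (vlan << MAC_WT0_VLANID_SHIFT) |
	    (ea[0] << 16) | (ea[1] << 24) |
	    MAC_WT0_WRITE | MAC_WT0_VLANID_EN);
	/* Bound the wait so a wedged switch cannot hang the kernel. */
	while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE) && --timo > 0)
		DELAY(1);
}
#endif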

/*
 * admsw_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high)
{
	struct admsw_descsoft *ds;
	struct mbuf *m;
	int error;

	if (high)
		ds = &sc->sc_rxhsoft[idx];
	else
		ds = &sc->sc_rxlsoft[idx];

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("admsw_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	if (high)
		ADMSW_INIT_RXHDESC(sc, idx);
	else
		ADMSW_INIT_RXLDESC(sc, idx);

	return (0);
}

static int
admsw_mediachange(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	int port = (struct ethercom *)ifp - sc->sc_ethercom;	/* XXX */
	struct ifmedia *ifm = &sc->sc_ifmedia[port];
	int old, new, val;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		val = PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			val = PHY_CNTL2_100M|PHY_CNTL2_FDX;
		else
			val = PHY_CNTL2_100M;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			val = PHY_CNTL2_FDX;
		else
			val = 0;
	} else
		return (EINVAL);

	old = REG_READ(PHY_CNTL2_REG);
	new = old & ~((PHY_CNTL2_AUTONEG|PHY_CNTL2_100M|PHY_CNTL2_FDX)
	    << port);
	new |= (val << port);

	if (new != old)
		REG_WRITE(PHY_CNTL2_REG, new);

	return (0);
}

static void
admsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct admsw_softc *sc = ifp->if_softc;
	int port = (struct ethercom *)ifp - sc->sc_ethercom;	/* XXX */
	int status;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	status = REG_READ(PHY_ST_REG) >> port;

	if ((status & PHY_ST_LINKUP) == 0) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= (status & PHY_ST_100M) ? IFM_100_TX : IFM_10_T;
	if (status & PHY_ST_FDX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}