/* $NetBSD: if_admsw.c,v 1.28 2020/01/29 05:30:14 thorpej Exp $ */

/*-
 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the ADM5120 Switch Engine, the on-chip
 * multi-port Ethernet switch.
 *
 * TODO:
 *
 *	Better Rx buffer management; we want to get new Rx buffers
 *	to the chip more quickly than we currently do.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_admsw.c,v 1.28 2020/01/29 05:30:14 thorpej Exp $");


#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>

#include <prop/proplib.h>

#include <uvm/uvm_extern.h>	/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>

#include <sys/gpio.h>
#include <dev/gpio/gpiovar.h>

#include <mips/adm5120/include/adm5120reg.h>
#include <mips/adm5120/include/adm5120var.h>
#include <mips/adm5120/include/adm5120_obiovar.h>
#include <mips/adm5120/dev/if_admswreg.h>
#include <mips/adm5120/dev/if_admswvar.h>

static uint8_t vlan_matrix[SW_DEVS] = {
	(1 << 6) | (1 << 0),		/* CPU + port0 */
	(1 << 6) | (1 << 1),		/* CPU + port1 */
	(1 << 6) | (1 << 2),		/* CPU + port2 */
	(1 << 6) | (1 << 3),		/* CPU + port3 */
	(1 << 6) | (1 << 4),		/* CPU + port4 */
	(1 << 6) | (1 << 5),		/* CPU + port5 */
};

#ifdef ADMSW_EVENT_COUNTERS
#define ADMSW_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define ADMSW_EVCNT_INCR(ev)	/* nothing */
#endif

static void admsw_start(struct ifnet *);
static void admsw_watchdog(struct ifnet *);
static int admsw_ioctl(struct ifnet *, u_long, void *);
static int admsw_init(struct ifnet *);
static void admsw_stop(struct ifnet *, int);

static void admsw_shutdown(void *);

static void admsw_reset(struct admsw_softc *);
static void admsw_set_filter(struct admsw_softc *);

static int admsw_intr(void *);
static void admsw_txintr(struct admsw_softc *, int);
static void admsw_rxintr(struct admsw_softc *, int);
static int admsw_add_rxbuf(struct admsw_softc *, int, int);
#define admsw_add_rxhbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 1)
#define admsw_add_rxlbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 0)

static int admsw_mediachange(struct ifnet *);
static void admsw_mediastatus(struct ifnet *, struct ifmediareq *);

static int admsw_match(device_t, cfdata_t, void *);
static void admsw_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(admsw, sizeof(struct admsw_softc),
    admsw_match, admsw_attach, NULL, NULL);

static int
admsw_match(device_t parent, cfdata_t cf, void *aux)
{
	struct obio_attach_args *aa = aux;

	return strcmp(aa->oba_name, cf->cf_name) == 0;
}

#define REG_READ(o)	bus_space_read_4(sc->sc_st, sc->sc_ioh, (o))
#define REG_WRITE(o, v)	bus_space_write_4(sc->sc_st, sc->sc_ioh, (o), (v))


/*
 * admsw_init_bufs:
 *
 *	Initialize the transmit and receive descriptor rings and
 *	attach receive buffers.
 */
static void
admsw_init_bufs(struct admsw_softc *sc)
{
	int i;
	struct admsw_desc *desc;

	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if (sc->sc_txhsoft[i].ds_mbuf != NULL) {
			m_freem(sc->sc_txhsoft[i].ds_mbuf);
			sc->sc_txhsoft[i].ds_mbuf = NULL;
		}
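		/*
		 * Reset the four descriptor words; at transmit time
		 * admsw_start() fills 'data' with the buffer address
		 * plus the OWN/RINGEND flag bits, 'cntl' with an
		 * optional second buffer, and 'status' with the frame
		 * length and VLAN bits.
		 */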
		desc = &sc->sc_txhdescs[i];
		desc->data = 0;
		desc->cntl = 0;
		desc->len = MAC_BUFLEN;
		desc->status = 0;
		ADMSW_CDTXHSYNC(sc, i,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txhdescs[ADMSW_NTXHDESC - 1].data |= ADM5120_DMA_RINGEND;
	ADMSW_CDTXHSYNC(sc, ADMSW_NTXHDESC - 1,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if (sc->sc_rxhsoft[i].ds_mbuf == NULL) {
			if (admsw_add_rxhbuf(sc, i) != 0)
				panic("admsw_init_bufs");
		} else
			ADMSW_INIT_RXHDESC(sc, i);
	}

	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if (sc->sc_txlsoft[i].ds_mbuf != NULL) {
			m_freem(sc->sc_txlsoft[i].ds_mbuf);
			sc->sc_txlsoft[i].ds_mbuf = NULL;
		}
		desc = &sc->sc_txldescs[i];
		desc->data = 0;
		desc->cntl = 0;
		desc->len = MAC_BUFLEN;
		desc->status = 0;
		ADMSW_CDTXLSYNC(sc, i,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txldescs[ADMSW_NTXLDESC - 1].data |= ADM5120_DMA_RINGEND;
	ADMSW_CDTXLSYNC(sc, ADMSW_NTXLDESC - 1,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if (sc->sc_rxlsoft[i].ds_mbuf == NULL) {
			if (admsw_add_rxlbuf(sc, i) != 0)
				panic("admsw_init_bufs");
		} else
			ADMSW_INIT_RXLDESC(sc, i);
	}

	REG_WRITE(SEND_HBADDR_REG, ADMSW_CDTXHADDR(sc, 0));
	REG_WRITE(SEND_LBADDR_REG, ADMSW_CDTXLADDR(sc, 0));
	REG_WRITE(RECV_HBADDR_REG, ADMSW_CDRXHADDR(sc, 0));
	REG_WRITE(RECV_LBADDR_REG, ADMSW_CDRXLADDR(sc, 0));

	sc->sc_txfree = ADMSW_NTXLDESC;
	sc->sc_txnext = 0;
	sc->sc_txdirty = 0;
	sc->sc_rxptr = 0;
}

/*
 * admsw_setvlan:
 *
 *	Load the port/VLAN matrix into the VLAN group registers.
 */
static void
admsw_setvlan(struct admsw_softc *sc, char matrix[6])
{
	uint32_t i;

	i = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16)
	    + (matrix[3] << 24);
	REG_WRITE(VLAN_G1_REG, i);
	i = matrix[4] + (matrix[5] << 8);
	REG_WRITE(VLAN_G2_REG, i);
}

/*
 * admsw_reset:
 *
 *	Reset the switch and restore the default configuration.
 */
static void
admsw_reset(struct admsw_softc *sc)
{
	uint32_t wdog1;
	int i;

	REG_WRITE(PORT_CONF0_REG,
	    REG_READ(PORT_CONF0_REG) | PORT_CONF0_DP_MASK);
	REG_WRITE(CPUP_CONF_REG,
	    REG_READ(CPUP_CONF_REG) | CPUP_CONF_DCPUP);

	/* Wait for DMA to complete.  Overkill.  In 3ms, we can
	 * send at least two entire 1500-byte packets at 10 Mb/s.
	 */
	DELAY(3000);

	/* The datasheet recommends that we move all PHYs to reset
	 * state prior to software reset.
	 */
	REG_WRITE(PHY_CNTL2_REG,
	    REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK);

	/* Reset the switch. */
	REG_WRITE(ADMSW_SW_RES, 0x1);

	DELAY(100 * 1000);

	REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO);

	/* begin old code */
	REG_WRITE(CPUP_CONF_REG,
	    CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
	    CPUP_CONF_DMCP_MASK);

	REG_WRITE(PORT_CONF0_REG, PORT_CONF0_EMCP_MASK | PORT_CONF0_EMBP_MASK);

	REG_WRITE(PHY_CNTL2_REG,
	    REG_READ(PHY_CNTL2_REG) | PHY_CNTL2_ANE_MASK |
	    PHY_CNTL2_PHYR_MASK | PHY_CNTL2_AMDIX_MASK);

	REG_WRITE(PHY_CNTL3_REG, REG_READ(PHY_CNTL3_REG) | PHY_CNTL3_RNT);

	REG_WRITE(ADMSW_INT_MASK, INT_MASK);
	REG_WRITE(ADMSW_INT_ST, INT_MASK);

	/*
	 * While we are in DDB, interrupts are not serviced, the RX
	 * ring fills up, and once the free block counter falls behind
	 * the FC threshold, the switch starts to emit 802.3x PAUSE
	 * frames.  This can upset peer switches.
	 *
	 * Stop this from happening by disabling the FC and D2
	 * thresholds.
	 */
301 */ 302 REG_WRITE(FC_TH_REG, 303 REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK)); 304 305 admsw_setvlan(sc, vlan_matrix); 306 307 for (i = 0; i < SW_DEVS; i++) { 308 REG_WRITE(MAC_WT1_REG, 309 sc->sc_enaddr[2] | 310 (sc->sc_enaddr[3]<<8) | 311 (sc->sc_enaddr[4]<<16) | 312 ((sc->sc_enaddr[5]+i)<<24)); 313 REG_WRITE(MAC_WT0_REG, (i<<MAC_WT0_VLANID_SHIFT) | 314 (sc->sc_enaddr[0]<<16) | (sc->sc_enaddr[1]<<24) | 315 MAC_WT0_WRITE | MAC_WT0_VLANID_EN); 316 317 while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE)) 318 ; 319 } 320 wdog1 = REG_READ(ADM5120_WDOG1); 321 REG_WRITE(ADM5120_WDOG1, wdog1 & ~ADM5120_WDOG1_WDE); 322 } 323 324 static void 325 admsw_attach(device_t parent, device_t self, void *aux) 326 { 327 uint8_t enaddr[ETHER_ADDR_LEN]; 328 struct admsw_softc *sc = device_private(self); 329 struct obio_attach_args *aa = aux; 330 struct ifnet *ifp; 331 bus_dma_segment_t seg; 332 int error, i, rseg; 333 prop_data_t pd; 334 335 printf(": ADM5120 Switch Engine, %d ports\n", SW_DEVS); 336 337 sc->sc_dev = self; 338 sc->sc_dmat = aa->oba_dt; 339 sc->sc_st = aa->oba_st; 340 341 pd = prop_dictionary_get(device_properties(self), "mac-address"); 342 343 if (pd == NULL) { 344 enaddr[0] = 0x02; 345 enaddr[1] = 0xaa; 346 enaddr[2] = 0xbb; 347 enaddr[3] = 0xcc; 348 enaddr[4] = 0xdd; 349 enaddr[5] = 0xee; 350 } else 351 memcpy(enaddr, prop_data_data_nocopy(pd), sizeof(enaddr)); 352 353 memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr)); 354 355 printf("%s: base Ethernet address %s\n", device_xname(sc->sc_dev), 356 ether_sprintf(enaddr)); 357 358 /* Map the device. */ 359 if (bus_space_map(sc->sc_st, aa->oba_addr, 512, 0, &sc->sc_ioh) != 0) { 360 printf("%s: unable to map device\n", device_xname(sc->sc_dev)); 361 return; 362 } 363 364 /* Hook up the interrupt handler. */ 365 sc->sc_ih = adm5120_intr_establish(aa->oba_irq, INTR_IRQ, admsw_intr, sc); 366 367 if (sc->sc_ih == NULL) { 368 printf("%s: unable to register interrupt handler\n", 369 device_xname(sc->sc_dev)); 370 return; 371 } 372 373 /* 374 * Allocate the control data structures, and create and load the 375 * DMA map for it. 376 */ 377 if ((error = bus_dmamem_alloc(sc->sc_dmat, 378 sizeof(struct admsw_control_data), PAGE_SIZE, 0, &seg, 1, &rseg, 379 0)) != 0) { 380 printf("%s: unable to allocate control data, error = %d\n", 381 device_xname(sc->sc_dev), error); 382 return; 383 } 384 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 385 sizeof(struct admsw_control_data), (void *)&sc->sc_control_data, 386 0)) != 0) { 387 printf("%s: unable to map control data, error = %d\n", 388 device_xname(sc->sc_dev), error); 389 return; 390 } 391 if ((error = bus_dmamap_create(sc->sc_dmat, 392 sizeof(struct admsw_control_data), 1, 393 sizeof(struct admsw_control_data), 0, 0, &sc->sc_cddmamap)) != 0) { 394 printf("%s: unable to create control data DMA map, " 395 "error = %d\n", device_xname(sc->sc_dev), error); 396 return; 397 } 398 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 399 sc->sc_control_data, sizeof(struct admsw_control_data), NULL, 400 0)) != 0) { 401 printf("%s: unable to load control data DMA map, error = %d\n", 402 device_xname(sc->sc_dev), error); 403 return; 404 } 405 406 /* 407 * Create the transmit buffer DMA maps. 
408 */ 409 for (i = 0; i < ADMSW_NTXHDESC; i++) { 410 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 411 2, MCLBYTES, 0, 0, 412 &sc->sc_txhsoft[i].ds_dmamap)) != 0) { 413 printf("%s: unable to create txh DMA map %d, " 414 "error = %d\n", device_xname(sc->sc_dev), i, error); 415 return; 416 } 417 sc->sc_txhsoft[i].ds_mbuf = NULL; 418 } 419 for (i = 0; i < ADMSW_NTXLDESC; i++) { 420 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 421 2, MCLBYTES, 0, 0, 422 &sc->sc_txlsoft[i].ds_dmamap)) != 0) { 423 printf("%s: unable to create txl DMA map %d, " 424 "error = %d\n", device_xname(sc->sc_dev), i, error); 425 return; 426 } 427 sc->sc_txlsoft[i].ds_mbuf = NULL; 428 } 429 430 /* 431 * Create the receive buffer DMA maps. 432 */ 433 for (i = 0; i < ADMSW_NRXHDESC; i++) { 434 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 435 MCLBYTES, 0, 0, &sc->sc_rxhsoft[i].ds_dmamap)) != 0) { 436 printf("%s: unable to create rxh DMA map %d, " 437 "error = %d\n", device_xname(sc->sc_dev), i, error); 438 return; 439 } 440 sc->sc_rxhsoft[i].ds_mbuf = NULL; 441 } 442 for (i = 0; i < ADMSW_NRXLDESC; i++) { 443 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 444 MCLBYTES, 0, 0, &sc->sc_rxlsoft[i].ds_dmamap)) != 0) { 445 printf("%s: unable to create rxl DMA map %d, " 446 "error = %d\n", device_xname(sc->sc_dev), i, error); 447 return; 448 } 449 sc->sc_rxlsoft[i].ds_mbuf = NULL; 450 } 451 452 admsw_init_bufs(sc); 453 454 admsw_reset(sc); 455 456 for (i = 0; i < SW_DEVS; i++) { 457 sc->sc_ethercom[i].ec_ifmedia = &sc->sc_ifmedia[i]; 458 ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange, admsw_mediastatus); 459 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL); 460 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 461 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL); 462 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); 463 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL); 464 ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO); 465 466 ifp = &sc->sc_ethercom[i].ec_if; 467 strcpy(ifp->if_xname, device_xname(sc->sc_dev)); 468 ifp->if_xname[5] += i; 469 ifp->if_softc = sc; 470 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 471 ifp->if_ioctl = admsw_ioctl; 472 ifp->if_start = admsw_start; 473 ifp->if_watchdog = admsw_watchdog; 474 ifp->if_init = admsw_init; 475 ifp->if_stop = admsw_stop; 476 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx; 477 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(ADMSW_NTXLDESC, IFQ_MAXLEN)); 478 IFQ_SET_READY(&ifp->if_snd); 479 480 /* Attach the interface. */ 481 if_attach(ifp); 482 if_deferred_start_init(ifp, NULL); 483 ether_ifattach(ifp, enaddr); 484 enaddr[5]++; 485 } 486 487 #ifdef ADMSW_EVENT_COUNTERS 488 evcnt_attach_dynamic(&sc->sc_ev_txstall, EVCNT_TYPE_MISC, 489 NULL, device_xname(sc->sc_dev), "txstall"); 490 evcnt_attach_dynamic(&sc->sc_ev_rxstall, EVCNT_TYPE_MISC, 491 NULL, device_xname(sc->sc_dev), "rxstall"); 492 evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_MISC, 493 NULL, device_xname(sc->sc_dev), "txintr"); 494 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_MISC, 495 NULL, device_xname(sc->sc_dev), "rxintr"); 496 #if 1 497 evcnt_attach_dynamic(&sc->sc_ev_rxsync, EVCNT_TYPE_MISC, 498 NULL, device_xname(sc->sc_dev), "rxsync"); 499 #endif 500 #endif 501 502 admwdog_attach(sc); 503 504 /* Make sure the interface is shutdown during reboot. 
	sc->sc_sdhook = shutdownhook_establish(admsw_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    device_xname(sc->sc_dev));

	/* leave interrupts and cpu port disabled */
	return;
}


/*
 * admsw_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
admsw_shutdown(void *arg)
{
	struct admsw_softc *sc = arg;
	int i;

	for (i = 0; i < SW_DEVS; i++)
		admsw_stop(&sc->sc_ethercom[i].ec_if, 1);
}

/*
 * admsw_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
admsw_start(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct admsw_descsoft *ds;
	struct admsw_desc *desc;
	bus_dmamap_t dmamap;
	struct ether_header *eh;
	int error, nexttx, len, i;
	static int vlan = 0;

	/*
	 * Loop through the send queues, setting up transmit descriptors
	 * until we drain the queues, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		vlan++;
		if (vlan == SW_DEVS)
			vlan = 0;
		i = vlan;
		for (;;) {
			ifp = &sc->sc_ethercom[i].ec_if;
			if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) ==
			    IFF_RUNNING) {
				/* Grab a packet off the queue. */
				IFQ_POLL(&ifp->if_snd, m0);
				if (m0 != NULL)
					break;
			}
			i++;
			if (i == SW_DEVS)
				i = 0;
			if (i == vlan)
				return;
		}
		vlan = i;
		m = NULL;

		/* Get a spare descriptor. */
		if (sc->sc_txfree == 0) {
			/* No more slots left; notify upper layer. */
			ifp->if_flags |= IFF_OACTIVE;
			ADMSW_EVCNT_INCR(&sc->sc_ev_txstall);
			break;
		}
		nexttx = sc->sc_txnext;
		desc = &sc->sc_txldescs[nexttx];
		ds = &sc->sc_txlsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			if (m->m_pkthdr.len < ETHER_MIN_LEN) {
				if (M_TRAILINGSPACE(m) <
				    ETHER_MIN_LEN - m->m_pkthdr.len)
					panic("admsw_start: M_TRAILINGSPACE");
				memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
				    ETHER_MIN_LEN - ETHER_CRC_LEN -
				    m->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
			}
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n",
				    device_xname(sc->sc_dev), error);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
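		/*
		 * Hand the frame to the chip: point the descriptor at
		 * the mapped segments, record the frame length and the
		 * originating port in 'status' (e.g. a 64-byte frame
		 * from port 2 yields (64 << ADM5120_DMA_LENSHIFT) |
		 * (1 << 2)), and set the OWN bit only after the other
		 * descriptor words are filled in.
		 */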
		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		if (dmamap->dm_nsegs != 1 && dmamap->dm_nsegs != 2)
			panic("admsw_start: dm_nsegs == %d",
			    dmamap->dm_nsegs);
		desc->data = dmamap->dm_segs[0].ds_addr;
		desc->len = len = dmamap->dm_segs[0].ds_len;
		if (dmamap->dm_nsegs > 1) {
			len += dmamap->dm_segs[1].ds_len;
			desc->cntl = dmamap->dm_segs[1].ds_addr
			    | ADM5120_DMA_BUF2ENABLE;
		} else
			desc->cntl = 0;
		desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
		eh = mtod(m0, struct ether_header *);
		if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
		    m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			desc->status |= ADM5120_DMA_CSUM;
		if (nexttx == ADMSW_NTXLDESC - 1)
			desc->data |= ADM5120_DMA_RINGEND;
		desc->data |= ADM5120_DMA_OWN;

		/* Sync the descriptor. */
		ADMSW_CDTXLSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		REG_WRITE(SEND_TRIG_REG, 1);
		/* printf("send slot %d\n", nexttx); */

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txfree--;
		sc->sc_txnext = ADMSW_NEXTTXL(nexttx);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0, BPF_D_OUT);

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_ethercom[0].ec_if.if_timer = 5;
	}
}

/*
 * admsw_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
admsw_watchdog(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;
	int vlan;

#if 1
	/* Check if an interrupt was lost. */
	if (sc->sc_txfree == ADMSW_NTXLDESC) {
		printf("%s: watchdog false alarm\n", device_xname(sc->sc_dev));
		return;
	}
	if (sc->sc_ethercom[0].ec_if.if_timer != 0)
		printf("%s: watchdog timer is %d!\n", device_xname(sc->sc_dev),
		    sc->sc_ethercom[0].ec_if.if_timer);
	admsw_txintr(sc, 0);
	if (sc->sc_txfree == ADMSW_NTXLDESC) {
		printf("%s: tx IRQ lost (queue empty)\n",
		    device_xname(sc->sc_dev));
		return;
	}
	if (sc->sc_ethercom[0].ec_if.if_timer != 0) {
		printf("%s: tx IRQ lost (timer recharged)\n",
		    device_xname(sc->sc_dev));
		return;
	}
#endif

	printf("%s: device timeout, txfree = %d\n",
	    device_xname(sc->sc_dev), sc->sc_txfree);
	for (vlan = 0; vlan < SW_DEVS; vlan++)
		admsw_stop(&sc->sc_ethercom[vlan].ec_if, 0);
	for (vlan = 0; vlan < SW_DEVS; vlan++)
		(void)admsw_init(&sc->sc_ethercom[vlan].ec_if);

	/* Try to get more packets going. */
	admsw_start(ifp);
}

/*
 * admsw_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
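/*
 * Besides the standard Ethernet ioctls, each port accepts
 * SIOCGDRVSPEC/SIOCSDRVSPEC with ifd_cmd == 0 and ifd_len == 6 to
 * read or replace the whole port/VLAN matrix.  An illustrative
 * (untested) userland sketch:
 *
 *	uint8_t matrix[6];
 *	struct ifdrv ifd;
 *
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(ifd.ifd_name, "admsw0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = 0;
 *	ifd.ifd_len = sizeof(matrix);
 *	ifd.ifd_data = matrix;
 *	ioctl(s, SIOCGDRVSPEC, &ifd);	-- fetch the current matrix
 */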
736 */ 737 static int 738 admsw_ioctl(struct ifnet *ifp, u_long cmd, void *data) 739 { 740 struct admsw_softc *sc = ifp->if_softc; 741 struct ifdrv *ifd; 742 int s, error, port; 743 744 port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 745 if (port >= SW_DEVS) 746 return EOPNOTSUPP; 747 748 s = splnet(); 749 750 switch (cmd) { 751 case SIOCSIFCAP: 752 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) 753 error = 0; 754 break; 755 case SIOCGDRVSPEC: 756 case SIOCSDRVSPEC: 757 ifd = (struct ifdrv *) data; 758 if (ifd->ifd_cmd != 0 || ifd->ifd_len != sizeof(vlan_matrix)) { 759 error = EINVAL; 760 break; 761 } 762 if (cmd == SIOCGDRVSPEC) { 763 error = copyout(vlan_matrix, ifd->ifd_data, 764 sizeof(vlan_matrix)); 765 } else { 766 error = copyin(ifd->ifd_data, vlan_matrix, 767 sizeof(vlan_matrix)); 768 admsw_setvlan(sc, vlan_matrix); 769 } 770 break; 771 772 default: 773 error = ether_ioctl(ifp, cmd, data); 774 if (error == ENETRESET) { 775 /* 776 * Multicast list has changed; set the hardware filter 777 * accordingly. 778 */ 779 admsw_set_filter(sc); 780 error = 0; 781 } 782 break; 783 } 784 785 /* Try to get more packets going. */ 786 admsw_start(ifp); 787 788 splx(s); 789 return error; 790 } 791 792 793 /* 794 * admsw_intr: 795 * 796 * Interrupt service routine. 797 */ 798 static int 799 admsw_intr(void *arg) 800 { 801 struct admsw_softc *sc = arg; 802 uint32_t pending; 803 char buf[64]; 804 805 pending = REG_READ(ADMSW_INT_ST); 806 807 if ((pending & ~(ADMSW_INTR_RHD | ADMSW_INTR_RLD | ADMSW_INTR_SHD | 808 ADMSW_INTR_SLD | ADMSW_INTR_W1TE | ADMSW_INTR_W0TE)) != 0) { 809 snprintb(buf, sizeof(buf), ADMSW_INT_FMT, pending); 810 printf("%s: pending=%s\n", __func__, buf); 811 } 812 REG_WRITE(ADMSW_INT_ST, pending); 813 814 if (sc->ndevs == 0) 815 return 0; 816 817 if ((pending & ADMSW_INTR_RHD) != 0) 818 admsw_rxintr(sc, 1); 819 820 if ((pending & ADMSW_INTR_RLD) != 0) 821 admsw_rxintr(sc, 0); 822 823 if ((pending & ADMSW_INTR_SHD) != 0) 824 admsw_txintr(sc, 1); 825 826 if ((pending & ADMSW_INTR_SLD) != 0) 827 admsw_txintr(sc, 0); 828 829 return 1; 830 } 831 832 /* 833 * admsw_txintr: 834 * 835 * Helper; handle transmit interrupts. 
836 */ 837 static void 838 admsw_txintr(struct admsw_softc *sc, int prio) 839 { 840 struct ifnet *ifp; 841 struct admsw_desc *desc; 842 struct admsw_descsoft *ds; 843 int i, vlan; 844 int gotone = 0; 845 846 /* printf("txintr: txdirty: %d, txfree: %d\n", sc->sc_txdirty, sc->sc_txfree); */ 847 for (i = sc->sc_txdirty; sc->sc_txfree != ADMSW_NTXLDESC; 848 i = ADMSW_NEXTTXL(i)) { 849 850 ADMSW_CDTXLSYNC(sc, i, 851 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 852 853 desc = &sc->sc_txldescs[i]; 854 ds = &sc->sc_txlsoft[i]; 855 if (desc->data & ADM5120_DMA_OWN) { 856 ADMSW_CDTXLSYNC(sc, i, 857 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 858 break; 859 } 860 861 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 862 0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 863 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); 864 m_freem(ds->ds_mbuf); 865 ds->ds_mbuf = NULL; 866 867 vlan = ffs(desc->status & 0x3f) - 1; 868 if (vlan < 0 || vlan >= SW_DEVS) 869 panic("admsw_txintr: bad vlan\n"); 870 ifp = &sc->sc_ethercom[vlan].ec_if; 871 gotone = 1; 872 /* printf("clear tx slot %d\n", i); */ 873 874 if_statinc(ifp, if_opackets); 875 876 sc->sc_txfree++; 877 } 878 879 if (gotone) { 880 sc->sc_txdirty = i; 881 #ifdef ADMSW_EVENT_COUNTERS 882 ADMSW_EVCNT_INCR(&sc->sc_ev_txintr); 883 #endif 884 for (vlan = 0; vlan < SW_DEVS; vlan++) 885 sc->sc_ethercom[vlan].ec_if.if_flags &= ~IFF_OACTIVE; 886 887 ifp = &sc->sc_ethercom[0].ec_if; 888 889 /* Try to queue more packets. */ 890 if_schedule_deferred_start(ifp); 891 892 /* 893 * If there are no more pending transmissions, 894 * cancel the watchdog timer. 895 */ 896 if (sc->sc_txfree == ADMSW_NTXLDESC) 897 ifp->if_timer = 0; 898 899 } 900 901 /* printf("txintr end: txdirty: %d, txfree: %d\n", sc->sc_txdirty, sc->sc_txfree); */ 902 } 903 904 /* 905 * admsw_rxintr: 906 * 907 * Helper; handle receive interrupts. 908 */ 909 static void 910 admsw_rxintr(struct admsw_softc *sc, int high) 911 { 912 struct ifnet *ifp; 913 struct admsw_descsoft *ds; 914 struct mbuf *m; 915 uint32_t stat; 916 int i, len, port, vlan; 917 918 /* printf("rxintr\n"); */ 919 if (high) 920 panic("admsw_rxintr: high priority packet\n"); 921 922 #ifdef ADMSW_EVENT_COUNTERS 923 int pkts = 0; 924 #endif 925 926 #if 1 927 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 928 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 929 if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0) 930 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 931 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 932 else { 933 i = sc->sc_rxptr; 934 do { 935 ADMSW_CDRXLSYNC(sc, i, 936 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 937 i = ADMSW_NEXTRXL(i); 938 /* the ring is empty, just return. */ 939 if (i == sc->sc_rxptr) 940 return; 941 ADMSW_CDRXLSYNC(sc, i, 942 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 943 } while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN); 944 ADMSW_CDRXLSYNC(sc, i, 945 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 946 947 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 948 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 949 if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0) 950 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 951 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 952 else { 953 ADMSW_CDRXLSYNC(sc, sc->sc_rxptr, 954 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 955 /* We've fallen behind the chip: catch it. 
			printf("%s: RX ring resync, base=%x, work=%x, %d -> %d\n",
			    device_xname(sc->sc_dev), REG_READ(RECV_LBADDR_REG),
			    REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i);
			sc->sc_rxptr = i;
			ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync);
		}
	}
#endif
	for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) {
		ds = &sc->sc_rxlsoft[i];

		ADMSW_CDRXLSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) {
			ADMSW_CDRXLSYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* printf("process slot %d\n", i); */

#ifdef ADMSW_EVENT_COUNTERS
		pkts++;
#endif

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		stat = sc->sc_rxldescs[i].status;
		len = (stat & ADM5120_DMA_LEN) >> ADM5120_DMA_LENSHIFT;
		len -= ETHER_CRC_LEN;
		port = (stat & ADM5120_DMA_PORTID) >> ADM5120_DMA_PORTSHIFT;
		for (vlan = 0; vlan < SW_DEVS; vlan++)
			if ((1 << port) & vlan_matrix[vlan])
				break;
		if (vlan == SW_DEVS)
			vlan = 0;
		ifp = &sc->sc_ethercom[vlan].ec_if;

		m = ds->ds_mbuf;
		if (admsw_add_rxlbuf(sc, i) != 0) {
			if_statinc(ifp, if_ierrors);
			ADMSW_INIT_RXLDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;
		if ((stat & ADM5120_DMA_TYPE) == ADM5120_DMA_TYPE_IP) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (stat & ADM5120_DMA_CSUMFAIL)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}
#ifdef ADMSW_EVENT_COUNTERS
	if (pkts)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxintr);

	if (pkts == ADMSW_NRXLDESC)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxstall);
#endif

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * admsw_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
admsw_init(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;

	/* printf("admsw_init called\n"); */

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		if (sc->ndevs == 0) {
			admsw_init_bufs(sc);
			admsw_reset(sc);
			REG_WRITE(CPUP_CONF_REG,
			    CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
			    CPUP_CONF_DMCP_MASK);
			/* Clear all pending interrupts */
			REG_WRITE(ADMSW_INT_ST, INT_MASK);

			/* Enable needed interrupts */
			REG_WRITE(ADMSW_INT_MASK, REG_READ(ADMSW_INT_MASK) &
			    ~(ADMSW_INTR_SHD | ADMSW_INTR_SLD |
			    ADMSW_INTR_RHD | ADMSW_INTR_RLD |
			    ADMSW_INTR_HDF | ADMSW_INTR_LDF));
		}
		sc->ndevs++;
	}

	/* Set the receive filter. */
	admsw_set_filter(sc);

	/* Mark iface as running */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

/*
 * admsw_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
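/*
 * All ports share one set of rings and one interrupt, so sc->ndevs
 * counts the ports currently running: the hardware is only quiesced
 * (CPU port and interrupts disabled) when the last running port is
 * stopped, mirroring the one-time setup done in admsw_init().
 */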
1073 */ 1074 static void 1075 admsw_stop(struct ifnet *ifp, int disable) 1076 { 1077 struct admsw_softc *sc = ifp->if_softc; 1078 1079 /* printf("admsw_stop: %d\n", disable); */ 1080 1081 if (!(ifp->if_flags & IFF_RUNNING)) 1082 return; 1083 1084 if (--sc->ndevs == 0) { 1085 /* printf("debug: de-initializing hardware\n"); */ 1086 1087 /* Disable cpu port */ 1088 REG_WRITE(CPUP_CONF_REG, 1089 CPUP_CONF_DCPUP | CPUP_CONF_CRCP | 1090 CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK); 1091 1092 /* XXX We should disable, then clear? --dyoung */ 1093 /* Clear all pending interrupts */ 1094 REG_WRITE(ADMSW_INT_ST, INT_MASK); 1095 1096 /* Disable interrupts */ 1097 REG_WRITE(ADMSW_INT_MASK, INT_MASK); 1098 } 1099 1100 /* Mark the interface as down and cancel the watchdog timer. */ 1101 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1102 ifp->if_timer = 0; 1103 1104 return; 1105 } 1106 1107 /* 1108 * admsw_set_filter: 1109 * 1110 * Set up the receive filter. 1111 */ 1112 static void 1113 admsw_set_filter(struct admsw_softc *sc) 1114 { 1115 int i; 1116 uint32_t allmc, anymc, conf, promisc; 1117 struct ether_multi *enm; 1118 struct ethercom *ec; 1119 struct ifnet *ifp; 1120 struct ether_multistep step; 1121 1122 /* Find which ports should be operated in promisc mode. */ 1123 allmc = anymc = promisc = 0; 1124 for (i = 0; i < SW_DEVS; i++) { 1125 ec = &sc->sc_ethercom[i]; 1126 ifp = &ec->ec_if; 1127 if (ifp->if_flags & IFF_PROMISC) 1128 promisc |= vlan_matrix[i]; 1129 1130 ifp->if_flags &= ~IFF_ALLMULTI; 1131 1132 ETHER_LOCK(ec); 1133 ETHER_FIRST_MULTI(step, ec, enm); 1134 while (enm != NULL) { 1135 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1136 ETHER_ADDR_LEN) != 0) { 1137 printf("%s: punting on mcast range\n", 1138 __func__); 1139 ifp->if_flags |= IFF_ALLMULTI; 1140 allmc |= vlan_matrix[i]; 1141 break; 1142 } 1143 1144 anymc |= vlan_matrix[i]; 1145 1146 #if 0 1147 /* XXX extract subroutine --dyoung */ 1148 REG_WRITE(MAC_WT1_REG, 1149 enm->enm_addrlo[2] | 1150 (enm->enm_addrlo[3] << 8) | 1151 (enm->enm_addrlo[4] << 16) | 1152 (enm->enm_addrlo[5] << 24)); 1153 REG_WRITE(MAC_WT0_REG, 1154 (i << MAC_WT0_VLANID_SHIFT) | 1155 (enm->enm_addrlo[0] << 16) | 1156 (enm->enm_addrlo[1] << 24) | 1157 MAC_WT0_WRITE | MAC_WT0_VLANID_EN); 1158 /* Timeout? */ 1159 while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE)) 1160 ; 1161 #endif 1162 1163 /* Load h/w with mcast address, port = CPU */ 1164 ETHER_NEXT_MULTI(step, enm); 1165 } 1166 ETHER_UNLOCK(ec); 1167 } 1168 1169 conf = REG_READ(CPUP_CONF_REG); 1170 /* 1 Disable forwarding of unknown & multicast packets to 1171 * CPU on all ports. 1172 * 2 Enable forwarding of unknown & multicast packets to 1173 * CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set. 1174 */ 1175 conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK; 1176 /* Enable forwarding of unknown packets to CPU on selected ports. */ 1177 conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK); 1178 conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK); 1179 conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK); 1180 REG_WRITE(CPUP_CONF_REG, conf); 1181 } 1182 1183 /* 1184 * admsw_add_rxbuf: 1185 * 1186 * Add a receive buffer to the indicated descriptor. 
1187 */ 1188 int 1189 admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high) 1190 { 1191 struct admsw_descsoft *ds; 1192 struct mbuf *m; 1193 int error; 1194 1195 if (high) 1196 ds = &sc->sc_rxhsoft[idx]; 1197 else 1198 ds = &sc->sc_rxlsoft[idx]; 1199 1200 MGETHDR(m, M_DONTWAIT, MT_DATA); 1201 if (m == NULL) 1202 return ENOBUFS; 1203 1204 MCLGET(m, M_DONTWAIT); 1205 if ((m->m_flags & M_EXT) == 0) { 1206 m_freem(m); 1207 return ENOBUFS; 1208 } 1209 1210 if (ds->ds_mbuf != NULL) 1211 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); 1212 1213 ds->ds_mbuf = m; 1214 1215 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap, 1216 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1217 BUS_DMA_READ | BUS_DMA_NOWAIT); 1218 if (error) { 1219 printf("%s: can't load rx DMA map %d, error = %d\n", 1220 device_xname(sc->sc_dev), idx, error); 1221 panic("admsw_add_rxbuf"); /* XXX */ 1222 } 1223 1224 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, 1225 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1226 1227 if (high) 1228 ADMSW_INIT_RXHDESC(sc, idx); 1229 else 1230 ADMSW_INIT_RXLDESC(sc, idx); 1231 1232 return 0; 1233 } 1234 1235 int 1236 admsw_mediachange(struct ifnet *ifp) 1237 { 1238 struct admsw_softc *sc = ifp->if_softc; 1239 int port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 1240 struct ifmedia *ifm = &sc->sc_ifmedia[port]; 1241 int old, new, val; 1242 1243 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1244 return EINVAL; 1245 1246 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 1247 val = PHY_CNTL2_AUTONEG | PHY_CNTL2_100M | PHY_CNTL2_FDX; 1248 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { 1249 if ((ifm->ifm_media & IFM_FDX) != 0) 1250 val = PHY_CNTL2_100M | PHY_CNTL2_FDX; 1251 else 1252 val = PHY_CNTL2_100M; 1253 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) { 1254 if ((ifm->ifm_media & IFM_FDX) != 0) 1255 val = PHY_CNTL2_FDX; 1256 else 1257 val = 0; 1258 } else 1259 return EINVAL; 1260 1261 old = REG_READ(PHY_CNTL2_REG); 1262 new = old & ~((PHY_CNTL2_AUTONEG | PHY_CNTL2_100M | PHY_CNTL2_FDX) 1263 << port); 1264 new |= (val << port); 1265 1266 if (new != old) 1267 REG_WRITE(PHY_CNTL2_REG, new); 1268 1269 return 0; 1270 } 1271 1272 void 1273 admsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1274 { 1275 struct admsw_softc *sc = ifp->if_softc; 1276 int port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 1277 int status; 1278 1279 ifmr->ifm_status = IFM_AVALID; 1280 ifmr->ifm_active = IFM_ETHER; 1281 1282 status = REG_READ(PHY_ST_REG) >> port; 1283 1284 if ((status & PHY_ST_LINKUP) == 0) { 1285 ifmr->ifm_active |= IFM_NONE; 1286 return; 1287 } 1288 1289 ifmr->ifm_status |= IFM_ACTIVE; 1290 ifmr->ifm_active |= (status & PHY_ST_100M) ? IFM_100_TX : IFM_10_T; 1291 if (status & PHY_ST_FDX) 1292 ifmr->ifm_active |= IFM_FDX; 1293 else 1294 ifmr->ifm_active |= IFM_HDX; 1295 } 1296