/* $NetBSD: if_admsw.c,v 1.17 2018/06/26 06:47:58 msaitoh Exp $ */

/*-
 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Device driver for the on-chip Ethernet switch engine of the
 * ADMtek ADM5120 SoC.
 *
 * TODO:
 *
 *	Better Rx buffer management; we want to get new Rx buffers
 *	to the chip more quickly than we currently do.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_admsw.c,v 1.17 2018/06/26 06:47:58 msaitoh Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>

#include <prop/proplib.h>

#include <uvm/uvm_extern.h>	/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <sys/gpio.h>
#include <dev/gpio/gpiovar.h>

#include <mips/adm5120/include/adm5120reg.h>
#include <mips/adm5120/include/adm5120var.h>
#include <mips/adm5120/include/adm5120_obiovar.h>
#include <mips/adm5120/dev/if_admswreg.h>
#include <mips/adm5120/dev/if_admswvar.h>

static uint8_t vlan_matrix[SW_DEVS] = {
    (1 << 6) | (1 << 0),	/* CPU + port0 */
    (1 << 6) | (1 << 1),	/* CPU + port1 */
    (1 << 6) | (1 << 2),	/* CPU + port2 */
    (1 << 6) | (1 << 3),	/* CPU + port3 */
    (1 << 6) | (1 << 4),	/* CPU + port4 */
    (1 << 6) | (1 << 5),	/* CPU + port5 */
};

#ifdef ADMSW_EVENT_COUNTERS
#define ADMSW_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define ADMSW_EVCNT_INCR(ev)	/* nothing */
#endif

static void admsw_start(struct ifnet *);
static void admsw_watchdog(struct ifnet *);
static int admsw_ioctl(struct ifnet *, u_long, void *);
static int admsw_init(struct ifnet *);
static void admsw_stop(struct ifnet *, int);

static void admsw_shutdown(void *);

static void admsw_reset(struct admsw_softc *);
static void admsw_set_filter(struct admsw_softc *);

static int admsw_intr(void *);
static void admsw_txintr(struct admsw_softc *, int);
static void admsw_rxintr(struct admsw_softc *, int);
static int admsw_add_rxbuf(struct admsw_softc *, int, int);
#define admsw_add_rxhbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 1)
#define admsw_add_rxlbuf(sc, idx)	admsw_add_rxbuf(sc, idx, 0)

static int admsw_mediachange(struct ifnet *);
static void admsw_mediastatus(struct ifnet *, struct ifmediareq *);

static int admsw_match(device_t, cfdata_t, void *);
static void admsw_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(admsw, sizeof(struct admsw_softc),
    admsw_match, admsw_attach, NULL, NULL);

static int
admsw_match(device_t parent, cfdata_t cf, void *aux)
{
    struct obio_attach_args *aa = aux;

    return strcmp(aa->oba_name, cf->cf_name) == 0;
}

#define REG_READ(o)	bus_space_read_4(sc->sc_st, sc->sc_ioh, (o))
#define REG_WRITE(o, v)	bus_space_write_4(sc->sc_st, sc->sc_ioh, (o), (v))
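/*
 * Descriptor ring overview (as inferred from the code below): the
 * switch has a high- and a low-priority ring in each direction.  This
 * driver transmits and receives on the low-priority rings only and
 * merely keeps the high-priority RX ring populated.  A descriptor is
 * handed to the chip by setting ADM5120_DMA_OWN in its data word, and
 * the last descriptor of each ring carries ADM5120_DMA_RINGEND so the
 * chip wraps back to the base address programmed below.
 */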
static void
admsw_init_bufs(struct admsw_softc *sc)
{
    int i;
    struct admsw_desc *desc;

    for (i = 0; i < ADMSW_NTXHDESC; i++) {
        if (sc->sc_txhsoft[i].ds_mbuf != NULL) {
            m_freem(sc->sc_txhsoft[i].ds_mbuf);
            sc->sc_txhsoft[i].ds_mbuf = NULL;
        }
        desc = &sc->sc_txhdescs[i];
        desc->data = 0;
        desc->cntl = 0;
        desc->len = MAC_BUFLEN;
        desc->status = 0;
        ADMSW_CDTXHSYNC(sc, i,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    }
    sc->sc_txhdescs[ADMSW_NTXHDESC - 1].data |= ADM5120_DMA_RINGEND;
    ADMSW_CDTXHSYNC(sc, ADMSW_NTXHDESC - 1,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    for (i = 0; i < ADMSW_NRXHDESC; i++) {
        if (sc->sc_rxhsoft[i].ds_mbuf == NULL) {
            if (admsw_add_rxhbuf(sc, i) != 0)
                panic("admsw_init_bufs\n");
        } else
            ADMSW_INIT_RXHDESC(sc, i);
    }

    for (i = 0; i < ADMSW_NTXLDESC; i++) {
        if (sc->sc_txlsoft[i].ds_mbuf != NULL) {
            m_freem(sc->sc_txlsoft[i].ds_mbuf);
            sc->sc_txlsoft[i].ds_mbuf = NULL;
        }
        desc = &sc->sc_txldescs[i];
        desc->data = 0;
        desc->cntl = 0;
        desc->len = MAC_BUFLEN;
        desc->status = 0;
        ADMSW_CDTXLSYNC(sc, i,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    }
    sc->sc_txldescs[ADMSW_NTXLDESC - 1].data |= ADM5120_DMA_RINGEND;
    ADMSW_CDTXLSYNC(sc, ADMSW_NTXLDESC - 1,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    for (i = 0; i < ADMSW_NRXLDESC; i++) {
        if (sc->sc_rxlsoft[i].ds_mbuf == NULL) {
            if (admsw_add_rxlbuf(sc, i) != 0)
                panic("admsw_init_bufs\n");
        } else
            ADMSW_INIT_RXLDESC(sc, i);
    }

    REG_WRITE(SEND_HBADDR_REG, ADMSW_CDTXHADDR(sc, 0));
    REG_WRITE(SEND_LBADDR_REG, ADMSW_CDTXLADDR(sc, 0));
    REG_WRITE(RECV_HBADDR_REG, ADMSW_CDRXHADDR(sc, 0));
    REG_WRITE(RECV_LBADDR_REG, ADMSW_CDRXLADDR(sc, 0));

    sc->sc_txfree = ADMSW_NTXLDESC;
    sc->sc_txnext = 0;
    sc->sc_txdirty = 0;
    sc->sc_rxptr = 0;
}
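/*
 * Each VLAN's entry in the matrix is a port bitmap: bit N enables
 * switch port N and bit 6 the CPU port, so the default table above
 * pairs every port with the CPU.  As an illustrative sketch (not the
 * shipped default), bridging ports 0 and 1 into the first VLAN would
 * look like:
 *
 *	vlan_matrix[0] = (1 << 6) | (1 << 1) | (1 << 0);
 *	admsw_setvlan(sc, vlan_matrix);
 *
 * admsw_setvlan() below packs the six per-VLAN bytes into the two
 * VLAN group registers: VLANs 0-3 into VLAN_G1_REG, VLANs 4-5 into
 * VLAN_G2_REG.
 */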
static void
admsw_setvlan(struct admsw_softc *sc, char matrix[6])
{
    uint32_t i;

    i = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16) +
        (matrix[3] << 24);
    REG_WRITE(VLAN_G1_REG, i);
    i = matrix[4] + (matrix[5] << 8);
    REG_WRITE(VLAN_G2_REG, i);
}

static void
admsw_reset(struct admsw_softc *sc)
{
    uint32_t wdog1;
    int i;

    REG_WRITE(PORT_CONF0_REG,
        REG_READ(PORT_CONF0_REG) | PORT_CONF0_DP_MASK);
    REG_WRITE(CPUP_CONF_REG,
        REG_READ(CPUP_CONF_REG) | CPUP_CONF_DCPUP);

    /* Wait for DMA to complete.  Overkill.  In 3ms, we can
     * send at least two entire 1500-byte packets at 10 Mb/s.
     */
    DELAY(3000);

    /* The datasheet recommends that we move all PHYs to reset
     * state prior to software reset.
     */
    REG_WRITE(PHY_CNTL2_REG,
        REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK);

    /* Reset the switch. */
    REG_WRITE(ADMSW_SW_RES, 0x1);

    DELAY(100 * 1000);

    REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO);

    /* begin old code */
    REG_WRITE(CPUP_CONF_REG,
        CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
        CPUP_CONF_DMCP_MASK);

    REG_WRITE(PORT_CONF0_REG, PORT_CONF0_EMCP_MASK | PORT_CONF0_EMBP_MASK);

    REG_WRITE(PHY_CNTL2_REG,
        REG_READ(PHY_CNTL2_REG) | PHY_CNTL2_ANE_MASK | PHY_CNTL2_PHYR_MASK |
        PHY_CNTL2_AMDIX_MASK);

    REG_WRITE(PHY_CNTL3_REG, REG_READ(PHY_CNTL3_REG) | PHY_CNTL3_RNT);

    REG_WRITE(ADMSW_INT_MASK, INT_MASK);
    REG_WRITE(ADMSW_INT_ST, INT_MASK);

    /*
     * While we sit in DDB, interrupts are not serviced, the RX ring
     * fills up, and once the free-block counter falls below the FC
     * threshold the switch starts to emit 802.3x PAUSE frames.  This
     * can upset peer switches.
     *
     * Stop this from happening by disabling the FC and D2 thresholds.
     */
    REG_WRITE(FC_TH_REG,
        REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK));

    admsw_setvlan(sc, vlan_matrix);

    for (i = 0; i < SW_DEVS; i++) {
        REG_WRITE(MAC_WT1_REG,
            sc->sc_enaddr[2] |
            (sc->sc_enaddr[3] << 8) |
            (sc->sc_enaddr[4] << 16) |
            ((sc->sc_enaddr[5] + i) << 24));
        REG_WRITE(MAC_WT0_REG, (i << MAC_WT0_VLANID_SHIFT) |
            (sc->sc_enaddr[0] << 16) | (sc->sc_enaddr[1] << 24) |
            MAC_WT0_WRITE | MAC_WT0_VLANID_EN);

        while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE))
            /* spin until the MAC table write completes */;
    }
    wdog1 = REG_READ(ADM5120_WDOG1);
    REG_WRITE(ADM5120_WDOG1, wdog1 & ~ADM5120_WDOG1_WDE);
}
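/*
 * Note the address scheme established by admsw_reset() above: one
 * unicast MAC table entry is written per VLAN, using the base
 * Ethernet address with the last octet incremented by the VLAN
 * number.  admsw_attach() below hands the same sequence of addresses
 * to the SW_DEVS interfaces it creates, so each admswN interface owns
 * the address its VLAN filters on.
 */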
static void
admsw_attach(device_t parent, device_t self, void *aux)
{
    uint8_t enaddr[ETHER_ADDR_LEN];
    struct admsw_softc *sc = device_private(self);
    struct obio_attach_args *aa = aux;
    struct ifnet *ifp;
    bus_dma_segment_t seg;
    int error, i, rseg;
    prop_data_t pd;

    printf(": ADM5120 Switch Engine, %d ports\n", SW_DEVS);

    sc->sc_dev = self;
    sc->sc_dmat = aa->oba_dt;
    sc->sc_st = aa->oba_st;

    pd = prop_dictionary_get(device_properties(self), "mac-address");

    if (pd == NULL) {
        enaddr[0] = 0x02;
        enaddr[1] = 0xaa;
        enaddr[2] = 0xbb;
        enaddr[3] = 0xcc;
        enaddr[4] = 0xdd;
        enaddr[5] = 0xee;
    } else
        memcpy(enaddr, prop_data_data_nocopy(pd), sizeof(enaddr));

    memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));

    printf("%s: base Ethernet address %s\n", device_xname(sc->sc_dev),
        ether_sprintf(enaddr));

    /* Map the device. */
    if (bus_space_map(sc->sc_st, aa->oba_addr, 512, 0, &sc->sc_ioh) != 0) {
        printf("%s: unable to map device\n", device_xname(sc->sc_dev));
        return;
    }

    /* Hook up the interrupt handler. */
    sc->sc_ih = adm5120_intr_establish(aa->oba_irq, INTR_IRQ,
        admsw_intr, sc);

    if (sc->sc_ih == NULL) {
        printf("%s: unable to register interrupt handler\n",
            device_xname(sc->sc_dev));
        return;
    }

    /*
     * Allocate the control data structures, and create and load the
     * DMA map for it.
     */
    if ((error = bus_dmamem_alloc(sc->sc_dmat,
        sizeof(struct admsw_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
        0)) != 0) {
        printf("%s: unable to allocate control data, error = %d\n",
            device_xname(sc->sc_dev), error);
        return;
    }
    if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
        sizeof(struct admsw_control_data), (void *)&sc->sc_control_data,
        0)) != 0) {
        printf("%s: unable to map control data, error = %d\n",
            device_xname(sc->sc_dev), error);
        return;
    }
    if ((error = bus_dmamap_create(sc->sc_dmat,
        sizeof(struct admsw_control_data), 1,
        sizeof(struct admsw_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
        printf("%s: unable to create control data DMA map, "
            "error = %d\n", device_xname(sc->sc_dev), error);
        return;
    }
    if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
        sc->sc_control_data, sizeof(struct admsw_control_data), NULL,
        0)) != 0) {
        printf("%s: unable to load control data DMA map, error = %d\n",
            device_xname(sc->sc_dev), error);
        return;
    }

    /*
     * Create the transmit buffer DMA maps.
     */
    for (i = 0; i < ADMSW_NTXHDESC; i++) {
        if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
            2, MCLBYTES, 0, 0,
            &sc->sc_txhsoft[i].ds_dmamap)) != 0) {
            printf("%s: unable to create txh DMA map %d, "
                "error = %d\n", device_xname(sc->sc_dev), i, error);
            return;
        }
        sc->sc_txhsoft[i].ds_mbuf = NULL;
    }
    for (i = 0; i < ADMSW_NTXLDESC; i++) {
        if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
            2, MCLBYTES, 0, 0,
            &sc->sc_txlsoft[i].ds_dmamap)) != 0) {
            printf("%s: unable to create txl DMA map %d, "
                "error = %d\n", device_xname(sc->sc_dev), i, error);
            return;
        }
        sc->sc_txlsoft[i].ds_mbuf = NULL;
    }

    /*
     * Create the receive buffer DMA maps.
     */
    for (i = 0; i < ADMSW_NRXHDESC; i++) {
        if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
            MCLBYTES, 0, 0, &sc->sc_rxhsoft[i].ds_dmamap)) != 0) {
            printf("%s: unable to create rxh DMA map %d, "
                "error = %d\n", device_xname(sc->sc_dev), i, error);
            return;
        }
        sc->sc_rxhsoft[i].ds_mbuf = NULL;
    }
    for (i = 0; i < ADMSW_NRXLDESC; i++) {
        if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
            MCLBYTES, 0, 0, &sc->sc_rxlsoft[i].ds_dmamap)) != 0) {
            printf("%s: unable to create rxl DMA map %d, "
                "error = %d\n", device_xname(sc->sc_dev), i, error);
            return;
        }
        sc->sc_rxlsoft[i].ds_mbuf = NULL;
    }

    admsw_init_bufs(sc);

    admsw_reset(sc);

    for (i = 0; i < SW_DEVS; i++) {
        ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange,
            admsw_mediastatus);
        ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER | IFM_10_T, 0, NULL);
        ifmedia_add(&sc->sc_ifmedia[i],
            IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
        ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER | IFM_100_TX, 0, NULL);
        ifmedia_add(&sc->sc_ifmedia[i],
            IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
        ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER | IFM_AUTO);

        ifp = &sc->sc_ethercom[i].ec_if;
        strcpy(ifp->if_xname, device_xname(sc->sc_dev));
        ifp->if_xname[5] += i;	/* "admsw0" -> "admsw0".."admsw5" */
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = admsw_ioctl;
        ifp->if_start = admsw_start;
        ifp->if_watchdog = admsw_watchdog;
        ifp->if_init = admsw_init;
        ifp->if_stop = admsw_stop;
        ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
        IFQ_SET_MAXLEN(&ifp->if_snd, max(ADMSW_NTXLDESC, IFQ_MAXLEN));
        IFQ_SET_READY(&ifp->if_snd);

        /* Attach the interface. */
        if_attach(ifp);
        if_deferred_start_init(ifp, NULL);
        ether_ifattach(ifp, enaddr);
        enaddr[5]++;	/* next interface gets the next MAC address */
    }

#ifdef ADMSW_EVENT_COUNTERS
    evcnt_attach_dynamic(&sc->sc_ev_txstall, EVCNT_TYPE_MISC,
        NULL, device_xname(sc->sc_dev), "txstall");
    evcnt_attach_dynamic(&sc->sc_ev_rxstall, EVCNT_TYPE_MISC,
        NULL, device_xname(sc->sc_dev), "rxstall");
    evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_MISC,
        NULL, device_xname(sc->sc_dev), "txintr");
    evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_MISC,
        NULL, device_xname(sc->sc_dev), "rxintr");
#if 1
    evcnt_attach_dynamic(&sc->sc_ev_rxsync, EVCNT_TYPE_MISC,
        NULL, device_xname(sc->sc_dev), "rxsync");
#endif
#endif

    admwdog_attach(sc);

    /* Make sure the interface is shutdown during reboot. */
    sc->sc_sdhook = shutdownhook_establish(admsw_shutdown, sc);
    if (sc->sc_sdhook == NULL)
        printf("%s: WARNING: unable to establish shutdown hook\n",
            device_xname(sc->sc_dev));

    /* leave interrupts and cpu port disabled */
    return;
}
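/*
 * When built with ADMSW_EVENT_COUNTERS, the txstall/rxstall/txintr/
 * rxintr/rxsync statistics attached above are ordinary event counters
 * and should be visible from userland via vmstat(1)'s -e flag.
 */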
/*
 * admsw_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
admsw_shutdown(void *arg)
{
    struct admsw_softc *sc = arg;
    int i;

    for (i = 0; i < SW_DEVS; i++)
        admsw_stop(&sc->sc_ethercom[i].ec_if, 1);
}

/*
 * admsw_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
admsw_start(struct ifnet *ifp)
{
    struct admsw_softc *sc = ifp->if_softc;
    struct mbuf *m0, *m;
    struct admsw_descsoft *ds;
    struct admsw_desc *desc;
    bus_dmamap_t dmamap;
    struct ether_header *eh;
    int error, nexttx, len, i;
    static int vlan = 0;

    /*
     * Loop through the send queues, setting up transmit descriptors
     * until we drain the queues, or use up all available transmit
     * descriptors.
     */
    for (;;) {
        /* Round-robin over the SW_DEVS interface send queues. */
        vlan++;
        if (vlan == SW_DEVS)
            vlan = 0;
        i = vlan;
        for (;;) {
            ifp = &sc->sc_ethercom[i].ec_if;
            if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) ==
                IFF_RUNNING) {
                /* Grab a packet off the queue. */
                IFQ_POLL(&ifp->if_snd, m0);
                if (m0 != NULL)
                    break;
            }
            i++;
            if (i == SW_DEVS)
                i = 0;
            if (i == vlan)
                return;
        }
        vlan = i;
        m = NULL;

        /* Get a spare descriptor. */
        if (sc->sc_txfree == 0) {
            /* No more slots left; notify upper layer. */
            ifp->if_flags |= IFF_OACTIVE;
            ADMSW_EVCNT_INCR(&sc->sc_ev_txstall);
            break;
        }
        nexttx = sc->sc_txnext;
        desc = &sc->sc_txldescs[nexttx];
        ds = &sc->sc_txlsoft[nexttx];
        dmamap = ds->ds_dmamap;

        /*
         * Load the DMA map.  If this fails, the packet either
         * didn't fit in the allotted number of segments, or we
         * were short on resources.  In this case, we'll copy
         * and try again.  Short packets are always copied, so
         * they can be padded to the minimum frame length.
         */
        if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
            bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
            BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
            MGETHDR(m, M_DONTWAIT, MT_DATA);
            if (m == NULL) {
                printf("%s: unable to allocate Tx mbuf\n",
                    device_xname(sc->sc_dev));
                break;
            }
            if (m0->m_pkthdr.len > MHLEN) {
                MCLGET(m, M_DONTWAIT);
                if ((m->m_flags & M_EXT) == 0) {
                    printf("%s: unable to allocate Tx "
                        "cluster\n",
                        device_xname(sc->sc_dev));
                    m_freem(m);
                    break;
                }
            }
            m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
            m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
            m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
            if (m->m_pkthdr.len < ETHER_MIN_LEN) {
                if (M_TRAILINGSPACE(m) <
                    ETHER_MIN_LEN - m->m_pkthdr.len)
                    panic("admsw_start: M_TRAILINGSPACE\n");
                memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
                    ETHER_MIN_LEN - ETHER_CRC_LEN -
                    m->m_pkthdr.len);
                m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
            }
            error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
                m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
            if (error) {
                printf("%s: unable to load Tx buffer, "
                    "error = %d\n",
                    device_xname(sc->sc_dev), error);
                break;
            }
        }

        IFQ_DEQUEUE(&ifp->if_snd, m0);
        if (m != NULL) {
            m_freem(m0);
            m0 = m;
        }

        /*
         * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
         */

        /* Sync the DMA map. */
        bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
            BUS_DMASYNC_PREWRITE);

        if (dmamap->dm_nsegs != 1 && dmamap->dm_nsegs != 2)
            panic("admsw_start: dm_nsegs == %d\n",
                dmamap->dm_nsegs);
        desc->data = dmamap->dm_segs[0].ds_addr;
        desc->len = len = dmamap->dm_segs[0].ds_len;
        if (dmamap->dm_nsegs > 1) {
            len += dmamap->dm_segs[1].ds_len;
            desc->cntl = dmamap->dm_segs[1].ds_addr |
                ADM5120_DMA_BUF2ENABLE;
        } else
            desc->cntl = 0;
        desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
        eh = mtod(m0, struct ether_header *);
        if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
            m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
            desc->status |= ADM5120_DMA_CSUM;
        if (nexttx == ADMSW_NTXLDESC - 1)
            desc->data |= ADM5120_DMA_RINGEND;
        desc->data |= ADM5120_DMA_OWN;

        /* Sync the descriptor. */
        ADMSW_CDTXLSYNC(sc, nexttx,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        REG_WRITE(SEND_TRIG_REG, 1);
        /* printf("send slot %d\n", nexttx); */

        /*
         * Store a pointer to the packet so we can free it later.
         */
        ds->ds_mbuf = m0;

        /* Advance the Tx pointer. */
        sc->sc_txfree--;
        sc->sc_txnext = ADMSW_NEXTTXL(nexttx);

        /* Pass the packet to any BPF listeners. */
        bpf_mtap(ifp, m0, BPF_D_OUT);

        /* Set a watchdog timer in case the chip flakes out. */
        sc->sc_ethercom[0].ec_if.if_timer = 5;
    }
}
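/*
 * All ports share the two transmit rings, so the driver keeps a
 * single watchdog timer on the first interface (sc_ethercom[0]):
 * admsw_start() above recharges it whenever a packet is queued, and
 * admsw_txintr() cancels it once the low-priority ring drains.
 */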
/*
 * admsw_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
admsw_watchdog(struct ifnet *ifp)
{
    struct admsw_softc *sc = ifp->if_softc;
    int vlan;

#if 1
    /* Check if an interrupt was lost. */
    if (sc->sc_txfree == ADMSW_NTXLDESC) {
        printf("%s: watchdog false alarm\n", device_xname(sc->sc_dev));
        return;
    }
    if (sc->sc_ethercom[0].ec_if.if_timer != 0)
        printf("%s: watchdog timer is %d!\n",
            device_xname(sc->sc_dev),
            sc->sc_ethercom[0].ec_if.if_timer);
    admsw_txintr(sc, 0);
    if (sc->sc_txfree == ADMSW_NTXLDESC) {
        printf("%s: tx IRQ lost (queue empty)\n",
            device_xname(sc->sc_dev));
        return;
    }
    if (sc->sc_ethercom[0].ec_if.if_timer != 0) {
        printf("%s: tx IRQ lost (timer recharged)\n",
            device_xname(sc->sc_dev));
        return;
    }
#endif

    printf("%s: device timeout, txfree = %d\n",
        device_xname(sc->sc_dev), sc->sc_txfree);
    for (vlan = 0; vlan < SW_DEVS; vlan++)
        admsw_stop(&sc->sc_ethercom[vlan].ec_if, 0);
    for (vlan = 0; vlan < SW_DEVS; vlan++)
        (void)admsw_init(&sc->sc_ethercom[vlan].ec_if);

    /* Try to get more packets going. */
    admsw_start(ifp);
}
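/*
 * The VLAN matrix is exported through SIOCGDRVSPEC/SIOCSDRVSPEC with
 * ifd_cmd 0 and ifd_len 6 (see admsw_ioctl() below).  A minimal
 * userland sketch, error handling omitted ("s" is an open socket and
 * the interface name is only an example):
 *
 *	struct ifdrv ifd;
 *	uint8_t matrix[6];
 *
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(ifd.ifd_name, "admsw0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = 0;
 *	ifd.ifd_len = sizeof(matrix);
 *	ifd.ifd_data = matrix;
 *	ioctl(s, SIOCGDRVSPEC, &ifd);	(reads the current matrix)
 */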
/*
 * admsw_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
admsw_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
    struct admsw_softc *sc = ifp->if_softc;
    struct ifdrv *ifd;
    int s, error, port;

    s = splnet();

    switch (cmd) {
    case SIOCSIFCAP:
        if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET)
            error = 0;
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        port = (struct ethercom *)ifp - sc->sc_ethercom;	/* XXX */
        if (port >= SW_DEVS)
            error = EOPNOTSUPP;
        else
            error = ifmedia_ioctl(ifp, (struct ifreq *)data,
                &sc->sc_ifmedia[port], cmd);
        break;

    case SIOCGDRVSPEC:
    case SIOCSDRVSPEC:
        ifd = (struct ifdrv *)data;
        if (ifd->ifd_cmd != 0 || ifd->ifd_len != sizeof(vlan_matrix)) {
            error = EINVAL;
            break;
        }
        if (cmd == SIOCGDRVSPEC) {
            error = copyout(vlan_matrix, ifd->ifd_data,
                sizeof(vlan_matrix));
        } else {
            error = copyin(ifd->ifd_data, vlan_matrix,
                sizeof(vlan_matrix));
            /* Only reprogram the switch if the copyin succeeded. */
            if (error == 0)
                admsw_setvlan(sc, vlan_matrix);
        }
        break;

    default:
        error = ether_ioctl(ifp, cmd, data);
        if (error == ENETRESET) {
            /*
             * Multicast list has changed; set the hardware filter
             * accordingly.
             */
            admsw_set_filter(sc);
            error = 0;
        }
        break;
    }

    /* Try to get more packets going. */
    admsw_start(ifp);

    splx(s);
    return (error);
}

/*
 * admsw_intr:
 *
 *	Interrupt service routine.
 */
static int
admsw_intr(void *arg)
{
    struct admsw_softc *sc = arg;
    uint32_t pending;
    char buf[64];

    pending = REG_READ(ADMSW_INT_ST);

    if ((pending & ~(ADMSW_INTR_RHD | ADMSW_INTR_RLD | ADMSW_INTR_SHD |
        ADMSW_INTR_SLD | ADMSW_INTR_W1TE | ADMSW_INTR_W0TE)) != 0) {
        snprintb(buf, sizeof(buf), ADMSW_INT_FMT, pending);
        printf("%s: pending=%s\n", __func__, buf);
    }
    REG_WRITE(ADMSW_INT_ST, pending);

    if (sc->ndevs == 0)
        return (0);

    if ((pending & ADMSW_INTR_RHD) != 0)
        admsw_rxintr(sc, 1);

    if ((pending & ADMSW_INTR_RLD) != 0)
        admsw_rxintr(sc, 0);

    if ((pending & ADMSW_INTR_SHD) != 0)
        admsw_txintr(sc, 1);

    if ((pending & ADMSW_INTR_SLD) != 0)
        admsw_txintr(sc, 0);

    return (1);
}
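/*
 * admsw_intr() above acknowledges everything it saw by writing the
 * pending bits straight back to ADMSW_INT_ST; together with the
 * "clear all pending interrupts" writes elsewhere in this file, the
 * status register is evidently write-one-to-clear.  The helpers below
 * then reclaim finished descriptors by polling the OWN bit.
 */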
/*
 * admsw_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
admsw_txintr(struct admsw_softc *sc, int prio)
{
    struct ifnet *ifp;
    struct admsw_desc *desc;
    struct admsw_descsoft *ds;
    int i, vlan;
    int gotone = 0;

    /* printf("txintr: txdirty: %d, txfree: %d\n", sc->sc_txdirty, sc->sc_txfree); */
    for (i = sc->sc_txdirty; sc->sc_txfree != ADMSW_NTXLDESC;
        i = ADMSW_NEXTTXL(i)) {

        ADMSW_CDTXLSYNC(sc, i,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        desc = &sc->sc_txldescs[i];
        ds = &sc->sc_txlsoft[i];
        if (desc->data & ADM5120_DMA_OWN) {
            ADMSW_CDTXLSYNC(sc, i,
                BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
            break;
        }

        bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
            0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
        m_freem(ds->ds_mbuf);
        ds->ds_mbuf = NULL;

        vlan = ffs(desc->status & 0x3f) - 1;
        if (vlan < 0 || vlan >= SW_DEVS)
            panic("admsw_txintr: bad vlan\n");
        ifp = &sc->sc_ethercom[vlan].ec_if;
        gotone = 1;
        /* printf("clear tx slot %d\n", i); */

        ifp->if_opackets++;

        sc->sc_txfree++;
    }

    if (gotone) {
        sc->sc_txdirty = i;
#ifdef ADMSW_EVENT_COUNTERS
        ADMSW_EVCNT_INCR(&sc->sc_ev_txintr);
#endif
        for (vlan = 0; vlan < SW_DEVS; vlan++)
            sc->sc_ethercom[vlan].ec_if.if_flags &= ~IFF_OACTIVE;

        ifp = &sc->sc_ethercom[0].ec_if;

        /* Try to queue more packets. */
        if_schedule_deferred_start(ifp);

        /*
         * If there are no more pending transmissions,
         * cancel the watchdog timer.
         */
        if (sc->sc_txfree == ADMSW_NTXLDESC)
            ifp->if_timer = 0;

    }

    /* printf("txintr end: txdirty: %d, txfree: %d\n", sc->sc_txdirty, sc->sc_txfree); */
}
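/*
 * The resync block at the top of admsw_rxintr() below works around
 * the case where sc_rxptr points at a chip-owned (empty) descriptor
 * even though filled descriptors exist further ahead in the ring,
 * e.g. after the chip wrapped while interrupts went unserviced in
 * DDB.  A plain scan from sc_rxptr would then conclude the ring is
 * empty, so the code hunts forward for the first host-owned (filled)
 * descriptor and resynchronizes sc_rxptr to it.
 */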
/*
 * admsw_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
admsw_rxintr(struct admsw_softc *sc, int high)
{
    struct ifnet *ifp;
    struct admsw_descsoft *ds;
    struct mbuf *m;
    uint32_t stat;
    int i, len, port, vlan;

    /* printf("rxintr\n"); */
    if (high)
        panic("admsw_rxintr: high priority packet\n");

#ifdef ADMSW_EVENT_COUNTERS
    int pkts = 0;
#endif

#if 1
    ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
        ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    else {
        i = sc->sc_rxptr;
        do {
            ADMSW_CDRXLSYNC(sc, i,
                BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
            i = ADMSW_NEXTRXL(i);
            /* the ring is empty, just return. */
            if (i == sc->sc_rxptr)
                return;
            ADMSW_CDRXLSYNC(sc, i,
                BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        } while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN);
        ADMSW_CDRXLSYNC(sc, i,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
            ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
                BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        else {
            ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
                BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
            /* We've fallen behind the chip: catch it. */
            printf("%s: RX ring resync, base=%x, work=%x, %d -> %d\n",
                device_xname(sc->sc_dev), REG_READ(RECV_LBADDR_REG),
                REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i);
            sc->sc_rxptr = i;
            ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync);
        }
    }
#endif
    for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) {
        ds = &sc->sc_rxlsoft[i];

        ADMSW_CDRXLSYNC(sc, i,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) {
            ADMSW_CDRXLSYNC(sc, i,
                BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
            break;
        }

        /* printf("process slot %d\n", i); */

#ifdef ADMSW_EVENT_COUNTERS
        pkts++;
#endif

        bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
            ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

        stat = sc->sc_rxldescs[i].status;
        len = (stat & ADM5120_DMA_LEN) >> ADM5120_DMA_LENSHIFT;
        len -= ETHER_CRC_LEN;
        port = (stat & ADM5120_DMA_PORTID) >> ADM5120_DMA_PORTSHIFT;
        for (vlan = 0; vlan < SW_DEVS; vlan++)
            if ((1 << port) & vlan_matrix[vlan])
                break;
        if (vlan == SW_DEVS)
            vlan = 0;
        ifp = &sc->sc_ethercom[vlan].ec_if;

        m = ds->ds_mbuf;
        if (admsw_add_rxlbuf(sc, i) != 0) {
            ifp->if_ierrors++;
            ADMSW_INIT_RXLDESC(sc, i);
            bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
                ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
            continue;
        }

        m_set_rcvif(m, ifp);
        m->m_pkthdr.len = m->m_len = len;
        if ((stat & ADM5120_DMA_TYPE) == ADM5120_DMA_TYPE_IP) {
            m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
            if (stat & ADM5120_DMA_CSUMFAIL)
                m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
        }

        /* Pass it on. */
        if_percpuq_enqueue(ifp->if_percpuq, m);
    }
#ifdef ADMSW_EVENT_COUNTERS
    if (pkts)
        ADMSW_EVCNT_INCR(&sc->sc_ev_rxintr);

    if (pkts == ADMSW_NRXLDESC)
        ADMSW_EVCNT_INCR(&sc->sc_ev_rxstall);
#endif

    /* Update the receive pointer. */
    sc->sc_rxptr = i;
}

/*
 * admsw_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
admsw_init(struct ifnet *ifp)
{
    struct admsw_softc *sc = ifp->if_softc;

    /* printf("admsw_init called\n"); */

    if ((ifp->if_flags & IFF_RUNNING) == 0) {
        if (sc->ndevs == 0) {
            admsw_init_bufs(sc);
            admsw_reset(sc);
            REG_WRITE(CPUP_CONF_REG,
                CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
                CPUP_CONF_DMCP_MASK);
            /* clear all pending interrupts */
            REG_WRITE(ADMSW_INT_ST, INT_MASK);

            /* enable needed interrupts */
            REG_WRITE(ADMSW_INT_MASK, REG_READ(ADMSW_INT_MASK) &
                ~(ADMSW_INTR_SHD | ADMSW_INTR_SLD | ADMSW_INTR_RHD |
                ADMSW_INTR_RLD | ADMSW_INTR_HDF | ADMSW_INTR_LDF));
        }
        sc->ndevs++;
    }

    /* Set the receive filter. */
    admsw_set_filter(sc);

    /* mark iface as running */
    ifp->if_flags |= IFF_RUNNING;
    ifp->if_flags &= ~IFF_OACTIVE;

    return 0;
}
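/*
 * sc->ndevs (incremented in admsw_init() above, decremented in
 * admsw_stop() below) gives the shared hardware first-open/last-close
 * semantics: the switch is reset and interrupts are unmasked only when
 * the first interface comes up, and the CPU port is disabled and
 * interrupts are masked again only when the last one goes down.
 */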
/*
 * admsw_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
admsw_stop(struct ifnet *ifp, int disable)
{
    struct admsw_softc *sc = ifp->if_softc;

    /* printf("admsw_stop: %d\n", disable); */

    if (!(ifp->if_flags & IFF_RUNNING))
        return;

    if (--sc->ndevs == 0) {
        /* printf("debug: de-initializing hardware\n"); */

        /* disable cpu port */
        REG_WRITE(CPUP_CONF_REG,
            CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
            CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK);

        /* XXX We should disable, then clear? --dyoung */
        /* clear all pending interrupts */
        REG_WRITE(ADMSW_INT_ST, INT_MASK);

        /* disable interrupts */
        REG_WRITE(ADMSW_INT_MASK, INT_MASK);
    }

    /* Mark the interface as down and cancel the watchdog timer. */
    ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
    ifp->if_timer = 0;

    return;
}

/*
 * admsw_set_filter:
 *
 *	Set up the receive filter.
 */
static void
admsw_set_filter(struct admsw_softc *sc)
{
    int i;
    uint32_t allmc, anymc, conf, promisc;
    struct ether_multi *enm;
    struct ethercom *ec;
    struct ifnet *ifp;
    struct ether_multistep step;

    /* Find which ports should be operated in promisc mode. */
    allmc = anymc = promisc = 0;
    for (i = 0; i < SW_DEVS; i++) {
        ec = &sc->sc_ethercom[i];
        ifp = &ec->ec_if;
        if (ifp->if_flags & IFF_PROMISC)
            promisc |= vlan_matrix[i];

        ifp->if_flags &= ~IFF_ALLMULTI;

        ETHER_FIRST_MULTI(step, ec, enm);
        while (enm != NULL) {
            if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
                ETHER_ADDR_LEN) != 0) {
                printf("%s: punting on mcast range\n",
                    __func__);
                ifp->if_flags |= IFF_ALLMULTI;
                allmc |= vlan_matrix[i];
                break;
            }

            anymc |= vlan_matrix[i];

#if 0
            /* XXX extract subroutine --dyoung */
            REG_WRITE(MAC_WT1_REG,
                enm->enm_addrlo[2] |
                (enm->enm_addrlo[3] << 8) |
                (enm->enm_addrlo[4] << 16) |
                (enm->enm_addrlo[5] << 24));
            REG_WRITE(MAC_WT0_REG,
                (i << MAC_WT0_VLANID_SHIFT) |
                (enm->enm_addrlo[0] << 16) |
                (enm->enm_addrlo[1] << 24) |
                MAC_WT0_WRITE | MAC_WT0_VLANID_EN);
            /* timeout? */
            while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE));
#endif

            /* load h/w with mcast address, port = CPU */
            ETHER_NEXT_MULTI(step, enm);
        }
    }

    conf = REG_READ(CPUP_CONF_REG);
    /*
     * 1. Disable forwarding of unknown & multicast packets to
     *    the CPU on all ports.
     * 2. Enable forwarding of unknown & multicast packets to
     *    the CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set.
     */
    conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK;
    /* Enable forwarding of unknown packets to CPU on selected ports. */
    conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK);
    /*
     * A port may have contributed to both allmc and anymc; make the
     * sets disjoint so its DMCP bit is not toggled back off by the
     * second XOR below.
     */
    anymc &= ~allmc;
    conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
    conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
    REG_WRITE(CPUP_CONF_REG, conf);
}

/*
 * admsw_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high)
{
    struct admsw_descsoft *ds;
    struct mbuf *m;
    int error;

    if (high)
        ds = &sc->sc_rxhsoft[idx];
    else
        ds = &sc->sc_rxlsoft[idx];

    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (m == NULL)
        return (ENOBUFS);

    MCLGET(m, M_DONTWAIT);
    if ((m->m_flags & M_EXT) == 0) {
        m_freem(m);
        return (ENOBUFS);
    }

    if (ds->ds_mbuf != NULL)
        bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

    ds->ds_mbuf = m;

    error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
        m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
        BUS_DMA_READ | BUS_DMA_NOWAIT);
    if (error) {
        printf("%s: can't load rx DMA map %d, error = %d\n",
            device_xname(sc->sc_dev), idx, error);
        panic("admsw_add_rxbuf");	/* XXX */
    }

    bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
        ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

    if (high)
        ADMSW_INIT_RXHDESC(sc, idx);
    else
        ADMSW_INIT_RXLDESC(sc, idx);

    return (0);
}
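/*
 * Media is forced or autonegotiated per port through the standard
 * ifmedia ioctls, e.g. from userland (interface name is only an
 * example):
 *
 *	ifconfig admsw1 media 100baseTX mediaopt full-duplex
 *	ifconfig admsw1 media auto
 *
 * admsw_mediachange() below maps these requests onto the per-port
 * AUTONEG/100M/FDX bits in PHY_CNTL2_REG; admsw_mediastatus() reads
 * link, speed, and duplex back from PHY_ST_REG.
 */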
int
admsw_mediachange(struct ifnet *ifp)
{
    struct admsw_softc *sc = ifp->if_softc;
    int port = (struct ethercom *)ifp - sc->sc_ethercom;	/* XXX */
    struct ifmedia *ifm = &sc->sc_ifmedia[port];
    int old, new, val;

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
        val = PHY_CNTL2_AUTONEG | PHY_CNTL2_100M | PHY_CNTL2_FDX;
    } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
        if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
            val = PHY_CNTL2_100M | PHY_CNTL2_FDX;
        else
            val = PHY_CNTL2_100M;
    } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
        if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
            val = PHY_CNTL2_FDX;
        else
            val = 0;
    } else
        return (EINVAL);

    old = REG_READ(PHY_CNTL2_REG);
    new = old & ~((PHY_CNTL2_AUTONEG | PHY_CNTL2_100M | PHY_CNTL2_FDX)
        << port);
    new |= (val << port);

    if (new != old)
        REG_WRITE(PHY_CNTL2_REG, new);

    return (0);
}

void
admsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct admsw_softc *sc = ifp->if_softc;
    int port = (struct ethercom *)ifp - sc->sc_ethercom;	/* XXX */
    int status;

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    status = REG_READ(PHY_ST_REG) >> port;

    if ((status & PHY_ST_LINKUP) == 0) {
        ifmr->ifm_active |= IFM_NONE;
        return;
    }

    ifmr->ifm_status |= IFM_ACTIVE;
    ifmr->ifm_active |= (status & PHY_ST_100M) ? IFM_100_TX : IFM_10_T;
    if (status & PHY_ST_FDX)
        ifmr->ifm_active |= IFM_FDX;
    else
        ifmr->ifm_active |= IFM_HDX;
}