1 /* $NetBSD: if_admsw.c,v 1.31 2024/02/10 09:30:05 andvar Exp $ */ 2 3 /*- 4 * Copyright (c) 2007 Ruslan Ermilov and Vsevolod Lobko. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or 8 * without modification, are permitted provided that the following 9 * conditions are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above 13 * copyright notice, this list of conditions and the following 14 * disclaimer in the documentation and/or other materials provided 15 * with the distribution. 16 * 3. The names of the authors may not be used to endorse or promote 17 * products derived from this software without specific prior 18 * written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY 21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 23 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, 25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 26 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, 27 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 29 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY 31 * OF SUCH DAMAGE. 32 */ 33 /* 34 * Copyright (c) 2001 Wasabi Systems, Inc. 35 * All rights reserved. 36 * 37 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 1. 
Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Ethernet switch engine built into the
 * ADMtek ADM5120 SoC.  (The original comment here referred to the
 * Alchemy Au1x00 MAC, which was a copy-paste leftover.)
 *
 * TODO:
 *
 *	Better Rx buffer management; we want to get new Rx buffers
 *	to the chip more quickly than we currently do.
76 */ 77 78 #include <sys/cdefs.h> 79 __KERNEL_RCSID(0, "$NetBSD: if_admsw.c,v 1.31 2024/02/10 09:30:05 andvar Exp $"); 80 81 82 #include <sys/param.h> 83 #include <sys/bus.h> 84 #include <sys/callout.h> 85 #include <sys/device.h> 86 #include <sys/endian.h> 87 #include <sys/errno.h> 88 #include <sys/intr.h> 89 #include <sys/ioctl.h> 90 #include <sys/kernel.h> 91 #include <sys/mbuf.h> 92 #include <sys/socket.h> 93 #include <sys/systm.h> 94 95 #include <prop/proplib.h> 96 97 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */ 98 99 #include <net/if.h> 100 #include <net/if_dl.h> 101 #include <net/if_media.h> 102 #include <net/if_ether.h> 103 #include <net/bpf.h> 104 105 #include <sys/gpio.h> 106 #include <dev/gpio/gpiovar.h> 107 108 #include <mips/adm5120/include/adm5120reg.h> 109 #include <mips/adm5120/include/adm5120var.h> 110 #include <mips/adm5120/include/adm5120_obiovar.h> 111 #include <mips/adm5120/dev/if_admswreg.h> 112 #include <mips/adm5120/dev/if_admswvar.h> 113 114 static uint8_t vlan_matrix[SW_DEVS] = { 115 (1 << 6) | (1 << 0), /* CPU + port0 */ 116 (1 << 6) | (1 << 1), /* CPU + port1 */ 117 (1 << 6) | (1 << 2), /* CPU + port2 */ 118 (1 << 6) | (1 << 3), /* CPU + port3 */ 119 (1 << 6) | (1 << 4), /* CPU + port4 */ 120 (1 << 6) | (1 << 5), /* CPU + port5 */ 121 }; 122 123 #ifdef ADMSW_EVENT_COUNTERS 124 #define ADMSW_EVCNT_INCR(ev) (ev)->ev_count++ 125 #else 126 #define ADMSW_EVCNT_INCR(ev) /* nothing */ 127 #endif 128 129 static void admsw_start(struct ifnet *); 130 static void admsw_watchdog(struct ifnet *); 131 static int admsw_ioctl(struct ifnet *, u_long, void *); 132 static int admsw_init(struct ifnet *); 133 static void admsw_stop(struct ifnet *, int); 134 135 static void admsw_shutdown(void *); 136 137 static void admsw_reset(struct admsw_softc *); 138 static void admsw_set_filter(struct admsw_softc *); 139 140 static int admsw_intr(void *); 141 static void admsw_txintr(struct admsw_softc *, int); 142 static void admsw_rxintr(struct admsw_softc *, int); 
143 static int admsw_add_rxbuf(struct admsw_softc *, int, int); 144 #define admsw_add_rxhbuf(sc, idx) admsw_add_rxbuf(sc, idx, 1) 145 #define admsw_add_rxlbuf(sc, idx) admsw_add_rxbuf(sc, idx, 0) 146 147 static int admsw_mediachange(struct ifnet *); 148 static void admsw_mediastatus(struct ifnet *, struct ifmediareq *); 149 150 static int admsw_match(device_t, cfdata_t, void *); 151 static void admsw_attach(device_t, device_t, void *); 152 153 CFATTACH_DECL_NEW(admsw, sizeof(struct admsw_softc), 154 admsw_match, admsw_attach, NULL, NULL); 155 156 static int 157 admsw_match(device_t parent, cfdata_t cf, void *aux) 158 { 159 struct obio_attach_args *aa = aux; 160 161 return strcmp(aa->oba_name, cf->cf_name) == 0; 162 } 163 164 #define REG_READ(o) bus_space_read_4(sc->sc_st, sc->sc_ioh, (o)) 165 #define REG_WRITE(o, v) bus_space_write_4(sc->sc_st, sc->sc_ioh, (o),(v)) 166 167 168 static void 169 admsw_init_bufs(struct admsw_softc *sc) 170 { 171 int i; 172 struct admsw_desc *desc; 173 174 for (i = 0; i < ADMSW_NTXHDESC; i++) { 175 if (sc->sc_txhsoft[i].ds_mbuf != NULL) { 176 m_freem(sc->sc_txhsoft[i].ds_mbuf); 177 sc->sc_txhsoft[i].ds_mbuf = NULL; 178 } 179 desc = &sc->sc_txhdescs[i]; 180 desc->data = 0; 181 desc->cntl = 0; 182 desc->len = MAC_BUFLEN; 183 desc->status = 0; 184 ADMSW_CDTXHSYNC(sc, i, 185 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 186 } 187 sc->sc_txhdescs[ADMSW_NTXHDESC - 1].data |= ADM5120_DMA_RINGEND; 188 ADMSW_CDTXHSYNC(sc, ADMSW_NTXHDESC - 1, 189 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 190 191 for (i = 0; i < ADMSW_NRXHDESC; i++) { 192 if (sc->sc_rxhsoft[i].ds_mbuf == NULL) { 193 if (admsw_add_rxhbuf(sc, i) != 0) 194 panic("admsw_init_bufs\n"); 195 } else 196 ADMSW_INIT_RXHDESC(sc, i); 197 } 198 199 for (i = 0; i < ADMSW_NTXLDESC; i++) { 200 if (sc->sc_txlsoft[i].ds_mbuf != NULL) { 201 m_freem(sc->sc_txlsoft[i].ds_mbuf); 202 sc->sc_txlsoft[i].ds_mbuf = NULL; 203 } 204 desc = &sc->sc_txldescs[i]; 205 desc->data = 0; 206 desc->cntl = 0; 207 
desc->len = MAC_BUFLEN; 208 desc->status = 0; 209 ADMSW_CDTXLSYNC(sc, i, 210 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 211 } 212 sc->sc_txldescs[ADMSW_NTXLDESC - 1].data |= ADM5120_DMA_RINGEND; 213 ADMSW_CDTXLSYNC(sc, ADMSW_NTXLDESC - 1, 214 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 215 216 for (i = 0; i < ADMSW_NRXLDESC; i++) { 217 if (sc->sc_rxlsoft[i].ds_mbuf == NULL) { 218 if (admsw_add_rxlbuf(sc, i) != 0) 219 panic("admsw_init_bufs\n"); 220 } else 221 ADMSW_INIT_RXLDESC(sc, i); 222 } 223 224 REG_WRITE(SEND_HBADDR_REG, ADMSW_CDTXHADDR(sc, 0)); 225 REG_WRITE(SEND_LBADDR_REG, ADMSW_CDTXLADDR(sc, 0)); 226 REG_WRITE(RECV_HBADDR_REG, ADMSW_CDRXHADDR(sc, 0)); 227 REG_WRITE(RECV_LBADDR_REG, ADMSW_CDRXLADDR(sc, 0)); 228 229 sc->sc_txfree = ADMSW_NTXLDESC; 230 sc->sc_txnext = 0; 231 sc->sc_txdirty = 0; 232 sc->sc_rxptr = 0; 233 } 234 235 static void 236 admsw_setvlan(struct admsw_softc *sc, char matrix[6]) 237 { 238 uint32_t i; 239 240 i = matrix[0] + (matrix[1] << 8) + (matrix[2] << 16) 241 + (matrix[3] << 24); 242 REG_WRITE(VLAN_G1_REG, i); 243 i = matrix[4] + (matrix[5] << 8); 244 REG_WRITE(VLAN_G2_REG, i); 245 } 246 247 static void 248 admsw_reset(struct admsw_softc *sc) 249 { 250 uint32_t wdog1; 251 int i; 252 253 REG_WRITE(PORT_CONF0_REG, 254 REG_READ(PORT_CONF0_REG) | PORT_CONF0_DP_MASK); 255 REG_WRITE(CPUP_CONF_REG, 256 REG_READ(CPUP_CONF_REG) | CPUP_CONF_DCPUP); 257 258 /* Wait for DMA to complete. Overkill. In 3ms, we can 259 * send at least two entire 1500-byte packets at 10 Mb/s. 260 */ 261 DELAY(3000); 262 263 /* The datasheet recommends that we move all PHYs to reset 264 * state prior to software reset. 265 */ 266 REG_WRITE(PHY_CNTL2_REG, 267 REG_READ(PHY_CNTL2_REG) & ~PHY_CNTL2_PHYR_MASK); 268 269 /* Reset the switch. 
*/ 270 REG_WRITE(ADMSW_SW_RES, 0x1); 271 272 DELAY(100 * 1000); 273 274 REG_WRITE(ADMSW_BOOT_DONE, ADMSW_BOOT_DONE_BO); 275 276 /* begin old code */ 277 REG_WRITE(CPUP_CONF_REG, 278 CPUP_CONF_DCPUP | CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK | 279 CPUP_CONF_DMCP_MASK); 280 281 REG_WRITE(PORT_CONF0_REG, PORT_CONF0_EMCP_MASK | PORT_CONF0_EMBP_MASK); 282 283 REG_WRITE(PHY_CNTL2_REG, 284 REG_READ(PHY_CNTL2_REG) | PHY_CNTL2_ANE_MASK | 285 PHY_CNTL2_PHYR_MASK | PHY_CNTL2_AMDIX_MASK); 286 287 REG_WRITE(PHY_CNTL3_REG, REG_READ(PHY_CNTL3_REG) | PHY_CNTL3_RNT); 288 289 REG_WRITE(ADMSW_INT_MASK, INT_MASK); 290 REG_WRITE(ADMSW_INT_ST, INT_MASK); 291 292 /* 293 * While in DDB, we stop servicing interrupts, RX ring 294 * fills up and when free block counter falls behind FC 295 * threshold, the switch starts to emit 802.3x PAUSE 296 * frames. This can upset peer switches. 297 * 298 * Stop this from happening by disabling FC and D2 299 * thresholds. 300 */ 301 REG_WRITE(FC_TH_REG, 302 REG_READ(FC_TH_REG) & ~(FC_TH_FCS_MASK | FC_TH_D2S_MASK)); 303 304 admsw_setvlan(sc, vlan_matrix); 305 306 for (i = 0; i < SW_DEVS; i++) { 307 REG_WRITE(MAC_WT1_REG, 308 sc->sc_enaddr[2] | 309 (sc->sc_enaddr[3]<<8) | 310 (sc->sc_enaddr[4]<<16) | 311 ((sc->sc_enaddr[5]+i)<<24)); 312 REG_WRITE(MAC_WT0_REG, (i<<MAC_WT0_VLANID_SHIFT) | 313 (sc->sc_enaddr[0]<<16) | (sc->sc_enaddr[1]<<24) | 314 MAC_WT0_WRITE | MAC_WT0_VLANID_EN); 315 316 while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE)) 317 ; 318 } 319 wdog1 = REG_READ(ADM5120_WDOG1); 320 REG_WRITE(ADM5120_WDOG1, wdog1 & ~ADM5120_WDOG1_WDE); 321 } 322 323 static void 324 admsw_attach(device_t parent, device_t self, void *aux) 325 { 326 uint8_t enaddr[ETHER_ADDR_LEN]; 327 struct admsw_softc *sc = device_private(self); 328 struct obio_attach_args *aa = aux; 329 struct ifnet *ifp; 330 bus_dma_segment_t seg; 331 int error, i, rseg; 332 prop_data_t pd; 333 334 printf(": ADM5120 Switch Engine, %d ports\n", SW_DEVS); 335 336 sc->sc_dev = self; 337 sc->sc_dmat = 
aa->oba_dt; 338 sc->sc_st = aa->oba_st; 339 340 pd = prop_dictionary_get(device_properties(self), "mac-address"); 341 342 if (pd == NULL) { 343 enaddr[0] = 0x02; 344 enaddr[1] = 0xaa; 345 enaddr[2] = 0xbb; 346 enaddr[3] = 0xcc; 347 enaddr[4] = 0xdd; 348 enaddr[5] = 0xee; 349 } else 350 memcpy(enaddr, prop_data_data_nocopy(pd), sizeof(enaddr)); 351 352 memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr)); 353 354 printf("%s: base Ethernet address %s\n", device_xname(sc->sc_dev), 355 ether_sprintf(enaddr)); 356 357 /* Map the device. */ 358 if (bus_space_map(sc->sc_st, aa->oba_addr, 512, 0, &sc->sc_ioh) != 0) { 359 printf("%s: unable to map device\n", device_xname(sc->sc_dev)); 360 return; 361 } 362 363 /* Hook up the interrupt handler. */ 364 sc->sc_ih = adm5120_intr_establish(aa->oba_irq, INTR_IRQ, admsw_intr, sc); 365 366 if (sc->sc_ih == NULL) { 367 printf("%s: unable to register interrupt handler\n", 368 device_xname(sc->sc_dev)); 369 return; 370 } 371 372 /* 373 * Allocate the control data structures, and create and load the 374 * DMA map for it. 
375 */ 376 if ((error = bus_dmamem_alloc(sc->sc_dmat, 377 sizeof(struct admsw_control_data), PAGE_SIZE, 0, &seg, 1, &rseg, 378 0)) != 0) { 379 printf("%s: unable to allocate control data, error = %d\n", 380 device_xname(sc->sc_dev), error); 381 return; 382 } 383 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 384 sizeof(struct admsw_control_data), (void *)&sc->sc_control_data, 385 0)) != 0) { 386 printf("%s: unable to map control data, error = %d\n", 387 device_xname(sc->sc_dev), error); 388 return; 389 } 390 if ((error = bus_dmamap_create(sc->sc_dmat, 391 sizeof(struct admsw_control_data), 1, 392 sizeof(struct admsw_control_data), 0, 0, &sc->sc_cddmamap)) != 0) { 393 printf("%s: unable to create control data DMA map, " 394 "error = %d\n", device_xname(sc->sc_dev), error); 395 return; 396 } 397 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 398 sc->sc_control_data, sizeof(struct admsw_control_data), NULL, 399 0)) != 0) { 400 printf("%s: unable to load control data DMA map, error = %d\n", 401 device_xname(sc->sc_dev), error); 402 return; 403 } 404 405 /* 406 * Create the transmit buffer DMA maps. 407 */ 408 for (i = 0; i < ADMSW_NTXHDESC; i++) { 409 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 410 2, MCLBYTES, 0, 0, 411 &sc->sc_txhsoft[i].ds_dmamap)) != 0) { 412 printf("%s: unable to create txh DMA map %d, " 413 "error = %d\n", device_xname(sc->sc_dev), i, error); 414 return; 415 } 416 sc->sc_txhsoft[i].ds_mbuf = NULL; 417 } 418 for (i = 0; i < ADMSW_NTXLDESC; i++) { 419 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 420 2, MCLBYTES, 0, 0, 421 &sc->sc_txlsoft[i].ds_dmamap)) != 0) { 422 printf("%s: unable to create txl DMA map %d, " 423 "error = %d\n", device_xname(sc->sc_dev), i, error); 424 return; 425 } 426 sc->sc_txlsoft[i].ds_mbuf = NULL; 427 } 428 429 /* 430 * Create the receive buffer DMA maps. 
431 */ 432 for (i = 0; i < ADMSW_NRXHDESC; i++) { 433 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 434 MCLBYTES, 0, 0, &sc->sc_rxhsoft[i].ds_dmamap)) != 0) { 435 printf("%s: unable to create rxh DMA map %d, " 436 "error = %d\n", device_xname(sc->sc_dev), i, error); 437 return; 438 } 439 sc->sc_rxhsoft[i].ds_mbuf = NULL; 440 } 441 for (i = 0; i < ADMSW_NRXLDESC; i++) { 442 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 443 MCLBYTES, 0, 0, &sc->sc_rxlsoft[i].ds_dmamap)) != 0) { 444 printf("%s: unable to create rxl DMA map %d, " 445 "error = %d\n", device_xname(sc->sc_dev), i, error); 446 return; 447 } 448 sc->sc_rxlsoft[i].ds_mbuf = NULL; 449 } 450 451 admsw_init_bufs(sc); 452 453 admsw_reset(sc); 454 455 for (i = 0; i < SW_DEVS; i++) { 456 sc->sc_ethercom[i].ec_ifmedia = &sc->sc_ifmedia[i]; 457 ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange, admsw_mediastatus); 458 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T, 0, NULL); 459 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 460 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX, 0, NULL); 461 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); 462 ifmedia_add(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO, 0, NULL); 463 ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO); 464 465 ifp = &sc->sc_ethercom[i].ec_if; 466 strcpy(ifp->if_xname, device_xname(sc->sc_dev)); 467 ifp->if_xname[5] += i; 468 ifp->if_softc = sc; 469 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 470 ifp->if_ioctl = admsw_ioctl; 471 ifp->if_start = admsw_start; 472 ifp->if_watchdog = admsw_watchdog; 473 ifp->if_init = admsw_init; 474 ifp->if_stop = admsw_stop; 475 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx; 476 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(ADMSW_NTXLDESC, IFQ_MAXLEN)); 477 IFQ_SET_READY(&ifp->if_snd); 478 479 /* Attach the interface. 
*/ 480 if_attach(ifp); 481 if_deferred_start_init(ifp, NULL); 482 ether_ifattach(ifp, enaddr); 483 enaddr[5]++; 484 } 485 486 #ifdef ADMSW_EVENT_COUNTERS 487 evcnt_attach_dynamic(&sc->sc_ev_txstall, EVCNT_TYPE_MISC, 488 NULL, device_xname(sc->sc_dev), "txstall"); 489 evcnt_attach_dynamic(&sc->sc_ev_rxstall, EVCNT_TYPE_MISC, 490 NULL, device_xname(sc->sc_dev), "rxstall"); 491 evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_MISC, 492 NULL, device_xname(sc->sc_dev), "txintr"); 493 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_MISC, 494 NULL, device_xname(sc->sc_dev), "rxintr"); 495 #if 1 496 evcnt_attach_dynamic(&sc->sc_ev_rxsync, EVCNT_TYPE_MISC, 497 NULL, device_xname(sc->sc_dev), "rxsync"); 498 #endif 499 #endif 500 501 admwdog_attach(sc); 502 503 /* Make sure the interface is shutdown during reboot. */ 504 sc->sc_sdhook = shutdownhook_establish(admsw_shutdown, sc); 505 if (sc->sc_sdhook == NULL) 506 printf("%s: WARNING: unable to establish shutdown hook\n", 507 device_xname(sc->sc_dev)); 508 509 /* leave interrupts and cpu port disabled */ 510 return; 511 } 512 513 514 /* 515 * admsw_shutdown: 516 * 517 * Make sure the interface is stopped at reboot time. 518 */ 519 static void 520 admsw_shutdown(void *arg) 521 { 522 struct admsw_softc *sc = arg; 523 int i; 524 525 for (i = 0; i < SW_DEVS; i++) 526 admsw_stop(&sc->sc_ethercom[i].ec_if, 1); 527 } 528 529 /* 530 * admsw_start: [ifnet interface function] 531 * 532 * Start packet transmission on the interface. 533 */ 534 static void 535 admsw_start(struct ifnet *ifp) 536 { 537 struct admsw_softc *sc = ifp->if_softc; 538 struct mbuf *m0, *m; 539 struct admsw_descsoft *ds; 540 struct admsw_desc *desc; 541 bus_dmamap_t dmamap; 542 struct ether_header *eh; 543 int error, nexttx, len, i; 544 static int vlan = 0; 545 546 /* 547 * Loop through the send queues, setting up transmit descriptors 548 * unitl we drain the queues, or use up all available transmit 549 * descriptors. 
550 */ 551 for (;;) { 552 vlan++; 553 if (vlan == SW_DEVS) 554 vlan = 0; 555 i = vlan; 556 for (;;) { 557 ifp = &sc->sc_ethercom[i].ec_if; 558 if ((ifp->if_flags & IFF_RUNNING) == 0) 559 continue; 560 /* Grab a packet off the queue. */ 561 IFQ_POLL(&ifp->if_snd, m0); 562 if (m0 != NULL) 563 break; 564 i++; 565 if (i == SW_DEVS) 566 i = 0; 567 if (i == vlan) 568 return; 569 } 570 vlan = i; 571 m = NULL; 572 573 /* Get a spare descriptor. */ 574 if (sc->sc_txfree == 0) { 575 /* No more slots left. */ 576 ADMSW_EVCNT_INCR(&sc->sc_ev_txstall); 577 break; 578 } 579 nexttx = sc->sc_txnext; 580 desc = &sc->sc_txldescs[nexttx]; 581 ds = &sc->sc_txlsoft[nexttx]; 582 dmamap = ds->ds_dmamap; 583 584 /* 585 * Load the DMA map. If this fails, the packet either 586 * didn't fit in the allotted number of segments, or we 587 * were short on resources. In this case, we'll copy 588 * and try again. 589 */ 590 if (m0->m_pkthdr.len < ETHER_MIN_LEN || 591 bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 592 BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) { 593 MGETHDR(m, M_DONTWAIT, MT_DATA); 594 if (m == NULL) { 595 printf("%s: unable to allocate Tx mbuf\n", 596 device_xname(sc->sc_dev)); 597 break; 598 } 599 if (m0->m_pkthdr.len > MHLEN) { 600 MCLGET(m, M_DONTWAIT); 601 if ((m->m_flags & M_EXT) == 0) { 602 printf("%s: unable to allocate Tx " 603 "cluster\n", device_xname(sc->sc_dev)); 604 m_freem(m); 605 break; 606 } 607 } 608 m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags; 609 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); 610 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; 611 if (m->m_pkthdr.len < ETHER_MIN_LEN) { 612 if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len) 613 panic("admsw_start: M_TRAILINGSPACE\n"); 614 memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0, 615 ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len); 616 m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN; 617 } 618 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, 619 m, BUS_DMA_WRITE | BUS_DMA_NOWAIT); 620 if (error) { 
621 printf("%s: unable to load Tx buffer, error = " 622 "%d\n", device_xname(sc->sc_dev), error); 623 break; 624 } 625 } 626 627 IFQ_DEQUEUE(&ifp->if_snd, m0); 628 if (m != NULL) { 629 m_freem(m0); 630 m0 = m; 631 } 632 633 /* 634 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 635 */ 636 637 /* Sync the DMA map. */ 638 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 639 BUS_DMASYNC_PREWRITE); 640 641 if (dmamap->dm_nsegs != 1 && dmamap->dm_nsegs != 2) 642 panic("admsw_start: dm_nsegs == %d\n", 643 dmamap->dm_nsegs); 644 desc->data = dmamap->dm_segs[0].ds_addr; 645 desc->len = len = dmamap->dm_segs[0].ds_len; 646 if (dmamap->dm_nsegs > 1) { 647 len += dmamap->dm_segs[1].ds_len; 648 desc->cntl = dmamap->dm_segs[1].ds_addr 649 | ADM5120_DMA_BUF2ENABLE; 650 } else 651 desc->cntl = 0; 652 desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan); 653 eh = mtod(m0, struct ether_header *); 654 if (ntohs(eh->ether_type) == ETHERTYPE_IP && 655 m0->m_pkthdr.csum_flags & M_CSUM_IPv4) 656 desc->status |= ADM5120_DMA_CSUM; 657 if (nexttx == ADMSW_NTXLDESC - 1) 658 desc->data |= ADM5120_DMA_RINGEND; 659 desc->data |= ADM5120_DMA_OWN; 660 661 /* Sync the descriptor. */ 662 ADMSW_CDTXLSYNC(sc, nexttx, 663 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 664 665 REG_WRITE(SEND_TRIG_REG, 1); 666 /* printf("send slot %d\n", nexttx); */ 667 668 /* 669 * Store a pointer to the packet so we can free it later. 670 */ 671 ds->ds_mbuf = m0; 672 673 /* Advance the Tx pointer. */ 674 sc->sc_txfree--; 675 sc->sc_txnext = ADMSW_NEXTTXL(nexttx); 676 677 /* Pass the packet to any BPF listeners. */ 678 bpf_mtap(ifp, m0, BPF_D_OUT); 679 680 /* Set a watchdog timer in case the chip flakes out. */ 681 sc->sc_ethercom[0].ec_if.if_timer = 5; 682 } 683 } 684 685 /* 686 * admsw_watchdog: [ifnet interface function] 687 * 688 * Watchdog timer handler. 
689 */ 690 static void 691 admsw_watchdog(struct ifnet *ifp) 692 { 693 struct admsw_softc *sc = ifp->if_softc; 694 int vlan; 695 696 #if 1 697 /* Check if an interrupt was lost. */ 698 if (sc->sc_txfree == ADMSW_NTXLDESC) { 699 printf("%s: watchdog false alarm\n", device_xname(sc->sc_dev)); 700 return; 701 } 702 if (sc->sc_ethercom[0].ec_if.if_timer != 0) 703 printf("%s: watchdog timer is %d!\n", device_xname(sc->sc_dev), 704 sc->sc_ethercom[0].ec_if.if_timer); 705 admsw_txintr(sc, 0); 706 if (sc->sc_txfree == ADMSW_NTXLDESC) { 707 printf("%s: tx IRQ lost (queue empty)\n", 708 device_xname(sc->sc_dev)); 709 return; 710 } 711 if (sc->sc_ethercom[0].ec_if.if_timer != 0) { 712 printf("%s: tx IRQ lost (timer recharged)\n", 713 device_xname(sc->sc_dev)); 714 return; 715 } 716 #endif 717 718 printf("%s: device timeout, txfree = %d\n", 719 device_xname(sc->sc_dev), sc->sc_txfree); 720 for (vlan = 0; vlan < SW_DEVS; vlan++) 721 admsw_stop(&sc->sc_ethercom[vlan].ec_if, 0); 722 for (vlan = 0; vlan < SW_DEVS; vlan++) 723 (void)admsw_init(&sc->sc_ethercom[vlan].ec_if); 724 725 /* Try to get more packets going. */ 726 admsw_start(ifp); 727 } 728 729 /* 730 * admsw_ioctl: [ifnet interface function] 731 * 732 * Handle control requests from the operator. 
733 */ 734 static int 735 admsw_ioctl(struct ifnet *ifp, u_long cmd, void *data) 736 { 737 struct admsw_softc *sc = ifp->if_softc; 738 struct ifdrv *ifd; 739 int s, error, port; 740 741 port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 742 if (port >= SW_DEVS) 743 return EOPNOTSUPP; 744 745 s = splnet(); 746 747 switch (cmd) { 748 case SIOCSIFCAP: 749 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) 750 error = 0; 751 break; 752 case SIOCGDRVSPEC: 753 case SIOCSDRVSPEC: 754 ifd = (struct ifdrv *) data; 755 if (ifd->ifd_cmd != 0 || ifd->ifd_len != sizeof(vlan_matrix)) { 756 error = EINVAL; 757 break; 758 } 759 if (cmd == SIOCGDRVSPEC) { 760 error = copyout(vlan_matrix, ifd->ifd_data, 761 sizeof(vlan_matrix)); 762 } else { 763 error = copyin(ifd->ifd_data, vlan_matrix, 764 sizeof(vlan_matrix)); 765 admsw_setvlan(sc, vlan_matrix); 766 } 767 break; 768 769 default: 770 error = ether_ioctl(ifp, cmd, data); 771 if (error == ENETRESET) { 772 /* 773 * Multicast list has changed; set the hardware filter 774 * accordingly. 775 */ 776 admsw_set_filter(sc); 777 error = 0; 778 } 779 break; 780 } 781 782 /* Try to get more packets going. */ 783 admsw_start(ifp); 784 785 splx(s); 786 return error; 787 } 788 789 790 /* 791 * admsw_intr: 792 * 793 * Interrupt service routine. 
 */
static int
admsw_intr(void *arg)
{
	struct admsw_softc *sc = arg;
	uint32_t pending;
	char buf[64];

	pending = REG_READ(ADMSW_INT_ST);

	/* Log any interrupt cause we don't normally expect. */
	if ((pending & ~(ADMSW_INTR_RHD | ADMSW_INTR_RLD | ADMSW_INTR_SHD |
	    ADMSW_INTR_SLD | ADMSW_INTR_W1TE | ADMSW_INTR_W0TE)) != 0) {
		snprintb(buf, sizeof(buf), ADMSW_INT_FMT, pending);
		printf("%s: pending=%s\n", __func__, buf);
	}
	/* Ack everything we saw. */
	REG_WRITE(ADMSW_INT_ST, pending);

	/* No port is up; nothing to service. */
	if (sc->ndevs == 0)
		return 0;

	/* Dispatch receive (high/low ring) and send (high/low ring). */
	if ((pending & ADMSW_INTR_RHD) != 0)
		admsw_rxintr(sc, 1);

	if ((pending & ADMSW_INTR_RLD) != 0)
		admsw_rxintr(sc, 0);

	if ((pending & ADMSW_INTR_SHD) != 0)
		admsw_txintr(sc, 1);

	if ((pending & ADMSW_INTR_SLD) != 0)
		admsw_txintr(sc, 0);

	return 1;
}

/*
 * admsw_txintr:
 *
 *	Helper; handle transmit interrupts.  Walks the dirty portion of
 *	the low-priority Tx ring, unloading and freeing each completed
 *	packet and crediting the owning port's opacket counter.
 *
 *	NOTE(review): `prio' is unused — only the low-priority ring is
 *	reaped here, presumably because admsw_start() only ever queues
 *	on the low-priority ring; confirm before adding high-prio Tx.
 */
static void
admsw_txintr(struct admsw_softc *sc, int prio)
{
	struct ifnet *ifp;
	struct admsw_desc *desc;
	struct admsw_descsoft *ds;
	int i, vlan;
	int gotone = 0;

	/* printf("txintr: txdirty: %d, txfree: %d\n", sc->sc_txdirty, sc->sc_txfree); */
	for (i = sc->sc_txdirty; sc->sc_txfree != ADMSW_NTXLDESC;
	    i = ADMSW_NEXTTXL(i)) {

		ADMSW_CDTXLSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txldescs[i];
		ds = &sc->sc_txlsoft[i];
		/* Chip still owns this descriptor: stop reaping. */
		if (desc->data & ADM5120_DMA_OWN) {
			ADMSW_CDTXLSYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		/* Low 6 status bits carry the egress-port bitmap set by
		 * admsw_start(); recover the owning port from it. */
		vlan = ffs(desc->status & 0x3f) - 1;
		if (vlan < 0 || vlan >= SW_DEVS)
			panic("admsw_txintr: bad vlan\n");
		ifp = &sc->sc_ethercom[vlan].ec_if;
		gotone = 1;
		/* printf("clear tx slot %d\n", i); */

		if_statinc(ifp, if_opackets);

		sc->sc_txfree++;
	}

	if (gotone) {
		sc->sc_txdirty = i;
#ifdef ADMSW_EVENT_COUNTERS
		ADMSW_EVCNT_INCR(&sc->sc_ev_txintr);
#endif
		/* The watchdog timer lives on port 0's ifnet. */
		ifp = &sc->sc_ethercom[0].ec_if;

		/* Try to queue more packets. */
		if_schedule_deferred_start(ifp);

		/*
		 * If there are no more pending transmissions,
		 * cancel the watchdog timer.
		 */
		if (sc->sc_txfree == ADMSW_NTXLDESC)
			ifp->if_timer = 0;

	}

	/* printf("txintr end: txdirty: %d, txfree: %d\n", sc->sc_txdirty, sc->sc_txfree); */
}

/*
 * admsw_rxintr:
 *
 *	Helper; handle receive interrupts.  Only the low-priority ring
 *	is supported; the routine first re-synchronizes sc_rxptr with
 *	the chip if the software pointer has fallen behind, then drains
 *	every completed descriptor, replacing each buffer and handing
 *	the packet to the ifnet of the VLAN/port it arrived on.
 */
static void
admsw_rxintr(struct admsw_softc *sc, int high)
{
	struct ifnet *ifp;
	struct admsw_descsoft *ds;
	struct mbuf *m;
	uint32_t stat;
	int i, len, port, vlan;

	/* printf("rxintr\n"); */
	/* This driver never enables the high-priority Rx ring. */
	if (high)
		panic("admsw_rxintr: high priority packet\n");

#ifdef ADMSW_EVENT_COUNTERS
	int pkts = 0;
#endif

#if 1
	/*
	 * Resync check: if the descriptor at sc_rxptr is still owned by
	 * the chip, scan forward for the first completed descriptor; if
	 * one exists while sc_rxptr's does not, the software pointer has
	 * fallen behind the hardware and is snapped forward.  Each probe
	 * is bracketed by POST…/PRE… syncs to keep the (presumably
	 * non-coherent) descriptor memory consistent.
	 */
	ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
		ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	else {
		i = sc->sc_rxptr;
		do {
			ADMSW_CDRXLSYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			i = ADMSW_NEXTRXL(i);
			/* the ring is empty, just return. */
			if (i == sc->sc_rxptr)
				return;
			ADMSW_CDRXLSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		} while (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN);
		ADMSW_CDRXLSYNC(sc, i,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if ((sc->sc_rxldescs[sc->sc_rxptr].data & ADM5120_DMA_OWN) == 0)
			ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		else {
			ADMSW_CDRXLSYNC(sc, sc->sc_rxptr,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			/* We've fallen behind the chip: catch it. */
			printf("%s: RX ring resync, base=%x, work=%x, %d -> %d\n",
			    device_xname(sc->sc_dev), REG_READ(RECV_LBADDR_REG),
			    REG_READ(RECV_LWADDR_REG), sc->sc_rxptr, i);
			sc->sc_rxptr = i;
			ADMSW_EVCNT_INCR(&sc->sc_ev_rxsync);
		}
	}
#endif
	/* Drain every completed descriptor starting at sc_rxptr. */
	for (i = sc->sc_rxptr;; i = ADMSW_NEXTRXL(i)) {
		ds = &sc->sc_rxlsoft[i];

		ADMSW_CDRXLSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (sc->sc_rxldescs[i].data & ADM5120_DMA_OWN) {
			ADMSW_CDRXLSYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* printf("process slot %d\n", i); */

#ifdef ADMSW_EVENT_COUNTERS
		pkts++;
#endif

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		stat = sc->sc_rxldescs[i].status;
		len = (stat & ADM5120_DMA_LEN) >> ADM5120_DMA_LENSHIFT;
		len -= ETHER_CRC_LEN;
		/* Map the ingress port to the first VLAN containing it. */
		port = (stat & ADM5120_DMA_PORTID) >> ADM5120_DMA_PORTSHIFT;
		for (vlan = 0; vlan < SW_DEVS; vlan++)
			if ((1 << port) & vlan_matrix[vlan])
				break;
		if (vlan == SW_DEVS)
			vlan = 0;
		ifp = &sc->sc_ethercom[vlan].ec_if;

		m = ds->ds_mbuf;
		/* Replace the buffer before handing the old one up; if
		 * that fails, recycle the old buffer and drop the packet. */
		if (admsw_add_rxlbuf(sc, i) != 0) {
			if_statinc(ifp, if_ierrors);
			ADMSW_INIT_RXLDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;
		/* Hardware-verified IPv4 header checksum. */
		if ((stat & ADM5120_DMA_TYPE) == ADM5120_DMA_TYPE_IP) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (stat & ADM5120_DMA_CSUMFAIL)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}
#ifdef ADMSW_EVENT_COUNTERS
	if (pkts)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxintr);

	/* A full ring's worth in one pass means we stalled the chip. */
	if (pkts == ADMSW_NRXLDESC)
		ADMSW_EVCNT_INCR(&sc->sc_ev_rxstall);
#endif

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * admsw_init:	[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 *	The hardware is shared by all ports: it is only (re)initialized
 *	when the first port comes up (sc->ndevs counts running ports).
 */
static int
admsw_init(struct ifnet *ifp)
{
	struct admsw_softc *sc = ifp->if_softc;

	/* printf("admsw_init called\n"); */

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		if (sc->ndevs == 0) {
			/* First port up: bring up the shared hardware. */
			admsw_init_bufs(sc);
			admsw_reset(sc);
			REG_WRITE(CPUP_CONF_REG,
			    CPUP_CONF_CRCP | CPUP_CONF_DUNP_MASK |
			    CPUP_CONF_DMCP_MASK);
			/* Clear all pending interrupts */
			REG_WRITE(ADMSW_INT_ST, INT_MASK);

			/* Enable needed interrupts */
			REG_WRITE(ADMSW_INT_MASK, REG_READ(ADMSW_INT_MASK) &
			    ~(ADMSW_INTR_SHD | ADMSW_INTR_SLD |
			    ADMSW_INTR_RHD | ADMSW_INTR_RLD |
			    ADMSW_INTR_HDF | ADMSW_INTR_LDF));
		}
		sc->ndevs++;
	}

	/* Set the receive filter. */
	admsw_set_filter(sc);

	/* Mark iface as running */
	ifp->if_flags |= IFF_RUNNING;

	return 0;
}

/*
 * admsw_stop:	[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
admsw_stop(struct ifnet *ifp, int disable)
{
	struct admsw_softc *sc = ifp->if_softc;

	/* printf("admsw_stop: %d\n", disable); */

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	/* Shared hardware: only quiesce it when the last running
	 * port goes down (ndevs mirrors admsw_init()). */
	if (--sc->ndevs == 0) {
		/* printf("debug: de-initializing hardware\n"); */

		/* Disable cpu port */
		REG_WRITE(CPUP_CONF_REG,
		    CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
		    CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK);

		/* XXX We should disable, then clear? --dyoung */
		/* Clear all pending interrupts */
		REG_WRITE(ADMSW_INT_ST, INT_MASK);

		/* Disable interrupts */
		REG_WRITE(ADMSW_INT_MASK, INT_MASK);
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;

	return;
}

/*
 * admsw_set_filter:
 *
 *	Set up the receive filter: compute, per port, whether unknown
 *	unicast (promisc) and multicast traffic should be forwarded to
 *	the CPU port, and program CPUP_CONF accordingly.  The per-address
 *	hardware MAC table path is currently compiled out (#if 0).
 */
static void
admsw_set_filter(struct admsw_softc *sc)
{
	int i;
	uint32_t allmc, anymc, conf, promisc;
	struct ether_multi *enm;
	struct ethercom *ec;
	struct ifnet *ifp;
	struct ether_multistep step;

	/* Find which ports should be operated in promisc mode. */
	allmc = anymc = promisc = 0;
	for (i = 0; i < SW_DEVS; i++) {
		ec = &sc->sc_ethercom[i];
		ifp = &ec->ec_if;
		if (ifp->if_flags & IFF_PROMISC)
			promisc |= vlan_matrix[i];

		ifp->if_flags &= ~IFF_ALLMULTI;

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			/* Address ranges can't be matched exactly; fall
			 * back to accepting all multicast on this port. */
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0) {
				printf("%s: punting on mcast range\n",
				    __func__);
				ifp->if_flags |= IFF_ALLMULTI;
				allmc |= vlan_matrix[i];
				break;
			}

			anymc |= vlan_matrix[i];

#if 0
			/* XXX extract subroutine --dyoung */
			REG_WRITE(MAC_WT1_REG,
			    enm->enm_addrlo[2] |
			    (enm->enm_addrlo[3] << 8) |
			    (enm->enm_addrlo[4] << 16) |
			    (enm->enm_addrlo[5] << 24));
			REG_WRITE(MAC_WT0_REG,
			    (i << MAC_WT0_VLANID_SHIFT) |
			    (enm->enm_addrlo[0] << 16) |
			    (enm->enm_addrlo[1] << 24) |
			    MAC_WT0_WRITE | MAC_WT0_VLANID_EN);
			/* Timeout? */
			while (!(REG_READ(MAC_WT0_REG) & MAC_WT0_WRITE_DONE))
				;
#endif

			/* Load h/w with mcast address, port = CPU */
			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);
	}

	conf = REG_READ(CPUP_CONF_REG);
	/* 1 Disable forwarding of unknown & multicast packets to
	 *   CPU on all ports.
	 * 2 Enable forwarding of unknown & multicast packets to
	 *   CPU on ports where IFF_PROMISC or IFF_ALLMULTI is set.
	 */
	conf |= CPUP_CONF_DUNP_MASK | CPUP_CONF_DMCP_MASK;
	/* Enable forwarding of unknown packets to CPU on selected ports.
	 * (DUNP/DMCP are "disable" bits, so enabling is done by XOR-ing
	 * the selected port bits back off.)
	 * NOTE(review): a port present in both `allmc' and `anymc' gets
	 * its DMCP bit XOR-ed twice, i.e. re-disabled — looks like these
	 * two should be OR-ed together before a single XOR; confirm
	 * against the intended multicast semantics before changing. */
	conf ^= ((promisc << CPUP_CONF_DUNP_SHIFT) & CPUP_CONF_DUNP_MASK);
	conf ^= ((allmc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
	conf ^= ((anymc << CPUP_CONF_DMCP_SHIFT) & CPUP_CONF_DMCP_MASK);
	REG_WRITE(CPUP_CONF_REG, conf);
}

/*
 * admsw_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
1180 */ 1181 int 1182 admsw_add_rxbuf(struct admsw_softc *sc, int idx, int high) 1183 { 1184 struct admsw_descsoft *ds; 1185 struct mbuf *m; 1186 int error; 1187 1188 if (high) 1189 ds = &sc->sc_rxhsoft[idx]; 1190 else 1191 ds = &sc->sc_rxlsoft[idx]; 1192 1193 MGETHDR(m, M_DONTWAIT, MT_DATA); 1194 if (m == NULL) 1195 return ENOBUFS; 1196 1197 MCLGET(m, M_DONTWAIT); 1198 if ((m->m_flags & M_EXT) == 0) { 1199 m_freem(m); 1200 return ENOBUFS; 1201 } 1202 1203 if (ds->ds_mbuf != NULL) 1204 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); 1205 1206 ds->ds_mbuf = m; 1207 1208 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap, 1209 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1210 BUS_DMA_READ | BUS_DMA_NOWAIT); 1211 if (error) { 1212 printf("%s: can't load rx DMA map %d, error = %d\n", 1213 device_xname(sc->sc_dev), idx, error); 1214 panic("admsw_add_rxbuf"); /* XXX */ 1215 } 1216 1217 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, 1218 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1219 1220 if (high) 1221 ADMSW_INIT_RXHDESC(sc, idx); 1222 else 1223 ADMSW_INIT_RXLDESC(sc, idx); 1224 1225 return 0; 1226 } 1227 1228 int 1229 admsw_mediachange(struct ifnet *ifp) 1230 { 1231 struct admsw_softc *sc = ifp->if_softc; 1232 int port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 1233 struct ifmedia *ifm = &sc->sc_ifmedia[port]; 1234 int old, new, val; 1235 1236 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1237 return EINVAL; 1238 1239 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 1240 val = PHY_CNTL2_AUTONEG | PHY_CNTL2_100M | PHY_CNTL2_FDX; 1241 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { 1242 if ((ifm->ifm_media & IFM_FDX) != 0) 1243 val = PHY_CNTL2_100M | PHY_CNTL2_FDX; 1244 else 1245 val = PHY_CNTL2_100M; 1246 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) { 1247 if ((ifm->ifm_media & IFM_FDX) != 0) 1248 val = PHY_CNTL2_FDX; 1249 else 1250 val = 0; 1251 } else 1252 return EINVAL; 1253 1254 old = REG_READ(PHY_CNTL2_REG); 1255 new = old & 
~((PHY_CNTL2_AUTONEG | PHY_CNTL2_100M | PHY_CNTL2_FDX) 1256 << port); 1257 new |= (val << port); 1258 1259 if (new != old) 1260 REG_WRITE(PHY_CNTL2_REG, new); 1261 1262 return 0; 1263 } 1264 1265 void 1266 admsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1267 { 1268 struct admsw_softc *sc = ifp->if_softc; 1269 int port = (struct ethercom *)ifp - sc->sc_ethercom; /* XXX */ 1270 int status; 1271 1272 ifmr->ifm_status = IFM_AVALID; 1273 ifmr->ifm_active = IFM_ETHER; 1274 1275 status = REG_READ(PHY_ST_REG) >> port; 1276 1277 if ((status & PHY_ST_LINKUP) == 0) { 1278 ifmr->ifm_active |= IFM_NONE; 1279 return; 1280 } 1281 1282 ifmr->ifm_status |= IFM_ACTIVE; 1283 ifmr->ifm_active |= (status & PHY_ST_100M) ? IFM_100_TX : IFM_10_T; 1284 if (status & PHY_ST_FDX) 1285 ifmr->ifm_active |= IFM_FDX; 1286 else 1287 ifmr->ifm_active |= IFM_HDX; 1288 } 1289