/* $NetBSD: if_mvxpe.c,v 1.19 2018/06/26 06:48:01 msaitoh Exp $ */
/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_mvxpe.c,v 1.19 2018/06/26 06:48:01 msaitoh Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>
#include <dev/marvell/mvxpbmvar.h>
#include <dev/marvell/if_mvxpereg.h>
#include <dev/marvell/if_mvxpevar.h>

#include "locators.h"

#if BYTE_ORDER == BIG_ENDIAN
#error "BIG ENDIAN not supported"
#endif

#ifdef MVXPE_DEBUG
#define STATIC /* nothing */
#else
#define STATIC static
#endif

/* autoconf(9) */
STATIC int mvxpe_match(device_t, struct cfdata *, void *);
STATIC void mvxpe_attach(device_t, device_t, void *);
STATIC int mvxpe_evcnt_attach(struct mvxpe_softc *);
CFATTACH_DECL_NEW(mvxpe_mbus, sizeof(struct mvxpe_softc),
    mvxpe_match, mvxpe_attach, NULL, NULL);
STATIC void mvxpe_sc_lock(struct mvxpe_softc *);
STATIC void mvxpe_sc_unlock(struct mvxpe_softc *);

/* MII */
STATIC int mvxpe_miibus_readreg(device_t, int, int);
STATIC void mvxpe_miibus_writereg(device_t, int, int, int);
STATIC void mvxpe_miibus_statchg(struct ifnet *);

/* Address Decoding Window */
STATIC void mvxpe_wininit(struct mvxpe_softc *, enum marvell_tags *);

/* Device Register Initialization */
STATIC int mvxpe_initreg(struct ifnet *);

/* Descriptor Ring Control for each queue */
STATIC void *mvxpe_dma_memalloc(struct mvxpe_softc *, bus_dmamap_t *, size_t);
STATIC int mvxpe_ring_alloc_queue(struct mvxpe_softc *, int);
STATIC void mvxpe_ring_dealloc_queue(struct mvxpe_softc *, int);
STATIC void mvxpe_ring_init_queue(struct mvxpe_softc *, int);
STATIC void mvxpe_ring_flush_queue(struct mvxpe_softc *, int);
STATIC void mvxpe_ring_sync_rx(struct mvxpe_softc *, int, int, int, int);
STATIC void mvxpe_ring_sync_tx(struct mvxpe_softc *, int, int, int, int);

/* Rx/Tx Queue Control */
STATIC int mvxpe_rx_queue_init(struct ifnet *, int);
STATIC int mvxpe_tx_queue_init(struct ifnet *, int);
STATIC int mvxpe_rx_queue_enable(struct ifnet *, int);
STATIC int mvxpe_tx_queue_enable(struct ifnet *, int);
STATIC void mvxpe_rx_lockq(struct mvxpe_softc *, int);
STATIC void mvxpe_rx_unlockq(struct mvxpe_softc *, int);
STATIC void mvxpe_tx_lockq(struct mvxpe_softc *, int);
STATIC void mvxpe_tx_unlockq(struct mvxpe_softc *, int);

/* Interrupt Handlers */
STATIC void mvxpe_disable_intr(struct mvxpe_softc *);
STATIC void mvxpe_enable_intr(struct mvxpe_softc *);
STATIC int mvxpe_rxtxth_intr(void *);
STATIC int mvxpe_misc_intr(void *);
STATIC int mvxpe_rxtx_intr(void *);
STATIC void mvxpe_tick(void *);

/* struct ifnet and mii callbacks */
STATIC void mvxpe_start(struct ifnet *);
STATIC int mvxpe_ioctl(struct ifnet *, u_long, void *);
STATIC int mvxpe_init(struct ifnet *);
STATIC void mvxpe_stop(struct ifnet *, int);
STATIC void mvxpe_watchdog(struct ifnet *);
STATIC int mvxpe_ifflags_cb(struct ethercom *);
STATIC int mvxpe_mediachange(struct ifnet *);
STATIC void mvxpe_mediastatus(struct ifnet *, struct ifmediareq *);

/* Link State Notify */
STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc);
STATIC void mvxpe_linkup(struct mvxpe_softc *);
STATIC void mvxpe_linkdown(struct mvxpe_softc *);
STATIC void mvxpe_linkreset(struct mvxpe_softc *);

/* Tx Subroutines */
STATIC int mvxpe_tx_queue_select(struct mvxpe_softc *, struct mbuf *);
STATIC int mvxpe_tx_queue(struct mvxpe_softc *, struct mbuf *, int);
STATIC void mvxpe_tx_set_csumflag(struct ifnet *,
    struct mvxpe_tx_desc *, struct mbuf *);
STATIC void mvxpe_tx_complete(struct mvxpe_softc *, uint32_t);
STATIC void mvxpe_tx_queue_complete(struct mvxpe_softc *, int);

/* Rx Subroutines */
STATIC void mvxpe_rx(struct mvxpe_softc *, uint32_t);
STATIC void mvxpe_rx_queue(struct mvxpe_softc *, int, int);
STATIC int mvxpe_rx_queue_select(struct mvxpe_softc *, uint32_t, int *);
STATIC void mvxpe_rx_refill(struct mvxpe_softc *, uint32_t);
STATIC void mvxpe_rx_queue_refill(struct mvxpe_softc *, int);
STATIC int mvxpe_rx_queue_add(struct mvxpe_softc *, int);
STATIC void mvxpe_rx_set_csumflag(struct ifnet *,
    struct mvxpe_rx_desc *, struct mbuf *);

/* MAC address filter */
STATIC uint8_t mvxpe_crc8(const uint8_t *, size_t);
STATIC void mvxpe_filter_setup(struct mvxpe_softc *);

/* sysctl(9) */
STATIC int sysctl_read_mib(SYSCTLFN_PROTO);
STATIC int sysctl_clear_mib(SYSCTLFN_PROTO);
STATIC int sysctl_set_queue_length(SYSCTLFN_PROTO);
STATIC int sysctl_set_queue_rxthtime(SYSCTLFN_PROTO);
STATIC void sysctl_mvxpe_init(struct mvxpe_softc *);

/* MIB */
STATIC void mvxpe_clear_mib(struct mvxpe_softc *);
STATIC void mvxpe_update_mib(struct mvxpe_softc *);

/* for Debug */
STATIC void mvxpe_dump_txdesc(struct mvxpe_tx_desc *, int) __attribute__((__unused__));
STATIC void mvxpe_dump_rxdesc(struct mvxpe_rx_desc *, int) __attribute__((__unused__));

STATIC int mvxpe_root_num;
STATIC kmutex_t mii_mutex;
STATIC int mii_init = 0;
#ifdef MVXPE_DEBUG
STATIC int mvxpe_debug = MVXPE_DEBUG;
#endif

/*
 * List of MIB registers and names
 */
STATIC struct mvxpe_mib_def {
	uint32_t regnum;
	int reg64;
	const char *sysctl_name;
	const char *desc;
	int ext;
#define MVXPE_MIBEXT_IF_OERRORS	1
#define MVXPE_MIBEXT_IF_IERRORS	2
#define MVXPE_MIBEXT_IF_COLLISIONS	3
} mvxpe_mib_list[] = {
	{MVXPE_MIB_RX_GOOD_OCT, 1, "rx_good_oct",
	    "Good Octets Rx", 0},
	{MVXPE_MIB_RX_BAD_OCT, 0, "rx_bad_oct",
	    "Bad Octets Rx", 0},
	{MVXPE_MIB_TX_MAC_TRNS_ERR, 0, "tx_mac_err",
	    "MAC Transmit Error", MVXPE_MIBEXT_IF_OERRORS},
	{MVXPE_MIB_RX_GOOD_FRAME, 0, "rx_good_frame",
	    "Good Frames Rx", 0},
	{MVXPE_MIB_RX_BAD_FRAME, 0, "rx_bad_frame",
	    "Bad Frames Rx", 0},
	{MVXPE_MIB_RX_BCAST_FRAME, 0, "rx_bcast_frame",
	    "Broadcast Frames Rx", 0},
	{MVXPE_MIB_RX_MCAST_FRAME, 0, "rx_mcast_frame",
	    "Multicast Frames Rx", 0},
	{MVXPE_MIB_RX_FRAME64_OCT, 0, "rx_frame_1_64",
	    "Frame Size 1 - 64", 0},
	{MVXPE_MIB_RX_FRAME127_OCT, 0, "rx_frame_65_127",
	    "Frame Size 65 - 127", 0},
	{MVXPE_MIB_RX_FRAME255_OCT, 0, "rx_frame_128_255",
	    "Frame Size 128 - 255", 0},
	{MVXPE_MIB_RX_FRAME511_OCT, 0, "rx_frame_256_511",
	    "Frame Size 256 - 511"},
	{MVXPE_MIB_RX_FRAME1023_OCT, 0, "rx_frame_512_1023",
	    "Frame Size 512 - 1023", 0},
	{MVXPE_MIB_RX_FRAMEMAX_OCT, 0, "rx_fame_1024_max",
	    "Frame Size 1024 - Max", 0},
	{MVXPE_MIB_TX_GOOD_OCT, 1, "tx_good_oct",
	    "Good Octets Tx", 0},
	{MVXPE_MIB_TX_GOOD_FRAME, 0, "tx_good_frame",
	    "Good Frames Tx", 0},
	{MVXPE_MIB_TX_EXCES_COL, 0, "tx_exces_collision",
	    "Excessive Collision", MVXPE_MIBEXT_IF_OERRORS},
	{MVXPE_MIB_TX_MCAST_FRAME, 0, "tx_mcast_frame",
	    "Multicast Frames Tx"},
	{MVXPE_MIB_TX_BCAST_FRAME, 0, "tx_bcast_frame",
	    "Broadcast Frames Tx"},
	{MVXPE_MIB_TX_MAC_CTL_ERR, 0, "tx_mac_err",
	    "Unknown MAC Control", 0},
	{MVXPE_MIB_FC_SENT, 0, "fc_tx",
	    "Flow Control Tx", 0},
	{MVXPE_MIB_FC_GOOD, 0, "fc_rx_good",
	    "Good Flow Control Rx", 0},
	{MVXPE_MIB_FC_BAD, 0, "fc_rx_bad",
	    "Bad Flow Control Rx", 0},
	{MVXPE_MIB_PKT_UNDERSIZE, 0, "pkt_undersize",
	    "Undersized Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
	{MVXPE_MIB_PKT_FRAGMENT, 0, "pkt_fragment",
	    "Fragmented Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
	{MVXPE_MIB_PKT_OVERSIZE, 0, "pkt_oversize",
	    "Oversized Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
	{MVXPE_MIB_PKT_JABBER, 0, "pkt_jabber",
	    "Jabber Packets Rx", MVXPE_MIBEXT_IF_IERRORS},
	{MVXPE_MIB_MAC_RX_ERR, 0, "mac_rx_err",
	    "MAC Rx Errors", MVXPE_MIBEXT_IF_IERRORS},
	{MVXPE_MIB_MAC_CRC_ERR, 0, "mac_crc_err",
	    "MAC CRC Errors", MVXPE_MIBEXT_IF_IERRORS},
	{MVXPE_MIB_MAC_COL, 0, "mac_collision",
	    "MAC Collision", MVXPE_MIBEXT_IF_COLLISIONS},
	{MVXPE_MIB_MAC_LATE_COL, 0, "mac_late_collision",
	    "MAC Late Collision", MVXPE_MIBEXT_IF_OERRORS},
};
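/*
 * Note on the table above: each entry corresponds to one element of the
 * sc_sysctl_mib array allocated in mvxpe_attach() and exported through
 * sysctl_mvxpe_init()/sysctl_read_mib().  reg64 presumably marks counters
 * that the hardware keeps as two consecutive 32-bit MIB registers (the
 * good-octet counts), and a non-zero ext value appears to fold the counter
 * into the matching interface statistic, as the MVXPE_MIBEXT_IF_* names
 * suggest.
 */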
/*
 * autoconf(9)
 */
/* ARGSUSED */
STATIC int
mvxpe_match(device_t parent, cfdata_t match, void *aux)
{
	struct marvell_attach_args *mva = aux;
	bus_size_t pv_off;
	uint32_t pv;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;
	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
		return 0;

	/* check port version */
	pv_off = mva->mva_offset + MVXPE_PV;
	pv = bus_space_read_4(mva->mva_iot, mva->mva_ioh, pv_off);
	if (MVXPE_PV_GET_VERSION(pv) < 0x10)
		return 0; /* old version is not supported */

	return 1;
}

/* ARGSUSED */
STATIC void
mvxpe_attach(device_t parent, device_t self, void *aux)
{
	struct mvxpe_softc *sc = device_private(self);
	struct mii_softc *mii;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct marvell_attach_args *mva = aux;
	prop_dictionary_t dict;
	prop_data_t enaddrp = NULL;
	uint32_t phyaddr, maddrh, maddrl;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int q;

	aprint_naive("\n");
	aprint_normal(": Marvell ARMADA GbE Controller\n");
	memset(sc, 0, sizeof(*sc));
	sc->sc_dev = self;
	sc->sc_port = mva->mva_unit;
	sc->sc_iot = mva->mva_iot;
	sc->sc_dmat = mva->mva_dmat;
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, mvxpe_tick, sc);

	/*
	 * BUS space
	 */
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    mva->mva_offset, mva->mva_size, &sc->sc_ioh)) {
		aprint_error_dev(self, "Cannot map registers\n");
		goto fail;
	}
	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
	    mva->mva_offset + MVXPE_PORTMIB_BASE, MVXPE_PORTMIB_SIZE,
	    &sc->sc_mibh)) {
		aprint_error_dev(self,
		    "Cannot map destination address filter registers\n");
		goto fail;
	}
	sc->sc_version = MVXPE_READ(sc, MVXPE_PV);
	aprint_normal_dev(self, "Port Version %#x\n", sc->sc_version);
	/*
	 * Buffer Manager(BM) subsystem.
	 */
	sc->sc_bm = mvxpbm_device(mva);
	if (sc->sc_bm == NULL) {
		aprint_error_dev(self, "no Buffer Manager.\n");
		goto fail;
	}
	aprint_normal_dev(self,
	    "Using Buffer Manager: %s\n", mvxpbm_xname(sc->sc_bm));
	aprint_normal_dev(sc->sc_dev,
	    "%zu kbytes managed buffer, %zu bytes * %u entries allocated.\n",
	    mvxpbm_buf_size(sc->sc_bm) / 1024,
	    mvxpbm_chunk_size(sc->sc_bm), mvxpbm_chunk_count(sc->sc_bm));

	/*
	 * make sure DMA engines are in reset state
	 */
	MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
	MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);

	/*
	 * Address decoding window
	 */
	mvxpe_wininit(sc, mva->mva_tags);

	/*
	 * MAC address
	 */
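	/*
	 * The controller keeps the station address split across two
	 * registers: MACAH holds bytes 0-3 and MACAL holds bytes 4-5.
	 * The same packing is used below both when programming an address
	 * taken from the device properties and when reading back whatever
	 * the firmware left in the registers.
	 */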
	dict = device_properties(self);
	if (dict)
		enaddrp = prop_dictionary_get(dict, "mac-address");
	if (enaddrp) {
		memcpy(enaddr, prop_data_data_nocopy(enaddrp), ETHER_ADDR_LEN);
		maddrh = enaddr[0] << 24;
		maddrh |= enaddr[1] << 16;
		maddrh |= enaddr[2] << 8;
		maddrh |= enaddr[3];
		maddrl = enaddr[4] << 8;
		maddrl |= enaddr[5];
		MVXPE_WRITE(sc, MVXPE_MACAH, maddrh);
		MVXPE_WRITE(sc, MVXPE_MACAL, maddrl);
	}
	else {
		/*
		 * even if enaddr is not found in the dictionary,
		 * the port may have been initialized by an IPL program
		 * such as U-BOOT.
		 */
		maddrh = MVXPE_READ(sc, MVXPE_MACAH);
		maddrl = MVXPE_READ(sc, MVXPE_MACAL);
		if ((maddrh | maddrl) == 0) {
			aprint_error_dev(self, "No Ethernet address\n");
			return;
		}
	}
	sc->sc_enaddr[0] = maddrh >> 24;
	sc->sc_enaddr[1] = maddrh >> 16;
	sc->sc_enaddr[2] = maddrh >> 8;
	sc->sc_enaddr[3] = maddrh >> 0;
	sc->sc_enaddr[4] = maddrl >> 8;
	sc->sc_enaddr[5] = maddrl >> 0;
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Register interrupt handlers
	 * XXX: handle Ethernet unit intr. and Error intr.
	 */
	mvxpe_disable_intr(sc);
	marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpe_rxtxth_intr, sc);

	/*
	 * MIB buffer allocation
	 */
	sc->sc_sysctl_mib_size =
	    __arraycount(mvxpe_mib_list) * sizeof(struct mvxpe_sysctl_mib);
	sc->sc_sysctl_mib = kmem_alloc(sc->sc_sysctl_mib_size, KM_NOSLEEP);
	if (sc->sc_sysctl_mib == NULL)
		goto fail;
	memset(sc->sc_sysctl_mib, 0, sc->sc_sysctl_mib_size);

	/*
	 * Device DMA Buffer allocation
	 */
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		if (mvxpe_ring_alloc_queue(sc, q) != 0)
			goto fail;
		mvxpe_ring_init_queue(sc, q);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames and jumbo
	 * Ethernet frames.
	 */
	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = mvxpe_start;
	ifp->if_ioctl = mvxpe_ioctl;
	ifp->if_init = mvxpe_init;
	ifp->if_stop = mvxpe_stop;
	ifp->if_watchdog = mvxpe_watchdog;

	/*
	 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
	 */
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx;
	ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx;
	ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Rx;
	ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx;
	ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx;

	/*
	 * Initialize struct ifnet
	 */
	IFQ_SET_MAXLEN(&ifp->if_snd, max(MVXPE_TX_RING_CNT - 1, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));

	/*
	 * Enable DMA engines and Initialize Device Registers.
	 */
	MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
	MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
	MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);
	mvxpe_sc_lock(sc); /* XXX */
	mvxpe_filter_setup(sc);
	mvxpe_sc_unlock(sc);
	mvxpe_initreg(ifp);
	/*
	 * Now MAC is working, setup MII.
	 */
	if (mii_init == 0) {
		/*
		 * The MII bus is shared by all MACs and all PHYs in the SoC.
		 * Serializing the bus access should be safe.
		 */
		mutex_init(&mii_mutex, MUTEX_DEFAULT, IPL_NET);
		mii_init = 1;
	}
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mvxpe_miibus_readreg;
	sc->sc_mii.mii_writereg = mvxpe_miibus_writereg;
	sc->sc_mii.mii_statchg = mvxpe_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    mvxpe_mediachange, mvxpe_mediastatus);
	/*
	 * XXX: phy addressing highly depends on the board design.
	 * we assume phyaddress == MAC unit number here,
	 * but some boards may not.
	 */
	mii_attach(self, &sc->sc_mii, 0xffffffff,
	    MII_PHY_ANY, sc->sc_dev->dv_unit, 0);
	mii = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (mii == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
		phyaddr = MVXPE_PHYADDR_PHYAD(mii->mii_phy);
		MVXPE_WRITE(sc, MVXPE_PHYADDR, phyaddr);
		DPRINTSC(sc, 1, "PHYADDR: %#x\n", MVXPE_READ(sc, MVXPE_PHYADDR));
	}

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);

	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, mvxpe_ifflags_cb);

	sysctl_mvxpe_init(sc);
	mvxpe_evcnt_attach(sc);
	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	return;

fail:
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++)
		mvxpe_ring_dealloc_queue(sc, q);
	if (sc->sc_sysctl_mib)
		kmem_free(sc->sc_sysctl_mib, sc->sc_sysctl_mib_size);

	return;
}
STATIC int
mvxpe_evcnt_attach(struct mvxpe_softc *sc)
{
#ifdef MVXPE_EVENT_COUNTERS
	int q;

	/* Master Interrupt Handler */
	evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtxth, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTxTH Intr.");
	evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtx, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx Intr.");
	evcnt_attach_dynamic(&sc->sc_ev.ev_i_misc, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Intr.");

	/* RXTXTH Interrupt */
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtxth_txerr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTxTH Tx error summary");

	/* MISC Interrupt */
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_phystatuschng, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC phy status changed");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_linkchange, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC link status changed");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_iae, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC internal address error");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxoverrun, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Rx FIFO overrun");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxcrc, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Rx CRC error");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxlargepacket, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Rx too large frame");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txunderrun, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Tx FIFO underrun");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_prbserr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC SERDES loopback test err");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_srse, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC SERDES sync error");
	evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txreq, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "MISC Tx resource error");

	/* RxTx Interrupt */
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rreq, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx Rx resource error");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rpq, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx Rx packet");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_tbrq, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx Tx complete");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rxtxth, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx RxTxTH summary");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_txerr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx Tx error summary");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_misc, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "RxTx MISC summary");

	/* Link */
	evcnt_attach_dynamic(&sc->sc_ev.ev_link_up, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "link up");
	evcnt_attach_dynamic(&sc->sc_ev.ev_link_down, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "link down");

	/* Rx Descriptor */
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_ce, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx CRC error counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_or, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx FIFO overrun counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_mf, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx too large frame counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_re, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx resource error counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_scat, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx unexpected scatter bufs");

	/* Tx Descriptor */
	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_lc, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx late collision counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_rl, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx excess. collision counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_ur, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx FIFO underrun counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_txd_oth, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx unknown error counter");

	/* Status Registers */
	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pdfc, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx discard counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pofc, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Rx overrun counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txbadfcs, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx bad FCS counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txdropped, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx dropped counter");
	evcnt_attach_dynamic(&sc->sc_ev.ev_reg_lpic, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "LP_IDLE counter");

	/* Device Driver Errors */
	evcnt_attach_dynamic(&sc->sc_ev.ev_drv_wdogsoft, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "watchdog timer expired");
	evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txerr, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "Tx descriptor alloc failed");
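	/*
	 * evcnt(9) keeps a reference to the description string rather than
	 * copying it, so the per-queue descriptions built with the
	 * MVXPE_QUEUE_DESC() stringizer below live in static arrays that
	 * remain valid after this function returns.
	 */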
#define MVXPE_QUEUE_DESC(q) "Rx success in queue " # q
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		static const char *rxq_desc[] = {
			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
		};
		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxq[q], EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), rxq_desc[q]);
	}
#undef MVXPE_QUEUE_DESC
#define MVXPE_QUEUE_DESC(q) "Tx success in queue " # q
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		static const char *txq_desc[] = {
			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
		};
		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txq[q], EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), txq_desc[q]);
	}
#undef MVXPE_QUEUE_DESC
#define MVXPE_QUEUE_DESC(q) "Rx error in queue " # q
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		static const char *rxqe_desc[] = {
			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
		};
		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxqe[q], EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), rxqe_desc[q]);
	}
#undef MVXPE_QUEUE_DESC
#define MVXPE_QUEUE_DESC(q) "Tx error in queue " # q
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		static const char *txqe_desc[] = {
			MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1),
			MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3),
			MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5),
			MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7),
		};
		evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txqe[q], EVCNT_TYPE_MISC,
		    NULL, device_xname(sc->sc_dev), txqe_desc[q]);
	}
#undef MVXPE_QUEUE_DESC

#endif /* MVXPE_EVENT_COUNTERS */
	return 0;
}

STATIC void
mvxpe_sc_lock(struct mvxpe_softc *sc)
{
	mutex_enter(&sc->sc_mtx);
}

STATIC void
mvxpe_sc_unlock(struct mvxpe_softc *sc)
{
	mutex_exit(&sc->sc_mtx);
}

/*
 * MII
 */
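/*
 * PHY access goes through the single SMI management register shared by
 * every port: wait for MVXPE_SMI_BUSY to clear, post the read or write
 * command, then (for reads) poll MVXPE_SMI_READVALID for the result.
 * mii_mutex serializes these sequences across all ports on the SoC.
 */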
STATIC int
mvxpe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct mvxpe_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t smi, val;
	int i;

	mutex_enter(&mii_mutex);

	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
			break;
	}
	if (i == MVXPE_PHY_TIMEOUT) {
		aprint_error_ifnet(ifp, "SMI busy timeout\n");
		mutex_exit(&mii_mutex);
		return -1;
	}

	smi =
	    MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) | MVXPE_SMI_OPCODE_READ;
	MVXPE_WRITE(sc, MVXPE_SMI, smi);

	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
		DELAY(1);
		smi = MVXPE_READ(sc, MVXPE_SMI);
		if (smi & MVXPE_SMI_READVALID)
			break;
	}

	mutex_exit(&mii_mutex);

	DPRINTDEV(dev, 9, "i=%d, timeout=%d\n", i, MVXPE_PHY_TIMEOUT);

	val = smi & MVXPE_SMI_DATA_MASK;

	DPRINTDEV(dev, 9, "phy=%d, reg=%#x, val=%#x\n", phy, reg, val);

	return val;
}

STATIC void
mvxpe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct mvxpe_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t smi;
	int i;

	DPRINTDEV(dev, 9, "phy=%d reg=%#x val=%#x\n", phy, reg, val);

	mutex_enter(&mii_mutex);

	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
			break;
	}
	if (i == MVXPE_PHY_TIMEOUT) {
		aprint_error_ifnet(ifp, "SMI busy timeout\n");
		mutex_exit(&mii_mutex);
		return;
	}

	smi = MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) |
	    MVXPE_SMI_OPCODE_WRITE | (val & MVXPE_SMI_DATA_MASK);
	MVXPE_WRITE(sc, MVXPE_SMI, smi);

	for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) {
		DELAY(1);
		if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY))
			break;
	}

	mutex_exit(&mii_mutex);

	if (i == MVXPE_PHY_TIMEOUT)
		aprint_error_ifnet(ifp, "phy write timed out\n");
}

STATIC void
mvxpe_miibus_statchg(struct ifnet *ifp)
{

	/* nothing to do */
}

/*
 * Address Decoding Window
 */
STATIC void
mvxpe_wininit(struct mvxpe_softc *sc, enum marvell_tags *tags)
{
	device_t pdev = device_parent(sc->sc_dev);
	uint64_t base;
	uint32_t en, ac, size;
	int window, target, attr, rv, i;

	/* First disable all address decode windows */
	en = MVXPE_BARE_EN_MASK;
	MVXPE_WRITE(sc, MVXPE_BARE, en);

	ac = 0;
	for (window = 0, i = 0;
	    tags[i] != MARVELL_TAG_UNDEFINED && window < MVXPE_NWINDOW; i++) {
		rv = marvell_winparams_by_tag(pdev, tags[i],
		    &target, &attr, &base, &size);
		if (rv != 0 || size == 0)
			continue;

		if (base > 0xffffffffULL) {
			if (window >= MVXPE_NREMAP) {
				aprint_error_dev(sc->sc_dev,
				    "can't remap window %d\n", window);
				continue;
			}
			MVXPE_WRITE(sc, MVXPE_HA(window),
			    (base >> 32) & 0xffffffff);
		}

		MVXPE_WRITE(sc, MVXPE_BASEADDR(window),
		    MVXPE_BASEADDR_TARGET(target) |
		    MVXPE_BASEADDR_ATTR(attr) |
		    MVXPE_BASEADDR_BASE(base));
		MVXPE_WRITE(sc, MVXPE_S(window), MVXPE_S_SIZE(size));

		DPRINTSC(sc, 1, "Window %d Base 0x%016llx: Size 0x%08x\n",
		    window, base, size);

		en &= ~(1 << window);
		/* set full access (r/w) */
		ac |= MVXPE_EPAP_EPAR(window, MVXPE_EPAP_AC_FA);
		window++;
	}
	/* allow access to the decode windows */
	MVXPE_WRITE(sc, MVXPE_EPAP, ac);

	MVXPE_WRITE(sc, MVXPE_BARE, en);
}
/*
 * Device Register Initialization
 * reset device registers to device driver default value.
 * the device is not enabled here.
 */
STATIC int
mvxpe_initreg(struct ifnet *ifp)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	int serdes = 0;
	uint32_t reg;
	int q, i;

	DPRINTIFNET(ifp, 1, "initializing device register\n");

	/* Init TX/RX Queue Registers */
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		mvxpe_rx_lockq(sc, q);
		if (mvxpe_rx_queue_init(ifp, q) != 0) {
			aprint_error_ifnet(ifp,
			    "initialization failed: cannot initialize queue\n");
			mvxpe_rx_unlockq(sc, q);
			return ENOBUFS;
		}
		mvxpe_rx_unlockq(sc, q);

		mvxpe_tx_lockq(sc, q);
		if (mvxpe_tx_queue_init(ifp, q) != 0) {
			aprint_error_ifnet(ifp,
			    "initialization failed: cannot initialize queue\n");
			mvxpe_tx_unlockq(sc, q);
			return ENOBUFS;
		}
		mvxpe_tx_unlockq(sc, q);
	}

	/* Tx MTU Limit */
	MVXPE_WRITE(sc, MVXPE_TXMTU, MVXPE_MTU);

	/* Check SGMII or SERDES (assume IPL/U-Boot initialized this) */
	reg = MVXPE_READ(sc, MVXPE_PMACC0);
	if ((reg & MVXPE_PMACC0_PORTTYPE) != 0)
		serdes = 1;

	/* Ethernet Unit Control */
	reg = MVXPE_READ(sc, MVXPE_EUC);
	reg |= MVXPE_EUC_POLLING;
	MVXPE_WRITE(sc, MVXPE_EUC, reg);

	/* Auto Negotiation */
	reg = MVXPE_PANC_MUSTSET;	/* must write 0x1 */
	reg |= MVXPE_PANC_FORCELINKFAIL; /* force link state down */
	reg |= MVXPE_PANC_ANSPEEDEN;	/* interface speed negotiation */
	reg |= MVXPE_PANC_ANDUPLEXEN;	/* negotiate duplex mode */
	if (serdes) {
		reg |= MVXPE_PANC_INBANDANEN; /* In Band negotiation */
		reg |= MVXPE_PANC_INBANDANBYPASSEN; /* bypass negotiation */
		reg |= MVXPE_PANC_SETFULLDX; /* set full-duplex on failure */
	}
	MVXPE_WRITE(sc, MVXPE_PANC, reg);

	/* EEE: Low Power Idle */
	reg = MVXPE_LPIC0_LILIMIT(MVXPE_LPI_LI);
	reg |= MVXPE_LPIC0_TSLIMIT(MVXPE_LPI_TS);
	MVXPE_WRITE(sc, MVXPE_LPIC0, reg);

	reg = MVXPE_LPIC1_TWLIMIT(MVXPE_LPI_TS);
	MVXPE_WRITE(sc, MVXPE_LPIC1, reg);

	reg = MVXPE_LPIC2_MUSTSET;
	MVXPE_WRITE(sc, MVXPE_LPIC2, reg);

	/* Port MAC Control set 0 */
	reg = MVXPE_PMACC0_MUSTSET;	/* must write 0x1 */
	reg &= ~MVXPE_PMACC0_PORTEN;	/* port is still disabled */
	reg |= MVXPE_PMACC0_FRAMESIZELIMIT(MVXPE_MRU);
	if (serdes)
		reg |= MVXPE_PMACC0_PORTTYPE;
	MVXPE_WRITE(sc, MVXPE_PMACC0, reg);

	/* Port MAC Control set 1 is only used for loop-back test */

	/* Port MAC Control set 2 */
	reg = MVXPE_READ(sc, MVXPE_PMACC2);
	reg &= (MVXPE_PMACC2_PCSEN | MVXPE_PMACC2_RGMIIEN);
	reg |= MVXPE_PMACC2_MUSTSET;
	MVXPE_WRITE(sc, MVXPE_PMACC2, reg);

	/* Port MAC Control set 3 is used for IPG tune */

	/* Port MAC Control set 4 is not used */

	/* Port Configuration */
	/* Use queue 0 only */
	reg = MVXPE_READ(sc, MVXPE_PXC);
	reg &= ~(MVXPE_PXC_RXQ_MASK | MVXPE_PXC_RXQARP_MASK |
	    MVXPE_PXC_TCPQ_MASK | MVXPE_PXC_UDPQ_MASK | MVXPE_PXC_BPDUQ_MASK);
	MVXPE_WRITE(sc, MVXPE_PXC, reg);

	/* Port Configuration Extended: enable Tx CRC generation */
	reg = MVXPE_READ(sc, MVXPE_PXCX);
	reg &= ~MVXPE_PXCX_TXCRCDIS;
	MVXPE_WRITE(sc, MVXPE_PXCX, reg);

	/* clear MIB counter registers (clear by read) */
	for (i = 0; i < __arraycount(mvxpe_mib_list); i++)
		MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum));

	/* Set SDC register except IPGINT bits */
	reg = MVXPE_SDC_RXBSZ_16_64BITWORDS;
	reg |= MVXPE_SDC_TXBSZ_16_64BITWORDS;
	reg |= MVXPE_SDC_BLMR;
	reg |= MVXPE_SDC_BLMT;
	MVXPE_WRITE(sc, MVXPE_SDC, reg);

	return 0;
}
/*
 * Descriptor Ring Controls for each queue
 */
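/*
 * mvxpe_dma_memalloc() follows the usual bus_dma(9) sequence for a
 * device-visible descriptor area: bus_dmamem_alloc -> bus_dmamem_map ->
 * bus_dmamap_create -> bus_dmamap_load, unwinding in reverse order on
 * failure so no resources leak.
 */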
STATIC void *
mvxpe_dma_memalloc(struct mvxpe_softc *sc, bus_dmamap_t *map, size_t size)
{
	bus_dma_segment_t segs;
	void *kva = NULL;
	int nsegs;

	/*
	 * Allocate the descriptor queues.
	 * struct mvxpe_ring_data contains an array of descriptors per queue.
	 */
	if (bus_dmamem_alloc(sc->sc_dmat,
	    size, PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev,
		    "can't alloc device memory (%zu bytes)\n", size);
		return NULL;
	}
	if (bus_dmamem_map(sc->sc_dmat,
	    &segs, nsegs, size, &kva, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev,
		    "can't map dma buffers (%zu bytes)\n", size);
		goto fail1;
	}

	if (bus_dmamap_create(sc->sc_dmat,
	    size, 1, size, 0, BUS_DMA_NOWAIT, map)) {
		aprint_error_dev(sc->sc_dev, "can't create dma map\n");
		goto fail2;
	}
	if (bus_dmamap_load(sc->sc_dmat,
	    *map, kva, size, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't load dma map\n");
		goto fail3;
	}
	memset(kva, 0, size);
	return kva;

fail3:
	bus_dmamap_destroy(sc->sc_dmat, *map);
	memset(map, 0, sizeof(*map));
fail2:
	bus_dmamem_unmap(sc->sc_dmat, kva, size);
fail1:
	bus_dmamem_free(sc->sc_dmat, &segs, nsegs);
	return NULL;
}

STATIC int
mvxpe_ring_alloc_queue(struct mvxpe_softc *sc, int q)
{
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);

	/*
	 * MVXPE_RX_RING_CNT and MVXPE_TX_RING_CNT are hard limits of the
	 * queue length. The real queue length is limited by
	 * sc->sc_rx_ring[q].rx_queue_len and sc->sc_tx_ring[q].tx_queue_len.
	 *
	 * because descriptor ring reallocation needs reprogramming of
	 * DMA registers, we allocate enough descriptors for the hard limit
	 * of the queue length.
	 */
	rx->rx_descriptors =
	    mvxpe_dma_memalloc(sc, &rx->rx_descriptors_map,
		(sizeof(struct mvxpe_rx_desc) * MVXPE_RX_RING_CNT));
	if (rx->rx_descriptors == NULL)
		goto fail;

	tx->tx_descriptors =
	    mvxpe_dma_memalloc(sc, &tx->tx_descriptors_map,
		(sizeof(struct mvxpe_tx_desc) * MVXPE_TX_RING_CNT));
	if (tx->tx_descriptors == NULL)
		goto fail;

	return 0;
fail:
	mvxpe_ring_dealloc_queue(sc, q);
	aprint_error_dev(sc->sc_dev, "DMA Ring buffer allocation failure.\n");
	return ENOMEM;
}

STATIC void
mvxpe_ring_dealloc_queue(struct mvxpe_softc *sc, int q)
{
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	bus_dma_segment_t *segs;
	bus_size_t size;
	void *kva;
	int nsegs;

	/* Rx */
	kva = (void *)MVXPE_RX_RING_MEM_VA(sc, q);
	if (kva) {
		segs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_segs;
		nsegs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_nsegs;
		size = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_mapsize;

		bus_dmamap_unload(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
		bus_dmamap_destroy(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
		bus_dmamem_unmap(sc->sc_dmat, kva, size);
		bus_dmamem_free(sc->sc_dmat, segs, nsegs);
	}

	/* Tx */
	kva = (void *)MVXPE_TX_RING_MEM_VA(sc, q);
	if (kva) {
		segs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_segs;
		nsegs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_nsegs;
		size = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_mapsize;

		bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
		bus_dmamap_destroy(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
		bus_dmamem_unmap(sc->sc_dmat, kva, size);
		bus_dmamem_free(sc->sc_dmat, segs, nsegs);
	}

	/* Clear all dangling pointers */
	memset(rx, 0, sizeof(*rx));
	memset(tx, 0, sizeof(*tx));
}

STATIC void
mvxpe_ring_init_queue(struct mvxpe_softc *sc, int q)
{
	struct mvxpe_rx_desc *rxd = MVXPE_RX_RING_MEM_VA(sc, q);
	struct mvxpe_tx_desc *txd = MVXPE_TX_RING_MEM_VA(sc, q);
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	static const int rx_default_queue_len[] = {
		MVXPE_RX_QUEUE_LIMIT_0, MVXPE_RX_QUEUE_LIMIT_1,
		MVXPE_RX_QUEUE_LIMIT_2, MVXPE_RX_QUEUE_LIMIT_3,
		MVXPE_RX_QUEUE_LIMIT_4, MVXPE_RX_QUEUE_LIMIT_5,
		MVXPE_RX_QUEUE_LIMIT_6, MVXPE_RX_QUEUE_LIMIT_7,
	};
	static const int tx_default_queue_len[] = {
		MVXPE_TX_QUEUE_LIMIT_0, MVXPE_TX_QUEUE_LIMIT_1,
		MVXPE_TX_QUEUE_LIMIT_2, MVXPE_TX_QUEUE_LIMIT_3,
		MVXPE_TX_QUEUE_LIMIT_4, MVXPE_TX_QUEUE_LIMIT_5,
		MVXPE_TX_QUEUE_LIMIT_6, MVXPE_TX_QUEUE_LIMIT_7,
	};
	extern uint32_t mvTclk;
	int i;

	/* Rx handle */
	for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
		MVXPE_RX_DESC(sc, q, i) = &rxd[i];
		MVXPE_RX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_rx_desc) * i;
		MVXPE_RX_PKTBUF(sc, q, i) = NULL;
	}
	mutex_init(&rx->rx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
	rx->rx_dma = rx->rx_cpu = 0;
	rx->rx_queue_len = rx_default_queue_len[q];
	if (rx->rx_queue_len > MVXPE_RX_RING_CNT)
		rx->rx_queue_len = MVXPE_RX_RING_CNT;
	rx->rx_queue_th_received = rx->rx_queue_len / MVXPE_RXTH_RATIO;
	rx->rx_queue_th_free = rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
	rx->rx_queue_th_time = (mvTclk / 1000) / 2; /* 0.5 [ms] */
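	/*
	 * Rx buffers come from the mvxpbm buffer manager, so only the Tx
	 * side needs a bus_dma map per ring slot; those maps are created
	 * once here and (presumably) reused for each packet loaded onto the
	 * ring by the Tx path.
	 */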
	/* Tx handle */
	for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
		MVXPE_TX_DESC(sc, q, i) = &txd[i];
		MVXPE_TX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_tx_desc) * i;
		MVXPE_TX_MBUF(sc, q, i) = NULL;
		/* Tx handle needs DMA map for bus_dmamap_load_mbuf() */
		if (bus_dmamap_create(sc->sc_dmat,
		    mvxpbm_chunk_size(sc->sc_bm),
		    MVXPE_TX_SEGLIMIT, mvxpbm_chunk_size(sc->sc_bm), 0,
		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &MVXPE_TX_MAP(sc, q, i))) {
			aprint_error_dev(sc->sc_dev,
			    "can't create dma map (tx ring %d)\n", i);
		}
	}
	mutex_init(&tx->tx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
	tx->tx_dma = tx->tx_cpu = 0;
	tx->tx_queue_len = tx_default_queue_len[q];
	if (tx->tx_queue_len > MVXPE_TX_RING_CNT)
		tx->tx_queue_len = MVXPE_TX_RING_CNT;
	tx->tx_used = 0;
	tx->tx_queue_th_free = tx->tx_queue_len / MVXPE_TXTH_RATIO;
}

STATIC void
mvxpe_ring_flush_queue(struct mvxpe_softc *sc, int q)
{
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	struct mbuf *m;
	int i;

	KASSERT_RX_MTX(sc, q);
	KASSERT_TX_MTX(sc, q);

	/* Rx handle */
	for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
		if (MVXPE_RX_PKTBUF(sc, q, i) == NULL)
			continue;
		mvxpbm_free_chunk(MVXPE_RX_PKTBUF(sc, q, i));
		MVXPE_RX_PKTBUF(sc, q, i) = NULL;
	}
	rx->rx_dma = rx->rx_cpu = 0;

	/* Tx handle */
	for (i = 0; i < MVXPE_TX_RING_CNT; i++) {
		m = MVXPE_TX_MBUF(sc, q, i);
		if (m == NULL)
			continue;
		MVXPE_TX_MBUF(sc, q, i) = NULL;
		bus_dmamap_sync(sc->sc_dmat,
		    MVXPE_TX_MAP(sc, q, i), 0, m->m_pkthdr.len,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, i));
		m_freem(m);
	}
	tx->tx_dma = tx->tx_cpu = 0;
	tx->tx_used = 0;
}
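/*
 * The two sync helpers below take an (idx, count) range of descriptors.
 * When the range wraps past the end of the ring, the wrapped part at the
 * start of the ring is synced first and the remaining descriptors from
 * idx are synced with a second bus_dmamap_sync() call.
 */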
STATIC void
mvxpe_ring_sync_rx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
{
	int wrap;

	KASSERT_RX_MTX(sc, q);
	KASSERT(count > 0 && count <= MVXPE_RX_RING_CNT);
	KASSERT(idx >= 0 && idx < MVXPE_RX_RING_CNT);

	wrap = (idx + count) - MVXPE_RX_RING_CNT;
	if (wrap > 0) {
		count -= wrap;
		KASSERT(count > 0);
		bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
		    0, sizeof(struct mvxpe_rx_desc) * wrap, ops);
	}
	bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q),
	    MVXPE_RX_DESC_OFF(sc, q, idx),
	    sizeof(struct mvxpe_rx_desc) * count, ops);
}

STATIC void
mvxpe_ring_sync_tx(struct mvxpe_softc *sc, int q, int idx, int count, int ops)
{
	int wrap = 0;

	KASSERT_TX_MTX(sc, q);
	KASSERT(count > 0 && count <= MVXPE_TX_RING_CNT);
	KASSERT(idx >= 0 && idx < MVXPE_TX_RING_CNT);

	wrap = (idx + count) - MVXPE_TX_RING_CNT;
	if (wrap > 0) {
		count -= wrap;
		bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
		    0, sizeof(struct mvxpe_tx_desc) * wrap, ops);
	}
	bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q),
	    MVXPE_TX_DESC_OFF(sc, q, idx),
	    sizeof(struct mvxpe_tx_desc) * count, ops);
}

/*
 * Rx/Tx Queue Control
 */
STATIC int
mvxpe_rx_queue_init(struct ifnet *ifp, int q)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	uint32_t reg;

	KASSERT_RX_MTX(sc, q);
	KASSERT(MVXPE_RX_RING_MEM_PA(sc, q) != 0);

	/* descriptor address */
	MVXPE_WRITE(sc, MVXPE_PRXDQA(q), MVXPE_RX_RING_MEM_PA(sc, q));

	/* Rx buffer size and descriptor ring size */
	reg = MVXPE_PRXDQS_BUFFERSIZE(mvxpbm_chunk_size(sc->sc_bm) >> 3);
	reg |= MVXPE_PRXDQS_DESCRIPTORSQUEUESIZE(MVXPE_RX_RING_CNT);
	MVXPE_WRITE(sc, MVXPE_PRXDQS(q), reg);
	DPRINTIFNET(ifp, 1, "PRXDQS(%d): %#x\n",
	    q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));

	/* Rx packet offset address */
	reg = MVXPE_PRXC_PACKETOFFSET(mvxpbm_packet_offset(sc->sc_bm) >> 3);
	MVXPE_WRITE(sc, MVXPE_PRXC(q), reg);
	DPRINTIFNET(ifp, 1, "PRXC(%d): %#x\n",
	    q, MVXPE_READ(sc, MVXPE_PRXC(q)));

	/* Rx DMA SNOOP */
	reg = MVXPE_PRXSNP_SNOOPNOOFBYTES(MVXPE_MRU);
	reg |= MVXPE_PRXSNP_L2DEPOSITNOOFBYTES(MVXPE_MRU);
	MVXPE_WRITE(sc, MVXPE_PRXSNP(q), reg);

	/* if DMA is not working, register is not updated */
	KASSERT(MVXPE_READ(sc, MVXPE_PRXDQA(q)) == MVXPE_RX_RING_MEM_PA(sc, q));
	return 0;
}

STATIC int
mvxpe_tx_queue_init(struct ifnet *ifp, int q)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	uint32_t reg;

	KASSERT_TX_MTX(sc, q);
	KASSERT(MVXPE_TX_RING_MEM_PA(sc, q) != 0);

	/* descriptor address */
	MVXPE_WRITE(sc, MVXPE_PTXDQA(q), MVXPE_TX_RING_MEM_PA(sc, q));

	/* Tx threshold, and descriptor ring size */
	reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
	reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT);
	MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);
	DPRINTIFNET(ifp, 1, "PTXDQS(%d): %#x\n",
	    q, MVXPE_READ(sc, MVXPE_PTXDQS(q)));

	/* if DMA is not working, register is not updated */
	KASSERT(MVXPE_READ(sc, MVXPE_PTXDQA(q)) == MVXPE_TX_RING_MEM_PA(sc, q));
	return 0;
}

STATIC int
mvxpe_rx_queue_enable(struct ifnet *ifp, int q)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
	uint32_t reg;

	KASSERT_RX_MTX(sc, q);

	/* Set Rx interrupt threshold */
	reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
	reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free);
	MVXPE_WRITE(sc, MVXPE_PRXDQTH(q), reg);

	reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time);
	MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);

	/* Unmask RXTX_TH Intr. */
	reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
	reg |= MVXPE_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
	reg |= MVXPE_PRXTXTI_RDTAQ(q); /* Rx Descriptor Alert */
	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);

	/* Enable Rx queue */
	reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
	reg |= MVXPE_RQC_ENQ(q);
	MVXPE_WRITE(sc, MVXPE_RQC, reg);

	return 0;
}

STATIC int
mvxpe_tx_queue_enable(struct ifnet *ifp, int q)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
	uint32_t reg;

	KASSERT_TX_MTX(sc, q);

	/* Set Tx interrupt threshold */
	reg = MVXPE_READ(sc, MVXPE_PTXDQS(q));
	reg &= ~MVXPE_PTXDQS_TBT_MASK; /* keep queue size */
	reg |= MVXPE_PTXDQS_TBT(tx->tx_queue_th_free);
	MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg);

	/* Unmask RXTX_TH Intr. */
	reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
	reg |= MVXPE_PRXTXTI_TBTCQ(q); /* Tx Threshold cross */
	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);

	/* Don't update MVXPE_TQC here, there is no packet yet. */
	return 0;
}

STATIC void
mvxpe_rx_lockq(struct mvxpe_softc *sc, int q)
{
	KASSERT(q >= 0);
	KASSERT(q < MVXPE_QUEUE_SIZE);
	mutex_enter(&sc->sc_rx_ring[q].rx_ring_mtx);
}

STATIC void
mvxpe_rx_unlockq(struct mvxpe_softc *sc, int q)
{
	KASSERT(q >= 0);
	KASSERT(q < MVXPE_QUEUE_SIZE);
	mutex_exit(&sc->sc_rx_ring[q].rx_ring_mtx);
}

STATIC void
mvxpe_tx_lockq(struct mvxpe_softc *sc, int q)
{
	KASSERT(q >= 0);
	KASSERT(q < MVXPE_QUEUE_SIZE);
	mutex_enter(&sc->sc_tx_ring[q].tx_ring_mtx);
}

STATIC void
mvxpe_tx_unlockq(struct mvxpe_softc *sc, int q)
{
	KASSERT(q >= 0);
	KASSERT(q < MVXPE_QUEUE_SIZE);
	mutex_exit(&sc->sc_tx_ring[q].tx_ring_mtx);
}

/*
 * Interrupt Handlers
 */
STATIC void
mvxpe_disable_intr(struct mvxpe_softc *sc)
{
	MVXPE_WRITE(sc, MVXPE_EUIM, 0);
	MVXPE_WRITE(sc, MVXPE_EUIC, 0);
	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, 0);
	MVXPE_WRITE(sc, MVXPE_PRXTXTIC, 0);
	MVXPE_WRITE(sc, MVXPE_PRXTXIM, 0);
	MVXPE_WRITE(sc, MVXPE_PRXTXIC, 0);
	MVXPE_WRITE(sc, MVXPE_PMIM, 0);
	MVXPE_WRITE(sc, MVXPE_PMIC, 0);
	MVXPE_WRITE(sc, MVXPE_PIE, 0);
}
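/*
 * Interrupt causes form a hierarchy: the RXTX_TH cause register
 * (PRXTXTIC) carries the per-queue Rx/Tx bits plus summary bits for the
 * MISC (PMIC) and RXTX (PRXTXIC) cause registers.  mvxpe_rxtxth_intr() is
 * the only handler established against the hardware interrupt; it calls
 * mvxpe_misc_intr() and mvxpe_rxtx_intr() when those summary bits are set.
 */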
STATIC void
mvxpe_enable_intr(struct mvxpe_softc *sc)
{
	uint32_t reg;

	/* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
	reg = MVXPE_READ(sc, MVXPE_PMIM);
	reg |= MVXPE_PMI_PHYSTATUSCHNG;
	reg |= MVXPE_PMI_LINKCHANGE;
	reg |= MVXPE_PMI_IAE;
	reg |= MVXPE_PMI_RXOVERRUN;
	reg |= MVXPE_PMI_RXCRCERROR;
	reg |= MVXPE_PMI_RXLARGEPACKET;
	reg |= MVXPE_PMI_TXUNDRN;
#if 0
	/*
	 * The device may raise false interrupts for SERDES even if the device
	 * is not configured to use SERDES connection.
	 */
	reg |= MVXPE_PMI_PRBSERROR;
	reg |= MVXPE_PMI_SRSE;
#else
	reg &= ~MVXPE_PMI_PRBSERROR;
	reg &= ~MVXPE_PMI_SRSE;
#endif
	reg |= MVXPE_PMI_TREQ_MASK;
	MVXPE_WRITE(sc, MVXPE_PMIM, reg);

	/* Enable Summary Bit to check all interrupt cause. */
	reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
	reg |= MVXPE_PRXTXTI_PMISCICSUMMARY;
	reg |= MVXPE_PRXTXTI_PTXERRORSUMMARY;
	reg |= MVXPE_PRXTXTI_PRXTXICSUMMARY;
	MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);

	/* Enable All Queue Interrupt */
	reg = MVXPE_READ(sc, MVXPE_PIE);
	reg |= MVXPE_PIE_RXPKTINTRPTENB_MASK;
	reg |= MVXPE_PIE_TXPKTINTRPTENB_MASK;
	MVXPE_WRITE(sc, MVXPE_PIE, reg);
}

STATIC int
mvxpe_rxtxth_intr(void *arg)
{
	struct mvxpe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t ic, queues, datum = 0;

	DPRINTSC(sc, 2, "got RXTX_TH_Intr\n");
	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtxth);

	mvxpe_sc_lock(sc);
	ic = MVXPE_READ(sc, MVXPE_PRXTXTIC);
	if (ic == 0) {
		mvxpe_sc_unlock(sc);
		return 0;
	}
	MVXPE_WRITE(sc, MVXPE_PRXTXTIC, ~ic);
	datum = datum ^ ic;

	DPRINTIFNET(ifp, 2, "PRXTXTIC: %#x\n", ic);

	/* ack maintenance interrupt first */
	if (ic & MVXPE_PRXTXTI_PTXERRORSUMMARY) {
		DPRINTIFNET(ifp, 1, "PRXTXTIC: +PTXERRORSUMMARY\n");
		MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtxth_txerr);
	}
	if ((ic & MVXPE_PRXTXTI_PMISCICSUMMARY)) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +PMISCICSUMMARY\n");
		mvxpe_misc_intr(sc);
	}
	if (ic & MVXPE_PRXTXTI_PRXTXICSUMMARY) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +PRXTXICSUMMARY\n");
		mvxpe_rxtx_intr(sc);
	}
	if (!(ifp->if_flags & IFF_RUNNING)) {
		mvxpe_sc_unlock(sc);
		return 1;
	}

	/* RxTxTH interrupt */
	queues = MVXPE_PRXTXTI_GET_RBICTAPQ(ic);
	if (queues) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +RXEOF\n");
		mvxpe_rx(sc, queues);
	}
	queues = MVXPE_PRXTXTI_GET_TBTCQ(ic);
	if (queues) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +TBTCQ\n");
		mvxpe_tx_complete(sc, queues);
	}
	queues = MVXPE_PRXTXTI_GET_RDTAQ(ic);
	if (queues) {
		DPRINTIFNET(ifp, 2, "PRXTXTIC: +RDTAQ\n");
		mvxpe_rx_refill(sc, queues);
	}
	mvxpe_sc_unlock(sc);

	if_schedule_deferred_start(ifp);

	rnd_add_uint32(&sc->sc_rnd_source, datum);

	return 1;
}
STATIC int
mvxpe_misc_intr(void *arg)
{
	struct mvxpe_softc *sc = arg;
#ifdef MVXPE_DEBUG
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
#endif
	uint32_t ic;
	uint32_t datum = 0;
	int claimed = 0;

	DPRINTSC(sc, 2, "got MISC_INTR\n");
	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_misc);

	KASSERT_SC_MTX(sc);

	for (;;) {
		ic = MVXPE_READ(sc, MVXPE_PMIC);
		ic &= MVXPE_READ(sc, MVXPE_PMIM);
		if (ic == 0)
			break;
		MVXPE_WRITE(sc, MVXPE_PMIC, ~ic);
		datum = datum ^ ic;
		claimed = 1;

		DPRINTIFNET(ifp, 2, "PMIC=%#x\n", ic);
		if (ic & MVXPE_PMI_PHYSTATUSCHNG) {
			DPRINTIFNET(ifp, 2, "+PHYSTATUSCHNG\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_phystatuschng);
		}
		if (ic & MVXPE_PMI_LINKCHANGE) {
			DPRINTIFNET(ifp, 2, "+LINKCHANGE\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_linkchange);
			mvxpe_linkupdate(sc);
		}
		if (ic & MVXPE_PMI_IAE) {
			DPRINTIFNET(ifp, 2, "+IAE\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_iae);
		}
		if (ic & MVXPE_PMI_RXOVERRUN) {
			DPRINTIFNET(ifp, 2, "+RXOVERRUN\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxoverrun);
		}
		if (ic & MVXPE_PMI_RXCRCERROR) {
			DPRINTIFNET(ifp, 2, "+RXCRCERROR\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxcrc);
		}
		if (ic & MVXPE_PMI_RXLARGEPACKET) {
			DPRINTIFNET(ifp, 2, "+RXLARGEPACKET\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxlargepacket);
		}
		if (ic & MVXPE_PMI_TXUNDRN) {
			DPRINTIFNET(ifp, 2, "+TXUNDRN\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txunderrun);
		}
		if (ic & MVXPE_PMI_PRBSERROR) {
			DPRINTIFNET(ifp, 2, "+PRBSERROR\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_prbserr);
		}
		if (ic & MVXPE_PMI_TREQ_MASK) {
			DPRINTIFNET(ifp, 2, "+TREQ\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txreq);
		}
	}
	if (datum)
		rnd_add_uint32(&sc->sc_rnd_source, datum);

	return claimed;
}

STATIC int
mvxpe_rxtx_intr(void *arg)
{
	struct mvxpe_softc *sc = arg;
#ifdef MVXPE_DEBUG
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
#endif
	uint32_t datum = 0;
	uint32_t prxtxic;
	int claimed = 0;

	DPRINTSC(sc, 2, "got RXTX_Intr\n");
	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtx);

	KASSERT_SC_MTX(sc);

	for (;;) {
		prxtxic = MVXPE_READ(sc, MVXPE_PRXTXIC);
		prxtxic &= MVXPE_READ(sc, MVXPE_PRXTXIM);
		if (prxtxic == 0)
			break;
		MVXPE_WRITE(sc, MVXPE_PRXTXIC, ~prxtxic);
		datum = datum ^ prxtxic;
		claimed = 1;

		DPRINTSC(sc, 2, "PRXTXIC: %#x\n", prxtxic);

		if (prxtxic & MVXPE_PRXTXI_RREQ_MASK) {
			DPRINTIFNET(ifp, 1, "Rx Resource Error.\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rreq);
		}
		if (prxtxic & MVXPE_PRXTXI_RPQ_MASK) {
			DPRINTIFNET(ifp, 1, "Rx Packet in Queue.\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rpq);
		}
		if (prxtxic & MVXPE_PRXTXI_TBRQ_MASK) {
			DPRINTIFNET(ifp, 1, "Tx Buffer Return.\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_tbrq);
		}
		if (prxtxic & MVXPE_PRXTXI_PRXTXTHICSUMMARY) {
			DPRINTIFNET(ifp, 1, "PRXTXTHIC Summary\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rxtxth);
		}
		if (prxtxic & MVXPE_PRXTXI_PTXERRORSUMMARY) {
			DPRINTIFNET(ifp, 1, "PTXERROR Summary\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_txerr);
		}
		if (prxtxic & MVXPE_PRXTXI_PMISCICSUMMARY) {
			DPRINTIFNET(ifp, 1, "PMISCIC Summary\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_misc);
		}
	}
	if (datum)
		rnd_add_uint32(&sc->sc_rnd_source, datum);

	return claimed;
}

STATIC void
mvxpe_tick(void *arg)
{
	struct mvxpe_softc *sc = arg;
	struct mii_data *mii = &sc->sc_mii;

	mvxpe_sc_lock(sc);

	mii_tick(mii);
	mii_pollstat(&sc->sc_mii);

	/* read MIB registers (clear by read) */
	mvxpe_update_mib(sc);

	/* read counter registers (clear by read) */
	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pdfc,
	    MVXPE_READ(sc, MVXPE_PDFC));
	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pofc,
	    MVXPE_READ(sc, MVXPE_POFC));
	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txbadfcs,
	    MVXPE_READ(sc, MVXPE_TXBADFCS));
	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txdropped,
	    MVXPE_READ(sc, MVXPE_TXDROPPED));
	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_lpic,
	    MVXPE_READ(sc, MVXPE_LPIC));

	mvxpe_sc_unlock(sc);

	callout_schedule(&sc->sc_tick_ch, hz);
}
/*
 * struct ifnet and mii callbacks
 */
STATIC void
mvxpe_start(struct ifnet *ifp)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int q;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
		DPRINTIFNET(ifp, 1, "not running\n");
		return;
	}

	mvxpe_sc_lock(sc);
	if (!MVXPE_IS_LINKUP(sc)) {
		/* If Link is DOWN, can't start TX */
		DPRINTIFNET(ifp, 1, "link fail\n");
		for (;;) {
			/*
			 * discard all stale packets.
			 * these may confuse DAD, ARP or timer based protocols.
			 */
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
			m_freem(m);
		}
		mvxpe_sc_unlock(sc);
		return;
	}
	for (;;) {
		/*
		 * don't use IFQ_POLL().
		 * there is a locking problem between IFQ_POLL and IFQ_DEQUEUE
		 * on an SMP-enabled networking stack.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		q = mvxpe_tx_queue_select(sc, m);
		if (q < 0)
			break;
		/* mutex is held in mvxpe_tx_queue_select() */

		if (mvxpe_tx_queue(sc, m, q) != 0) {
			DPRINTIFNET(ifp, 1, "cannot add packet to tx ring\n");
			MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txerr);
			mvxpe_tx_unlockq(sc, q);
			break;
		}
		mvxpe_tx_unlockq(sc, q);
		KASSERT(sc->sc_tx_ring[q].tx_used >= 0);
		KASSERT(sc->sc_tx_ring[q].tx_used <=
		    sc->sc_tx_ring[q].tx_queue_len);
		DPRINTIFNET(ifp, 1, "a packet is added to tx ring\n");
		sc->sc_tx_pending++;
		ifp->if_opackets++;
		ifp->if_timer = 1;
		sc->sc_wdogsoft = 1;
		bpf_mtap(ifp, m, BPF_D_OUT);
	}
	mvxpe_sc_unlock(sc);

	return;
}

STATIC int
mvxpe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = data;
	int error = 0;
	int s;

	switch (cmd) {
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		DPRINTIFNET(ifp, 2, "mvxpe_ioctl MEDIA\n");
		s = splnet(); /* XXX: is there suitable mutex? */
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		splx(s);
		break;
	default:
		DPRINTIFNET(ifp, 2, "mvxpe_ioctl ETHER\n");
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING) {
				mvxpe_sc_lock(sc);
				mvxpe_filter_setup(sc);
				mvxpe_sc_unlock(sc);
			}
			error = 0;
		}
		break;
	}

	return error;
}

STATIC int
mvxpe_init(struct ifnet *ifp)
{
	struct mvxpe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t reg;
	int q;

	mvxpe_sc_lock(sc);

	/* Start DMA Engine */
	MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000);
	MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000);
	MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM);

	/* Enable port */
	reg = MVXPE_READ(sc, MVXPE_PMACC0);
	reg |= MVXPE_PMACC0_PORTEN;
	MVXPE_WRITE(sc, MVXPE_PMACC0, reg);

	/* Link up */
	mvxpe_linkup(sc);

	/* Enable All Queue and interrupt of each Queue */
	for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
		mvxpe_rx_lockq(sc, q);
		mvxpe_rx_queue_enable(ifp, q);
		mvxpe_rx_queue_refill(sc, q);
		mvxpe_rx_unlockq(sc, q);

		mvxpe_tx_lockq(sc, q);
		mvxpe_tx_queue_enable(ifp, q);
		mvxpe_tx_unlockq(sc, q);
	}

	/* Enable interrupt */
	mvxpe_enable_intr(sc);

	/* Set Counter */
	callout_schedule(&sc->sc_tick_ch, hz);

	/* Media check */
	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	mvxpe_sc_unlock(sc);
	return 0;
}
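/*
 * Shutdown is ordered so that no DMA touches freed buffers: Rx interrupts
 * and queues are disabled first (polling RQC until the enable bits clear),
 * then Tx interrupts and queues (polling TQC and waiting for the Tx FIFO
 * to drain), and only then is the MAC PORTEN bit cleared.  With "disable"
 * set, the DMA engines are additionally held in reset and the rings are
 * flushed.
 */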
1803 /* ARGSUSED */
1804 STATIC void
1805 mvxpe_stop(struct ifnet *ifp, int disable)
1806 {
1807 struct mvxpe_softc *sc = ifp->if_softc;
1808 uint32_t reg;
1809 int q, cnt;
1810
1811 DPRINTIFNET(ifp, 1, "stop device dma and interrupts.\n");
1812
1813 mvxpe_sc_lock(sc);
1814
1815 callout_stop(&sc->sc_tick_ch);
1816
1817 /* Link down */
1818 mvxpe_linkdown(sc);
1819
1820 /* Disable Rx interrupt */
1821 reg = MVXPE_READ(sc, MVXPE_PIE);
1822 reg &= ~MVXPE_PIE_RXPKTINTRPTENB_MASK;
1823 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1824
1825 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1826 reg &= ~MVXPE_PRXTXTI_RBICTAPQ_MASK;
1827 reg &= ~MVXPE_PRXTXTI_RDTAQ_MASK;
1828 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1829
1830 /* Wait for all Rx activity to terminate. */
1831 reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK;
1832 reg = MVXPE_RQC_DIS(reg);
1833 MVXPE_WRITE(sc, MVXPE_RQC, reg);
1834 cnt = 0;
1835 do {
1836 if (cnt >= RX_DISABLE_TIMEOUT) {
1837 aprint_error_ifnet(ifp,
1838 "timeout waiting for RX to stop. rqc 0x%x\n", reg);
1839 break;
1840 }
1841 cnt++;
1842 reg = MVXPE_READ(sc, MVXPE_RQC);
1843 } while (reg & MVXPE_RQC_EN_MASK);
1844
1845 /* Wait for all Tx activity to terminate. */
1846 reg = MVXPE_READ(sc, MVXPE_PIE);
1847 reg &= ~MVXPE_PIE_TXPKTINTRPTENB_MASK;
1848 MVXPE_WRITE(sc, MVXPE_PIE, reg);
1849
1850 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM);
1851 reg &= ~MVXPE_PRXTXTI_TBTCQ_MASK;
1852 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg);
1853
1854 reg = MVXPE_READ(sc, MVXPE_TQC) & MVXPE_TQC_EN_MASK;
1855 reg = MVXPE_TQC_DIS(reg);
1856 MVXPE_WRITE(sc, MVXPE_TQC, reg);
1857 cnt = 0;
1858 do {
1859 if (cnt >= TX_DISABLE_TIMEOUT) {
1860 aprint_error_ifnet(ifp,
1861 "timeout waiting for TX to stop. tqc 0x%x\n", reg);
1862 break;
1863 }
1864 cnt++;
1865 reg = MVXPE_READ(sc, MVXPE_TQC);
1866 } while (reg & MVXPE_TQC_EN_MASK);
1867
1868 /* Wait until the Tx FIFO is empty */
1869 cnt = 0;
1870 do {
1871 if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
1872 aprint_error_ifnet(ifp,
1873 "timeout waiting for the TX FIFO to drain. ps0 0x%x\n", reg);
1874 break;
1875 }
1876 cnt++;
1877 reg = MVXPE_READ(sc, MVXPE_PS0);
1878 } while (!(reg & MVXPE_PS0_TXFIFOEMP) && (reg & MVXPE_PS0_TXINPROG));
1879
1880 /* Reset the MAC Port Enable bit */
1881 reg = MVXPE_READ(sc, MVXPE_PMACC0);
1882 reg &= ~MVXPE_PMACC0_PORTEN;
1883 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
1884
1885 /* Disable each queue */
1886 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1887 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1888
1889 mvxpe_rx_lockq(sc, q);
1890 mvxpe_tx_lockq(sc, q);
1891
1892 /* Disable Rx packet buffer refill request */
1893 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received);
1894 reg |= MVXPE_PRXDQTH_NODT(0);
1895 MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg);
1896
1897 if (disable) {
1898 /*
1899 * Hold Reset state of DMA Engine
1900 * (must write 0x0 to restart it)
1901 */
1902 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001);
1903 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001);
1904 mvxpe_ring_flush_queue(sc, q);
1905 }
1906
1907 mvxpe_tx_unlockq(sc, q);
1908 mvxpe_rx_unlockq(sc, q);
1909 }
1910
1911 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1912
1913 mvxpe_sc_unlock(sc);
1914 }
1915
1916 STATIC void
1917 mvxpe_watchdog(struct ifnet *ifp)
1918 {
1919 struct mvxpe_softc *sc = ifp->if_softc;
1920 int q;
1921
1922 mvxpe_sc_lock(sc);
1923
1924 /*
1925 * Reclaim first as there is a possibility of losing Tx completion
1926 * interrupts.
1927 */
1928 mvxpe_tx_complete(sc, 0xff);
1929 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
1930 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1931
1932 if (tx->tx_dma != tx->tx_cpu) {
1933 if (sc->sc_wdogsoft) {
1934 /*
1935 * There is a race condition between the CPU and the DMA
1936 * engine.
When DMA engine encounters queue end, 1937 * it clears MVXPE_TQC_ENQ bit. 1938 * XXX: how about enhanced mode? 1939 */ 1940 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q)); 1941 ifp->if_timer = 5; 1942 sc->sc_wdogsoft = 0; 1943 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_wdogsoft); 1944 } else { 1945 aprint_error_ifnet(ifp, "watchdog timeout\n"); 1946 ifp->if_oerrors++; 1947 mvxpe_linkreset(sc); 1948 mvxpe_sc_unlock(sc); 1949 1950 /* trigger reinitialize sequence */ 1951 mvxpe_stop(ifp, 1); 1952 mvxpe_init(ifp); 1953 1954 mvxpe_sc_lock(sc); 1955 } 1956 } 1957 } 1958 mvxpe_sc_unlock(sc); 1959 } 1960 1961 STATIC int 1962 mvxpe_ifflags_cb(struct ethercom *ec) 1963 { 1964 struct ifnet *ifp = &ec->ec_if; 1965 struct mvxpe_softc *sc = ifp->if_softc; 1966 int change = ifp->if_flags ^ sc->sc_if_flags; 1967 1968 mvxpe_sc_lock(sc); 1969 1970 if (change != 0) 1971 sc->sc_if_flags = ifp->if_flags; 1972 1973 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) { 1974 mvxpe_sc_unlock(sc); 1975 return ENETRESET; 1976 } 1977 1978 if ((change & IFF_PROMISC) != 0) 1979 mvxpe_filter_setup(sc); 1980 1981 if ((change & IFF_UP) != 0) 1982 mvxpe_linkreset(sc); 1983 1984 mvxpe_sc_unlock(sc); 1985 return 0; 1986 } 1987 1988 STATIC int 1989 mvxpe_mediachange(struct ifnet *ifp) 1990 { 1991 return ether_mediachange(ifp); 1992 } 1993 1994 STATIC void 1995 mvxpe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1996 { 1997 ether_mediastatus(ifp, ifmr); 1998 } 1999 2000 /* 2001 * Link State Notify 2002 */ 2003 STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc) 2004 { 2005 int linkup; /* bool */ 2006 2007 KASSERT_SC_MTX(sc); 2008 2009 /* tell miibus */ 2010 mii_pollstat(&sc->sc_mii); 2011 2012 /* syslog */ 2013 linkup = MVXPE_IS_LINKUP(sc); 2014 if (sc->sc_linkstate == linkup) 2015 return; 2016 2017 #ifdef DEBUG 2018 log(LOG_DEBUG, 2019 "%s: link %s\n", device_xname(sc->sc_dev), linkup ? 
"up" : "down"); 2020 #endif 2021 if (linkup) 2022 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_up); 2023 else 2024 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_down); 2025 2026 sc->sc_linkstate = linkup; 2027 } 2028 2029 STATIC void 2030 mvxpe_linkup(struct mvxpe_softc *sc) 2031 { 2032 uint32_t reg; 2033 2034 KASSERT_SC_MTX(sc); 2035 2036 /* set EEE parameters */ 2037 reg = MVXPE_READ(sc, MVXPE_LPIC1); 2038 if (sc->sc_cf.cf_lpi) 2039 reg |= MVXPE_LPIC1_LPIRE; 2040 else 2041 reg &= ~MVXPE_LPIC1_LPIRE; 2042 MVXPE_WRITE(sc, MVXPE_LPIC1, reg); 2043 2044 /* set auto-negotiation parameters */ 2045 reg = MVXPE_READ(sc, MVXPE_PANC); 2046 if (sc->sc_cf.cf_fc) { 2047 /* flow control negotiation */ 2048 reg |= MVXPE_PANC_PAUSEADV; 2049 reg |= MVXPE_PANC_ANFCEN; 2050 } 2051 else { 2052 reg &= ~MVXPE_PANC_PAUSEADV; 2053 reg &= ~MVXPE_PANC_ANFCEN; 2054 } 2055 reg &= ~MVXPE_PANC_FORCELINKFAIL; 2056 reg &= ~MVXPE_PANC_FORCELINKPASS; 2057 MVXPE_WRITE(sc, MVXPE_PANC, reg); 2058 2059 mii_mediachg(&sc->sc_mii); 2060 } 2061 2062 STATIC void 2063 mvxpe_linkdown(struct mvxpe_softc *sc) 2064 { 2065 struct mii_softc *mii; 2066 uint32_t reg; 2067 2068 KASSERT_SC_MTX(sc); 2069 return; 2070 2071 reg = MVXPE_READ(sc, MVXPE_PANC); 2072 reg |= MVXPE_PANC_FORCELINKFAIL; 2073 reg &= MVXPE_PANC_FORCELINKPASS; 2074 MVXPE_WRITE(sc, MVXPE_PANC, reg); 2075 2076 mii = LIST_FIRST(&sc->sc_mii.mii_phys); 2077 if (mii) 2078 mii_phy_down(mii); 2079 } 2080 2081 STATIC void 2082 mvxpe_linkreset(struct mvxpe_softc *sc) 2083 { 2084 struct mii_softc *mii; 2085 2086 KASSERT_SC_MTX(sc); 2087 2088 /* force reset PHY first */ 2089 mii = LIST_FIRST(&sc->sc_mii.mii_phys); 2090 if (mii) 2091 mii_phy_reset(mii); 2092 2093 /* reinit MAC and PHY */ 2094 mvxpe_linkdown(sc); 2095 if ((sc->sc_if_flags & IFF_UP) != 0) 2096 mvxpe_linkup(sc); 2097 } 2098 2099 /* 2100 * Tx Subroutines 2101 */ 2102 STATIC int 2103 mvxpe_tx_queue_select(struct mvxpe_softc *sc, struct mbuf *m) 2104 { 2105 int q = 0; 2106 2107 /* XXX: get attribute from ALTQ framework? 
*/ 2108 mvxpe_tx_lockq(sc, q); 2109 return 0; 2110 } 2111 2112 STATIC int 2113 mvxpe_tx_queue(struct mvxpe_softc *sc, struct mbuf *m, int q) 2114 { 2115 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2116 bus_dma_segment_t *txsegs; 2117 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); 2118 struct mvxpe_tx_desc *t = NULL; 2119 uint32_t ptxsu; 2120 int txnsegs; 2121 int start, used; 2122 int i; 2123 2124 KASSERT_TX_MTX(sc, q); 2125 KASSERT(tx->tx_used >= 0); 2126 KASSERT(tx->tx_used <= tx->tx_queue_len); 2127 2128 /* load mbuf using dmamap of 1st descriptor */ 2129 if (bus_dmamap_load_mbuf(sc->sc_dmat, 2130 MVXPE_TX_MAP(sc, q, tx->tx_cpu), m, BUS_DMA_NOWAIT) != 0) { 2131 m_freem(m); 2132 return ENOBUFS; 2133 } 2134 txsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_segs; 2135 txnsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_nsegs; 2136 if (txnsegs <= 0 || (txnsegs + tx->tx_used) > tx->tx_queue_len) { 2137 /* we have no enough descriptors or mbuf is broken */ 2138 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, tx->tx_cpu)); 2139 m_freem(m); 2140 return ENOBUFS; 2141 } 2142 DPRINTSC(sc, 2, "send packet %p descriptor %d\n", m, tx->tx_cpu); 2143 KASSERT(MVXPE_TX_MBUF(sc, q, tx->tx_cpu) == NULL); 2144 2145 /* remember mbuf using 1st descriptor */ 2146 MVXPE_TX_MBUF(sc, q, tx->tx_cpu) = m; 2147 bus_dmamap_sync(sc->sc_dmat, 2148 MVXPE_TX_MAP(sc, q, tx->tx_cpu), 0, m->m_pkthdr.len, 2149 BUS_DMASYNC_PREWRITE); 2150 2151 /* load to tx descriptors */ 2152 start = tx->tx_cpu; 2153 used = 0; 2154 for (i = 0; i < txnsegs; i++) { 2155 if (__predict_false(txsegs[i].ds_len == 0)) 2156 continue; 2157 t = MVXPE_TX_DESC(sc, q, tx->tx_cpu); 2158 t->command = 0; 2159 t->l4ichk = 0; 2160 t->flags = 0; 2161 if (i == 0) { 2162 /* 1st descriptor */ 2163 t->command |= MVXPE_TX_CMD_W_PACKET_OFFSET(0); 2164 t->command |= MVXPE_TX_CMD_PADDING; 2165 t->command |= MVXPE_TX_CMD_F; 2166 mvxpe_tx_set_csumflag(ifp, t, m); 2167 } 2168 t->bufptr = txsegs[i].ds_addr; 2169 t->bytecnt = txsegs[i].ds_len; 2170 tx->tx_cpu = tx_counter_adv(tx->tx_cpu, 1); 2171 tx->tx_used++; 2172 used++; 2173 } 2174 /* t is last descriptor here */ 2175 KASSERT(t != NULL); 2176 t->command |= MVXPE_TX_CMD_L; 2177 2178 DPRINTSC(sc, 2, "queue %d, %d descriptors used\n", q, used); 2179 #ifdef MVXPE_DEBUG 2180 if (mvxpe_debug > 2) 2181 for (i = start; i <= tx->tx_cpu; i++) { 2182 t = MVXPE_TX_DESC(sc, q, i); 2183 mvxpe_dump_txdesc(t, i); 2184 } 2185 #endif 2186 mvxpe_ring_sync_tx(sc, q, start, used, 2187 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2188 2189 while (used > 255) { 2190 ptxsu = MVXPE_PTXSU_NOWD(255); 2191 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu); 2192 used -= 255; 2193 } 2194 if (used > 0) { 2195 ptxsu = MVXPE_PTXSU_NOWD(used); 2196 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu); 2197 } 2198 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q)); 2199 2200 DPRINTSC(sc, 2, 2201 "PTXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQA(q))); 2202 DPRINTSC(sc, 2, 2203 "PTXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQS(q))); 2204 DPRINTSC(sc, 2, 2205 "PTXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXS(q))); 2206 DPRINTSC(sc, 2, 2207 "PTXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PTXDI(q))); 2208 DPRINTSC(sc, 2, "TQC: %#x\n", MVXPE_READ(sc, MVXPE_TQC)); 2209 DPRINTIFNET(ifp, 2, 2210 "Tx: tx_cpu = %d, tx_dma = %d, tx_used = %d\n", 2211 tx->tx_cpu, tx->tx_dma, tx->tx_used); 2212 return 0; 2213 } 2214 2215 STATIC void 2216 mvxpe_tx_set_csumflag(struct ifnet *ifp, 2217 struct mvxpe_tx_desc *t, struct mbuf *m) 2218 { 2219 struct ether_header *eh; 2220 int csum_flags; 2221 
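/*
 * Translate the stack's M_CSUM_* requests into Tx descriptor command
 * bits: select IPv4/IPv6 L3 handling, pass the IP header length
 * (apparently in 32-bit words, hence the iphl >> 2 below) and the L3
 * offset within the frame, and request TCP/UDP checksum generation
 * where offload was asked for.
 */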
uint32_t iphl = 0, ipoff = 0; 2222 2223 2224 csum_flags = ifp->if_csum_flags_tx & m->m_pkthdr.csum_flags; 2225 2226 eh = mtod(m, struct ether_header *); 2227 switch (htons(eh->ether_type)) { 2228 case ETHERTYPE_IP: 2229 case ETHERTYPE_IPV6: 2230 ipoff = ETHER_HDR_LEN; 2231 break; 2232 case ETHERTYPE_VLAN: 2233 ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2234 break; 2235 } 2236 2237 if (csum_flags & (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) { 2238 iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 2239 t->command |= MVXPE_TX_CMD_L3_IP4; 2240 } 2241 else if (csum_flags & (M_CSUM_TCPv6|M_CSUM_UDPv6)) { 2242 iphl = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data); 2243 t->command |= MVXPE_TX_CMD_L3_IP6; 2244 } 2245 else { 2246 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE; 2247 return; 2248 } 2249 2250 2251 /* L3 */ 2252 if (csum_flags & M_CSUM_IPv4) { 2253 t->command |= MVXPE_TX_CMD_IP4_CHECKSUM; 2254 } 2255 2256 /* L4 */ 2257 if ((csum_flags & 2258 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TCPv6|M_CSUM_UDPv6)) == 0) { 2259 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE; 2260 } 2261 else if (csum_flags & M_CSUM_TCPv4) { 2262 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG; 2263 t->command |= MVXPE_TX_CMD_L4_TCP; 2264 } 2265 else if (csum_flags & M_CSUM_UDPv4) { 2266 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG; 2267 t->command |= MVXPE_TX_CMD_L4_UDP; 2268 } 2269 else if (csum_flags & M_CSUM_TCPv6) { 2270 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG; 2271 t->command |= MVXPE_TX_CMD_L4_TCP; 2272 } 2273 else if (csum_flags & M_CSUM_UDPv6) { 2274 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG; 2275 t->command |= MVXPE_TX_CMD_L4_UDP; 2276 } 2277 2278 t->l4ichk = 0; 2279 t->command |= MVXPE_TX_CMD_IP_HEADER_LEN(iphl >> 2); 2280 t->command |= MVXPE_TX_CMD_L3_OFFSET(ipoff); 2281 } 2282 2283 STATIC void 2284 mvxpe_tx_complete(struct mvxpe_softc *sc, uint32_t queues) 2285 { 2286 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2287 int q; 2288 2289 DPRINTSC(sc, 2, "tx completed.\n"); 2290 2291 KASSERT_SC_MTX(sc); 2292 2293 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 2294 if (!MVXPE_IS_QUEUE_BUSY(queues, q)) 2295 continue; 2296 mvxpe_tx_lockq(sc, q); 2297 mvxpe_tx_queue_complete(sc, q); 2298 mvxpe_tx_unlockq(sc, q); 2299 } 2300 KASSERT(sc->sc_tx_pending >= 0); 2301 if (sc->sc_tx_pending == 0) 2302 ifp->if_timer = 0; 2303 } 2304 2305 STATIC void 2306 mvxpe_tx_queue_complete(struct mvxpe_softc *sc, int q) 2307 { 2308 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); 2309 struct mvxpe_tx_desc *t; 2310 struct mbuf *m; 2311 uint32_t ptxs, ptxsu, ndesc; 2312 int i; 2313 2314 KASSERT_TX_MTX(sc, q); 2315 2316 ptxs = MVXPE_READ(sc, MVXPE_PTXS(q)); 2317 ndesc = MVXPE_PTXS_GET_TBC(ptxs); 2318 if (ndesc == 0) 2319 return; 2320 2321 DPRINTSC(sc, 2, 2322 "tx complete queue %d, %d descriptors.\n", q, ndesc); 2323 2324 mvxpe_ring_sync_tx(sc, q, tx->tx_dma, ndesc, 2325 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2326 2327 for (i = 0; i < ndesc; i++) { 2328 int error = 0; 2329 2330 t = MVXPE_TX_DESC(sc, q, tx->tx_dma); 2331 if (t->flags & MVXPE_TX_F_ES) { 2332 DPRINTSC(sc, 1, 2333 "tx error queue %d desc %d\n", 2334 q, tx->tx_dma); 2335 switch (t->flags & MVXPE_TX_F_EC_MASK) { 2336 case MVXPE_TX_F_EC_LC: 2337 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_lc); 2338 break; 2339 case MVXPE_TX_F_EC_UR: 2340 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_ur); 2341 break; 2342 case MVXPE_TX_F_EC_RL: 2343 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_rl); 2344 break; 2345 default: 2346 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_oth); 2347 break; 2348 } 2349 error = 1; 2350 } 2351 m = 
MVXPE_TX_MBUF(sc, q, tx->tx_dma); 2352 if (m != NULL) { 2353 KASSERT((t->command & MVXPE_TX_CMD_F) != 0); 2354 MVXPE_TX_MBUF(sc, q, tx->tx_dma) = NULL; 2355 bus_dmamap_sync(sc->sc_dmat, 2356 MVXPE_TX_MAP(sc, q, tx->tx_dma), 0, m->m_pkthdr.len, 2357 BUS_DMASYNC_POSTWRITE); 2358 bus_dmamap_unload(sc->sc_dmat, 2359 MVXPE_TX_MAP(sc, q, tx->tx_dma)); 2360 m_freem(m); 2361 sc->sc_tx_pending--; 2362 } 2363 else 2364 KASSERT((t->flags & MVXPE_TX_CMD_F) == 0); 2365 tx->tx_dma = tx_counter_adv(tx->tx_dma, 1); 2366 tx->tx_used--; 2367 if (error) 2368 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txqe[q]); 2369 else 2370 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txq[q]); 2371 } 2372 KASSERT(tx->tx_used >= 0); 2373 KASSERT(tx->tx_used <= tx->tx_queue_len); 2374 while (ndesc > 255) { 2375 ptxsu = MVXPE_PTXSU_NORB(255); 2376 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu); 2377 ndesc -= 255; 2378 } 2379 if (ndesc > 0) { 2380 ptxsu = MVXPE_PTXSU_NORB(ndesc); 2381 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu); 2382 } 2383 DPRINTSC(sc, 2, 2384 "Tx complete q %d, tx_cpu = %d, tx_dma = %d, tx_used = %d\n", 2385 q, tx->tx_cpu, tx->tx_dma, tx->tx_used); 2386 } 2387 2388 /* 2389 * Rx Subroutines 2390 */ 2391 STATIC void 2392 mvxpe_rx(struct mvxpe_softc *sc, uint32_t queues) 2393 { 2394 int q, npkt; 2395 2396 KASSERT_SC_MTX(sc); 2397 2398 while ( (npkt = mvxpe_rx_queue_select(sc, queues, &q))) { 2399 /* mutex is held by rx_queue_select */ 2400 mvxpe_rx_queue(sc, q, npkt); 2401 mvxpe_rx_unlockq(sc, q); 2402 } 2403 } 2404 2405 STATIC void 2406 mvxpe_rx_queue(struct mvxpe_softc *sc, int q, int npkt) 2407 { 2408 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2409 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); 2410 struct mvxpe_rx_desc *r; 2411 struct mvxpbm_chunk *chunk; 2412 struct mbuf *m; 2413 uint32_t prxsu; 2414 int error = 0; 2415 int i; 2416 2417 KASSERT_RX_MTX(sc, q); 2418 2419 mvxpe_ring_sync_rx(sc, q, rx->rx_dma, npkt, 2420 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2421 2422 for (i = 0; i < npkt; i++) { 2423 /* get descriptor and packet */ 2424 chunk = MVXPE_RX_PKTBUF(sc, q, rx->rx_dma); 2425 MVXPE_RX_PKTBUF(sc, q, rx->rx_dma) = NULL; 2426 r = MVXPE_RX_DESC(sc, q, rx->rx_dma); 2427 mvxpbm_dmamap_sync(chunk, r->bytecnt, BUS_DMASYNC_POSTREAD); 2428 2429 /* check errors */ 2430 if (r->status & MVXPE_RX_ES) { 2431 switch (r->status & MVXPE_RX_EC_MASK) { 2432 case MVXPE_RX_EC_CE: 2433 DPRINTIFNET(ifp, 1, "CRC error\n"); 2434 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_ce); 2435 break; 2436 case MVXPE_RX_EC_OR: 2437 DPRINTIFNET(ifp, 1, "Rx FIFO overrun\n"); 2438 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_or); 2439 break; 2440 case MVXPE_RX_EC_MF: 2441 DPRINTIFNET(ifp, 1, "Rx too large frame\n"); 2442 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_mf); 2443 break; 2444 case MVXPE_RX_EC_RE: 2445 DPRINTIFNET(ifp, 1, "Rx resource error\n"); 2446 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_re); 2447 break; 2448 } 2449 error = 1; 2450 goto rx_done; 2451 } 2452 if (!(r->status & MVXPE_RX_F) || !(r->status & MVXPE_RX_L)) { 2453 DPRINTIFNET(ifp, 1, "not support scatter buf\n"); 2454 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_scat); 2455 error = 1; 2456 goto rx_done; 2457 } 2458 2459 if (chunk == NULL) { 2460 device_printf(sc->sc_dev, 2461 "got rx interrupt, but no chunk\n"); 2462 error = 1; 2463 goto rx_done; 2464 } 2465 2466 /* extract packet buffer */ 2467 if (mvxpbm_init_mbuf_hdr(chunk) != 0) { 2468 error = 1; 2469 goto rx_done; 2470 } 2471 m = chunk->m; 2472 m_set_rcvif(m, ifp); 2473 m->m_pkthdr.len = m->m_len = r->bytecnt - ETHER_CRC_LEN; 2474 m_adj(m, MVXPE_HWHEADER_SIZE); /* strip MH */ 2475 
mvxpe_rx_set_csumflag(ifp, r, m);
2476 if_percpuq_enqueue(ifp->if_percpuq, m);
2477 chunk = NULL; /* the BM chunk goes to networking stack now */
2478 rx_done:
2479 if (chunk) {
2480 /* rx error. just return the chunk to BM. */
2481 mvxpbm_free_chunk(chunk);
2482 }
2483 if (error)
2484 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxqe[q]);
2485 else
2486 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxq[q]);
2487 rx->rx_dma = rx_counter_adv(rx->rx_dma, 1);
2488 }
2489 /* DMA status update */
2490 DPRINTSC(sc, 2, "%d packets received from queue %d\n", npkt, q);
2491 while (npkt > 255) {
2492 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
2493 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2494 npkt -= 255;
2495 }
2496 if (npkt > 0) {
2497 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(npkt);
2498 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2499 }
2500
2501 DPRINTSC(sc, 2,
2502 "PRXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQA(q)));
2503 DPRINTSC(sc, 2,
2504 "PRXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQS(q)));
2505 DPRINTSC(sc, 2,
2506 "PRXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXS(q)));
2507 DPRINTSC(sc, 2,
2508 "PRXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PRXDI(q)));
2509 DPRINTSC(sc, 2, "RQC: %#x\n", MVXPE_READ(sc, MVXPE_RQC));
2510 DPRINTIFNET(ifp, 2, "Rx: rx_cpu = %d, rx_dma = %d\n",
2511 rx->rx_cpu, rx->rx_dma);
2512 }
2513
2514 STATIC int
2515 mvxpe_rx_queue_select(struct mvxpe_softc *sc, uint32_t queues, int *queue)
2516 {
2517 uint32_t prxs, npkt;
2518 int q;
2519
2520 KASSERT_SC_MTX(sc);
2521 KASSERT(queue != NULL);
2522 DPRINTSC(sc, 2, "selecting rx queue\n");
2523
2524 for (q = MVXPE_QUEUE_SIZE - 1; q >= 0; q--) {
2525 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2526 continue;
2527
2528 prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2529 npkt = MVXPE_PRXS_GET_ODC(prxs);
2530 if (npkt == 0)
2531 continue;
2532
2533 DPRINTSC(sc, 2,
2534 "queue %d selected: prxs=%#x, %u packets received.\n",
2535 q, prxs, npkt);
2536 *queue = q;
2537 mvxpe_rx_lockq(sc, q);
2538 return npkt;
2539 }
2540
2541 return 0;
2542 }
2543
2544 STATIC void
2545 mvxpe_rx_refill(struct mvxpe_softc *sc, uint32_t queues)
2546 {
2547 int q;
2548
2549 KASSERT_SC_MTX(sc);
2550
2551 /* XXX: check rx bit array */
2552 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) {
2553 if (!MVXPE_IS_QUEUE_BUSY(queues, q))
2554 continue;
2555
2556 mvxpe_rx_lockq(sc, q);
2557 mvxpe_rx_queue_refill(sc, q);
2558 mvxpe_rx_unlockq(sc, q);
2559 }
2560 }
2561
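/*
 * Refill an Rx queue with fresh buffer manager (BM) chunks.  The number
 * of buffers to add is derived from PRXS (occupied plus non-occupied
 * descriptor counts), and the new descriptors are then published to the
 * hardware through PRXSU.  PRXSU is written in steps of at most 255
 * because the "number of new descriptors" field appears to be limited
 * to 255 per update, as in the PTXSU loops above.
 */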
2562 STATIC void
2563 mvxpe_rx_queue_refill(struct mvxpe_softc *sc, int q)
2564 {
2565 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2566 uint32_t prxs, prxsu, ndesc;
2567 int idx, refill = 0;
2568 int npkt;
2569
2570 KASSERT_RX_MTX(sc, q);
2571
2572 prxs = MVXPE_READ(sc, MVXPE_PRXS(q));
2573 ndesc = MVXPE_PRXS_GET_NODC(prxs) + MVXPE_PRXS_GET_ODC(prxs);
2574 refill = rx->rx_queue_len - ndesc;
2575 if (refill <= 0)
2576 return;
2577 DPRINTPRXS(2, q);
2578 DPRINTSC(sc, 2, "%d buffers to refill.\n", refill);
2579
2580 idx = rx->rx_cpu;
2581 for (npkt = 0; npkt < refill; npkt++)
2582 if (mvxpe_rx_queue_add(sc, q) != 0)
2583 break;
2584 DPRINTSC(sc, 2, "queue %d, %d buffers refilled.\n", q, npkt);
2585 if (npkt == 0)
2586 return;
2587
2588 mvxpe_ring_sync_rx(sc, q, idx, npkt,
2589 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2590
2591 while (npkt > 255) {
2592 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(255);
2593 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2594 npkt -= 255;
2595 }
2596 if (npkt > 0) {
2597 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(npkt);
2598 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu);
2599 }
2600 DPRINTPRXS(2, q);
2601 return;
2602 }
2603
2604 STATIC int
2605 mvxpe_rx_queue_add(struct mvxpe_softc *sc, int q)
2606 {
2607 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
2608 struct mvxpe_rx_desc *r;
2609 struct mvxpbm_chunk *chunk = NULL;
2610
2611 KASSERT_RX_MTX(sc, q);
2612
2613 /* Allocate the packet buffer */
2614 chunk = mvxpbm_alloc(sc->sc_bm);
2615 if (chunk == NULL) {
2616 DPRINTSC(sc, 1, "BM chunk allocation failed.\n");
2617 return ENOBUFS;
2618 }
2619
2620 /* Add the packet buffer to the descriptor */
2621 KASSERT(MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) == NULL);
2622 MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) = chunk;
2623 mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
2624
2625 r = MVXPE_RX_DESC(sc, q, rx->rx_cpu);
2626 r->bufptr = chunk->buf_pa;
2627 DPRINTSC(sc, 9, "chunk added to index %d\n", rx->rx_cpu);
2628 rx->rx_cpu = rx_counter_adv(rx->rx_cpu, 1);
2629 return 0;
2630 }
2631
2632 STATIC void
2633 mvxpe_rx_set_csumflag(struct ifnet *ifp,
2634 struct mvxpe_rx_desc *r, struct mbuf *m0)
2635 {
2636 uint32_t csum_flags = 0;
2637
2638 if ((r->status & (MVXPE_RX_IP_HEADER_OK|MVXPE_RX_L3_IP)) == 0)
2639 return; /* not an IP packet */
2640
2641 /* L3 */
2642 if (r->status & MVXPE_RX_L3_IP) {
2643 csum_flags |= M_CSUM_IPv4 & ifp->if_csum_flags_rx;
2644 if ((r->status & MVXPE_RX_IP_HEADER_OK) == 0 &&
2645 (csum_flags & M_CSUM_IPv4) != 0) {
2646 csum_flags |= M_CSUM_IPv4_BAD;
2647 goto finish;
2648 }
2649 else if (r->status & MVXPE_RX_IPV4_FRAGMENT) {
2650 /*
2651 * r->l4chk has the partial checksum of each fragment,
2652 * but there is no way to use it in NetBSD.
2653 */
2654 return;
2655 }
2656 }
2657
2658 /* L4 */
2659 switch (r->status & MVXPE_RX_L4_MASK) {
2660 case MVXPE_RX_L4_TCP:
2661 if (r->status & MVXPE_RX_L3_IP)
2662 csum_flags |= M_CSUM_TCPv4 & ifp->if_csum_flags_rx;
2663 else
2664 csum_flags |= M_CSUM_TCPv6 & ifp->if_csum_flags_rx;
2665 break;
2666 case MVXPE_RX_L4_UDP:
2667 if (r->status & MVXPE_RX_L3_IP)
2668 csum_flags |= M_CSUM_UDPv4 & ifp->if_csum_flags_rx;
2669 else
2670 csum_flags |= M_CSUM_UDPv6 & ifp->if_csum_flags_rx;
2671 break;
2672 case MVXPE_RX_L4_OTH:
2673 default:
2674 break;
2675 }
2676 if ((r->status & MVXPE_RX_L4_CHECKSUM_OK) == 0 && (csum_flags &
2677 (M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6)) != 0)
2678 csum_flags |= M_CSUM_TCP_UDP_BAD;
2679 finish:
2680 m0->m_pkthdr.csum_flags = csum_flags;
2681 }
2682
2683 /*
2684 * MAC address filter
2685 */
2686 STATIC uint8_t
2687 mvxpe_crc8(const uint8_t *data, size_t size)
2688 {
2689 int bit;
2690 uint8_t byte;
2691 uint8_t crc = 0;
2692 const uint8_t poly = 0x07; /* CRC-8, x^8 + x^2 + x + 1 */
2693
2694 while (size--)
2695 for (byte = *data++, bit = NBBY-1; bit >= 0; bit--)
2696 crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);
2697
2698 return crc;
2699 }
2700
2701 CTASSERT(MVXPE_NDFSMT == MVXPE_NDFOMT);
2702
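/*
 * Program the destination address filter.  The port uses three tables:
 * DFUT is indexed by the low nibble of our own unicast address, DFSMT by
 * the last octet of the "special" 01:00:5e:00:00:xx multicast range, and
 * DFOMT by an 8-bit CRC of the full multicast address.  Each 32-bit
 * table word appears to hold four one-byte entries (hence the i>>2 /
 * i&3 indexing below).  In allmulti/promiscuous mode every entry is
 * marked PASS, and promiscuous mode also sets the unicast promiscuous
 * bit in PXC.
 */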
2703 STATIC void
2704 mvxpe_filter_setup(struct mvxpe_softc *sc)
2705 {
2706 struct ethercom *ec = &sc->sc_ethercom;
2707 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2708 struct ether_multi *enm;
2709 struct ether_multistep step;
2710 uint32_t dfut[MVXPE_NDFUT], dfsmt[MVXPE_NDFSMT], dfomt[MVXPE_NDFOMT];
2711 uint32_t pxc;
2712 int i;
2713 const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};
2714
2715 KASSERT_SC_MTX(sc);
2716
2717 memset(dfut, 0, sizeof(dfut));
2718 memset(dfsmt, 0, sizeof(dfsmt));
2719 memset(dfomt, 0, sizeof(dfomt));
2720
2721 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
2722 goto allmulti;
2723 }
2724
2725 ETHER_FIRST_MULTI(step, ec, enm);
2726 while (enm != NULL) {
2727 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2728 /* ranges are complex and somewhat rare */
2729 goto allmulti;
2730 }
2731 /* chip handles some IPv4 multicast specially */
2732 if (memcmp(enm->enm_addrlo, special, 5) == 0) {
2733 i = enm->enm_addrlo[5];
2734 dfsmt[i>>2] |=
2735 MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2736 } else {
2737 i = mvxpe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
2738 dfomt[i>>2] |=
2739 MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2740 }
2741
2742 ETHER_NEXT_MULTI(step, enm);
2743 }
2744 goto set;
2745
2746 allmulti:
2747 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
2748 for (i = 0; i < MVXPE_NDFSMT; i++) {
2749 dfsmt[i] = dfomt[i] =
2750 MVXPE_DF(0, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2751 MVXPE_DF(1, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2752 MVXPE_DF(2, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2753 MVXPE_DF(3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2754 }
2755 }
2756
2757 set:
2758 pxc = MVXPE_READ(sc, MVXPE_PXC);
2759 pxc &= ~MVXPE_PXC_UPM;
2760 pxc |= MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP;
2761 if (ifp->if_flags & IFF_BROADCAST) {
2762 pxc &= ~(MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP);
2763 }
2764 if (ifp->if_flags & IFF_PROMISC) {
2765 pxc |= MVXPE_PXC_UPM;
2766 }
2767 MVXPE_WRITE(sc, MVXPE_PXC, pxc);
2768
2769 /* Set Destination Address Filter Unicast Table */
2770 if (ifp->if_flags & IFF_PROMISC) {
2771 /* pass all unicast addresses */
2772 for (i = 0; i < MVXPE_NDFUT; i++) {
2773 dfut[i] =
2774 MVXPE_DF(0, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2775 MVXPE_DF(1, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2776 MVXPE_DF(2, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) |
2777 MVXPE_DF(3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2778 }
2779 }
2780 else {
2781 i = sc->sc_enaddr[5] & 0xf; /* last nibble */
2782 dfut[i>>2] = MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS);
2783 }
2784 MVXPE_WRITE_REGION(sc, MVXPE_DFUT(0), dfut, MVXPE_NDFUT);
2785
2786 /* Set Destination Address Filter Multicast Tables */
2787 MVXPE_WRITE_REGION(sc, MVXPE_DFSMT(0), dfsmt, MVXPE_NDFSMT);
2788 MVXPE_WRITE_REGION(sc, MVXPE_DFOMT(0), dfomt, MVXPE_NDFOMT);
2789 }
2790
2791 /*
2792 * sysctl(9)
2793 */
2794 SYSCTL_SETUP(sysctl_mvxpe, "sysctl mvxpe subtree setup")
2795 {
2796 int rc;
2797 const struct sysctlnode *node;
2798
2799 if ((rc = sysctl_createv(clog, 0, NULL, &node,
2800 0, CTLTYPE_NODE, "mvxpe",
2801 SYSCTL_DESCR("mvxpe interface controls"),
2802 NULL, 0, NULL, 0,
2803 CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
2804 goto err;
2805 }
2806
2807 mvxpe_root_num = node->sysctl_num;
2808 return;
2809
2810 err:
2811 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
2812 }
2813
2814 STATIC int
2815 sysctl_read_mib(SYSCTLFN_ARGS)
2816 {
2817 struct
mvxpe_sysctl_mib *arg; 2818 struct mvxpe_softc *sc; 2819 struct sysctlnode node; 2820 uint64_t val; 2821 int err; 2822 2823 node = *rnode; 2824 arg = (struct mvxpe_sysctl_mib *)rnode->sysctl_data; 2825 if (arg == NULL) 2826 return EINVAL; 2827 2828 sc = arg->sc; 2829 if (sc == NULL) 2830 return EINVAL; 2831 if (arg->index < 0 || arg->index > __arraycount(mvxpe_mib_list)) 2832 return EINVAL; 2833 2834 mvxpe_sc_lock(sc); 2835 val = arg->counter; 2836 mvxpe_sc_unlock(sc); 2837 2838 node.sysctl_data = &val; 2839 err = sysctl_lookup(SYSCTLFN_CALL(&node)); 2840 if (err) 2841 return err; 2842 if (newp) 2843 return EINVAL; 2844 2845 return 0; 2846 } 2847 2848 2849 STATIC int 2850 sysctl_clear_mib(SYSCTLFN_ARGS) 2851 { 2852 struct mvxpe_softc *sc; 2853 struct sysctlnode node; 2854 int val; 2855 int err; 2856 2857 node = *rnode; 2858 sc = (struct mvxpe_softc *)rnode->sysctl_data; 2859 if (sc == NULL) 2860 return EINVAL; 2861 2862 val = 0; 2863 node.sysctl_data = &val; 2864 err = sysctl_lookup(SYSCTLFN_CALL(&node)); 2865 if (err || newp == NULL) 2866 return err; 2867 if (val < 0 || val > 1) 2868 return EINVAL; 2869 if (val == 1) { 2870 mvxpe_sc_lock(sc); 2871 mvxpe_clear_mib(sc); 2872 mvxpe_sc_unlock(sc); 2873 } 2874 2875 return 0; 2876 } 2877 2878 STATIC int 2879 sysctl_set_queue_length(SYSCTLFN_ARGS) 2880 { 2881 struct mvxpe_sysctl_queue *arg; 2882 struct mvxpe_rx_ring *rx = NULL; 2883 struct mvxpe_tx_ring *tx = NULL; 2884 struct mvxpe_softc *sc; 2885 struct sysctlnode node; 2886 uint32_t reg; 2887 int val; 2888 int err; 2889 2890 node = *rnode; 2891 2892 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data; 2893 if (arg == NULL) 2894 return EINVAL; 2895 if (arg->queue < 0 || arg->queue > MVXPE_RX_RING_CNT) 2896 return EINVAL; 2897 if (arg->rxtx != MVXPE_SYSCTL_RX && arg->rxtx != MVXPE_SYSCTL_TX) 2898 return EINVAL; 2899 2900 sc = arg->sc; 2901 if (sc == NULL) 2902 return EINVAL; 2903 2904 /* read queue length */ 2905 mvxpe_sc_lock(sc); 2906 switch (arg->rxtx) { 2907 case MVXPE_SYSCTL_RX: 2908 mvxpe_rx_lockq(sc, arg->queue); 2909 rx = MVXPE_RX_RING(sc, arg->queue); 2910 val = rx->rx_queue_len; 2911 mvxpe_rx_unlockq(sc, arg->queue); 2912 break; 2913 case MVXPE_SYSCTL_TX: 2914 mvxpe_tx_lockq(sc, arg->queue); 2915 tx = MVXPE_TX_RING(sc, arg->queue); 2916 val = tx->tx_queue_len; 2917 mvxpe_tx_unlockq(sc, arg->queue); 2918 break; 2919 } 2920 2921 node.sysctl_data = &val; 2922 err = sysctl_lookup(SYSCTLFN_CALL(&node)); 2923 if (err || newp == NULL) { 2924 mvxpe_sc_unlock(sc); 2925 return err; 2926 } 2927 2928 /* update queue length */ 2929 if (val < 8 || val > MVXPE_RX_RING_CNT) { 2930 mvxpe_sc_unlock(sc); 2931 return EINVAL; 2932 } 2933 switch (arg->rxtx) { 2934 case MVXPE_SYSCTL_RX: 2935 mvxpe_rx_lockq(sc, arg->queue); 2936 rx->rx_queue_len = val; 2937 rx->rx_queue_th_received = 2938 rx->rx_queue_len / MVXPE_RXTH_RATIO; 2939 rx->rx_queue_th_free = 2940 rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO; 2941 2942 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received); 2943 reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free); 2944 MVXPE_WRITE(sc, MVXPE_PRXDQTH(arg->queue), reg); 2945 2946 mvxpe_rx_unlockq(sc, arg->queue); 2947 break; 2948 case MVXPE_SYSCTL_TX: 2949 mvxpe_tx_lockq(sc, arg->queue); 2950 tx->tx_queue_len = val; 2951 tx->tx_queue_th_free = 2952 tx->tx_queue_len / MVXPE_TXTH_RATIO; 2953 2954 reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free); 2955 reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT); 2956 MVXPE_WRITE(sc, MVXPE_PTXDQS(arg->queue), reg); 2957 2958 mvxpe_tx_unlockq(sc, arg->queue); 2959 break; 2960 } 2961 
mvxpe_sc_unlock(sc); 2962 2963 return 0; 2964 } 2965 2966 STATIC int 2967 sysctl_set_queue_rxthtime(SYSCTLFN_ARGS) 2968 { 2969 struct mvxpe_sysctl_queue *arg; 2970 struct mvxpe_rx_ring *rx = NULL; 2971 struct mvxpe_softc *sc; 2972 struct sysctlnode node; 2973 extern uint32_t mvTclk; 2974 uint32_t reg, time_mvtclk; 2975 int time_us; 2976 int err; 2977 2978 node = *rnode; 2979 2980 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data; 2981 if (arg == NULL) 2982 return EINVAL; 2983 if (arg->queue < 0 || arg->queue > MVXPE_RX_RING_CNT) 2984 return EINVAL; 2985 if (arg->rxtx != MVXPE_SYSCTL_RX) 2986 return EINVAL; 2987 2988 sc = arg->sc; 2989 if (sc == NULL) 2990 return EINVAL; 2991 2992 /* read queue length */ 2993 mvxpe_sc_lock(sc); 2994 mvxpe_rx_lockq(sc, arg->queue); 2995 rx = MVXPE_RX_RING(sc, arg->queue); 2996 time_mvtclk = rx->rx_queue_th_time; 2997 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / mvTclk; 2998 node.sysctl_data = &time_us; 2999 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n", 3000 arg->queue, MVXPE_READ(sc, MVXPE_PRXITTH(arg->queue))); 3001 err = sysctl_lookup(SYSCTLFN_CALL(&node)); 3002 if (err || newp == NULL) { 3003 mvxpe_rx_unlockq(sc, arg->queue); 3004 mvxpe_sc_unlock(sc); 3005 return err; 3006 } 3007 3008 /* update queue length (0[sec] - 1[sec]) */ 3009 if (time_us < 0 || time_us > (1000 * 1000)) { 3010 mvxpe_rx_unlockq(sc, arg->queue); 3011 mvxpe_sc_unlock(sc); 3012 return EINVAL; 3013 } 3014 time_mvtclk = 3015 (uint64_t)mvTclk * (uint64_t)time_us / (1000ULL * 1000ULL); 3016 rx->rx_queue_th_time = time_mvtclk; 3017 reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time); 3018 MVXPE_WRITE(sc, MVXPE_PRXITTH(arg->queue), reg); 3019 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n", arg->queue, reg); 3020 mvxpe_rx_unlockq(sc, arg->queue); 3021 mvxpe_sc_unlock(sc); 3022 3023 return 0; 3024 } 3025 3026 3027 STATIC void 3028 sysctl_mvxpe_init(struct mvxpe_softc *sc) 3029 { 3030 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3031 const struct sysctlnode *node; 3032 int mvxpe_nodenum; 3033 int mvxpe_mibnum; 3034 int mvxpe_rxqueuenum; 3035 int mvxpe_txqueuenum; 3036 int q, i; 3037 3038 /* hw.mvxpe.mvxpe[unit] */ 3039 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3040 0, CTLTYPE_NODE, ifp->if_xname, 3041 SYSCTL_DESCR("mvxpe per-controller controls"), 3042 NULL, 0, NULL, 0, 3043 CTL_HW, mvxpe_root_num, CTL_CREATE, 3044 CTL_EOL) != 0) { 3045 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3046 return; 3047 } 3048 mvxpe_nodenum = node->sysctl_num; 3049 3050 /* hw.mvxpe.mvxpe[unit].mib */ 3051 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3052 0, CTLTYPE_NODE, "mib", 3053 SYSCTL_DESCR("mvxpe per-controller MIB counters"), 3054 NULL, 0, NULL, 0, 3055 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, 3056 CTL_EOL) != 0) { 3057 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3058 return; 3059 } 3060 mvxpe_mibnum = node->sysctl_num; 3061 3062 /* hw.mvxpe.mvxpe[unit].rx */ 3063 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3064 0, CTLTYPE_NODE, "rx", 3065 SYSCTL_DESCR("Rx Queues"), 3066 NULL, 0, NULL, 0, 3067 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) { 3068 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3069 return; 3070 } 3071 mvxpe_rxqueuenum = node->sysctl_num; 3072 3073 /* hw.mvxpe.mvxpe[unit].tx */ 3074 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3075 0, CTLTYPE_NODE, "tx", 3076 SYSCTL_DESCR("Tx Queues"), 3077 NULL, 0, NULL, 0, 3078 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 
0) { 3079 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3080 return; 3081 } 3082 mvxpe_txqueuenum = node->sysctl_num; 3083 3084 #ifdef MVXPE_DEBUG 3085 /* hw.mvxpe.debug */ 3086 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3087 CTLFLAG_READWRITE, CTLTYPE_INT, "debug", 3088 SYSCTL_DESCR("mvxpe device driver debug control"), 3089 NULL, 0, &mvxpe_debug, 0, 3090 CTL_HW, mvxpe_root_num, CTL_CREATE, CTL_EOL) != 0) { 3091 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3092 return; 3093 } 3094 #endif 3095 /* 3096 * MIB access 3097 */ 3098 /* hw.mvxpe.mvxpe[unit].mib.<mibs> */ 3099 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) { 3100 const char *name = mvxpe_mib_list[i].sysctl_name; 3101 const char *desc = mvxpe_mib_list[i].desc; 3102 struct mvxpe_sysctl_mib *mib_arg = &sc->sc_sysctl_mib[i]; 3103 3104 mib_arg->sc = sc; 3105 mib_arg->index = i; 3106 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3107 CTLFLAG_READONLY, CTLTYPE_QUAD, name, desc, 3108 sysctl_read_mib, 0, (void *)mib_arg, 0, 3109 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_mibnum, 3110 CTL_CREATE, CTL_EOL) != 0) { 3111 aprint_normal_dev(sc->sc_dev, 3112 "couldn't create sysctl node\n"); 3113 break; 3114 } 3115 } 3116 3117 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 3118 struct mvxpe_sysctl_queue *rxarg = &sc->sc_sysctl_rx_queue[q]; 3119 struct mvxpe_sysctl_queue *txarg = &sc->sc_sysctl_tx_queue[q]; 3120 #define MVXPE_SYSCTL_NAME(num) "queue" # num 3121 static const char *sysctl_queue_names[] = { 3122 MVXPE_SYSCTL_NAME(0), MVXPE_SYSCTL_NAME(1), 3123 MVXPE_SYSCTL_NAME(2), MVXPE_SYSCTL_NAME(3), 3124 MVXPE_SYSCTL_NAME(4), MVXPE_SYSCTL_NAME(5), 3125 MVXPE_SYSCTL_NAME(6), MVXPE_SYSCTL_NAME(7), 3126 }; 3127 #undef MVXPE_SYSCTL_NAME 3128 #ifdef SYSCTL_INCLUDE_DESCR 3129 #define MVXPE_SYSCTL_DESCR(num) "configuration parameters for queue " # num 3130 static const char *sysctl_queue_descrs[] = { 3131 MVXPE_SYSCTL_DESCR(0), MVXPE_SYSCTL_DESCR(1), 3132 MVXPE_SYSCTL_DESCR(2), MVXPE_SYSCTL_DESCR(3), 3133 MVXPE_SYSCTL_DESCR(4), MVXPE_SYSCTL_DESCR(5), 3134 MVXPE_SYSCTL_DESCR(6), MVXPE_SYSCTL_DESCR(7), 3135 }; 3136 #undef MVXPE_SYSCTL_DESCR 3137 #endif /* SYSCTL_INCLUDE_DESCR */ 3138 int mvxpe_curnum; 3139 3140 rxarg->sc = txarg->sc = sc; 3141 rxarg->queue = txarg->queue = q; 3142 rxarg->rxtx = MVXPE_SYSCTL_RX; 3143 txarg->rxtx = MVXPE_SYSCTL_TX; 3144 3145 /* hw.mvxpe.mvxpe[unit].rx.[queue] */ 3146 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3147 0, CTLTYPE_NODE, 3148 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]), 3149 NULL, 0, NULL, 0, 3150 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum, 3151 CTL_CREATE, CTL_EOL) != 0) { 3152 aprint_normal_dev(sc->sc_dev, 3153 "couldn't create sysctl node\n"); 3154 break; 3155 } 3156 mvxpe_curnum = node->sysctl_num; 3157 3158 /* hw.mvxpe.mvxpe[unit].rx.[queue].length */ 3159 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3160 CTLFLAG_READWRITE, CTLTYPE_INT, "length", 3161 SYSCTL_DESCR("maximum length of the queue"), 3162 sysctl_set_queue_length, 0, (void *)rxarg, 0, 3163 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum, 3164 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) { 3165 aprint_normal_dev(sc->sc_dev, 3166 "couldn't create sysctl node\n"); 3167 break; 3168 } 3169 3170 /* hw.mvxpe.mvxpe[unit].rx.[queue].threshold_timer_us */ 3171 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3172 CTLFLAG_READWRITE, CTLTYPE_INT, "threshold_timer_us", 3173 SYSCTL_DESCR("interrupt coalescing threshold timer [us]"), 3174 
sysctl_set_queue_rxthtime, 0, (void *)rxarg, 0,
3175 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum,
3176 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3177 aprint_normal_dev(sc->sc_dev,
3178 "couldn't create sysctl node\n");
3179 break;
3180 }
3181
3182 /* hw.mvxpe.mvxpe[unit].tx.[queue] */
3183 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3184 0, CTLTYPE_NODE,
3185 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]),
3186 NULL, 0, NULL, 0,
3187 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3188 CTL_CREATE, CTL_EOL) != 0) {
3189 aprint_normal_dev(sc->sc_dev,
3190 "couldn't create sysctl node\n");
3191 break;
3192 }
3193 mvxpe_curnum = node->sysctl_num;
3194
3195 /* hw.mvxpe.mvxpe[unit].tx.[queue].length */
3196 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3197 CTLFLAG_READWRITE, CTLTYPE_INT, "length",
3198 SYSCTL_DESCR("maximum length of the queue"),
3199 sysctl_set_queue_length, 0, (void *)txarg, 0,
3200 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum,
3201 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) {
3202 aprint_normal_dev(sc->sc_dev,
3203 "couldn't create sysctl node\n");
3204 break;
3205 }
3206 }
3207
3208 /* hw.mvxpe.mvxpe[unit].clear_mib */
3209 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node,
3210 CTLFLAG_READWRITE, CTLTYPE_INT, "clear_mib",
3211 SYSCTL_DESCR("write 1 to clear the MIB counters"),
3212 sysctl_clear_mib, 0, (void *)sc, 0,
3213 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE,
3214 CTL_EOL) != 0) {
3215 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n");
3216 return;
3217 }
3218
3219 }
3220
3221 /*
3222 * MIB
3223 */
3224 STATIC void
3225 mvxpe_clear_mib(struct mvxpe_softc *sc)
3226 {
3227 int i;
3228
3229 KASSERT_SC_MTX(sc);
3230
3231 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3232 if (mvxpe_mib_list[i].reg64)
3233 MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum + 4));
3234 MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3235 sc->sc_sysctl_mib[i].counter = 0;
3236 }
3237 }
3238
3239 STATIC void
3240 mvxpe_update_mib(struct mvxpe_softc *sc)
3241 {
3242 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3243 int i;
3244
3245 KASSERT_SC_MTX(sc);
3246
3247 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) {
3248 uint32_t val_hi;
3249 uint32_t val_lo;
3250 uint64_t val;
3251
3252 if (mvxpe_mib_list[i].reg64) {
3253 /* XXX: implement bus_space_read_8() */
3254 val_lo = MVXPE_READ_MIB(sc,
3255 (mvxpe_mib_list[i].regnum + 4));
3256 val_hi = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3257 }
3258 else {
3259 val_lo = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum);
3260 val_hi = 0;
3261 }
3262
3263 if ((val_lo | val_hi) == 0)
3264 continue;
3265
3266 val = ((uint64_t)val_hi << 32) | (uint64_t)val_lo;
3267 sc->sc_sysctl_mib[i].counter += val;
3268
3269 switch (mvxpe_mib_list[i].ext) {
3270 case MVXPE_MIBEXT_IF_OERRORS:
3271 ifp->if_oerrors += val;
3272 break;
3273 case MVXPE_MIBEXT_IF_IERRORS:
3274 ifp->if_ierrors += val;
3275 break;
3276 case MVXPE_MIBEXT_IF_COLLISIONS:
3277 ifp->if_collisions += val;
3278 break;
3279 default:
3280 break;
3281 }
3282
3283 }
3284 }
3285
3286 /*
3287 * for Debug
3288 */
3289 STATIC void
3290 mvxpe_dump_txdesc(struct mvxpe_tx_desc *desc, int idx)
3291 {
3292 #define DESC_PRINT(X) \
3293 if (X) \
3294 printf("txdesc[%d]."
#X "=%#x\n", idx, X); 3295 3296 DESC_PRINT(desc->command); 3297 DESC_PRINT(desc->l4ichk); 3298 DESC_PRINT(desc->bytecnt); 3299 DESC_PRINT(desc->bufptr); 3300 DESC_PRINT(desc->flags); 3301 #undef DESC_PRINT 3302 } 3303 3304 STATIC void 3305 mvxpe_dump_rxdesc(struct mvxpe_rx_desc *desc, int idx) 3306 { 3307 #define DESC_PRINT(X) \ 3308 if (X) \ 3309 printf("rxdesc[%d]." #X "=%#x\n", idx, X); 3310 3311 DESC_PRINT(desc->status); 3312 DESC_PRINT(desc->bytecnt); 3313 DESC_PRINT(desc->bufptr); 3314 DESC_PRINT(desc->l4chk); 3315 #undef DESC_PRINT 3316 } 3317
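/*
 * The sysctl tree built in sysctl_mvxpe_init() exposes per-interface
 * tuning knobs.  As an illustrative example (node names depend on the
 * attached unit and the queue number), the first mvxpe(4) instance would
 * provide nodes such as:
 *
 *   hw.mvxpe.mvxpe0.rx.queue0.length                ring length (8..MVXPE_RX_RING_CNT)
 *   hw.mvxpe.mvxpe0.rx.queue0.threshold_timer_us    interrupt coalescing timer, 0..1000000 us
 *   hw.mvxpe.mvxpe0.tx.queue0.length
 *   hw.mvxpe.mvxpe0.mib.<counter>                   read-only MIB counters
 *   hw.mvxpe.mvxpe0.clear_mib                       write 1 to clear the MIB counters
 *
 * which can be read or set with sysctl(8).
 */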