/* $NetBSD: if_scx.c,v 1.25 2021/08/02 12:56:22 andvar Exp $ */

/*-
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Tohru Nishimura.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Socionext SC2A11 SynQuacer NetSec GbE driver
 *
 * Multiple Tx and Rx queues exist inside, and dedicated descriptor
 * fields specify which queue a frame belongs to. Three internal
 * micro-processors handle incoming frames, outgoing frames and packet
 * data crypto processing. uP programs are stored in an external flash
 * memory and have to be loaded by the device driver.
 * NetSec uses Synopsys DesignWare Core EMAC. The DWC implementation
 * register (0x20) is known to have 0x10.36 and the feature register
 * (0x1058) to report XX.XX.
 */
#define NOT_MP_SAFE	0

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_scx.c,v 1.25 2021/08/02 12:56:22 andvar Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/intr.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/errno.h>
#include <sys/rndsource.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <net/bpf.h>

#include <dev/fdt/fdtvar.h>
#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_intr.h>

/* Socionext SC2A11 descriptor format */
struct tdes {
	uint32_t t0, t1, t2, t3;
};

struct rdes {
	uint32_t r0, r1, r2, r3;
};

#define T0_OWN		(1U<<31)	/* desc is ready to Tx */
#define T0_EOD		(1U<<30)	/* end of descriptor array */
#define T0_DRID		(24)		/* 29:24 D-RID */
#define T0_PT		(1U<<21)	/* 23:21 PT */
#define T0_TRID		(16)		/* 20:16 T-RID */
#define T0_FS		(1U<<9)		/* first segment of frame */
#define T0_LS		(1U<<8)		/* last segment of frame */
#define T0_CSUM		(1U<<7)		/* enable checksum offload */
#define T0_SGOL		(1U<<6)		/* enable TCP segment offload */
#define T0_TRS		(1U<<4)		/* 5:4 TRS */
#define T0_IOC		(0)		/* XXX TBD interrupt when completed */
/* T1 segment address 63:32 */
/* T2 segment address 31:0 */
/* T3 31:16 TCP segment length, 15:0 segment length to transmit */

#define R0_OWN		(1U<<31)	/* desc is empty */
#define R0_EOD		(1U<<30)	/* end of descriptor array */
#define R0_SRID		(24)		/* 29:24 S-RID */
#define R0_FR		(1U<<23)	/* FR */
#define R0_ER		(1U<<21)	/* Rx error indication */
#define R0_ERR		(3U<<16)	/* 18:16 receive error code */
#define R0_TDRID	(14)		/* 15:14 TD-RID */
#define R0_FS		(1U<<9)		/* first segment of frame */
#define R0_LS		(1U<<8)		/* last segment of frame */
#define R0_CSUM		(3U<<6)		/* 7:6 checksum status */
#define R0_CERR		(2U<<6)		/* 0 (undone), 1 (found ok), 2 (bad) */
/* R1 frame address 63:32 */
/* R2 frame address 31:0 */
/* R3 31:16 received frame length, 15:0 buffer length to receive */
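/*
 * Editor's sketch, not part of the driver: how the T0/T3 fields above
 * combine for a minimal one-segment Tx frame. The 60-byte length and
 * the helper name are assumptions for illustration only; scx_start()
 * below is the real producer and also handles multi-segment chains
 * and the deferred first-segment OWN write.
 */
#if 0
static void
example_fill_tdes(struct tdes *t, bus_addr_t paddr)
{
	/* whole frame in one segment, so both FS and LS are set */
	t->t3 = 60;				/* 15:0 length to transmit */
	t->t2 = htole32(BUS_ADDR_LO32(paddr));	/* buffer address 31:0 */
	t->t1 = htole32(BUS_ADDR_HI32(paddr));	/* buffer address 63:32 */
	t->t0 = T0_OWN | T0_FS | T0_LS;		/* hand to hardware last */
}
#endif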
/*
 * SC2A11 NetSec registers. 0x100 - 1204
 */
#define SWRESET		0x104
#define COMINIT		0x120
#define xINTSR		0x200		/* aggregated interrupt status report */
#define  IRQ_RX		(1U<<1)		/* top level Rx interrupt */
#define  IRQ_TX		(1U<<0)		/* top level Tx interrupt */
#define xINTAEN		0x204		/* INT_A enable */
#define xINTA_SET	0x234		/* bit to set */
#define xINTA_CLR	0x238		/* bit to clr */
#define xINTBEN		0x23c		/* INT_B enable */
#define xINTB_SET	0x240		/* bit to set */
#define xINTB_CLR	0x244		/* bit to clr */
/* 0x00c - 048 */			/* pkt,tls,s0,s1 SR/IE/SET/CLR */
#define TXISR		0x400
#define TXIEN		0x404
#define TXI_SET		0x428
#define TXI_CLR		0x42c
#define  TXI_NTOWNR	(1U<<17)
#define  TXI_TR_ERR	(1U<<16)
#define  TXI_TXDONE	(1U<<15)
#define  TXI_TMREXP	(1U<<14)
#define RXISR		0x440
#define RXIEN		0x444
#define RXI_SET		0x468
#define RXI_CLR		0x46c
#define  RXI_RC_ERR	(1U<<16)
#define  RXI_PKTCNT	(1U<<15)
#define  RXI_TMREXP	(1U<<14)
#define TXTIMER		0x41c
#define RXTIMER		0x45c
#define TXCOUNT		0x410
#define RXCOUNT		0x454
#define H2MENG		0x210		/* DMAC host2media ucode port */
#define M2HENG		0x21c		/* DMAC media2host ucode port */
#define PKTENG		0x0d0		/* packet engine ucode port */
#define CLKEN		0x100		/* clock distribution enable */
#define  CLK_G		(1U<<5)
#define  CLK_ALL	0x13		/* 0x24 ??? */
#define MACADRH		0x10c		/* ??? */
#define MACADRL		0x110		/* ??? */
#define MCVER		0x22c		/* micro controller version */
#define HWVER		0x230		/* hardware version */

/* 0x800 */ /* dec Tx SR/EN/SET/CLR */
/* 0x840 */ /* enc Rx SR/EN/SET/CLR */
/* 0x880 */ /* enc TLS Tx SR/IE/SET/CLR */
/* 0x8c0 */ /* dec TLS Tx SR/IE/SET/CLR */
/* 0x900 */ /* enc TLS Rx SR/IE/SET/CLR */
/* 0x940 */ /* dec TLS Rx SR/IE/SET/CLR */
/* 0x980 */ /* enc RAW Tx SR/IE/SET/CLR */
/* 0x9c0 */ /* dec RAW Tx SR/IE/SET/CLR */
/* 0xA00 */ /* enc RAW Rx SR/IE/SET/CLR */
/* 0xA40 */ /* dec RAW Rx SR/IE/SET/CLR */

/* indirect GMAC registers. accessed through MACCMD/MACDATA operation */
#define MACCMD		0x11c4		/* gmac operation */
#define  CMD_IOWR	(1U<<28)	/* write op */
#define  CMD_BUSY	(1U<<31)	/* busy bit */
#define MACSTAT		0x1024		/* gmac status */
#define MACDATA		0x11c0		/* gmac rd/wr data */
#define MACINTE		0x1028		/* interrupt enable */
#define DESC_INIT	0x11fc		/* desc engine init */
#define DESC_SRST	0x1204		/* desc engine sw reset */

/*
 * GMAC registers. not memory mapped, but handled by indirect access.
 * Mostly identical to Synopsys DesignWare Core Ethernet.
 */
#define GMACMCR		0x0000		/* MAC configuration */
#define  MCR_IBN	(1U<<30)	/* ??? */
#define  MCR_CST	(1U<<25)	/* strip CRC */
#define  MCR_TC		(1U<<24)	/* keep RGMII PHY notified */
#define  MCR_JE		(1U<<20)	/* ignore oversized >9018 condition */
#define  MCR_IFG	(7U<<17)	/* 19:17 IFG value 0~7 */
#define  MCR_DRCS	(1U<<16)	/* ignore (G)MII HDX Tx error */
#define  MCR_USEMII	(1U<<15)	/* 1: RMII/MII, 0: RGMII (_PS) */
#define  MCR_SPD100	(1U<<14)	/* force speed 100 (_FES) */
#define  MCR_DO		(1U<<13)	/* */
#define  MCR_LOOP	(1U<<12)	/* */
#define  MCR_USEFDX	(1U<<11)	/* force full duplex */
#define  MCR_IPCEN	(1U<<10)	/* handle checksum */
#define  MCR_ACS	(1U<<7)		/* auto pad strip CRC */
#define  MCR_TE		(1U<<3)		/* run Tx MAC engine, 0 to stop */
#define  MCR_RE		(1U<<2)		/* run Rx MAC engine, 0 to stop */
#define  MCR_PREA	(3U)		/* 1:0 preamble len. 0~2 */
#define  _MCR_FDX	0x0000280c	/* XXX TBD */
#define  _MCR_HDX	0x0001a00c	/* XXX TBD */
#define GMACAFR		0x0004		/* frame DA/SA address filter */
#define  AFR_RA		(1U<<31)	/* accept all irrespective of filt. */
#define  AFR_HPF	(1U<<10)	/* hash+perfect filter, or hash only */
#define  AFR_SAF	(1U<<9)		/* source address filter */
#define  AFR_SAIF	(1U<<8)		/* SA inverse filtering */
#define  AFR_PCF	(2U<<6)		/* */
#define  AFR_DBF	(1U<<5)		/* reject broadcast frame */
#define  AFR_PM		(1U<<4)		/* accept all multicast frame */
#define  AFR_DAIF	(1U<<3)		/* DA inverse filtering */
#define  AFR_MHTE	(1U<<2)		/* use multicast hash table */
#define  AFR_UHTE	(1U<<1)		/* use hash table for unicast */
#define  AFR_PR		(1U<<0)		/* run promisc mode */
#define GMACGAR		0x0010		/* MDIO operation */
#define  GAR_PHY	(11)		/* mii phy 15:11 */
#define  GAR_REG	(6)		/* mii reg 10:6 */
#define  GAR_CTL	(2)		/* control 5:2 */
#define  GAR_IOWR	(1U<<1)		/* MDIO write op */
#define  GAR_BUSY	(1U)		/* busy bit */
#define GMACGDR		0x0014		/* MDIO rd/wr data */
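/*
 * Editor's note, worked example with assumed values: an MDIO read of
 * PHY 7, register 2, with GAR 5:2 clock selection 1 packs as
 *   (7 << GAR_PHY) | (2 << GAR_REG) | (1 << GAR_CTL) | GAR_BUSY
 * = 0x3800 | 0x0080 | 0x0004 | 0x0001 = 0x3885
 * written to GMACGAR; the result is read from GMACGDR once GAR_BUSY
 * self-clears. mii_readreg()/mii_writereg() below implement this.
 */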
#define GMACFCR		0x0018		/* 802.3x flowcontrol */
/* 31:16 pause timer value, 5:4 pause timer threshold */
#define  FCR_RFE	(1U<<2)		/* accept PAUSE to throttle Tx */
#define  FCR_TFE	(1U<<1)		/* generate PAUSE to moderate Rx lvl */
#define GMACVTAG	0x001c		/* VLAN tag control */
#define GMACIMPL	0x0020		/* implementation number XX.YY */
#define GMACLPIS	0x0030		/* AXI LPI control */
#define GMACLPIC	0x0034		/* AXI LPI control */
#define GMACISR		0x0038		/* interrupt status, clear when read */
#define GMACIMR		0x003c		/* interrupt enable */
#define  ISR_TS		(1U<<9)		/* time stamp operation detected */
#define  ISR_CO		(1U<<7)		/* Rx checksum offload completed */
#define  ISR_TX		(1U<<6)		/* Tx completed */
#define  ISR_RX		(1U<<5)		/* Rx completed */
#define  ISR_ANY	(1U<<4)		/* any of above 5-7 report */
#define  ISR_LC		(1U<<0)		/* link status change detected */
#define GMACMAH0	0x0040		/* my own MAC address 47:32 */
#define GMACMAL0	0x0044		/* my own MAC address 31:0 */
#define GMACMAH(i)	((i)*8+0x40)	/* supplemental MAC addr 1-15 */
#define GMACMAL(i)	((i)*8+0x44)	/* 31:0 MAC address low part */
/* MAH bit-31: slot in use, 30: SA to match, 29:24 byte-wise don't care */
#define GMACAMAH(i)	((i)*8+0x800)	/* supplemental MAC addr 16-31 */
#define GMACAMAL(i)	((i)*8+0x804)	/* 31:0 MAC address low part */
/* MAH bit-31: slot in use, no other bit is effective */
#define GMACMHTH	0x0008		/* 64bit multicast hash table 63:32 */
#define GMACMHTL	0x000c		/* 64bit multicast hash table 31:0 */
#define GMACMHT(i)	((i)*4+0x500)	/* 256-bit alternative mcast hash 0-7 */
#define GMACVHT		0x0588		/* 16-bit VLAN tag hash */
#define GMACMIISR	0x00d8		/* resolved xMII link status */
/* 3: link up detected, 2:1 resolved speed (0/1/2), 1: fdx detected */

/* 0x0700 - 0734 ??? */

#define GMACBMR		0x1000		/* DMA bus mode control */
/* 24    4PBL 8???
 * 23    USP
 * 22:17 RPBL
 * 16    fixed burst, or undefined b.
 * 15:14 priority between Rx and Tx
 *        3: rxtx ratio 4:1
 *        2: rxtx ratio 3:1
 *        1: rxtx ratio 2:1
 *        0: rxtx ratio 1:1
 * 13:8  PBL packet burst len
 * 7     alternative des8
 * 0     reset op. (SC)
 */
#define  _BMR		0x00412080	/* XXX TBD */
#define  _BMR0		0x00020181	/* XXX TBD */
#define  BMR_RST	(1)		/* reset op. self clear when done */
#define GMACTPD		0x1004		/* write any to resume tdes */
#define GMACRPD		0x1008		/* write any to resume rdes */
#define GMACRDLA	0x100c		/* rdes base address 32bit paddr */
#define GMACTDLA	0x1010		/* tdes base address 32bit paddr */
#define  _RDLA		0x18000		/* XXX TBD system SRAM ? */
#define  _TDLA		0x1c000		/* XXX TBD system SRAM ? */
#define GMACDSR		0x1014		/* DMA status detail report; W1C */
#define GMACOMR		0x1018		/* DMA operation */
#define  OMR_TSF	(1U<<25)	/* 1: Tx store&forward, 0: immed. */
#define  OMR_RSF	(1U<<21)	/* 1: Rx store&forward, 0: immed. */
#define  OMR_ST		(1U<<13)	/* run Tx DMA engine, 0 to stop */
#define  OMR_EFC	(1U<<8)		/* transmit PAUSE to throttle Rx lvl. */
#define  OMR_FEF	(1U<<7)		/* allow to receive error frames */
#define  OMR_RS		(1U<<1)		/* run Rx DMA engine, 0 to stop */
#define GMACIE		0x101c		/* interrupt enable */
#define GMACEVCS	0x1020		/* missed frame or ovf detected */
#define GMACRWDT	0x1024		/* receive watchdog timer count */
#define GMACAXIB	0x1028		/* AXI bus mode control */
#define GMACAXIS	0x102c		/* AXI status report */
/* 0x1048 - 1054 */			/* descriptor and buffer cur. address */
#define HWFEA		0x1058		/* feature report */

#define GMACEVCTL	0x0100		/* event counter control */
#define GMACEVCNT(i)	((i)*4+0x114)	/* event counter 0x114 - 0x284 */

/* memory mapped CSR register */
#define CSR_READ(sc,off) \
	    bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (off))
#define CSR_WRITE(sc,off,val) \
	    bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (off), (val))

/* flash memory access */
#define EE_READ(sc,off) \
	    bus_space_read_4((sc)->sc_st, (sc)->sc_eesh, (off))

/*
 * flash memory layout
 * 0x00 - 07	48-bit MAC station address. 4 byte wise in BE order.
 * 0x08 - 0b	H->MAC xfer uengine program start addr 63:32.
 * 0x0c - 0f	H2M program addr 31:0 (these are absolute addr, not relative)
 * 0x10 - 13	H2M program length in 4 byte count.
 * 0x14 - 17	M->HOST xfer uengine program start addr 63:32.
 * 0x18 - 1b	M2H program addr 31:0 (absolute, not relative)
 * 0x1c - 1f	M2H program length in 4 byte count.
 * 0x20 - 23	packet uengine program addr 31:0, (absolute, not relative)
 * 0x24 - 27	packet program length in 4 byte count.
 *
 * above ucode are loaded via mapped reg 0x210, 0x21c and 0x0d0.
 */
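/*
 * Editor's note, worked example with assumed flash contents: if word
 * 0x08 reads 0x00000002, word 0x0c reads 0x08100000 and word 0x10
 * reads 0x00000400, the H2M ucode lives at absolute address
 * 0x2_08100000 and is 0x400 words (0x1000 bytes) long. loaducode()
 * below recovers exactly these values, and injectucode() maps that
 * window and feeds it word by word into the H2MENG port register.
 */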
/*
 * all below are software construction.
 */
#define MD_NTXSEGS		16		/* fixed */
#define MD_TXQUEUELEN		8		/* tunable */
#define MD_TXQUEUELEN_MASK	(MD_TXQUEUELEN - 1)
#define MD_TXQUEUE_GC		(MD_TXQUEUELEN / 4)
#define MD_NTXDESC		128
#define MD_NTXDESC_MASK		(MD_NTXDESC - 1)
#define MD_NEXTTX(x)		(((x) + 1) & MD_NTXDESC_MASK)
#define MD_NEXTTXS(x)		(((x) + 1) & MD_TXQUEUELEN_MASK)

#define MD_NRXDESC		64		/* tunable */
#define MD_NRXDESC_MASK		(MD_NRXDESC - 1)
#define MD_NEXTRX(x)		(((x) + 1) & MD_NRXDESC_MASK)

struct control_data {
	struct tdes cd_txdescs[MD_NTXDESC];
	struct rdes cd_rxdescs[MD_NRXDESC];
};
#define SCX_CDOFF(x)		offsetof(struct control_data, x)
#define SCX_CDTXOFF(x)		SCX_CDOFF(cd_txdescs[(x)])
#define SCX_CDRXOFF(x)		SCX_CDOFF(cd_rxdescs[(x)])

struct scx_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

struct scx_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

struct scx_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_sz;		/* csr map size */
	bus_space_handle_t sc_eesh;	/* eeprom section handle */
	bus_size_t sc_eesz;		/* eeprom map size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	bus_dma_tag_t sc_dmat32;
	struct ethercom sc_ethercom;	/* Ethernet common data */
	struct mii_data sc_mii;		/* MII */
	callout_t sc_callout;		/* PHY monitor callout */
	bus_dma_segment_t sc_seg;	/* descriptor store seg */
	int sc_nseg;			/* descriptor store nseg */
	void *sc_ih;			/* interrupt cookie */
	int sc_phy_id;			/* PHY address */
	int sc_flowflags;		/* 802.3x PAUSE flow control */
	uint32_t sc_mdclk;		/* GAR 5:2 clock selection */
	uint32_t sc_t0coso;		/* T0_CSUM | T0_SGOL to run */
	int sc_ucodeloaded;		/* ucode for H2M/M2H/PKT */
	int sc_100mii;			/* 1 for RMII/MII, 0 for RGMII */
	int sc_phandle;			/* fdt phandle */
	uint64_t sc_freq;

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	struct control_data *sc_control_data;
#define sc_txdescs	sc_control_data->cd_txdescs
#define sc_rxdescs	sc_control_data->cd_rxdescs

	struct scx_txsoft sc_txsoft[MD_TXQUEUELEN];
	struct scx_rxsoft sc_rxsoft[MD_NRXDESC];
	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */
	int sc_rxptr;			/* next ready Rx descriptor/descsoft */

	krndsource_t rnd_source;	/* random source */
};

#define SCX_CDTXADDR(sc, x)	((sc)->sc_cddma + SCX_CDTXOFF((x)))
#define SCX_CDRXADDR(sc, x)	((sc)->sc_cddma + SCX_CDRXOFF((x)))

#define SCX_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > MD_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    SCX_CDTXOFF(__x), sizeof(struct tdes) *		\
		    (MD_NTXDESC - __x), (ops));				\
		__n -= (MD_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    SCX_CDTXOFF(__x), sizeof(struct tdes) * __n, (ops));	\
} while (/*CONSTCOND*/0)
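/*
 * Editor's note, worked example: with MD_NTXDESC = 128, syncing 4
 * descriptors starting at index 126 wraps the ring, so the macro
 * above issues two bus_dmamap_sync() calls, one covering indices
 * 126-127 and one covering indices 0-1.
 */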
#define SCX_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    SCX_CDRXOFF((x)), sizeof(struct rdes), (ops));		\
} while (/*CONSTCOND*/0)

#define SCX_INIT_RXDESC(sc, x)						\
do {									\
	struct scx_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct rdes *__rxd = &(sc)->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
	bus_addr_t __paddr = __rxs->rxs_dmamap->dm_segs[0].ds_addr;	\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->r3 = __rxs->rxs_dmamap->dm_segs[0].ds_len;		\
	__rxd->r2 = htole32(BUS_ADDR_LO32(__paddr));			\
	__rxd->r1 = htole32(BUS_ADDR_HI32(__paddr));			\
	__rxd->r0 = R0_OWN | R0_FS | R0_LS;				\
	if ((x) == MD_NRXDESC - 1) __rxd->r0 |= R0_EOD;			\
} while (/*CONSTCOND*/0)

static int scx_fdt_match(device_t, cfdata_t, void *);
static void scx_fdt_attach(device_t, device_t, void *);
static int scx_acpi_match(device_t, cfdata_t, void *);
static void scx_acpi_attach(device_t, device_t, void *);

const CFATTACH_DECL_NEW(scx_fdt, sizeof(struct scx_softc),
    scx_fdt_match, scx_fdt_attach, NULL, NULL);

const CFATTACH_DECL_NEW(scx_acpi, sizeof(struct scx_softc),
    scx_acpi_match, scx_acpi_attach, NULL, NULL);

static void scx_attach_i(struct scx_softc *);
static void scx_reset(struct scx_softc *);
static int scx_init(struct ifnet *);
static void scx_stop(struct ifnet *, int);
static int scx_ioctl(struct ifnet *, u_long, void *);
static void scx_set_rcvfilt(struct scx_softc *);
static void scx_start(struct ifnet *);
static void scx_watchdog(struct ifnet *);
static int scx_intr(void *);
static void txreap(struct scx_softc *);
static void rxintr(struct scx_softc *);
static int add_rxbuf(struct scx_softc *, int);
static void rxdrain(struct scx_softc *sc);
static void mii_statchg(struct ifnet *);
static void scx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int mii_readreg(device_t, int, int, uint16_t *);
static int mii_writereg(device_t, int, int, uint16_t);
static void phy_tick(void *);

static void loaducode(struct scx_softc *);
static void injectucode(struct scx_softc *, int, bus_addr_t, bus_size_t);

static int get_mdioclk(uint32_t);

#define WAIT_FOR_SET(sc, reg, set, fail) \
	    wait_for_bits(sc, reg, set, ~0, fail)
#define WAIT_FOR_CLR(sc, reg, clr, fail) \
	    wait_for_bits(sc, reg, 0, clr, fail)

static int
wait_for_bits(struct scx_softc *sc, int reg,
    uint32_t set, uint32_t clr, uint32_t fail)
{
	uint32_t val;
	int ntries;

	for (ntries = 0; ntries < 1000; ntries++) {
		val = CSR_READ(sc, reg);
		if ((val & set) || !(val & clr))
			return 0;
		if (val & fail)
			return 1;
		DELAY(1);
	}
	return 1;
}
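/*
 * Editor's note, usage example: WAIT_FOR_CLR(sc, MACCMD, CMD_BUSY, 0)
 * expands to wait_for_bits(sc, MACCMD, 0, CMD_BUSY, 0), i.e. up to
 * ~1000 CSR reads a microsecond apart until CMD_BUSY reads back zero;
 * a fail argument of 0 means no failure bit is checked. The indirect
 * GMAC access helpers below rely on this to pace the MACCMD/MACDATA
 * handshake.
 */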
/* GMAC register indirect access */
static int
mac_read(struct scx_softc *sc, int reg)
{

	CSR_WRITE(sc, MACCMD, reg);
	(void)WAIT_FOR_CLR(sc, MACCMD, CMD_BUSY, 0);
	return CSR_READ(sc, MACDATA);
}

static void
mac_write(struct scx_softc *sc, int reg, int val)
{

	CSR_WRITE(sc, MACDATA, val);
	CSR_WRITE(sc, MACCMD, reg | CMD_IOWR);
	(void)WAIT_FOR_CLR(sc, MACCMD, CMD_BUSY, 0);
}

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "socionext,synquacer-netsec" },
	DEVICE_COMPAT_EOL
};

static int
scx_fdt_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

static void
scx_fdt_attach(device_t parent, device_t self, void *aux)
{
	struct scx_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	bus_space_tag_t bst = faa->faa_bst;
	bus_space_handle_t bsh;
	bus_space_handle_t eebsh;
	bus_addr_t addr[2];
	bus_size_t size[2];
	char intrstr[128];
	const char *phy_mode;

	if (fdtbus_get_reg(phandle, 0, addr+0, size+0) != 0
	    || bus_space_map(faa->faa_bst, addr[0], size[0], 0, &bsh) != 0) {
		aprint_error(": unable to map device csr\n");
		return;
	}
	/* record the csr mapping now so the fail path can undo it */
	sc->sc_st = bst;
	sc->sc_sh = bsh;
	sc->sc_sz = size[0];
	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error(": failed to decode interrupt\n");
		goto fail;
	}
	sc->sc_ih = fdtbus_intr_establish(phandle, 0, IPL_NET,
	    NOT_MP_SAFE, scx_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt\n");
		goto fail;
	}
	if (fdtbus_get_reg(phandle, 1, addr+1, size+1) != 0
	    || bus_space_map(faa->faa_bst, addr[1], size[1], 0, &eebsh) != 0) {
		aprint_error(": unable to map device eeprom\n");
		goto fail;
	}
	sc->sc_eesh = eebsh;
	sc->sc_eesz = size[1];

	aprint_naive("\n");
	/* aprint_normal(": Gigabit Ethernet Controller\n"); */
	aprint_normal_dev(self, "interrupt on %s\n", intrstr);

	sc->sc_dev = self;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_dmat32 = faa->faa_dmat;	/* XXX */
	sc->sc_phandle = phandle;

	phy_mode = fdtbus_get_string(phandle, "phy-mode");
	if (phy_mode == NULL)
		aprint_error(": missing 'phy-mode' property\n");
	sc->sc_100mii = (phy_mode && strcmp(phy_mode, "rgmii") != 0);

	scx_attach_i(sc);
	return;
 fail:
	if (sc->sc_eesz)
		bus_space_unmap(sc->sc_st, sc->sc_eesh, sc->sc_eesz);
	if (sc->sc_sz)
		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
	return;
}

static int
scx_acpi_match(device_t parent, cfdata_t cf, void *aux)
{
	static const char * compatible[] = {
		"SCX0001",
		NULL
	};
	struct acpi_attach_args *aa = aux;

	if (aa->aa_node->ad_type != ACPI_TYPE_DEVICE)
		return 0;
	return acpi_match_hid(aa->aa_node->ad_devinfo, compatible);
}

static void
scx_acpi_attach(device_t parent, device_t self, void *aux)
{
	struct scx_softc * const sc = device_private(self);
	struct acpi_attach_args * const aa = aux;
	ACPI_HANDLE handle = aa->aa_node->ad_handle;
	bus_space_tag_t bst = aa->aa_memt;
	bus_space_handle_t bsh, eebsh;
	struct acpi_resources res;
	struct acpi_mem *mem;
	struct acpi_irq *irq;
	char *phy_mode;
	ACPI_INTEGER acpi_phy, acpi_freq;
	ACPI_STATUS rv;

	rv = acpi_resource_parse(self, handle, "_CRS",
	    &res, &acpi_resource_parse_ops_default);
	if (ACPI_FAILURE(rv))
		return;
	mem = acpi_res_mem(&res, 0);
	irq = acpi_res_irq(&res, 0);
	if (mem == NULL || irq == NULL || mem->ar_length == 0) {
		aprint_error(": incomplete csr resources\n");
		goto fail;
	}
	if (bus_space_map(bst, mem->ar_base, mem->ar_length, 0, &bsh) != 0) {
		aprint_error(": couldn't map registers\n");
		goto fail;
	}
	/* record the csr mapping now so the fail path can undo it */
	sc->sc_st = bst;
	sc->sc_sh = bsh;
	sc->sc_sz = mem->ar_length;
	sc->sc_ih = acpi_intr_establish(self, (uint64_t)handle, IPL_NET,
	    NOT_MP_SAFE, scx_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt\n");
		goto fail;
	}
	mem = acpi_res_mem(&res, 1);	/* EEPROM for MAC address and ucode */
	if (mem == NULL || mem->ar_length == 0) {
		aprint_error(": incomplete eeprom resources\n");
		goto fail;
	}
	if (bus_space_map(bst, mem->ar_base, mem->ar_length, 0, &eebsh) != 0) {
		aprint_error(": couldn't map eeprom\n");
		goto fail;
	}
	sc->sc_eesh = eebsh;
	sc->sc_eesz = mem->ar_length;

	rv = acpi_dsd_string(handle, "phy-mode", &phy_mode);
	if (ACPI_FAILURE(rv)) {
		aprint_error(": missing 'phy-mode' property\n");
		phy_mode = NULL;
	}
	rv = acpi_dsd_integer(handle, "phy-channel", &acpi_phy);
	if (ACPI_FAILURE(rv))
		acpi_phy = 31;
	rv = acpi_dsd_integer(handle, "socionext,phy-clock-frequency",
	    &acpi_freq);
	if (ACPI_FAILURE(rv))
		acpi_freq = 999;

	aprint_naive("\n");
	/* aprint_normal(": Gigabit Ethernet Controller\n"); */

	sc->sc_dev = self;
	sc->sc_dmat = aa->aa_dmat64;
	sc->sc_dmat32 = aa->aa_dmat;	/* descriptor needs dma32 */

	aprint_normal_dev(self,
	    "phy mode %s, phy id %d, freq %ju\n",
	    phy_mode, (int)acpi_phy, (uintmax_t)acpi_freq);
	sc->sc_100mii = (phy_mode && strcmp(phy_mode, "rgmii") != 0);
	sc->sc_phy_id = (int)acpi_phy;
	sc->sc_freq = acpi_freq;
	aprint_normal_dev(self,
	    "GMACGAR %08x\n", mac_read(sc, GMACGAR));

	scx_attach_i(sc);

	acpi_resource_cleanup(&res);
	return;
 fail:
	if (sc->sc_eesz > 0)
		bus_space_unmap(sc->sc_st, sc->sc_eesh, sc->sc_eesz);
	if (sc->sc_sz > 0)
		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
	acpi_resource_cleanup(&res);
	return;
}
static void
scx_attach_i(struct scx_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifmedia * const ifm = &mii->mii_media;
	uint32_t hwver, dwimp, dwfea;
	uint8_t enaddr[ETHER_ADDR_LEN];
	bus_dma_segment_t seg;
	uint32_t csr;
	int i, nseg, error = 0;

	hwver = CSR_READ(sc, HWVER);	/* Socionext version */
	dwimp = mac_read(sc, GMACIMPL);	/* DW EMAC XX.YY */
	dwfea = mac_read(sc, HWFEA);	/* DW feature */
	aprint_normal_dev(sc->sc_dev,
	    "Socionext NetSec GbE %d.%d (impl 0x%x, feature 0x%x)\n",
	    hwver >> 16, hwver & 0xffff,
	    dwimp, dwfea);

	/* fetch MAC address in flash. stored in big endian order */
	csr = EE_READ(sc, 0x00);
	enaddr[0] = csr >> 24;
	enaddr[1] = csr >> 16;
	enaddr[2] = csr >> 8;
	enaddr[3] = csr;
	csr = EE_READ(sc, 0x04);
	enaddr[4] = csr >> 24;
	enaddr[5] = csr >> 16;
	aprint_normal_dev(sc->sc_dev,
	    "Ethernet address %s\n", ether_sprintf(enaddr));
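	/*
	 * Editor's note, worked example with assumed flash contents:
	 * the byte extraction above turns word 0x00 == 0xaabbccdd and
	 * word 0x04 == 0xeeff0000 into station address aa:bb:cc:dd:ee:ff.
	 */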
	sc->sc_mdclk = get_mdioclk(sc->sc_freq); /* 5:2 clk control */

	if (sc->sc_ucodeloaded == 0)
		loaducode(sc);

	mii->mii_ifp = ifp;
	mii->mii_readreg = mii_readreg;
	mii->mii_writereg = mii_writereg;
	mii->mii_statchg = mii_statchg;

	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(ifm, 0, ether_mediachange, scx_ifmedia_sts);
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(ifm, IFM_ETHER | IFM_AUTO);
	ifm->ifm_media = ifm->ifm_cur->ifm_media; /* as if user has requested */

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat32,
	    sizeof(struct control_data), PAGE_SIZE, 0, &seg, 1, &nseg, 0);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}
	error = bus_dmamem_map(sc->sc_dmat32, &seg, nseg,
	    sizeof(struct control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	error = bus_dmamap_create(sc->sc_dmat32,
	    sizeof(struct control_data), 1,
	    sizeof(struct control_data), 0, 0, &sc->sc_cddmamap);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}
	error = bus_dmamap_load(sc->sc_dmat32, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct control_data), NULL, 0);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
	for (i = 0; i < MD_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat32, MCLBYTES,
		    MD_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}
	for (i = 0; i < MD_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat32, MCLBYTES,
		    1, MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}
	sc->sc_seg = seg;
	sc->sc_nseg = nseg;
	aprint_normal_dev(sc->sc_dev,
	    "descriptor ds_addr %jx, ds_len %jx, nseg %d\n",
	    (uintmax_t)seg.ds_addr, (uintmax_t)seg.ds_len, nseg);

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = scx_ioctl;
	ifp->if_start = scx_start;
	ifp->if_watchdog = scx_watchdog;
	ifp->if_init = scx_init;
	ifp->if_stop = scx_stop;
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_flowflags = 0;

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	callout_init(&sc->sc_callout, 0);
	callout_setfunc(&sc->sc_callout, phy_tick, sc);

	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	return;

 fail_5:
	for (i = 0; i < MD_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat32,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < MD_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat32,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat32, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat32, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat32, (void *)sc->sc_control_data,
	    sizeof(struct control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat32, &seg, nseg);
 fail_0:
	if (sc->sc_phandle)
		fdtbus_intr_disestablish(sc->sc_phandle, sc->sc_ih);
	else
		acpi_intr_disestablish(sc->sc_ih);
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
	return;
}

static void
scx_reset(struct scx_softc *sc)
{
	int loop = 0, busy;

	mac_write(sc, GMACOMR, 0);
	mac_write(sc, GMACBMR, BMR_RST);
	do {
		DELAY(1);
		busy = mac_read(sc, GMACBMR) & BMR_RST;
	} while (++loop < 3000 && busy);
	mac_write(sc, GMACBMR, _BMR);
	mac_write(sc, GMACAFR, 0);

	CSR_WRITE(sc, CLKEN, CLK_ALL);	/* distribute clock sources */
	CSR_WRITE(sc, SWRESET, 0);	/* reset operation */
	CSR_WRITE(sc, SWRESET, 1U<<31);	/* manifest run */
	CSR_WRITE(sc, COMINIT, 3);	/* DB|CLS */

	mac_write(sc, GMACEVCTL, 1);
}
static int
scx_init(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;
	const uint8_t *ea = CLLADDR(ifp->if_sadl);
	uint32_t csr;
	int i, error;

	/* Cancel pending I/O. */
	scx_stop(ifp, 0);

	/* Reset the chip to a known state. */
	scx_reset(sc);

	/* build sane Tx */
	memset(sc->sc_txdescs, 0, sizeof(struct tdes) * MD_NTXDESC);
	sc->sc_txdescs[MD_NTXDESC - 1].t0 |= T0_EOD; /* tie off the ring */
	SCX_CDTXSYNC(sc, 0, MD_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = MD_NTXDESC;
	sc->sc_txnext = 0;
	for (i = 0; i < MD_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = MD_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/* load Rx descriptors with fresh mbuf */
	for (i = 0; i < MD_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf == NULL) {
			if ((error = add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    i, error);
				rxdrain(sc);
				goto out;
			}
		}
		else
			SCX_INIT_RXDESC(sc, i);
	}
	sc->sc_rxdescs[MD_NRXDESC - 1].r0 |= R0_EOD; /* tie off, keep R0_OWN */
	sc->sc_rxptr = 0;

	/* set my address in perfect match slot 0. little endian order */
	csr = (ea[3] << 24) | (ea[2] << 16) | (ea[1] << 8) | ea[0];
	mac_write(sc, GMACMAL0, csr);
	csr = (ea[5] << 8) | ea[4];
	mac_write(sc, GMACMAH0, csr);

	/* accept multicast frame or run promisc mode */
	scx_set_rcvfilt(sc);

	/* set current media */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/* XXX 32 bit paddr XXX hand Tx/Rx rings to HW XXX */
	mac_write(sc, GMACTDLA, SCX_CDTXADDR(sc, 0));
	mac_write(sc, GMACRDLA, SCX_CDRXADDR(sc, 0));

	/* kick to start GMAC engine */
	CSR_WRITE(sc, RXI_CLR, ~0);
	CSR_WRITE(sc, TXI_CLR, ~0);
	csr = mac_read(sc, GMACOMR);
	mac_write(sc, GMACOMR, csr | OMR_RS | OMR_ST);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* start one second timer */
	callout_schedule(&sc->sc_callout, hz);
 out:
	return error;
}

static void
scx_stop(struct ifnet *ifp, int disable)
{
	struct scx_softc *sc = ifp->if_softc;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Mark the interface down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static int
scx_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct scx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifmedia *ifm = &sc->sc_mii.mii_media;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, ifm, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error != ENETRESET)
			break;
		error = 0;
		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if ((cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) &&
		    (ifp->if_flags & IFF_RUNNING)) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			scx_set_rcvfilt(sc);
		}
		break;
	}

	splx(s);
	return error;
}

static void
scx_set_rcvfilt(struct scx_softc *sc)
{
	struct ethercom * const ec = &sc->sc_ethercom;
	struct ifnet * const ifp = &ec->ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;
	uint32_t mchash[2];	/* 2x 32 = 64 bit */
	uint32_t csr, crc;
	int i;

	csr = mac_read(sc, GMACAFR);
	csr &= ~(AFR_PR | AFR_PM | AFR_MHTE | AFR_HPF);
	mac_write(sc, GMACAFR, csr);

	/* clear 15 entry supplemental perfect match filter */
	for (i = 1; i < 16; i++)
		mac_write(sc, GMACMAH(i), 0);
	/* build 64 bit multicast hash filter */
	crc = mchash[1] = mchash[0] = 0;

	ETHER_LOCK(ec);
	if (ifp->if_flags & IFF_PROMISC) {
		ec->ec_flags |= ETHER_F_ALLMULTI;
		ETHER_UNLOCK(ec);
		/* run promisc. mode */
		csr |= AFR_PR;
		goto update;
	}
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	i = 1; /* slot 0 is occupied */
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			/* accept all multi */
			csr |= AFR_PM;
			goto update;
		}
		printf("[%d] %s\n", i, ether_sprintf(enm->enm_addrlo));
		if (i < 16) {
			/* use 15 entry perfect match filter */
			uint32_t addr;
			uint8_t *ep = enm->enm_addrlo;
			addr = (ep[3] << 24) | (ep[2] << 16)
			     | (ep[1] << 8) | ep[0];
			mac_write(sc, GMACMAL(i), addr);
			addr = (ep[5] << 8) | ep[4];
			mac_write(sc, GMACMAH(i), addr | 1U<<31);
		} else {
			/* use hash table when too many */
			/* bit_reserve_32(~crc) !? */
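			/*
			 * Editor's note, worked example with an assumed
			 * CRC: if ether_crc32_le() returned 0xfc000000,
			 * crc >> 31 == 1 routes the bit into mchash[1]
			 * (GMACMHTH), and (crc >> 26) & 0x1f == 0x1f
			 * selects bit 31 of that word.
			 */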
			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
			/* 1(31) 5(30:26) bit sampling */
			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
		}
		ETHER_NEXT_MULTI(step, enm);
		i++;
	}
	ETHER_UNLOCK(ec);
	if (crc)
		csr |= AFR_MHTE;
	csr |= AFR_HPF; /* use hash+perfect */
	mac_write(sc, GMACMHTH, mchash[1]);
	mac_write(sc, GMACMHTL, mchash[0]);
 update:
	/* With PR or PM, MHTE/MHTL/MHTH are never consulted. really? */
	mac_write(sc, GMACAFR, csr);
	return;
}

static void
scx_start(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct scx_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t tdes0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Remember the previous number of free descriptors. */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (sc->sc_txsfree < MD_TXQUEUE_GC) {
			txreap(sc);
			if (sc->sc_txsfree == 0)
				break;
		}
		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				aprint_error_dev(sc->sc_dev,
				    "Tx packet consumes too many "
				    "DMA segments, dropping...\n");
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet. We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
			 * layer that there are no more slots left.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		tdes0 = 0; /* to postpone 1st segment T0_OWN write */
		lasttx = -1;
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = MD_NEXTTX(nexttx)) {
			struct tdes *tdes = &sc->sc_txdescs[nexttx];
			bus_addr_t paddr = dmamap->dm_segs[seg].ds_addr;
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet. That could cause a race condition.
			 * We'll do it below.
			 */
			tdes->t3 = dmamap->dm_segs[seg].ds_len;
			tdes->t2 = htole32(BUS_ADDR_LO32(paddr));
			tdes->t1 = htole32(BUS_ADDR_HI32(paddr));
			tdes->t0 = tdes0 | (tdes->t0 & T0_EOD) |
			    (15 << T0_TRID) | T0_PT |
			    sc->sc_t0coso | T0_TRS;
			tdes0 = T0_OWN; /* 2nd and other segments */
			lasttx = nexttx;
		}
		/*
		 * Outgoing NFS mbuf must be unloaded when Tx completed.
		 * Without T1_IC NFS mbuf is left unack'ed for excessive
		 * time and NFS stops proceeding until scx_watchdog()
		 * calls txreap() to reclaim the unack'ed mbuf.
		 * It's painful to traverse every mbuf chain to determine
		 * whether someone is waiting for Tx completion.
		 */
		m = m0;
		do {
			if ((m->m_flags & M_EXT) && m->m_ext.ext_free) {
				sc->sc_txdescs[lasttx].t0 |= T0_IOC; /* !!! */
				break;
			}
		} while ((m = m->m_next) != NULL);

		/* Write deferred 1st segment T0_OWN at the final stage */
		sc->sc_txdescs[lasttx].t0 |= T0_LS;
		sc->sc_txdescs[sc->sc_txnext].t0 |= (T0_FS | T0_OWN);
		SCX_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Tell DMA to start transmit */
		mac_write(sc, GMACTPD, 1);

		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;
		txs->txs_ndesc = dmamap->dm_nsegs;

		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;
		sc->sc_txsfree--;
		sc->sc_txsnext = MD_NEXTTXS(sc->sc_txsnext);
		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

static void
scx_watchdog(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	txreap(sc);

	if (sc->sc_txfree != MD_NTXDESC) {
		aprint_error_dev(sc->sc_dev,
		    "device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
		if_statinc(ifp, if_oerrors);

		/* Reset the interface. */
		scx_init(ifp);
	}

	scx_start(ifp);
}

static int
scx_intr(void *arg)
{
	struct scx_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	(void)ifp;
	/* XXX decode interrupt cause to pick isr() XXX */
	rxintr(sc);
	txreap(sc);
	return 1;
}

static void
txreap(struct scx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct scx_txsoft *txs;
	uint32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txsdirty; sc->sc_txsfree != MD_TXQUEUELEN;
	     i = MD_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		SCX_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = sc->sc_txdescs[txs->txs_lastdesc].t0;
		if (txstat & T0_OWN) /* desc is still in use */
			break;

		/* There is no way to tell transmission status per frame */

		if_statinc(ifp, if_opackets);

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}
	sc->sc_txsdirty = i;
	if (sc->sc_txsfree == MD_TXQUEUELEN)
		ifp->if_timer = 0;
}

static void
rxintr(struct scx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct scx_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat;
	int i, len;

	for (i = sc->sc_rxptr; /*CONSTCOND*/ 1; i = MD_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		SCX_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].r0;
		if (rxstat & R0_OWN) /* desc is left empty */
			break;

		/* R0_FS | R0_LS must have been marked for this desc */

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		len = sc->sc_rxdescs[i].r3 >> 16; /* 31:16 received */
		len -= ETHER_CRC_LEN;	/* Trim CRC off */
		m = rxs->rxs_mbuf;

		if (add_rxbuf(sc, i) != 0) {
			if_statinc(ifp, if_ierrors);
			SCX_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat,
			    rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			continue;
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		if (rxstat & R0_CSUM) {
			uint32_t csum = M_CSUM_IPv4;
			if (rxstat & R0_CERR)
				csum |= M_CSUM_IPv4_BAD;
			m->m_pkthdr.csum_flags |= csum;
		}
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}
	sc->sc_rxptr = i;
}

static int
add_rxbuf(struct scx_softc *sc, int i)
{
	struct scx_rxsoft *rxs = &sc->sc_rxsoft[i];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", i, error);
		panic("add_rxbuf");
	}
	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	SCX_INIT_RXDESC(sc, i);

	return 0;
}

static void
rxdrain(struct scx_softc *sc)
{
	struct scx_rxsoft *rxs;
	int i;

	for (i = 0; i < MD_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

static void
mii_statchg(struct ifnet *ifp)
{
	struct scx_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	const int Mbps[4] = { 10, 100, 1000, 0 };
	uint32_t miisr, mcr, fcr;
	int spd;

	/* decode MIISR register value */
	miisr = mac_read(sc, GMACMIISR);
	spd = Mbps[(miisr >> 1) & 03];
#if 1
	printf("MII link status (0x%x) %s",
	    miisr, (miisr & 8) ? "up" : "down");
	if (miisr & 8) {
		printf(" spd%d", spd);
		if (miisr & 01)
			printf(",full-duplex");
	}
	printf("\n");
#endif
	/* Get flow control negotiation result. */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags)
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;

	/* Adjust speed 1000/100/10. */
	mcr = mac_read(sc, GMACMCR);
	if (spd == 1000)
		mcr &= ~MCR_USEMII; /* RGMII+SPD1000 */
	else {
		if (spd == 100 && sc->sc_100mii)
			mcr |= MCR_SPD100;
		mcr |= MCR_USEMII;
	}
	mcr |= MCR_CST | MCR_JE;
	if (sc->sc_100mii == 0)
		mcr |= MCR_IBN;

	/* Adjust duplexity and PAUSE flow control. */
	mcr &= ~MCR_USEFDX;
	fcr = mac_read(sc, GMACFCR) & ~(FCR_TFE | FCR_RFE);
	if (miisr & 01) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE)
			fcr |= FCR_TFE;
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			fcr |= FCR_RFE;
		mcr |= MCR_USEFDX;
	}
	mac_write(sc, GMACMCR, mcr);
	mac_write(sc, GMACFCR, fcr);

	printf("%ctxfe, %crxfe\n",
	    (fcr & FCR_TFE) ? '+' : '-', (fcr & FCR_RFE) ? '+' : '-');
}
static void
scx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct scx_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = sc->sc_flowflags |
	    (mii->mii_media_active & ~IFM_ETH_FMASK);
}

static int
mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	struct scx_softc *sc = device_private(self);
	uint32_t miia;
	int ntries;

#define CLK_150_250M (1<<2)
	uint32_t clk = CSR_READ(sc, CLKEN);
	CSR_WRITE(sc, CLKEN, clk | CLK_G);

	miia = (phy << GAR_PHY) | (reg << GAR_REG) | CLK_150_250M;
	mac_write(sc, GMACGAR, miia | GAR_BUSY);
	for (ntries = 0; ntries < 1000; ntries++) {
		if ((mac_read(sc, GMACGAR) & GAR_BUSY) == 0)
			goto unbusy;
		DELAY(1);
	}
	return ETIMEDOUT;
 unbusy:
	*val = mac_read(sc, GMACGDR);
	return 0;
}

static int
mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
	struct scx_softc *sc = device_private(self);
	uint32_t miia;
	uint16_t dummy;
	int ntries;

	uint32_t clk = CSR_READ(sc, CLKEN);
	CSR_WRITE(sc, CLKEN, clk | CLK_G);

	miia = (phy << GAR_PHY) | (reg << GAR_REG) | sc->sc_mdclk;
	mac_write(sc, GMACGDR, val);
	mac_write(sc, GMACGAR, miia | GAR_IOWR | GAR_BUSY);
	for (ntries = 0; ntries < 1000; ntries++) {
		if ((mac_read(sc, GMACGAR) & GAR_BUSY) == 0)
			goto unbusy;
		DELAY(1);
	}
	return ETIMEDOUT;
 unbusy:
	mii_readreg(self, phy, MII_PHYIDR1, &dummy); /* dummy read cycle */
	return 0;
}

static void
phy_tick(void *arg)
{
	struct scx_softc *sc = arg;
	struct mii_data *mii = &sc->sc_mii;
	int s;

	s = splnet();
	mii_tick(mii);
	splx(s);
#ifdef SCX_EVENT_COUNTERS /* if tally counter details are made clear */
#endif
	callout_schedule(&sc->sc_callout, hz);
}
/*
 * 3 independent uengines exist to process host2media, media2host and
 * packet data flows.
 */
static void
loaducode(struct scx_softc *sc)
{
	uint32_t up, lo, sz;
	uint64_t addr;

	sc->sc_ucodeloaded = 1;

	up = EE_READ(sc, 0x08); /* H->M ucode addr high */
	lo = EE_READ(sc, 0x0c); /* H->M ucode addr low */
	sz = EE_READ(sc, 0x10); /* H->M ucode size */
	sz *= 4;
	addr = ((uint64_t)up << 32) | lo;
	aprint_normal_dev(sc->sc_dev, "0x%x H2M ucode %u\n", lo, sz);
	injectucode(sc, H2MENG, (bus_addr_t)addr, (bus_size_t)sz);

	up = EE_READ(sc, 0x14); /* M->H ucode addr high */
	lo = EE_READ(sc, 0x18); /* M->H ucode addr low */
	sz = EE_READ(sc, 0x1c); /* M->H ucode size */
	sz *= 4;
	addr = ((uint64_t)up << 32) | lo;
	injectucode(sc, M2HENG, (bus_addr_t)addr, (bus_size_t)sz);
	aprint_normal_dev(sc->sc_dev, "0x%x M2H ucode %u\n", lo, sz);

	lo = EE_READ(sc, 0x20); /* PKT ucode addr */
	sz = EE_READ(sc, 0x24); /* PKT ucode size */
	sz *= 4;
	injectucode(sc, PKTENG, (bus_addr_t)lo, (bus_size_t)sz);
	aprint_normal_dev(sc->sc_dev, "0x%x PKT ucode %u\n", lo, sz);
}

static void
injectucode(struct scx_softc *sc, int port,
    bus_addr_t addr, bus_size_t size)
{
	bus_space_handle_t bsh;
	bus_size_t off;
	uint32_t ucode;

	if (bus_space_map(sc->sc_st, addr, size, 0, &bsh) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "eeprom map failure for ucode port 0x%x\n", port);
		return;
	}
	for (off = 0; off < size; off += 4) {
		ucode = bus_space_read_4(sc->sc_st, bsh, off);
		CSR_WRITE(sc, port, ucode);
	}
	bus_space_unmap(sc->sc_st, bsh, size);
}

/* GAR 5:2 bit selection to determine MDIO speed */

static int
get_mdioclk(uint32_t freq)
{

	const struct {
		uint16_t freq, bit; /* GAR 5:2 MDIO frequency selection */
	} mdioclk[] = {
		{  35,	2 },	/* 25-35 MHz */
		{  60,	3 },	/* 35-60 MHz */
		{ 100,	0 },	/* 60-100 MHz */
		{ 150,	1 },	/* 100-150 MHz */
		{ 250,	4 },	/* 150-250 MHz */
		{ 300,	5 },	/* 250-300 MHz */
	};
	int i;

	freq /= 1000 * 1000;
	/*
	 * Pick the smallest range that covers the given clock and
	 * return its divisor selection pre-shifted into GAR 5:2.
	 */
	for (i = 0; i < __arraycount(mdioclk); i++) {
		if (freq <= mdioclk[i].freq)
			return mdioclk[i].bit << GAR_CTL;
	}
	return mdioclk[__arraycount(mdioclk) - 1].bit << GAR_CTL;
}
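/*
 * Editor's note, worked example with an assumed clock: a 125 MHz
 * reference (freq = 125000000) falls in the 100-150 MHz row of the
 * table above, so get_mdioclk() returns 1 << GAR_CTL == 0x04, ready
 * to be OR'ed into the GMACGAR command word by mii_writereg().
 */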