1 /* $NetBSD: if_gmc.c,v 1.15 2022/09/17 19:46:59 thorpej Exp $ */
2 /*-
3 * Copyright (c) 2008 The NetBSD Foundation, Inc.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas <matt@3am-software.com>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <sys/param.h>
32 #include <sys/callout.h>
33 #include <sys/device.h>
34 #include <sys/ioctl.h>
35 #include <sys/kernel.h>
36 #include <sys/kmem.h>
37 #include <sys/mbuf.h>
38
39 #include <sys/bus.h>
40 #include <machine/intr.h>
41
42 #include <arm/gemini/gemini_reg.h>
43 #include <arm/gemini/gemini_gmacreg.h>
44 #include <arm/gemini/gemini_gmacvar.h>
45
46 #include <net/if.h>
47 #include <net/if_ether.h>
48 #include <net/if_dl.h>
49
50 __KERNEL_RCSID(0, "$NetBSD: if_gmc.c,v 1.15 2022/09/17 19:46:59 thorpej Exp $");
51
52 #define MAX_TXSEG 32
53
/*
 * Per-port softc for one GMAC ethernet port.  Two gmc instances may
 * attach under one gmac controller (parent) and share its free queues.
 */
struct gmc_softc {
	device_t sc_dev;		/* self */
	struct gmac_softc *sc_psc;	/* parent controller softc */
	struct gmc_softc *sc_sibling;	/* the other port, if attached */
	bus_dma_tag_t sc_dmat;
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;	/* whole-controller registers */
	bus_space_handle_t sc_dma_ioh;	/* this port's DMA register window */
	bus_space_handle_t sc_gmac_ioh;	/* this port's GMAC register window */
	struct ethercom sc_ec;		/* contains sc_if (see #define below) */
	struct mii_data sc_mii;
	void *sc_ih;			/* interrupt handle (port0 only) */
	bool sc_port1;			/* true if this is port 1 */
	uint8_t sc_phy;			/* PHY address on the MII bus */
	gmac_hwqueue_t *sc_rxq;		/* default receive queue */
	gmac_hwqueue_t *sc_txq[6];	/* software transmit queues */
	callout_t sc_mii_ch;		/* 1 Hz MII tick */

	/* Cached copies of h/w registers, to avoid redundant writes. */
	uint32_t sc_gmac_status;
	uint32_t sc_gmac_sta_add[3];
	uint32_t sc_gmac_mcast_filter[2];
	uint32_t sc_gmac_rx_filter;
	uint32_t sc_gmac_config[2];
	uint32_t sc_dmavr;

	uint32_t sc_int_mask[5];	/* bits belonging to this port */
	uint32_t sc_int_enabled[5];	/* bits currently enabled */
};
82
83 #define sc_if sc_ec.ec_if
84
85 static bool
gmc_txqueue(struct gmc_softc * sc,gmac_hwqueue_t * hwq,struct mbuf * m)86 gmc_txqueue(struct gmc_softc *sc, gmac_hwqueue_t *hwq, struct mbuf *m)
87 {
88 bus_dmamap_t map;
89 uint32_t desc0, desc1, desc3;
90 struct mbuf *last_m, *m0;
91 size_t count, i;
92 int error;
93 gmac_desc_t *d;
94
95 KASSERT(hwq != NULL);
96
97 map = gmac_mapcache_get(hwq->hwq_hqm->hqm_mc);
98 if (map == NULL)
99 return false;
100
101 for (last_m = NULL, m0 = m, count = 0;
102 m0 != NULL;
103 last_m = m0, m0 = m0->m_next) {
104 vaddr_t addr = (uintptr_t)m0->m_data;
105 if (m0->m_len == 0)
106 continue;
107 if (addr & 1) {
108 if (last_m != NULL && M_TRAILINGSPACE(last_m) > 0) {
109 last_m->m_data[last_m->m_len++] = *m->m_data++;
110 m->m_len--;
111 } else if (M_TRAILINGSPACE(m0) > 0) {
112 memmove(m0->m_data + 1, m0->m_data, m0->m_len);
113 m0->m_data++;
114 } else if (M_LEADINGSPACE(m0) > 0) {
115 memmove(m0->m_data - 1, m0->m_data, m0->m_len);
116 m0->m_data--;
117 } else {
118 panic("gmc_txqueue: odd addr %p", m0->m_data);
119 }
120 }
121 count += ((addr & PGOFSET) + m->m_len + PGOFSET) >> PGSHIFT;
122 }
123
124 gmac_hwqueue_sync(hwq);
125 if (hwq->hwq_free <= count) {
126 gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
127 return false;
128 }
129
130 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
131 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
132 if (error) {
133 aprint_error_dev(sc->sc_dev, "ifstart: load failed: %d\n",
134 error);
135 gmac_mapcache_put(hwq->hwq_hqm->hqm_mc, map);
136 m_freem(m);
137 if_statinc(&sc->sc_if, if_oerrors);
138 return true;
139 }
140 KASSERT(map->dm_nsegs > 0);
141
142 /*
143 * Sync the mbuf contents to memory/cache.
144 */
145 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
146 BUS_DMASYNC_PREWRITE);
147
148 /*
149 * Now we need to load the descriptors...
150 */
151 desc0 = map->dm_nsegs << 16;
152 desc1 = m->m_pkthdr.len;
153 desc3 = DESC3_SOF;
154 i = 0;
155 d = NULL;
156 do {
157 #if 0
158 if (i > 0)
159 aprint_debug_dev(sc->sc_dev,
160 "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
161 i-1, d, d->d_desc0, d->d_desc1,
162 d->d_bufaddr, d->d_desc3);
163 #endif
164 d = gmac_hwqueue_desc(hwq, i);
165 KASSERT(map->dm_segs[i].ds_len > 0);
166 KASSERT((map->dm_segs[i].ds_addr & 1) == 0);
167 d->d_desc0 = htole32(map->dm_segs[i].ds_len | desc0);
168 d->d_desc1 = htole32(desc1);
169 d->d_bufaddr = htole32(map->dm_segs[i].ds_addr);
170 d->d_desc3 = htole32(desc3);
171 desc3 = 0;
172 } while (++i < map->dm_nsegs);
173
174 d->d_desc3 |= htole32(DESC3_EOF | DESC3_EOFIE);
175 #if 0
176 aprint_debug_dev(sc->sc_dev,
177 "gmac_txqueue: %zu@%p=%#x/%#x/%#x/%#x\n",
178 i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
179 #endif
180 M_SETCTX(m, map);
181 IF_ENQUEUE(&hwq->hwq_ifq, m);
182 /*
183 * Last descriptor has been marked. Give them to the h/w.
184 * This will sync for us.
185 */
186 gmac_hwqueue_produce(hwq, map->dm_nsegs);
187 #if 0
188 aprint_debug_dev(sc->sc_dev,
189 "gmac_txqueue: *%zu@%p=%#x/%#x/%#x/%#x\n",
190 i-1, d, d->d_desc0, d->d_desc1, d->d_bufaddr, d->d_desc3);
191 #endif
192 return true;
193 }
194
/*
 * (Re)program the unicast station address, multicast hash filter, and
 * RX filter mode from the current interface state.  Each register is
 * written only when its cached value actually changed.
 */
static void
gmc_filter_change(struct gmc_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t mhash[2];
	uint32_t new0, new1, new2;
	const char * const eaddr = CLLADDR(sc->sc_if.if_sadl);

	/* Pack the 6-byte MAC address into STA_ADD0/STA_ADD1. */
	new0 = eaddr[0] | ((eaddr[1] | (eaddr[2] | (eaddr[3] << 8)) << 8) << 8);
	new1 = eaddr[4] | (eaddr[5] << 8);
	new2 = 0;
	if (sc->sc_gmac_sta_add[0] != new0
	    || sc->sc_gmac_sta_add[1] != new1
	    || sc->sc_gmac_sta_add[2] != new2) {
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD0,
		    new0);
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD1,
		    new1);
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STA_ADD2,
		    new2);
		sc->sc_gmac_sta_add[0] = new0;
		sc->sc_gmac_sta_add[1] = new1;
		sc->sc_gmac_sta_add[2] = new2;
	}

	/*
	 * Build the 64-bit multicast hash from the CRC32 of each
	 * address.  A range entry (addrlo != addrhi) can't be matched
	 * by hashing, so fall back to accepting all multicast.
	 */
	mhash[0] = 0;
	mhash[1] = 0;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		size_t i;
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			mhash[0] = mhash[1] = 0xffffffff;
			break;
		}
		i = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
		mhash[(i >> 5) & 1] |= 1 << (i & 31);
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	if (sc->sc_gmac_mcast_filter[0] != mhash[0]
	    || sc->sc_gmac_mcast_filter[1] != mhash[1]) {
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
		    GMAC_MCAST_FILTER0, mhash[0]);
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh,
		    GMAC_MCAST_FILTER1, mhash[1]);
		sc->sc_gmac_mcast_filter[0] = mhash[0];
		sc->sc_gmac_mcast_filter[1] = mhash[1];
	}

	/* Always accept broadcast/unicast/multicast; add promisc on demand. */
	new0 = sc->sc_gmac_rx_filter & ~RXFILTER_PROMISC;
	new0 |= RXFILTER_BROADCAST | RXFILTER_UNICAST | RXFILTER_MULTICAST;
	if (sc->sc_if.if_flags & IFF_PROMISC)
		new0 |= RXFILTER_PROMISC;

	if (new0 != sc->sc_gmac_rx_filter) {
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_RX_FILTER,
		    new0);
		sc->sc_gmac_rx_filter = new0;
	}
}
259
/*
 * Once-a-second callout: trim the receive mbuf watermark back down
 * when traffic is light, tick the MII, and reschedule while running.
 */
static void
gmc_mii_tick(void *arg)
{
	struct gmc_softc * const sc = arg;
	struct gmac_softc * const psc = sc->sc_psc;
	int s = splnet();

	/*
	 * If we had to increase the number of receive mbufs due to fifo
	 * overflows, we need a way to decrease them.  So every second we
	 * receive less than or equal to MIN_RXMAPS packets, we decrement
	 * swfree_min until it returns to MIN_RXMAPS.
	 */
	if (psc->sc_rxpkts_per_sec <= MIN_RXMAPS
	    && psc->sc_swfree_min > MIN_RXMAPS) {
		psc->sc_swfree_min--;
		gmac_swfree_min_update(psc);
	}
	/*
	 * If only one GMAC is running or this is port0, reset the count.
	 * (The counter is shared with the sibling port; when both ports
	 * run, only one of the two ticks resets it.)
	 */
	if (psc->sc_running != 3 || !sc->sc_port1)
		psc->sc_rxpkts_per_sec = 0;

	mii_tick(&sc->sc_mii);
	/* Keep ticking as long as the interface stays up. */
	if (sc->sc_if.if_flags & IFF_RUNNING)
		callout_schedule(&sc->sc_mii_ch, hz);

	splx(s);
}
290
291 static int
gmc_mediachange(struct ifnet * ifp)292 gmc_mediachange(struct ifnet *ifp)
293 {
294 struct gmc_softc * const sc = ifp->if_softc;
295
296 if ((ifp->if_flags & IFF_UP) == 0)
297 return 0;
298
299 return mii_mediachg(&sc->sc_mii);
300 }
301
302 static void
gmc_mediastatus(struct ifnet * ifp,struct ifmediareq * ifmr)303 gmc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
304 {
305 struct gmc_softc * const sc = ifp->if_softc;
306
307 mii_pollstat(&sc->sc_mii);
308 ifmr->ifm_status = sc->sc_mii.mii_media_status;
309 ifmr->ifm_active = sc->sc_mii.mii_media_active;
310 }
311
/*
 * MII status-change callback: mirror the negotiated speed, duplex,
 * and link state into the GMAC_STATUS register (write only on change).
 */
static void
gmc_mii_statchg(struct ifnet *ifp)
{
	struct gmc_softc * const sc = ifp->if_softc;
	uint32_t gmac_status;

	gmac_status = sc->sc_gmac_status;

	/* This port is wired RGMII (mode A). */
	gmac_status &= ~STATUS_PHYMODE_MASK;
	gmac_status |= STATUS_PHYMODE_RGMII_A;

	gmac_status &= ~STATUS_SPEED_MASK;
	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T) {
		gmac_status |= STATUS_SPEED_1000M;
	} else if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX) {
		gmac_status |= STATUS_SPEED_100M;
	} else {
		gmac_status |= STATUS_SPEED_10M;
	}

	if (sc->sc_mii.mii_media_active & IFM_FDX)
		gmac_status |= STATUS_DUPLEX_FULL;
	else
		gmac_status &= ~STATUS_DUPLEX_FULL;

	if (sc->sc_mii.mii_media_status & IFM_ACTIVE)
		gmac_status |= STATUS_LINK_ON;
	else
		gmac_status &= ~STATUS_LINK_ON;

	if (sc->sc_gmac_status != gmac_status) {
		aprint_debug_dev(sc->sc_dev,
		    "status change old=%#x new=%#x active=%#x\n",
		    sc->sc_gmac_status, gmac_status,
		    sc->sc_mii.mii_media_active);
		sc->sc_gmac_status = gmac_status;
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_STATUS,
		    sc->sc_gmac_status);
	}

	/*
	 * Write 0x0041 to PHY register 0x18 on every status change --
	 * presumably a vendor-specific PHY tweak (LED/skew setup?).
	 * NOTE(review): undocumented magic; confirm against the PHY
	 * datasheet before changing.
	 */
	(*sc->sc_mii.mii_writereg)(sc->sc_dev, sc->sc_phy, 0x0018, 0x0041);
}
354
355 static int
gmc_ifioctl(struct ifnet * ifp,u_long cmd,void * data)356 gmc_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
357 {
358 struct gmc_softc * const sc = ifp->if_softc;
359 int s;
360 int error;
361 s = splnet();
362
363 switch (cmd) {
364 default:
365 error = ether_ioctl(ifp, cmd, data);
366 if (error == ENETRESET) {
367 if (ifp->if_flags & IFF_RUNNING) {
368 /*
369 * If the interface is running, we have to
370 * update its multicast filter.
371 */
372 gmc_filter_change(sc);
373 }
374 error = 0;
375 }
376 }
377
378 splx(s);
379 return error;
380 }
381
382 static void
gmc_ifstart(struct ifnet * ifp)383 gmc_ifstart(struct ifnet *ifp)
384 {
385 struct gmc_softc * const sc = ifp->if_softc;
386
387 #if 0
388 if ((sc->sc_gmac_status & STATUS_LINK_ON) == 0)
389 return;
390 #endif
391 if ((ifp->if_flags & IFF_RUNNING) == 0)
392 return;
393
394 for (;;) {
395 struct mbuf *m;
396 IF_POLL(&ifp->if_snd, m);
397 if (m == NULL)
398 break;
399 if (!gmc_txqueue(sc, sc->sc_txq[0], m)) {
400 break;
401 }
402 IF_DEQUEUE(&ifp->if_snd, m);
403 }
404 }
405
/*
 * Interface stop routine: withdraw this port's interrupt enables from
 * the shared controller state and reroute the shared software free
 * queue interrupt to the surviving port, if any.
 */
static void
gmc_ifstop(struct ifnet *ifp, int disable)
{
	struct gmc_softc * const sc = ifp->if_softc;
	struct gmac_softc * const psc = sc->sc_psc;

	psc->sc_running &= ~(sc->sc_port1 ? 2 : 1);
	psc->sc_int_enabled[0] &= ~sc->sc_int_enabled[0];
	psc->sc_int_enabled[1] &= ~sc->sc_int_enabled[1];
	psc->sc_int_enabled[2] &= ~sc->sc_int_enabled[2];
	psc->sc_int_enabled[3] &= ~sc->sc_int_enabled[3];
	/*
	 * Precedence note: ~ binds tighter than |, so this clears our
	 * INT4 enables EXCEPT the shared SW_FREEQ_EMPTY bit, which must
	 * stay enabled while the other port is still running.
	 */
	psc->sc_int_enabled[4] &= ~sc->sc_int_enabled[4] | INT4_SW_FREEQ_EMPTY;
	if (psc->sc_running == 0) {
		/* Last port down: now SW_FREEQ_EMPTY can go too. */
		psc->sc_int_enabled[4] &= ~INT4_SW_FREEQ_EMPTY;
		KASSERT(psc->sc_int_enabled[0] == 0);
		KASSERT(psc->sc_int_enabled[1] == 0);
		KASSERT(psc->sc_int_enabled[2] == 0);
		KASSERT(psc->sc_int_enabled[3] == 0);
		KASSERT(psc->sc_int_enabled[4] == 0);
	} else if (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0)
		   == sc->sc_port1) {
		/*
		 * The freeq-empty interrupt was routed to us; flip it
		 * over to the still-running sibling port.
		 */
		psc->sc_int_select[4] &= ~INT4_SW_FREEQ_EMPTY;
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
		    psc->sc_int_select[4]);
	}
	gmac_intr_update(psc);
	if (disable) {
#if 0
		if (psc->sc_running == 0) {
			gmac_mapcache_destroy(&psc->sc_txmaps);
			gmac_mapcache_destroy(&psc->sc_rxmaps);
		}
#endif
	}
}
441
/*
 * Interface init routine: create the receive and transmit hardware
 * queues (first call only), program the filters, DMA and MAC config
 * registers, route the shared free-queue interrupt, enable this
 * port's interrupts, and mark the interface running.
 *
 * Returns 0 on success, ENOMEM if queue creation fails (after
 * unwinding via gmc_ifstop()).
 */
static int
gmc_ifinit(struct ifnet *ifp)
{
	struct gmc_softc * const sc = ifp->if_softc;
	struct gmac_softc * const psc = sc->sc_psc;
	uint32_t new, mask;

	/* Make sure the shared DMA map caches are primed. */
	gmac_mapcache_fill(psc->sc_rxmaps, MIN_RXMAPS);
	gmac_mapcache_fill(psc->sc_txmaps, MIN_TXMAPS);

	/* Create this port's default receive queue (once). */
	if (sc->sc_rxq == NULL) {
		gmac_hwqmem_t *hqm;
		hqm = gmac_hwqmem_create(psc->sc_rxmaps, 16, /*RXQ_NDESCS,*/ 1,
		   HQM_CONSUMER | HQM_RX);
		sc->sc_rxq = gmac_hwqueue_create(hqm, sc->sc_iot,
		    sc->sc_ioh, GMAC_DEF_RXQn_RWPTR(sc->sc_port1),
		    GMAC_DEF_RXQn_BASE(sc->sc_port1), 0);
		if (sc->sc_rxq == NULL) {
			gmac_hwqmem_destroy(hqm);
			goto failed;
		}
		sc->sc_rxq->hwq_ifp = ifp;
		sc->sc_rxq->hwq_producer = psc->sc_swfreeq;
	}

	/*
	 * Create the six software transmit queues (once), inserting
	 * each into the hardware free queue's producer list sorted by
	 * queue offset.
	 */
	if (sc->sc_txq[0] == NULL) {
		gmac_hwqueue_t *hwq, *last_hwq;
		gmac_hwqmem_t *hqm;
		size_t i;

		hqm = gmac_hwqmem_create(psc->sc_txmaps, TXQ_NDESCS, 6,
		   HQM_PRODUCER | HQM_TX);
		KASSERT(hqm != NULL);
		for (i = 0; i < __arraycount(sc->sc_txq); i++) {
			sc->sc_txq[i] = gmac_hwqueue_create(hqm, sc->sc_iot,
			    sc->sc_dma_ioh, GMAC_SW_TX_Qn_RWPTR(i),
			    GMAC_SW_TX_Q_BASE, i);
			if (sc->sc_txq[i] == NULL) {
				/* hqm is owned by txq[0] once created. */
				if (i == 0)
					gmac_hwqmem_destroy(hqm);
				goto failed;
			}
			sc->sc_txq[i]->hwq_ifp = ifp;

			/* Sorted insert by hwq_qoff. */
			last_hwq = NULL;
			SLIST_FOREACH(hwq, &psc->sc_hwfreeq->hwq_producers,
			    hwq_link) {
				if (sc->sc_txq[i]->hwq_qoff < hwq->hwq_qoff)
					break;
				last_hwq = hwq;
			}
			if (last_hwq == NULL)
				SLIST_INSERT_HEAD(
				    &psc->sc_hwfreeq->hwq_producers,
				    sc->sc_txq[i], hwq_link);
			else
				SLIST_INSERT_AFTER(last_hwq, sc->sc_txq[i],
				    hwq_link);
		}
	}

	gmc_filter_change(sc);

	/* Program the DMA control register (write only on change). */
	mask = DMAVR_LOOPBACK | DMAVR_DROP_SMALL_ACK | DMAVR_EXTRABYTES_MASK
	   | DMAVR_RXBURSTSIZE_MASK | DMAVR_RXBUSWIDTH_MASK
	   | DMAVR_TXBURSTSIZE_MASK | DMAVR_TXBUSWIDTH_MASK;
	new = DMAVR_RXDMA_ENABLE | DMAVR_TXDMA_ENABLE
	    | DMAVR_EXTRABYTES(2)
	    | DMAVR_RXBURSTSIZE(DMAVR_BURSTSIZE_32W)
	    | DMAVR_RXBUSWIDTH(DMAVR_BUSWIDTH_32BITS)
	    | DMAVR_TXBURSTSIZE(DMAVR_BURSTSIZE_32W)
	    | DMAVR_TXBUSWIDTH(DMAVR_BUSWIDTH_32BITS);
	new |= sc->sc_dmavr & ~mask;
	if (sc->sc_dmavr != new) {
		sc->sc_dmavr = new;
		bus_space_write_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR,
		    sc->sc_dmavr);
		aprint_debug_dev(sc->sc_dev, "gmc_ifinit: dmavr=%#x/%#x\n",
		    sc->sc_dmavr,
		    bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR));
	}

	/* Program MAC CONFIG0: enable rx/tx, 1536-byte max frame. */
	mask = CONFIG0_MAXLEN_MASK | CONFIG0_TX_DISABLE | CONFIG0_RX_DISABLE
	    | CONFIG0_LOOPBACK |/*CONFIG0_SIM_TEST|*/CONFIG0_INVERSE_RXC_RGMII
	    | CONFIG0_RGMII_INBAND_STATUS_ENABLE;
	new = CONFIG0_MAXLEN(CONFIG0_MAXLEN_1536) | CONFIG0_R_LATCHED_MMII;
	new |= (sc->sc_gmac_config[0] & ~mask);
	if (sc->sc_gmac_config[0] != new) {
		sc->sc_gmac_config[0] = new;
		bus_space_write_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0,
		    sc->sc_gmac_config[0]);
		aprint_debug_dev(sc->sc_dev, "gmc_ifinit: config0=%#x/%#x\n",
		    sc->sc_gmac_config[0],
		    bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh, GMAC_CONFIG0));
	}

	/* Prime the software free queue with receive buffers. */
	psc->sc_rxpkts_per_sec +=
	    gmac_rxproduce(psc->sc_swfreeq, psc->sc_swfree_min);

	/*
	 * If we will be the only active interface, make sure the sw freeq
	 * interrupt gets routed to use.
	 */
	if (psc->sc_running == 0
	    && (((psc->sc_int_select[4] & INT4_SW_FREEQ_EMPTY) != 0) != sc->sc_port1)) {
		psc->sc_int_select[4] ^= INT4_SW_FREEQ_EMPTY;
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK,
		    psc->sc_int_select[4]);
	}
	/* Enable this port's interrupt sources and merge into the parent. */
	sc->sc_int_enabled[0] = sc->sc_int_mask[0]
	    & (INT0_TXDERR|INT0_TXPERR|INT0_RXDERR|INT0_RXPERR|INT0_SWTXQ_EOF);
	sc->sc_int_enabled[1] = sc->sc_int_mask[1] & INT1_DEF_RXQ_EOF;
	sc->sc_int_enabled[4] = INT4_SW_FREEQ_EMPTY | (sc->sc_int_mask[4]
	    & (INT4_TX_FAIL | INT4_MIB_HEMIWRAP | INT4_RX_FIFO_OVRN
	       | INT4_RGMII_STSCHG));

	psc->sc_int_enabled[0] |= sc->sc_int_enabled[0];
	psc->sc_int_enabled[1] |= sc->sc_int_enabled[1];
	psc->sc_int_enabled[4] |= sc->sc_int_enabled[4];

	gmac_intr_update(psc);

	/* Kick the MII once if we were not already running. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		mii_tick(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	psc->sc_running |= (sc->sc_port1 ? 2 : 1);

	callout_schedule(&sc->sc_mii_ch, hz);

	return 0;

failed:
	gmc_ifstop(ifp, true);
	return ENOMEM;
}
578
579 static int
gmc_intr(void * arg)580 gmc_intr(void *arg)
581 {
582 struct gmc_softc * const sc = arg;
583 uint32_t int0_status, int1_status, int4_status;
584 uint32_t status;
585 bool do_ifstart = false;
586 int rv = 0;
587
588 aprint_debug_dev(sc->sc_dev, "gmac_intr: entry\n");
589
590 int0_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
591 GMAC_INT0_STATUS);
592 int1_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
593 GMAC_INT1_STATUS);
594 int4_status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
595 GMAC_INT4_STATUS);
596
597 aprint_debug_dev(sc->sc_dev, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
598 int0_status, int1_status,
599 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
600 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
601 int4_status);
602
603 #if 0
604 aprint_debug_dev(sc->sc_dev, "gmac_intr: mask=%#x/%#x/%#x/%#x/%#x\n",
605 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_MASK),
606 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_MASK),
607 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_MASK),
608 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_MASK),
609 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_MASK));
610 #endif
611
612 status = int0_status & sc->sc_int_mask[0];
613 if (status & (INT0_TXDERR | INT0_TXPERR)) {
614 aprint_error_dev(sc->sc_dev,
615 "transmit%s%s error: %#x %08x bufaddr %#x\n",
616 status & INT0_TXDERR ? " data" : "",
617 status & INT0_TXPERR ? " protocol" : "",
618 bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
619 GMAC_DMA_TX_CUR_DESC),
620 bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
621 GMAC_SW_TX_Q0_RWPTR),
622 bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
623 GMAC_DMA_TX_DESC2));
624 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS,
625 status & (INT0_TXDERR | INT0_TXPERR));
626 Debugger();
627 }
628 if (status & (INT0_RXDERR | INT0_RXPERR)) {
629 aprint_error_dev(sc->sc_dev,
630 "receive%s%s error: %#x %#x=%#x/%#x/%#x/%#x\n",
631 status & INT0_RXDERR ? " data" : "",
632 status & INT0_RXPERR ? " protocol" : "",
633 bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
634 GMAC_DMA_RX_CUR_DESC),
635 bus_space_read_4(sc->sc_iot, sc->sc_ioh,
636 GMAC_SWFREEQ_RWPTR),
637 bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
638 GMAC_DMA_RX_DESC0),
639 bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
640 GMAC_DMA_RX_DESC1),
641 bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
642 GMAC_DMA_RX_DESC2),
643 bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh,
644 GMAC_DMA_RX_DESC3));
645 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS,
646 status & (INT0_RXDERR | INT0_RXPERR));
647 Debugger();
648 }
649 if (status & INT0_SWTXQ_EOF) {
650 status &= INT0_SWTXQ_EOF;
651 for (int i = 0; status && i < __arraycount(sc->sc_txq); i++) {
652 if (status & INT0_SWTXQn_EOF(i)) {
653 gmac_hwqueue_sync(sc->sc_txq[i]);
654 bus_space_write_4(sc->sc_iot, sc->sc_ioh,
655 GMAC_INT0_STATUS,
656 sc->sc_int_mask[0] & (INT0_SWTXQn_EOF(i)
657 | INT0_SWTXQn_FIN(i)));
658 status &= ~INT0_SWTXQn_EOF(i);
659 }
660 }
661 do_ifstart = true;
662 rv = 1;
663 }
664
665 if (int4_status & INT4_SW_FREEQ_EMPTY) {
666 struct gmac_softc * const psc = sc->sc_psc;
667 psc->sc_rxpkts_per_sec +=
668 gmac_rxproduce(psc->sc_swfreeq, psc->sc_swfree_min);
669 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS,
670 status & INT4_SW_FREEQ_EMPTY);
671 rv = 1;
672 }
673
674 status = int1_status & sc->sc_int_mask[1];
675 if (status & INT1_DEF_RXQ_EOF) {
676 struct gmac_softc * const psc = sc->sc_psc;
677 psc->sc_rxpkts_per_sec +=
678 gmac_hwqueue_consume(sc->sc_rxq, psc->sc_swfree_min);
679 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS,
680 status & INT1_DEF_RXQ_EOF);
681 rv = 1;
682 }
683
684 status = int4_status & sc->sc_int_enabled[4];
685 if (status & INT4_TX_FAIL) {
686 }
687 if (status & INT4_MIB_HEMIWRAP) {
688 }
689 if (status & INT4_RX_XON) {
690 }
691 if (status & INT4_RX_XOFF) {
692 }
693 if (status & INT4_TX_XON) {
694 }
695 if (status & INT4_TX_XOFF) {
696 }
697 if (status & INT4_RX_FIFO_OVRN) {
698 #if 0
699 if (sc->sc_psc->sc_swfree_min < MAX_RXMAPS) {
700 sc->sc_psc->sc_swfree_min++;
701 gmac_swfree_min_update(psc);
702 }
703 #endif
704 if_statinc(&sc->sc_if, if_ierrors);
705 }
706 if (status & INT4_RGMII_STSCHG) {
707 mii_pollstat(&sc->sc_mii);
708 }
709 bus_space_write_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS, status);
710
711 if (do_ifstart)
712 if_schedule_deferred_start(&sc->sc_if);
713
714 aprint_debug_dev(sc->sc_dev, "gmac_intr: sts=%#x/%#x/%#x/%#x/%#x\n",
715 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT0_STATUS),
716 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT1_STATUS),
717 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT2_STATUS),
718 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT3_STATUS),
719 bus_space_read_4(sc->sc_iot, sc->sc_ioh, GMAC_INT4_STATUS));
720 aprint_debug_dev(sc->sc_dev, "gmac_intr: exit rv=%d\n", rv);
721 return rv;
722 }
723
724 static int
gmc_match(device_t parent,cfdata_t cf,void * aux)725 gmc_match(device_t parent, cfdata_t cf, void *aux)
726 {
727 struct gmac_softc *psc = device_private(parent);
728 struct gmac_attach_args *gma = aux;
729
730 if ((unsigned int)gma->gma_phy > 31)
731 return 0;
732 if ((unsigned int)gma->gma_port > 1)
733 return 0;
734 if (gma->gma_intr < 1 || gma->gma_intr > 2)
735 return 0;
736
737 if (psc->sc_ports & (1 << gma->gma_port))
738 return 0;
739
740 return 1;
741 }
742
/*
 * Autoconfiguration attach: claim the port, carve out its DMA and GMAC
 * register windows, set up the ifnet/ethercom/MII machinery, attach
 * the interface, snapshot the current hardware register state into
 * the softc caches, and (for port0) establish the interrupt.
 */
static void
gmc_attach(device_t parent, device_t self, void *aux)
{
	struct gmac_softc * const psc = device_private(parent);
	struct gmc_softc * const sc = device_private(self);
	struct gmac_attach_args *gma = aux;
	struct ifnet * const ifp = &sc->sc_if;
	struct mii_data * const mii = &sc->sc_mii;
	/*
	 * Hard-coded placeholder MAC addresses, one per port.
	 * NOTE(review): real hardware presumably has the address in
	 * ROM/EEPROM somewhere -- these should be replaced.
	 */
	static const char eaddrs[2][6] = {
		"\x0\x52\xc3\x11\x22\x33",
		"\x0\x52\xc3\x44\x55\x66",
	};

	psc->sc_ports |= 1 << gma->gma_port;
	sc->sc_port1 = (gma->gma_port == 1);
	sc->sc_phy = gma->gma_phy;

	sc->sc_dev = self;
	sc->sc_psc = psc;
	sc->sc_iot = psc->sc_iot;
	sc->sc_ioh = psc->sc_ioh;
	sc->sc_dmat = psc->sc_dmat;

	/* Per-port DMA and GMAC register subregions. */
	bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    GMAC_PORTn_DMA_OFFSET(gma->gma_port), GMAC_PORTn_DMA_SIZE,
	    &sc->sc_dma_ioh);
	bus_space_subregion(sc->sc_iot, sc->sc_ioh,
	    GMAC_PORTn_GMAC_OFFSET(gma->gma_port), GMAC_PORTn_GMAC_SIZE,
	    &sc->sc_gmac_ioh);
	aprint_normal("\n");
	aprint_naive("\n");

	/* Network interface setup. */
	strlcpy(ifp->if_xname, device_xname(self), sizeof(ifp->if_xname));
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_softc = sc;
	ifp->if_ioctl = gmc_ifioctl;
	ifp->if_stop = gmc_ifstop;
	ifp->if_start = gmc_ifstart;
	ifp->if_init = gmc_ifinit;

	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
	sc->sc_ec.ec_mii = mii;

	/* MII setup: register access callbacks come from the parent. */
	mii->mii_ifp = ifp;
	mii->mii_statchg = gmc_mii_statchg;
	mii->mii_readreg = gma->gma_mii_readreg;
	mii->mii_writereg = gma->gma_mii_writereg;

	ifmedia_init(&mii->mii_media, 0, gmc_mediachange, gmc_mediastatus);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, eaddrs[gma->gma_port]);
	mii_attach(sc->sc_dev, mii, 0xffffffff,
	    gma->gma_phy, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		/* No PHY found; fall back to a "none" media entry. */
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
		// ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX);
	}

	/* Snapshot current hardware state into the softc caches. */
	sc->sc_gmac_status = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STATUS);
	sc->sc_gmac_sta_add[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STA_ADD0);
	sc->sc_gmac_sta_add[1] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STA_ADD1);
	sc->sc_gmac_sta_add[2] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_STA_ADD2);
	sc->sc_gmac_mcast_filter[0] = bus_space_read_4(sc->sc_iot,
	    sc->sc_gmac_ioh, GMAC_MCAST_FILTER0);
	sc->sc_gmac_mcast_filter[1] = bus_space_read_4(sc->sc_iot,
	    sc->sc_gmac_ioh, GMAC_MCAST_FILTER1);
	sc->sc_gmac_rx_filter = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_RX_FILTER);
	sc->sc_gmac_config[0] = bus_space_read_4(sc->sc_iot, sc->sc_gmac_ioh,
	    GMAC_CONFIG0);
	sc->sc_dmavr = bus_space_read_4(sc->sc_iot, sc->sc_dma_ioh, GMAC_DMAVR);

	/* sc->sc_int_enabled is already zeroed */
	sc->sc_int_mask[0] = (sc->sc_port1 ? INT0_GMAC1 : INT0_GMAC0);
	sc->sc_int_mask[1] = (sc->sc_port1 ? INT1_GMAC1 : INT1_GMAC0);
	sc->sc_int_mask[2] = (sc->sc_port1 ? INT2_GMAC1 : INT2_GMAC0);
	sc->sc_int_mask[3] = (sc->sc_port1 ? INT3_GMAC1 : INT3_GMAC0);
	sc->sc_int_mask[4] = (sc->sc_port1 ? INT4_GMAC1 : INT4_GMAC0);

	/*
	 * Only port0 establishes the interrupt handler; port1 is
	 * presumably serviced through the same handler -- confirm
	 * against the controller's interrupt wiring.
	 */
	if (!sc->sc_port1) {
		sc->sc_ih = intr_establish(gma->gma_intr, IPL_NET, IST_LEVEL_HIGH,
		   gmc_intr, sc);
		KASSERT(sc->sc_ih != NULL);
	}

	callout_init(&sc->sc_mii_ch, 0);
	callout_setfunc(&sc->sc_mii_ch, gmc_mii_tick, sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	   ether_sprintf(CLLADDR(sc->sc_if.if_sadl)));
}
846
/* Autoconfiguration glue: gmc instances attach under the gmac controller. */
CFATTACH_DECL_NEW(gmc, sizeof(struct gmc_softc),
    gmc_match, gmc_attach, NULL, NULL);
849