1 /* $NetBSD: if_cpsw.c,v 1.17 2023/02/27 21:15:09 sekiya Exp $ */
2
3 /*
4 * Copyright (c) 2013 Jonathan A. Kollasch
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*-
30 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 */
54
55 #include <sys/cdefs.h>
56 __KERNEL_RCSID(1, "$NetBSD: if_cpsw.c,v 1.17 2023/02/27 21:15:09 sekiya Exp $");
57
58 #include <sys/param.h>
59 #include <sys/bus.h>
60 #include <sys/device.h>
61 #include <sys/ioctl.h>
62 #include <sys/intr.h>
63 #include <sys/kmem.h>
64 #include <sys/mutex.h>
65 #include <sys/systm.h>
66 #include <sys/kernel.h>
67
68 #include <net/if.h>
69 #include <net/if_ether.h>
70 #include <net/if_media.h>
71 #include <net/bpf.h>
72
73 #include <dev/mii/mii.h>
74 #include <dev/mii/miivar.h>
75
76 #include <dev/fdt/fdtvar.h>
77
78 #include <arm/ti/if_cpswreg.h>
79
80 #define FDT_INTR_FLAGS 0
81
82 #define CPSW_TXFRAGS 16
83
84 #define CPSW_CPPI_RAM_SIZE (0x2000)
85 #define CPSW_CPPI_RAM_TXDESCS_SIZE (CPSW_CPPI_RAM_SIZE/2)
86 #define CPSW_CPPI_RAM_RXDESCS_SIZE \
87 (CPSW_CPPI_RAM_SIZE - CPSW_CPPI_RAM_TXDESCS_SIZE)
88 #define CPSW_CPPI_RAM_TXDESCS_BASE (CPSW_CPPI_RAM_OFFSET + 0x0000)
89 #define CPSW_CPPI_RAM_RXDESCS_BASE \
90 (CPSW_CPPI_RAM_OFFSET + CPSW_CPPI_RAM_TXDESCS_SIZE)
91
92 #define CPSW_NTXDESCS (CPSW_CPPI_RAM_TXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
93 #define CPSW_NRXDESCS (CPSW_CPPI_RAM_RXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
94
95 CTASSERT(powerof2(CPSW_NTXDESCS));
96 CTASSERT(powerof2(CPSW_NRXDESCS));
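/*
 * Descriptor layout note: the 8 KB CPPI RAM is split evenly between the
 * TX and RX rings, and with the four-word (16-byte) cpsw_cpdma_bd this
 * works out to 256 descriptors per ring.  The CTASSERTs above ensure the
 * counts stay powers of two so ring indices can wrap with a simple mask.
 */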
97
98 #undef CPSW_DEBUG_DMA /* define this for DMA debugging */
99
100 #define CPSW_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
101
102 #define TXDESC_NEXT(x) cpsw_txdesc_adjust((x), 1)
103 #define TXDESC_PREV(x) cpsw_txdesc_adjust((x), -1)
104
105 #define RXDESC_NEXT(x) cpsw_rxdesc_adjust((x), 1)
106 #define RXDESC_PREV(x) cpsw_rxdesc_adjust((x), -1)
107
108 struct cpsw_ring_data {
109 bus_dmamap_t tx_dm[CPSW_NTXDESCS];
110 struct mbuf *tx_mb[CPSW_NTXDESCS];
111 bus_dmamap_t rx_dm[CPSW_NRXDESCS];
112 struct mbuf *rx_mb[CPSW_NRXDESCS];
113 };
114
115 struct cpsw_softc {
116 device_t sc_dev;
117 bus_space_tag_t sc_bst;
118 bus_space_handle_t sc_bsh;
119 bus_size_t sc_bss;
120 bus_dma_tag_t sc_bdt;
121 bus_space_handle_t sc_bsh_txdescs;
122 bus_space_handle_t sc_bsh_rxdescs;
123 bus_addr_t sc_txdescs_pa;
124 bus_addr_t sc_rxdescs_pa;
125 struct ethercom sc_ec;
126 struct mii_data sc_mii;
127 bool sc_phy_has_1000t;
128 bool sc_attached;
129 callout_t sc_tick_ch;
130 void *sc_ih;
131 struct cpsw_ring_data *sc_rdp;
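	/*
	 * Ring state: sc_txnext is the producer index written by
	 * cpsw_start(), sc_txhead the consumer index advanced by
	 * cpsw_txintr(), and sc_rxhead the next RX descriptor expected
	 * to complete.
	 */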
132 volatile u_int sc_txnext;
133 volatile u_int sc_txhead;
134 volatile u_int sc_rxhead;
135 bool sc_txbusy;
136 void *sc_rxthih;
137 void *sc_rxih;
138 void *sc_txih;
139 void *sc_miscih;
140 void *sc_txpad;
141 bus_dmamap_t sc_txpad_dm;
142 #define sc_txpad_pa sc_txpad_dm->dm_segs[0].ds_addr
143 uint8_t sc_enaddr[ETHER_ADDR_LEN];
144 volatile bool sc_txrun;
145 volatile bool sc_rxrun;
146 volatile bool sc_txeoq;
147 volatile bool sc_rxeoq;
148 };
149
150 static int cpsw_match(device_t, cfdata_t, void *);
151 static void cpsw_attach(device_t, device_t, void *);
152 static int cpsw_detach(device_t, int);
153
154 static void cpsw_start(struct ifnet *);
155 static int cpsw_ioctl(struct ifnet *, u_long, void *);
156 static void cpsw_watchdog(struct ifnet *);
157 static int cpsw_init(struct ifnet *);
158 static void cpsw_stop(struct ifnet *, int);
159
160 static int cpsw_mii_readreg(device_t, int, int, uint16_t *);
161 static int cpsw_mii_writereg(device_t, int, int, uint16_t);
162 static void cpsw_mii_statchg(struct ifnet *);
163
164 static int cpsw_new_rxbuf(struct cpsw_softc * const, const u_int);
165 static void cpsw_tick(void *);
166
167 static int cpsw_rxthintr(void *);
168 static int cpsw_rxintr(void *);
169 static int cpsw_txintr(void *);
170 static int cpsw_miscintr(void *);
171
172 /* ALE support */
173 #define CPSW_MAX_ALE_ENTRIES 1024
174
175 static int cpsw_ale_update_addresses(struct cpsw_softc *, int purge);
176
177 CFATTACH_DECL_NEW(cpsw, sizeof(struct cpsw_softc),
178 cpsw_match, cpsw_attach, cpsw_detach, NULL);
179
180 #include <sys/kernhist.h>
181 KERNHIST_DEFINE(cpswhist);
182
183 #define CPSWHIST_CALLARGS(A,B,C,D) do { \
184 KERNHIST_CALLARGS(cpswhist, "%jx %jx %jx %jx", \
185 (uintptr_t)(A), (uintptr_t)(B), (uintptr_t)(C), (uintptr_t)(D));\
186 } while (0)
187
188
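/*
 * Ring index helpers: the descriptor counts are powers of two, so moving
 * an index forward or backward is an add-and-mask, e.g.
 * TXDESC_NEXT(CPSW_NTXDESCS - 1) wraps back to 0.
 */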
189 static inline u_int
190 cpsw_txdesc_adjust(u_int x, int y)
191 {
192 return (((x) + y) & (CPSW_NTXDESCS - 1));
193 }
194
195 static inline u_int
196 cpsw_rxdesc_adjust(u_int x, int y)
197 {
198 return (((x) + y) & (CPSW_NRXDESCS - 1));
199 }
200
201 static inline uint32_t
202 cpsw_read_4(struct cpsw_softc * const sc, bus_size_t const offset)
203 {
204 return bus_space_read_4(sc->sc_bst, sc->sc_bsh, offset);
205 }
206
207 static inline void
208 cpsw_write_4(struct cpsw_softc * const sc, bus_size_t const offset,
209 uint32_t const value)
210 {
211 bus_space_write_4(sc->sc_bst, sc->sc_bsh, offset, value);
212 }
213
214 static inline void
215 cpsw_set_txdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
216 {
217 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
218
219 KERNHIST_FUNC(__func__);
220 CPSWHIST_CALLARGS(sc, i, n, 0);
221
222 bus_space_write_4(sc->sc_bst, sc->sc_bsh_txdescs, o, n);
223 }
224
225 static inline void
226 cpsw_set_rxdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
227 {
228 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
229
230 KERNHIST_FUNC(__func__);
231 CPSWHIST_CALLARGS(sc, i, n, 0);
232
233 bus_space_write_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, n);
234 }
235
236 static inline void
237 cpsw_get_txdesc(struct cpsw_softc * const sc, const u_int i,
238 struct cpsw_cpdma_bd * const bdp)
239 {
240 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
241 uint32_t * const dp = bdp->word;
242 const bus_size_t c = __arraycount(bdp->word);
243
244 KERNHIST_FUNC(__func__);
245 CPSWHIST_CALLARGS(sc, i, bdp, 0);
246
247 bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
248 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
249 dp[0], dp[1], dp[2], dp[3]);
250 }
251
252 static inline void
253 cpsw_set_txdesc(struct cpsw_softc * const sc, const u_int i,
254 struct cpsw_cpdma_bd * const bdp)
255 {
256 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
257 uint32_t * const dp = bdp->word;
258 const bus_size_t c = __arraycount(bdp->word);
259
260 KERNHIST_FUNC(__func__);
261 CPSWHIST_CALLARGS(sc, i, bdp, 0);
262 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
263 dp[0], dp[1], dp[2], dp[3]);
264
265 bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o, dp, c);
266 }
267
268 static inline void
269 cpsw_get_rxdesc(struct cpsw_softc * const sc, const u_int i,
270 struct cpsw_cpdma_bd * const bdp)
271 {
272 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
273 uint32_t * const dp = bdp->word;
274 const bus_size_t c = __arraycount(bdp->word);
275
276 KERNHIST_FUNC(__func__);
277 CPSWHIST_CALLARGS(sc, i, bdp, 0);
278
279 bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);
280
281 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
282 dp[0], dp[1], dp[2], dp[3]);
283 }
284
285 static inline void
286 cpsw_set_rxdesc(struct cpsw_softc * const sc, const u_int i,
287 struct cpsw_cpdma_bd * const bdp)
288 {
289 const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
290 uint32_t * const dp = bdp->word;
291 const bus_size_t c = __arraycount(bdp->word);
292
293 KERNHIST_FUNC(__func__);
294 CPSWHIST_CALLARGS(sc, i, bdp, 0);
295 KERNHIST_LOG(cpswhist, "%08x %08x %08x %08x\n",
296 dp[0], dp[1], dp[2], dp[3]);
297
298 bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, dp, c);
299 }
300
301 static inline bus_addr_t
302 cpsw_txdesc_paddr(struct cpsw_softc * const sc, u_int x)
303 {
304 KASSERT(x < CPSW_NTXDESCS);
305 return sc->sc_txdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
306 }
307
308 static inline bus_addr_t
309 cpsw_rxdesc_paddr(struct cpsw_softc * const sc, u_int x)
310 {
311 KASSERT(x < CPSW_NRXDESCS);
312 return sc->sc_rxdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
313 }
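/*
 * The accessors above copy whole descriptors to and from the on-chip
 * descriptor SRAM with bus_space region operations; the *_paddr helpers
 * turn a ring index into the physical address that goes into a
 * descriptor's next pointer or a head/completion-pointer register.
 */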
314
315 static const struct device_compatible_entry compat_data[] = {
316 { .compat = "ti,am335x-cpsw-switch" },
317 { .compat = "ti,am335x-cpsw" },
318 { .compat = "ti,cpsw" },
319 DEVICE_COMPAT_EOL
320 };
321
322 static int
323 cpsw_match(device_t parent, cfdata_t cf, void *aux)
324 {
325 struct fdt_attach_args * const faa = aux;
326
327 return of_compatible_match(faa->faa_phandle, compat_data);
328 }
329
330 static bool
331 cpsw_phy_has_1000t(struct cpsw_softc * const sc)
332 {
333 struct ifmedia_entry *ifm;
334
335 TAILQ_FOREACH(ifm, &sc->sc_mii.mii_media.ifm_list, ifm_list) {
336 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T)
337 return true;
338 }
339 return false;
340 }
341
342 static int
343 cpsw_detach(device_t self, int flags)
344 {
345 struct cpsw_softc * const sc = device_private(self);
346 struct ifnet *ifp = &sc->sc_ec.ec_if;
347 u_int i;
348
349 /* Succeed now if there's no work to do. */
350 if (!sc->sc_attached)
351 return 0;
352
353 sc->sc_attached = false;
354
355 /* Stop the interface. Callouts are stopped in it. */
356 cpsw_stop(ifp, 1);
357
358 /* Destroy our callout. */
359 callout_destroy(&sc->sc_tick_ch);
360
361 /* Let go of the interrupts */
362 intr_disestablish(sc->sc_rxthih);
363 intr_disestablish(sc->sc_rxih);
364 intr_disestablish(sc->sc_txih);
365 intr_disestablish(sc->sc_miscih);
366
367 ether_ifdetach(ifp);
368 if_detach(ifp);
369
370 /* Delete all media. */
371 ifmedia_fini(&sc->sc_mii.mii_media);
372
373 /* Free the packet padding buffer */
374 kmem_free(sc->sc_txpad, ETHER_MIN_LEN);
375 bus_dmamap_destroy(sc->sc_bdt, sc->sc_txpad_dm);
376
377 /* Destroy all the descriptors */
378 for (i = 0; i < CPSW_NTXDESCS; i++)
379 bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->tx_dm[i]);
380 for (i = 0; i < CPSW_NRXDESCS; i++)
381 bus_dmamap_destroy(sc->sc_bdt, sc->sc_rdp->rx_dm[i]);
382 kmem_free(sc->sc_rdp, sizeof(*sc->sc_rdp));
383
384 /* Unmap */
385 bus_space_unmap(sc->sc_bst, sc->sc_bsh, sc->sc_bss);
386
387
388 return 0;
389 }
390
391 static void
392 cpsw_attach(device_t parent, device_t self, void *aux)
393 {
394 struct fdt_attach_args * const faa = aux;
395 struct cpsw_softc * const sc = device_private(self);
396 struct ethercom * const ec = &sc->sc_ec;
397 struct ifnet * const ifp = &ec->ec_if;
398 struct mii_data * const mii = &sc->sc_mii;
399 const int phandle = faa->faa_phandle;
400 const uint8_t *macaddr;
401 bus_addr_t addr;
402 bus_size_t size;
403 int error, slave, len;
404 char xname[16];
405 u_int i;
406
407 KERNHIST_INIT(cpswhist, 4096);
408
409 if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
410 aprint_error(": couldn't get registers\n");
411 return;
412 }
413
414 sc->sc_dev = self;
415
416 aprint_normal(": TI Layer 2 3-Port Switch\n");
417 aprint_naive("\n");
418
419 callout_init(&sc->sc_tick_ch, 0);
420 callout_setfunc(&sc->sc_tick_ch, cpsw_tick, sc);
421
422 macaddr = NULL;
423 slave = of_find_firstchild_byname(phandle, "slave");
424 if (slave == -1) {
425 slave = of_find_firstchild_byname(phandle, "ethernet-ports");
426 if (slave != -1) {
427 slave = of_find_firstchild_byname(slave, "port");
428 }
429 }
430 if (slave != -1) {
431 macaddr = fdtbus_get_prop(slave, "mac-address", &len);
432 if (len != ETHER_ADDR_LEN)
433 macaddr = NULL;
434 }
435 if (macaddr == NULL) {
436 #if 0
437 /* grab mac_id0 from AM335x control module */
438 uint32_t reg_lo, reg_hi;
439
440 if (sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_LO, &reg_lo) == 0 &&
441 sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_HI, &reg_hi) == 0) {
442 sc->sc_enaddr[0] = (reg_hi >> 0) & 0xff;
443 sc->sc_enaddr[1] = (reg_hi >> 8) & 0xff;
444 sc->sc_enaddr[2] = (reg_hi >> 16) & 0xff;
445 sc->sc_enaddr[3] = (reg_hi >> 24) & 0xff;
446 sc->sc_enaddr[4] = (reg_lo >> 0) & 0xff;
447 sc->sc_enaddr[5] = (reg_lo >> 8) & 0xff;
448 } else
449 #endif
450 {
451 aprint_error_dev(sc->sc_dev,
452 "using fake station address\n");
453 /* 'N' happens to have the Local bit set */
454 #if 0
455 sc->sc_enaddr[0] = 'N';
456 sc->sc_enaddr[1] = 'e';
457 sc->sc_enaddr[2] = 't';
458 sc->sc_enaddr[3] = 'B';
459 sc->sc_enaddr[4] = 'S';
460 sc->sc_enaddr[5] = 'D';
461 #else
462 /* XXX Glor */
463 sc->sc_enaddr[0] = 0xd4;
464 sc->sc_enaddr[1] = 0x94;
465 sc->sc_enaddr[2] = 0xa1;
466 sc->sc_enaddr[3] = 0x97;
467 sc->sc_enaddr[4] = 0x03;
468 sc->sc_enaddr[5] = 0x94;
469 #endif
470 }
471 } else {
472 memcpy(sc->sc_enaddr, macaddr, ETHER_ADDR_LEN);
473 }
474
475 snprintf(xname, sizeof(xname), "%s rxth", device_xname(self));
476 sc->sc_rxthih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_RXTH,
477 IPL_VM, FDT_INTR_FLAGS, cpsw_rxthintr, sc, xname);
478
479 snprintf(xname, sizeof(xname), "%s rx", device_xname(self));
480 sc->sc_rxih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_RX,
481 IPL_VM, FDT_INTR_FLAGS, cpsw_rxintr, sc, xname);
482
483 snprintf(xname, sizeof(xname), "%s tx", device_xname(self));
484 sc->sc_txih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_TX,
485 IPL_VM, FDT_INTR_FLAGS, cpsw_txintr, sc, xname);
486
487 snprintf(xname, sizeof(xname), "%s misc", device_xname(self));
488 sc->sc_miscih = fdtbus_intr_establish_xname(phandle, CPSW_INTROFF_MISC,
489 IPL_VM, FDT_INTR_FLAGS, cpsw_miscintr, sc, xname);
490
491 sc->sc_bst = faa->faa_bst;
492 sc->sc_bss = size;
493 sc->sc_bdt = faa->faa_dmat;
494
495 error = bus_space_map(sc->sc_bst, addr, size, 0,
496 &sc->sc_bsh);
497 if (error) {
498 aprint_error_dev(sc->sc_dev,
499 "can't map registers: %d\n", error);
500 return;
501 }
502
503 sc->sc_txdescs_pa = addr + CPSW_CPPI_RAM_TXDESCS_BASE;
504 error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
505 CPSW_CPPI_RAM_TXDESCS_BASE, CPSW_CPPI_RAM_TXDESCS_SIZE,
506 &sc->sc_bsh_txdescs);
507 if (error) {
508 aprint_error_dev(sc->sc_dev,
509 "can't subregion tx ring SRAM: %d\n", error);
510 return;
511 }
512 aprint_debug_dev(sc->sc_dev, "txdescs at %p\n",
513 (void *)sc->sc_bsh_txdescs);
514
515 sc->sc_rxdescs_pa = addr + CPSW_CPPI_RAM_RXDESCS_BASE;
516 error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
517 CPSW_CPPI_RAM_RXDESCS_BASE, CPSW_CPPI_RAM_RXDESCS_SIZE,
518 &sc->sc_bsh_rxdescs);
519 if (error) {
520 aprint_error_dev(sc->sc_dev,
521 "can't subregion rx ring SRAM: %d\n", error);
522 return;
523 }
524 aprint_debug_dev(sc->sc_dev, "rxdescs at %p\n",
525 (void *)sc->sc_bsh_rxdescs);
526
527 sc->sc_rdp = kmem_alloc(sizeof(*sc->sc_rdp), KM_SLEEP);
528
529 for (i = 0; i < CPSW_NTXDESCS; i++) {
530 if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES,
531 CPSW_TXFRAGS, MCLBYTES, 0, 0,
532 &sc->sc_rdp->tx_dm[i])) != 0) {
533 aprint_error_dev(sc->sc_dev,
534 "unable to create tx DMA map: %d\n", error);
535 }
536 sc->sc_rdp->tx_mb[i] = NULL;
537 }
538
539 for (i = 0; i < CPSW_NRXDESCS; i++) {
540 if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES, 1,
541 MCLBYTES, 0, 0, &sc->sc_rdp->rx_dm[i])) != 0) {
542 aprint_error_dev(sc->sc_dev,
543 "unable to create rx DMA map: %d\n", error);
544 }
545 sc->sc_rdp->rx_mb[i] = NULL;
546 }
547
548 sc->sc_txpad = kmem_zalloc(ETHER_MIN_LEN, KM_SLEEP);
549 bus_dmamap_create(sc->sc_bdt, ETHER_MIN_LEN, 1, ETHER_MIN_LEN, 0,
550 BUS_DMA_WAITOK, &sc->sc_txpad_dm);
551 bus_dmamap_load(sc->sc_bdt, sc->sc_txpad_dm, sc->sc_txpad,
552 ETHER_MIN_LEN, NULL, BUS_DMA_WAITOK | BUS_DMA_WRITE);
553 bus_dmamap_sync(sc->sc_bdt, sc->sc_txpad_dm, 0, ETHER_MIN_LEN,
554 BUS_DMASYNC_PREWRITE);
555
556 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
557 ether_sprintf(sc->sc_enaddr));
558
559 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
560 ifp->if_softc = sc;
561 ifp->if_capabilities = 0;
562 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
563 ifp->if_start = cpsw_start;
564 ifp->if_ioctl = cpsw_ioctl;
565 ifp->if_init = cpsw_init;
566 ifp->if_stop = cpsw_stop;
567 ifp->if_watchdog = cpsw_watchdog;
568 IFQ_SET_READY(&ifp->if_snd);
569
570 cpsw_stop(ifp, 0);
571
572 mii->mii_ifp = ifp;
573 mii->mii_readreg = cpsw_mii_readreg;
574 mii->mii_writereg = cpsw_mii_writereg;
575 mii->mii_statchg = cpsw_mii_statchg;
576
577 sc->sc_ec.ec_mii = mii;
578 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
579
580 /* Initialize MDIO */
581 cpsw_write_4(sc, MDIOCONTROL,
582 MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));
583 /* Clear ALE */
584 cpsw_write_4(sc, CPSW_ALE_CONTROL, ALECTL_CLEAR_TABLE);
585
586 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, 0, 0);
587 if (LIST_FIRST(&mii->mii_phys) == NULL) {
588 aprint_error_dev(self, "no PHY found!\n");
589 sc->sc_phy_has_1000t = false;
590 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
591 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
592 } else {
593 sc->sc_phy_has_1000t = cpsw_phy_has_1000t(sc);
594
595 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
596 }
597
598 if_attach(ifp);
599 if_deferred_start_init(ifp, NULL);
600 ether_ifattach(ifp, sc->sc_enaddr);
601
602 /* The attach is successful. */
603 sc->sc_attached = true;
604
605 return;
606 }
607
608 static void
609 cpsw_start(struct ifnet *ifp)
610 {
611 struct cpsw_softc * const sc = ifp->if_softc;
612 struct cpsw_ring_data * const rdp = sc->sc_rdp;
613 struct cpsw_cpdma_bd bd;
614 uint32_t * const dw = bd.word;
615 struct mbuf *m;
616 bus_dmamap_t dm;
617 u_int eopi __diagused = ~0;
618 u_int seg;
619 u_int txfree;
620 int txstart = -1;
621 int error;
622 bool pad;
623 u_int mlen;
624
625 KERNHIST_FUNC(__func__);
626 CPSWHIST_CALLARGS(sc, 0, 0, 0);
627
628 if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) {
629 return;
630 }
631 if (__predict_false(sc->sc_txbusy)) {
632 return;
633 }
634
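/*
 * Count free TX descriptors; one slot is always kept unused so a full
 * ring can be told apart from an empty one.
 */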
635 if (sc->sc_txnext >= sc->sc_txhead)
636 txfree = CPSW_NTXDESCS - 1 + sc->sc_txhead - sc->sc_txnext;
637 else
638 txfree = sc->sc_txhead - sc->sc_txnext - 1;
639
640 KERNHIST_LOG(cpswhist, "start txf %x txh %x txn %x txr %x\n",
641 txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);
642
643 while (txfree > 0) {
644 IFQ_POLL(&ifp->if_snd, m);
645 if (m == NULL)
646 break;
647
648 dm = rdp->tx_dm[sc->sc_txnext];
649
650 error = bus_dmamap_load_mbuf(sc->sc_bdt, dm, m, BUS_DMA_NOWAIT);
651 if (error == EFBIG) {
652 device_printf(sc->sc_dev, "won't fit\n");
653 IFQ_DEQUEUE(&ifp->if_snd, m);
654 m_freem(m);
655 if_statinc(ifp, if_oerrors);
656 continue;
657 } else if (error != 0) {
658 device_printf(sc->sc_dev, "error\n");
659 break;
660 }
661
662 if (dm->dm_nsegs + 1 >= txfree) {
663 sc->sc_txbusy = true;
664 bus_dmamap_unload(sc->sc_bdt, dm);
665 break;
666 }
667
668 mlen = m_length(m);
669 pad = mlen < CPSW_PAD_LEN;
670
671 KASSERT(rdp->tx_mb[sc->sc_txnext] == NULL);
672 rdp->tx_mb[sc->sc_txnext] = m;
673 IFQ_DEQUEUE(&ifp->if_snd, m);
674
675 bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
676 BUS_DMASYNC_PREWRITE);
677
678 if (txstart == -1)
679 txstart = sc->sc_txnext;
680 eopi = sc->sc_txnext;
681 for (seg = 0; seg < dm->dm_nsegs; seg++) {
682 dw[0] = cpsw_txdesc_paddr(sc,
683 TXDESC_NEXT(sc->sc_txnext));
684 dw[1] = dm->dm_segs[seg].ds_addr;
685 dw[2] = dm->dm_segs[seg].ds_len;
686 dw[3] = 0;
687
688 if (seg == 0)
689 dw[3] |= CPDMA_BD_SOP | CPDMA_BD_OWNER |
690 MAX(mlen, CPSW_PAD_LEN);
691
692 if ((seg == dm->dm_nsegs - 1) && !pad)
693 dw[3] |= CPDMA_BD_EOP;
694
695 cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
696 txfree--;
697 eopi = sc->sc_txnext;
698 sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
699 }
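/*
 * Runt frames get one extra descriptor pointing at the shared
 * zero-filled pad buffer, so at least CPSW_PAD_LEN bytes of payload are
 * handed to the hardware.
 */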
700 if (pad) {
701 dw[0] = cpsw_txdesc_paddr(sc,
702 TXDESC_NEXT(sc->sc_txnext));
703 dw[1] = sc->sc_txpad_pa;
704 dw[2] = CPSW_PAD_LEN - mlen;
705 dw[3] = CPDMA_BD_EOP;
706
707 cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
708 txfree--;
709 eopi = sc->sc_txnext;
710 sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
711 }
712
713 bpf_mtap(ifp, m, BPF_D_OUT);
714 }
715
716 if (txstart >= 0) {
717 ifp->if_timer = 5;
718 /* terminate the new chain */
719 KASSERT(eopi == TXDESC_PREV(sc->sc_txnext));
720 cpsw_set_txdesc_next(sc, TXDESC_PREV(sc->sc_txnext), 0);
721 KERNHIST_LOG(cpswhist, "CP %x HDP %x s %x e %x\n",
722 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
723 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), txstart, eopi);
724 /* link the new chain on */
725 cpsw_set_txdesc_next(sc, TXDESC_PREV(txstart),
726 cpsw_txdesc_paddr(sc, txstart));
727 if (sc->sc_txeoq) {
728 /* kick the dma engine */
729 sc->sc_txeoq = false;
730 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
731 cpsw_txdesc_paddr(sc, txstart));
732 }
733 }
734 KERNHIST_LOG(cpswhist, "end txf %x txh %x txn %x txr %x\n",
735 txfree, sc->sc_txhead, sc->sc_txnext, sc->sc_txrun);
736 }
737
738 static int
739 cpsw_ioctl(struct ifnet *ifp, u_long cmd, void *data)
740 {
741 const int s = splnet();
742 int error = 0;
743
744 switch (cmd) {
745 default:
746 error = ether_ioctl(ifp, cmd, data);
747 if (error == ENETRESET) {
748 error = 0;
749 }
750 break;
751 }
752
753 splx(s);
754
755 return error;
756 }
757
758 static void
759 cpsw_watchdog(struct ifnet *ifp)
760 {
761 struct cpsw_softc *sc = ifp->if_softc;
762
763 device_printf(sc->sc_dev, "device timeout\n");
764
765 if_statinc(ifp, if_oerrors);
766 cpsw_init(ifp);
767 cpsw_start(ifp);
768 }
769
770 static int
771 cpsw_mii_wait(struct cpsw_softc * const sc, int reg)
772 {
773 u_int tries;
774
775 for (tries = 0; tries < 1000; tries++) {
776 if ((cpsw_read_4(sc, reg) & __BIT(31)) == 0)
777 return 0;
778 delay(1);
779 }
780 return ETIMEDOUT;
781 }
782
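/*
 * MDIOUSERACCESS0 layout as used below: bit 31 is the GO/busy bit polled
 * by cpsw_mii_wait(), bit 30 selects a write, bit 29 reports the PHY's
 * ACK, bits 25:21 and 20:16 carry the register and PHY addresses, and
 * the low 16 bits hold the data.
 */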
783 static int
784 cpsw_mii_readreg(device_t dev, int phy, int reg, uint16_t *val)
785 {
786 struct cpsw_softc * const sc = device_private(dev);
787 uint32_t v;
788
789 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
790 return -1;
791
792 cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) |
793 ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));
794
795 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
796 return -1;
797
798 v = cpsw_read_4(sc, MDIOUSERACCESS0);
799 if (v & __BIT(29)) {
800 *val = v & 0xffff;
801 return 0;
802 }
803
804 return -1;
805 }
806
807 static int
808 cpsw_mii_writereg(device_t dev, int phy, int reg, uint16_t val)
809 {
810 struct cpsw_softc * const sc = device_private(dev);
811 uint32_t v;
812
813 KASSERT((val & 0xffff0000UL) == 0);
814
815 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
816 goto out;
817
818 cpsw_write_4(sc, MDIOUSERACCESS0, (1 << 31) | (1 << 30) |
819 ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16) | val);
820
821 if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
822 goto out;
823
824 v = cpsw_read_4(sc, MDIOUSERACCESS0);
825 if ((v & __BIT(29)) == 0) {
826 out:
827 device_printf(sc->sc_dev, "%s error\n", __func__);
828 return -1;
829 }
830
831 return 0;
832 }
833
834 static void
835 cpsw_mii_statchg(struct ifnet *ifp)
836 {
837 return;
838 }
839
840 static int
841 cpsw_new_rxbuf(struct cpsw_softc * const sc, const u_int i)
842 {
843 struct cpsw_ring_data * const rdp = sc->sc_rdp;
844 const u_int h = RXDESC_PREV(i);
845 struct cpsw_cpdma_bd bd;
846 uint32_t * const dw = bd.word;
847 struct mbuf *m;
848 int error = ENOBUFS;
849
850 MGETHDR(m, M_DONTWAIT, MT_DATA);
851 if (m == NULL) {
852 goto reuse;
853 }
854
855 MCLGET(m, M_DONTWAIT);
856 if ((m->m_flags & M_EXT) == 0) {
857 m_freem(m);
858 goto reuse;
859 }
860
861 /* We have a new buffer, prepare it for the ring. */
862
863 if (rdp->rx_mb[i] != NULL)
864 bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
865
866 m->m_len = m->m_pkthdr.len = MCLBYTES;
867
868 rdp->rx_mb[i] = m;
869
870 error = bus_dmamap_load_mbuf(sc->sc_bdt, rdp->rx_dm[i], rdp->rx_mb[i],
871 BUS_DMA_READ | BUS_DMA_NOWAIT);
872 if (error) {
873 device_printf(sc->sc_dev, "can't load rx DMA map %d: %d\n",
874 i, error);
875 }
876
877 bus_dmamap_sync(sc->sc_bdt, rdp->rx_dm[i],
878 0, rdp->rx_dm[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
879
880 error = 0;
881
882 reuse:
883 /* (re-)setup the descriptor */
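/*
 * Word 1 is the buffer pointer, word 2 the buffer length (clamped to the
 * 11-bit field the hardware provides), and word 3 hands the descriptor
 * to the hardware by setting the OWNER bit.
 */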
884 dw[0] = 0;
885 dw[1] = rdp->rx_dm[i]->dm_segs[0].ds_addr;
886 dw[2] = MIN(0x7ff, rdp->rx_dm[i]->dm_segs[0].ds_len);
887 dw[3] = CPDMA_BD_OWNER;
888
889 cpsw_set_rxdesc(sc, i, &bd);
890 /* and link onto ring */
891 cpsw_set_rxdesc_next(sc, h, cpsw_rxdesc_paddr(sc, i));
892
893 return error;
894 }
895
896 static int
897 cpsw_init(struct ifnet *ifp)
898 {
899 struct cpsw_softc * const sc = ifp->if_softc;
900 struct mii_data * const mii = &sc->sc_mii;
901 int i;
902
903 cpsw_stop(ifp, 0);
904
905 sc->sc_txnext = 0;
906 sc->sc_txhead = 0;
907
908 /* Reset wrapper */
909 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
910 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
911 ;
912
913 /* Reset SS */
914 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
915 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
916 ;
917
918 /* Clear table and enable ALE */
919 cpsw_write_4(sc, CPSW_ALE_CONTROL,
920 ALECTL_ENABLE_ALE | ALECTL_CLEAR_TABLE);
921
922 /* Reset and init Sliver port 1 and 2 */
923 for (i = 0; i < CPSW_ETH_PORTS; i++) {
924 uint32_t macctl;
925
926 /* Reset */
927 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
928 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
929 ;
930 /* Set Slave Mapping */
931 cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
932 cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i+1), 0x33221100);
933 cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2);
934 /* Set MAC Address */
935 cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i+1),
936 sc->sc_enaddr[0] | (sc->sc_enaddr[1] << 8) |
937 (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[3] << 24));
938 cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i+1),
939 sc->sc_enaddr[4] | (sc->sc_enaddr[5] << 8));
940
941 /* Set MACCONTROL for ports 0,1 */
942 macctl = SLMACCTL_FULLDUPLEX | SLMACCTL_GMII_EN |
943 SLMACCTL_IFCTL_A;
944 if (sc->sc_phy_has_1000t)
945 macctl |= SLMACCTL_GIG;
946 cpsw_write_4(sc, CPSW_SL_MACCONTROL(i), macctl);
947
948 /* Set ALE port to forwarding(3) */
949 cpsw_write_4(sc, CPSW_ALE_PORTCTL(i+1), 3);
950 }
951
952 /* Set Host Port Mapping */
953 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
954 cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);
955
956 /* Set ALE port to forwarding(3) */
957 cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3);
958
959 /* Initialize addrs */
960 cpsw_ale_update_addresses(sc, 1);
961
962 cpsw_write_4(sc, CPSW_SS_PTYPE, 0);
963 cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);
964
965 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
966 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
967 ;
968
969 for (i = 0; i < 8; i++) {
970 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
971 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
972 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
973 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
974 }
975
976 bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_txdescs, 0, 0,
977 CPSW_CPPI_RAM_TXDESCS_SIZE/4);
978
979 sc->sc_txhead = 0;
980 sc->sc_txnext = 0;
981
982 cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);
983
984 bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, 0, 0,
985 CPSW_CPPI_RAM_RXDESCS_SIZE/4);
986 /* Initialize RX Buffer Descriptors */
987 cpsw_set_rxdesc_next(sc, RXDESC_PREV(0), 0);
988 for (i = 0; i < CPSW_NRXDESCS; i++) {
989 cpsw_new_rxbuf(sc, i);
990 }
991 sc->sc_rxhead = 0;
992
993 /* turn off flow control */
994 cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0);
995
996 /* align layer 3 header to 32-bit */
997 cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, ETHER_ALIGN);
998
999 /* Clear all interrupt Masks */
1000 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
1001 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
1002
1003 /* Enable TX & RX DMA */
1004 cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
1005 cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);
1006
1007 /* Enable TX and RX interrupt receive for core 0 */
1008 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 1);
1009 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 1);
1010 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);
1011
1012 /* Enable host Error Interrupt */
1013 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 2);
1014
1015 /* Enable interrupts for TX and RX Channel 0 */
1016 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);
1017 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);
1018
1019 /* Ack stalled irqs */
1020 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
1021 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
1022 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
1023 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);
1024
1025 /* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
1026 /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
1027 cpsw_write_4(sc, MDIOCONTROL,
1028 MDIOCTL_ENABLE | MDIOCTL_FAULTENB | MDIOCTL_CLKDIV(0xff));
1029
1030 mii_mediachg(mii);
1031
1032 /* Write channel 0 RX HDP */
1033 cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(0), cpsw_rxdesc_paddr(sc, 0));
1034 sc->sc_rxrun = true;
1035 sc->sc_rxeoq = false;
1036
1037 sc->sc_txrun = true;
1038 sc->sc_txeoq = true;
1039 callout_schedule(&sc->sc_tick_ch, hz);
1040 ifp->if_flags |= IFF_RUNNING;
1041 sc->sc_txbusy = false;
1042
1043 return 0;
1044 }
1045
1046 static void
1047 cpsw_stop(struct ifnet *ifp, int disable)
1048 {
1049 struct cpsw_softc * const sc = ifp->if_softc;
1050 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1051 u_int i;
1052
1053 aprint_debug_dev(sc->sc_dev, "%s: ifp %p disable %d\n", __func__,
1054 ifp, disable);
1055
1056 if ((ifp->if_flags & IFF_RUNNING) == 0)
1057 return;
1058
1059 callout_stop(&sc->sc_tick_ch);
1060 mii_down(&sc->sc_mii);
1061
1062 cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 1);
1063 cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 1);
1064 cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0x0);
1065 cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0x0);
1066 cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x0);
1067
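/*
 * Request teardown of TX and RX channel 0, then poll (for up to roughly
 * 100 ms) until the interrupt handlers observe the teardown-complete
 * markers and clear sc_txrun/sc_rxrun.
 */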
1068 cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
1069 cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
1070 i = 0;
1071 while ((sc->sc_txrun || sc->sc_rxrun) && i < 10000) {
1072 delay(10);
1073 if ((sc->sc_txrun == true) && cpsw_txintr(sc) == 0)
1074 sc->sc_txrun = false;
1075 if ((sc->sc_rxrun == true) && cpsw_rxintr(sc) == 0)
1076 sc->sc_rxrun = false;
1077 i++;
1078 }
1079 //printf("%s toredown complete in %u\n", __func__, i);
1080
1081 /* Reset wrapper */
1082 cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
1083 while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1)
1084 ;
1085
1086 /* Reset SS */
1087 cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
1088 while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1)
1089 ;
1090
1091 for (i = 0; i < CPSW_ETH_PORTS; i++) {
1092 cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
1093 while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1)
1094 ;
1095 }
1096
1097 /* Reset CPDMA */
1098 cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
1099 while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1)
1100 ;
1101
1102 /* Release any queued transmit buffers. */
1103 for (i = 0; i < CPSW_NTXDESCS; i++) {
1104 bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[i]);
1105 m_freem(rdp->tx_mb[i]);
1106 rdp->tx_mb[i] = NULL;
1107 }
1108
1109 ifp->if_flags &= ~IFF_RUNNING;
1110 ifp->if_timer = 0;
1111 sc->sc_txbusy = false;
1112
1113 if (!disable)
1114 return;
1115
1116 for (i = 0; i < CPSW_NRXDESCS; i++) {
1117 bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
1118 m_freem(rdp->rx_mb[i]);
1119 rdp->rx_mb[i] = NULL;
1120 }
1121 }
1122
1123 static void
1124 cpsw_tick(void *arg)
1125 {
1126 struct cpsw_softc * const sc = arg;
1127 struct mii_data * const mii = &sc->sc_mii;
1128 const int s = splnet();
1129
1130 mii_tick(mii);
1131
1132 splx(s);
1133
1134 callout_schedule(&sc->sc_tick_ch, hz);
1135 }
1136
1137 static int
1138 cpsw_rxthintr(void *arg)
1139 {
1140 struct cpsw_softc * const sc = arg;
1141
1142 /* this won't deassert the interrupt though */
1143 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
1144
1145 return 1;
1146 }
1147
1148 static int
1149 cpsw_rxintr(void *arg)
1150 {
1151 struct cpsw_softc * const sc = arg;
1152 struct ifnet * const ifp = &sc->sc_ec.ec_if;
1153 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1154 struct cpsw_cpdma_bd bd;
1155 const uint32_t * const dw = bd.word;
1156 bus_dmamap_t dm;
1157 struct mbuf *m;
1158 u_int i;
1159 u_int len, off;
1160
1161 KERNHIST_FUNC(__func__);
1162 CPSWHIST_CALLARGS(sc, 0, 0, 0);
1163
1164 for (;;) {
1165 KASSERT(sc->sc_rxhead < CPSW_NRXDESCS);
1166
1167 i = sc->sc_rxhead;
1168 KERNHIST_LOG(cpswhist, "rxhead %x CP %x\n", i,
1169 cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0)), 0, 0);
1170 dm = rdp->rx_dm[i];
1171 m = rdp->rx_mb[i];
1172
1173 KASSERT(dm != NULL);
1174 KASSERT(m != NULL);
1175
1176 cpsw_get_rxdesc(sc, i, &bd);
1177
1178 if (ISSET(dw[3], CPDMA_BD_OWNER))
1179 break;
1180
1181 if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
1182 sc->sc_rxrun = false;
1183 return 1;
1184 }
1185
1186 #if defined(CPSW_DEBUG_DMA)
1187 if ((dw[3] & (CPDMA_BD_SOP | CPDMA_BD_EOP)) !=
1188 (CPDMA_BD_SOP | CPDMA_BD_EOP)) {
1189 Debugger();
1190 }
1191 #endif
1192
1193 bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
1194 BUS_DMASYNC_POSTREAD);
1195
1196 if (cpsw_new_rxbuf(sc, i) != 0) {
1197 /* drop current packet, reuse buffer for new */
1198 if_statinc(ifp, if_ierrors);
1199 goto next;
1200 }
1201
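/*
 * Word 2 carries the buffer offset (bits 26:16) and word 3 the received
 * packet length (bits 10:0); trim the CRC if the hardware passed it up.
 */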
1202 off = __SHIFTOUT(dw[2], (uint32_t)__BITS(26, 16));
1203 len = __SHIFTOUT(dw[3], (uint32_t)__BITS(10, 0));
1204
1205 if (ISSET(dw[3], CPDMA_BD_PASSCRC))
1206 len -= ETHER_CRC_LEN;
1207
1208 m_set_rcvif(m, ifp);
1209 m->m_pkthdr.len = m->m_len = len;
1210 m->m_data += off;
1211
1212 if_percpuq_enqueue(ifp->if_percpuq, m);
1213
1214 next:
1215 sc->sc_rxhead = RXDESC_NEXT(sc->sc_rxhead);
1216 if (ISSET(dw[3], CPDMA_BD_EOQ)) {
1217 sc->sc_rxeoq = true;
1218 break;
1219 } else {
1220 sc->sc_rxeoq = false;
1221 }
1222 cpsw_write_4(sc, CPSW_CPDMA_RX_CP(0),
1223 cpsw_rxdesc_paddr(sc, i));
1224 }
1225
1226 #if defined(CPSW_DEBUG_DMA)
1227 if (sc->sc_rxeoq) {
1228 device_printf(sc->sc_dev, "rxeoq\n");
1229 Debugger();
1230 }
1231 #endif
1232
1233 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
1234
1235 return 1;
1236 }
1237
1238 static int
1239 cpsw_txintr(void *arg)
1240 {
1241 struct cpsw_softc * const sc = arg;
1242 struct ifnet * const ifp = &sc->sc_ec.ec_if;
1243 struct cpsw_ring_data * const rdp = sc->sc_rdp;
1244 struct cpsw_cpdma_bd bd;
1245 const uint32_t * const dw = bd.word;
1246 bool handled = false;
1247 uint32_t tx0_cp;
1248 u_int cpi;
1249
1250 KERNHIST_FUNC(__func__);
1251 CPSWHIST_CALLARGS(sc, 0, 0, 0);
1252
1253 KASSERT(sc->sc_txrun);
1254
1255 KERNHIST_LOG(cpswhist, "before txnext %x txhead %x txrun %x\n",
1256 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);
1257
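/*
 * The TX completion pointer reports the descriptor the hardware finished
 * last; the magic value 0xfffffffc signals a completed channel teardown
 * and must be written back to acknowledge it.
 */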
1258 tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1259
1260 if (tx0_cp == 0xfffffffc) {
1261 /* Teardown, ack it */
1262 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0), 0xfffffffc);
1263 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0), 0);
1264 sc->sc_txrun = false;
1265 return 0;
1266 }
1267
1268 for (;;) {
1269 tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1270 cpi = (tx0_cp - sc->sc_txdescs_pa) / sizeof(struct cpsw_cpdma_bd);
1271 KASSERT(sc->sc_txhead < CPSW_NTXDESCS);
1272
1273 KERNHIST_LOG(cpswhist, "txnext %x txhead %x txrun %x cpi %x\n",
1274 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, cpi);
1275
1276 cpsw_get_txdesc(sc, sc->sc_txhead, &bd);
1277
1278 #if defined(CPSW_DEBUG_DMA)
1279 if (dw[2] == 0) {
1280 //Debugger();
1281 }
1282 #endif
1283
1284 if (ISSET(dw[3], CPDMA_BD_SOP) == 0)
1285 goto next;
1286
1287 if (ISSET(dw[3], CPDMA_BD_OWNER)) {
1288 printf("pwned %x %x %x\n", cpi, sc->sc_txhead,
1289 sc->sc_txnext);
1290 break;
1291 }
1292
1293 if (ISSET(dw[3], CPDMA_BD_TDOWNCMPLT)) {
1294 sc->sc_txrun = false;
1295 return 1;
1296 }
1297
1298 bus_dmamap_sync(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead],
1299 0, rdp->tx_dm[sc->sc_txhead]->dm_mapsize,
1300 BUS_DMASYNC_POSTWRITE);
1301 bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead]);
1302
1303 m_freem(rdp->tx_mb[sc->sc_txhead]);
1304 rdp->tx_mb[sc->sc_txhead] = NULL;
1305
1306 if_statinc(ifp, if_opackets);
1307
1308 handled = true;
1309
1310 sc->sc_txbusy = false;
1311
1312 next:
1313 if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
1314 sc->sc_txeoq = true;
1315 }
1316 if (sc->sc_txhead == cpi) {
1317 cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0),
1318 cpsw_txdesc_paddr(sc, cpi));
1319 sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
1320 break;
1321 }
1322 sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
1323 if (ISSET(dw[3], CPDMA_BD_EOP) && ISSET(dw[3], CPDMA_BD_EOQ)) {
1324 sc->sc_txeoq = true;
1325 break;
1326 }
1327 }
1328
1329 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
1330
1331 if ((sc->sc_txnext != sc->sc_txhead) && sc->sc_txeoq) {
1332 if (cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)) == 0) {
1333 sc->sc_txeoq = false;
1334 cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
1335 cpsw_txdesc_paddr(sc, sc->sc_txhead));
1336 }
1337 }
1338
1339 KERNHIST_LOG(cpswhist, "after txnext %x txhead %x txrun %x\n",
1340 sc->sc_txnext, sc->sc_txhead, sc->sc_txrun, 0);
1341 KERNHIST_LOG(cpswhist, "CP %x HDP %x\n",
1342 cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0)),
1343 cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)), 0, 0);
1344
1345 if (handled && sc->sc_txnext == sc->sc_txhead)
1346 ifp->if_timer = 0;
1347
1348 if (handled)
1349 if_schedule_deferred_start(ifp);
1350
1351 return handled;
1352 }
1353
1354 static int
1355 cpsw_miscintr(void *arg)
1356 {
1357 struct cpsw_softc * const sc = arg;
1358 uint32_t miscstat;
1359 uint32_t dmastat;
1360 uint32_t stat;
1361
1362 miscstat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));
1363 device_printf(sc->sc_dev, "%s %x FIRE\n", __func__, miscstat);
1364
1365 #define CPSW_MISC_HOST_PEND __BIT32(2)
1366 #define CPSW_MISC_STAT_PEND __BIT32(3)
1367
1368 if (ISSET(miscstat, CPSW_MISC_HOST_PEND)) {
1369 /* Host Error */
1370 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1371 printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
1372
1373 printf("rxhead %02x\n", sc->sc_rxhead);
1374
1375 stat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
1376 printf("CPSW_CPDMA_DMASTATUS %x\n", stat);
1377 stat = cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0));
1378 printf("CPSW_CPDMA_TX0_HDP %x\n", stat);
1379 stat = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
1380 printf("CPSW_CPDMA_TX0_CP %x\n", stat);
1381 stat = cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(0));
1382 printf("CPSW_CPDMA_RX0_HDP %x\n", stat);
1383 stat = cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0));
1384 printf("CPSW_CPDMA_RX0_CP %x\n", stat);
1385
1386 //Debugger();
1387
1388 cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, dmastat);
1389 dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1390 printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
1391 }
1392
1393 cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);
1394
1395 return 1;
1396 }
1397
1398 /*
1399 *
1400 * ALE support routines.
1401 *
1402 */
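/*
 * Each ALE table entry spans three 32-bit words.  Reading an entry
 * writes its index to CPSW_ALE_TBLCTL and reads it back through
 * CPSW_ALE_TBLW0..TBLW2; writing loads TBLW0..TBLW2 first and then
 * writes TBLCTL with bit 31 (the write strobe) set together with the
 * index.
 */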
1403
1404 static void
1405 cpsw_ale_entry_init(uint32_t *ale_entry)
1406 {
1407 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
1408 }
1409
1410 static void
1411 cpsw_ale_entry_set_mac(uint32_t *ale_entry, const uint8_t *mac)
1412 {
1413 ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
1414 ale_entry[1] = mac[0] << 8 | mac[1];
1415 }
1416
1417 static void
1418 cpsw_ale_entry_set_bcast_mac(uint32_t *ale_entry)
1419 {
1420 ale_entry[0] = 0xffffffff;
1421 ale_entry[1] = 0x0000ffff;
1422 }
1423
1424 static void
1425 cpsw_ale_entry_set(uint32_t *ale_entry, ale_entry_field_t field, uint32_t val)
1426 {
1427 /* Entry type[61:60] is addr entry(1), Mcast fwd state[63:62] is fw(3)*/
1428 switch (field) {
1429 case ALE_ENTRY_TYPE:
1430 /* [61:60] */
1431 ale_entry[1] |= (val & 0x3) << 28;
1432 break;
1433 case ALE_MCAST_FWD_STATE:
1434 /* [63:62] */
1435 ale_entry[1] |= (val & 0x3) << 30;
1436 break;
1437 case ALE_PORT_MASK:
1438 /* [68:66] */
1439 ale_entry[2] |= (val & 0x7) << 2;
1440 break;
1441 case ALE_PORT_NUMBER:
1442 /* [67:66] */
1443 ale_entry[2] |= (val & 0x3) << 2;
1444 break;
1445 default:
1446 panic("Invalid ALE entry field: %d\n", field);
1447 }
1448
1449 return;
1450 }
1451
1452 static bool
1453 cpsw_ale_entry_mac_match(const uint32_t *ale_entry, const uint8_t *mac)
1454 {
1455 return (((ale_entry[1] >> 8) & 0xff) == mac[0]) &&
1456 (((ale_entry[1] >> 0) & 0xff) == mac[1]) &&
1457 (((ale_entry[0] >>24) & 0xff) == mac[2]) &&
1458 (((ale_entry[0] >>16) & 0xff) == mac[3]) &&
1459 (((ale_entry[0] >> 8) & 0xff) == mac[4]) &&
1460 (((ale_entry[0] >> 0) & 0xff) == mac[5]);
1461 }
1462
1463 static void
1464 cpsw_ale_set_outgoing_mac(struct cpsw_softc *sc, int port, const uint8_t *mac)
1465 {
1466 cpsw_write_4(sc, CPSW_PORT_P_SA_HI(port),
1467 mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]);
1468 cpsw_write_4(sc, CPSW_PORT_P_SA_LO(port),
1469 mac[5] << 8 | mac[4]);
1470 }
1471
1472 static void
1473 cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry)
1474 {
1475 cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023);
1476 ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0);
1477 ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1);
1478 ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2);
1479 }
1480
1481 static void
1482 cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx,
1483 const uint32_t *ale_entry)
1484 {
1485 cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]);
1486 cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]);
1487 cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]);
1488 cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023));
1489 }
1490
1491 static int
1492 cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc)
1493 {
1494 int i;
1495 uint32_t ale_entry[3];
1496
1497 /* First two entries are link address and broadcast. */
1498 for (i = 2; i < CPSW_MAX_ALE_ENTRIES; i++) {
1499 cpsw_ale_read_entry(sc, i, ale_entry);
1500 if (((ale_entry[1] >> 28) & 3) == 1 && /* Address entry */
1501 ((ale_entry[1] >> 8) & 1) == 1) { /* MCast link addr */
1502 ale_entry[0] = ale_entry[1] = ale_entry[2] = 0;
1503 cpsw_ale_write_entry(sc, i, ale_entry);
1504 }
1505 }
1506 return CPSW_MAX_ALE_ENTRIES;
1507 }
1508
1509 static int
1510 cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmask, uint8_t *mac)
1511 {
1512 int free_index = -1, matching_index = -1, i;
1513 uint32_t ale_entry[3];
1514
1515 /* Find a matching entry or a free entry. */
1516 for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) {
1517 cpsw_ale_read_entry(sc, i, ale_entry);
1518
1519 /* Entry Type[61:60] is 0 for free entry */
1520 if (free_index < 0 && ((ale_entry[1] >> 28) & 3) == 0) {
1521 free_index = i;
1522 }
1523
1524 if (cpsw_ale_entry_mac_match(ale_entry, mac)) {
1525 matching_index = i;
1526 break;
1527 }
1528 }
1529
1530 if (matching_index < 0) {
1531 if (free_index < 0)
1532 return ENOMEM;
1533 i = free_index;
1534 }
1535
1536 cpsw_ale_entry_init(ale_entry);
1537
1538 cpsw_ale_entry_set_mac(ale_entry, mac);
1539 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1540 cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
1541 cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, portmask);
1542
1543 cpsw_ale_write_entry(sc, i, ale_entry);
1544
1545 return 0;
1546 }
1547
1548 static int
1549 cpsw_ale_update_addresses(struct cpsw_softc *sc, int purge)
1550 {
1551 uint8_t *mac = sc->sc_enaddr;
1552 uint32_t ale_entry[3];
1553 int i;
1554 struct ethercom * const ec = &sc->sc_ec;
1555 struct ether_multi *ifma;
1556
1557 cpsw_ale_entry_init(ale_entry);
1558 /* Route incoming packets for our MAC address to Port 0 (host). */
1559 /* For simplicity, keep this entry at table index 0 in the ALE. */
1560 cpsw_ale_entry_set_mac(ale_entry, mac);
1561 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1562 cpsw_ale_entry_set(ale_entry, ALE_PORT_NUMBER, 0);
1563 cpsw_ale_write_entry(sc, 0, ale_entry);
1564
1565 /* Set outgoing MAC Address for Ports 1 and 2. */
1566 for (i = CPSW_CPPI_PORTS; i < (CPSW_ETH_PORTS + CPSW_CPPI_PORTS); ++i)
1567 cpsw_ale_set_outgoing_mac(sc, i, mac);
1568
1569 /* Keep the broadcast address at table entry 1. */
1570 cpsw_ale_entry_init(ale_entry);
1571 cpsw_ale_entry_set_bcast_mac(ale_entry);
1572 cpsw_ale_entry_set(ale_entry, ALE_ENTRY_TYPE, ALE_TYPE_ADDRESS);
1573 cpsw_ale_entry_set(ale_entry, ALE_MCAST_FWD_STATE, ALE_FWSTATE_FWONLY);
1574 cpsw_ale_entry_set(ale_entry, ALE_PORT_MASK, ALE_PORT_MASK_ALL);
1575 cpsw_ale_write_entry(sc, 1, ale_entry);
1576
1577 /* SIOCDELMULTI doesn't specify the particular address
1578 being removed, so we have to remove all and rebuild. */
1579 if (purge)
1580 cpsw_ale_remove_all_mc_entries(sc);
1581
1582 /* Set other multicast addrs desired. */
1583 ETHER_LOCK(ec);
1584 LIST_FOREACH(ifma, &ec->ec_multiaddrs, enm_list) {
1585 cpsw_ale_mc_entry_set(sc, ALE_PORT_MASK_ALL, ifma->enm_addrlo);
1586 }
1587 ETHER_UNLOCK(ec);
1588
1589 return 0;
1590 }
1591