xref: /openbsd-src/sys/dev/fdt/if_mvpp.c (revision 365a24f4da5afba104b57ff7e590a1f7b1edb1dc)
1 /*	$OpenBSD: if_mvpp.c,v 1.52 2024/03/18 21:37:44 patrick Exp $	*/
2 /*
3  * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
4  * Copyright (c) 2017, 2020 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 /*
19  * Copyright (C) 2016 Marvell International Ltd.
20  *
21  * Marvell BSD License Option
22  *
23  * If you received this File from Marvell, you may opt to use, redistribute
24  * and/or modify this File under the following licensing terms.
25  * Redistribution and use in source and binary forms, with or without
26  * modification, are permitted provided that the following conditions are met:
27  *
28  *   * Redistributions of source code must retain the above copyright notice,
29  *     this list of conditions and the following disclaimer.
30  *
31  *   * Redistributions in binary form must reproduce the above copyright
32  *     notice, this list of conditions and the following disclaimer in the
33  *     documentation and/or other materials provided with the distribution.
34  *
35  *   * Neither the name of Marvell nor the names of its contributors may be
36  *     used to endorse or promote products derived from this software without
37  *     specific prior written permission.
38  *
39  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
40  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
43  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
44  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
45  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
46  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
47  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
48  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
49  * POSSIBILITY OF SUCH DAMAGE.
50  */
51 
52 #include "bpfilter.h"
53 
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/device.h>
57 #include <sys/kernel.h>
58 #include <sys/malloc.h>
59 #include <sys/mbuf.h>
60 #include <sys/queue.h>
61 #include <sys/socket.h>
62 #include <sys/sockio.h>
63 #include <sys/timeout.h>
64 
65 #include <uvm/uvm_extern.h>
66 
67 #include <machine/cpufunc.h>
68 #include <machine/bus.h>
69 #include <machine/fdt.h>
70 
71 #include <net/if.h>
72 #include <net/if_media.h>
73 #include <net/ppp_defs.h>
74 
75 #include <dev/ofw/openfirm.h>
76 #include <dev/ofw/ofw_clock.h>
77 #include <dev/ofw/ofw_gpio.h>
78 #include <dev/ofw/ofw_misc.h>
79 #include <dev/ofw/ofw_pinctrl.h>
80 #include <dev/ofw/ofw_regulator.h>
81 #include <dev/ofw/fdt.h>
82 
83 #include <dev/mii/mii.h>
84 #include <dev/mii/miivar.h>
85 
86 #if NBPFILTER > 0
87 #include <net/bpf.h>
88 #endif
89 
90 #include <netinet/in.h>
91 #include <netinet/ip.h>
92 #include <netinet/if_ether.h>
93 
94 #include <netinet6/in6_var.h>
95 #include <netinet/ip6.h>
96 
97 #include <dev/fdt/if_mvppreg.h>
98 
/* One rx/tx buffer slot: the DMA map and the mbuf currently loaded into it. */
struct mvpp2_buf {
	bus_dmamap_t		mb_map;	/* DMA map for mb_m */
	struct mbuf		*mb_m;	/* mbuf; NULL when the slot is empty */
};
103 
104 #define MVPP2_NTXDESC	512
105 #define MVPP2_NTXSEGS	16
106 #define MVPP2_NRXDESC	512
107 
/*
 * Software state for one hardware buffer manager (BM) pool.  The
 * freelist is a ring of cookies (pool-id << 16 | rxbuf index, see
 * mvpp2_bm_pool_init()) indexed by free_prod/free_cons.
 */
struct mvpp2_bm_pool {
	struct mvpp2_dmamem	*bm_mem;	/* pool memory given to the BM */
	struct mvpp2_buf	*rxbuf;		/* MVPP2_BM_SIZE receive buffers */
	uint32_t		*freelist;	/* ring of buffer cookies */
	int			free_prod;	/* freelist producer index */
	int			free_cons;	/* freelist consumer index */
};
115 
116 #define MVPP2_BM_SIZE		64
117 #define MVPP2_BM_POOL_PTR_ALIGN	128
118 #define MVPP2_BM_POOLS_NUM	8
119 #define MVPP2_BM_ALIGN		32
120 
/* Software state for one transmit queue. */
struct mvpp2_tx_queue {
	uint8_t			id;		/* queue index */
	uint8_t			log_id;		/* logical queue index */
	struct mvpp2_dmamem	*ring;		/* descriptor ring memory */
	struct mvpp2_buf	*buf;		/* per-descriptor buffer state */
	struct mvpp2_tx_desc	*descs;		/* KVA of the descriptor ring */
	int			prod;		/* producer index */
	int			cons;		/* consumer index */

	/* packet coalescing threshold (presumably tx-done interrupts) */
	uint32_t		done_pkts_coal;
};
132 
/* Software state for one receive queue. */
struct mvpp2_rx_queue {
	uint8_t			id;		/* queue index */
	struct mvpp2_dmamem	*ring;		/* descriptor ring memory */
	struct mvpp2_rx_desc	*descs;		/* KVA of the descriptor ring */
	int			prod;		/* producer index */
	struct if_rxring	rxring;		/* rx ring accounting */
	int			cons;		/* consumer index */

	uint32_t		pkts_coal;	/* packet coalescing threshold */
	uint32_t		time_coal;	/* time coalescing threshold */
};
144 
/* A single-segment DMA-safe memory allocation (see mvpp2_dmamem_alloc()). */
struct mvpp2_dmamem {
	bus_dmamap_t		mdm_map;	/* DMA map */
	bus_dma_segment_t	mdm_seg;	/* backing segment */
	size_t			mdm_size;	/* size in bytes */
	caddr_t			mdm_kva;	/* kernel virtual address */
};
/* Accessors for the map, length, device address and kernel address. */
#define MVPP2_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MVPP2_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MVPP2_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MVPP2_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
155 
struct mvpp2_port;
/*
 * Per-controller state ("mvppc").  The controller owns the global
 * resources (buffer manager pools, parser, classifier, aggregated tx
 * queues); the individual ports attach as child devices ("mvpp").
 */
struct mvpp2_softc {
	struct device		sc_dev;
	int			sc_node;	/* device tree node */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh_base;	/* packet processor registers */
	bus_space_handle_t	sc_ioh_iface;	/* second register window */
	paddr_t			sc_ioh_paddr;	/* physical addr of base window */
	bus_size_t		sc_iosize_base;
	bus_size_t		sc_iosize_iface;
	bus_dma_tag_t		sc_dmat;
	struct regmap		*sc_rm;		/* system-controller regmap */

	uint32_t		sc_tclk;	/* "pp_clk" frequency */

	struct mvpp2_bm_pool	*sc_bm_pools;	/* buffer manager pools */
	int			sc_npools;

	struct mvpp2_prs_shadow	*sc_prs_shadow;	/* parser shadow table */
	uint8_t			*sc_prs_double_vlans;

	int			sc_aggr_ntxq;	/* number of aggregated txqs */
	struct mvpp2_tx_queue	*sc_aggr_txqs;	/* aggregated tx queues */

	struct mvpp2_port	**sc_ports;
};
182 
/* Per-port state ("mvpp"); one network interface per port. */
struct mvpp2_port {
	struct device		sc_dev;
	struct mvpp2_softc	*sc;		/* parent controller */
	int			sc_node;	/* device tree node */
	bus_dma_tag_t		sc_dmat;
	int			sc_id;		/* port id */
	int			sc_gop_id;	/* group-of-ports id */

	struct arpcom		sc_ac;		/* ethernet common state */
#define sc_lladdr	sc_ac.ac_enaddr
	struct mii_data		sc_mii;		/* MII/media state */
#define sc_media	sc_mii.mii_media
	struct mii_bus		*sc_mdio;	/* MDIO bus for the PHY */

	/* PHY connection type, from the device tree "phy-mode" style info */
	enum {
		PHY_MODE_XAUI,
		PHY_MODE_10GBASER,
		PHY_MODE_2500BASEX,
		PHY_MODE_1000BASEX,
		PHY_MODE_SGMII,
		PHY_MODE_RGMII,
		PHY_MODE_RGMII_ID,
		PHY_MODE_RGMII_RXID,
		PHY_MODE_RGMII_TXID,
	}			sc_phy_mode;
	int			sc_fixed_link;	/* fixed-link, no PHY */
	int			sc_inband_status; /* in-band link status */
	int			sc_link;	/* current link state */
	int			sc_phyloc;	/* PHY address */
	int			sc_sfp;		/* SFP cage handle, if any */

	int			sc_ntxq;	/* number of tx queues */
	int			sc_nrxq;	/* number of rx queues */

	struct mvpp2_tx_queue	*sc_txqs;
	struct mvpp2_rx_queue	*sc_rxqs;

	struct timeout		sc_tick;	/* periodic tick timeout */

	uint32_t		sc_tx_time_coal; /* tx time coalescing value */
};
224 
225 #define MVPP2_MAX_PORTS		4
226 
227 struct mvpp2_attach_args {
228 	int			ma_node;
229 	bus_dma_tag_t		ma_dmat;
230 };
231 
232 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
233 
234 static struct rwlock mvpp2_sff_lock = RWLOCK_INITIALIZER("mvpp2sff");
235 
236 int	mvpp2_match(struct device *, void *, void *);
237 void	mvpp2_attach(struct device *, struct device *, void *);
238 void	mvpp2_attach_deferred(struct device *);
239 
240 const struct cfattach mvppc_ca = {
241 	sizeof(struct mvpp2_softc), mvpp2_match, mvpp2_attach
242 };
243 
244 struct cfdriver mvppc_cd = {
245 	NULL, "mvppc", DV_DULL
246 };
247 
248 int	mvpp2_port_match(struct device *, void *, void *);
249 void	mvpp2_port_attach(struct device *, struct device *, void *);
250 
251 const struct cfattach mvpp_ca = {
252 	sizeof(struct mvpp2_port), mvpp2_port_match, mvpp2_port_attach
253 };
254 
255 struct cfdriver mvpp_cd = {
256 	NULL, "mvpp", DV_IFNET
257 };
258 
259 void	mvpp2_port_attach_sfp(struct device *);
260 
261 uint32_t mvpp2_read(struct mvpp2_softc *, bus_addr_t);
262 void	mvpp2_write(struct mvpp2_softc *, bus_addr_t, uint32_t);
263 uint32_t mvpp2_gmac_read(struct mvpp2_port *, bus_addr_t);
264 void	mvpp2_gmac_write(struct mvpp2_port *, bus_addr_t, uint32_t);
265 uint32_t mvpp2_xlg_read(struct mvpp2_port *, bus_addr_t);
266 void	mvpp2_xlg_write(struct mvpp2_port *, bus_addr_t, uint32_t);
267 uint32_t mvpp2_xpcs_read(struct mvpp2_port *, bus_addr_t);
268 void	mvpp2_xpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
269 uint32_t mvpp2_mpcs_read(struct mvpp2_port *, bus_addr_t);
270 void	mvpp2_mpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
271 
272 int	mvpp2_ioctl(struct ifnet *, u_long, caddr_t);
273 void	mvpp2_start(struct ifnet *);
274 int	mvpp2_rxrinfo(struct mvpp2_port *, struct if_rxrinfo *);
275 void	mvpp2_watchdog(struct ifnet *);
276 
277 int	mvpp2_media_change(struct ifnet *);
278 void	mvpp2_media_status(struct ifnet *, struct ifmediareq *);
279 
280 int	mvpp2_mii_readreg(struct device *, int, int);
281 void	mvpp2_mii_writereg(struct device *, int, int, int);
282 void	mvpp2_mii_statchg(struct device *);
283 void	mvpp2_inband_statchg(struct mvpp2_port *);
284 void	mvpp2_port_change(struct mvpp2_port *);
285 
286 void	mvpp2_tick(void *);
287 void	mvpp2_rxtick(void *);
288 
289 int	mvpp2_link_intr(void *);
290 int	mvpp2_intr(void *);
291 void	mvpp2_tx_proc(struct mvpp2_port *, uint8_t);
292 void	mvpp2_txq_proc(struct mvpp2_port *, struct mvpp2_tx_queue *);
293 void	mvpp2_rx_proc(struct mvpp2_port *, uint8_t);
294 void	mvpp2_rxq_proc(struct mvpp2_port *, struct mvpp2_rx_queue *);
295 void	mvpp2_rx_refill(struct mvpp2_port *);
296 
297 void	mvpp2_up(struct mvpp2_port *);
298 void	mvpp2_down(struct mvpp2_port *);
299 void	mvpp2_iff(struct mvpp2_port *);
300 
301 void	mvpp2_aggr_txq_hw_init(struct mvpp2_softc *, struct mvpp2_tx_queue *);
302 void	mvpp2_txq_hw_init(struct mvpp2_port *, struct mvpp2_tx_queue *);
303 void	mvpp2_rxq_hw_init(struct mvpp2_port *, struct mvpp2_rx_queue *);
304 void	mvpp2_txq_hw_deinit(struct mvpp2_port *, struct mvpp2_tx_queue *);
305 void	mvpp2_rxq_hw_drop(struct mvpp2_port *, struct mvpp2_rx_queue *);
306 void	mvpp2_rxq_hw_deinit(struct mvpp2_port *, struct mvpp2_rx_queue *);
307 void	mvpp2_rxq_long_pool_set(struct mvpp2_port *, int, int);
308 void	mvpp2_rxq_short_pool_set(struct mvpp2_port *, int, int);
309 
310 void	mvpp2_mac_reset_assert(struct mvpp2_port *);
311 void	mvpp2_pcs_reset_assert(struct mvpp2_port *);
312 void	mvpp2_pcs_reset_deassert(struct mvpp2_port *);
313 void	mvpp2_mac_config(struct mvpp2_port *);
314 void	mvpp2_xlg_config(struct mvpp2_port *);
315 void	mvpp2_gmac_config(struct mvpp2_port *);
316 void	mvpp2_comphy_config(struct mvpp2_port *, int);
317 void	mvpp2_gop_config(struct mvpp2_port *);
318 void	mvpp2_gop_intr_mask(struct mvpp2_port *);
319 void	mvpp2_gop_intr_unmask(struct mvpp2_port *);
320 
321 struct mvpp2_dmamem *
322 	mvpp2_dmamem_alloc(struct mvpp2_softc *, bus_size_t, bus_size_t);
323 void	mvpp2_dmamem_free(struct mvpp2_softc *, struct mvpp2_dmamem *);
324 struct mbuf *mvpp2_alloc_mbuf(struct mvpp2_softc *, bus_dmamap_t);
325 void	mvpp2_fill_rx_ring(struct mvpp2_softc *);
326 
327 void	mvpp2_interrupts_enable(struct mvpp2_port *, int);
328 void	mvpp2_interrupts_disable(struct mvpp2_port *, int);
329 int	mvpp2_egress_port(struct mvpp2_port *);
330 int	mvpp2_txq_phys(int, int);
331 void	mvpp2_defaults_set(struct mvpp2_port *);
332 void	mvpp2_ingress_enable(struct mvpp2_port *);
333 void	mvpp2_ingress_disable(struct mvpp2_port *);
334 void	mvpp2_egress_enable(struct mvpp2_port *);
335 void	mvpp2_egress_disable(struct mvpp2_port *);
336 void	mvpp2_port_enable(struct mvpp2_port *);
337 void	mvpp2_port_disable(struct mvpp2_port *);
338 void	mvpp2_rxq_status_update(struct mvpp2_port *, int, int, int);
339 int	mvpp2_rxq_received(struct mvpp2_port *, int);
340 void	mvpp2_rxq_offset_set(struct mvpp2_port *, int, int);
341 void	mvpp2_txp_max_tx_size_set(struct mvpp2_port *);
342 void	mvpp2_rx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
343 	    uint32_t);
344 void	mvpp2_tx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_tx_queue *,
345 	    uint32_t);
346 void	mvpp2_rx_time_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
347 	    uint32_t);
348 void	mvpp2_tx_time_coal_set(struct mvpp2_port *, uint32_t);
349 
350 void	mvpp2_axi_config(struct mvpp2_softc *);
351 void	mvpp2_bm_pool_init(struct mvpp2_softc *);
352 void	mvpp2_rx_fifo_init(struct mvpp2_softc *);
353 void	mvpp2_tx_fifo_init(struct mvpp2_softc *);
354 int	mvpp2_prs_default_init(struct mvpp2_softc *);
355 void	mvpp2_prs_hw_inv(struct mvpp2_softc *, int);
356 void	mvpp2_prs_hw_port_init(struct mvpp2_softc *, int, int, int, int);
357 void	mvpp2_prs_def_flow_init(struct mvpp2_softc *);
358 void	mvpp2_prs_mh_init(struct mvpp2_softc *);
359 void	mvpp2_prs_mac_init(struct mvpp2_softc *);
360 void	mvpp2_prs_dsa_init(struct mvpp2_softc *);
361 int	mvpp2_prs_etype_init(struct mvpp2_softc *);
362 int	mvpp2_prs_vlan_init(struct mvpp2_softc *);
363 int	mvpp2_prs_pppoe_init(struct mvpp2_softc *);
364 int	mvpp2_prs_ip6_init(struct mvpp2_softc *);
365 int	mvpp2_prs_ip4_init(struct mvpp2_softc *);
366 void	mvpp2_prs_shadow_ri_set(struct mvpp2_softc *, int,
367 	    uint32_t, uint32_t);
368 void	mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *, uint32_t);
369 void	mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *, uint32_t, int);
370 void	mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *, uint32_t);
371 uint32_t mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *);
372 void	mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *, uint32_t,
373 	    uint8_t, uint8_t);
374 void	mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *, uint32_t,
375 	    uint8_t *, uint8_t *);
376 int	mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *, int, uint16_t);
377 void	mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
378 int	mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *);
379 int	mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *);
380 void	mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *, uint32_t,
381 	    uint32_t *, uint32_t *);
382 void	mvpp2_prs_match_etype(struct mvpp2_prs_entry *, uint32_t, uint16_t);
383 int	mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *);
384 void	mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
385 void	mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
386 void	mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *, uint32_t, uint32_t);
387 void	mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *, uint32_t, uint32_t);
388 void	mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *, int, uint32_t);
389 void	mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *, uint32_t, int,
390 	    uint32_t);
391 void	mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *, uint32_t);
392 void	mvpp2_prs_shadow_set(struct mvpp2_softc *, int, uint32_t);
393 int	mvpp2_prs_hw_write(struct mvpp2_softc *, struct mvpp2_prs_entry *);
394 int	mvpp2_prs_hw_read(struct mvpp2_softc *, struct mvpp2_prs_entry *, int);
395 int	mvpp2_prs_flow_find(struct mvpp2_softc *, int);
396 int	mvpp2_prs_tcam_first_free(struct mvpp2_softc *, uint8_t, uint8_t);
397 void	mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *, uint32_t, int);
398 void	mvpp2_prs_mac_promisc_set(struct mvpp2_softc *, uint32_t, int, int);
399 void	mvpp2_prs_dsa_tag_set(struct mvpp2_softc *, uint32_t, int, int, int);
400 void	mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *, uint32_t,
401 	    int, int, int);
402 struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2_softc *, uint16_t,
403 	    int);
404 int	mvpp2_prs_vlan_add(struct mvpp2_softc *, uint16_t, int, uint32_t);
405 int	mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *);
406 struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2_softc *,
407 	    uint16_t, uint16_t);
408 int	mvpp2_prs_double_vlan_add(struct mvpp2_softc *, uint16_t, uint16_t,
409 	    uint32_t);
410 int	mvpp2_prs_ip4_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
411 int	mvpp2_prs_ip4_cast(struct mvpp2_softc *, uint16_t);
412 int	mvpp2_prs_ip6_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
413 int	mvpp2_prs_ip6_cast(struct mvpp2_softc *, uint16_t);
414 int	mvpp2_prs_mac_da_range_find(struct mvpp2_softc *, int, const uint8_t *,
415 	    uint8_t *, int);
416 int	mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *, const uint8_t *,
417 	    uint8_t *);
418 int	mvpp2_prs_mac_da_accept(struct mvpp2_port *, const uint8_t *, int);
419 void	mvpp2_prs_mac_del_all(struct mvpp2_port *);
420 int	mvpp2_prs_tag_mode_set(struct mvpp2_softc *, int, int);
421 int	mvpp2_prs_def_flow(struct mvpp2_port *);
422 void	mvpp2_cls_flow_write(struct mvpp2_softc *, struct mvpp2_cls_flow_entry *);
423 void	mvpp2_cls_lookup_write(struct mvpp2_softc *, struct mvpp2_cls_lookup_entry *);
424 void	mvpp2_cls_init(struct mvpp2_softc *);
425 void	mvpp2_cls_port_config(struct mvpp2_port *);
426 void	mvpp2_cls_oversize_rxq_set(struct mvpp2_port *);
427 
428 int
429 mvpp2_match(struct device *parent, void *cfdata, void *aux)
430 {
431 	struct fdt_attach_args *faa = aux;
432 
433 	return OF_is_compatible(faa->fa_node, "marvell,armada-7k-pp22");
434 }
435 
/*
 * First attach stage: map both register windows, look up the system
 * controller and clocks, then defer the controller initialization
 * (mvpp2_attach_deferred()) until all other drivers have attached.
 */
void
mvpp2_attach(struct device *parent, struct device *self, void *aux)
{
	struct mvpp2_softc *sc = (void *)self;
	struct fdt_attach_args *faa = aux;

	/* We need two register windows: base and interface. */
	if (faa->fa_nreg < 2) {
		printf(": no registers\n");
		return;
	}

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;

	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh_base)) {
		printf(": can't map registers\n");
		return;
	}
	sc->sc_iosize_base = faa->fa_reg[0].size;

	/*
	 * Also record the physical address of the base window,
	 * stripped down to the raw PA bits.
	 */
	sc->sc_ioh_paddr = bus_space_mmap(sc->sc_iot, faa->fa_reg[0].addr,
	    0, PROT_READ | PROT_WRITE, 0);
	KASSERT(sc->sc_ioh_paddr != -1);
	sc->sc_ioh_paddr &= PMAP_PA_MASK;

	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
	    faa->fa_reg[1].size, 0, &sc->sc_ioh_iface)) {
		printf(": can't map registers\n");
		/* Undo the first mapping on failure. */
		bus_space_unmap(sc->sc_iot, sc->sc_ioh_base,
		    sc->sc_iosize_base);
		return;
	}
	sc->sc_iosize_iface = faa->fa_reg[1].size;

	/* Optional system-controller regmap. */
	sc->sc_rm = regmap_byphandle(OF_getpropint(faa->fa_node,
	    "marvell,system-controller", 0));

	clock_enable_all(faa->fa_node);
	sc->sc_tclk = clock_get_frequency(faa->fa_node, "pp_clk");

	printf("\n");

	/* Port attachment needs mdio etc.; defer the rest. */
	config_defer(self, mvpp2_attach_deferred);
}
482 
/*
 * Second attach stage, run once all other drivers had a chance to
 * attach: set up the controller-global state (AXI attributes,
 * aggregated tx queues, FIFOs, buffer manager, parser, classifier)
 * and attach a child device for every node under the controller.
 */
void
mvpp2_attach_deferred(struct device *self)
{
	struct mvpp2_softc *sc = (void *)self;
	struct mvpp2_attach_args maa;
	struct mvpp2_tx_queue *txq;
	int i, node;

	mvpp2_axi_config(sc);

	/* Clear the SMI polling enable bit in the interface window. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh_iface, MVPP22_SMI_MISC_CFG_REG,
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh_iface,
	    MVPP22_SMI_MISC_CFG_REG) & ~MVPP22_SMI_POLLING_EN);

	/* Allocate and initialize the aggregated (per-CPU) tx queues. */
	sc->sc_aggr_ntxq = 1;
	sc->sc_aggr_txqs = mallocarray(sc->sc_aggr_ntxq,
	    sizeof(*sc->sc_aggr_txqs), M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->sc_aggr_ntxq; i++) {
		txq = &sc->sc_aggr_txqs[i];
		txq->id = i;
		mvpp2_aggr_txq_hw_init(sc, txq);
	}

	mvpp2_rx_fifo_init(sc);
	mvpp2_tx_fifo_init(sc);

	mvpp2_write(sc, MVPP2_TX_SNOOP_REG, 0x1);

	mvpp2_bm_pool_init(sc);

	/* Software shadow of the parser TCAM/SRAM. */
	sc->sc_prs_shadow = mallocarray(MVPP2_PRS_TCAM_SRAM_SIZE,
	    sizeof(*sc->sc_prs_shadow), M_DEVBUF, M_WAITOK | M_ZERO);

	mvpp2_prs_default_init(sc);
	mvpp2_cls_init(sc);

	/* Attach the ports (one per child node). */
	memset(&maa, 0, sizeof(maa));
	for (node = OF_child(sc->sc_node); node; node = OF_peer(node)) {
		maa.ma_node = node;
		maa.ma_dmat = sc->sc_dmat;
		config_found(self, &maa, NULL);
	}
}
527 
/*
 * Program the AXI attributes and codes used by the controller's DMA
 * masters: cacheable attributes for descriptor/data/BM traffic,
 * non-cacheable system-domain codes for "normal" accesses and
 * cacheable outer-domain codes for snooped accesses.
 */
void
mvpp2_axi_config(struct mvpp2_softc *sc)
{
	uint32_t reg;

	mvpp2_write(sc, MVPP22_BM_ADDR_HIGH_RLS_REG, 0);

	/* Write-path attributes: cacheable writes, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_BM_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_RX_DATA_WR_ATTR_REG, reg);

	/* Read-path attributes: cacheable reads, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_BM_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TX_DATA_RD_ATTR_REG, reg);

	/* Normal (non-snooped) accesses: non-cacheable, system domain. */
	reg = (MVPP22_AXI_CODE_CACHE_NON_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_SYSTEM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_RD_NORMAL_CODE_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_WR_NORMAL_CODE_REG, reg);

	/* Snooped reads: cacheable, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_RD_SNOOP_CODE_REG, reg);

	/* Snooped writes: cacheable, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_WR_SNOOP_CODE_REG, reg);
}
562 
/*
 * Set up the hardware buffer manager (BM) pools: allocate pool memory,
 * point the BM at it, drain any stale buffers left over from the
 * firmware, then fill each pool with freshly allocated mbufs.  One
 * pool per CPU, capped at the number of hardware pools.
 */
void
mvpp2_bm_pool_init(struct mvpp2_softc *sc)
{
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	uint64_t phys, virt;
	int i, j, inuse;

	/* Mask and clear all BM pool interrupts. */
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		mvpp2_write(sc, MVPP2_BM_INTR_MASK_REG(i), 0);
		mvpp2_write(sc, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* One pool per CPU, limited by what the hardware provides. */
	sc->sc_npools = ncpus;
	sc->sc_npools = min(sc->sc_npools, MVPP2_BM_POOLS_NUM);

	sc->sc_bm_pools = mallocarray(sc->sc_npools, sizeof(*sc->sc_bm_pools),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->sc_npools; i++) {
		bm = &sc->sc_bm_pools[i];
		/* Two 64-bit words (phys + virt cookie) per buffer. */
		bm->bm_mem = mvpp2_dmamem_alloc(sc,
		    MVPP2_BM_SIZE * sizeof(uint64_t) * 2,
		    MVPP2_BM_POOL_PTR_ALIGN);
		KASSERT(bm->bm_mem != NULL);
		bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(bm->bm_mem), 0,
		    MVPP2_DMA_LEN(bm->bm_mem),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Stop the pool before re-pointing its memory. */
		mvpp2_write(sc, MVPP2_BM_POOL_CTRL_REG(i),
		    mvpp2_read(sc, MVPP2_BM_POOL_CTRL_REG(i)) |
		    MVPP2_BM_STOP_MASK);

		/* Program pool base (split low/high) and size. */
		mvpp2_write(sc, MVPP2_BM_POOL_BASE_REG(i),
		    (uint64_t)MVPP2_DMA_DVA(bm->bm_mem) & 0xffffffff);
		mvpp2_write(sc, MVPP22_BM_POOL_BASE_HIGH_REG,
		    ((uint64_t)MVPP2_DMA_DVA(bm->bm_mem) >> 32)
		    & MVPP22_BM_POOL_BASE_HIGH_MASK);
		mvpp2_write(sc, MVPP2_BM_POOL_SIZE_REG(i),
		    MVPP2_BM_SIZE);

		mvpp2_write(sc, MVPP2_BM_POOL_CTRL_REG(i),
		    mvpp2_read(sc, MVPP2_BM_POOL_CTRL_REG(i)) |
		    MVPP2_BM_START_MASK);

		/*
		 * U-Boot might not have cleaned its pools.  The pool needs
		 * to be empty before we fill it, otherwise our packets are
		 * written to wherever U-Boot allocated memory.  Cleaning it
		 * up ourselves is worrying as well, since the BM's pages are
		 * probably in our own memory.  Best we can do is stop the BM,
		 * set new memory and drain the pool.
		 */
		inuse = mvpp2_read(sc, MVPP2_BM_POOL_PTRS_NUM_REG(i)) &
		    MVPP2_BM_POOL_PTRS_NUM_MASK;
		inuse += mvpp2_read(sc, MVPP2_BM_BPPI_PTRS_NUM_REG(i)) &
		    MVPP2_BM_BPPI_PTRS_NUM_MASK;
		if (inuse)
			inuse++;
		/* Each read pops one stale buffer from the pool. */
		for (j = 0; j < inuse; j++)
			mvpp2_read(sc, MVPP2_BM_PHY_ALLOC_REG(i));

		mvpp2_write(sc, MVPP2_POOL_BUF_SIZE_REG(i),
		    roundup(MCLBYTES, 1 << MVPP2_POOL_BUF_SIZE_OFFSET));

		bm->rxbuf = mallocarray(MVPP2_BM_SIZE, sizeof(struct mvpp2_buf),
		    M_DEVBUF, M_WAITOK);
		bm->freelist = mallocarray(MVPP2_BM_SIZE, sizeof(*bm->freelist),
		    M_DEVBUF, M_WAITOK | M_ZERO);

		/* Create a single-segment DMA map per buffer slot. */
		for (j = 0; j < MVPP2_BM_SIZE; j++) {
			rxb = &bm->rxbuf[j];
			bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->mb_map);
			rxb->mb_m = NULL;
		}

		/* Use pool-id and rxbuf index as cookie. */
		for (j = 0; j < MVPP2_BM_SIZE; j++)
			bm->freelist[j] = (i << 16) | (j << 0);

		/* Fill the pool: release one (virt cookie, phys) pair each. */
		for (j = 0; j < MVPP2_BM_SIZE; j++) {
			rxb = &bm->rxbuf[j];
			rxb->mb_m = mvpp2_alloc_mbuf(sc, rxb->mb_map);
			if (rxb->mb_m == NULL)
				break;

			/* Take a cookie off the freelist. */
			KASSERT(bm->freelist[bm->free_cons] != -1);
			virt = bm->freelist[bm->free_cons];
			bm->freelist[bm->free_cons] = -1;
			bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;

			/* High bits first, then the low-word release writes. */
			phys = rxb->mb_map->dm_segs[0].ds_addr;
			mvpp2_write(sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
			    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
			    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
			    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
			mvpp2_write(sc, MVPP2_BM_VIRT_RLS_REG,
			    virt & 0xffffffff);
			mvpp2_write(sc, MVPP2_BM_PHY_RLS_REG(i),
			    phys & 0xffffffff);
		}
	}
}
667 
668 void
669 mvpp2_rx_fifo_init(struct mvpp2_softc *sc)
670 {
671 	int i;
672 
673 	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
674 	    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
675 	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
676 	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
677 
678 	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
679 	    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
680 	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
681 	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
682 
683 	for (i = 2; i < MVPP2_MAX_PORTS; i++) {
684 		mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(i),
685 		    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
686 		mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(i),
687 		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
688 	}
689 
690 	mvpp2_write(sc, MVPP2_RX_MIN_PKT_SIZE_REG, MVPP2_RX_FIFO_PORT_MIN_PKT);
691 	mvpp2_write(sc, MVPP2_RX_FIFO_INIT_REG, 0x1);
692 }
693 
694 void
695 mvpp2_tx_fifo_init(struct mvpp2_softc *sc)
696 {
697 	int i;
698 
699 	mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(0),
700 	    MVPP22_TX_FIFO_DATA_SIZE_10KB);
701 	mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(0),
702 	    MVPP2_TX_FIFO_THRESHOLD_10KB);
703 
704 	for (i = 1; i < MVPP2_MAX_PORTS; i++) {
705 		mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(i),
706 		    MVPP22_TX_FIFO_DATA_SIZE_3KB);
707 		mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(i),
708 		    MVPP2_TX_FIFO_THRESHOLD_3KB);
709 	}
710 }
711 
712 int
713 mvpp2_prs_default_init(struct mvpp2_softc *sc)
714 {
715 	int i, j, ret;
716 
717 	mvpp2_write(sc, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
718 
719 	for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
720 		mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, i);
721 		for (j = 0; j < MVPP2_PRS_TCAM_WORDS; j++)
722 			mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(j), 0);
723 
724 		mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, i);
725 		for (j = 0; j < MVPP2_PRS_SRAM_WORDS; j++)
726 			mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(j), 0);
727 	}
728 
729 	for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++)
730 		mvpp2_prs_hw_inv(sc, i);
731 
732 	for (i = 0; i < MVPP2_MAX_PORTS; i++)
733 		mvpp2_prs_hw_port_init(sc, i, MVPP2_PRS_LU_MH,
734 		    MVPP2_PRS_PORT_LU_MAX, 0);
735 
736 	mvpp2_prs_def_flow_init(sc);
737 	mvpp2_prs_mh_init(sc);
738 	mvpp2_prs_mac_init(sc);
739 	mvpp2_prs_dsa_init(sc);
740 	ret = mvpp2_prs_etype_init(sc);
741 	if (ret)
742 		return ret;
743 	ret = mvpp2_prs_vlan_init(sc);
744 	if (ret)
745 		return ret;
746 	ret = mvpp2_prs_pppoe_init(sc);
747 	if (ret)
748 		return ret;
749 	ret = mvpp2_prs_ip6_init(sc);
750 	if (ret)
751 		return ret;
752 	ret = mvpp2_prs_ip4_init(sc);
753 	if (ret)
754 		return ret;
755 
756 	return 0;
757 }
758 
/* Invalidate parser TCAM entry `index' by setting its invalid bit. */
void
mvpp2_prs_hw_inv(struct mvpp2_softc *sc, int index)
{
	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
	    MVPP2_PRS_TCAM_INV_MASK);
}
766 
/*
 * Program the parser's per-port lookup parameters: the first lookup id
 * (lu_first), the maximum number of lookup iterations (lu_max) and the
 * initial packet offset (offset).  Each field lives in a shared
 * register, so read-modify-write only this port's bits.
 */
void
mvpp2_prs_hw_port_init(struct mvpp2_softc *sc, int port,
    int lu_first, int lu_max, int offset)
{
	uint32_t reg;

	/* Initial lookup id. */
	reg = mvpp2_read(sc, MVPP2_PRS_INIT_LOOKUP_REG);
	reg &= ~MVPP2_PRS_PORT_LU_MASK(port);
	reg |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(sc, MVPP2_PRS_INIT_LOOKUP_REG, reg);

	/* Maximum lookup iterations. */
	reg = mvpp2_read(sc, MVPP2_PRS_MAX_LOOP_REG(port));
	reg &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	reg |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(sc, MVPP2_PRS_MAX_LOOP_REG(port), reg);

	/* Initial packet offset. */
	reg = mvpp2_read(sc, MVPP2_PRS_INIT_OFFS_REG(port));
	reg &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	reg |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(sc, MVPP2_PRS_INIT_OFFS_REG(port), reg);
}
788 
789 void
790 mvpp2_prs_def_flow_init(struct mvpp2_softc *sc)
791 {
792 	struct mvpp2_prs_entry pe;
793 	int i;
794 
795 	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
796 		memset(&pe, 0, sizeof(pe));
797 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
798 		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - i;
799 		mvpp2_prs_tcam_port_map_set(&pe, 0);
800 		mvpp2_prs_sram_ai_update(&pe, i, MVPP2_PRS_FLOW_ID_MASK);
801 		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
802 		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_FLOWS);
803 		mvpp2_prs_hw_write(sc, &pe);
804 	}
805 }
806 
/*
 * Install the default Marvell header (MH) entry: for all ports, skip
 * over the Marvell header and continue the lookup at the MAC stage.
 */
void
mvpp2_prs_mh_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));
	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	/* Shift past the Marvell header before the next lookup. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(sc, &pe);
}
822 
/*
 * Install the default MAC stage entries.  The non-promiscuous default
 * drops any frame that did not match a more specific MAC entry, then
 * the drop-all and unicast/multicast promiscuous entries are set up
 * (last argument 0 presumably means "disabled" — confirm against the
 * mvpp2_prs_mac_*_set() implementations).
 */
void
mvpp2_prs_mac_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
	/* Unmatched frames get the drop result. */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
	    MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(sc, &pe);
	mvpp2_prs_mac_drop_all_set(sc, 0, 0);
	mvpp2_prs_mac_promisc_set(sc, 0, MVPP2_PRS_L2_UNI_CAST, 0);
	mvpp2_prs_mac_promisc_set(sc, 0, MVPP2_PRS_L2_MULTI_CAST, 0);
}
842 
/*
 * Install the DSA/EDSA tag entries (tagged and untagged variants for
 * both tag formats, plus the ethertype-based variants) and a default
 * DSA entry that falls through to the VLAN stage without shifting.
 */
void
mvpp2_prs_dsa_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	/* No DSA tag present: continue without shifting the packet. */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/*
	 * NOTE(review): the shadow records LU_MAC here although the entry's
	 * TCAM lookup is LU_DSA — looks intentional, but worth confirming.
	 */
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_hw_write(sc, &pe);
}
866 
/*
 * Populate the parser's L2 ethertype stage: classify PPPoE, ARP, LBTD,
 * IPv4 (with and without options) and IPv6 frames, and install a
 * catch-all entry for unknown ethertypes.  Returns 0 on success, or a
 * negative value when no free TCAM entry is available.
 */
int
mvpp2_prs_etype_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_PPPOE);
	/* Skip the PPPoE header before the next lookup stage. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_ARP);
	/* ARP needs no further parsing: generate the lookup done bit. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Record the L3 offset right after the ethertype. */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Flag the frame as CPU-bound special traffic. */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_IP);
	/* Also match version 4 and IHL == 5 in the first header byte. */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip ethertype plus 4 bytes of the IP header. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	pe.index = tid;

	/*
	 * No memset here: this entry deliberately reuses the previous
	 * IPv4 entry, clearing only the first-header-byte match and the
	 * result-info words so the IHL constraint is dropped.
	 */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD, MVPP2_PRS_IPV4_HEAD_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_IPV6);
	/* Skip ethertype + 8 fixed header bytes + the address fields. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
	    MVPP2_MAX_L3_ADDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1031 
/*
 * Initialize the parser's VLAN stage: allocate the double-VLAN shadow
 * table and install entries for QinQ/double-tagged, single-tagged and
 * untagged frames.  Returns 0 on success or the error from the
 * entry-add helpers.
 */
int
mvpp2_prs_vlan_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int ret;

	/* Shadow table for double-VLAN entry bookkeeping; zeroed. */
	sc->sc_prs_double_vlans = mallocarray(MVPP2_PRS_DBL_VLANS_MAX,
	    sizeof(*sc->sc_prs_double_vlans), M_DEVBUF, M_WAITOK | M_ZERO);

	/* Double-tagged: 802.1ad outer + 802.1Q inner, and 802.1Q twice. */
	ret = mvpp2_prs_double_vlan_add(sc, ETHERTYPE_VLAN, ETHERTYPE_QINQ,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_double_vlan_add(sc, ETHERTYPE_VLAN, ETHERTYPE_VLAN,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	/* Single-tagged: 802.1ad and 802.1Q. */
	ret = mvpp2_prs_vlan_add(sc, ETHERTYPE_QINQ, MVPP2_PRS_SINGLE_VLAN_AI,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_vlan_add(sc, ETHERTYPE_VLAN, MVPP2_PRS_SINGLE_VLAN_AI,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;

	/* Default double-VLAN entry: continue with the L2 stage. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	/* Clear the AI bits for the next iteration. */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
	    MVPP2_PRS_RI_VLAN_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
	    MVPP2_PRS_DBL_VLAN_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry for frames without any VLAN tag. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
	    MVPP2_PRS_RI_VLAN_MASK);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1083 
/*
 * Initialize the parser's PPPoE stage: classify IPv4 (with and without
 * options) and IPv6 payloads inside PPPoE sessions, plus a catch-all
 * entry for unknown PPP protocols.  Returns 0 on success, or a negative
 * value when no free TCAM entry is available.
 */
int
mvpp2_prs_pppoe_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* PPP protocol: IPv4 (header with options). */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, PPP_IP);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip the PPP protocol field plus 4 bytes of the IP header. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* PPP protocol: IPv4 without options. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/*
	 * No memset here: this entry deliberately reuses the previous
	 * PPP_IP entry, tightening the match to version 4 / IHL == 5 and
	 * replacing the result info with the no-options code.
	 */
	pe.index = tid;
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* PPP protocol: IPv6. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown PPP protocol, mark L3 as unknown and finish. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1162 
/*
 * Initialize the parser's IPv6 stage: install L4 protocol entries
 * (TCP, UDP, ICMPv6, IP-in-IP), multicast classification, and default
 * entries for unknown protocols, extension headers and addresses.
 * Returns 0 on success or a negative value on TCAM exhaustion/error.
 */
int
mvpp2_prs_ip6_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid, ret;

	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	/* ICMPv6 is flagged as CPU-bound special traffic. */
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_ICMPV6,
	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_IPIP, MVPP2_PRS_RI_UDF7_IP6_LITE,
	    MVPP2_PRS_RI_UDF7_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
	if (ret)
		return ret;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/*
	 * Entry matching header byte 1 == 0 and setting the drop bit;
	 * NOTE(review): this appears to drop packets with hop limit 0,
	 * as in the Marvell reference driver — confirm.
	 */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe,
	    MVPP2_PRS_RI_L3_UN | MVPP2_PRS_RI_DROP_MASK,
	    MVPP2_PRS_RI_L3_PROTO_MASK | MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/*
	 * NOTE(review): the shadow records MVPP2_PRS_LU_IP4 for this and
	 * the next two IPv6 entries; this matches the Marvell reference
	 * driver, but verify it is intentional.
	 */
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown L4 protocol without extension headers. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown L4 protocol with extension headers present. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown address, mark unicast and re-enter IP6 stage. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to re-examine the header in the next pass. */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1253 
/*
 * Initialize the parser's IPv4 stage: install L4 protocol entries
 * (TCP, UDP, IGMP), broadcast/multicast classification, and default
 * entries for unknown protocols and addresses.  Returns 0 on success
 * or a negative value on TCAM exhaustion/error.
 */
int
mvpp2_prs_ip4_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int ret;

	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	/* IGMP is flagged as CPU-bound special traffic. */
	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_IGMP,
	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_BROAD_CAST);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
	if (ret)
		return ret;

	/* Default: unknown L4 protocol, re-enter IP4 stage for the DIP. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	/* Advance 12 bytes within the header for the next pass. */
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown destination address, mark unicast and finish. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1311 
1312 int
1313 mvpp2_port_match(struct device *parent, void *cfdata, void *aux)
1314 {
1315 	struct mvpp2_attach_args *maa = aux;
1316 	char buf[32];
1317 
1318 	if (OF_getprop(maa->ma_node, "status", buf, sizeof(buf)) > 0 &&
1319 	    strcmp(buf, "disabled") == 0)
1320 		return 0;
1321 
1322 	return 1;
1323 }
1324 
1325 void
1326 mvpp2_port_attach(struct device *parent, struct device *self, void *aux)
1327 {
1328 	struct mvpp2_port *sc = (void *)self;
1329 	struct mvpp2_attach_args *maa = aux;
1330 	struct mvpp2_tx_queue *txq;
1331 	struct mvpp2_rx_queue *rxq;
1332 	struct ifnet *ifp;
1333 	uint32_t phy, reg;
1334 	int i, idx, len, node;
1335 	int mii_flags = 0;
1336 	char *phy_mode;
1337 	char *managed;
1338 
1339 	sc->sc = (void *)parent;
1340 	sc->sc_node = maa->ma_node;
1341 	sc->sc_dmat = maa->ma_dmat;
1342 
1343 	sc->sc_id = OF_getpropint(sc->sc_node, "port-id", 0);
1344 	sc->sc_gop_id = OF_getpropint(sc->sc_node, "gop-port-id", 0);
1345 	sc->sc_sfp = OF_getpropint(sc->sc_node, "sfp", 0);
1346 
1347 	len = OF_getproplen(sc->sc_node, "phy-mode");
1348 	if (len <= 0) {
1349 		printf("%s: cannot extract phy-mode\n", self->dv_xname);
1350 		return;
1351 	}
1352 
1353 	phy_mode = malloc(len, M_TEMP, M_WAITOK);
1354 	OF_getprop(sc->sc_node, "phy-mode", phy_mode, len);
1355 	if (!strncmp(phy_mode, "10gbase-r", strlen("10gbase-r")))
1356 		sc->sc_phy_mode = PHY_MODE_10GBASER;
1357 	else if (!strncmp(phy_mode, "10gbase-kr", strlen("10gbase-kr")))
1358 		sc->sc_phy_mode = PHY_MODE_10GBASER;
1359 	else if (!strncmp(phy_mode, "2500base-x", strlen("2500base-x")))
1360 		sc->sc_phy_mode = PHY_MODE_2500BASEX;
1361 	else if (!strncmp(phy_mode, "1000base-x", strlen("1000base-x")))
1362 		sc->sc_phy_mode = PHY_MODE_1000BASEX;
1363 	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
1364 		sc->sc_phy_mode = PHY_MODE_SGMII;
1365 	else if (!strncmp(phy_mode, "rgmii-rxid", strlen("rgmii-rxid")))
1366 		sc->sc_phy_mode = PHY_MODE_RGMII_RXID;
1367 	else if (!strncmp(phy_mode, "rgmii-txid", strlen("rgmii-txid")))
1368 		sc->sc_phy_mode = PHY_MODE_RGMII_TXID;
1369 	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
1370 		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
1371 	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
1372 		sc->sc_phy_mode = PHY_MODE_RGMII;
1373 	else {
1374 		printf("%s: cannot use phy-mode %s\n", self->dv_xname,
1375 		    phy_mode);
1376 		return;
1377 	}
1378 	free(phy_mode, M_TEMP, len);
1379 
1380 	/* Lookup PHY. */
1381 	phy = OF_getpropint(sc->sc_node, "phy", 0);
1382 	if (phy) {
1383 		node = OF_getnodebyphandle(phy);
1384 		if (!node) {
1385 			printf(": no phy\n");
1386 			return;
1387 		}
1388 		sc->sc_mdio = mii_byphandle(phy);
1389 		sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
1390 		sc->sc_sfp = OF_getpropint(node, "sfp", sc->sc_sfp);
1391 		sc->sc_mii.mii_node = node;
1392 	}
1393 
1394 	if (sc->sc_sfp)
1395 		config_mountroot(self, mvpp2_port_attach_sfp);
1396 
1397 	if ((len = OF_getproplen(sc->sc_node, "managed")) >= 0) {
1398 		managed = malloc(len, M_TEMP, M_WAITOK);
1399 		OF_getprop(sc->sc_node, "managed", managed, len);
1400 		if (!strncmp(managed, "in-band-status",
1401 		    strlen("in-band-status")))
1402 			sc->sc_inband_status = 1;
1403 		free(managed, M_TEMP, len);
1404 	}
1405 
1406 	if (OF_getprop(sc->sc_node, "local-mac-address",
1407 	    &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
1408 		memset(sc->sc_lladdr, 0xff, sizeof(sc->sc_lladdr));
1409 	printf(": address %s\n", ether_sprintf(sc->sc_lladdr));
1410 
1411 	sc->sc_ntxq = sc->sc_nrxq = 1;
1412 	sc->sc_txqs = mallocarray(sc->sc_ntxq, sizeof(*sc->sc_txqs),
1413 	    M_DEVBUF, M_WAITOK | M_ZERO);
1414 	sc->sc_rxqs = mallocarray(sc->sc_nrxq, sizeof(*sc->sc_rxqs),
1415 	    M_DEVBUF, M_WAITOK | M_ZERO);
1416 
1417 	for (i = 0; i < sc->sc_ntxq; i++) {
1418 		txq = &sc->sc_txqs[i];
1419 		txq->id = mvpp2_txq_phys(sc->sc_id, i);
1420 		txq->log_id = i;
1421 		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
1422 	}
1423 
1424 	sc->sc_tx_time_coal = MVPP2_TXDONE_COAL_USEC;
1425 
1426 	for (i = 0; i < sc->sc_nrxq; i++) {
1427 		rxq = &sc->sc_rxqs[i];
1428 		rxq->id = sc->sc_id * 32 + i;
1429 		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
1430 		rxq->time_coal = MVPP2_RX_COAL_USEC;
1431 	}
1432 
1433 	mvpp2_egress_disable(sc);
1434 	mvpp2_port_disable(sc);
1435 
1436 	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_GROUP_INDEX_REG,
1437 	    sc->sc_id << MVPP2_ISR_RXQ_GROUP_INDEX_GROUP_SHIFT |
1438 	    0 /* queue vector id */);
1439 	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_REG,
1440 	    sc->sc_nrxq << MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_SIZE_SHIFT |
1441 	    0 /* first rxq */);
1442 
1443 	mvpp2_ingress_disable(sc);
1444 	mvpp2_defaults_set(sc);
1445 
1446 	mvpp2_cls_oversize_rxq_set(sc);
1447 	mvpp2_cls_port_config(sc);
1448 
1449 	/*
1450 	 * We have one pool per core, so all RX queues on a specific
1451 	 * core share that pool.  Also long and short uses the same
1452 	 * pool.
1453 	 */
1454 	for (i = 0; i < sc->sc_nrxq; i++) {
1455 		mvpp2_rxq_long_pool_set(sc, i, i);
1456 		mvpp2_rxq_short_pool_set(sc, i, i);
1457 	}
1458 
1459 	mvpp2_mac_reset_assert(sc);
1460 	mvpp2_pcs_reset_assert(sc);
1461 
1462 	timeout_set(&sc->sc_tick, mvpp2_tick, sc);
1463 
1464 	ifp = &sc->sc_ac.ac_if;
1465 	ifp->if_softc = sc;
1466 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1467 	ifp->if_ioctl = mvpp2_ioctl;
1468 	ifp->if_start = mvpp2_start;
1469 	ifp->if_watchdog = mvpp2_watchdog;
1470 	ifq_init_maxlen(&ifp->if_snd, MVPP2_NTXDESC - 1);
1471 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1472 
1473 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1474 
1475 	sc->sc_mii.mii_ifp = ifp;
1476 	sc->sc_mii.mii_readreg = mvpp2_mii_readreg;
1477 	sc->sc_mii.mii_writereg = mvpp2_mii_writereg;
1478 	sc->sc_mii.mii_statchg = mvpp2_mii_statchg;
1479 
1480 	ifmedia_init(&sc->sc_media, 0, mvpp2_media_change, mvpp2_media_status);
1481 
1482 	if (sc->sc_mdio) {
1483 		switch (sc->sc_phy_mode) {
1484 		case PHY_MODE_1000BASEX:
1485 			mii_flags |= MIIF_IS_1000X;
1486 			break;
1487 		case PHY_MODE_SGMII:
1488 			mii_flags |= MIIF_SGMII;
1489 			break;
1490 		case PHY_MODE_RGMII_ID:
1491 			mii_flags |= MIIF_RXID | MIIF_TXID;
1492 			break;
1493 		case PHY_MODE_RGMII_RXID:
1494 			mii_flags |= MIIF_RXID;
1495 			break;
1496 		case PHY_MODE_RGMII_TXID:
1497 			mii_flags |= MIIF_TXID;
1498 			break;
1499 		default:
1500 			break;
1501 		}
1502 		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
1503 		    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY,
1504 		    mii_flags);
1505 		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1506 			printf("%s: no PHY found!\n", self->dv_xname);
1507 			ifmedia_add(&sc->sc_mii.mii_media,
1508 			    IFM_ETHER|IFM_MANUAL, 0, NULL);
1509 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
1510 		} else
1511 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1512 	} else {
1513 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
1514 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1515 
1516 		if (sc->sc_inband_status) {
1517 			switch (sc->sc_phy_mode) {
1518 			case PHY_MODE_1000BASEX:
1519 				sc->sc_mii.mii_media_active =
1520 				    IFM_ETHER|IFM_1000_KX|IFM_FDX;
1521 				break;
1522 			case PHY_MODE_2500BASEX:
1523 				sc->sc_mii.mii_media_active =
1524 				    IFM_ETHER|IFM_2500_KX|IFM_FDX;
1525 				break;
1526 			case PHY_MODE_10GBASER:
1527 				sc->sc_mii.mii_media_active =
1528 				    IFM_ETHER|IFM_10G_KR|IFM_FDX;
1529 				break;
1530 			default:
1531 				break;
1532 			}
1533 			mvpp2_inband_statchg(sc);
1534 		} else {
1535 			sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
1536 			sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
1537 			mvpp2_mii_statchg(self);
1538 		}
1539 
1540 		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
1541 		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
1542 	}
1543 
1544 	if_attach(ifp);
1545 	ether_ifattach(ifp);
1546 
1547 	if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
1548 	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
1549 	    sc->sc_phy_mode == PHY_MODE_SGMII ||
1550 	    sc->sc_phy_mode == PHY_MODE_RGMII ||
1551 	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
1552 	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
1553 	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
1554 		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_MASK_REG);
1555 		reg |= MVPP2_GMAC_INT_CAUSE_LINK_CHANGE;
1556 		mvpp2_gmac_write(sc, MVPP2_GMAC_INT_MASK_REG, reg);
1557 	}
1558 
1559 	if (sc->sc_gop_id == 0) {
1560 		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_MASK_REG);
1561 		reg |= MV_XLG_INTERRUPT_LINK_CHANGE;
1562 		mvpp2_xlg_write(sc, MV_XLG_INTERRUPT_MASK_REG, reg);
1563 	}
1564 
1565 	mvpp2_gop_intr_unmask(sc);
1566 
1567 	idx = OF_getindex(sc->sc_node, "link", "interrupt-names");
1568 	if (idx >= 0)
1569 		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1570 		    mvpp2_link_intr, sc, sc->sc_dev.dv_xname);
1571 	idx = OF_getindex(sc->sc_node, "hif0", "interrupt-names");
1572 	if (idx < 0)
1573 		idx = OF_getindex(sc->sc_node, "tx-cpu0", "interrupt-names");
1574 	if (idx >= 0)
1575 		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1576 		    mvpp2_intr, sc, sc->sc_dev.dv_xname);
1577 }
1578 
1579 void
1580 mvpp2_port_attach_sfp(struct device *self)
1581 {
1582 	struct mvpp2_port *sc = (struct mvpp2_port *)self;
1583 	uint32_t reg;
1584 
1585 	rw_enter(&mvpp2_sff_lock, RW_WRITE);
1586 	sfp_disable(sc->sc_sfp);
1587 	sfp_add_media(sc->sc_sfp, &sc->sc_mii);
1588 	rw_exit(&mvpp2_sff_lock);
1589 
1590 	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
1591 	case IFM_10G_SR:
1592 	case IFM_10G_LR:
1593 	case IFM_10G_LRM:
1594 	case IFM_10G_ER:
1595 	case IFM_10G_SFP_CU:
1596 		sc->sc_phy_mode = PHY_MODE_10GBASER;
1597 		sc->sc_mii.mii_media_status = IFM_AVALID;
1598 		sc->sc_inband_status = 1;
1599 		break;
1600 	case IFM_2500_SX:
1601 		sc->sc_phy_mode = PHY_MODE_2500BASEX;
1602 		sc->sc_mii.mii_media_status = IFM_AVALID;
1603 		sc->sc_inband_status = 1;
1604 		break;
1605 	case IFM_1000_CX:
1606 	case IFM_1000_LX:
1607 	case IFM_1000_SX:
1608 	case IFM_1000_T:
1609 		sc->sc_phy_mode = PHY_MODE_1000BASEX;
1610 		sc->sc_mii.mii_media_status = IFM_AVALID;
1611 		sc->sc_inband_status = 1;
1612 		break;
1613 	}
1614 
1615 	if (sc->sc_inband_status) {
1616 		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_MASK_REG);
1617 		reg |= MVPP2_GMAC_INT_CAUSE_LINK_CHANGE;
1618 		mvpp2_gmac_write(sc, MVPP2_GMAC_INT_MASK_REG, reg);
1619 	}
1620 }
1621 
1622 uint32_t
1623 mvpp2_read(struct mvpp2_softc *sc, bus_addr_t addr)
1624 {
1625 	return bus_space_read_4(sc->sc_iot, sc->sc_ioh_base, addr);
1626 }
1627 
1628 void
1629 mvpp2_write(struct mvpp2_softc *sc, bus_addr_t addr, uint32_t data)
1630 {
1631 	bus_space_write_4(sc->sc_iot, sc->sc_ioh_base, addr, data);
1632 }
1633 
1634 uint32_t
1635 mvpp2_gmac_read(struct mvpp2_port *sc, bus_addr_t addr)
1636 {
1637 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1638 	    MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr);
1639 }
1640 
1641 void
1642 mvpp2_gmac_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1643 {
1644 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1645 	    MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr,
1646 	    data);
1647 }
1648 
1649 uint32_t
1650 mvpp2_xlg_read(struct mvpp2_port *sc, bus_addr_t addr)
1651 {
1652 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1653 	    MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr);
1654 }
1655 
1656 void
1657 mvpp2_xlg_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1658 {
1659 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1660 	    MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr,
1661 	    data);
1662 }
1663 
1664 uint32_t
1665 mvpp2_mpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
1666 {
1667 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1668 	    MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr);
1669 }
1670 
1671 void
1672 mvpp2_mpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1673 {
1674 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1675 	    MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr,
1676 	    data);
1677 }
1678 
1679 uint32_t
1680 mvpp2_xpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
1681 {
1682 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1683 	    MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr);
1684 }
1685 
1686 void
1687 mvpp2_xpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1688 {
1689 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1690 	    MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr,
1691 	    data);
1692 }
1693 
1694 static inline int
1695 mvpp2_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
1696 {
1697 	int error;
1698 
1699 	error = bus_dmamap_load_mbuf(dmat, map, m, BUS_DMA_NOWAIT);
1700 	if (error != EFBIG)
1701 		return (error);
1702 
1703 	error = m_defrag(m, M_DONTWAIT);
1704 	if (error != 0)
1705 		return (error);
1706 
1707 	return bus_dmamap_load_mbuf(dmat, map, m, BUS_DMA_NOWAIT);
1708 }
1709 
/*
 * Transmit start routine.  Packets are appended to the shared
 * aggregated TX queue (sc_aggr_txqs[0]), one descriptor per DMA
 * segment, and the hardware is kicked once at the end by writing
 * the total number of descriptors added.
 */
void
mvpp2_start(struct ifnet *ifp)
{
	struct mvpp2_port *sc = ifp->if_softc;
	struct mvpp2_tx_queue *txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_tx_desc *txd;
	struct mbuf *m;
	bus_dmamap_t map;
	uint32_t command;
	int i, current, first, last;
	int free, prod, used;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	/* Number of free descriptors between producer and consumer. */
	used = 0;
	prod = txq->prod;
	free = txq->cons;
	if (free <= prod)
		free += MVPP2_AGGR_TXQ_SIZE;
	free -= prod;

	for (;;) {
		/* Stop while a maximally-fragmented packet might not fit. */
		if (free <= MVPP2_NTXSEGS) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		first = last = current = prod;
		map = txq->buf[current].mb_map;

		/* Load (defragging once on EFBIG); drop packet on failure. */
		if (mvpp2_load_mbuf(sc->sc_dmat, map, m) != 0) {
			ifp->if_oerrors++;
			m_freem(m);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Checksum offload is not used on this path. */
		command = MVPP2_TXD_L4_CSUM_NOT |
		    MVPP2_TXD_IP_CSUM_DISABLE;
		for (i = 0; i < map->dm_nsegs; i++) {
			txd = &txq->descs[current];
			memset(txd, 0, sizeof(*txd));
			/*
			 * Buffer addresses are split into a 32-byte
			 * aligned base and a small byte offset.
			 */
			txd->buf_phys_addr_hw_cmd2 =
			    map->dm_segs[i].ds_addr & ~0x1f;
			txd->packet_offset =
			    map->dm_segs[i].ds_addr & 0x1f;
			txd->data_size = map->dm_segs[i].ds_len;
			txd->phys_txq = sc->sc_txqs[0].id;
			txd->command = command |
			    MVPP2_TXD_PADDING_DISABLE;
			/* Mark first and last descriptor of the packet. */
			if (i == 0)
				txd->command |= MVPP2_TXD_F_DESC;
			if (i == (map->dm_nsegs - 1))
				txd->command |= MVPP2_TXD_L_DESC;

			bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring),
			    current * sizeof(*txd), sizeof(*txd),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

			last = current;
			current = (current + 1) % MVPP2_AGGR_TXQ_SIZE;
			KASSERT(current != txq->cons);
		}

		/*
		 * Park the loaded map and the mbuf in the packet's last
		 * slot, where completion (mvpp2_txq_proc) unloads and
		 * frees them; the last slot's spare map moves to "first".
		 */
		KASSERT(txq->buf[last].mb_m == NULL);
		txq->buf[first].mb_map = txq->buf[last].mb_map;
		txq->buf[last].mb_map = map;
		txq->buf[last].mb_m = m;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		free -= map->dm_nsegs;
		used += map->dm_nsegs;
		prod = current;
	}

	/* Tell the hardware how many descriptors were appended. */
	if (used)
		mvpp2_write(sc->sc, MVPP2_AGGR_TXQ_UPDATE_REG, used);

	if (txq->prod != prod)
		txq->prod = prod;
}
1808 
/*
 * Interface ioctl handler, run at splnet.  ENETRESET from a subcase
 * (or from ether_ioctl() for multicast list changes) means only the
 * receive filter needs reprogramming, which mvpp2_iff() handles.
 */
int
mvpp2_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct mvpp2_port *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: only the RX filter may change. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				mvpp2_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				mvpp2_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = mvpp2_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCGIFSFFPAGE:
		/* Serialize SFP module access across ports. */
		error = rw_enter(&mvpp2_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
		rw_exit(&mvpp2_sff_lock);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			mvpp2_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
1866 
1867 int
1868 mvpp2_rxrinfo(struct mvpp2_port *sc, struct if_rxrinfo *ifri)
1869 {
1870 	struct mvpp2_rx_queue *rxq;
1871 	struct if_rxring_info *ifrs, *ifr;
1872 	unsigned int i;
1873 	int error;
1874 
1875 	ifrs = mallocarray(sc->sc_nrxq, sizeof(*ifrs), M_TEMP,
1876 	    M_WAITOK|M_ZERO|M_CANFAIL);
1877 	if (ifrs == NULL)
1878 		return (ENOMEM);
1879 
1880 	for (i = 0; i < sc->sc_nrxq; i++) {
1881 		rxq = &sc->sc_rxqs[i];
1882 		ifr = &ifrs[i];
1883 
1884 		snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), "%u", i);
1885 		ifr->ifr_size = MCLBYTES;
1886 		ifr->ifr_info = rxq->rxring;
1887 	}
1888 
1889 	error = if_rxr_info_ioctl(ifri, i, ifrs);
1890 	free(ifrs, M_TEMP, i * sizeof(*ifrs));
1891 
1892 	return (error);
1893 }
1894 
/*
 * Transmit watchdog; currently a stub that only logs that it fired.
 */
void
mvpp2_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}
1900 
1901 int
1902 mvpp2_media_change(struct ifnet *ifp)
1903 {
1904 	struct mvpp2_port *sc = ifp->if_softc;
1905 
1906 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1907 		mii_mediachg(&sc->sc_mii);
1908 
1909 	return (0);
1910 }
1911 
1912 void
1913 mvpp2_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1914 {
1915 	struct mvpp2_port *sc = ifp->if_softc;
1916 
1917 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1918 		mii_pollstat(&sc->sc_mii);
1919 
1920 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1921 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1922 }
1923 
/*
 * MII read: relay PHY register reads through the MDIO controller
 * this port was attached with.
 */
int
mvpp2_mii_readreg(struct device *self, int phy, int reg)
{
	struct mvpp2_port *sc = (void *)self;
	return sc->sc_mdio->md_readreg(sc->sc_mdio->md_cookie, phy, reg);
}
1930 
1931 void
1932 mvpp2_mii_writereg(struct device *self, int phy, int reg, int val)
1933 {
1934 	struct mvpp2_port *sc = (void *)self;
1935 	return sc->sc_mdio->md_writereg(sc->sc_mdio->md_cookie, phy, reg, val);
1936 }
1937 
void
mvpp2_mii_statchg(struct device *self)
{
	struct mvpp2_port *sc;

	/* PHY status changed: update the MAC's forced-link state. */
	sc = (struct mvpp2_port *)self;
	mvpp2_port_change(sc);
}
1944 
/*
 * Rebuild the MII media status from the MAC's own link registers
 * when in-band status signalling is used instead of an MII PHY,
 * then push the new state to the port via mvpp2_port_change().
 */
void
mvpp2_inband_statchg(struct mvpp2_port *sc)
{
	uint64_t subtype = IFM_SUBTYPE(sc->sc_mii.mii_media_active);
	uint32_t reg;

	sc->sc_mii.mii_media_status = IFM_AVALID;
	sc->sc_mii.mii_media_active = IFM_ETHER;

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		/* 10G: link state from the XLG MAC; always full duplex. */
		reg = mvpp2_xlg_read(sc, MV_XLG_MAC_PORT_STATUS_REG);
		if (reg & MV_XLG_MAC_PORT_STATUS_LINKSTATUS)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		sc->sc_mii.mii_media_active |= IFM_FDX;
		sc->sc_mii.mii_media_active |= subtype;
	} else {
		/* <= 2.5G: link, duplex and speed from the GMAC status. */
		reg = mvpp2_gmac_read(sc, MVPP2_PORT_STATUS0_REG);
		if (reg & MVPP2_PORT_STATUS0_LINKUP)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		if (reg & MVPP2_PORT_STATUS0_FULLDX)
			sc->sc_mii.mii_media_active |= IFM_FDX;
		/* BASE-X modes keep their fixed subtype; otherwise derive
		 * the speed from the GMII/MII speed status bits. */
		if (sc->sc_phy_mode == PHY_MODE_2500BASEX)
			sc->sc_mii.mii_media_active |= subtype;
		else if (sc->sc_phy_mode == PHY_MODE_1000BASEX)
			sc->sc_mii.mii_media_active |= subtype;
		else if (reg & MVPP2_PORT_STATUS0_GMIISPEED)
			sc->sc_mii.mii_media_active |= IFM_1000_T;
		else if (reg & MVPP2_PORT_STATUS0_MIISPEED)
			sc->sc_mii.mii_media_active |= IFM_100_TX;
		else
			sc->sc_mii.mii_media_active |= IFM_10_T;
	}

	mvpp2_port_change(sc);
}
1981 
/*
 * Apply the current media state to the hardware.  With in-band
 * status the MAC tracks the link itself and nothing is forced;
 * otherwise force link up or down and, on link up, program speed
 * and duplex to match the negotiated media.
 */
void
mvpp2_port_change(struct mvpp2_port *sc)
{
	uint32_t reg;

	sc->sc_link = !!(sc->sc_mii.mii_media_status & IFM_ACTIVE);

	if (sc->sc_inband_status)
		return;

	if (sc->sc_link) {
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			/* XLG MAC: just force the link up. */
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKPASS;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			/* GMAC: force link up and set speed/duplex bits. */
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
			reg |= MVPP2_GMAC_FORCE_LINK_PASS;
			reg &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			/* All gigabit-class subtypes use the GMII speed bit. */
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_2500_KX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_2500_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_CX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_LX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_KX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T)
				reg |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX)
				reg |= MVPP2_GMAC_CONFIG_MII_SPEED;
			if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
				reg |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	} else {
		/* Link lost: force the appropriate MAC's link down. */
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
			reg |= MVPP2_GMAC_FORCE_LINK_DOWN;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	}
}
2035 
/*
 * Once-a-second timeout: poll the PHY for link changes (at splnet)
 * and reschedule ourselves.
 */
void
mvpp2_tick(void *arg)
{
	struct mvpp2_port *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}
2048 
2049 int
2050 mvpp2_link_intr(void *arg)
2051 {
2052 	struct mvpp2_port *sc = arg;
2053 	uint32_t reg;
2054 	int event = 0;
2055 
2056 	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
2057 	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
2058 		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_CAUSE_REG);
2059 		if (reg & MV_XLG_INTERRUPT_LINK_CHANGE)
2060 			event = 1;
2061 	} else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
2062 	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
2063 	    sc->sc_phy_mode == PHY_MODE_SGMII ||
2064 	    sc->sc_phy_mode == PHY_MODE_RGMII ||
2065 	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
2066 	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
2067 	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
2068 		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_CAUSE_REG);
2069 		if (reg & MVPP2_GMAC_INT_CAUSE_LINK_CHANGE)
2070 			event = 1;
2071 	}
2072 
2073 	if (event && sc->sc_inband_status)
2074 		mvpp2_inband_statchg(sc);
2075 
2076 	return (1);
2077 }
2078 
/*
 * Per-port RX/TX interrupt handler: read the cause register,
 * acknowledge the misc summary, then run TX completion and RX
 * processing for the queues flagged in the cause bits.
 */
int
mvpp2_intr(void *arg)
{
	struct mvpp2_port *sc = arg;
	uint32_t reg;

	reg = mvpp2_read(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id));
	if (reg & MVPP2_CAUSE_MISC_SUM_MASK) {
		/* Ack misc causes and clear the summary bit. */
		mvpp2_write(sc->sc, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id),
		    reg & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}
	if (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_tx_proc(sc,
		    (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK) >>
		    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET);

	if (reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_rx_proc(sc,
		    reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK);

	return (1);
}
2102 
2103 void
2104 mvpp2_tx_proc(struct mvpp2_port *sc, uint8_t queues)
2105 {
2106 	struct mvpp2_tx_queue *txq;
2107 	int i;
2108 
2109 	for (i = 0; i < sc->sc_ntxq; i++) {
2110 		txq = &sc->sc_txqs[i];
2111 		if ((queues & (1 << i)) == 0)
2112 			continue;
2113 		mvpp2_txq_proc(sc, txq);
2114 	}
2115 }
2116 
/*
 * TX completion for one logical queue: the hardware reports how many
 * descriptors were sent since the last read; unload and free that
 * many packets from the shared aggregated queue, in order.
 */
void
mvpp2_txq_proc(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvpp2_tx_queue *aggr_txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_buf *txb;
	int i, idx, nsent;

	/* XXX: this is a percpu register! */
	nsent = (mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id)) &
	    MVPP2_TRANSMITTED_COUNT_MASK) >>
	    MVPP2_TRANSMITTED_COUNT_OFFSET;

	for (i = 0; i < nsent; i++) {
		idx = aggr_txq->cons;
		KASSERT(idx < MVPP2_AGGR_TXQ_SIZE);

		/* Only the packet's last slot carries a loaded map/mbuf
		 * (see mvpp2_start); intermediate slots are skipped. */
		txb = &aggr_txq->buf[idx];
		if (txb->mb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
			    txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->mb_map);

			m_freem(txb->mb_m);
			txb->mb_m = NULL;
		}

		aggr_txq->cons = (aggr_txq->cons + 1) % MVPP2_AGGR_TXQ_SIZE;
	}

	/* Descriptors were reclaimed, so transmit may resume. */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
2150 
2151 void
2152 mvpp2_rx_proc(struct mvpp2_port *sc, uint8_t queues)
2153 {
2154 	struct mvpp2_rx_queue *rxq;
2155 	int i;
2156 
2157 	for (i = 0; i < sc->sc_nrxq; i++) {
2158 		rxq = &sc->sc_rxqs[i];
2159 		if ((queues & (1 << i)) == 0)
2160 			continue;
2161 		mvpp2_rxq_proc(sc, rxq);
2162 	}
2163 
2164 	mvpp2_rx_refill(sc);
2165 }
2166 
/*
 * Drain received frames from one RX queue: hand the filled mbufs to
 * the network stack and record each buffer's pool index on the
 * per-CPU freelist so mvpp2_rx_refill() can replenish it later.
 */
void
mvpp2_rxq_proc(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mvpp2_rx_desc *rxd;
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	struct mbuf *m;
	uint64_t virt;
	uint32_t i, nrecv, pool;

	nrecv = mvpp2_rxq_received(sc, rxq->id);
	if (!nrecv)
		return;

	/* One buffer-manager pool per CPU. */
	pool = curcpu()->ci_cpuid;
	KASSERT(pool < sc->sc->sc_npools);
	bm = &sc->sc->sc_bm_pools[pool];

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < nrecv; i++) {
		rxd = &rxq->descs[rxq->cons];
		/* The cookie encodes pool (bits 16-31) and buffer index
		 * (bits 0-15) as set up at fill time. */
		virt = rxd->buf_cookie_bm_qset_cls_info;
		KASSERT(((virt >> 16) & 0xffff) == pool);
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m != NULL);

		bus_dmamap_sync(sc->sc_dmat, rxb->mb_map, 0,
		    rxd->data_size, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->mb_map);

		m = rxb->mb_m;
		rxb->mb_m = NULL;

		/* Strip the Marvell header in front of the frame. */
		m->m_pkthdr.len = m->m_len = rxd->data_size;
		m_adj(m, MVPP2_MH_SIZE);
		ml_enqueue(&ml, m);

		/* Remember this slot needs a fresh mbuf. */
		KASSERT(bm->freelist[bm->free_prod] == -1);
		bm->freelist[bm->free_prod] = virt & 0xffffffff;
		bm->free_prod = (bm->free_prod + 1) % MVPP2_BM_SIZE;

		rxq->cons = (rxq->cons + 1) % MVPP2_NRXDESC;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Acknowledge processed descriptors to the hardware. */
	mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);

	if_input(ifp, &ml);
}
2225 
2226 /*
2227  * We have a pool per core, and since we should not assume that
2228  * RX buffers are always used in order, keep a list of rxbuf[]
2229  * indices that should be filled with an mbuf, if possible.
2230  */
void
mvpp2_rx_refill(struct mvpp2_port *sc)
{
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	uint64_t phys, virt;
	int pool;

	/* Work on this CPU's buffer-manager pool. */
	pool = curcpu()->ci_cpuid;
	KASSERT(pool < sc->sc->sc_npools);
	bm = &sc->sc->sc_bm_pools[pool];

	/* -1 terminates the list of slots awaiting a buffer. */
	while (bm->freelist[bm->free_cons] != -1) {
		virt = bm->freelist[bm->free_cons];
		KASSERT(((virt >> 16) & 0xffff) == pool);
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m == NULL);

		rxb->mb_m = mvpp2_alloc_mbuf(sc->sc, rxb->mb_map);
		if (rxb->mb_m == NULL)
			break;

		bm->freelist[bm->free_cons] = -1;
		bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;

		/*
		 * Release the buffer back to the pool: high address
		 * bits first, then the virtual cookie, then the
		 * physical address which triggers the release.
		 */
		phys = rxb->mb_map->dm_segs[0].ds_addr;
		mvpp2_write(sc->sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
		    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
		    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
		    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
		mvpp2_write(sc->sc, MVPP2_BM_VIRT_RLS_REG,
		    virt & 0xffffffff);
		mvpp2_write(sc->sc, MVPP2_BM_PHY_RLS_REG(pool),
		    phys & 0xffffffff);
	}
}
2268 
/*
 * Bring the interface up: program parser entries for our MAC
 * addresses, initialize the TX and RX queues, configure the MAC,
 * unmask interrupts and start the tick timeout.
 */
void
mvpp2_up(struct mvpp2_port *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	if (sc->sc_sfp) {
		rw_enter(&mvpp2_sff_lock, RW_WRITE);
		sfp_enable(sc->sc_sfp);
		rw_exit(&mvpp2_sff_lock);
	}

	/* Accept broadcast and our own unicast address in the parser. */
	mvpp2_prs_mac_da_accept(sc, etherbroadcastaddr, 1);
	mvpp2_prs_mac_da_accept(sc, sc->sc_lladdr, 1);
	mvpp2_prs_tag_mode_set(sc->sc, sc->sc_id, MVPP2_TAG_TYPE_MH);
	mvpp2_prs_def_flow(sc);

	for (i = 0; i < sc->sc_ntxq; i++)
		mvpp2_txq_hw_init(sc, &sc->sc_txqs[i]);

	mvpp2_tx_time_coal_set(sc, sc->sc_tx_time_coal);

	for (i = 0; i < sc->sc_nrxq; i++)
		mvpp2_rxq_hw_init(sc, &sc->sc_rxqs[i]);

	/* FIXME: rx buffer fill */

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	mvpp2_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	mvpp2_txp_max_tx_size_set(sc);

	/* XXX: single vector */
	mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id),
	    MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_MISC_SUM_MASK);
	mvpp2_interrupts_enable(sc, (1 << 0));

	mvpp2_mac_config(sc);
	mvpp2_egress_enable(sc);
	mvpp2_ingress_enable(sc);

	timeout_add_sec(&sc->sc_tick, 1);
}
2321 
/*
 * Allocate and program one aggregated (shared) TX queue: DMA
 * descriptor ring, per-slot DMA maps, and the hardware base-address
 * and size registers.  The producer index is read back from the
 * hardware rather than assumed to start at zero.
 */
void
mvpp2_aggr_txq_hw_init(struct mvpp2_softc *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int i;

	txq->ring = mvpp2_dmamem_alloc(sc,
	    MVPP2_AGGR_TXQ_SIZE * sizeof(struct mvpp2_tx_desc), 32);
	KASSERT(txq->ring != NULL);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_AGGR_TXQ_SIZE, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVPP2_AGGR_TXQ_SIZE; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	txq->prod = mvpp2_read(sc, MVPP2_AGGR_TXQ_INDEX_REG(txq->id));
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_ADDR_REG(txq->id),
	    MVPP2_DMA_DVA(txq->ring) >> MVPP22_DESC_ADDR_OFFS);
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_SIZE_REG(txq->id),
	    MVPP2_AGGR_TXQ_SIZE);
}
2352 
/*
 * Allocate and program one per-port logical TX queue: descriptor
 * ring, per-slot DMA maps, the indirect per-queue registers, the
 * prefetch buffer, and WRR/EJP scheduler defaults.
 */
void
mvpp2_txq_hw_init(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int desc, desc_per_txq;
	uint32_t reg;
	int i;

	txq->prod = txq->cons = 0;

	txq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NTXDESC * sizeof(struct mvpp2_tx_desc), 32);
	KASSERT(txq->ring != NULL);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_NTXDESC, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVPP2_NTXDESC; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	/* Per-queue registers are accessed indirectly via TXQ_NUM. */
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(txq->ring));
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG,
	    MVPP2_NTXDESC & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(sc->sc, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_RSVD_CLR_REG,
	    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	reg = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG);
	reg &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PENDING_REG, reg);

	/* Carve out this queue's slice of the descriptor prefetch buffer. */
	desc_per_txq = 16;
	desc = (sc->sc_id * MVPP2_MAX_TXQ * desc_per_txq) +
	    (txq->log_id * desc_per_txq);

	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG,
	    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
	    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	mvpp2_write(sc->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(sc));

	reg = mvpp2_read(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	reg &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	reg |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	reg |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), reg);

	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
	    MVPP2_TXQ_TOKEN_SIZE_MAX);

	mvpp2_tx_pkts_coal_set(sc, txq, txq->done_pkts_coal);

	/* Reading the sent counter clears it for this queue. */
	mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id));
}
2419 
/*
 * Allocate and program one RX queue: descriptor ring, indirect
 * per-queue registers, coalescing parameters, and the initial
 * descriptor credit handed to the hardware.
 */
void
mvpp2_rxq_hw_init(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	rxq->prod = rxq->cons = 0;

	rxq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NRXDESC * sizeof(struct mvpp2_rx_desc), 32);
	KASSERT(rxq->ring != NULL);
	rxq->descs = MVPP2_DMA_KVA(rxq->ring);

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring),
	    0, MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Per-queue registers are accessed indirectly via RXQ_NUM. */
	mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(rxq->ring) >> MVPP22_DESC_ADDR_OFFS);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, MVPP2_NRXDESC);
	mvpp2_write(sc->sc, MVPP2_RXQ_INDEX_REG, 0);
	mvpp2_rxq_offset_set(sc, rxq->id, 0);
	mvpp2_rx_pkts_coal_set(sc, rxq, rxq->pkts_coal);
	mvpp2_rx_time_coal_set(sc, rxq, rxq->time_coal);
	/* Hand the whole ring to the hardware as free descriptors. */
	mvpp2_rxq_status_update(sc, rxq->id, 0, MVPP2_NRXDESC);
}
2445 
2446 void
2447 mvpp2_mac_reset_assert(struct mvpp2_port *sc)
2448 {
2449 	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
2450 	    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) |
2451 	    MVPP2_PORT_CTRL2_PORTMACRESET);
2452 	if (sc->sc_gop_id == 0)
2453 		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG,
2454 		    mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
2455 		    ~MV_XLG_MAC_CTRL0_MACRESETN);
2456 }
2457 
/*
 * Hold the 10G PCS blocks (MPCS and XPCS) in reset.  Only GOP 0
 * carries these blocks, so other ports return early.
 */
void
mvpp2_pcs_reset_assert(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc_gop_id != 0)
		return;

	reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
	reg |= MVPP22_MPCS_CLK_DIV_PHASE_SET;
	reg &= ~MVPP22_MPCS_TX_SD_CLK_RESET;
	reg &= ~MVPP22_MPCS_RX_SD_CLK_RESET;
	reg &= ~MVPP22_MPCS_MAC_CLK_RESET;
	mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
	reg &= ~MVPP22_XPCS_PCSRESET;
	mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
}
2476 
/*
 * Release the PCS reset matching the configured 10G mode: MPCS
 * clocks for 10GBASE-R, XPCS for XAUI.  Only GOP 0 has these blocks.
 */
void
mvpp2_pcs_reset_deassert(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc_gop_id != 0)
		return;

	if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
		reg &= ~MVPP22_MPCS_CLK_DIV_PHASE_SET;
		reg |= MVPP22_MPCS_TX_SD_CLK_RESET;
		reg |= MVPP22_MPCS_RX_SD_CLK_RESET;
		reg |= MVPP22_MPCS_MAC_CLK_RESET;
		mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	} else if (sc->sc_phy_mode == PHY_MODE_XAUI) {
		reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
		reg |= MVPP22_XPCS_PCSRESET;
		mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
	}
}
2498 
/*
 * Full MAC (re)configuration sequence: force the link down, disable
 * and reset the port, reconfigure the serdes and the XLG or GMAC
 * depending on the media, set the maximum frame size, release the
 * resets and re-enable the port.
 */
void
mvpp2_mac_config(struct mvpp2_port *sc)
{
	uint32_t reg;

	/* Force the link down on both MACs while reconfiguring. */
	reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
	reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
	reg |= MVPP2_GMAC_FORCE_LINK_DOWN;
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
	if (sc->sc_gop_id == 0) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
		reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
		reg |= MV_XLG_MAC_CTRL0_FORCELINKDOWN;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
	}

	mvpp2_port_disable(sc);

	mvpp2_mac_reset_assert(sc);
	mvpp2_pcs_reset_assert(sc);

	/* Reconfigure serdes and MAC with the port masked and in reset. */
	mvpp2_gop_intr_mask(sc);
	mvpp2_comphy_config(sc, 0);

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI))
		mvpp2_xlg_config(sc);
	else
		mvpp2_gmac_config(sc);

	mvpp2_comphy_config(sc, 1);
	mvpp2_gop_config(sc);

	mvpp2_pcs_reset_deassert(sc);

	/* Select which MAC (10G XLG or GMAC) drives GOP 0. */
	if (sc->sc_gop_id == 0) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL3_REG);
		reg &= ~MV_XLG_MAC_CTRL3_MACMODESELECT_MASK;
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI)
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_10G;
		else
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_GMAC;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL3_REG, reg);
	}

	/* Maximum RX frame size; the register holds it in 2-byte units. */
	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL1_REG);
		reg &= ~MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL1_REG, reg);
	} else {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_CTRL_0_REG);
		reg &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MVPP2_GMAC_MAX_RX_SIZE_OFFS;
		mvpp2_gmac_write(sc, MVPP2_GMAC_CTRL_0_REG, reg);
	}

	mvpp2_gop_intr_unmask(sc);

	/* Take the GMAC out of reset and wait for it to complete. */
	if (!(sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
		    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) &
		    ~MVPP2_PORT_CTRL2_PORTMACRESET);
		while (mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) &
		    MVPP2_PORT_CTRL2_PORTMACRESET)
			;
	}

	mvpp2_port_enable(sc);

	/* With in-band status, stop forcing the link either way. */
	if (sc->sc_inband_status) {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
		reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
		reg &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
		mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		if (sc->sc_gop_id == 0) {
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		}
	} else
		mvpp2_port_change(sc);
}
2588 
/*
 * Configure the XLG (10G) MAC: release its reset and enable
 * forwarding of flow-control frames.
 */
void
mvpp2_xlg_config(struct mvpp2_port *sc)
{
	uint32_t ctl0, ctl4;

	ctl0 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
	ctl4 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL4_REG);

	ctl0 |= MV_XLG_MAC_CTRL0_MACRESETN;
	ctl4 &= ~MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK;
	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_PFC_EN;
	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN;

	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, ctl0);
	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL4_REG, ctl4);

	/* Wait until the MAC reports it is out of reset. */
	while ((mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
	    MV_XLG_MAC_CTRL0_MACRESETN) == 0)
		;
}
2610 
/*
 * Program the GMAC for the configured PHY mode: PCS enable and
 * clock/pin selection per mode, plus in-band auto-negotiation
 * settings when in-band status is used (SGMII negotiates speed and
 * duplex; 802.3z modes run fixed 1G full duplex).
 */
void
mvpp2_gmac_config(struct mvpp2_port *sc)
{
	uint32_t ctl0, ctl2, ctl4, panc;

	/* Setup phy. */
	ctl0 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL0_REG);
	ctl2 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG);
	ctl4 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL4_REG);
	panc = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);

	/* Start from a clean slate for the mode-dependent bits. */
	ctl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK |
	    MVPP2_GMAC_INBAND_AN_MASK);
	panc &= ~(MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
	    MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FC_ADV_EN |
	    MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
	    MVPP2_GMAC_IN_BAND_AUTONEG);

	switch (sc->sc_phy_mode) {
	case PHY_MODE_XAUI:
	case PHY_MODE_10GBASER:
		/* Handled by the XLG MAC; nothing to do here. */
		break;
	case PHY_MODE_2500BASEX:
	case PHY_MODE_1000BASEX:
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	case PHY_MODE_SGMII:
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl2 |= MVPP2_GMAC_INBAND_AN_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	case PHY_MODE_RGMII:
	case PHY_MODE_RGMII_ID:
	case PHY_MODE_RGMII_RXID:
	case PHY_MODE_RGMII_TXID:
		ctl4 &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	}

	/* Use Auto-Negotiation for Inband Status only */
	if (sc->sc_inband_status) {
		panc &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		panc |= MVPP2_GMAC_IN_BAND_AUTONEG;
		/* TODO: read mode from SFP */
		if (sc->sc_phy_mode == PHY_MODE_SGMII) {
			/* SGMII */
			panc |= MVPP2_GMAC_AN_SPEED_EN;
			panc |= MVPP2_GMAC_AN_DUPLEX_EN;
		} else {
			/* 802.3z */
			ctl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
			panc |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			panc |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		}
	}

	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL0_REG, ctl0);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG, ctl2);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL4_REG, ctl4);
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, panc);
}
2685 
2686 #define COMPHY_BASE		0x120000
2687 #define COMPHY_SIP_POWER_ON	0x82000001
2688 #define COMPHY_SIP_POWER_OFF	0x82000002
2689 #define COMPHY_SPEED(x)		((x) << 2)
2690 #define  COMPHY_SPEED_1_25G		0 /* SGMII 1G */
2691 #define  COMPHY_SPEED_2_5G		1
2692 #define  COMPHY_SPEED_3_125G		2 /* SGMII 2.5G */
2693 #define  COMPHY_SPEED_5G		3
2694 #define  COMPHY_SPEED_5_15625G		4 /* XFI 5G */
2695 #define  COMPHY_SPEED_6G		5
2696 #define  COMPHY_SPEED_10_3125G		6 /* XFI 10G */
2697 #define COMPHY_UNIT(x)		((x) << 8)
2698 #define COMPHY_MODE(x)		((x) << 12)
2699 #define  COMPHY_MODE_SATA		1
2700 #define  COMPHY_MODE_SGMII		2 /* SGMII 1G */
2701 #define  COMPHY_MODE_HS_SGMII		3 /* SGMII 2.5G */
2702 #define  COMPHY_MODE_USB3H		4
2703 #define  COMPHY_MODE_USB3D		5
2704 #define  COMPHY_MODE_PCIE		6
2705 #define  COMPHY_MODE_RXAUI		7
2706 #define  COMPHY_MODE_XFI		8
2707 #define  COMPHY_MODE_SFI		9
2708 #define  COMPHY_MODE_USB3		10
2709 #define  COMPHY_MODE_AP			11
2710 
/*
 * Power the common serdes PHY lane on or off through the secure
 * firmware (SMC call), with mode and speed selected from the port's
 * PHY mode.  Ports without a "phys" device-tree property, or with
 * an unsupported mode, are left untouched.
 */
void
mvpp2_comphy_config(struct mvpp2_port *sc, int on)
{
	int node, phys[2], lane, unit;
	uint32_t mode;

	/* "phys" is <phandle, unit>; bail if absent or malformed. */
	if (OF_getpropintarray(sc->sc_node, "phys", phys, sizeof(phys)) !=
	    sizeof(phys))
		return;
	node = OF_getnodebyphandle(phys[0]);
	if (!node)
		return;

	lane = OF_getpropint(node, "reg", 0);
	unit = phys[1];

	switch (sc->sc_phy_mode) {
	case PHY_MODE_XAUI:
		mode = COMPHY_MODE(COMPHY_MODE_RXAUI) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_10GBASER:
		mode = COMPHY_MODE(COMPHY_MODE_XFI) |
		    COMPHY_SPEED(COMPHY_SPEED_10_3125G) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_2500BASEX:
		mode = COMPHY_MODE(COMPHY_MODE_HS_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_3_125G) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_1000BASEX:
	case PHY_MODE_SGMII:
		mode = COMPHY_MODE(COMPHY_MODE_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_1_25G) |
		    COMPHY_UNIT(unit);
		break;
	default:
		return;
	}

	if (on)
		smc_call(COMPHY_SIP_POWER_ON, sc->sc->sc_ioh_paddr + COMPHY_BASE,
		    lane, mode);
	else
		smc_call(COMPHY_SIP_POWER_OFF, sc->sc->sc_ioh_paddr + COMPHY_BASE,
		    lane, 0);
}
2759 
/*
 * Configure the group-of-ports (GOP) glue logic for this port's
 * phy-mode via the system-controller regmap, then reset and enable the
 * port in GENCONF.  A no-op when no system-controller regmap exists.
 */
void
mvpp2_gop_config(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc->sc_rm == NULL)
		return;

	if (sc->sc_phy_mode == PHY_MODE_RGMII ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
		/* RGMII is not configured on GOP 0. */
		if (sc->sc_gop_id == 0)
			return;
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
		reg |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
		regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_CTRL0);
		if (sc->sc_gop_id == 2)
			reg |= GENCONF_CTRL0_PORT0_RGMII |
			    GENCONF_CTRL0_PORT1_RGMII;
		else if (sc->sc_gop_id == 3)
			reg |= GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write_4(sc->sc->sc_rm, GENCONF_CTRL0, reg);
	} else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
	    sc->sc_phy_mode == PHY_MODE_SGMII) {
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
		reg |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
		    GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
		regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);
		/* GOPs 2 and 3 share bits with the RGMII setup; clear them. */
		if (sc->sc_gop_id > 1) {
			reg = regmap_read_4(sc->sc->sc_rm, GENCONF_CTRL0);
			if (sc->sc_gop_id == 2)
				reg &= ~GENCONF_CTRL0_PORT0_RGMII;
			else if (sc->sc_gop_id == 3)
				reg &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
			regmap_write_4(sc->sc->sc_rm, GENCONF_CTRL0, reg);
		}
	} else if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
		/* 10GBASE-R is only wired up on GOP 0 (XPCS/MPCS). */
		if (sc->sc_gop_id != 0)
			return;
		reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
		reg &= ~MVPP22_XPCS_PCSMODE_MASK;
		reg &= ~MVPP22_XPCS_LANEACTIVE_MASK;
		reg |= 2 << MVPP22_XPCS_LANEACTIVE_OFFS;
		mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS40G_COMMON_CONTROL);
		reg &= ~MVPP22_MPCS_FORWARD_ERROR_CORRECTION_MASK;
		mvpp2_mpcs_write(sc, MVPP22_MPCS40G_COMMON_CONTROL, reg);
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
		reg &= ~MVPP22_MPCS_CLK_DIVISION_RATIO_MASK;
		reg |= MVPP22_MPCS_CLK_DIVISION_RATIO_DEFAULT;
		mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	} else
		return;

	/* Common tail: reset + enable this GOP port, clear clock divider
	 * phase, and soft-reset the GOP block. */
	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL1);
	reg |= GENCONF_PORT_CTRL1_RESET(sc->sc_gop_id) |
	    GENCONF_PORT_CTRL1_EN(sc->sc_gop_id);
	regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL1, reg);

	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
	reg |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);

	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_SOFT_RESET1);
	reg |= GENCONF_SOFT_RESET1_GOP;
	regmap_write_4(sc->sc->sc_rm, GENCONF_SOFT_RESET1, reg);
}
2830 
/*
 * Mask link-change interrupts for this port: both XLG sources on GOP 0,
 * and the GMAC summary cause on every port.
 */
void
mvpp2_gop_intr_mask(struct mvpp2_port *sc)
{
	uint32_t reg;

	/* Only GOP 0 has the XLG (10G) MAC. */
	if (sc->sc_gop_id == 0) {
		reg = mvpp2_xlg_read(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG;
		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG;
		mvpp2_xlg_write(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg);
	}

	reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_SUM_MASK_REG);
	reg &= ~MVPP2_GMAC_INT_SUM_CAUSE_LINK_CHANGE;
	mvpp2_gmac_write(sc, MVPP2_GMAC_INT_SUM_MASK_REG, reg);
}
2847 
/*
 * Unmask link-change interrupts for this port.  On GOP 0 only the
 * source matching the current phy-mode (XLG for 10G/XAUI, GIG
 * otherwise) is enabled.
 */
void
mvpp2_gop_intr_unmask(struct mvpp2_port *sc)
{
	uint32_t reg;

	reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_SUM_MASK_REG);
	reg |= MVPP2_GMAC_INT_SUM_CAUSE_LINK_CHANGE;
	mvpp2_gmac_write(sc, MVPP2_GMAC_INT_SUM_MASK_REG, reg);

	if (sc->sc_gop_id == 0) {
		reg = mvpp2_xlg_read(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI)
			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG;
		else
			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG;
		mvpp2_xlg_write(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg);
	}
}
2867 
/*
 * Bring the interface down: stop the tick timer, disable traffic in
 * both directions, reset MAC/PCS, flush and tear down all TX/RX
 * queues, and power down the SFP cage if present.
 */
void
mvpp2_down(struct mvpp2_port *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg;
	int i;

	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	mvpp2_egress_disable(sc);
	mvpp2_ingress_disable(sc);

	mvpp2_mac_reset_assert(sc);
	mvpp2_pcs_reset_assert(sc);

	/* XXX: single vector */
	mvpp2_interrupts_disable(sc, (1 << 0));
	mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id), 0);

	/* Flush this port's TX FIFO while the queues are drained. */
	reg = mvpp2_read(sc->sc, MVPP2_TX_PORT_FLUSH_REG);
	reg |= MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
	mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);

	for (i = 0; i < sc->sc_ntxq; i++)
		mvpp2_txq_hw_deinit(sc, &sc->sc_txqs[i]);

	/* Clear the flush bit again (reg still holds the value written). */
	reg &= ~MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
	mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);

	for (i = 0; i < sc->sc_nrxq; i++)
		mvpp2_rxq_hw_deinit(sc, &sc->sc_rxqs[i]);

	/* Power down the SFP module under the shared SFF lock. */
	if (sc->sc_sfp) {
		rw_enter(&mvpp2_sff_lock, RW_WRITE);
		sfp_disable(sc->sc_sfp);
		rw_exit(&mvpp2_sff_lock);
	}
}
2909 
/*
 * Tear down one TX queue: drain pending packets, detach the descriptor
 * ring from the hardware, then free in-flight mbufs, DMA maps, the
 * ring memory, and the buffer bookkeeping array.
 */
void
mvpp2_txq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int i, pending;
	uint32_t reg;

	/* Select the queue (indirect access) and enable drain mode. */
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	reg = mvpp2_read(sc->sc, MVPP2_TXQ_PREF_BUF_REG);
	reg |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);

	/*
	 * the queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	i = 0;
	do {
		if (i >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			printf("%s: port %d: cleaning queue %d timed out\n",
			    sc->sc_dev.dv_xname, sc->sc_id, txq->log_id);
			break;
		}
		delay(1000);
		i++;

		pending = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG) &
		    MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	/* Leave drain mode. */
	reg &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);

	/* Detach the ring; SENT is read and discarded (NOTE(review):
	 * presumably clear-on-read — confirm against the datasheet). */
	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG, 0);
	mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id));

	/* Free any mbufs still attached to descriptors, and all maps. */
	for (i = 0; i < MVPP2_NTXDESC; i++) {
		txb = &txq->buf[i];
		if (txb->mb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
			    txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->mb_map);
			m_freem(txb->mb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->mb_map);
	}

	mvpp2_dmamem_free(sc->sc, txq->ring);
	free(txq->buf, M_DEVBUF, sizeof(struct mvpp2_buf) *
	    MVPP2_NTXDESC);
}
2964 
/*
 * Return all received-but-unprocessed buffers of an RX queue to their
 * buffer-manager pools without passing them up the stack.
 */
void
mvpp2_rxq_hw_drop(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	struct mvpp2_rx_desc *rxd;
	struct mvpp2_bm_pool *bm;
	uint64_t phys, virt;
	uint32_t i, nrecv, pool;
	struct mvpp2_buf *rxb;

	nrecv = mvpp2_rxq_received(sc, rxq->id);
	if (!nrecv)
		return;

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < nrecv; i++) {
		rxd = &rxq->descs[rxq->cons];
		/* The cookie encodes pool in bits 16-31, buffer index in
		 * bits 0-15. */
		virt = rxd->buf_cookie_bm_qset_cls_info;
		pool = (virt >> 16) & 0xffff;
		KASSERT(pool < sc->sc->sc_npools);
		bm = &sc->sc->sc_bm_pools[pool];
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m != NULL);
		/* Keep only the low 32 cookie bits for the release write;
		 * the cookie's high-virt field below is therefore zero. */
		virt &= 0xffffffff;
		phys = rxb->mb_map->dm_segs[0].ds_addr;
		/* Release sequence: high address bits first, then the
		 * virt cookie, then phys (which triggers the release). */
		mvpp2_write(sc->sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
		    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
		    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
		    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
		mvpp2_write(sc->sc, MVPP2_BM_VIRT_RLS_REG,
		    virt & 0xffffffff);
		mvpp2_write(sc->sc, MVPP2_BM_PHY_RLS_REG(pool),
		    phys & 0xffffffff);
		rxq->cons = (rxq->cons + 1) % MVPP2_NRXDESC;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Report the descriptors as both consumed and refilled. */
	mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);
}
3010 
/*
 * Tear down one RX queue: drop any pending buffers back to their
 * pools, detach the descriptor ring from the hardware, and free the
 * ring memory.
 */
void
mvpp2_rxq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_hw_drop(sc, rxq);

	mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, 0);

	mvpp2_dmamem_free(sc->sc, rxq->ring);
}
3023 
/*
 * Attach a buffer-manager pool to a logical RX queue for long (large)
 * packets.
 */
void
mvpp2_rxq_long_pool_set(struct mvpp2_port *port, int lrxq, int pool)
{
	uint32_t val;
	int prxq;

	/* get queue physical ID */
	prxq = port->sc_rxqs[lrxq].id;

	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_LONG_MASK;
	val |= ((pool << MVPP2_RXQ_POOL_LONG_OFFS) & MVPP2_RXQ_POOL_LONG_MASK);

	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
3039 
/*
 * Attach a buffer-manager pool to a logical RX queue for short (small)
 * packets.
 */
void
mvpp2_rxq_short_pool_set(struct mvpp2_port *port, int lrxq, int pool)
{
	uint32_t val;
	int prxq;

	/* get queue physical ID */
	prxq = port->sc_rxqs[lrxq].id;

	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
	val |= ((pool << MVPP2_RXQ_POOL_SHORT_OFFS) & MVPP2_RXQ_POOL_SHORT_MASK);

	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
3055 
/*
 * Program the receive filter (parser MAC entries and promiscuity) from
 * the interface flags and multicast list.
 */
void
mvpp2_iff(struct mvpp2_port *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Removes all but broadcast and (new) lladdr */
	mvpp2_prs_mac_del_all(sc);

	/* Promiscuous: accept all unicast and multicast, done. */
	if (ifp->if_flags & IFF_PROMISC) {
		mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
		    MVPP2_PRS_L2_UNI_CAST, 1);
		mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
		    MVPP2_PRS_L2_MULTI_CAST, 1);
		return;
	}

	/* Otherwise start from a closed filter. */
	mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
	    MVPP2_PRS_L2_UNI_CAST, 0);
	mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
	    MVPP2_PRS_L2_MULTI_CAST, 0);

	/* Ranges, or more groups than parser entries: fall back to
	 * accepting all multicast. */
	if (ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > MVPP2_PRS_MAC_MC_FILT_MAX) {
		ifp->if_flags |= IFF_ALLMULTI;
		mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
		    MVPP2_PRS_L2_MULTI_CAST, 1);
	} else {
		/* Install one parser entry per multicast address. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			mvpp2_prs_mac_da_accept(sc, enm->enm_addrlo, 1);
			ETHER_NEXT_MULTI(step, enm);
		}
	}
}
3095 
/*
 * Allocate a single-segment, zeroed DMA buffer of the given size and
 * alignment, map it into kernel VA and load it into a DMA map.
 * Returns NULL on failure; partially-acquired resources are unwound
 * through the goto cleanup chain.  Free with mvpp2_dmamem_free().
 */
struct mvpp2_dmamem *
mvpp2_dmamem_alloc(struct mvpp2_softc *sc, bus_size_t size, bus_size_t align)
{
	struct mvpp2_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
	mdm->mdm_size = size;

	/* One map with exactly one segment covering the whole buffer. */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(mdm->mdm_kva, size);

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, 0);

	return (NULL);
}
3136 
/*
 * Release a DMA buffer obtained from mvpp2_dmamem_alloc(): unmap the
 * KVA, free the segment, destroy the map, and free the descriptor.
 */
void
mvpp2_dmamem_free(struct mvpp2_softc *sc, struct mvpp2_dmamem *mdm)
{
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, 0);
}
3145 
3146 struct mbuf *
3147 mvpp2_alloc_mbuf(struct mvpp2_softc *sc, bus_dmamap_t map)
3148 {
3149 	struct mbuf *m = NULL;
3150 
3151 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
3152 	if (!m)
3153 		return (NULL);
3154 	m->m_len = m->m_pkthdr.len = MCLBYTES;
3155 
3156 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
3157 		printf("%s: could not load mbuf DMA map", DEVNAME(sc));
3158 		m_freem(m);
3159 		return (NULL);
3160 	}
3161 
3162 	bus_dmamap_sync(sc->sc_dmat, map, 0,
3163 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
3164 
3165 	return (m);
3166 }
3167 
/* Enable this port's per-CPU ISR delivery for the CPUs in cpu_mask. */
void
mvpp2_interrupts_enable(struct mvpp2_port *port, int cpu_mask)
{
	mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
	    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
}
3174 
/* Disable this port's per-CPU ISR delivery for the CPUs in cpu_mask. */
void
mvpp2_interrupts_disable(struct mvpp2_port *port, int cpu_mask)
{
	mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
	    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
}
3181 
/* Map a port to its egress (TX) port index; TX ports follow the TCONTs. */
int
mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->sc_id;
}
3187 
3188 int
3189 mvpp2_txq_phys(int port, int txq)
3190 {
3191 	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
3192 }
3193 
/*
 * Program sane egress-scheduler and RX defaults for a port and mask
 * its interrupts on all CPUs.
 */
void
mvpp2_defaults_set(struct mvpp2_port *port)
{
	int val, queue;

	/* Select this port for the indirect TXP scheduler registers. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Zero all per-queue token counters. */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Scheduler period in ticks-per-microsecond of the core clock. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PERIOD_REG, port->sc->sc_tclk /
	    (1000 * 1000));
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* set maximum_low_latency_packet_size value to 256 */
	mvpp2_write(port->sc, MVPP2_RX_CTRL_REG(port->sc_id),
	    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
	    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port, (0xf << 0));
}
3224 
/* Enable reception on all of the port's RX queues. */
void
mvpp2_ingress_enable(struct mvpp2_port *port)
{
	uint32_t val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
		/* Logical to physical queue ID. */
		queue = port->sc_rxqs[lrxq].id;
		val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
3238 
/* Disable reception on all of the port's RX queues. */
void
mvpp2_ingress_disable(struct mvpp2_port *port)
{
	uint32_t val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
		/* Logical to physical queue ID. */
		queue = port->sc_rxqs[lrxq].id;
		val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
3252 
/*
 * Enable egress scheduling for every TX queue that has a descriptor
 * ring attached.
 */
void
mvpp2_egress_enable(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	uint32_t qmap;
	int queue;

	/* Build a bitmap of initialized queues. */
	qmap = 0;
	for (queue = 0; queue < port->sc_ntxq; queue++) {
		txq = &port->sc_txqs[queue];

		if (txq->descs != NULL) {
			qmap |= (1 << queue);
		}
	}

	/* Indirect access: select the port, then enqueue-enable the map. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}
3273 
/*
 * Disable egress scheduling for all of the port's TX queues and wait
 * (bounded) until the hardware reports them stopped.
 */
void
mvpp2_egress_disable(struct mvpp2_port *port)
{
	uint32_t reg_data;
	int i;

	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));
	/* Writing the enabled-queue map into the disable field stops them. */
	reg_data = (mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG)) &
	    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data)
		mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG,
		    reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET);

	/* Poll (1 ms steps) until no queue reports enqueue-enabled. */
	i = 0;
	do {
		if (i >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			printf("%s: tx stop timed out, status=0x%08x\n",
			    port->sc_dev.dv_xname, reg_data);
			break;
		}
		delay(1000);
		i++;
		reg_data = mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
3300 
/*
 * Enable the MAC for this port: the XLG MAC when GOP 0 runs a 10G
 * phy-mode, otherwise the GMAC.
 */
void
mvpp2_port_enable(struct mvpp2_port *port)
{
	uint32_t val;

	if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
	    port->sc_phy_mode == PHY_MODE_XAUI)) {
		val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
		val |= MV_XLG_MAC_CTRL0_PORTEN;
		val &= ~MV_XLG_MAC_CTRL0_MIBCNTDIS;
		mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
	} else {
		val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
	}
}
3319 
/*
 * Disable the MAC for this port.  Unlike enable, the GMAC is always
 * disabled; the XLG MAC additionally on GOP 0 with a 10G phy-mode.
 */
void
mvpp2_port_disable(struct mvpp2_port *port)
{
	uint32_t val;

	if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
	    port->sc_phy_mode == PHY_MODE_XAUI)) {
		val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
		val &= ~MV_XLG_MAC_CTRL0_PORTEN;
		mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
	}

	val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_PORT_EN_MASK;
	mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
}
3336 
/* Return the number of occupied (received) descriptors in an RX queue. */
int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	uint32_t val = mvpp2_read(port->sc, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}
3344 
/*
 * Report to the hardware how many RX descriptors were processed
 * (used_count) and how many were made available again (free_count).
 */
void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
    int used_count, int free_count)
{
	uint32_t val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
	mvpp2_write(port->sc, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}
3352 
/*
 * Set the packet data offset within RX buffers for a physical queue.
 * The register field is in units of 32 bytes, hence the >> 5.
 */
void
mvpp2_rxq_offset_set(struct mvpp2_port *port, int prxq, int offset)
{
	uint32_t val;

	offset = offset >> 5;
	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
	    MVPP2_RXQ_PACKET_OFFSET_MASK);
	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
3365 
/*
 * Program the egress scheduler's MTU and make sure the port and
 * per-queue token-bucket sizes are at least as large.
 */
void
mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	uint32_t val, size, mtu;
	int txq;

	mtu = MCLBYTES * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong token bucket update: set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* indirect access to registers */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));

	/* set MTU */
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXqs token size must be larger than MTU */
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < port->sc_ntxq; txq++) {
		val = mvpp2_read(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), val);
		}
	}
}
3411 
/*
 * Set the RX packet-count interrupt coalescing threshold for a queue,
 * clamped to the hardware field width.
 */
void
mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
    uint32_t pkts)
{
	rxq->pkts_coal =
	    pkts <= MVPP2_OCCUPIED_THRESH_MASK ?
	    pkts : MVPP2_OCCUPIED_THRESH_MASK;

	/* Indirect access: select the queue, then set its threshold. */
	mvpp2_write(port->sc, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->sc, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal);

}
3424 
/*
 * Set the TX transmitted-packet interrupt coalescing threshold for a
 * queue, clamped to the hardware field width.
 */
void
mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
    uint32_t pkts)
{
	txq->done_pkts_coal =
	    pkts <= MVPP2_TRANSMITTED_THRESH_MASK ?
	    pkts : MVPP2_TRANSMITTED_THRESH_MASK;

	/* Indirect access: select the queue, then set its threshold. */
	mvpp2_write(port->sc, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->sc, MVPP2_TXQ_THRESH_REG,
	    txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET);
}
3437 
/*
 * Set the RX interrupt time-coalescing threshold for a queue;
 * microseconds are converted to core-clock ticks.
 */
void
mvpp2_rx_time_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
    uint32_t usec)
{
	uint32_t val;

	val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
	mvpp2_write(port->sc, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);

	rxq->time_coal = usec;
}
3449 
/*
 * Set the TX interrupt time-coalescing threshold for the port;
 * microseconds are converted to core-clock ticks.
 */
void
mvpp2_tx_time_coal_set(struct mvpp2_port *port, uint32_t usec)
{
	uint32_t val;

	val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
	mvpp2_write(port->sc, MVPP2_ISR_TX_THRESHOLD_REG(port->sc_id), val);

	port->sc_tx_time_coal = usec;
}
3460 
/* Record a parser entry's result info and mask in the software shadow. */
void
mvpp2_prs_shadow_ri_set(struct mvpp2_softc *sc, int index,
    uint32_t ri, uint32_t ri_mask)
{
	sc->sc_prs_shadow[index].ri_mask = ri_mask;
	sc->sc_prs_shadow[index].ri = ri;
}
3468 
3469 void
3470 mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
3471 {
3472 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
3473 
3474 	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
3475 	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
3476 }
3477 
3478 void
3479 mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, uint32_t port, int add)
3480 {
3481 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3482 
3483 	if (add)
3484 		pe->tcam.byte[enable_off] &= ~(1 << port);
3485 	else
3486 		pe->tcam.byte[enable_off] |= (1 << port);
3487 }
3488 
3489 void
3490 mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, uint32_t port_mask)
3491 {
3492 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3493 	uint8_t mask = MVPP2_PRS_PORT_MASK;
3494 
3495 	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
3496 	pe->tcam.byte[enable_off] &= ~mask;
3497 	pe->tcam.byte[enable_off] |= ~port_mask & MVPP2_PRS_PORT_MASK;
3498 }
3499 
3500 uint32_t
3501 mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
3502 {
3503 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3504 
3505 	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
3506 }
3507 
/* Set one TCAM data byte of a parser entry along with its enable mask. */
void
mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, uint32_t offs,
    uint8_t byte, uint8_t enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}
3515 
/* Read one TCAM data byte of a parser entry along with its enable mask. */
void
mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, uint32_t offs,
    uint8_t *byte, uint8_t *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}
3523 
3524 int
3525 mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offset, uint16_t data)
3526 {
3527 	int byte_offset = MVPP2_PRS_TCAM_DATA_BYTE(offset);
3528 	uint16_t tcam_data;
3529 
3530 	tcam_data = (pe->tcam.byte[byte_offset + 1] << 8) |
3531 	    pe->tcam.byte[byte_offset];
3532 	return tcam_data == data;
3533 }
3534 
3535 void
3536 mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t enable)
3537 {
3538 	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
3539 
3540 	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
3541 		if (!(enable & BIT(i)))
3542 			continue;
3543 
3544 		if (bits & BIT(i))
3545 			pe->tcam.byte[ai_idx] |= BIT(i);
3546 		else
3547 			pe->tcam.byte[ai_idx] &= ~BIT(i);
3548 	}
3549 
3550 	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
3551 }
3552 
/* Return the additional-info (AI) byte of a parser entry. */
int
mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}
3558 
/*
 * Read one 32-bit word of TCAM data (and its per-byte enable mask)
 * starting at word index data_offset, assembling it byte by byte.
 */
void
mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *pe, uint32_t data_offset,
    uint32_t *word, uint32_t *enable)
{
	uint8_t *wb = (uint8_t *)word;
	uint8_t *eb = (uint8_t *)enable;
	int i;

	for (i = 0; i < 4; i++)
		mvpp2_prs_tcam_data_byte_get(pe,
		    (data_offset * sizeof(int)) + i, &wb[i], &eb[i]);
}
3573 
/* Match a big-endian ethertype at byte offset offs of the TCAM data. */
void
mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, uint32_t offs,
    uint16_t ether_type)
{
	mvpp2_prs_tcam_data_byte_set(pe, offs + 0, ether_type >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offs + 1, ether_type & 0xff, 0xff);
}
3581 
/* OR val into the SRAM bitfield starting at bit position bit.
 * NOTE(review): val must not extend past the containing byte. */
void
mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
{
	pe->sram.byte[bit / 8] |= (val << (bit % 8));
}
3587 
/* Clear the bits of val in the SRAM bitfield starting at position bit.
 * NOTE(review): val must not extend past the containing byte. */
void
mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
{
	pe->sram.byte[bit / 8] &= ~(val << (bit % 8));
}
3593 
/*
 * Update the SRAM result-info (RI) field: for every bit selected in
 * mask, copy the bit from bits and set the matching RI control bit.
 */
void
mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
{
	int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe,
			    MVPP2_PRS_SRAM_RI_OFFS + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe,
			    MVPP2_PRS_SRAM_RI_OFFS + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}
3613 
/* Return the SRAM result-info word of a parser entry. */
int
mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}
3619 
/*
 * Update the SRAM additional-info (AI) field: for every bit selected
 * in mask, copy the bit from bits and set the matching AI control bit.
 */
void
mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
{
	int i;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe,
			    MVPP2_PRS_SRAM_AI_OFFS + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe,
			    MVPP2_PRS_SRAM_AI_OFFS + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}
3639 
/*
 * Return the SRAM additional-info byte, which straddles a byte
 * boundary: low bits from ai_off, high bits from the following byte.
 */
int
mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	uint8_t bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	/* Stitch the two partial bytes back into one 8-bit value. */
	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	    (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
3653 
/*
 * Set the SRAM shift value (sign-magnitude encoded) and its operation
 * selector, and clear the base-offset select bit.
 */
void
mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, uint32_t op)
{
	/* Negative shifts are stored as sign bit + magnitude. */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = -shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
	    shift & MVPP2_PRS_SRAM_SHIFT_MASK;
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
3671 
/*
 * Set the SRAM user-defined-field offset (sign-magnitude encoded), its
 * type, and its operation selector.  Both the UDF offset and the op
 * selector straddle byte boundaries, so each is written in two parts:
 * the low bits via the bit helpers and the spill-over bits directly
 * into the following byte.
 */
void
mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, uint32_t type, int offset,
    uint32_t op)
{
	uint8_t udf_byte, udf_byte_offset;
	uint8_t op_sel_udf_byte, op_sel_udf_byte_offset;

	/* Byte holding the high (spill-over) part of each field, and how
	 * far to shift a value right to obtain that part. */
	udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
	    MVPP2_PRS_SRAM_UDF_BITS);
	udf_byte_offset = (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8));
	op_sel_udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
	    MVPP2_PRS_SRAM_OP_SEL_UDF_BITS);
	op_sel_udf_byte_offset = (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8));

	/* Negative offsets are stored as sign bit + magnitude. */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = -offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* UDF offset: low bits, then the spill-over byte. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
	    MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[udf_byte] &= ~(MVPP2_PRS_SRAM_UDF_MASK >> udf_byte_offset);
	pe->sram.byte[udf_byte] |= (offset >> udf_byte_offset);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
	    MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
	/* Operation selector: low bits, then the spill-over byte. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
	    MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
	pe->sram.byte[op_sel_udf_byte] &= ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
	    op_sel_udf_byte_offset);
	pe->sram.byte[op_sel_udf_byte] |= (op >> op_sel_udf_byte_offset);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
3709 
/* Set the next lookup-ID field of a parser entry's SRAM word. */
void
mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off, MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}
3718 
/* Mark a parser shadow entry valid and record its lookup ID. */
void
mvpp2_prs_shadow_set(struct mvpp2_softc *sc, int index, uint32_t lu)
{
	sc->sc_prs_shadow[index].valid = 1;
	sc->sc_prs_shadow[index].lu = lu;
}
3725 
3726 int
3727 mvpp2_prs_hw_write(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe)
3728 {
3729 	int i;
3730 
3731 	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
3732 		return EINVAL;
3733 
3734 	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
3735 	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
3736 	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3737 		mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
3738 	mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
3739 	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3740 		mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
3741 
3742 	return 0;
3743 }
3744 
/*
 * read parser entry 'tid' from hardware into 'pe'.  returns EINVAL if
 * the tid is out of range or the hardware entry is marked invalid; in
 * the invalid case 'pe' has been zeroed and only the inv word filled.
 */
int
mvpp2_prs_hw_read(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe, int tid)
{
	int i;

	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return EINVAL;

	memset(pe, 0, sizeof(*pe));
	pe->index = tid;

	/* check the invalid bit first to avoid a pointless full read */
	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] =
	    mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return EINVAL;
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] =
		    mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(i));

	mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] =
		    mvpp2_read(sc, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
3772 
3773 int
3774 mvpp2_prs_flow_find(struct mvpp2_softc *sc, int flow)
3775 {
3776 	struct mvpp2_prs_entry pe;
3777 	uint8_t bits;
3778 	int tid;
3779 
3780 	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
3781 		if (!sc->sc_prs_shadow[tid].valid ||
3782 		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
3783 			continue;
3784 
3785 		mvpp2_prs_hw_read(sc, &pe, tid);
3786 		bits = mvpp2_prs_sram_ai_get(&pe);
3787 
3788 		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
3789 			return tid;
3790 	}
3791 
3792 	return -1;
3793 }
3794 
3795 int
3796 mvpp2_prs_tcam_first_free(struct mvpp2_softc *sc, uint8_t start, uint8_t end)
3797 {
3798 	uint8_t tmp;
3799 	int tid;
3800 
3801 	if (start > end) {
3802 		tmp = end;
3803 		end = start;
3804 		start = tmp;
3805 	}
3806 
3807 	for (tid = start; tid <= end; tid++) {
3808 		if (!sc->sc_prs_shadow[tid].valid)
3809 			return tid;
3810 	}
3811 
3812 	return -1;
3813 }
3814 
/*
 * add or remove 'port' in the "drop all" mac entry.  the entry is
 * created on first use: it sets the drop result bits and finishes the
 * lookup (gen bit + flows as next lu).
 */
void
mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *sc, uint32_t port, int add)
{
	struct mvpp2_prs_entry pe;

	if (sc->sc_prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* entry exists; fetch it so the port map can be edited */
		mvpp2_prs_hw_read(sc, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* first use: build the entry from scratch */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
		    MVPP2_PRS_RI_DROP_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
		/* start with an empty port map; 'port' is added below */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3837 
/*
 * add or remove 'port' in the unicast or multicast promiscuous mac
 * entry.  the entry is created on first use and matches the cast bits
 * of the first destination mac byte.
 */
void
mvpp2_prs_mac_promisc_set(struct mvpp2_softc *sc, uint32_t port, int l2_cast,
    int add)
{
	struct mvpp2_prs_entry pe;
	uint8_t cast_match;
	uint32_t ri;
	int tid;

	/* pick the entry, match value and result bits for the cast type */
	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	if (sc->sc_prs_shadow[tid].valid) {
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		/* first use: build the entry from scratch */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
		    MVPP2_PRS_CAST_MASK);
		/* skip past both mac addresses */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_tcam_port_map_set(&pe, 0);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3876 
/*
 * add or remove 'port' in one of the four dsa entries (tagged or
 * untagged, regular dsa or extended edsa).  entries are created on
 * first use; dsa tags are skipped with a 4 byte shift, edsa with 8.
 */
void
mvpp2_prs_dsa_tag_set(struct mvpp2_softc *sc, uint32_t port, int add,
    int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (sc->sc_prs_shadow[tid].valid) {
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		/* first use: build the entry from scratch */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		mvpp2_prs_sram_shift_set(&pe, shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* match the tagged bit, continue at vlan parsing */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* untagged: report no vlan, continue at l2 parsing */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3918 
/*
 * like mvpp2_prs_dsa_tag_set(), but for dsa/edsa tags preceded by the
 * marvell ethertype (0xdada 0x0000); the shift skips both macs, the
 * ethertype and the tag.
 */
void
mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *sc, uint32_t port,
    int add, int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (sc->sc_prs_shadow[tid].valid) {
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		/* first use: build the entry from scratch */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		/* match the marvell dsa ethertype 0xdada 0x0000 */
		mvpp2_prs_match_etype(&pe, 0, 0xdada);
		mvpp2_prs_match_etype(&pe, 2, 0);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
		    MVPP2_PRS_RI_DSA_MASK);
		/* skip both macs, the ethertype and the tag itself */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN + shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* match the tagged bit, continue at vlan parsing */
			mvpp2_prs_tcam_data_byte_set(&pe,
			    MVPP2_ETH_TYPE_LEN + 2 + 3,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* untagged: report no vlan, continue at l2 parsing */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3967 
/*
 * look up the single/triple vlan entry matching 'tpid' and 'ai'.
 * returns a malloc'd copy of the entry that the caller must free, or
 * NULL if no match was found or allocation failed.
 */
struct mvpp2_prs_entry *
mvpp2_prs_vlan_find(struct mvpp2_softc *sc, uint16_t tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_bits, ai_bits;
	int match, tid;

	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
	if (pe == NULL)
		return NULL;

	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		if (!sc->sc_prs_shadow[tid].valid ||
		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;
		mvpp2_prs_hw_read(sc, pe, tid);
		/* tcam data is byte-swapped relative to the tpid */
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid));
		if (!match)
			continue;
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
		/* the double-vlan marker bit is not part of the ai compare */
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
		if (ai != ai_bits)
			continue;
		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}

	free(pe, M_TEMP, sizeof(*pe));
	return NULL;
}
4003 
/*
 * add (or update the port map of) a single/triple vlan entry for
 * 'tpid'/'ai'.  a new entry's tid must lie above the last double vlan
 * entry, otherwise EINVAL is returned.  returns 0 on success, a
 * negative tid if no tcam slot is free, or an errno value.
 */
int
mvpp2_prs_vlan_add(struct mvpp2_softc *sc, uint16_t tpid, int ai, uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_bits;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(sc, tpid, ai);
	if (pe == NULL) {
		/* no existing entry: pick a free tcam slot */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_LAST_FREE_TID,
		    MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		/* get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		    tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			mvpp2_prs_hw_read(sc, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		/* the new entry must sit above all double vlan entries */
		if (tid <= tid_aux) {
			ret = EINVAL;
			goto error;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		mvpp2_prs_match_etype(pe, 0, tpid);
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* skip the vlan tag */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
				   MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		} else {
			/* non-single ai: mark as coming from a double vlan */
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
4068 
4069 int
4070 mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *sc)
4071 {
4072 	int i;
4073 
4074 	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++)
4075 		if (!sc->sc_prs_double_vlans[i])
4076 			return i;
4077 
4078 	return -1;
4079 }
4080 
/*
 * look up the double vlan entry matching outer tpid1 and inner tpid2.
 * returns a malloc'd copy of the entry that the caller must free, or
 * NULL if no match was found or allocation failed.
 */
struct mvpp2_prs_entry *
mvpp2_prs_double_vlan_find(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_mask;
	int match, tid;

	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
	if (pe == NULL)
		return NULL;

	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		if (!sc->sc_prs_shadow[tid].valid ||
		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_hw_read(sc, pe, tid);
		/* tcam data is byte-swapped relative to the tpids */
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid1)) &&
		    mvpp2_prs_tcam_data_cmp(pe, 4, swap16(tpid2));
		if (!match)
			continue;
		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}

	free(pe, M_TEMP, sizeof(*pe));
	return NULL;
}
4112 
/*
 * add (or update the port map of) a double vlan entry for
 * tpid1/tpid2.  a new entry's tid must lie below all single/triple
 * vlan entries, otherwise ERANGE is returned.  returns 0 on success,
 * a negative value if no tcam slot or ai value is free, or an errno.
 */
int
mvpp2_prs_double_vlan_add(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2,
    uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;
	uint32_t ri_bits;

	pe = mvpp2_prs_double_vlan_find(sc, tpid1, tpid2);
	if (pe == NULL) {
		/* no existing entry: pick a free tcam slot */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
		    MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		ai = mvpp2_prs_double_vlan_ai_free_get(sc);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* find the first single/triple vlan entry */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		    tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			mvpp2_prs_hw_read(sc, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		/* the new entry must sit below all single/triple entries */
		if (tid >= tid_aux) {
			ret = ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		sc->sc_prs_double_vlans[ai] = 1;
		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* skip the outer tag; the inner tag is parsed next */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
		    MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
		    MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
4179 
/*
 * install the two ip4 parser entries for 'proto' (tcp, udp or igmp):
 * the first matches packets whose fragment bytes are zero, the second
 * is a catch-all that additionally sets the ip-frag result bits.
 * returns 0 on success, EINVAL for an unsupported proto, or a
 * negative value if no tcam slot is free.
 */
int
mvpp2_prs_ip4_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
    uint32_t ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/* first entry: not-fragmented packets */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
	/* require zero fragment bytes */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/* second entry: reuse the first, but mark fragmented packets */
	pe.index = tid;
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
	    ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
	/* don't care about the fragment bytes here */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4232 
/*
 * install an ip4 l3 multicast or broadcast classification entry.
 * returns 0 on success, EINVAL for any other cast type, or a negative
 * value if no tcam slot is free.
 */
int
mvpp2_prs_ip4_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		/* match the ip4 multicast address prefix */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
		    MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
		    MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case  MVPP2_PRS_L3_BROAD_CAST:
		/* match all four address bytes against the broadcast mask */
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
		    MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return EINVAL;
	}

	/* finish the lookup after this entry */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4278 
/*
 * install the ip6 parser entry for 'proto' (tcp, udp, icmp6 or
 * ip-in-ip).  returns 0 on success, EINVAL for an unsupported proto,
 * or a negative value if no tcam slot is free.
 */
int
mvpp2_prs_ip6_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
    uint32_t ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	/* finish the lookup after this entry */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4312 
/*
 * install the ip6 l3 multicast classification entry; multicast is the
 * only supported cast type.  returns 0 on success, EINVAL otherwise,
 * or a negative value if no tcam slot is free.
 */
int
mvpp2_prs_ip6_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* shift back so the header can be parsed again */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* match the ip6 multicast address prefix */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
	    MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4345 
4346 int
4347 mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, const uint8_t *da,
4348     uint8_t *mask)
4349 {
4350 	uint8_t tcam_byte, tcam_mask;
4351 	int index;
4352 
4353 	for (index = 0; index < ETHER_ADDR_LEN; index++) {
4354 		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte,
4355 		    &tcam_mask);
4356 		if (tcam_mask != mask[index])
4357 			return 0;
4358 		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
4359 			return 0;
4360 	}
4361 
4362 	return 1;
4363 }
4364 
/*
 * scan the mac tcam range for an entry matching da/mask, the given
 * udf type and exactly the port map 'pmap'; returns the tid or -1.
 */
int
mvpp2_prs_mac_da_range_find(struct mvpp2_softc *sc, int pmap, const uint8_t *da,
    uint8_t *mask, int udf_type)
{
	struct mvpp2_prs_entry pe;
	int tid;

	for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END;
	    tid++) {
		uint32_t entry_pmap;

		if (!sc->sc_prs_shadow[tid].valid ||
		    (sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (sc->sc_prs_shadow[tid].udf != udf_type))
			continue;

		mvpp2_prs_hw_read(sc, &pe, tid);
		entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
		if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
		    entry_pmap == pmap)
			return tid;
	}

	return -1;
}
4390 
/*
 * add or remove a mac da filtering entry for this port.  when the
 * last port is removed from an entry, the entry is invalidated; when
 * adding and no entry exists, a new one is allocated in the mac
 * range.  returns 0 on success, -1 if adding to an entry with an
 * empty port map, or a negative tid if no tcam slot is free.
 */
int
mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const uint8_t *da, int add)
{
	struct mvpp2_softc *sc = port->sc;
	struct mvpp2_prs_entry pe;
	uint32_t pmap, len, ri;
	uint8_t mask[ETHER_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_mac_da_range_find(sc, BIT(port->sc_id), da, mask,
	    MVPP2_PRS_UDF_MAC_DEF);
	if (tid < 0) {
		/* removing a non-existent entry is a no-op */
		if (!add)
			return 0;

		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_MAC_RANGE_START,
		    MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe.index = tid;
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_hw_read(sc, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_tcam_port_set(&pe, port->sc_id, add);

	/* invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		if (add)
			return -1;
		mvpp2_prs_hw_inv(sc, pe.index);
		sc->sc_prs_shadow[pe.index].valid = 0;
		return 0;
	}

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* match all six address bytes exactly */
	len = ETHER_ADDR_LEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* classify the address for the result info bits */
	if (ETHER_IS_BROADCAST(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (ETHER_IS_MULTICAST(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
	    MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(sc, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
	    MVPP2_PRS_RI_MAC_ME_MASK);
	/* skip past both mac addresses */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4458 
/*
 * remove all mac da filtering entries referencing this port, except
 * the broadcast address and the port's own lladdr.
 */
void
mvpp2_prs_mac_del_all(struct mvpp2_port *port)
{
	struct mvpp2_softc *sc = port->sc;
	struct mvpp2_prs_entry pe;
	uint32_t pmap;
	int index, tid;

	for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END;
	    tid++) {
		uint8_t da[ETHER_ADDR_LEN], da_mask[ETHER_ADDR_LEN];

		if (!sc->sc_prs_shadow[tid].valid ||
		    (sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (sc->sc_prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		mvpp2_prs_hw_read(sc, &pe, tid);
		pmap = mvpp2_prs_tcam_port_map_get(&pe);

		/* only touch entries this port participates in */
		if (!(pmap & (1 << port->sc_id)))
			continue;

		/* extract the address the entry matches on */
		for (index = 0; index < ETHER_ADDR_LEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
			    &da_mask[index]);

		/* keep broadcast and our own address */
		if (ETHER_IS_BROADCAST(da) || ETHER_IS_EQ(da, port->sc_lladdr))
			continue;

		mvpp2_prs_mac_da_accept(port, da, 0);
	}
}
4492 
/*
 * configure which dsa tag variants (dsa/edsa, tagged/untagged) this
 * port participates in, according to the requested tag type.  MH and
 * NONE disable all dsa entries for the port.  returns 0 on success or
 * EINVAL for an unknown tag type.
 */
int
mvpp2_prs_tag_mode_set(struct mvpp2_softc *sc, int port_id, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* enable edsa entries, disable plain dsa ones */
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_DSA);
		break;
	case MVPP2_TAG_TYPE_DSA:
		/* enable dsa entries, disable edsa ones */
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_EDSA);
		break;
	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* disable all dsa entries for this port */
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_EDSA);
		break;
	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return EINVAL;
		break;
	}

	return 0;
}
4536 
/*
 * install (or update the port map of) the default flow entry for this
 * port; the flow id equals the port id.  returns 0 on success or a
 * negative value if no tcam slot is free.
 */
int
mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_flow_find(port->sc, port->sc_id);
	if (tid < 0) {
		/* no existing flow entry: allocate and build one */
		tid = mvpp2_prs_tcam_first_free(port->sc,
		    MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe.index = tid;
		/* encode the flow id (== port id) in the sram ai bits */
		mvpp2_prs_sram_ai_update(&pe, port->sc_id,
		    MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
		mvpp2_prs_shadow_set(port->sc, pe.index, MVPP2_PRS_LU_FLOWS);
	} else {
		mvpp2_prs_hw_read(port->sc, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->sc_id));
	mvpp2_prs_hw_write(port->sc, &pe);
	return 0;
}
4566 
/* write one classifier flow table entry to hardware */
void
mvpp2_cls_flow_write(struct mvpp2_softc *sc, struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(sc, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
4575 
/* write one classifier lookup id decode table entry to hardware */
void
mvpp2_cls_lookup_write(struct mvpp2_softc *sc, struct mvpp2_cls_lookup_entry *le)
{
	uint32_t val;

	/* the index register encodes both the way and the lookup id */
	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(sc, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(sc, MVPP2_CLS_LKP_TBL_REG, le->data);
}
4585 
4586 void
4587 mvpp2_cls_init(struct mvpp2_softc *sc)
4588 {
4589 	struct mvpp2_cls_lookup_entry le;
4590 	struct mvpp2_cls_flow_entry fe;
4591 	int index;
4592 
4593 	mvpp2_write(sc, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
4594 	memset(&fe.data, 0, sizeof(fe.data));
4595 	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
4596 		fe.index = index;
4597 		mvpp2_cls_flow_write(sc, &fe);
4598 	}
4599 	le.data = 0;
4600 	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
4601 		le.lkpid = index;
4602 		le.way = 0;
4603 		mvpp2_cls_lookup_write(sc, &le);
4604 		le.way = 1;
4605 		mvpp2_cls_lookup_write(sc, &le);
4606 	}
4607 }
4608 
/*
 * set up the default classifier lookup for this port: way 0, initial
 * rx queue derived from the port id, classification engines disabled.
 */
void
mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	uint32_t val;

	/* set way for the port */
	val = mvpp2_read(port->sc, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->sc_id);
	mvpp2_write(port->sc, MVPP2_CLS_PORT_WAY_REG, val);

	/*
	 * pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->sc_id;
	le.way = 0;
	le.data = 0;

	/* set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= (port->sc_id * 32);

	/* disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* update lookup ID table entry */
	mvpp2_cls_lookup_write(port->sc, &le);
}
4638 
/*
 * set the rx queue used for oversize packets on this port (low bits
 * and high bits live in separate registers) and clear the port's bit
 * in the swfwd port control register.
 */
void
mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	uint32_t val;

	mvpp2_write(port->sc, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->sc_id),
	    (port->sc_id * 32) & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
	mvpp2_write(port->sc, MVPP2_CLS_SWFWD_P2HQ_REG(port->sc_id),
	    (port->sc_id * 32) >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS);
	val = mvpp2_read(port->sc, MVPP2_CLS_SWFWD_PCTRL_REG);
	val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->sc_id);
	mvpp2_write(port->sc, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
4652