xref: /openbsd-src/sys/dev/fdt/if_mvpp.c (revision 1a8dbaac879b9f3335ad7fb25429ce63ac1d6bac)
1 /*	$OpenBSD: if_mvpp.c,v 1.27 2020/08/22 12:34:14 patrick Exp $	*/
2 /*
3  * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
4  * Copyright (c) 2017, 2020 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 /*
19  * Copyright (C) 2016 Marvell International Ltd.
20  *
21  * Marvell BSD License Option
22  *
23  * If you received this File from Marvell, you may opt to use, redistribute
24  * and/or modify this File under the following licensing terms.
25  * Redistribution and use in source and binary forms, with or without
26  * modification, are permitted provided that the following conditions are met:
27  *
28  *   * Redistributions of source code must retain the above copyright notice,
29  *     this list of conditions and the following disclaimer.
30  *
31  *   * Redistributions in binary form must reproduce the above copyright
32  *     notice, this list of conditions and the following disclaimer in the
33  *     documentation and/or other materials provided with the distribution.
34  *
35  *   * Neither the name of Marvell nor the names of its contributors may be
36  *     used to endorse or promote products derived from this software without
37  *     specific prior written permission.
38  *
39  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
40  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
43  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
44  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
45  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
46  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
47  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
48  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
49  * POSSIBILITY OF SUCH DAMAGE.
50  */
51 
52 #include "bpfilter.h"
53 
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/device.h>
57 #include <sys/kernel.h>
58 #include <sys/malloc.h>
59 #include <sys/mbuf.h>
60 #include <sys/queue.h>
61 #include <sys/socket.h>
62 #include <sys/sockio.h>
63 #include <sys/timeout.h>
64 
65 #include <uvm/uvm_extern.h>
66 
67 #include <machine/cpufunc.h>
68 #include <machine/bus.h>
69 #include <machine/fdt.h>
70 
71 #include <net/if.h>
72 #include <net/if_media.h>
73 #include <net/ppp_defs.h>
74 
75 #include <dev/ofw/openfirm.h>
76 #include <dev/ofw/ofw_clock.h>
77 #include <dev/ofw/ofw_gpio.h>
78 #include <dev/ofw/ofw_misc.h>
79 #include <dev/ofw/ofw_pinctrl.h>
80 #include <dev/ofw/ofw_regulator.h>
81 #include <dev/ofw/fdt.h>
82 
83 #include <dev/mii/mii.h>
84 #include <dev/mii/miivar.h>
85 
86 #if NBPFILTER > 0
87 #include <net/bpf.h>
88 #endif
89 
90 #include <netinet/in.h>
91 #include <netinet/ip.h>
92 #include <netinet/if_ether.h>
93 
94 #include <netinet6/in6_var.h>
95 #include <netinet/ip6.h>
96 
97 #include <dev/fdt/if_mvppreg.h>
98 
/*
 * One receive/transmit buffer: the DMA map describing it to the
 * device and the mbuf (if any) currently loaded into that map.
 */
struct mvpp2_buf {
	bus_dmamap_t		mb_map;		/* DMA map for mb_m */
	struct mbuf		*mb_m;		/* loaded mbuf, or NULL */
};
103 
104 #define MVPP2_NTXDESC	512
105 #define MVPP2_NTXSEGS	16
106 #define MVPP2_NRXDESC	512
107 
/*
 * Software state for one hardware buffer manager (BM) pool.  The
 * hardware hands out and takes back buffer pointers; rxbuf[] holds
 * the backing mbufs and freelist[] is a ring of (pool << 16 | index)
 * cookies for buffers not currently owned by the hardware.
 */
struct mvpp2_bm_pool {
	struct mvpp2_dmamem	*bm_mem;	/* pool pointer ring */
	struct mvpp2_buf	*rxbuf;		/* MVPP2_BM_SIZE buffers */
	uint32_t		*freelist;	/* cookies free for refill */
	int			free_prod;	/* freelist producer index */
	int			free_cons;	/* freelist consumer index */
};
115 
116 #define MVPP2_BM_SIZE		64
117 #define MVPP2_BM_POOL_PTR_ALIGN	128
118 #define MVPP2_BM_POOLS_NUM	8
119 #define MVPP2_BM_ALIGN		32
120 
/*
 * Software state for one transmit queue.
 */
struct mvpp2_tx_queue {
	uint8_t			id;		/* global queue id */
	uint8_t			log_id;		/* per-port logical id */
	struct mvpp2_dmamem	*ring;		/* descriptor ring memory */
	struct mvpp2_buf	*buf;		/* per-descriptor buffers */
	struct mvpp2_tx_desc	*descs;		/* KVA of descriptor ring */
	int			prod;		/* producer index */
	int			cnt;		/* descriptors in use */
	int			cons;		/* consumer index */

	uint32_t		done_pkts_coal;	/* tx-done coalescing threshold */
};
133 
/*
 * Software state for one receive queue.
 */
struct mvpp2_rx_queue {
	uint8_t			id;		/* global queue id */
	struct mvpp2_dmamem	*ring;		/* descriptor ring memory */
	struct mvpp2_rx_desc	*descs;		/* KVA of descriptor ring */
	int			prod;		/* producer index */
	struct if_rxring	rxring;		/* rx ring accounting */
	int			cons;		/* consumer index */

	uint32_t		pkts_coal;	/* rx packet coalescing */
	uint32_t		time_coal;	/* rx time coalescing */
};
145 
/*
 * A single contiguous DMA-safe allocation; created by
 * mvpp2_dmamem_alloc() and accessed through the macros below.
 */
struct mvpp2_dmamem {
	bus_dmamap_t		mdm_map;	/* loaded DMA map */
	bus_dma_segment_t	mdm_seg;	/* backing segment */
	size_t			mdm_size;	/* size in bytes */
	caddr_t			mdm_kva;	/* kernel virtual address */
};
#define MVPP2_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MVPP2_DMA_LEN(_mdm)	((_mdm)->mdm_size)
/* Bus (device-visible) address vs. CPU-visible address of the memory. */
#define MVPP2_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MVPP2_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
156 
struct mvpp2_port;
/*
 * Per-controller state (one per PP22 instance).  The per-port state
 * hangs off sc_ports; ports attach as separate "mvpp" devices.
 */
struct mvpp2_softc {
	struct device		sc_dev;
	int			sc_node;	/* OF node of the controller */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh_base;	/* packet processor registers */
	bus_space_handle_t	sc_ioh_iface;	/* interface registers */
	paddr_t			sc_ioh_paddr;	/* phys addr of sc_ioh_base */
	bus_size_t		sc_iosize_base;
	bus_size_t		sc_iosize_iface;
	bus_dma_tag_t		sc_dmat;
	struct regmap		*sc_rm;		/* system-controller regmap */

	uint32_t		sc_tclk;	/* "pp_clk" frequency */

	struct mvpp2_bm_pool	*sc_bm_pools;	/* buffer manager pools */
	int			sc_npools;

	struct mvpp2_prs_shadow	*sc_prs_shadow;	/* parser TCAM shadow */
	uint8_t			*sc_prs_double_vlans;

	int			sc_aggr_ntxq;	/* aggregated tx queues */
	struct mvpp2_tx_queue	*sc_aggr_txqs;

	struct mvpp2_port	**sc_ports;
};
183 
/*
 * Per-port state; each port is an "mvpp" network interface below the
 * "mvppc" controller.
 */
struct mvpp2_port {
	struct device		sc_dev;
	struct mvpp2_softc	*sc;		/* parent controller */
	int			sc_node;	/* OF node of the port */
	bus_dma_tag_t		sc_dmat;
	int			sc_id;		/* port id */
	int			sc_gop_id;	/* group-of-ports id */

	struct arpcom		sc_ac;
#define sc_lladdr	sc_ac.ac_enaddr
	struct mii_data		sc_mii;
#define sc_media	sc_mii.mii_media
	struct mii_bus		*sc_mdio;	/* MDIO bus, if any */
	char			sc_cur_lladdr[ETHER_ADDR_LEN];

	/* How the MAC is wired to the PHY/SerDes. */
	enum {
		PHY_MODE_XAUI,
		PHY_MODE_10GBASER,
		PHY_MODE_2500BASEX,
		PHY_MODE_1000BASEX,
		PHY_MODE_SGMII,
		PHY_MODE_RGMII,
		PHY_MODE_RGMII_ID,
		PHY_MODE_RGMII_RXID,
		PHY_MODE_RGMII_TXID,
	}			sc_phy_mode;
	int			sc_fixed_link;	/* fixed-link, no MII PHY */
	int			sc_inband_status; /* use in-band link status */
	int			sc_link;	/* current link state */
	int			sc_phyloc;	/* PHY address on sc_mdio */
	int			sc_sfp;		/* SFP phandle, if present */

	int			sc_ntxq;	/* number of tx queues */
	int			sc_nrxq;	/* number of rx queues */

	struct mvpp2_tx_queue	*sc_txqs;
	struct mvpp2_rx_queue	*sc_rxqs;

	struct timeout		sc_tick;

	uint32_t		sc_tx_time_coal;	/* tx time coalescing */
};
226 
227 #define MVPP2_MAX_PORTS		4
228 
/* Attach arguments passed from the controller to each port device. */
struct mvpp2_attach_args {
	int			ma_node;	/* OF node of the port */
	bus_dma_tag_t		ma_dmat;	/* controller's DMA tag */
};
233 
234 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
235 
236 static struct rwlock mvpp2_sff_lock = RWLOCK_INITIALIZER("mvpp2sff");
237 
238 int	mvpp2_match(struct device *, void *, void *);
239 void	mvpp2_attach(struct device *, struct device *, void *);
240 void	mvpp2_attach_deferred(struct device *);
241 
/* Autoconf glue for the packet processor controller ("mvppc"). */
struct cfattach mvppc_ca = {
	sizeof(struct mvpp2_softc), mvpp2_match, mvpp2_attach
};

struct cfdriver mvppc_cd = {
	NULL, "mvppc", DV_DULL
};
249 
250 int	mvpp2_port_match(struct device *, void *, void *);
251 void	mvpp2_port_attach(struct device *, struct device *, void *);
252 
/* Autoconf glue for the per-port network interface ("mvpp"). */
struct cfattach mvpp_ca = {
	sizeof(struct mvpp2_port), mvpp2_port_match, mvpp2_port_attach
};

struct cfdriver mvpp_cd = {
	NULL, "mvpp", DV_IFNET
};
260 
261 uint32_t mvpp2_read(struct mvpp2_softc *, bus_addr_t);
262 void	mvpp2_write(struct mvpp2_softc *, bus_addr_t, uint32_t);
263 uint32_t mvpp2_gmac_read(struct mvpp2_port *, bus_addr_t);
264 void	mvpp2_gmac_write(struct mvpp2_port *, bus_addr_t, uint32_t);
265 uint32_t mvpp2_xlg_read(struct mvpp2_port *, bus_addr_t);
266 void	mvpp2_xlg_write(struct mvpp2_port *, bus_addr_t, uint32_t);
267 uint32_t mvpp2_xpcs_read(struct mvpp2_port *, bus_addr_t);
268 void	mvpp2_xpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
269 uint32_t mvpp2_mpcs_read(struct mvpp2_port *, bus_addr_t);
270 void	mvpp2_mpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
271 
272 int	mvpp2_ioctl(struct ifnet *, u_long, caddr_t);
273 void	mvpp2_start(struct ifnet *);
274 int	mvpp2_rxrinfo(struct mvpp2_port *, struct if_rxrinfo *);
275 void	mvpp2_watchdog(struct ifnet *);
276 
277 int	mvpp2_media_change(struct ifnet *);
278 void	mvpp2_media_status(struct ifnet *, struct ifmediareq *);
279 
280 int	mvpp2_mii_readreg(struct device *, int, int);
281 void	mvpp2_mii_writereg(struct device *, int, int, int);
282 void	mvpp2_mii_statchg(struct device *);
283 void	mvpp2_inband_statchg(struct mvpp2_port *);
284 void	mvpp2_port_change(struct mvpp2_port *);
285 
286 void	mvpp2_tick(void *);
287 void	mvpp2_rxtick(void *);
288 
289 int	mvpp2_link_intr(void *);
290 int	mvpp2_intr(void *);
291 void	mvpp2_tx_proc(struct mvpp2_port *, uint8_t);
292 void	mvpp2_txq_proc(struct mvpp2_port *, struct mvpp2_tx_queue *);
293 void	mvpp2_rx_proc(struct mvpp2_port *, uint8_t);
294 void	mvpp2_rxq_proc(struct mvpp2_port *, struct mvpp2_rx_queue *);
295 void	mvpp2_rx_refill(struct mvpp2_port *);
296 
297 void	mvpp2_up(struct mvpp2_port *);
298 void	mvpp2_down(struct mvpp2_port *);
299 void	mvpp2_iff(struct mvpp2_port *);
300 int	mvpp2_encap(struct mvpp2_port *, struct mbuf *, int *);
301 
302 void	mvpp2_aggr_txq_hw_init(struct mvpp2_softc *, struct mvpp2_tx_queue *);
303 void	mvpp2_txq_hw_init(struct mvpp2_port *, struct mvpp2_tx_queue *);
304 void	mvpp2_rxq_hw_init(struct mvpp2_port *, struct mvpp2_rx_queue *);
305 void	mvpp2_txq_hw_deinit(struct mvpp2_port *, struct mvpp2_tx_queue *);
306 void	mvpp2_rxq_hw_deinit(struct mvpp2_port *, struct mvpp2_rx_queue *);
307 void	mvpp2_rxq_long_pool_set(struct mvpp2_port *, int, int);
308 void	mvpp2_rxq_short_pool_set(struct mvpp2_port *, int, int);
309 
310 void	mvpp2_mac_config(struct mvpp2_port *);
311 void	mvpp2_xlg_config(struct mvpp2_port *);
312 void	mvpp2_gmac_config(struct mvpp2_port *);
313 void	mvpp2_comphy_config(struct mvpp2_port *);
314 void	mvpp2_gop_config(struct mvpp2_port *);
315 
316 struct mvpp2_dmamem *
317 	mvpp2_dmamem_alloc(struct mvpp2_softc *, bus_size_t, bus_size_t);
318 void	mvpp2_dmamem_free(struct mvpp2_softc *, struct mvpp2_dmamem *);
319 struct mbuf *mvpp2_alloc_mbuf(struct mvpp2_softc *, bus_dmamap_t);
320 void	mvpp2_fill_rx_ring(struct mvpp2_softc *);
321 
322 void	mvpp2_interrupts_enable(struct mvpp2_port *, int);
323 void	mvpp2_interrupts_disable(struct mvpp2_port *, int);
324 int	mvpp2_egress_port(struct mvpp2_port *);
325 int	mvpp2_txq_phys(int, int);
326 void	mvpp2_defaults_set(struct mvpp2_port *);
327 void	mvpp2_ingress_enable(struct mvpp2_port *);
328 void	mvpp2_ingress_disable(struct mvpp2_port *);
329 void	mvpp2_egress_enable(struct mvpp2_port *);
330 void	mvpp2_egress_disable(struct mvpp2_port *);
331 void	mvpp2_port_enable(struct mvpp2_port *);
332 void	mvpp2_port_disable(struct mvpp2_port *);
333 void	mvpp2_rxq_status_update(struct mvpp2_port *, int, int, int);
334 int	mvpp2_rxq_received(struct mvpp2_port *, int);
335 void	mvpp2_rxq_offset_set(struct mvpp2_port *, int, int);
336 void	mvpp2_txp_max_tx_size_set(struct mvpp2_port *);
337 void	mvpp2_rx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
338 	    uint32_t);
339 void	mvpp2_tx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_tx_queue *,
340 	    uint32_t);
341 void	mvpp2_rx_time_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
342 	    uint32_t);
343 void	mvpp2_tx_time_coal_set(struct mvpp2_port *, uint32_t);
344 
345 void	mvpp2_axi_config(struct mvpp2_softc *);
346 void	mvpp2_bm_pool_init(struct mvpp2_softc *);
347 void	mvpp2_rx_fifo_init(struct mvpp2_softc *);
348 void	mvpp2_tx_fifo_init(struct mvpp2_softc *);
349 int	mvpp2_prs_default_init(struct mvpp2_softc *);
350 void	mvpp2_prs_hw_inv(struct mvpp2_softc *, int);
351 void	mvpp2_prs_hw_port_init(struct mvpp2_softc *, int, int, int, int);
352 void	mvpp2_prs_def_flow_init(struct mvpp2_softc *);
353 void	mvpp2_prs_mh_init(struct mvpp2_softc *);
354 void	mvpp2_prs_mac_init(struct mvpp2_softc *);
355 void	mvpp2_prs_dsa_init(struct mvpp2_softc *);
356 int	mvpp2_prs_etype_init(struct mvpp2_softc *);
357 int	mvpp2_prs_vlan_init(struct mvpp2_softc *);
358 int	mvpp2_prs_pppoe_init(struct mvpp2_softc *);
359 int	mvpp2_prs_ip6_init(struct mvpp2_softc *);
360 int	mvpp2_prs_ip4_init(struct mvpp2_softc *);
361 void	mvpp2_prs_shadow_ri_set(struct mvpp2_softc *, int,
362 	    uint32_t, uint32_t);
363 void	mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *, uint32_t);
364 void	mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *, uint32_t, int);
365 void	mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *, uint32_t);
366 uint32_t mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *);
367 void	mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *, uint32_t,
368 	    uint8_t, uint8_t);
369 void	mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *, uint32_t,
370 	    uint8_t *, uint8_t *);
371 int	mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *, int, uint16_t);
372 void	mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
373 int	mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *);
374 int	mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *);
375 void	mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *, uint32_t,
376 	    uint32_t *, uint32_t *);
377 void	mvpp2_prs_match_etype(struct mvpp2_prs_entry *, uint32_t, uint16_t);
378 int	mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *);
379 void	mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
380 void	mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
381 void	mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *, uint32_t, uint32_t);
382 void	mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *, uint32_t, uint32_t);
383 void	mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *, int, uint32_t);
384 void	mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *, uint32_t, int,
385 	    uint32_t);
386 void	mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *, uint32_t);
387 void	mvpp2_prs_shadow_set(struct mvpp2_softc *, int, uint32_t);
388 int	mvpp2_prs_hw_write(struct mvpp2_softc *, struct mvpp2_prs_entry *);
389 int	mvpp2_prs_hw_read(struct mvpp2_softc *, struct mvpp2_prs_entry *, int);
390 int	mvpp2_prs_flow_find(struct mvpp2_softc *, int);
391 int	mvpp2_prs_tcam_first_free(struct mvpp2_softc *, uint8_t, uint8_t);
392 void	mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *, uint32_t, int);
393 void	mvpp2_prs_mac_promisc_set(struct mvpp2_softc *, uint32_t, int, int);
394 void	mvpp2_prs_dsa_tag_set(struct mvpp2_softc *, uint32_t, int, int, int);
395 void	mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *, uint32_t,
396 	    int, int, int);
397 struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2_softc *, uint16_t,
398 	    int);
399 int	mvpp2_prs_vlan_add(struct mvpp2_softc *, uint16_t, int, uint32_t);
400 int	mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *);
401 struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2_softc *,
402 	    uint16_t, uint16_t);
403 int	mvpp2_prs_double_vlan_add(struct mvpp2_softc *, uint16_t, uint16_t,
404 	    uint32_t);
405 int	mvpp2_prs_ip4_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
406 int	mvpp2_prs_ip4_cast(struct mvpp2_softc *, uint16_t);
407 int	mvpp2_prs_ip6_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
408 int	mvpp2_prs_ip6_cast(struct mvpp2_softc *, uint16_t);
409 int	mvpp2_prs_mac_da_range_find(struct mvpp2_softc *, int, const uint8_t *,
410 	    uint8_t *, int);
411 int	mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *, const uint8_t *,
412 	    uint8_t *);
413 int	mvpp2_prs_mac_da_accept(struct mvpp2_port *, const uint8_t *, int);
414 int	mvpp2_prs_tag_mode_set(struct mvpp2_softc *, int, int);
415 int	mvpp2_prs_def_flow(struct mvpp2_port *);
416 void	mvpp2_cls_flow_write(struct mvpp2_softc *, struct mvpp2_cls_flow_entry *);
417 void	mvpp2_cls_lookup_write(struct mvpp2_softc *, struct mvpp2_cls_lookup_entry *);
418 void	mvpp2_cls_init(struct mvpp2_softc *);
419 void	mvpp2_cls_port_config(struct mvpp2_port *);
420 void	mvpp2_cls_oversize_rxq_set(struct mvpp2_port *);
421 
422 int
423 mvpp2_match(struct device *parent, void *cfdata, void *aux)
424 {
425 	struct fdt_attach_args *faa = aux;
426 
427 	return OF_is_compatible(faa->fa_node, "marvell,armada-7k-pp22");
428 }
429 
/*
 * Attach the controller: map both register windows (packet processor
 * base and interface space), look up the system-controller regmap and
 * enable the clocks.  The rest of the bring-up, including attaching
 * the ports, is deferred via config_defer().
 */
void
mvpp2_attach(struct device *parent, struct device *self, void *aux)
{
	struct mvpp2_softc *sc = (void *)self;
	struct fdt_attach_args *faa = aux;

	/* Both the base and the interface register windows are required. */
	if (faa->fa_nreg < 2) {
		printf(": no registers\n");
		return;
	}

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;

	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh_base)) {
		printf(": can't map registers\n");
		return;
	}
	sc->sc_iosize_base = faa->fa_reg[0].size;

	/*
	 * Remember the physical address behind the mapped base window.
	 * NOTE(review): presumably needed to program registers with the
	 * bus address of other registers (queue index hardware access) —
	 * confirm against the queue init code (not in this chunk).
	 */
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_ioh_base,
	    &sc->sc_ioh_paddr)) {
		printf(": can't extract address\n");
		bus_space_unmap(sc->sc_iot, sc->sc_ioh_base,
		    sc->sc_iosize_base);
		return;
	}

	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
	    faa->fa_reg[1].size, 0, &sc->sc_ioh_iface)) {
		printf(": can't map registers\n");
		bus_space_unmap(sc->sc_iot, sc->sc_ioh_base,
		    sc->sc_iosize_base);
		return;
	}
	sc->sc_iosize_iface = faa->fa_reg[1].size;

	/* Optional system-controller regmap (used for GoP configuration). */
	sc->sc_rm = regmap_byphandle(OF_getpropint(faa->fa_node,
	    "marvell,system-controller", 0));

	clock_enable_all(faa->fa_node);
	sc->sc_tclk = clock_get_frequency(faa->fa_node, "pp_clk");

	printf("\n");

	/* Finish bring-up once interrupt controllers are attached. */
	config_defer(self, mvpp2_attach_deferred);
}
479 
/*
 * Deferred part of controller attach: configure AXI attributes, set up
 * the aggregated transmit queues, FIFOs, buffer manager pools, the
 * parser and classifier, then attach a port device for each child node.
 */
void
mvpp2_attach_deferred(struct device *self)
{
	struct mvpp2_softc *sc = (void *)self;
	struct mvpp2_attach_args maa;
	struct mvpp2_tx_queue *txq;
	int i, node;

	mvpp2_axi_config(sc);

	/* One aggregated tx queue (per-CPU queues not used). */
	sc->sc_aggr_ntxq = 1;
	sc->sc_aggr_txqs = mallocarray(sc->sc_aggr_ntxq,
	    sizeof(*sc->sc_aggr_txqs), M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->sc_aggr_ntxq; i++) {
		txq = &sc->sc_aggr_txqs[i];
		txq->id = i;
		mvpp2_aggr_txq_hw_init(sc, txq);
	}

	mvpp2_rx_fifo_init(sc);
	mvpp2_tx_fifo_init(sc);

	/* Enable transmit snooping. */
	mvpp2_write(sc, MVPP2_TX_SNOOP_REG, 0x1);

	mvpp2_bm_pool_init(sc);

	/* Shadow copy of the parser TCAM, one entry per hardware row. */
	sc->sc_prs_shadow = mallocarray(MVPP2_PRS_TCAM_SRAM_SIZE,
	    sizeof(*sc->sc_prs_shadow), M_DEVBUF, M_WAITOK | M_ZERO);

	mvpp2_prs_default_init(sc);
	mvpp2_cls_init(sc);

	/* Attach one mvpp(4) port per child node. */
	memset(&maa, 0, sizeof(maa));
	for (node = OF_child(sc->sc_node); node; node = OF_peer(node)) {
		maa.ma_node = node;
		maa.ma_dmat = sc->sc_dmat;
		config_found(self, &maa, NULL);
	}
}
520 
/*
 * Program the AXI attributes (cache and domain) for all DMA
 * initiators: cacheable, outer-domain for descriptor and data
 * transfers; non-cacheable, system domain for "normal" accesses.
 */
void
mvpp2_axi_config(struct mvpp2_softc *sc)
{
	uint32_t reg;

	mvpp2_write(sc, MVPP22_BM_ADDR_HIGH_RLS_REG, 0);

	/* Write attributes: cacheable writes, outer shareable domain. */
	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_BM_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_RX_DATA_WR_ATTR_REG, reg);

	/* Read attributes: cacheable reads, outer shareable domain. */
	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_BM_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TX_DATA_RD_ATTR_REG, reg);

	/* Normal (non-snooped) accesses: non-cacheable, system domain. */
	reg = (MVPP22_AXI_CODE_CACHE_NON_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_SYSTEM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_RD_NORMAL_CODE_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_WR_NORMAL_CODE_REG, reg);

	/* Snooped reads: cacheable, outer shareable domain. */
	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_RD_SNOOP_CODE_REG, reg);

	/* Snooped writes: cacheable, outer shareable domain. */
	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_WR_SNOOP_CODE_REG, reg);
}
555 
556 void
557 mvpp2_bm_pool_init(struct mvpp2_softc *sc)
558 {
559 	struct mvpp2_bm_pool *bm;
560 	struct mvpp2_buf *rxb;
561 	uint64_t phys, virt;
562 	int i, j;
563 
564 	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
565 		mvpp2_write(sc, MVPP2_BM_INTR_MASK_REG(i), 0);
566 		mvpp2_write(sc, MVPP2_BM_INTR_CAUSE_REG(i), 0);
567 	}
568 
569 	sc->sc_npools = ncpus;
570 	sc->sc_npools = min(sc->sc_npools, MVPP2_BM_POOLS_NUM);
571 
572 	sc->sc_bm_pools = mallocarray(sc->sc_npools, sizeof(*sc->sc_bm_pools),
573 	    M_DEVBUF, M_WAITOK | M_ZERO);
574 
575 	for (i = 0; i < sc->sc_npools; i++) {
576 		bm = &sc->sc_bm_pools[i];
577 		bm->bm_mem = mvpp2_dmamem_alloc(sc,
578 		    MVPP2_BM_SIZE * sizeof(uint64_t) * 2,
579 		    MVPP2_BM_POOL_PTR_ALIGN);
580 		memset(MVPP2_DMA_KVA(bm->bm_mem), 0, MVPP2_DMA_LEN(bm->bm_mem));
581 		bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(bm->bm_mem), 0,
582 		    MVPP2_DMA_LEN(bm->bm_mem),
583 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
584 
585 		mvpp2_write(sc, MVPP2_BM_POOL_BASE_REG(i),
586 		    (uint64_t)MVPP2_DMA_KVA(bm->bm_mem) & 0xffffffff);
587 		mvpp2_write(sc, MVPP22_BM_POOL_BASE_HIGH_REG,
588 		    ((uint64_t)MVPP2_DMA_KVA(bm->bm_mem) >> 32)
589 		    & MVPP22_BM_POOL_BASE_HIGH_MASK);
590 		mvpp2_write(sc, MVPP2_BM_POOL_SIZE_REG(i),
591 		    MVPP2_BM_SIZE);
592 
593 		mvpp2_write(sc, MVPP2_BM_POOL_CTRL_REG(i),
594 		    mvpp2_read(sc, MVPP2_BM_POOL_CTRL_REG(i)) |
595 		    MVPP2_BM_START_MASK);
596 
597 		mvpp2_write(sc, MVPP2_POOL_BUF_SIZE_REG(i),
598 		    roundup(MCLBYTES, 1 << MVPP2_POOL_BUF_SIZE_OFFSET));
599 
600 		bm->rxbuf = mallocarray(MVPP2_BM_SIZE, sizeof(struct mvpp2_buf),
601 		    M_DEVBUF, M_WAITOK);
602 		bm->freelist = mallocarray(MVPP2_BM_SIZE, sizeof(*bm->freelist),
603 		    M_DEVBUF, M_WAITOK | M_ZERO);
604 
605 		for (j = 0; j < MVPP2_BM_SIZE; j++) {
606 			rxb = &bm->rxbuf[j];
607 			bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
608 			    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->mb_map);
609 			rxb->mb_m = NULL;
610 		}
611 
612 		/* Use pool-id and rxbuf index as cookie. */
613 		for (j = 0; j < MVPP2_BM_SIZE; j++)
614 			bm->freelist[j] = (i << 16) | (j << 0);
615 
616 		for (j = 0; j < MVPP2_BM_SIZE; j++) {
617 			rxb = &bm->rxbuf[j];
618 			rxb->mb_m = mvpp2_alloc_mbuf(sc, rxb->mb_map);
619 			if (rxb->mb_m == NULL)
620 				break;
621 
622 			KASSERT(bm->freelist[bm->free_cons] != -1);
623 			virt = bm->freelist[bm->free_cons];
624 			bm->freelist[bm->free_cons] = -1;
625 			bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;
626 
627 			phys = rxb->mb_map->dm_segs[0].ds_addr;
628 			mvpp2_write(sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
629 			    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
630 			    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
631 			    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
632 			mvpp2_write(sc, MVPP2_BM_VIRT_RLS_REG,
633 			    virt & 0xffffffff);
634 			mvpp2_write(sc, MVPP2_BM_PHY_RLS_REG(i),
635 			    phys & 0xffffffff);
636 		}
637 	}
638 }
639 
/*
 * Set up the receive FIFO sizes: 32KB data / 32KB attributes for
 * port 0, 8KB for port 1 and 4KB for the remaining ports, then set
 * the minimum packet size and kick off FIFO initialization.
 */
void
mvpp2_rx_fifo_init(struct mvpp2_softc *sc)
{
	int i;

	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
	    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);

	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
	    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);

	for (i = 2; i < MVPP2_MAX_PORTS; i++) {
		mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(i),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(i),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(sc, MVPP2_RX_MIN_PKT_SIZE_REG, MVPP2_RX_FIFO_PORT_MIN_PKT);
	/* Latch the configuration. */
	mvpp2_write(sc, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
665 
/*
 * Set up the transmit FIFO sizes and thresholds: 10KB for port 0,
 * 3KB for all other ports.
 */
void
mvpp2_tx_fifo_init(struct mvpp2_softc *sc)
{
	int i;

	mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(0),
	    MVPP22_TX_FIFO_DATA_SIZE_10KB);
	mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(0),
	    MVPP2_TX_FIFO_THRESHOLD_10KB);

	for (i = 1; i < MVPP2_MAX_PORTS; i++) {
		mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(i),
		    MVPP22_TX_FIFO_DATA_SIZE_3KB);
		mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(i),
		    MVPP2_TX_FIFO_THRESHOLD_3KB);
	}
}
683 
/*
 * Initialize the header parser: clear and invalidate the whole
 * TCAM/SRAM, set the per-port initial lookup state, then install the
 * default entries for each protocol stage.
 *
 * Returns 0 on success or the negative error from the first failing
 * sub-initializer.
 */
int
mvpp2_prs_default_init(struct mvpp2_softc *sc)
{
	int i, j, ret;

	/* Enable the TCAM. */
	mvpp2_write(sc, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear every TCAM and SRAM row. */
	for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
		mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, i);
		for (j = 0; j < MVPP2_PRS_TCAM_WORDS; j++)
			mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(j), 0);

		mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, i);
		for (j = 0; j < MVPP2_PRS_SRAM_WORDS; j++)
			mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(j), 0);
	}

	/* Mark all entries invalid. */
	for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++)
		mvpp2_prs_hw_inv(sc, i);

	/* All ports start lookup at the Marvell-header stage. */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_prs_hw_port_init(sc, i, MVPP2_PRS_LU_MH,
		    MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(sc);
	mvpp2_prs_mh_init(sc);
	mvpp2_prs_mac_init(sc);
	mvpp2_prs_dsa_init(sc);
	ret = mvpp2_prs_etype_init(sc);
	if (ret)
		return ret;
	ret = mvpp2_prs_vlan_init(sc);
	if (ret)
		return ret;
	ret = mvpp2_prs_pppoe_init(sc);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_init(sc);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_init(sc);
	if (ret)
		return ret;

	return 0;
}
730 
/*
 * Invalidate parser TCAM entry 'index' by setting its invalid bit.
 */
void
mvpp2_prs_hw_inv(struct mvpp2_softc *sc, int index)
{
	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
	    MVPP2_PRS_TCAM_INV_MASK);
}
738 
/*
 * Configure the parser's per-port lookup parameters: the first lookup
 * id, the maximum number of lookup iterations and the initial packet
 * offset.
 */
void
mvpp2_prs_hw_port_init(struct mvpp2_softc *sc, int port,
    int lu_first, int lu_max, int offset)
{
	uint32_t reg;

	/* First lookup id for this port. */
	reg = mvpp2_read(sc, MVPP2_PRS_INIT_LOOKUP_REG);
	reg &= ~MVPP2_PRS_PORT_LU_MASK(port);
	reg |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(sc, MVPP2_PRS_INIT_LOOKUP_REG, reg);

	/* Maximum lookup iterations. */
	reg = mvpp2_read(sc, MVPP2_PRS_MAX_LOOP_REG(port));
	reg &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	reg |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(sc, MVPP2_PRS_MAX_LOOP_REG(port), reg);

	/* Initial offset into the packet. */
	reg = mvpp2_read(sc, MVPP2_PRS_INIT_OFFS_REG(port));
	reg &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	reg |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(sc, MVPP2_PRS_INIT_OFFS_REG(port), reg);
}
760 
/*
 * Install one default flow entry per port.  The entries carry the
 * flow id in the SRAM AI field and end the lookup (LU_DONE); their
 * port map is left empty here and enabled per-port later.
 */
void
mvpp2_prs_def_flow_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int i;

	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - i;
		mvpp2_prs_tcam_port_map_set(&pe, 0);
		mvpp2_prs_sram_ai_update(&pe, i, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(sc, &pe);
	}
}
778 
/*
 * Install the default Marvell-header entry: skip over the Marvell
 * header (MVPP2_MH_SIZE bytes) for all ports and continue the lookup
 * at the MAC stage.
 */
void
mvpp2_prs_mh_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));
	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(sc, &pe);
}
794 
/*
 * Install the default MAC-stage entries: a catch-all non-promiscuous
 * entry that marks packets for drop and finishes the lookup, and
 * (initially disabled) drop-all and unicast/multicast promiscuous
 * entries.
 */
void
mvpp2_prs_mac_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
	/* Unknown destination MAC: mark for drop. */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
	    MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(sc, &pe);
	/* Drop-all and promiscuous modes start disabled on all ports. */
	mvpp2_prs_mac_drop_all_set(sc, 0, 0);
	mvpp2_prs_mac_promisc_set(sc, 0, MVPP2_PRS_L2_UNI_CAST, 0);
	mvpp2_prs_mac_promisc_set(sc, 0, MVPP2_PRS_L2_MULTI_CAST, 0);
}
814 
/*
 * Install the DSA-stage entries: disable DSA/EDSA tag handling for
 * tagged and untagged frames on all ports, and add a default entry
 * that passes straight through to the VLAN stage.
 */
void
mvpp2_prs_dsa_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	/* DSA/EDSA tag recognition disabled on all ports. */
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	/* Default: no DSA tag found, continue at the VLAN stage. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	/* Shift 0 bytes. */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Shadowed under LU_MAC, matching the reference implementation. */
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_hw_write(sc, &pe);
}
838 
/*
 * Install the default ethertype classification entries in the parser
 * TCAM: PPPoE, ARP, LBTD, IPv4 (with and without options), IPv6, and a
 * final catch-all for unknown ethertypes.  Returns 0 on success or the
 * negative result of mvpp2_prs_tcam_first_free() when the TCAM is full.
 */
int
mvpp2_prs_etype_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_PPPOE);
	/* Skip over the PPPoE header before the next lookup stage. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: ARP -- terminal entry, generation bit ends the lookup. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_ARP);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Record the L3 offset right after the ethertype field. */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: LBTD -- marked as a special packet for the CPU. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv4 without options (version 4, IHL == 5). */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Advance past the ethertype plus 4 bytes of the IP header. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/*
	 * Ethertype: IPv4 with options.  Deliberately reuses 'pe' from the
	 * entry above (no memset): only the TCAM data byte match and the
	 * result-info words are rewritten, so the matching IHL constraint
	 * is relaxed while everything else (shift, offsets) is inherited.
	 */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	pe.index = tid;

	/* Clear the previous IHL match before setting the looser one. */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD, MVPP2_PRS_IPV4_HEAD_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_IPV6);
	/* Skip ethertype + 8 fixed header bytes + one IPv6 address. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
	    MVPP2_MAX_L3_ADDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1003 
/*
 * Initialize the parser VLAN handling: allocate the double-VLAN shadow
 * table, install the single- and double-tagged entries, and add the
 * default "double VLAN" and "no VLAN" fallthrough entries.  Returns 0
 * on success or a non-zero error from the entry-add helpers.
 */
int
mvpp2_prs_vlan_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int ret;

	/* M_WAITOK: this allocation sleeps rather than fail. */
	sc->sc_prs_double_vlans = mallocarray(MVPP2_PRS_DBL_VLANS_MAX,
	    sizeof(*sc->sc_prs_double_vlans), M_DEVBUF, M_WAITOK | M_ZERO);

	/* Double-tagged: 802.1ad outer + 802.1Q inner, then QinQ. */
	ret = mvpp2_prs_double_vlan_add(sc, ETHERTYPE_VLAN, ETHERTYPE_QINQ,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_double_vlan_add(sc, ETHERTYPE_VLAN, ETHERTYPE_VLAN,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	/* Single-tagged: 802.1ad and 802.1Q. */
	ret = mvpp2_prs_vlan_add(sc, ETHERTYPE_QINQ, MVPP2_PRS_SINGLE_VLAN_AI,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_vlan_add(sc, ETHERTYPE_VLAN, MVPP2_PRS_SINGLE_VLAN_AI,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;

	/* Default entry for double-VLAN frames: mark and move on to L2. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
	    MVPP2_PRS_RI_VLAN_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
	    MVPP2_PRS_DBL_VLAN_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry for untagged frames: mark "no VLAN", go to L2. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
	    MVPP2_PRS_RI_VLAN_MASK);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1055 
/*
 * Install the parser entries for protocols carried over PPPoE: IPv4
 * (with and without options), IPv6, and a catch-all for other PPP
 * protocols.  Returns 0 on success or the negative result of
 * mvpp2_prs_tcam_first_free() when the TCAM is full.
 */
int
mvpp2_prs_pppoe_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE, header with options (default IPv4 match). */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, PPP_IP);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Advance past the PPP protocol field plus 4 IP header bytes. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/*
	 * IPv4 over PPPoE without options.  Deliberately reuses 'pe' from
	 * the entry above (no memset): only the IHL byte match and the
	 * result-info words are rewritten.
	 */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* IPv6 over PPPoE. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* Non-IP over PPPoE: mark L3 unknown and finish the lookup. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1134 
/*
 * Initialize IPv6 parsing: per-protocol entries (TCP, UDP, ICMPv6,
 * IP-in-IP), multicast handling, and the default entries for hop-limit
 * drop, unknown protocol (with and without extension headers) and
 * unknown address.  Returns 0 on success, non-zero/negative on error.
 */
int
mvpp2_prs_ip6_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid, ret;

	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	/* ICMPv6 is flagged as a special packet for the CPU. */
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_ICMPV6,
	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_IPIP, MVPP2_PRS_RI_UDF7_IP6_LITE,
	    MVPP2_PRS_RI_UDF7_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
	if (ret)
		return ret;

	/* Entry for packets with hop limit 0: mark to be dropped. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe,
	    MVPP2_PRS_RI_L3_UN | MVPP2_PRS_RI_DROP_MASK,
	    MVPP2_PRS_RI_L3_PROTO_MASK | MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/*
	 * NOTE(review): shadow LU is recorded as MVPP2_PRS_LU_IP4 for
	 * these IP6 entries; this mirrors the reference driver but looks
	 * odd -- confirm it is intentional before changing.
	 */
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown L4 protocol, no extension headers. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	/* L4 offset: end of the fixed IPv6 header minus 6 bytes. */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown L4 protocol behind extension headers. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown address, treat as unicast and re-enter IP6 LU. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to the IPv6 header start for the next pass. */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1225 
/*
 * Initialize IPv4 parsing: per-protocol entries (TCP, UDP, IGMP),
 * broadcast/multicast handling, and the default entries for unknown
 * protocol and unknown address.  Returns 0 on success, non-zero or
 * negative on error.
 */
int
mvpp2_prs_ip4_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int ret;

	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	/* IGMP is flagged as a special packet for the CPU. */
	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_IGMP,
	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_BROAD_CAST);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
	if (ret)
		return ret;

	/* Default: unknown L4 protocol, loop back for the DIP check. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* L4 offset: end of the fixed IPv4 header minus 4 bytes. */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown destination address, treat as unicast. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1283 
1284 int
1285 mvpp2_port_match(struct device *parent, void *cfdata, void *aux)
1286 {
1287 	struct mvpp2_attach_args *maa = aux;
1288 	char buf[32];
1289 
1290 	if (OF_getprop(maa->ma_node, "status", buf, sizeof(buf)) > 0 &&
1291 	    strcmp(buf, "disabled") == 0)
1292 		return 0;
1293 
1294 	return 1;
1295 }
1296 
1297 void
1298 mvpp2_port_attach(struct device *parent, struct device *self, void *aux)
1299 {
1300 	struct mvpp2_port *sc = (void *)self;
1301 	struct mvpp2_attach_args *maa = aux;
1302 	struct mvpp2_tx_queue *txq;
1303 	struct mvpp2_rx_queue *rxq;
1304 	struct ifnet *ifp;
1305 	uint32_t phy, reg;
1306 	int i, idx, len, node;
1307 	char *phy_mode;
1308 	char *managed;
1309 
1310 	sc->sc = (void *)parent;
1311 	sc->sc_node = maa->ma_node;
1312 	sc->sc_dmat = maa->ma_dmat;
1313 
1314 	sc->sc_id = OF_getpropint(sc->sc_node, "port-id", 0);
1315 	sc->sc_gop_id = OF_getpropint(sc->sc_node, "gop-port-id", 0);
1316 	sc->sc_sfp = OF_getpropint(sc->sc_node, "sfp", 0);
1317 
1318 	len = OF_getproplen(sc->sc_node, "phy-mode");
1319 	if (len <= 0) {
1320 		printf("%s: cannot extract phy-mode\n", self->dv_xname);
1321 		return;
1322 	}
1323 
1324 	phy_mode = malloc(len, M_TEMP, M_WAITOK);
1325 	OF_getprop(sc->sc_node, "phy-mode", phy_mode, len);
1326 	if (!strncmp(phy_mode, "10gbase-kr", strlen("10gbase-kr")))
1327 		sc->sc_phy_mode = PHY_MODE_10GBASER;
1328 	else if (!strncmp(phy_mode, "2500base-x", strlen("2500base-x")))
1329 		sc->sc_phy_mode = PHY_MODE_2500BASEX;
1330 	else if (!strncmp(phy_mode, "1000base-x", strlen("1000base-x")))
1331 		sc->sc_phy_mode = PHY_MODE_1000BASEX;
1332 	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
1333 		sc->sc_phy_mode = PHY_MODE_SGMII;
1334 	else if (!strncmp(phy_mode, "rgmii-rxid", strlen("rgmii-rxid")))
1335 		sc->sc_phy_mode = PHY_MODE_RGMII_RXID;
1336 	else if (!strncmp(phy_mode, "rgmii-txid", strlen("rgmii-txid")))
1337 		sc->sc_phy_mode = PHY_MODE_RGMII_TXID;
1338 	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
1339 		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
1340 	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
1341 		sc->sc_phy_mode = PHY_MODE_RGMII;
1342 	else {
1343 		printf("%s: cannot use phy-mode %s\n", self->dv_xname,
1344 		    phy_mode);
1345 		return;
1346 	}
1347 	free(phy_mode, M_TEMP, len);
1348 
1349 	/* Lookup PHY. */
1350 	phy = OF_getpropint(sc->sc_node, "phy", 0);
1351 	if (phy) {
1352 		node = OF_getnodebyphandle(phy);
1353 		if (!node) {
1354 			printf(": no phy\n");
1355 			return;
1356 		}
1357 		sc->sc_mdio = mii_byphandle(phy);
1358 		sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
1359 		sc->sc_sfp = OF_getpropint(node, "sfp", sc->sc_sfp);
1360 	}
1361 
1362 	if ((len = OF_getproplen(sc->sc_node, "managed")) >= 0) {
1363 		managed = malloc(len, M_TEMP, M_WAITOK);
1364 		OF_getprop(sc->sc_node, "managed", managed, len);
1365 		if (!strncmp(managed, "in-band-status",
1366 		    strlen("in-band-status")))
1367 			sc->sc_inband_status = 1;
1368 		free(managed, M_TEMP, len);
1369 	}
1370 
1371 	if (OF_getprop(sc->sc_node, "local-mac-address",
1372 	    &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
1373 		memset(sc->sc_lladdr, 0xff, sizeof(sc->sc_lladdr));
1374 	printf(": address %s\n", ether_sprintf(sc->sc_lladdr));
1375 
1376 	sc->sc_ntxq = sc->sc_nrxq = 1;
1377 	sc->sc_txqs = mallocarray(sc->sc_ntxq, sizeof(*sc->sc_txqs),
1378 	    M_DEVBUF, M_WAITOK | M_ZERO);
1379 	sc->sc_rxqs = mallocarray(sc->sc_nrxq, sizeof(*sc->sc_rxqs),
1380 	    M_DEVBUF, M_WAITOK | M_ZERO);
1381 
1382 	for (i = 0; i < sc->sc_ntxq; i++) {
1383 		txq = &sc->sc_txqs[i];
1384 		txq->id = mvpp2_txq_phys(sc->sc_id, i);
1385 		txq->log_id = i;
1386 		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
1387 	}
1388 
1389 	sc->sc_tx_time_coal = MVPP2_TXDONE_COAL_USEC;
1390 
1391 	for (i = 0; i < sc->sc_nrxq; i++) {
1392 		rxq = &sc->sc_rxqs[i];
1393 		rxq->id = sc->sc_id * 32 + i;
1394 		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
1395 		rxq->time_coal = MVPP2_RX_COAL_USEC;
1396 	}
1397 
1398 	mvpp2_egress_disable(sc);
1399 	mvpp2_port_disable(sc);
1400 
1401 	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_GROUP_INDEX_REG,
1402 	    sc->sc_id << MVPP2_ISR_RXQ_GROUP_INDEX_GROUP_SHIFT |
1403 	    0 /* queue vector id */);
1404 	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_REG,
1405 	    sc->sc_nrxq << MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_SIZE_SHIFT |
1406 	    0 /* first rxq */);
1407 
1408 	mvpp2_ingress_disable(sc);
1409 	mvpp2_defaults_set(sc);
1410 
1411 	mvpp2_cls_oversize_rxq_set(sc);
1412 	mvpp2_cls_port_config(sc);
1413 
1414 	/*
1415 	 * We have one pool per core, so all RX queues on a specific
1416 	 * core share that pool.  Also long and short uses the same
1417 	 * pool.
1418 	 */
1419 	for (i = 0; i < sc->sc_nrxq; i++) {
1420 		mvpp2_rxq_long_pool_set(sc, i, i);
1421 		mvpp2_rxq_short_pool_set(sc, i, i);
1422 	}
1423 
1424 	/* Reset Mac */
1425 	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
1426 	    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) |
1427 	    MVPP2_PORT_CTRL2_PORTMACRESET);
1428 	if (sc->sc_gop_id == 0) {
1429 		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG,
1430 		    mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
1431 		    ~MV_XLG_MAC_CTRL0_MACRESETN);
1432 		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
1433 		reg |= MVPP22_MPCS_CLK_DIV_PHASE_SET;
1434 		reg &= ~MVPP22_MPCS_TX_SD_CLK_RESET;
1435 		reg &= ~MVPP22_MPCS_RX_SD_CLK_RESET;
1436 		reg &= ~MVPP22_MPCS_MAC_CLK_RESET;
1437 		mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
1438 		reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
1439 		reg &= ~MVPP22_XPCS_PCSRESET;
1440 		mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
1441 	}
1442 
1443 	timeout_set(&sc->sc_tick, mvpp2_tick, sc);
1444 
1445 	ifp = &sc->sc_ac.ac_if;
1446 	ifp->if_softc = sc;
1447 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1448 	ifp->if_ioctl = mvpp2_ioctl;
1449 	ifp->if_start = mvpp2_start;
1450 	ifp->if_watchdog = mvpp2_watchdog;
1451 	ifq_set_maxlen(&ifp->if_snd, MVPP2_NTXDESC - 1);
1452 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1453 
1454 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1455 
1456 	sc->sc_mii.mii_ifp = ifp;
1457 	sc->sc_mii.mii_readreg = mvpp2_mii_readreg;
1458 	sc->sc_mii.mii_writereg = mvpp2_mii_writereg;
1459 	sc->sc_mii.mii_statchg = mvpp2_mii_statchg;
1460 
1461 	ifmedia_init(&sc->sc_media, 0, mvpp2_media_change, mvpp2_media_status);
1462 
1463 	if (sc->sc_mdio) {
1464 		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
1465 		    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY, 0);
1466 		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1467 			printf("%s: no PHY found!\n", self->dv_xname);
1468 			ifmedia_add(&sc->sc_mii.mii_media,
1469 			    IFM_ETHER|IFM_MANUAL, 0, NULL);
1470 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
1471 		} else
1472 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1473 	} else {
1474 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
1475 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1476 
1477 		if (sc->sc_inband_status) {
1478 			mvpp2_inband_statchg(sc);
1479 		} else {
1480 			sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
1481 			sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
1482 			mvpp2_mii_statchg(self);
1483 		}
1484 
1485 		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
1486 		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
1487 	}
1488 
1489 	if_attach(ifp);
1490 	ether_ifattach(ifp);
1491 
1492 	if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
1493 	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
1494 	    sc->sc_phy_mode == PHY_MODE_SGMII ||
1495 	    sc->sc_phy_mode == PHY_MODE_RGMII ||
1496 	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
1497 	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
1498 	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
1499 		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_MASK_REG);
1500 		reg |= MVPP2_GMAC_INT_CAUSE_LINK_CHANGE;
1501 		mvpp2_gmac_write(sc, MVPP2_GMAC_INT_MASK_REG, reg);
1502 		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_SUM_MASK_REG);
1503 		reg |= MVPP2_GMAC_INT_SUM_CAUSE_LINK_CHANGE;
1504 		mvpp2_gmac_write(sc, MVPP2_GMAC_INT_SUM_MASK_REG, reg);
1505 	}
1506 
1507 	if (sc->sc_gop_id == 0) {
1508 		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_MASK_REG);
1509 		reg |= MV_XLG_INTERRUPT_LINK_CHANGE;
1510 		mvpp2_xlg_write(sc, MV_XLG_INTERRUPT_MASK_REG, reg);
1511 		reg = mvpp2_xlg_read(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
1512 		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG;
1513 		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG;
1514 		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
1515 		    sc->sc_phy_mode == PHY_MODE_XAUI)
1516 			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG;
1517 		else
1518 			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG;
1519 		mvpp2_xlg_write(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg);
1520 	}
1521 
1522 	idx = OF_getindex(sc->sc_node, "link", "interrupt-names");
1523 	if (idx >= 0)
1524 		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1525 		    mvpp2_link_intr, sc, sc->sc_dev.dv_xname);
1526 	idx = OF_getindex(sc->sc_node, "hif0", "interrupt-names");
1527 	if (idx < 0)
1528 		idx = OF_getindex(sc->sc_node, "tx-cpu0", "interrupt-names");
1529 	if (idx >= 0)
1530 		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1531 		    mvpp2_intr, sc, sc->sc_dev.dv_xname);
1532 }
1533 
1534 uint32_t
1535 mvpp2_read(struct mvpp2_softc *sc, bus_addr_t addr)
1536 {
1537 	return bus_space_read_4(sc->sc_iot, sc->sc_ioh_base, addr);
1538 }
1539 
1540 void
1541 mvpp2_write(struct mvpp2_softc *sc, bus_addr_t addr, uint32_t data)
1542 {
1543 	bus_space_write_4(sc->sc_iot, sc->sc_ioh_base, addr, data);
1544 }
1545 
1546 uint32_t
1547 mvpp2_gmac_read(struct mvpp2_port *sc, bus_addr_t addr)
1548 {
1549 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1550 	    MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr);
1551 }
1552 
1553 void
1554 mvpp2_gmac_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1555 {
1556 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1557 	    MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr,
1558 	    data);
1559 }
1560 
1561 uint32_t
1562 mvpp2_xlg_read(struct mvpp2_port *sc, bus_addr_t addr)
1563 {
1564 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1565 	    MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr);
1566 }
1567 
1568 void
1569 mvpp2_xlg_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1570 {
1571 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1572 	    MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr,
1573 	    data);
1574 }
1575 
1576 uint32_t
1577 mvpp2_mpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
1578 {
1579 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1580 	    MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr);
1581 }
1582 
1583 void
1584 mvpp2_mpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1585 {
1586 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1587 	    MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr,
1588 	    data);
1589 }
1590 
1591 uint32_t
1592 mvpp2_xpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
1593 {
1594 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1595 	    MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr);
1596 }
1597 
1598 void
1599 mvpp2_xpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1600 {
1601 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1602 	    MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr,
1603 	    data);
1604 }
1605 
/*
 * Interface start routine: drain the send queue into the shared
 * aggregated TX ring until the ring fills up or the queue is empty.
 */
void
mvpp2_start(struct ifnet *ifp)
{
	struct mvpp2_port *sc = ifp->if_softc;
	struct mvpp2_tx_queue *txq = &sc->sc->sc_aggr_txqs[0];
	struct mbuf *m;
	int error, idx;

	/* Nothing to do unless the interface is running, idle and has link. */
	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	idx = txq->prod;
	while (txq->cnt < MVPP2_AGGR_TXQ_SIZE) {
		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		error = mvpp2_encap(sc, m, &idx);
		if (error == ENOBUFS) {
			/* Ring/map exhausted: stop filling until TX done. */
			m_freem(m); /* give up: drop it */
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		if (error == EFBIG) {
			/* Packet cannot be mapped at all: count and skip. */
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (txq->prod != idx) {
		txq->prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
1654 
/*
 * Map a packet for DMA and fill the aggregated TX ring descriptors.
 * '*idx' is the caller's running producer index and is advanced on
 * success.  Returns 0 on success, ENOBUFS when the mbuf cannot be
 * mapped or the ring lacks space (caller drops the packet).
 */
int
mvpp2_encap(struct mvpp2_port *sc, struct mbuf *m, int *idx)
{
	struct mvpp2_tx_queue *txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_tx_desc *txd;
	bus_dmamap_t map;
	uint32_t command;
	int i, current, first, last;

	first = last = current = *idx;
	map = txq->buf[current].mb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
		return ENOBUFS;

	/* Keep slack in the ring so producer never catches consumer. */
	if (map->dm_nsegs > (MVPP2_AGGR_TXQ_SIZE - txq->cnt - 2)) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	command = MVPP2_TXD_L4_CSUM_NOT |
	    MVPP2_TXD_IP_CSUM_DISABLE;
	/* One descriptor per DMA segment; first/last flagged explicitly. */
	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &txq->descs[current];
		memset(txd, 0, sizeof(*txd));
		/* Hardware wants a 32-byte aligned address + byte offset. */
		txd->buf_phys_addr_hw_cmd2 =
		    map->dm_segs[i].ds_addr & ~0x1f;
		txd->packet_offset =
		    map->dm_segs[i].ds_addr & 0x1f;
		txd->data_size = map->dm_segs[i].ds_len;
		txd->phys_txq = sc->sc_txqs[0].id;
		txd->command = command |
		    MVPP2_TXD_PADDING_DISABLE;
		if (i == 0)
		    txd->command |= MVPP2_TXD_F_DESC;
		if (i == (map->dm_nsegs - 1))
		    txd->command |= MVPP2_TXD_L_DESC;

		bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring),
		    current * sizeof(*txd), sizeof(*txd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		last = current;
		current = (current + 1) % MVPP2_AGGR_TXQ_SIZE;
		KASSERT(current != txq->cons);
	}

	/*
	 * The mbuf and its map are recorded in the LAST slot so both are
	 * released when the final descriptor completes; the first slot
	 * inherits the last slot's spare map.
	 */
	KASSERT(txq->buf[last].mb_m == NULL);
	txq->buf[first].mb_map = txq->buf[last].mb_map;
	txq->buf[last].mb_map = map;
	txq->buf[last].mb_m = m;

	txq->cnt += map->dm_nsegs;
	*idx = current;

	/* Tell the hardware how many descriptors were just added. */
	mvpp2_write(sc->sc, MVPP2_AGGR_TXQ_UPDATE_REG, map->dm_nsegs);

	return 0;
}
1717 
/*
 * Interface ioctl handler.  Runs the whole request at splnet() since it
 * can bring the interface up/down and reprogram hardware filters.
 */
int
mvpp2_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct mvpp2_port *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: just reload the RX filter below. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				mvpp2_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				mvpp2_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = mvpp2_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCGIFSFFPAGE:
		/* Serialize SFP EEPROM access across all mvpp ports. */
		error = rw_enter(&mvpp2_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
		rw_exit(&mvpp2_sff_lock);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	/* ENETRESET means "reload the RX filter", not a real error. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			mvpp2_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
1775 
1776 int
1777 mvpp2_rxrinfo(struct mvpp2_port *sc, struct if_rxrinfo *ifri)
1778 {
1779 	struct mvpp2_rx_queue *rxq;
1780 	struct if_rxring_info *ifrs, *ifr;
1781 	unsigned int i;
1782 	int error;
1783 
1784 	ifrs = mallocarray(sc->sc_nrxq, sizeof(*ifrs), M_TEMP,
1785 	    M_WAITOK|M_ZERO|M_CANFAIL);
1786 	if (ifrs == NULL)
1787 		return (ENOMEM);
1788 
1789 	for (i = 0; i < sc->sc_nrxq; i++) {
1790 		rxq = &sc->sc_rxqs[i];
1791 		ifr = &ifrs[i];
1792 
1793 		snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), "%u", i);
1794 		ifr->ifr_size = MCLBYTES;
1795 		ifr->ifr_info = rxq->rxring;
1796 	}
1797 
1798 	error = if_rxr_info_ioctl(ifri, i, ifrs);
1799 	free(ifrs, M_TEMP, i * sizeof(*ifrs));
1800 
1801 	return (error);
1802 }
1803 
/*
 * Transmit watchdog.  A tx timeout is only logged for now; no recovery
 * (reset/restart) is attempted.
 */
void
mvpp2_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}
1809 
1810 int
1811 mvpp2_media_change(struct ifnet *ifp)
1812 {
1813 	struct mvpp2_port *sc = ifp->if_softc;
1814 
1815 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1816 		mii_mediachg(&sc->sc_mii);
1817 
1818 	return (0);
1819 }
1820 
1821 void
1822 mvpp2_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1823 {
1824 	struct mvpp2_port *sc = ifp->if_softc;
1825 
1826 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1827 		mii_pollstat(&sc->sc_mii);
1828 
1829 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1830 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1831 }
1832 
1833 int
1834 mvpp2_mii_readreg(struct device *self, int phy, int reg)
1835 {
1836 	struct mvpp2_port *sc = (void *)self;
1837 	return sc->sc_mdio->md_readreg(sc->sc_mdio->md_cookie, phy, reg);
1838 }
1839 
1840 void
1841 mvpp2_mii_writereg(struct device *self, int phy, int reg, int val)
1842 {
1843 	struct mvpp2_port *sc = (void *)self;
1844 	return sc->sc_mdio->md_writereg(sc->sc_mdio->md_cookie, phy, reg, val);
1845 }
1846 
/*
 * MII status-change callback: reprogram the MAC for the new link state.
 */
void
mvpp2_mii_statchg(struct device *self)
{
	mvpp2_port_change((struct mvpp2_port *)self);
}
1853 
/*
 * Derive link state, speed and duplex from the MAC status registers
 * when link management is in-band (no MII PHY attached), then apply
 * the result via mvpp2_port_change().
 */
void
mvpp2_inband_statchg(struct mvpp2_port *sc)
{
	uint32_t reg;

	sc->sc_mii.mii_media_status = IFM_AVALID;
	sc->sc_mii.mii_media_active = IFM_ETHER;

	/* Only GOP 0 has the 10G-capable XLG MAC. */
	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		reg = mvpp2_xlg_read(sc, MV_XLG_MAC_PORT_STATUS_REG);
		if (reg & MV_XLG_MAC_PORT_STATUS_LINKSTATUS)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		/* XLG links are reported as 10G SR full duplex. */
		sc->sc_mii.mii_media_active |= IFM_FDX;
		sc->sc_mii.mii_media_active |= IFM_10G_SR;
	} else {
		reg = mvpp2_gmac_read(sc, MVPP2_PORT_STATUS0_REG);
		if (reg & MVPP2_PORT_STATUS0_LINKUP)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		if (reg & MVPP2_PORT_STATUS0_FULLDX)
			sc->sc_mii.mii_media_active |= IFM_FDX;
		/* 802.3z modes imply the speed; otherwise read it back. */
		if (sc->sc_phy_mode == PHY_MODE_2500BASEX)
			sc->sc_mii.mii_media_active |= IFM_2500_SX;
		else if (sc->sc_phy_mode == PHY_MODE_1000BASEX)
			sc->sc_mii.mii_media_active |= IFM_1000_SX;
		else if (reg & MVPP2_PORT_STATUS0_GMIISPEED)
			sc->sc_mii.mii_media_active |= IFM_1000_T;
		else if (reg & MVPP2_PORT_STATUS0_MIISPEED)
			sc->sc_mii.mii_media_active |= IFM_100_TX;
		else
			sc->sc_mii.mii_media_active |= IFM_10_T;
	}

	mvpp2_port_change(sc);
}
1889 
/*
 * React to a link state transition.  With in-band status the MAC
 * tracks the link by itself; otherwise force link up/down and program
 * the speed/duplex the current media state reports.
 */
void
mvpp2_port_change(struct mvpp2_port *sc)
{
	uint32_t reg;

	/* Nothing to do if the link state did not actually change. */
	if (!!(sc->sc_mii.mii_media_status & IFM_ACTIVE) == sc->sc_link)
		return;

	sc->sc_link = !sc->sc_link;

	/* In-band mode: the hardware handles the rest itself. */
	if (sc->sc_inband_status)
		return;

	if (sc->sc_link) {
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKPASS;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
			reg |= MVPP2_GMAC_FORCE_LINK_PASS;
			/* Clear speed/duplex, then set from active media. */
			reg &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_2500_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T)
				reg |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX)
				reg |= MVPP2_GMAC_CONFIG_MII_SPEED;
			if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
				reg |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	} else {
		/* Link lost: force the MAC link down. */
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
			reg |= MVPP2_GMAC_FORCE_LINK_DOWN;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	}
}
1942 
1943 void
1944 mvpp2_tick(void *arg)
1945 {
1946 	struct mvpp2_port *sc = arg;
1947 	int s;
1948 
1949 	s = splnet();
1950 	mii_tick(&sc->sc_mii);
1951 	splx(s);
1952 
1953 	timeout_add_sec(&sc->sc_tick, 1);
1954 }
1955 
1956 int
1957 mvpp2_link_intr(void *arg)
1958 {
1959 	struct mvpp2_port *sc = arg;
1960 	uint32_t reg;
1961 	int event = 0;
1962 
1963 	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
1964 	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
1965 		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_CAUSE_REG);
1966 		if (reg & MV_XLG_INTERRUPT_LINK_CHANGE)
1967 			event = 1;
1968 	} else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
1969 	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
1970 	    sc->sc_phy_mode == PHY_MODE_SGMII ||
1971 	    sc->sc_phy_mode == PHY_MODE_RGMII ||
1972 	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
1973 	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
1974 	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
1975 		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_CAUSE_REG);
1976 		if (reg & MVPP2_GMAC_INT_CAUSE_LINK_CHANGE)
1977 			event = 1;
1978 	}
1979 
1980 	if (event && sc->sc_inband_status)
1981 		mvpp2_inband_statchg(sc);
1982 
1983 	return (1);
1984 }
1985 
/*
 * Per-port RX/TX interrupt handler.  The cause register carries
 * per-queue occupancy bits plus a miscellaneous-event summary.
 */
int
mvpp2_intr(void *arg)
{
	struct mvpp2_port *sc = arg;
	uint32_t reg;

	reg = mvpp2_read(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id));
	if (reg & MVPP2_CAUSE_MISC_SUM_MASK) {
		/* Acknowledge misc events and clear the summary bit. */
		mvpp2_write(sc->sc, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id),
		    reg & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}
	/* Reap completed transmit descriptors. */
	if (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_tx_proc(sc,
		    (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK) >>
		    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET);

	/* Process received packets. */
	if (reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_rx_proc(sc,
		    reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK);

	return (1);
}
2009 
2010 void
2011 mvpp2_tx_proc(struct mvpp2_port *sc, uint8_t queues)
2012 {
2013 	struct mvpp2_tx_queue *txq;
2014 	int i;
2015 
2016 	for (i = 0; i < sc->sc_ntxq; i++) {
2017 		txq = &sc->sc_txqs[i];
2018 		if ((queues & (1 << i)) == 0)
2019 			continue;
2020 		mvpp2_txq_proc(sc, txq);
2021 	}
2022 }
2023 
/*
 * Reclaim transmitted descriptors.  The hardware counts completions
 * per port queue, but the buffers live in the shared aggregated tx
 * ring, which is consumed strictly in order.
 */
void
mvpp2_txq_proc(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvpp2_tx_queue *aggr_txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_buf *txb;
	int i, idx, nsent;

	/* Reading the sent register also clears the counter. */
	nsent = (mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id)) &
	    MVPP2_TRANSMITTED_COUNT_MASK) >>
	    MVPP2_TRANSMITTED_COUNT_OFFSET;

	for (i = 0; i < nsent; i++) {
		idx = aggr_txq->cons;
		KASSERT(idx < MVPP2_AGGR_TXQ_SIZE);

		txb = &aggr_txq->buf[idx];
		/* Only a packet's last descriptor slot carries the mbuf. */
		if (txb->mb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
			    txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->mb_map);

			m_freem(txb->mb_m);
			txb->mb_m = NULL;
		}

		aggr_txq->cnt--;
		aggr_txq->cons = (aggr_txq->cons + 1) % MVPP2_AGGR_TXQ_SIZE;
	}

	/* Everything reclaimed: disarm the tx watchdog. */
	if (aggr_txq->cnt == 0)
		ifp->if_timer = 0;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
2060 
2061 void
2062 mvpp2_rx_proc(struct mvpp2_port *sc, uint8_t queues)
2063 {
2064 	struct mvpp2_rx_queue *rxq;
2065 	int i;
2066 
2067 	for (i = 0; i < sc->sc_nrxq; i++) {
2068 		rxq = &sc->sc_rxqs[i];
2069 		if ((queues & (1 << i)) == 0)
2070 			continue;
2071 		mvpp2_rxq_proc(sc, rxq);
2072 	}
2073 
2074 	mvpp2_rx_refill(sc);
2075 }
2076 
/*
 * Drain one receive queue: hand completed mbufs to the network stack
 * and record the consumed buffer-manager slots for later refill.
 */
void
mvpp2_rxq_proc(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mvpp2_rx_desc *rxd;
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	struct mbuf *m;
	uint64_t virt;
	uint32_t i, nrecv, pool;

	nrecv = mvpp2_rxq_received(sc, rxq->id);
	if (!nrecv)
		return;

	/* Buffers come from this CPU's buffer-manager pool. */
	pool = curcpu()->ci_cpuid;
	KASSERT(pool < sc->sc->sc_npools);
	bm = &sc->sc->sc_bm_pools[pool];

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < nrecv; i++) {
		rxd = &rxq->descs[rxq->cons];
		/* Cookie encodes pool (bits 16-31) and rxbuf index (0-15). */
		virt = rxd->buf_cookie_bm_qset_cls_info;
		KASSERT(((virt >> 16) & 0xffff) == pool);
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m != NULL);

		bus_dmamap_sync(sc->sc_dmat, rxb->mb_map, 0,
		    rxd->data_size, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->mb_map);

		m = rxb->mb_m;
		rxb->mb_m = NULL;

		m->m_pkthdr.len = m->m_len = rxd->data_size;
		/* Strip the Marvell header prepended by the hardware. */
		m_adj(m, MVPP2_MH_SIZE);
		ml_enqueue(&ml, m);

		/* Queue this slot so mvpp2_rx_refill() can restock it. */
		KASSERT(bm->freelist[bm->free_prod] == -1);
		bm->freelist[bm->free_prod] = virt & 0xffffffff;
		bm->free_prod = (bm->free_prod + 1) % MVPP2_BM_SIZE;

		rxq->cons = (rxq->cons + 1) % MVPP2_NRXDESC;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Tell the hardware how many descriptors were processed/freed. */
	mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);

	if_input(ifp, &ml);
}
2135 
2136 /*
2137  * We have a pool per core, and since we should not assume that
2138  * RX buffers are always used in order, keep a list of rxbuf[]
2139  * indices that should be filled with an mbuf, if possible.
2140  */
2141 void
2142 mvpp2_rx_refill(struct mvpp2_port *sc)
2143 {
2144 	struct mvpp2_bm_pool *bm;
2145 	struct mvpp2_buf *rxb;
2146 	uint64_t phys, virt;
2147 	int pool;
2148 
2149 	pool = curcpu()->ci_cpuid;
2150 	KASSERT(pool < sc->sc->sc_npools);
2151 	bm = &sc->sc->sc_bm_pools[pool];
2152 
2153 	while (bm->free_cons != bm->free_prod) {
2154 		KASSERT(bm->freelist[bm->free_cons] != -1);
2155 		virt = bm->freelist[bm->free_cons];
2156 		KASSERT(((virt >> 16) & 0xffff) == pool);
2157 		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
2158 		rxb = &bm->rxbuf[virt & 0xffff];
2159 		KASSERT(rxb->mb_m == NULL);
2160 
2161 		rxb->mb_m = mvpp2_alloc_mbuf(sc->sc, rxb->mb_map);
2162 		if (rxb->mb_m == NULL)
2163 			break;
2164 
2165 		bm->freelist[bm->free_cons] = -1;
2166 		bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;
2167 
2168 		phys = rxb->mb_map->dm_segs[0].ds_addr;
2169 		mvpp2_write(sc->sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
2170 		    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
2171 		    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
2172 		    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
2173 		mvpp2_write(sc->sc, MVPP2_BM_VIRT_RLS_REG,
2174 		    virt & 0xffffffff);
2175 		mvpp2_write(sc->sc, MVPP2_BM_PHY_RLS_REG(pool),
2176 		    phys & 0xffffffff);
2177 	}
2178 }
2179 
/*
 * Bring the interface up: program parser/classifier entries, set up
 * the tx/rx queues, configure the MAC and enable traffic.
 */
void
mvpp2_up(struct mvpp2_port *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	/* Accept broadcast and our own station address. */
	memcpy(sc->sc_cur_lladdr, sc->sc_lladdr, ETHER_ADDR_LEN);
	mvpp2_prs_mac_da_accept(sc, etherbroadcastaddr, 1);
	mvpp2_prs_mac_da_accept(sc, sc->sc_cur_lladdr, 1);
	mvpp2_prs_tag_mode_set(sc->sc, sc->sc_id, MVPP2_TAG_TYPE_MH);
	mvpp2_prs_def_flow(sc);

	for (i = 0; i < sc->sc_ntxq; i++)
		mvpp2_txq_hw_init(sc, &sc->sc_txqs[i]);

	mvpp2_tx_time_coal_set(sc, sc->sc_tx_time_coal);

	for (i = 0; i < sc->sc_nrxq; i++)
		mvpp2_rxq_hw_init(sc, &sc->sc_rxqs[i]);

	/* FIXME: rx buffer fill */

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	mvpp2_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	mvpp2_txp_max_tx_size_set(sc);

	/* XXX: single vector */
	mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id),
	    MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_MISC_SUM_MASK);
	mvpp2_interrupts_enable(sc, (1 << 0));

	mvpp2_mac_config(sc);
	mvpp2_egress_enable(sc);
	mvpp2_ingress_enable(sc);

	/* Start the PHY polling timer. */
	timeout_add_sec(&sc->sc_tick, 1);
}
2227 
/*
 * Allocate and program one aggregated (per-CPU) transmit queue shared
 * by all ports of this controller.
 */
void
mvpp2_aggr_txq_hw_init(struct mvpp2_softc *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int i;

	/* Descriptor ring, 32-byte aligned as required by the hardware. */
	txq->ring = mvpp2_dmamem_alloc(sc,
	    MVPP2_AGGR_TXQ_SIZE * sizeof(struct mvpp2_tx_desc), 32);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_AGGR_TXQ_SIZE, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	/* Pre-create a DMA map per descriptor slot. */
	for (i = 0; i < MVPP2_AGGR_TXQ_SIZE; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	/* Resume from the hardware's current producer index. */
	txq->prod = mvpp2_read(sc, MVPP2_AGGR_TXQ_INDEX_REG(txq->id));
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_ADDR_REG(txq->id),
	    MVPP2_DMA_DVA(txq->ring) >> MVPP22_DESC_ADDR_OFFS);
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_SIZE_REG(txq->id),
	    MVPP2_AGGR_TXQ_SIZE);
}
2257 
/*
 * Allocate and program one per-port transmit queue, including its
 * descriptor prefetch and egress scheduler (WRR/EJP) settings.
 */
void
mvpp2_txq_hw_init(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int desc, desc_per_txq;
	uint32_t reg;
	int i;

	txq->prod = txq->cons = txq->cnt = 0;

	/* Descriptor ring, 32-byte aligned as required by the hardware. */
	txq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NTXDESC * sizeof(struct mvpp2_tx_desc), 32);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_NTXDESC, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	/* Pre-create a DMA map per descriptor slot. */
	for (i = 0; i < MVPP2_NTXDESC; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	/* Queue registers are accessed indirectly via TXQ_NUM. */
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(txq->ring));
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG,
	    MVPP2_NTXDESC & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(sc->sc, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_RSVD_CLR_REG,
	    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	reg = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG);
	reg &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PENDING_REG, reg);

	/* Carve out this queue's slice of the descriptor prefetch buffer. */
	desc_per_txq = 16;
	desc = (sc->sc_id * MVPP2_MAX_TXQ * desc_per_txq) +
	    (txq->log_id * desc_per_txq);

	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG,
	    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
	    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	mvpp2_write(sc->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(sc));

	reg = mvpp2_read(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	reg &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	reg |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	reg |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), reg);

	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
	    MVPP2_TXQ_TOKEN_SIZE_MAX);

	mvpp2_tx_pkts_coal_set(sc, txq, txq->done_pkts_coal);
}
2321 
/*
 * Allocate and program one per-port receive queue, including its
 * packet offset and interrupt coalescing parameters.
 */
void
mvpp2_rxq_hw_init(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	rxq->prod = rxq->cons = 0;

	/* Descriptor ring, 32-byte aligned as required by the hardware. */
	rxq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NRXDESC * sizeof(struct mvpp2_rx_desc), 32);
	rxq->descs = MVPP2_DMA_KVA(rxq->ring);

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring),
	    0, MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Queue registers are accessed indirectly via RXQ_NUM. */
	mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(rxq->ring) >> MVPP22_DESC_ADDR_OFFS);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, MVPP2_NRXDESC);
	mvpp2_write(sc->sc, MVPP2_RXQ_INDEX_REG, 0);
	mvpp2_rxq_offset_set(sc, rxq->id, 0);
	mvpp2_rx_pkts_coal_set(sc, rxq, rxq->pkts_coal);
	mvpp2_rx_time_coal_set(sc, rxq, rxq->time_coal);
	/* Mark the whole ring available to the hardware. */
	mvpp2_rxq_status_update(sc, rxq->id, 0, MVPP2_NRXDESC);
}
2346 
/*
 * (Re)configure the port's MAC for the current PHY mode: reset the
 * MAC/PCS blocks, reprogram comphy/GOP, then release the resets and
 * set up either the XLG (10G) or GMAC path.
 */
void
mvpp2_mac_config(struct mvpp2_port *sc)
{
	uint32_t reg;

	mvpp2_port_disable(sc);

	/* Put the GMAC, and on GOP 0 also XLG/MPCS/XPCS, into reset. */
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
	    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) |
	    MVPP2_PORT_CTRL2_PORTMACRESET);
	if (sc->sc_gop_id == 0) {
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG,
		    mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
		    ~MV_XLG_MAC_CTRL0_MACRESETN);
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
		reg |= MVPP22_MPCS_CLK_DIV_PHASE_SET;
		reg &= ~MVPP22_MPCS_TX_SD_CLK_RESET;
		reg &= ~MVPP22_MPCS_RX_SD_CLK_RESET;
		reg &= ~MVPP22_MPCS_MAC_CLK_RESET;
		mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
		reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
		reg &= ~MVPP22_XPCS_PCSRESET;
		mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
	}

	mvpp2_comphy_config(sc);
	mvpp2_gop_config(sc);

	if (sc->sc_gop_id == 0) {
		/* Release the PCS matching the configured PHY mode. */
		if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
			reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
			reg &= ~MVPP22_MPCS_CLK_DIV_PHASE_SET;
			reg |= MVPP22_MPCS_TX_SD_CLK_RESET;
			reg |= MVPP22_MPCS_RX_SD_CLK_RESET;
			reg |= MVPP22_MPCS_MAC_CLK_RESET;
			mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
		} else if (sc->sc_phy_mode == PHY_MODE_XAUI) {
			reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
			reg |= MVPP22_XPCS_PCSRESET;
			mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
		}

		/* Select 10G (XLG) or 1G (GMAC) MAC mode. */
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL3_REG);
		reg &= ~MV_XLG_MAC_CTRL3_MACMODESELECT_MASK;
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI)
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_10G;
		else
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_GMAC;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL3_REG, reg);
	}

	/* Maximum frame size, expressed in units of two bytes. */
	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL1_REG);
		reg &= ~MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL1_REG, reg);
	} else {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_CTRL_0_REG);
		reg &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MVPP2_GMAC_MAX_RX_SIZE_OFFS;
		mvpp2_gmac_write(sc, MVPP2_GMAC_CTRL_0_REG, reg);
	}

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI))
		mvpp2_xlg_config(sc);
	else
		mvpp2_gmac_config(sc);

	mvpp2_port_enable(sc);
}
2422 
2423 void
2424 mvpp2_xlg_config(struct mvpp2_port *sc)
2425 {
2426 	uint32_t ctl0, ctl4;
2427 
2428 	ctl0 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
2429 	ctl4 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL4_REG);
2430 
2431 	ctl0 |= MV_XLG_MAC_CTRL0_MACRESETN;
2432 	ctl4 &= ~MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK;
2433 	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_PFC_EN;
2434 	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN;
2435 
2436 	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, ctl0);
2437 	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL4_REG, ctl0);
2438 
2439 	/* Port reset */
2440 	while ((mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
2441 	    MV_XLG_MAC_CTRL0_MACRESETN) == 0)
2442 		;
2443 }
2444 
/*
 * Configure the GMAC for the current PHY mode: select the PCS path,
 * clock/pin routing and (optionally) in-band auto-negotiation.
 */
void
mvpp2_gmac_config(struct mvpp2_port *sc)
{
	uint32_t ctl0, ctl2, ctl4, panc;

	/* Setup phy. */
	ctl0 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL0_REG);
	ctl2 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG);
	ctl4 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL4_REG);
	panc = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);

	/* Force link down to change in-band settings. */
	panc &= ~MVPP2_GMAC_FORCE_LINK_PASS;
	panc |= MVPP2_GMAC_FORCE_LINK_DOWN;
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, panc);

	/* Start from a clean slate; mode bits are re-added below. */
	ctl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK |
	    MVPP2_GMAC_INBAND_AN_MASK);
	panc &= ~(MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
	    MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FC_ADV_EN |
	    MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
	    MVPP2_GMAC_IN_BAND_AUTONEG);

	switch (sc->sc_phy_mode) {
	case PHY_MODE_XAUI:
	case PHY_MODE_10GBASER:
		/* Handled by the XLG MAC; nothing to do here. */
		break;
	case PHY_MODE_2500BASEX:
	case PHY_MODE_1000BASEX:
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	case PHY_MODE_SGMII:
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl2 |= MVPP2_GMAC_INBAND_AN_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	case PHY_MODE_RGMII:
	case PHY_MODE_RGMII_ID:
	case PHY_MODE_RGMII_RXID:
	case PHY_MODE_RGMII_TXID:
		ctl4 &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	}

	/* Use Auto-Negotiation for Inband Status only */
	if (sc->sc_inband_status) {
		panc &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
		panc &= ~MVPP2_GMAC_FORCE_LINK_PASS;
		panc &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		panc |= MVPP2_GMAC_IN_BAND_AUTONEG;
		/* TODO: read mode from SFP */
		if (1) {
			/* 802.3z */
			ctl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
			panc |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			panc |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		} else {
			/* SGMII */
			panc |= MVPP2_GMAC_AN_SPEED_EN;
			panc |= MVPP2_GMAC_AN_DUPLEX_EN;
		}
	}

	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL0_REG, ctl0);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG, ctl2);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL4_REG, ctl4);
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, panc);

	/* Port reset */
	while (mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) &
	    MVPP2_PORT_CTRL2_PORTMACRESET)
		;
}
2531 
/*
 * Arguments for the secure-firmware (SMC) call that powers a COMPHY
 * SerDes lane on or off; the mode word combines speed, unit and mode.
 */
#define COMPHY_BASE		0x120000
#define COMPHY_SIP_POWER_ON	0x82000001
#define COMPHY_SIP_POWER_OFF	0x82000002
#define COMPHY_SPEED(x)		((x) << 2)
#define  COMPHY_SPEED_1_25G		0 /* SGMII 1G */
#define  COMPHY_SPEED_2_5G		1
#define  COMPHY_SPEED_3_125G		2 /* SGMII 2.5G */
#define  COMPHY_SPEED_5G		3
#define  COMPHY_SPEED_5_15625G		4 /* XFI 5G */
#define  COMPHY_SPEED_6G		5
#define  COMPHY_SPEED_10_3125G		6 /* XFI 10G */
#define COMPHY_UNIT(x)		((x) << 8)
#define COMPHY_MODE(x)		((x) << 12)
#define  COMPHY_MODE_SATA		1
#define  COMPHY_MODE_SGMII		2 /* SGMII 1G */
#define  COMPHY_MODE_HS_SGMII		3 /* SGMII 2.5G */
#define  COMPHY_MODE_USB3H		4
#define  COMPHY_MODE_USB3D		5
#define  COMPHY_MODE_PCIE		6
#define  COMPHY_MODE_RXAUI		7
#define  COMPHY_MODE_XFI		8
#define  COMPHY_MODE_SFI		9
#define  COMPHY_MODE_USB3		10
#define  COMPHY_MODE_AP			11
2556 
2557 void
2558 mvpp2_comphy_config(struct mvpp2_port *sc)
2559 {
2560 	int node, phys[2], lane, unit;
2561 	uint32_t mode;
2562 
2563 	if (OF_getpropintarray(sc->sc_node, "phys", phys, sizeof(phys)) !=
2564 	    sizeof(phys))
2565 		return;
2566 	node = OF_getnodebyphandle(phys[0]);
2567 	if (!node)
2568 		return;
2569 
2570 	lane = OF_getpropint(node, "reg", 0);
2571 	unit = phys[1];
2572 
2573 	switch (sc->sc_phy_mode) {
2574 	case PHY_MODE_XAUI:
2575 		mode = COMPHY_MODE(COMPHY_MODE_RXAUI) |
2576 		    COMPHY_UNIT(unit);
2577 		break;
2578 	case PHY_MODE_10GBASER:
2579 		mode = COMPHY_MODE(COMPHY_MODE_XFI) |
2580 		    COMPHY_SPEED(COMPHY_SPEED_10_3125G) |
2581 		    COMPHY_UNIT(unit);
2582 		break;
2583 	case PHY_MODE_2500BASEX:
2584 		mode = COMPHY_MODE(COMPHY_MODE_HS_SGMII) |
2585 		    COMPHY_SPEED(COMPHY_SPEED_3_125G) |
2586 		    COMPHY_UNIT(unit);
2587 		break;
2588 	case PHY_MODE_1000BASEX:
2589 	case PHY_MODE_SGMII:
2590 		mode = COMPHY_MODE(COMPHY_MODE_SGMII) |
2591 		    COMPHY_SPEED(COMPHY_SPEED_1_25G) |
2592 		    COMPHY_UNIT(unit);
2593 		break;
2594 	default:
2595 		return;
2596 	}
2597 
2598 	smc_call(COMPHY_SIP_POWER_ON, sc->sc->sc_ioh_paddr + COMPHY_BASE,
2599 	    lane, mode);
2600 }
2601 
/*
 * Program the "group of ports" (GOP) glue logic through the system
 * controller regmap for the current PHY mode, then reset/enable the
 * port's GOP block.  Requires the syscon regmap to be present.
 */
void
mvpp2_gop_config(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc->sc_rm == NULL)
		return;

	if (sc->sc_phy_mode == PHY_MODE_RGMII ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
		/* GOP 0 (the 10G-capable port) cannot do RGMII. */
		if (sc->sc_gop_id == 0)
			return;
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
		reg |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
		regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_CTRL0);
		if (sc->sc_gop_id == 2)
			reg |= GENCONF_CTRL0_PORT0_RGMII |
			    GENCONF_CTRL0_PORT1_RGMII;
		else if (sc->sc_gop_id == 3)
			reg |= GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write_4(sc->sc->sc_rm, GENCONF_CTRL0, reg);
	} else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
	    sc->sc_phy_mode == PHY_MODE_SGMII) {
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
		reg |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
		    GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
		regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);
		if (sc->sc_gop_id > 1) {
			/* Deselect RGMII on the shared GOP 2/3 pins. */
			reg = regmap_read_4(sc->sc->sc_rm, GENCONF_CTRL0);
			if (sc->sc_gop_id == 2)
				reg &= ~GENCONF_CTRL0_PORT0_RGMII;
			else if (sc->sc_gop_id == 3)
				reg &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
			regmap_write_4(sc->sc->sc_rm, GENCONF_CTRL0, reg);
		}
	} else if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
		/* 10GBASE-R is only available on GOP 0 via the XPCS/MPCS. */
		if (sc->sc_gop_id != 0)
			return;
		reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
		reg &= ~MVPP22_XPCS_PCSMODE_MASK;
		reg &= ~MVPP22_XPCS_LANEACTIVE_MASK;
		reg |= 2 << MVPP22_XPCS_LANEACTIVE_OFFS;
		mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS40G_COMMON_CONTROL);
		reg &= ~MVPP2_MPCS_FORWARD_ERROR_CORRECTION_MASK;
		mvpp2_mpcs_write(sc, MVPP22_MPCS40G_COMMON_CONTROL, reg);
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
		reg &= ~MVPP22_MPCS_CLK_DIVISION_RATIO_MASK;
		reg |= MVPP22_MPCS_CLK_DIVISION_RATIO_DEFAULT;
		mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	} else
		return;

	/* Reset and enable this port's GOP block. */
	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL1);
	reg |= GENCONF_PORT_CTRL1_RESET(sc->sc_gop_id) |
	    GENCONF_PORT_CTRL1_EN(sc->sc_gop_id);
	regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL1, reg);

	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
	reg |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);

	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_SOFT_RESET1);
	reg |= GENCONF_SOFT_RESET1_GOP;
	regmap_write_4(sc->sc->sc_rm, GENCONF_SOFT_RESET1, reg);
}
2672 
/*
 * Bring the interface down: stop traffic, mask interrupts, flush and
 * tear down the queues, and drop our unicast parser entry.
 */
void
mvpp2_down(struct mvpp2_port *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg;
	int i;

	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	mvpp2_egress_disable(sc);
	mvpp2_ingress_disable(sc);
	mvpp2_port_disable(sc);

	/* XXX: single vector */
	mvpp2_interrupts_disable(sc, (1 << 0));
	mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id), 0);

	/* Flush pending tx traffic while the queues are torn down. */
	reg = mvpp2_read(sc->sc, MVPP2_TX_PORT_FLUSH_REG);
	reg |= MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
	mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);

	for (i = 0; i < sc->sc_ntxq; i++)
		mvpp2_txq_hw_deinit(sc, &sc->sc_txqs[i]);

	reg &= ~MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
	mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);

	for (i = 0; i < sc->sc_nrxq; i++)
		mvpp2_rxq_hw_deinit(sc, &sc->sc_rxqs[i]);

	mvpp2_prs_mac_da_accept(sc, sc->sc_cur_lladdr, 0);
}
2709 
2710 void
2711 mvpp2_txq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
2712 {
2713 	struct mvpp2_buf *txb;
2714 	int i, pending;
2715 	uint32_t reg;
2716 
2717 	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
2718 	reg = mvpp2_read(sc->sc, MVPP2_TXQ_PREF_BUF_REG);
2719 	reg |= MVPP2_TXQ_DRAIN_EN_MASK;
2720 	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);
2721 
2722 	/*
2723 	 * the queue has been stopped so wait for all packets
2724 	 * to be transmitted.
2725 	 */
2726 	i = 0;
2727 	do {
2728 		if (i >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
2729 			printf("%s: port %d: cleaning queue %d timed out\n",
2730 			    sc->sc_dev.dv_xname, sc->sc_id, txq->log_id);
2731 			break;
2732 		}
2733 		delay(1000);
2734 		i++;
2735 
2736 		pending = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG) &
2737 		    MVPP2_TXQ_PENDING_MASK;
2738 	} while (pending);
2739 
2740 	reg &= ~MVPP2_TXQ_DRAIN_EN_MASK;
2741 	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);
2742 
2743 	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
2744 	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
2745 	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG, 0);
2746 	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG, 0);
2747 
2748 	for (i = 0; i < MVPP2_NTXDESC; i++) {
2749 		txb = &txq->buf[i];
2750 		if (txb->mb_m) {
2751 			bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
2752 			    txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2753 			bus_dmamap_unload(sc->sc_dmat, txb->mb_map);
2754 			m_freem(txb->mb_m);
2755 		}
2756 		bus_dmamap_destroy(sc->sc_dmat, txb->mb_map);
2757 	}
2758 
2759 	mvpp2_dmamem_free(sc->sc, txq->ring);
2760 	free(txq->buf, M_DEVBUF, sizeof(struct mvpp2_buf) *
2761 	    MVPP2_NTXDESC);
2762 }
2763 
2764 void
2765 mvpp2_rxq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
2766 {
2767 	uint32_t nrecv;
2768 
2769 	nrecv = mvpp2_rxq_received(sc, rxq->id);
2770 	if (nrecv)
2771 		mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);
2772 
2773 	mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2774 	mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
2775 	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG, 0);
2776 	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, 0);
2777 
2778 	mvpp2_dmamem_free(sc->sc, rxq->ring);
2779 }
2780 
2781 void
2782 mvpp2_rxq_long_pool_set(struct mvpp2_port *port, int lrxq, int pool)
2783 {
2784 	uint32_t val;
2785 	int prxq;
2786 
2787 	/* get queue physical ID */
2788 	prxq = port->sc_rxqs[lrxq].id;
2789 
2790 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
2791 	val &= ~MVPP2_RXQ_POOL_LONG_MASK;
2792 	val |= ((pool << MVPP2_RXQ_POOL_LONG_OFFS) & MVPP2_RXQ_POOL_LONG_MASK);
2793 
2794 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
2795 }
2796 
2797 void
2798 mvpp2_rxq_short_pool_set(struct mvpp2_port *port, int lrxq, int pool)
2799 {
2800 	uint32_t val;
2801 	int prxq;
2802 
2803 	/* get queue physical ID */
2804 	prxq = port->sc_rxqs[lrxq].id;
2805 
2806 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
2807 	val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
2808 	val |= ((pool << MVPP2_RXQ_POOL_SHORT_OFFS) & MVPP2_RXQ_POOL_SHORT_MASK);
2809 
2810 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
2811 }
2812 
2813 void
2814 mvpp2_iff(struct mvpp2_port *sc)
2815 {
2816 	/* FIXME: multicast handling */
2817 
2818 	if (memcmp(sc->sc_cur_lladdr, sc->sc_lladdr, ETHER_ADDR_LEN) != 0) {
2819 		mvpp2_prs_mac_da_accept(sc, sc->sc_cur_lladdr, 0);
2820 		memcpy(sc->sc_cur_lladdr, sc->sc_lladdr, ETHER_ADDR_LEN);
2821 		mvpp2_prs_mac_da_accept(sc, sc->sc_cur_lladdr, 1);
2822 	}
2823 }
2824 
2825 struct mvpp2_dmamem *
2826 mvpp2_dmamem_alloc(struct mvpp2_softc *sc, bus_size_t size, bus_size_t align)
2827 {
2828 	struct mvpp2_dmamem *mdm;
2829 	int nsegs;
2830 
2831 	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
2832 	mdm->mdm_size = size;
2833 
2834 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2835 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
2836 		goto mdmfree;
2837 
2838 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
2839 	    &nsegs, BUS_DMA_WAITOK) != 0)
2840 		goto destroy;
2841 
2842 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
2843 	    &mdm->mdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
2844 		goto free;
2845 
2846 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
2847 	    NULL, BUS_DMA_WAITOK) != 0)
2848 		goto unmap;
2849 
2850 	bzero(mdm->mdm_kva, size);
2851 
2852 	return (mdm);
2853 
2854 unmap:
2855 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
2856 free:
2857 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
2858 destroy:
2859 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
2860 mdmfree:
2861 	free(mdm, M_DEVBUF, 0);
2862 
2863 	return (NULL);
2864 }
2865 
2866 void
2867 mvpp2_dmamem_free(struct mvpp2_softc *sc, struct mvpp2_dmamem *mdm)
2868 {
2869 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
2870 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
2871 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
2872 	free(mdm, M_DEVBUF, 0);
2873 }
2874 
2875 struct mbuf *
2876 mvpp2_alloc_mbuf(struct mvpp2_softc *sc, bus_dmamap_t map)
2877 {
2878 	struct mbuf *m = NULL;
2879 
2880 	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
2881 	if (!m)
2882 		return (NULL);
2883 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2884 
2885 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
2886 		printf("%s: could not load mbuf DMA map", DEVNAME(sc));
2887 		m_freem(m);
2888 		return (NULL);
2889 	}
2890 
2891 	bus_dmamap_sync(sc->sc_dmat, map, 0,
2892 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
2893 
2894 	return (m);
2895 }
2896 
2897 void
2898 mvpp2_interrupts_enable(struct mvpp2_port *port, int cpu_mask)
2899 {
2900 	mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
2901 	    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
2902 }
2903 
2904 void
2905 mvpp2_interrupts_disable(struct mvpp2_port *port, int cpu_mask)
2906 {
2907 	mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
2908 	    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
2909 }
2910 
2911 int
2912 mvpp2_egress_port(struct mvpp2_port *port)
2913 {
2914 	return MVPP2_MAX_TCONT + port->sc_id;
2915 }
2916 
2917 int
2918 mvpp2_txq_phys(int port, int txq)
2919 {
2920 	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
2921 }
2922 
/* Program sane scheduler and RX defaults for the port. */
void
mvpp2_defaults_set(struct mvpp2_port *port)
{
	int val, queue;

	/* Indirect access: select this port's egress scheduler. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Zero the token counters of all TX queues. */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Scheduler period: core clock cycles per microsecond. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PERIOD_REG, port->sc->sc_tclk /
	    (1000 * 1000));
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* set maximum_low_latency_packet_size value to 256 */
	mvpp2_write(port->sc, MVPP2_RX_CTRL_REG(port->sc_id),
	    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
	    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port, (0xf << 0));
}
2953 
2954 void
2955 mvpp2_ingress_enable(struct mvpp2_port *port)
2956 {
2957 	uint32_t val;
2958 	int lrxq, queue;
2959 
2960 	for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
2961 		queue = port->sc_rxqs[lrxq].id;
2962 		val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
2963 		val &= ~MVPP2_RXQ_DISABLE_MASK;
2964 		mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
2965 	}
2966 }
2967 
2968 void
2969 mvpp2_ingress_disable(struct mvpp2_port *port)
2970 {
2971 	uint32_t val;
2972 	int lrxq, queue;
2973 
2974 	for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
2975 		queue = port->sc_rxqs[lrxq].id;
2976 		val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
2977 		val |= MVPP2_RXQ_DISABLE_MASK;
2978 		mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
2979 	}
2980 }
2981 
2982 void
2983 mvpp2_egress_enable(struct mvpp2_port *port)
2984 {
2985 	struct mvpp2_tx_queue *txq;
2986 	uint32_t qmap;
2987 	int queue;
2988 
2989 	qmap = 0;
2990 	for (queue = 0; queue < port->sc_ntxq; queue++) {
2991 		txq = &port->sc_txqs[queue];
2992 
2993 		if (txq->descs != NULL) {
2994 			qmap |= (1 << queue);
2995 		}
2996 	}
2997 
2998 	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
2999 	    mvpp2_egress_port(port));
3000 	mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
3001 }
3002 
3003 void
3004 mvpp2_egress_disable(struct mvpp2_port *port)
3005 {
3006 	uint32_t reg_data;
3007 	int i;
3008 
3009 	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3010 	    mvpp2_egress_port(port));
3011 	reg_data = (mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG)) &
3012 	    MVPP2_TXP_SCHED_ENQ_MASK;
3013 	if (reg_data)
3014 		mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG,(reg_data <<
3015 		    MVPP2_TXP_SCHED_DISQ_OFFSET));
3016 
3017 	i = 0;
3018 	do {
3019 		if (i >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
3020 			printf("%s: tx stop timed out, status=0x%08x\n",
3021 			    port->sc_dev.dv_xname, reg_data);
3022 			break;
3023 		}
3024 		delay(1000);
3025 		i++;
3026 		reg_data = mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG);
3027 	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
3028 }
3029 
3030 void
3031 mvpp2_port_enable(struct mvpp2_port *port)
3032 {
3033 	uint32_t val;
3034 
3035 	if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
3036 	    port->sc_phy_mode == PHY_MODE_XAUI)) {
3037 		val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
3038 		val |= MV_XLG_MAC_CTRL0_PORTEN;
3039 		val &= ~MV_XLG_MAC_CTRL0_MIBCNTDIS;
3040 		mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
3041 	} else {
3042 		val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
3043 		val |= MVPP2_GMAC_PORT_EN_MASK;
3044 		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3045 		mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
3046 	}
3047 }
3048 
3049 void
3050 mvpp2_port_disable(struct mvpp2_port *port)
3051 {
3052 	uint32_t val;
3053 
3054 	if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
3055 	    port->sc_phy_mode == PHY_MODE_XAUI)) {
3056 		val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
3057 		val &= ~MV_XLG_MAC_CTRL0_PORTEN;
3058 		mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
3059 	}
3060 
3061 	val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
3062 	val &= ~MVPP2_GMAC_PORT_EN_MASK;
3063 	mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
3064 }
3065 
3066 int
3067 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
3068 {
3069 	uint32_t val = mvpp2_read(port->sc, MVPP2_RXQ_STATUS_REG(rxq_id));
3070 
3071 	return val & MVPP2_RXQ_OCCUPIED_MASK;
3072 }
3073 
3074 void
3075 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
3076     int used_count, int free_count)
3077 {
3078 	uint32_t val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
3079 	mvpp2_write(port->sc, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
3080 }
3081 
3082 void
3083 mvpp2_rxq_offset_set(struct mvpp2_port *port, int prxq, int offset)
3084 {
3085 	uint32_t val;
3086 
3087 	offset = offset >> 5;
3088 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
3089 	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
3090 	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
3091 	    MVPP2_RXQ_PACKET_OFFSET_MASK);
3092 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
3093 }
3094 
/* Size the scheduler MTU and token buckets for maximum-sized frames. */
void
mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	uint32_t val, size, mtu;
	int txq;

	mtu = MCLBYTES * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong token bucket update: set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* indirect access to registers */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));

	/* set MTU */
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXqs token size must be larger than MTU */
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	/* Grow each per-queue token bucket too, if it is below the MTU. */
	for (txq = 0; txq < port->sc_ntxq; txq++) {
		val = mvpp2_read(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), val);
		}
	}
}
3140 
3141 void
3142 mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3143     uint32_t pkts)
3144 {
3145 	rxq->pkts_coal =
3146 	    pkts <= MVPP2_OCCUPIED_THRESH_MASK ?
3147 	    pkts : MVPP2_OCCUPIED_THRESH_MASK;
3148 
3149 	mvpp2_write(port->sc, MVPP2_RXQ_NUM_REG, rxq->id);
3150 	mvpp2_write(port->sc, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal);
3151 
3152 }
3153 
3154 void
3155 mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
3156     uint32_t pkts)
3157 {
3158 	txq->done_pkts_coal =
3159 	    pkts <= MVPP2_TRANSMITTED_THRESH_MASK ?
3160 	    pkts : MVPP2_TRANSMITTED_THRESH_MASK;
3161 
3162 	mvpp2_write(port->sc, MVPP2_TXQ_NUM_REG, txq->id);
3163 	mvpp2_write(port->sc, MVPP2_TXQ_THRESH_REG,
3164 	    txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET);
3165 }
3166 
3167 void
3168 mvpp2_rx_time_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3169     uint32_t usec)
3170 {
3171 	uint32_t val;
3172 
3173 	val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
3174 	mvpp2_write(port->sc, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
3175 
3176 	rxq->time_coal = usec;
3177 }
3178 
3179 void
3180 mvpp2_tx_time_coal_set(struct mvpp2_port *port, uint32_t usec)
3181 {
3182 	uint32_t val;
3183 
3184 	val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
3185 	mvpp2_write(port->sc, MVPP2_ISR_TX_THRESHOLD_REG(port->sc_id), val);
3186 
3187 	port->sc_tx_time_coal = usec;
3188 }
3189 
3190 void
3191 mvpp2_prs_shadow_ri_set(struct mvpp2_softc *sc, int index,
3192     uint32_t ri, uint32_t ri_mask)
3193 {
3194 	sc->sc_prs_shadow[index].ri_mask = ri_mask;
3195 	sc->sc_prs_shadow[index].ri = ri;
3196 }
3197 
3198 void
3199 mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
3200 {
3201 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
3202 
3203 	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
3204 	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
3205 }
3206 
3207 void
3208 mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, uint32_t port, int add)
3209 {
3210 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3211 
3212 	if (add)
3213 		pe->tcam.byte[enable_off] &= ~(1 << port);
3214 	else
3215 		pe->tcam.byte[enable_off] |= (1 << port);
3216 }
3217 
3218 void
3219 mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, uint32_t port_mask)
3220 {
3221 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3222 	uint8_t mask = MVPP2_PRS_PORT_MASK;
3223 
3224 	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
3225 	pe->tcam.byte[enable_off] &= ~mask;
3226 	pe->tcam.byte[enable_off] |= ~port_mask & MVPP2_PRS_PORT_MASK;
3227 }
3228 
3229 uint32_t
3230 mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
3231 {
3232 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3233 
3234 	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
3235 }
3236 
3237 void
3238 mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, uint32_t offs,
3239     uint8_t byte, uint8_t enable)
3240 {
3241 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
3242 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
3243 }
3244 
3245 void
3246 mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, uint32_t offs,
3247     uint8_t *byte, uint8_t *enable)
3248 {
3249 	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
3250 	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
3251 }
3252 
3253 int
3254 mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offset, uint16_t data)
3255 {
3256 	int byte_offset = MVPP2_PRS_TCAM_DATA_BYTE(offset);
3257 	uint16_t tcam_data;
3258 
3259 	tcam_data = (pe->tcam.byte[byte_offset + 1] << 8) |
3260 	    pe->tcam.byte[byte_offset];
3261 	return tcam_data == data;
3262 }
3263 
3264 void
3265 mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t enable)
3266 {
3267 	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
3268 
3269 	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
3270 		if (!(enable & BIT(i)))
3271 			continue;
3272 
3273 		if (bits & BIT(i))
3274 			pe->tcam.byte[ai_idx] |= BIT(i);
3275 		else
3276 			pe->tcam.byte[ai_idx] &= ~BIT(i);
3277 	}
3278 
3279 	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
3280 }
3281 
3282 int
3283 mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
3284 {
3285 	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
3286 }
3287 
3288 void
3289 mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *pe, uint32_t data_offset,
3290     uint32_t *word, uint32_t *enable)
3291 {
3292 	int index, position;
3293 	uint8_t byte, mask;
3294 
3295 	for (index = 0; index < 4; index++) {
3296 		position = (data_offset * sizeof(int)) + index;
3297 		mvpp2_prs_tcam_data_byte_get(pe, position, &byte, &mask);
3298 		((uint8_t *)word)[index] = byte;
3299 		((uint8_t *)enable)[index] = mask;
3300 	}
3301 }
3302 
3303 void
3304 mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, uint32_t offs,
3305     uint16_t ether_type)
3306 {
3307 	mvpp2_prs_tcam_data_byte_set(pe, offs + 0, ether_type >> 8, 0xff);
3308 	mvpp2_prs_tcam_data_byte_set(pe, offs + 1, ether_type & 0xff, 0xff);
3309 }
3310 
3311 void
3312 mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
3313 {
3314 	pe->sram.byte[bit / 8] |= (val << (bit % 8));
3315 }
3316 
3317 void
3318 mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
3319 {
3320 	pe->sram.byte[bit / 8] &= ~(val << (bit % 8));
3321 }
3322 
3323 void
3324 mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
3325 {
3326 	int i;
3327 
3328 	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
3329 		if (!(mask & BIT(i)))
3330 			continue;
3331 
3332 		if (bits & BIT(i))
3333 			mvpp2_prs_sram_bits_set(pe,
3334 			    MVPP2_PRS_SRAM_RI_OFFS + i, 1);
3335 		else
3336 			mvpp2_prs_sram_bits_clear(pe,
3337 			    MVPP2_PRS_SRAM_RI_OFFS + i, 1);
3338 
3339 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
3340 	}
3341 }
3342 
3343 int
3344 mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
3345 {
3346 	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
3347 }
3348 
3349 void
3350 mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
3351 {
3352 	int i;
3353 
3354 	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
3355 		if (!(mask & BIT(i)))
3356 			continue;
3357 
3358 		if (bits & BIT(i))
3359 			mvpp2_prs_sram_bits_set(pe,
3360 			    MVPP2_PRS_SRAM_AI_OFFS + i, 1);
3361 		else
3362 			mvpp2_prs_sram_bits_clear(pe,
3363 			    MVPP2_PRS_SRAM_AI_OFFS + i, 1);
3364 
3365 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
3366 	}
3367 }
3368 
3369 int
3370 mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
3371 {
3372 	uint8_t bits;
3373 	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
3374 	int ai_en_off = ai_off + 1;
3375 	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
3376 
3377 	bits = (pe->sram.byte[ai_off] >> ai_shift) |
3378 	    (pe->sram.byte[ai_en_off] << (8 - ai_shift));
3379 
3380 	return bits;
3381 }
3382 
3383 void
3384 mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, uint32_t op)
3385 {
3386 	if (shift < 0) {
3387 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
3388 		shift = -shift;
3389 	} else {
3390 		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
3391 	}
3392 
3393 	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
3394 	    shift & MVPP2_PRS_SRAM_SHIFT_MASK;
3395 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
3396 	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
3397 	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
3398 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
3399 }
3400 
/*
 * Program a signed user-defined-field offset of the given "type" plus the
 * UDF operation selector.  Both fields straddle byte boundaries, so the
 * spill-over bits are patched into the following byte by hand.
 */
void
mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, uint32_t type, int offset,
    uint32_t op)
{
	uint8_t udf_byte, udf_byte_offset;
	uint8_t op_sel_udf_byte, op_sel_udf_byte_offset;

	/* Byte that receives the high bits of the UDF offset field. */
	udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
	    MVPP2_PRS_SRAM_UDF_BITS);
	udf_byte_offset = (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8));
	/* Byte that receives the high bits of the op-select UDF field. */
	op_sel_udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
	    MVPP2_PRS_SRAM_OP_SEL_UDF_BITS);
	op_sel_udf_byte_offset = (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8));

	/* Store sign and magnitude separately. */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = -offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Low bits of the offset, then the spill-over into the next byte. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
	    MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[udf_byte] &= ~(MVPP2_PRS_SRAM_UDF_MASK >> udf_byte_offset);
	pe->sram.byte[udf_byte] |= (offset >> udf_byte_offset);
	/* UDF type selector. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
	    MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
	/* Operation selector, again with its spill-over bits. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
	    MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
	pe->sram.byte[op_sel_udf_byte] &= ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
	    op_sel_udf_byte_offset);
	pe->sram.byte[op_sel_udf_byte] |= (op >> op_sel_udf_byte_offset);
	/* Keep the base offset unchanged. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
3438 
3439 void
3440 mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
3441 {
3442 	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
3443 
3444 	mvpp2_prs_sram_bits_clear(pe, sram_next_off, MVPP2_PRS_SRAM_NEXT_LU_MASK);
3445 	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
3446 }
3447 
3448 void
3449 mvpp2_prs_shadow_set(struct mvpp2_softc *sc, int index, uint32_t lu)
3450 {
3451 	sc->sc_prs_shadow[index].valid = 1;
3452 	sc->sc_prs_shadow[index].lu = lu;
3453 }
3454 
/*
 * Write a parser entry to hardware.  Returns EINVAL for an out-of-range
 * index, 0 on success.
 */
int
mvpp2_prs_hw_write(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return EINVAL;

	/* Clear the invalid bit so the entry takes effect. */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
	/* Indirect access: select the index, then write the TCAM words. */
	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
	/* Same for the SRAM side of the entry. */
	mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
3473 
/*
 * Read parser entry "tid" from hardware into "pe".  Returns EINVAL for an
 * out-of-range index or an invalidated entry, 0 on success.
 */
int
mvpp2_prs_hw_read(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe, int tid)
{
	int i;

	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return EINVAL;

	memset(pe, 0, sizeof(*pe));
	pe->index = tid;

	/* Indirect access: select the index, then read the TCAM words. */
	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	/* Check the invalid bit first; skip dead entries. */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] =
	    mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return EINVAL;
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] =
		    mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Same for the SRAM side of the entry. */
	mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] =
		    mvpp2_read(sc, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
3501 
3502 int
3503 mvpp2_prs_flow_find(struct mvpp2_softc *sc, int flow)
3504 {
3505 	struct mvpp2_prs_entry pe;
3506 	uint8_t bits;
3507 	int tid;
3508 
3509 	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
3510 		if (!sc->sc_prs_shadow[tid].valid ||
3511 		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
3512 			continue;
3513 
3514 		mvpp2_prs_hw_read(sc, &pe, tid);
3515 		bits = mvpp2_prs_sram_ai_get(&pe);
3516 
3517 		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
3518 			return tid;
3519 	}
3520 
3521 	return -1;
3522 }
3523 
3524 int
3525 mvpp2_prs_tcam_first_free(struct mvpp2_softc *sc, uint8_t start, uint8_t end)
3526 {
3527 	uint8_t tmp;
3528 	int tid;
3529 
3530 	if (start > end) {
3531 		tmp = end;
3532 		end = start;
3533 		start = tmp;
3534 	}
3535 
3536 	for (tid = start; tid <= end; tid++) {
3537 		if (!sc->sc_prs_shadow[tid].valid)
3538 			return tid;
3539 	}
3540 
3541 	return -1;
3542 }
3543 
/*
 * Add or remove "port" from the drop-all parser entry, creating the
 * entry on first use.
 */
void
mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *sc, uint32_t port, int add)
{
	struct mvpp2_prs_entry pe;

	if (sc->sc_prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists; fetch it so we only touch the port map. */
		mvpp2_prs_hw_read(sc, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* First use: build the drop-all entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
		    MVPP2_PRS_RI_DROP_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
		/* Start with no ports; the caller's port is added below. */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3566 
/*
 * Add or remove "port" from the unicast or multicast promiscuous parser
 * entry (selected by "l2_cast"), creating the entry on first use.
 */
void
mvpp2_prs_mac_promisc_set(struct mvpp2_softc *sc, uint32_t port, int l2_cast,
    int add)
{
	struct mvpp2_prs_entry pe;
	uint8_t cast_match;
	uint32_t ri;
	int tid;

	/* Pick the per-cast-type entry index, match value and result info. */
	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	if (sc->sc_prs_shadow[tid].valid) {
		/* Entry exists; fetch it so we only touch the port map. */
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		/* First use: build the promiscuous entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
		/* Match on the I/G bit of the first address byte. */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
		    MVPP2_PRS_CAST_MASK);
		/* Skip over both MAC addresses. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_tcam_port_map_set(&pe, 0);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3605 
/*
 * Add or remove "port" from the (E)DSA-tagged/untagged parser entry,
 * creating the entry on first use.
 */
void
mvpp2_prs_dsa_tag_set(struct mvpp2_softc *sc, uint32_t port, int add,
    int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift;

	/* EDSA headers are 8 bytes, DSA headers 4. */
	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (sc->sc_prs_shadow[tid].valid) {
		/* Entry exists; fetch it so we only touch the port map. */
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		/* First use: build the DSA entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		/* Skip over the DSA header. */
		mvpp2_prs_sram_shift_set(&pe, shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* Tagged frames continue with VLAN parsing. */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Untagged frames go straight to L2 parsing. */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3647 
/*
 * Add or remove "port" from the ethertype-(E)DSA parser entry, which
 * matches DSA headers preceded by a special ethertype, creating the
 * entry on first use.
 */
void
mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *sc, uint32_t port,
    int add, int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift, port_mask;

	/* EDSA headers are 8 bytes, DSA headers 4. */
	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (sc->sc_prs_shadow[tid].valid) {
		/* Entry exists; fetch it so we only touch the port map. */
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		/* First use: build the entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		/* Match the Marvell DSA ethertype 0xdada / 0x0000. */
		mvpp2_prs_match_etype(&pe, 0, 0xdada);
		mvpp2_prs_match_etype(&pe, 2, 0);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
		    MVPP2_PRS_RI_DSA_MASK);
		/* Skip both MAC addresses plus the DSA header. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN + shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* Tagged frames continue with VLAN parsing. */
			mvpp2_prs_tcam_data_byte_set(&pe,
			    MVPP2_ETH_TYPE_LEN + 2 + 3,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Untagged frames go straight to L2 parsing. */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3696 
/*
 * Look up the single/triple VLAN parser entry matching "tpid" and "ai".
 * Returns a malloc'd copy of the entry (caller frees with M_TEMP) or
 * NULL if no such entry exists or allocation fails.
 */
struct mvpp2_prs_entry *
mvpp2_prs_vlan_find(struct mvpp2_softc *sc, uint16_t tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_bits, ai_bits;
	int match, tid;

	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
	if (pe == NULL)
		return NULL;

	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Scan the dynamic range for VLAN entries. */
	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		if (!sc->sc_prs_shadow[tid].valid ||
		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;
		mvpp2_prs_hw_read(sc, pe, tid);
		/* TCAM data is little-endian; compare the swapped tpid. */
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid));
		if (!match)
			continue;
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		/* Ignore the double-VLAN flag when comparing the AI. */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
		if (ai != ai_bits)
			continue;
		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}

	free(pe, M_TEMP, sizeof(*pe));
	return NULL;
}
3732 
/*
 * Ensure a parser entry exists that matches a single VLAN tag with
 * ethertype 'tpid' and AI value 'ai', and enable it for the ports in
 * 'port_map'.  Returns 0 on success, a negative value if no free TCAM
 * slot is available, ENOMEM on allocation failure, or EINVAL if the new
 * entry cannot be placed after the last double-VLAN entry.
 */
int
mvpp2_prs_vlan_add(struct mvpp2_softc *sc, uint16_t tpid, int ai, uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_bits;
	int tid_aux, tid;
	int ret = 0;

	/* Reuse an existing entry for this tpid/ai pair if one exists. */
	pe = mvpp2_prs_vlan_find(sc, tpid, ai);
	if (pe == NULL) {
		/* Allocate a TID from the high end of the free range. */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_LAST_FREE_TID,
		    MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		/* get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		    tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			mvpp2_prs_hw_read(sc, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		/*
		 * The new single-VLAN entry must get a TID beyond the last
		 * double-VLAN entry; otherwise the table ordering is broken.
		 */
		if (tid <= tid_aux) {
			ret = EINVAL;
			goto error;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		mvpp2_prs_match_etype(pe, 0, tpid);
		/* Continue parsing at the L2 stage after the tag. */
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
				   MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		} else {
			/* Non-single AI: tag the result as triple-VLAN. */
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Enable the requested ports and commit the entry to hardware. */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
3797 
3798 int
3799 mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *sc)
3800 {
3801 	int i;
3802 
3803 	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++)
3804 		if (!sc->sc_prs_double_vlans[i])
3805 			return i;
3806 
3807 	return -1;
3808 }
3809 
3810 struct mvpp2_prs_entry *
3811 mvpp2_prs_double_vlan_find(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2)
3812 {
3813 	struct mvpp2_prs_entry *pe;
3814 	uint32_t ri_mask;
3815 	int match, tid;
3816 
3817 	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
3818 	if (pe == NULL)
3819 		return NULL;
3820 
3821 	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
3822 
3823 	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3824 		if (!sc->sc_prs_shadow[tid].valid ||
3825 		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
3826 			continue;
3827 
3828 		mvpp2_prs_hw_read(sc, pe, tid);
3829 		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid1)) &&
3830 		    mvpp2_prs_tcam_data_cmp(pe, 4, swap16(tpid2));
3831 		if (!match)
3832 			continue;
3833 		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
3834 		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
3835 			return pe;
3836 	}
3837 
3838 	free(pe, M_TEMP, sizeof(*pe));
3839 	return NULL;
3840 }
3841 
/*
 * Ensure a parser entry exists matching an outer 'tpid1' / inner
 * 'tpid2' double-VLAN header and enable it for the ports in 'port_map'.
 * Returns 0 on success, a negative value if no free TCAM slot or free
 * double-VLAN AI value is available, ENOMEM on allocation failure, or
 * ERANGE if the entry cannot be placed before the single/triple-VLAN
 * entries.
 */
int
mvpp2_prs_double_vlan_add(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2,
    uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;
	uint32_t ri_bits;

	/* Reuse an existing entry for this TPID pair if one exists. */
	pe = mvpp2_prs_double_vlan_find(sc, tpid1, tpid2);
	if (pe == NULL) {
		/* Allocate a TID from the low end of the free range. */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
		    MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		ai = mvpp2_prs_double_vlan_ai_free_get(sc);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* Find the first single- or triple-VLAN entry. */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		    tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			mvpp2_prs_hw_read(sc, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		/*
		 * The double-VLAN entry must get a lower TID than any
		 * single/triple-VLAN entry to preserve table ordering.
		 */
		if (tid >= tid_aux) {
			ret = ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		/* Reserve the AI value for this double-VLAN pair. */
		sc->sc_prs_double_vlans[ai] = 1;
		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);
		/* Loop back into the VLAN stage to parse the inner tag. */
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
		    MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
		    MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Enable the requested ports and commit the entry to hardware. */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
3908 
/*
 * Install the two parser entries that recognize IPv4 protocol 'proto'
 * (TCP, UDP or IGMP) and attach the given result-info bits: one entry
 * for non-fragmented packets and one that marks fragments.  Returns 0
 * on success, EINVAL for an unsupported protocol, or a negative value
 * if the TCAM is full.
 */
int
mvpp2_prs_ip4_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
    uint32_t ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/*
	 * First entry: match 'proto' with the bytes at offsets 2 and 3
	 * forced to zero — presumably the IPv4 flags/fragment-offset
	 * field, so this matches non-fragmented packets only.
	 */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* L4 offset constant taken from the Marvell reference code. */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/*
	 * Second entry: reuse the first one, but wildcard the two bytes
	 * above and set the fragmentation bit in the result info.
	 */
	pe.index = tid;
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
	    ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
3961 
3962 int
3963 mvpp2_prs_ip4_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
3964 {
3965 	struct mvpp2_prs_entry pe;
3966 	int mask, tid;
3967 
3968 	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
3969 	    MVPP2_PE_LAST_FREE_TID);
3970 	if (tid < 0)
3971 		return tid;
3972 
3973 	memset(&pe, 0, sizeof(pe));
3974 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3975 	pe.index = tid;
3976 
3977 	switch (l3_cast) {
3978 	case MVPP2_PRS_L3_MULTI_CAST:
3979 		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
3980 		    MVPP2_PRS_IPV4_MC_MASK);
3981 		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
3982 		    MVPP2_PRS_RI_L3_ADDR_MASK);
3983 		break;
3984 	case  MVPP2_PRS_L3_BROAD_CAST:
3985 		mask = MVPP2_PRS_IPV4_BC_MASK;
3986 		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
3987 		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
3988 		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
3989 		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
3990 		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
3991 		    MVPP2_PRS_RI_L3_ADDR_MASK);
3992 		break;
3993 	default:
3994 		return EINVAL;
3995 	}
3996 
3997 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3998 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3999 	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
4000 	    MVPP2_PRS_IPV4_DIP_AI_BIT);
4001 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
4002 	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
4003 	mvpp2_prs_hw_write(sc, &pe);
4004 
4005 	return 0;
4006 }
4007 
/*
 * Install the parser entry that recognizes IPv6 next-header 'proto'
 * (TCP, UDP, ICMPv6 or IPIP) and attaches the given result-info bits.
 * Returns 0 on success, EINVAL for an unsupported protocol, or a
 * negative value if the TCAM is full.
 */
int
mvpp2_prs_ip6_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
    uint32_t ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	/* Terminal match: hand off to the flows stage. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	/* L4 offset constant taken from the Marvell reference code. */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	/* Match the next-header value; only no-extension packets. */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4041 
/*
 * Install the parser entry that classifies IPv6 multicast destination
 * addresses.  Only MVPP2_PRS_L3_MULTI_CAST is supported.  Returns 0 on
 * success, EINVAL otherwise, or a negative value if the TCAM is full.
 */
int
mvpp2_prs_ip6_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	/* Stay in the IP6 stage for further matching. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Negative shift rewinds the header pointer (Marvell reference). */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Match the IPv6 multicast prefix on the first byte. */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
	    MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4074 
4075 int
4076 mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, const uint8_t *da,
4077     uint8_t *mask)
4078 {
4079 	uint8_t tcam_byte, tcam_mask;
4080 	int index;
4081 
4082 	for (index = 0; index < ETHER_ADDR_LEN; index++) {
4083 		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte,
4084 		    &tcam_mask);
4085 		if (tcam_mask != mask[index])
4086 			return 0;
4087 		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
4088 			return 0;
4089 	}
4090 
4091 	return 1;
4092 }
4093 
4094 int
4095 mvpp2_prs_mac_da_range_find(struct mvpp2_softc *sc, int pmap, const uint8_t *da,
4096     uint8_t *mask, int udf_type)
4097 {
4098 	struct mvpp2_prs_entry pe;
4099 	int tid;
4100 
4101 	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID;
4102 	    tid++) {
4103 		uint32_t entry_pmap;
4104 
4105 		if (!sc->sc_prs_shadow[tid].valid ||
4106 		    (sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
4107 		    (sc->sc_prs_shadow[tid].udf != udf_type))
4108 			continue;
4109 
4110 		mvpp2_prs_hw_read(sc, &pe, tid);
4111 		entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
4112 		if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
4113 		    entry_pmap == pmap)
4114 			return tid;
4115 	}
4116 
4117 	return -1;
4118 }
4119 
/*
 * Add ('add' != 0) or remove a port from the MAC filter entry for
 * destination address 'da'.  The entry is created on first use and
 * invalidated once no port references it anymore.  Returns 0 on
 * success, a negative value if the TCAM is full, or -1 if the port map
 * unexpectedly ends up empty on an add.
 */
int
mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const uint8_t *da, int add)
{
	struct mvpp2_softc *sc = port->sc;
	struct mvpp2_prs_entry pe;
	uint32_t pmap, len, ri;
	uint8_t mask[ETHER_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Look for an existing exact-match entry for this address. */
	tid = mvpp2_prs_mac_da_range_find(sc, BIT(port->sc_id), da, mask,
	    MVPP2_PRS_UDF_MAC_DEF);
	if (tid < 0) {
		/* Nothing to remove if the entry does not exist. */
		if (!add)
			return 0;

		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
		    MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		/* New entry: start with an empty port map. */
		pe.index = tid;
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_hw_read(sc, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_tcam_port_set(&pe, port->sc_id, add);

	/* invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		/* Should not happen when adding; bail out defensively. */
		if (add)
			return -1;
		mvpp2_prs_hw_inv(sc, pe.index);
		sc->sc_prs_shadow[pe.index].valid = 0;
		return 0;
	}

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Match every byte of the destination address exactly. */
	len = ETHER_ADDR_LEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Classify the address as broadcast, multicast or unicast-to-me. */
	if (ETHER_IS_BROADCAST(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (ETHER_IS_MULTICAST(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
	    MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(sc, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
	    MVPP2_PRS_RI_MAC_ME_MASK);
	/* Skip past the DA and SA fields of the Ethernet header. */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4187 
4188 int
4189 mvpp2_prs_tag_mode_set(struct mvpp2_softc *sc, int port_id, int type)
4190 {
4191 	switch (type) {
4192 	case MVPP2_TAG_TYPE_EDSA:
4193 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
4194 		    MVPP2_PRS_EDSA);
4195 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
4196 		    MVPP2_PRS_EDSA);
4197 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4198 		    MVPP2_PRS_DSA);
4199 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4200 		    MVPP2_PRS_DSA);
4201 		break;
4202 	case MVPP2_TAG_TYPE_DSA:
4203 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
4204 		    MVPP2_PRS_DSA);
4205 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
4206 		    MVPP2_PRS_DSA);
4207 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4208 		    MVPP2_PRS_EDSA);
4209 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4210 		    MVPP2_PRS_EDSA);
4211 		break;
4212 	case MVPP2_TAG_TYPE_MH:
4213 	case MVPP2_TAG_TYPE_NONE:
4214 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4215 		    MVPP2_PRS_DSA);
4216 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4217 		    MVPP2_PRS_DSA);
4218 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4219 		    MVPP2_PRS_EDSA);
4220 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4221 		    MVPP2_PRS_EDSA);
4222 		break;
4223 	default:
4224 		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
4225 			return EINVAL;
4226 		break;
4227 	}
4228 
4229 	return 0;
4230 }
4231 
/*
 * Ensure a per-port default entry exists in the FLOWS lookup stage and
 * restrict it to this port.  Returns 0 on success or a negative value
 * if no free TCAM slot is available.
 */
int
mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_flow_find(port->sc, port->sc_id);
	if (tid < 0) {
		/* Allocate a TID from the high end of the free range. */
		tid = mvpp2_prs_tcam_first_free(port->sc,
		    MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Use the port id as the flow id and end the lookup. */
		pe.index = tid;
		mvpp2_prs_sram_ai_update(&pe, port->sc_id,
		    MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
		mvpp2_prs_shadow_set(port->sc, pe.index, MVPP2_PRS_LU_FLOWS);
	} else {
		mvpp2_prs_hw_read(port->sc, &pe, tid);
	}

	/* Restrict the entry to this port and commit it to hardware. */
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->sc_id));
	mvpp2_prs_hw_write(port->sc, &pe);
	return 0;
}
4261 
/*
 * Write one classifier flow-table entry: select the row through the
 * index register, then store the three data words.
 */
void
mvpp2_cls_flow_write(struct mvpp2_softc *sc, struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(sc, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
4270 
/*
 * Write one classifier lookup-ID table entry; the row is addressed by
 * the (way, lkpid) pair through the index register.
 */
void
mvpp2_cls_lookup_write(struct mvpp2_softc *sc, struct mvpp2_cls_lookup_entry *le)
{
	uint32_t val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(sc, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(sc, MVPP2_CLS_LKP_TBL_REG, le->data);
}
4280 
4281 void
4282 mvpp2_cls_init(struct mvpp2_softc *sc)
4283 {
4284 	struct mvpp2_cls_lookup_entry le;
4285 	struct mvpp2_cls_flow_entry fe;
4286 	int index;
4287 
4288 	mvpp2_write(sc, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
4289 	memset(&fe.data, 0, sizeof(fe.data));
4290 	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
4291 		fe.index = index;
4292 		mvpp2_cls_flow_write(sc, &fe);
4293 	}
4294 	le.data = 0;
4295 	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
4296 		le.lkpid = index;
4297 		le.way = 0;
4298 		mvpp2_cls_lookup_write(sc, &le);
4299 		le.way = 1;
4300 		mvpp2_cls_lookup_write(sc, &le);
4301 	}
4302 }
4303 
/*
 * Configure the classifier's lookup-ID decoding entry for a port:
 * select way 0, point the port at its first RX queue (sc_id * 32) and
 * leave the classification engines disabled.
 */
void
mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	uint32_t val;

	/* set way for the port */
	val = mvpp2_read(port->sc, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->sc_id);
	mvpp2_write(port->sc, MVPP2_CLS_PORT_WAY_REG, val);

	/*
	 * pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->sc_id;
	le.way = 0;
	le.data = 0;

	/* set initial CPU queue for receiving packets */
	/* NOTE: le.data is already 0; the clear documents the field. */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= (port->sc_id * 32);

	/* disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* update lookup ID table entry */
	mvpp2_cls_lookup_write(port->sc, &le);
}
4333 
/*
 * Point the classifier's oversize-packet RX queue for this port at the
 * port's first queue (sc_id * 32), with the queue number split across
 * the low/high registers, and clear the port's bit in the software
 * forwarding port control register.
 */
void
mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	uint32_t val;

	mvpp2_write(port->sc, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->sc_id),
	    (port->sc_id * 32) & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
	mvpp2_write(port->sc, MVPP2_CLS_SWFWD_P2HQ_REG(port->sc_id),
	    (port->sc_id * 32) >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS);
	val = mvpp2_read(port->sc, MVPP2_CLS_SWFWD_PCTRL_REG);
	val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->sc_id);
	mvpp2_write(port->sc, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
4347