xref: /openbsd-src/sys/dev/fdt/if_mvpp.c (revision 3cab2bb3f667058bece8e38b12449a63a9d73c4b)
1 /*	$OpenBSD: if_mvpp.c,v 1.16 2020/07/23 10:10:15 patrick Exp $	*/
2 /*
3  * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
4  * Copyright (c) 2017, 2020 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 /*
19  * Copyright (C) 2016 Marvell International Ltd.
20  *
21  * Marvell BSD License Option
22  *
23  * If you received this File from Marvell, you may opt to use, redistribute
24  * and/or modify this File under the following licensing terms.
25  * Redistribution and use in source and binary forms, with or without
26  * modification, are permitted provided that the following conditions are met:
27  *
28  *   * Redistributions of source code must retain the above copyright notice,
29  *     this list of conditions and the following disclaimer.
30  *
31  *   * Redistributions in binary form must reproduce the above copyright
32  *     notice, this list of conditions and the following disclaimer in the
33  *     documentation and/or other materials provided with the distribution.
34  *
35  *   * Neither the name of Marvell nor the names of its contributors may be
36  *     used to endorse or promote products derived from this software without
37  *     specific prior written permission.
38  *
39  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
40  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
43  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
44  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
45  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
46  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
47  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
48  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
49  * POSSIBILITY OF SUCH DAMAGE.
50  */
51 
52 #include "bpfilter.h"
53 
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/device.h>
57 #include <sys/kernel.h>
58 #include <sys/malloc.h>
59 #include <sys/mbuf.h>
60 #include <sys/queue.h>
61 #include <sys/socket.h>
62 #include <sys/sockio.h>
63 #include <sys/timeout.h>
64 
65 #include <uvm/uvm_extern.h>
66 
67 #include <machine/cpufunc.h>
68 #include <machine/bus.h>
69 #include <machine/fdt.h>
70 
71 #include <net/if.h>
72 #include <net/if_media.h>
73 #include <net/ppp_defs.h>
74 
75 #include <dev/ofw/openfirm.h>
76 #include <dev/ofw/ofw_clock.h>
77 #include <dev/ofw/ofw_gpio.h>
78 #include <dev/ofw/ofw_misc.h>
79 #include <dev/ofw/ofw_pinctrl.h>
80 #include <dev/ofw/ofw_regulator.h>
81 #include <dev/ofw/fdt.h>
82 
83 #include <dev/mii/mii.h>
84 #include <dev/mii/miivar.h>
85 
86 #if NBPFILTER > 0
87 #include <net/bpf.h>
88 #endif
89 
90 #include <netinet/in.h>
91 #include <netinet/ip.h>
92 #include <netinet/if_ether.h>
93 
94 #include <netinet6/in6_var.h>
95 #include <netinet/ip6.h>
96 
97 #include <dev/fdt/if_mvppreg.h>
98 
/* One receive/transmit buffer: its DMA map and the mbuf it carries. */
struct mvpp2_buf {
	bus_dmamap_t		mb_map;
	struct mbuf		*mb_m;
};

#define MVPP2_NTXDESC	512	/* Tx descriptors per queue */
#define MVPP2_NTXSEGS	16	/* max DMA segments per Tx packet */
#define MVPP2_NRXDESC	512	/* Rx descriptors per queue */
107 
/*
 * Buffer manager (BM) pool: a hardware-managed pool of receive buffers.
 * The freelist holds software cookies (pool-id << 16 | buffer index)
 * that are handed to the hardware as "virtual" buffer pointers.
 */
struct mvpp2_bm_pool {
	struct mvpp2_dmamem	*bm_mem;	/* DMA ring of pointer pairs */
	struct mvpp2_buf	*rxbuf;		/* per-buffer software state */
	uint32_t		*freelist;	/* cookies not owned by hw */
	int			free_prod;	/* freelist producer index */
	int			free_cons;	/* freelist consumer index */
};

#define MVPP2_BM_SIZE		64	/* buffers per pool */
#define MVPP2_BM_POOL_PTR_ALIGN	128	/* alignment of the pointer ring */
#define MVPP2_BM_POOLS_NUM	8	/* pools implemented by hardware */
#define MVPP2_BM_ALIGN		32
120 
/* State of one transmit queue (per-port or aggregated). */
struct mvpp2_tx_queue {
	uint8_t			id;		/* global (physical) queue id */
	uint8_t			log_id;		/* per-port logical id */
	struct mvpp2_dmamem	*ring;		/* descriptor ring DMA memory */
	struct mvpp2_buf	*buf;		/* per-descriptor buffer state */
	struct mvpp2_tx_desc	*descs;		/* KVA of the descriptor ring */
	int			prod;		/* producer index */
	int			cnt;		/* descriptors currently in use */
	int			cons;		/* consumer index */

	uint32_t		done_pkts_coal;	/* Tx-done coalescing threshold */
};
133 
/* State of one receive queue. */
struct mvpp2_rx_queue {
	uint8_t			id;		/* physical queue id */
	struct mvpp2_dmamem	*ring;		/* descriptor ring DMA memory */
	struct mvpp2_rx_desc	*descs;		/* KVA of the descriptor ring */
	int			prod;		/* producer index */
	struct if_rxring	rxring;		/* rx ring accounting */
	int			cons;		/* consumer index */

	uint32_t		pkts_coal;	/* packet coalescing threshold */
	uint32_t		time_coal;	/* time coalescing threshold */
};
145 
/*
 * A chunk of DMA-able memory: map, backing segment, size and its
 * kernel mapping.  Allocated by mvpp2_dmamem_alloc().
 */
struct mvpp2_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;
};
#define MVPP2_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MVPP2_DMA_LEN(_mdm)	((_mdm)->mdm_size)
/* Device (bus/DMA) address — what the hardware must be given. */
#define MVPP2_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
/* Kernel virtual address — for CPU access only. */
#define MVPP2_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
156 
struct mvpp2_port;
/* Softc of the packet processor controller ("mvppc"). */
struct mvpp2_softc {
	struct device		sc_dev;
	int			sc_node;	/* OF node of the controller */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh_base;	/* packet processor registers */
	bus_space_handle_t	sc_ioh_iface;	/* interface (GOP) registers */
	paddr_t			sc_ioh_paddr;	/* phys addr behind sc_ioh_base */
	bus_size_t		sc_iosize_base;
	bus_size_t		sc_iosize_iface;
	bus_dma_tag_t		sc_dmat;
	struct regmap		*sc_rm;		/* system-controller regmap */

	uint32_t		sc_tclk;	/* "pp_clk" frequency */

	struct mvpp2_bm_pool	*sc_bm_pools;	/* buffer manager pools */
	int			sc_npools;

	struct mvpp2_prs_shadow	*sc_prs_shadow;	/* sw shadow of parser TCAM */
	uint8_t			*sc_prs_double_vlans;

	int			sc_aggr_ntxq;	/* aggregated Tx queue count */
	struct mvpp2_tx_queue	*sc_aggr_txqs;

	struct mvpp2_port	**sc_ports;
};
183 
/* Softc of one network port ("mvpp") on the controller. */
struct mvpp2_port {
	struct device		sc_dev;
	struct mvpp2_softc	*sc;		/* parent controller */
	int			sc_node;	/* OF node of the port */
	bus_dma_tag_t		sc_dmat;
	int			sc_id;		/* port index */
	int			sc_gop_id;	/* group-of-ports (MAC) index */

	struct arpcom		sc_ac;
#define sc_lladdr	sc_ac.ac_enaddr
	struct mii_data		sc_mii;
#define sc_media	sc_mii.mii_media
	struct mii_bus		*sc_mdio;
	char			sc_cur_lladdr[ETHER_ADDR_LEN];

	/* PHY connection type from the device tree "phy-mode" property. */
	enum {
		PHY_MODE_XAUI,
		PHY_MODE_10GBASER,
		PHY_MODE_2500BASEX,
		PHY_MODE_1000BASEX,
		PHY_MODE_SGMII,
		PHY_MODE_RGMII,
		PHY_MODE_RGMII_ID,
		PHY_MODE_RGMII_RXID,
		PHY_MODE_RGMII_TXID,
	}			sc_phy_mode;
	int			sc_fixed_link;	/* fixed-link, no PHY to poll */
	int			sc_inband_status; /* in-band link status */
	int			sc_link;	/* current link state */
	int			sc_phyloc;	/* PHY address */
	int			sc_sfp;		/* SFP phandle, if any */

	int			sc_ntxq;	/* Tx queues on this port */
	int			sc_nrxq;	/* Rx queues on this port */

	struct mvpp2_tx_queue	*sc_txqs;
	struct mvpp2_rx_queue	*sc_rxqs;

	struct timeout		sc_tick;

	uint32_t		sc_tx_time_coal;
};
226 
#define MVPP2_MAX_PORTS		4	/* ports per packet processor */

/* Attach arguments handed from the controller to each port device. */
struct mvpp2_attach_args {
	int			ma_node;	/* OF node of the port */
	bus_dma_tag_t		ma_dmat;	/* controller's DMA tag */
};

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
235 
/* Serializes SFP transceiver access across all mvpp(4) instances. */
static struct rwlock mvpp2_sff_lock = RWLOCK_INITIALIZER("mvpp2sff");

int	mvpp2_match(struct device *, void *, void *);
void	mvpp2_attach(struct device *, struct device *, void *);
void	mvpp2_attach_deferred(struct device *);

/* "mvppc": the packet processor controller itself. */
struct cfattach mvppc_ca = {
	sizeof(struct mvpp2_softc), mvpp2_match, mvpp2_attach
};

struct cfdriver mvppc_cd = {
	NULL, "mvppc", DV_DULL
};

int	mvpp2_port_match(struct device *, void *, void *);
void	mvpp2_port_attach(struct device *, struct device *, void *);

/* "mvpp": one network interface (port) on the controller. */
struct cfattach mvpp_ca = {
	sizeof(struct mvpp2_port), mvpp2_port_match, mvpp2_port_attach
};

struct cfdriver mvpp_cd = {
	NULL, "mvpp", DV_IFNET
};
260 
261 uint32_t mvpp2_read(struct mvpp2_softc *, bus_addr_t);
262 void	mvpp2_write(struct mvpp2_softc *, bus_addr_t, uint32_t);
263 uint32_t mvpp2_gmac_read(struct mvpp2_port *, bus_addr_t);
264 void	mvpp2_gmac_write(struct mvpp2_port *, bus_addr_t, uint32_t);
265 uint32_t mvpp2_xlg_read(struct mvpp2_port *, bus_addr_t);
266 void	mvpp2_xlg_write(struct mvpp2_port *, bus_addr_t, uint32_t);
267 uint32_t mvpp2_xpcs_read(struct mvpp2_port *, bus_addr_t);
268 void	mvpp2_xpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
269 uint32_t mvpp2_mpcs_read(struct mvpp2_port *, bus_addr_t);
270 void	mvpp2_mpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
271 
272 int	mvpp2_ioctl(struct ifnet *, u_long, caddr_t);
273 void	mvpp2_start(struct ifnet *);
274 int	mvpp2_rxrinfo(struct mvpp2_port *, struct if_rxrinfo *);
275 void	mvpp2_watchdog(struct ifnet *);
276 
277 int	mvpp2_media_change(struct ifnet *);
278 void	mvpp2_media_status(struct ifnet *, struct ifmediareq *);
279 
280 int	mvpp2_mii_readreg(struct device *, int, int);
281 void	mvpp2_mii_writereg(struct device *, int, int, int);
282 void	mvpp2_mii_statchg(struct device *);
283 void	mvpp2_inband_statchg(struct mvpp2_port *);
284 void	mvpp2_port_change(struct mvpp2_port *);
285 
286 void	mvpp2_tick(void *);
287 void	mvpp2_rxtick(void *);
288 
289 int	mvpp2_link_intr(void *);
290 int	mvpp2_intr(void *);
291 void	mvpp2_tx_proc(struct mvpp2_port *, uint8_t);
292 void	mvpp2_txq_proc(struct mvpp2_port *, struct mvpp2_tx_queue *);
293 void	mvpp2_rx_proc(struct mvpp2_port *, uint8_t);
294 void	mvpp2_rxq_proc(struct mvpp2_port *, struct mvpp2_rx_queue *);
295 void	mvpp2_rx_refill(struct mvpp2_port *);
296 
297 void	mvpp2_up(struct mvpp2_port *);
298 void	mvpp2_down(struct mvpp2_port *);
299 void	mvpp2_iff(struct mvpp2_port *);
300 int	mvpp2_encap(struct mvpp2_port *, struct mbuf *, int *);
301 
302 void	mvpp2_aggr_txq_hw_init(struct mvpp2_softc *, struct mvpp2_tx_queue *);
303 void	mvpp2_txq_hw_init(struct mvpp2_port *, struct mvpp2_tx_queue *);
304 void	mvpp2_rxq_hw_init(struct mvpp2_port *, struct mvpp2_rx_queue *);
305 void	mvpp2_txq_hw_deinit(struct mvpp2_port *, struct mvpp2_tx_queue *);
306 void	mvpp2_rxq_hw_deinit(struct mvpp2_port *, struct mvpp2_rx_queue *);
307 void	mvpp2_rxq_long_pool_set(struct mvpp2_port *, int, int);
308 void	mvpp2_rxq_short_pool_set(struct mvpp2_port *, int, int);
309 
310 void	mvpp2_mac_config(struct mvpp2_port *);
311 void	mvpp2_xlg_config(struct mvpp2_port *);
312 void	mvpp2_gmac_config(struct mvpp2_port *);
313 void	mvpp2_comphy_config(struct mvpp2_port *);
314 void	mvpp2_gop_config(struct mvpp2_port *);
315 
316 struct mvpp2_dmamem *
317 	mvpp2_dmamem_alloc(struct mvpp2_softc *, bus_size_t, bus_size_t);
318 void	mvpp2_dmamem_free(struct mvpp2_softc *, struct mvpp2_dmamem *);
319 struct mbuf *mvpp2_alloc_mbuf(struct mvpp2_softc *, bus_dmamap_t);
320 void	mvpp2_fill_rx_ring(struct mvpp2_softc *);
321 
322 void	mvpp2_interrupts_enable(struct mvpp2_port *, int);
323 void	mvpp2_interrupts_disable(struct mvpp2_port *, int);
324 int	mvpp2_egress_port(struct mvpp2_port *);
325 int	mvpp2_txq_phys(int, int);
326 void	mvpp2_defaults_set(struct mvpp2_port *);
327 void	mvpp2_ingress_enable(struct mvpp2_port *);
328 void	mvpp2_ingress_disable(struct mvpp2_port *);
329 void	mvpp2_egress_enable(struct mvpp2_port *);
330 void	mvpp2_egress_disable(struct mvpp2_port *);
331 void	mvpp2_port_enable(struct mvpp2_port *);
332 void	mvpp2_port_disable(struct mvpp2_port *);
333 void	mvpp2_rxq_status_update(struct mvpp2_port *, int, int, int);
334 int	mvpp2_rxq_received(struct mvpp2_port *, int);
335 void	mvpp2_rxq_offset_set(struct mvpp2_port *, int, int);
336 void	mvpp2_txp_max_tx_size_set(struct mvpp2_port *);
337 void	mvpp2_rx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
338 	    uint32_t);
339 void	mvpp2_tx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_tx_queue *,
340 	    uint32_t);
341 void	mvpp2_rx_time_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
342 	    uint32_t);
343 void	mvpp2_tx_time_coal_set(struct mvpp2_port *, uint32_t);
344 
345 void	mvpp2_axi_config(struct mvpp2_softc *);
346 void	mvpp2_bm_pool_init(struct mvpp2_softc *);
347 void	mvpp2_rx_fifo_init(struct mvpp2_softc *);
348 void	mvpp2_tx_fifo_init(struct mvpp2_softc *);
349 int	mvpp2_prs_default_init(struct mvpp2_softc *);
350 void	mvpp2_prs_hw_inv(struct mvpp2_softc *, int);
351 void	mvpp2_prs_hw_port_init(struct mvpp2_softc *, int, int, int, int);
352 void	mvpp2_prs_def_flow_init(struct mvpp2_softc *);
353 void	mvpp2_prs_mh_init(struct mvpp2_softc *);
354 void	mvpp2_prs_mac_init(struct mvpp2_softc *);
355 void	mvpp2_prs_dsa_init(struct mvpp2_softc *);
356 int	mvpp2_prs_etype_init(struct mvpp2_softc *);
357 int	mvpp2_prs_vlan_init(struct mvpp2_softc *);
358 int	mvpp2_prs_pppoe_init(struct mvpp2_softc *);
359 int	mvpp2_prs_ip6_init(struct mvpp2_softc *);
360 int	mvpp2_prs_ip4_init(struct mvpp2_softc *);
361 void	mvpp2_prs_shadow_ri_set(struct mvpp2_softc *, int,
362 	    uint32_t, uint32_t);
363 void	mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *, uint32_t);
364 void	mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *, uint32_t, int);
365 void	mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *, uint32_t);
366 uint32_t mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *);
367 void	mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *, uint32_t,
368 	    uint8_t, uint8_t);
369 void	mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *, uint32_t,
370 	    uint8_t *, uint8_t *);
371 int	mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *, int, uint16_t);
372 void	mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
373 int	mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *);
374 int	mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *);
375 void	mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *, uint32_t,
376 	    uint32_t *, uint32_t *);
377 void	mvpp2_prs_match_etype(struct mvpp2_prs_entry *, uint32_t, uint16_t);
378 int	mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *);
379 void	mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
380 void	mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
381 void	mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *, uint32_t, uint32_t);
382 void	mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *, uint32_t, uint32_t);
383 void	mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *, int, uint32_t);
384 void	mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *, uint32_t, int,
385 	    uint32_t);
386 void	mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *, uint32_t);
387 void	mvpp2_prs_shadow_set(struct mvpp2_softc *, int, uint32_t);
388 int	mvpp2_prs_hw_write(struct mvpp2_softc *, struct mvpp2_prs_entry *);
389 int	mvpp2_prs_hw_read(struct mvpp2_softc *, struct mvpp2_prs_entry *, int);
390 int	mvpp2_prs_flow_find(struct mvpp2_softc *, int);
391 int	mvpp2_prs_tcam_first_free(struct mvpp2_softc *, uint8_t, uint8_t);
392 void	mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *, uint32_t, int);
393 void	mvpp2_prs_mac_promisc_set(struct mvpp2_softc *, uint32_t, int);
394 void	mvpp2_prs_mac_multi_set(struct mvpp2_softc *, uint32_t, uint32_t, int);
395 void	mvpp2_prs_dsa_tag_set(struct mvpp2_softc *, uint32_t, int, int, int);
396 void	mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *, uint32_t,
397 	    int, int, int);
398 struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2_softc *, uint16_t,
399 	    int);
400 int	mvpp2_prs_vlan_add(struct mvpp2_softc *, uint16_t, int, uint32_t);
401 int	mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *);
402 struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2_softc *,
403 	    uint16_t, uint16_t);
404 int	mvpp2_prs_double_vlan_add(struct mvpp2_softc *, uint16_t, uint16_t,
405 	    uint32_t);
406 int	mvpp2_prs_ip4_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
407 int	mvpp2_prs_ip4_cast(struct mvpp2_softc *, uint16_t);
408 int	mvpp2_prs_ip6_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
409 int	mvpp2_prs_ip6_cast(struct mvpp2_softc *, uint16_t);
410 struct mvpp2_prs_entry *mvpp2_prs_mac_da_range_find(struct mvpp2_softc *, int,
411 	    const uint8_t *, uint8_t *, int);
412 int	mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *, const uint8_t *,
413 	    uint8_t *);
414 int	mvpp2_prs_mac_da_accept(struct mvpp2_softc *, int, const uint8_t *, int);
415 int	mvpp2_prs_tag_mode_set(struct mvpp2_softc *, int, int);
416 int	mvpp2_prs_def_flow(struct mvpp2_port *);
417 void	mvpp2_cls_flow_write(struct mvpp2_softc *, struct mvpp2_cls_flow_entry *);
418 void	mvpp2_cls_lookup_write(struct mvpp2_softc *, struct mvpp2_cls_lookup_entry *);
419 void	mvpp2_cls_init(struct mvpp2_softc *);
420 void	mvpp2_cls_port_config(struct mvpp2_port *);
421 void	mvpp2_cls_oversize_rxq_set(struct mvpp2_port *);
422 
423 int
424 mvpp2_match(struct device *parent, void *cfdata, void *aux)
425 {
426 	struct fdt_attach_args *faa = aux;
427 
428 	return OF_is_compatible(faa->fa_node, "marvell,armada-7k-pp22");
429 }
430 
/*
 * First attach stage: map the controller's two register regions
 * ("base" for the packet processor, "iface" for the MAC/GOP blocks),
 * look up the optional system-controller regmap and the packet
 * processor clock, then defer the heavy initialization.
 */
void
mvpp2_attach(struct device *parent, struct device *self, void *aux)
{
	struct mvpp2_softc *sc = (void *)self;
	struct fdt_attach_args *faa = aux;

	if (faa->fa_nreg < 2) {
		printf(": no registers\n");
		return;
	}

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;

	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh_base)) {
		printf(": can't map registers\n");
		return;
	}
	sc->sc_iosize_base = faa->fa_reg[0].size;

	/*
	 * Remember the physical address behind the base mapping.
	 * NOTE(review): presumably used later to program register
	 * addresses into the hardware — confirm against the users
	 * of sc_ioh_paddr.
	 */
	if (!pmap_extract(pmap_kernel(), (vaddr_t)sc->sc_ioh_base,
	    &sc->sc_ioh_paddr)) {
		printf(": can't extract address\n");
		bus_space_unmap(sc->sc_iot, sc->sc_ioh_base,
		    sc->sc_iosize_base);
		return;
	}

	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
	    faa->fa_reg[1].size, 0, &sc->sc_ioh_iface)) {
		printf(": can't map registers\n");
		bus_space_unmap(sc->sc_iot, sc->sc_ioh_base,
		    sc->sc_iosize_base);
		return;
	}
	sc->sc_iosize_iface = faa->fa_reg[1].size;

	/* Optional syscon referenced by the device tree; may be NULL. */
	sc->sc_rm = regmap_byphandle(OF_getpropint(faa->fa_node,
	    "marvell,system-controller", 0));

	clock_enable_all(faa->fa_node);
	sc->sc_tclk = clock_get_frequency(faa->fa_node, "pp_clk");

	printf("\n");

	/* Finish once all other drivers have had a chance to attach. */
	config_defer(self, mvpp2_attach_deferred);
}
480 
/*
 * Second attach stage, run via config_defer(): program the AXI
 * attributes, set up the aggregated Tx queues, FIFOs and buffer
 * manager pools, initialize the parser and classifier, then attach
 * an mvpp(4) port for every child node of the controller.
 */
void
mvpp2_attach_deferred(struct device *self)
{
	struct mvpp2_softc *sc = (void *)self;
	struct mvpp2_attach_args maa;
	struct mvpp2_tx_queue *txq;
	int i, node;

	mvpp2_axi_config(sc);

	/* One aggregated Tx queue, shared by all ports. */
	sc->sc_aggr_ntxq = 1;
	sc->sc_aggr_txqs = mallocarray(sc->sc_aggr_ntxq,
	    sizeof(*sc->sc_aggr_txqs), M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->sc_aggr_ntxq; i++) {
		txq = &sc->sc_aggr_txqs[i];
		txq->id = i;
		mvpp2_aggr_txq_hw_init(sc, txq);
	}

	mvpp2_rx_fifo_init(sc);
	mvpp2_tx_fifo_init(sc);

	mvpp2_write(sc, MVPP2_TX_SNOOP_REG, 0x1);

	mvpp2_bm_pool_init(sc);

	/* Software shadow of the parser TCAM/SRAM entries. */
	sc->sc_prs_shadow = mallocarray(MVPP2_PRS_TCAM_SRAM_SIZE,
	    sizeof(*sc->sc_prs_shadow), M_DEVBUF, M_WAITOK | M_ZERO);

	mvpp2_prs_default_init(sc);
	mvpp2_cls_init(sc);

	/* Attach one port device per child node. */
	memset(&maa, 0, sizeof(maa));
	for (node = OF_child(sc->sc_node); node; node = OF_peer(node)) {
		maa.ma_node = node;
		maa.ma_dmat = sc->sc_dmat;
		config_found(self, &maa, NULL);
	}
}
521 
/*
 * Program the AXI attributes (cache and domain) used by the packet
 * processor's DMA masters for descriptor and data transfers.
 */
void
mvpp2_axi_config(struct mvpp2_softc *sc)
{
	uint32_t reg;

	mvpp2_write(sc, MVPP22_BM_PHY_VIRT_HIGH_RLS_REG, 0);

	/* Write attributes: cacheable writes, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_BM_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_RX_DATA_WR_ATTR_REG, reg);

	/* Read attributes: cacheable reads, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_BM_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TX_DATA_RD_ATTR_REG, reg);

	/* Normal (non-snooped) accesses: non-cacheable, system domain. */
	reg = (MVPP22_AXI_CODE_CACHE_NON_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_SYSTEM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_RD_NORMAL_CODE_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_WR_NORMAL_CODE_REG, reg);

	/* Snooped reads: cacheable, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_RD_SNOOP_CODE_REG, reg);

	/* Snooped writes: cacheable, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_WR_SNOOP_CODE_REG, reg);
}
556 
557 void
558 mvpp2_bm_pool_init(struct mvpp2_softc *sc)
559 {
560 	struct mvpp2_bm_pool *bm;
561 	struct mvpp2_buf *rxb;
562 	uint64_t phys, virt;
563 	int i, j;
564 
565 	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
566 		mvpp2_write(sc, MVPP2_BM_INTR_MASK_REG(i), 0);
567 		mvpp2_write(sc, MVPP2_BM_INTR_CAUSE_REG(i), 0);
568 	}
569 
570 	sc->sc_npools = ncpus;
571 	sc->sc_npools = min(sc->sc_npools, MVPP2_BM_POOLS_NUM);
572 
573 	sc->sc_bm_pools = mallocarray(sc->sc_npools, sizeof(*sc->sc_bm_pools),
574 	    M_DEVBUF, M_WAITOK | M_ZERO);
575 
576 	for (i = 0; i < sc->sc_npools; i++) {
577 		bm = &sc->sc_bm_pools[i];
578 		bm->bm_mem = mvpp2_dmamem_alloc(sc,
579 		    MVPP2_BM_SIZE * sizeof(uint64_t) * 2,
580 		    MVPP2_BM_POOL_PTR_ALIGN);
581 		memset(MVPP2_DMA_KVA(bm->bm_mem), 0, MVPP2_DMA_LEN(bm->bm_mem));
582 		bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(bm->bm_mem), 0,
583 		    MVPP2_DMA_LEN(bm->bm_mem),
584 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
585 
586 		mvpp2_write(sc, MVPP2_BM_POOL_BASE_REG(i),
587 		    (uint64_t)MVPP2_DMA_KVA(bm->bm_mem) & 0xffffffff);
588 		mvpp2_write(sc, MVPP22_BM_POOL_BASE_HIGH_REG,
589 		    ((uint64_t)MVPP2_DMA_KVA(bm->bm_mem) >> 32)
590 		    & MVPP22_BM_POOL_BASE_HIGH_MASK);
591 		mvpp2_write(sc, MVPP2_BM_POOL_SIZE_REG(i),
592 		    MVPP2_BM_SIZE);
593 
594 		mvpp2_write(sc, MVPP2_BM_POOL_CTRL_REG(i),
595 		    mvpp2_read(sc, MVPP2_BM_POOL_CTRL_REG(i)) |
596 		    MVPP2_BM_START_MASK);
597 
598 		mvpp2_write(sc, MVPP2_POOL_BUF_SIZE_REG(i),
599 		    roundup(MCLBYTES, 1 << MVPP2_POOL_BUF_SIZE_OFFSET));
600 
601 		bm->rxbuf = mallocarray(MVPP2_BM_SIZE, sizeof(struct mvpp2_buf),
602 		    M_DEVBUF, M_WAITOK);
603 		bm->freelist = mallocarray(MVPP2_BM_SIZE, sizeof(*bm->freelist),
604 		    M_DEVBUF, M_WAITOK | M_ZERO);
605 
606 		for (j = 0; j < MVPP2_BM_SIZE; j++) {
607 			rxb = &bm->rxbuf[j];
608 			bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
609 			    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->mb_map);
610 			rxb->mb_m = NULL;
611 		}
612 
613 		/* Use pool-id and rxbuf index as cookie. */
614 		for (j = 0; j < MVPP2_BM_SIZE; j++)
615 			bm->freelist[j] = (i << 16) | (j << 0);
616 
617 		for (j = 0; j < MVPP2_BM_SIZE; j++) {
618 			rxb = &bm->rxbuf[j];
619 			rxb->mb_m = mvpp2_alloc_mbuf(sc, rxb->mb_map);
620 			if (rxb->mb_m == NULL)
621 				break;
622 
623 			KASSERT(bm->freelist[bm->free_cons] != -1);
624 			virt = bm->freelist[bm->free_cons];
625 			bm->freelist[bm->free_cons] = -1;
626 			bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;
627 
628 			phys = rxb->mb_map->dm_segs[0].ds_addr;
629 			mvpp2_write(sc, MVPP22_BM_PHY_VIRT_HIGH_RLS_REG,
630 			    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
631 			    << MVPP22_BM_VIRT_HIGH_RLS_OFFST) |
632 			    (((phys >> 32) & MVPP22_ADDR_HIGH_MASK)
633 			    << MVPP22_BM_PHY_HIGH_RLS_OFFSET));
634 			mvpp2_write(sc, MVPP2_BM_VIRT_RLS_REG,
635 			    virt & 0xffffffff);
636 			mvpp2_write(sc, MVPP2_BM_PHY_RLS_REG(i),
637 			    phys & 0xffffffff);
638 		}
639 	}
640 }
641 
642 void
643 mvpp2_rx_fifo_init(struct mvpp2_softc *sc)
644 {
645 	int i;
646 
647 	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
648 	    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
649 	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
650 	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
651 
652 	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
653 	    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
654 	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
655 	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
656 
657 	for (i = 2; i < MVPP2_MAX_PORTS; i++) {
658 		mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(i),
659 		    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
660 		mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(i),
661 		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
662 	}
663 
664 	mvpp2_write(sc, MVPP2_RX_MIN_PKT_SIZE_REG, MVPP2_RX_FIFO_PORT_MIN_PKT);
665 	mvpp2_write(sc, MVPP2_RX_FIFO_INIT_REG, 0x1);
666 }
667 
668 void
669 mvpp2_tx_fifo_init(struct mvpp2_softc *sc)
670 {
671 	int i;
672 
673 	mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(0),
674 	    MVPP22_TX_FIFO_DATA_SIZE_10KB);
675 	mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(0),
676 	    MVPP2_TX_FIFO_THRESHOLD_10KB);
677 
678 	for (i = 1; i < MVPP2_MAX_PORTS; i++) {
679 		mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(i),
680 		    MVPP22_TX_FIFO_DATA_SIZE_3KB);
681 		mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(i),
682 		    MVPP2_TX_FIFO_THRESHOLD_3KB);
683 	}
684 }
685 
686 int
687 mvpp2_prs_default_init(struct mvpp2_softc *sc)
688 {
689 	int i, j, ret;
690 
691 	mvpp2_write(sc, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
692 
693 	for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
694 		mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, i);
695 		for (j = 0; j < MVPP2_PRS_TCAM_WORDS; j++)
696 			mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(j), 0);
697 
698 		mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, i);
699 		for (j = 0; j < MVPP2_PRS_SRAM_WORDS; j++)
700 			mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(j), 0);
701 	}
702 
703 	for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++)
704 		mvpp2_prs_hw_inv(sc, i);
705 
706 	for (i = 0; i < MVPP2_MAX_PORTS; i++)
707 		mvpp2_prs_hw_port_init(sc, i, MVPP2_PRS_LU_MH,
708 		    MVPP2_PRS_PORT_LU_MAX, 0);
709 
710 	mvpp2_prs_def_flow_init(sc);
711 	mvpp2_prs_mh_init(sc);
712 	mvpp2_prs_mac_init(sc);
713 	mvpp2_prs_dsa_init(sc);
714 	ret = mvpp2_prs_etype_init(sc);
715 	if (ret)
716 		return ret;
717 	ret = mvpp2_prs_vlan_init(sc);
718 	if (ret)
719 		return ret;
720 	ret = mvpp2_prs_pppoe_init(sc);
721 	if (ret)
722 		return ret;
723 	ret = mvpp2_prs_ip6_init(sc);
724 	if (ret)
725 		return ret;
726 	ret = mvpp2_prs_ip4_init(sc);
727 	if (ret)
728 		return ret;
729 
730 	return 0;
731 }
732 
/*
 * Invalidate one parser entry: select it, then set the invalid bit
 * in the dedicated TCAM word.
 */
void
mvpp2_prs_hw_inv(struct mvpp2_softc *sc, int index)
{
	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
	    MVPP2_PRS_TCAM_INV_MASK);
}
740 
/*
 * Configure the parser entry point for one port: the first lookup id,
 * the maximum number of lookup iterations, and the initial packet
 * offset at which parsing starts.
 */
void
mvpp2_prs_hw_port_init(struct mvpp2_softc *sc, int port,
    int lu_first, int lu_max, int offset)
{
	uint32_t reg;

	/* First lookup id for this port. */
	reg = mvpp2_read(sc, MVPP2_PRS_INIT_LOOKUP_REG);
	reg &= ~MVPP2_PRS_PORT_LU_MASK(port);
	reg |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(sc, MVPP2_PRS_INIT_LOOKUP_REG, reg);

	/* Maximum lookup iterations. */
	reg = mvpp2_read(sc, MVPP2_PRS_MAX_LOOP_REG(port));
	reg &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	reg |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(sc, MVPP2_PRS_MAX_LOOP_REG(port), reg);

	/* Initial offset into the packet. */
	reg = mvpp2_read(sc, MVPP2_PRS_INIT_OFFS_REG(port));
	reg &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	reg |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(sc, MVPP2_PRS_INIT_OFFS_REG(port), reg);
}
762 
/*
 * Install one default flow entry per port.  The entries carry the
 * port number as the flow id and terminate the lookup (LU_DONE).
 * Their port map is empty; mvpp2_prs_def_flow() enables them later.
 */
void
mvpp2_prs_def_flow_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int i;

	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - i;
		mvpp2_prs_tcam_port_map_set(&pe, 0);
		mvpp2_prs_sram_ai_update(&pe, i, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(sc, &pe);
	}
}
780 
/*
 * Default entry for the Marvell Header (MH) stage: skip MVPP2_MH_SIZE
 * bytes and continue at the MAC lookup, for all ports.
 */
void
mvpp2_prs_mh_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));
	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(sc, &pe);
}
796 
/*
 * MAC stage defaults: a catch-all non-promiscuous entry that drops
 * unmatched frames, plus initially disabled drop-all, promiscuous
 * and multicast entries (enabled later from mvpp2_iff()).
 */
void
mvpp2_prs_mac_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
	/* Unmatched frames get the drop result bit. */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
	    MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(sc, &pe);
	/* Create the drop-all/promisc/multicast entries, disabled. */
	mvpp2_prs_mac_drop_all_set(sc, 0, 0);
	mvpp2_prs_mac_promisc_set(sc, 0, 0);
	mvpp2_prs_mac_multi_set(sc, MVPP2_PE_MAC_MC_ALL, 0, 0);
	mvpp2_prs_mac_multi_set(sc, MVPP2_PE_MAC_MC_IP6, 0, 0);
}
817 
/*
 * DSA stage defaults: entries for tagged/untagged DSA and EDSA
 * frames (all initially disabled), plus a default entry that falls
 * through to the VLAN stage when no DSA tag is present.
 */
void
mvpp2_prs_dsa_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	/* Default: no DSA tag found, continue at the VLAN stage. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_hw_write(sc, &pe);
}
841 
/*
 * Install the default per-ethertype L2 parser entries (PPPoE, ARP,
 * LBTD, IPv4 with and without options, IPv6 and an unknown-ethertype
 * catch-all) into the parser TCAM/SRAM.  Returns 0 on success or a
 * negative value when no free TCAM entry is available.
 */
int
mvpp2_prs_etype_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_PPPOE);
	/* Skip the PPPoE header and continue at the PPPoE lookup unit. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	/* finish = 0: parsing continues after this entry. */
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_ARP);
	/* ARP terminates parsing: hand off to flow generation. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Mark as CPU-bound special frame. */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_IP);
	/* Additionally match version/IHL byte == 0x45 (no options). */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Advance past the ethertype plus 4 bytes of IPv4 header. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/*
	 * Ethertype: IPv4 with options.  Deliberately reuses the entry
	 * built just above (no memset): only the version/IHL match byte
	 * and the result-info words are rewritten below.
	 */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	pe.index = tid;

	/* Clear the old IHL match and only match the version nibble. */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD, MVPP2_PRS_IPV4_HEAD_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_IPV6);
	/* Skip ethertype + 8 fixed header bytes + one 16-byte address. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
	    MVPP2_MAX_L3_ADDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1006 
/*
 * Install the default VLAN parser entries: the known double-VLAN
 * combinations, the single-VLAN entries, and the "double tagged" and
 * "no VLAN" fall-through entries.  Returns 0 on success or an error
 * from the add helpers.
 */
int
mvpp2_prs_vlan_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int ret;

	/* Shadow bookkeeping for the double-VLAN TCAM entries. */
	sc->sc_prs_double_vlans = mallocarray(MVPP2_PRS_DBL_VLANS_MAX,
	    sizeof(*sc->sc_prs_double_vlans), M_DEVBUF, M_WAITOK | M_ZERO);

	/* Double VLAN: 0x8100/0x88a8 and 0x8100/0x8100. */
	ret = mvpp2_prs_double_vlan_add(sc, ETHERTYPE_VLAN, ETHERTYPE_QINQ,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_double_vlan_add(sc, ETHERTYPE_VLAN, ETHERTYPE_VLAN,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	/* Single VLAN: 0x88a8 and 0x8100. */
	ret = mvpp2_prs_vlan_add(sc, ETHERTYPE_QINQ, MVPP2_PRS_SINGLE_VLAN_AI,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_vlan_add(sc, ETHERTYPE_VLAN, MVPP2_PRS_SINGLE_VLAN_AI,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;

	/* Default entry for a frame already marked as double tagged. */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	/* Clear the AI bits for the next iteration. */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
	    MVPP2_PRS_RI_VLAN_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
	    MVPP2_PRS_DBL_VLAN_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry for an untagged frame. */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1057 
/*
 * Install the parser entries for the PPPoE lookup unit: PPP-carried
 * IPv4 (with and without options), IPv6, and a non-IP catch-all.
 * Returns 0 on success or a negative value when no free TCAM entry
 * is available.
 */
int
mvpp2_prs_pppoe_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* PPP protocol: IPv4, header without options. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, PPP_IP);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/*
	 * Refinement: IPv4 version/IHL byte == 0x45 (no options).
	 * Deliberately reuses the previous entry (no memset); only the
	 * match byte and result-info words are rewritten.
	 */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* PPP protocol: IPv6. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* Catch-all: non-IP over PPPoE, mark L3 unknown and finish. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1136 
/*
 * Install the default IPv6 parser entries: the supported L4 protocols
 * (TCP, UDP, ICMPv6, IP-in-IP), multicast classification, and the
 * default entries for unknown protocol, unknown extension protocol
 * and unknown address type.  Returns 0 on success or a negative /
 * error value from the helpers.
 */
int
mvpp2_prs_ip6_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid, ret;

	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	/* ICMPv6 frames are marked as CPU-bound special frames. */
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_ICMPV6,
	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
	if (ret)
		return ret;
	/* IPv4-in-IPv6 is flagged as "IP6 lite" via UDF7. */
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_IPIP, MVPP2_PRS_RI_UDF7_IP6_LITE,
	    MVPP2_PRS_RI_UDF7_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
	if (ret)
		return ret;

	/*
	 * Drop entry: byte 1 at the current parse position matched
	 * against MVPP2_PRS_IPV6_HOP_MASK == 0 -- presumably a
	 * hop-limit-zero drop; confirm against the PPv2 datasheet.
	 */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe,
	    MVPP2_PRS_RI_L3_UN | MVPP2_PRS_RI_DROP_MASK,
	    MVPP2_PRS_RI_L3_PROTO_MASK | MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/*
	 * NOTE(review): shadow lookup id recorded as MVPP2_PRS_LU_IP4
	 * for this and the next two IP6 entries, matching the vendor
	 * reference driver -- confirm intentional.
	 */
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown L4 protocol, no extension headers. */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown protocol inside extension headers. */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown address type, treat as unicast and re-parse. */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to the start of the IPv6 header. */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1227 
/*
 * Install the default IPv4 parser entries: the supported L4 protocols
 * (TCP, UDP, IGMP), broadcast/multicast classification, and the
 * default entries for unknown protocol and unknown address type.
 * Returns 0 on success or an error from the helpers.
 */
int
mvpp2_prs_ip4_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int ret;

	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	/* IGMP frames are marked as CPU-bound special frames. */
	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_IGMP,
	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_BROAD_CAST);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
	if (ret)
		return ret;

	/* Default: unknown L4 protocol, go around for the DIP check. */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown address type, treat as unicast and finish. */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1285 
1286 int
1287 mvpp2_port_match(struct device *parent, void *cfdata, void *aux)
1288 {
1289 	struct mvpp2_attach_args *maa = aux;
1290 	char buf[32];
1291 
1292 	if (OF_getprop(maa->ma_node, "status", buf, sizeof(buf)) > 0 &&
1293 	    strcmp(buf, "disabled") == 0)
1294 		return 0;
1295 
1296 	return 1;
1297 }
1298 
1299 void
1300 mvpp2_port_attach(struct device *parent, struct device *self, void *aux)
1301 {
1302 	struct mvpp2_port *sc = (void *)self;
1303 	struct mvpp2_attach_args *maa = aux;
1304 	struct mvpp2_tx_queue *txq;
1305 	struct mvpp2_rx_queue *rxq;
1306 	struct ifnet *ifp;
1307 	uint32_t phy, reg;
1308 	int i, idx, len, node;
1309 	char *phy_mode;
1310 	char *managed;
1311 
1312 	sc->sc = (void *)parent;
1313 	sc->sc_node = maa->ma_node;
1314 	sc->sc_dmat = maa->ma_dmat;
1315 
1316 	sc->sc_id = OF_getpropint(sc->sc_node, "port-id", 0);
1317 	sc->sc_gop_id = OF_getpropint(sc->sc_node, "gop-port-id", 0);
1318 	sc->sc_sfp = OF_getpropint(sc->sc_node, "sfp", 0);
1319 
1320 	len = OF_getproplen(sc->sc_node, "phy-mode");
1321 	if (len <= 0) {
1322 		printf("%s: cannot extract phy-mode\n", self->dv_xname);
1323 		return;
1324 	}
1325 
1326 	phy_mode = malloc(len, M_TEMP, M_WAITOK);
1327 	OF_getprop(sc->sc_node, "phy-mode", phy_mode, len);
1328 	if (!strncmp(phy_mode, "10gbase-kr", strlen("10gbase-kr")))
1329 		sc->sc_phy_mode = PHY_MODE_10GBASER;
1330 	else if (!strncmp(phy_mode, "2500base-x", strlen("2500base-x")))
1331 		sc->sc_phy_mode = PHY_MODE_2500BASEX;
1332 	else if (!strncmp(phy_mode, "1000base-x", strlen("1000base-x")))
1333 		sc->sc_phy_mode = PHY_MODE_1000BASEX;
1334 	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
1335 		sc->sc_phy_mode = PHY_MODE_SGMII;
1336 	else if (!strncmp(phy_mode, "rgmii-rxid", strlen("rgmii-rxid")))
1337 		sc->sc_phy_mode = PHY_MODE_RGMII_RXID;
1338 	else if (!strncmp(phy_mode, "rgmii-txid", strlen("rgmii-txid")))
1339 		sc->sc_phy_mode = PHY_MODE_RGMII_TXID;
1340 	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
1341 		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
1342 	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
1343 		sc->sc_phy_mode = PHY_MODE_RGMII;
1344 	else {
1345 		printf("%s: cannot use phy-mode %s\n", self->dv_xname,
1346 		    phy_mode);
1347 		return;
1348 	}
1349 	free(phy_mode, M_TEMP, len);
1350 
1351 	/* Lookup PHY. */
1352 	phy = OF_getpropint(sc->sc_node, "phy", 0);
1353 	if (phy) {
1354 		node = OF_getnodebyphandle(phy);
1355 		if (!node) {
1356 			printf(": no phy\n");
1357 			return;
1358 		}
1359 		sc->sc_mdio = mii_byphandle(phy);
1360 		sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
1361 		sc->sc_sfp = OF_getpropint(node, "sfp", sc->sc_sfp);
1362 	}
1363 
1364 	if ((len = OF_getproplen(sc->sc_node, "managed")) >= 0) {
1365 		managed = malloc(len, M_TEMP, M_WAITOK);
1366 		OF_getprop(sc->sc_node, "managed", managed, len);
1367 		if (!strncmp(managed, "in-band-status",
1368 		    strlen("in-band-status")))
1369 			sc->sc_inband_status = 1;
1370 		free(managed, M_TEMP, len);
1371 	}
1372 
1373 	if (OF_getprop(sc->sc_node, "local-mac-address",
1374 	    &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
1375 		memset(sc->sc_lladdr, 0xff, sizeof(sc->sc_lladdr));
1376 	printf(": address %s\n", ether_sprintf(sc->sc_lladdr));
1377 
1378 	sc->sc_ntxq = sc->sc_nrxq = 1;
1379 	sc->sc_txqs = mallocarray(sc->sc_ntxq, sizeof(*sc->sc_txqs),
1380 	    M_DEVBUF, M_WAITOK | M_ZERO);
1381 	sc->sc_rxqs = mallocarray(sc->sc_nrxq, sizeof(*sc->sc_rxqs),
1382 	    M_DEVBUF, M_WAITOK | M_ZERO);
1383 
1384 	for (i = 0; i < sc->sc_ntxq; i++) {
1385 		txq = &sc->sc_txqs[i];
1386 		txq->id = mvpp2_txq_phys(sc->sc_id, i);
1387 		txq->log_id = i;
1388 		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
1389 	}
1390 
1391 	sc->sc_tx_time_coal = MVPP2_TXDONE_COAL_USEC;
1392 
1393 	for (i = 0; i < sc->sc_nrxq; i++) {
1394 		rxq = &sc->sc_rxqs[i];
1395 		rxq->id = sc->sc_id * 32 + i;
1396 		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
1397 		rxq->time_coal = MVPP2_RX_COAL_USEC;
1398 	}
1399 
1400 	mvpp2_egress_disable(sc);
1401 	mvpp2_port_disable(sc);
1402 
1403 	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_GROUP_INDEX_REG,
1404 	    sc->sc_id << MVPP2_ISR_RXQ_GROUP_INDEX_GROUP_SHIFT |
1405 	    0 /* queue vector id */);
1406 	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_REG,
1407 	    sc->sc_nrxq << MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_SIZE_SHIFT |
1408 	    0 /* first rxq */);
1409 
1410 	mvpp2_ingress_disable(sc);
1411 	mvpp2_defaults_set(sc);
1412 
1413 	mvpp2_cls_oversize_rxq_set(sc);
1414 	mvpp2_cls_port_config(sc);
1415 
1416 	/*
1417 	 * We have one pool per core, so all RX queues on a specific
1418 	 * core share that pool.  Also long and short uses the same
1419 	 * pool.
1420 	 */
1421 	for (i = 0; i < sc->sc_nrxq; i++) {
1422 		mvpp2_rxq_long_pool_set(sc, i, i);
1423 		mvpp2_rxq_short_pool_set(sc, i, i);
1424 	}
1425 
1426 	/* Reset Mac */
1427 	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
1428 	    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) |
1429 	    MVPP2_PORT_CTRL2_PORTMACRESET_MASK);
1430 	if (sc->sc_gop_id == 0) {
1431 		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG,
1432 		    mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
1433 		    ~MV_XLG_MAC_CTRL0_MACRESETN_MASK);
1434 		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
1435 		reg |= MVPP22_MPCS_CLK_DIV_PHASE_SET_MASK;
1436 		reg &= ~MVPP22_MPCS_TX_SD_CLK_RESET_MASK;
1437 		reg &= ~MVPP22_MPCS_RX_SD_CLK_RESET_MASK;
1438 		reg &= ~MVPP22_MPCS_MAC_CLK_RESET_MASK;
1439 		mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
1440 		reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
1441 		reg &= ~MVPP22_XPCS_PCSRESET;
1442 		mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
1443 	}
1444 
1445 	timeout_set(&sc->sc_tick, mvpp2_tick, sc);
1446 
1447 	ifp = &sc->sc_ac.ac_if;
1448 	ifp->if_softc = sc;
1449 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1450 	ifp->if_ioctl = mvpp2_ioctl;
1451 	ifp->if_start = mvpp2_start;
1452 	ifp->if_watchdog = mvpp2_watchdog;
1453 	ifq_set_maxlen(&ifp->if_snd, MVPP2_NTXDESC - 1);
1454 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1455 
1456 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1457 
1458 	sc->sc_mii.mii_ifp = ifp;
1459 	sc->sc_mii.mii_readreg = mvpp2_mii_readreg;
1460 	sc->sc_mii.mii_writereg = mvpp2_mii_writereg;
1461 	sc->sc_mii.mii_statchg = mvpp2_mii_statchg;
1462 
1463 	ifmedia_init(&sc->sc_media, 0, mvpp2_media_change, mvpp2_media_status);
1464 
1465 	if (sc->sc_mdio) {
1466 		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
1467 		    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY, 0);
1468 		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1469 			printf("%s: no PHY found!\n", self->dv_xname);
1470 			ifmedia_add(&sc->sc_mii.mii_media,
1471 			    IFM_ETHER|IFM_MANUAL, 0, NULL);
1472 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
1473 		} else
1474 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1475 	} else {
1476 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
1477 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1478 
1479 		if (sc->sc_inband_status) {
1480 			mvpp2_inband_statchg(sc);
1481 		} else {
1482 			sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
1483 			sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
1484 			mvpp2_mii_statchg(self);
1485 		}
1486 
1487 		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
1488 		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
1489 	}
1490 
1491 	if_attach(ifp);
1492 	ether_ifattach(ifp);
1493 
1494 	if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
1495 	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
1496 	    sc->sc_phy_mode == PHY_MODE_SGMII ||
1497 	    sc->sc_phy_mode == PHY_MODE_RGMII ||
1498 	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
1499 	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
1500 	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
1501 		reg = mvpp2_gmac_read(sc, MV_GMAC_INTERRUPT_MASK_REG);
1502 		reg |= MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_MASK;
1503 		mvpp2_gmac_write(sc, MV_GMAC_INTERRUPT_MASK_REG, reg);
1504 		reg = mvpp2_gmac_read(sc, MV_GMAC_INTERRUPT_SUM_MASK_REG);
1505 		reg |= MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_MASK;
1506 		mvpp2_gmac_write(sc, MV_GMAC_INTERRUPT_SUM_MASK_REG, reg);
1507 	}
1508 
1509 	if (sc->sc_gop_id == 0) {
1510 		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_MASK_REG);
1511 		reg |= MV_XLG_INTERRUPT_LINK_CHANGE_MASK;
1512 		mvpp2_xlg_write(sc, MV_XLG_INTERRUPT_MASK_REG, reg);
1513 		reg = mvpp2_xlg_read(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
1514 		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG_MASK;
1515 		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG_MASK;
1516 		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
1517 		    sc->sc_phy_mode == PHY_MODE_XAUI)
1518 			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG_MASK;
1519 		else
1520 			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG_MASK;
1521 		mvpp2_xlg_write(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg);
1522 	}
1523 
1524 	idx = OF_getindex(sc->sc_node, "link", "interrupt-names");
1525 	if (idx >= 0)
1526 		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1527 		    mvpp2_link_intr, sc, sc->sc_dev.dv_xname);
1528 	idx = OF_getindex(sc->sc_node, "hif0", "interrupt-names");
1529 	if (idx < 0)
1530 		idx = OF_getindex(sc->sc_node, "tx-cpu0", "interrupt-names");
1531 	if (idx >= 0)
1532 		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1533 		    mvpp2_intr, sc, sc->sc_dev.dv_xname);
1534 }
1535 
/* Read a 32-bit register in the packet processor base window. */
uint32_t
mvpp2_read(struct mvpp2_softc *sc, bus_addr_t addr)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh_base, addr);
}
1541 
/* Write a 32-bit register in the packet processor base window. */
void
mvpp2_write(struct mvpp2_softc *sc, bus_addr_t addr, uint32_t data)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh_base, addr, data);
}
1547 
1548 uint32_t
1549 mvpp2_gmac_read(struct mvpp2_port *sc, bus_addr_t addr)
1550 {
1551 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1552 	    MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr);
1553 }
1554 
1555 void
1556 mvpp2_gmac_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1557 {
1558 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1559 	    MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr,
1560 	    data);
1561 }
1562 
1563 uint32_t
1564 mvpp2_xlg_read(struct mvpp2_port *sc, bus_addr_t addr)
1565 {
1566 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1567 	    MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr);
1568 }
1569 
1570 void
1571 mvpp2_xlg_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1572 {
1573 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1574 	    MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr,
1575 	    data);
1576 }
1577 
1578 uint32_t
1579 mvpp2_mpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
1580 {
1581 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1582 	    MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr);
1583 }
1584 
1585 void
1586 mvpp2_mpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1587 {
1588 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1589 	    MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr,
1590 	    data);
1591 }
1592 
1593 uint32_t
1594 mvpp2_xpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
1595 {
1596 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1597 	    MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr);
1598 }
1599 
1600 void
1601 mvpp2_xpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1602 {
1603 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1604 	    MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr,
1605 	    data);
1606 }
1607 
/*
 * ifnet start routine: drain the interface send queue into the
 * shared aggregated TX queue.  Runs with the ifq serialized, so the
 * producer index is only committed back to txq->prod once at the end.
 */
void
mvpp2_start(struct ifnet *ifp)
{
	struct mvpp2_port *sc = ifp->if_softc;
	struct mvpp2_tx_queue *txq = &sc->sc->sc_aggr_txqs[0];
	struct mbuf *m;
	int error, idx;

	/* Nothing to do unless we are up, not throttled, and have link. */
	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	/* Local producer index; published to txq->prod after the loop. */
	idx = txq->prod;
	while (txq->cnt < MVPP2_AGGR_TXQ_SIZE) {
		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		error = mvpp2_encap(sc, m, &idx);
		if (error == ENOBUFS) {
			m_freem(m); /* give up: drop it */
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		if (error == EFBIG) {
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (txq->prod != idx) {
		txq->prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
1656 
/*
 * Map an mbuf chain for DMA and build its TX descriptors in the
 * aggregated TX queue, starting at *idx.  On success *idx is advanced
 * past the consumed descriptors and ownership of the mbuf passes to
 * the queue; on ENOBUFS the mbuf is left untouched for the caller to
 * free.
 */
int
mvpp2_encap(struct mvpp2_port *sc, struct mbuf *m, int *idx)
{
	struct mvpp2_tx_queue *txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_tx_desc *txd;
	bus_dmamap_t map;
	uint32_t command;
	int i, current, first, last;

	first = last = current = *idx;
	map = txq->buf[current].mb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
		return ENOBUFS;

	/* Keep some slack so the producer can never catch the consumer. */
	if (map->dm_nsegs > (MVPP2_AGGR_TXQ_SIZE - txq->cnt - 2)) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* No checksum offload. */
	command = MVPP2_TXD_L4_CSUM_NOT |
	    MVPP2_TXD_IP_CSUM_DISABLE;
	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &txq->descs[current];
		memset(txd, 0, sizeof(*txd));
		/* The buffer address is split into a 32-byte-aligned
		 * base and a byte offset within that line. */
		txd->buf_phys_addr_hw_cmd2 =
		    map->dm_segs[i].ds_addr & ~0x1f;
		txd->packet_offset =
		    map->dm_segs[i].ds_addr & 0x1f;
		txd->data_size = map->dm_segs[i].ds_len;
		txd->phys_txq = sc->sc_txqs[0].id;
		txd->command = command |
		    MVPP2_TXD_PADDING_DISABLE;
		/* Mark the first and last descriptor of the frame. */
		if (i == 0)
		    txd->command |= MVPP2_TXD_F_DESC;
		if (i == (map->dm_nsegs - 1))
		    txd->command |= MVPP2_TXD_L_DESC;

		bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring),
		    current * sizeof(*txd), sizeof(*txd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		last = current;
		current = (current + 1) % MVPP2_AGGR_TXQ_SIZE;
		KASSERT(current != txq->cons);
	}

	/*
	 * The mbuf is recorded at the LAST descriptor slot so it is
	 * freed only when the whole frame has completed; the first
	 * slot inherits the last slot's spare map.
	 */
	KASSERT(txq->buf[last].mb_m == NULL);
	txq->buf[first].mb_map = txq->buf[last].mb_map;
	txq->buf[last].mb_map = map;
	txq->buf[last].mb_m = m;

	txq->cnt += map->dm_nsegs;
	*idx = current;

	/* Tell the hardware how many descriptors were added. */
	mvpp2_write(sc->sc, MVPP2_AGGR_TXQ_UPDATE_REG, map->dm_nsegs);

	return 0;
}
1719 
1720 int
1721 mvpp2_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
1722 {
1723 	struct mvpp2_port *sc = ifp->if_softc;
1724 	struct ifreq *ifr = (struct ifreq *)addr;
1725 	int error = 0, s;
1726 
1727 	s = splnet();
1728 
1729 	switch (cmd) {
1730 	case SIOCSIFADDR:
1731 		ifp->if_flags |= IFF_UP;
1732 		/* FALLTHROUGH */
1733 	case SIOCSIFFLAGS:
1734 		if (ifp->if_flags & IFF_UP) {
1735 			if (ifp->if_flags & IFF_RUNNING)
1736 				error = ENETRESET;
1737 			else
1738 				mvpp2_up(sc);
1739 		} else {
1740 			if (ifp->if_flags & IFF_RUNNING)
1741 				mvpp2_down(sc);
1742 		}
1743 		break;
1744 
1745 	case SIOCGIFMEDIA:
1746 	case SIOCSIFMEDIA:
1747 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1748 		break;
1749 
1750 	case SIOCGIFRXR:
1751 		error = mvpp2_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1752 		break;
1753 
1754 	case SIOCGIFSFFPAGE:
1755 		error = rw_enter(&mvpp2_sff_lock, RW_WRITE|RW_INTR);
1756 		if (error != 0)
1757 			break;
1758 
1759 		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
1760 		rw_exit(&mvpp2_sff_lock);
1761 		break;
1762 
1763 	default:
1764 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
1765 		break;
1766 	}
1767 
1768 	if (error == ENETRESET) {
1769 		if (ifp->if_flags & IFF_RUNNING)
1770 			mvpp2_iff(sc);
1771 		error = 0;
1772 	}
1773 
1774 	splx(s);
1775 	return (error);
1776 }
1777 
1778 int
1779 mvpp2_rxrinfo(struct mvpp2_port *sc, struct if_rxrinfo *ifri)
1780 {
1781 	struct mvpp2_rx_queue *rxq;
1782 	struct if_rxring_info *ifrs, *ifr;
1783 	unsigned int i;
1784 	int error;
1785 
1786 	ifrs = mallocarray(sc->sc_nrxq, sizeof(*ifrs), M_TEMP,
1787 	    M_WAITOK|M_ZERO|M_CANFAIL);
1788 	if (ifrs == NULL)
1789 		return (ENOMEM);
1790 
1791 	for (i = 0; i < sc->sc_nrxq; i++) {
1792 		rxq = &sc->sc_rxqs[i];
1793 		ifr = &ifrs[i];
1794 
1795 		snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), "%u", i);
1796 		ifr->ifr_size = MCLBYTES;
1797 		ifr->ifr_info = rxq->rxring;
1798 	}
1799 
1800 	error = if_rxr_info_ioctl(ifri, i, ifrs);
1801 	free(ifrs, M_TEMP, i * sizeof(*ifrs));
1802 
1803 	return (error);
1804 }
1805 
/*
 * Transmit watchdog; fires when the timeout armed in the transmit path
 * (ifp->if_timer = 5) expires without tx completion.  Currently only
 * logs the event -- no recovery or reset is attempted.
 */
void
mvpp2_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}
1811 
1812 int
1813 mvpp2_media_change(struct ifnet *ifp)
1814 {
1815 	struct mvpp2_port *sc = ifp->if_softc;
1816 
1817 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1818 		mii_mediachg(&sc->sc_mii);
1819 
1820 	return (0);
1821 }
1822 
1823 void
1824 mvpp2_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1825 {
1826 	struct mvpp2_port *sc = ifp->if_softc;
1827 
1828 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1829 		mii_pollstat(&sc->sc_mii);
1830 
1831 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1832 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1833 }
1834 
1835 int
1836 mvpp2_mii_readreg(struct device *self, int phy, int reg)
1837 {
1838 	struct mvpp2_port *sc = (void *)self;
1839 	return sc->sc_mdio->md_readreg(sc->sc_mdio->md_cookie, phy, reg);
1840 }
1841 
1842 void
1843 mvpp2_mii_writereg(struct device *self, int phy, int reg, int val)
1844 {
1845 	struct mvpp2_port *sc = (void *)self;
1846 	return sc->sc_mdio->md_writereg(sc->sc_mdio->md_cookie, phy, reg, val);
1847 }
1848 
/*
 * MII status-change callback: propagate the new link state to the MAC.
 */
void
mvpp2_mii_statchg(struct device *self)
{
	mvpp2_port_change((struct mvpp2_port *)self);
}
1855 
/*
 * Derive link status and active media from the MAC's in-band status
 * registers and propagate the result via mvpp2_port_change().
 */
void
mvpp2_inband_statchg(struct mvpp2_port *sc)
{
	uint32_t reg;

	sc->sc_mii.mii_media_status = IFM_AVALID;
	sc->sc_mii.mii_media_active = IFM_ETHER;

	/* Only GOP 0 has the 10G (XLG) MAC. */
	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		reg = mvpp2_xlg_read(sc, MV_XLG_MAC_PORT_STATUS_REG);
		if (reg & MV_XLG_MAC_PORT_STATUS_LINKSTATUS_MASK)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		/* 10G is reported as full-duplex SR unconditionally. */
		sc->sc_mii.mii_media_active |= IFM_FDX;
		sc->sc_mii.mii_media_active |= IFM_10G_SR;
	} else {
		reg = mvpp2_gmac_read(sc, MVPP2_PORT_STATUS0_REG);
		if (reg & MVPP2_PORT_STATUS0_LINKUP_MASK)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		if (reg & MVPP2_PORT_STATUS0_FULLDX_MASK)
			sc->sc_mii.mii_media_active |= IFM_FDX;
		/* Serdes modes fix the media; else read speed bits. */
		if (sc->sc_phy_mode == PHY_MODE_2500BASEX)
			sc->sc_mii.mii_media_active |= IFM_2500_SX;
		else if (sc->sc_phy_mode == PHY_MODE_1000BASEX)
			sc->sc_mii.mii_media_active |= IFM_1000_SX;
		else if (reg & MVPP2_PORT_STATUS0_GMIISPEED_MASK)
			sc->sc_mii.mii_media_active |= IFM_1000_T;
		else if (reg & MVPP2_PORT_STATUS0_MIISPEED_MASK)
			sc->sc_mii.mii_media_active |= IFM_100_TX;
		else
			sc->sc_mii.mii_media_active |= IFM_10_T;
	}

	mvpp2_port_change(sc);
}
1891 
/*
 * React to a link-state transition: track sc_link and, unless the MAC
 * follows the link in-band by itself, force the new state into the MAC.
 */
void
mvpp2_port_change(struct mvpp2_port *sc)
{
	uint32_t reg;

	/* Nothing to do if the link state did not actually change. */
	if (!!(sc->sc_mii.mii_media_status & IFM_ACTIVE) == sc->sc_link)
		return;

	sc->sc_link = !sc->sc_link;

	/* With in-band status the MAC tracks the link on its own. */
	if (sc->sc_inband_status)
		return;

	if (sc->sc_link) {
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKDOWN_MASK;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKPASS_MASK;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
			reg |= MVPP2_GMAC_FORCE_LINK_PASS;
			reg &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			/* Mirror negotiated speed/duplex into the MAC. */
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_2500_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T)
				reg |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX)
				reg |= MVPP2_GMAC_CONFIG_MII_SPEED;
			if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
				reg |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	} else {
		/* Link lost: force the MAC link down. */
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS_MASK;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKDOWN_MASK;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
			reg |= MVPP2_GMAC_FORCE_LINK_DOWN;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	}
}
1944 
1945 void
1946 mvpp2_tick(void *arg)
1947 {
1948 	struct mvpp2_port *sc = arg;
1949 	int s;
1950 
1951 	s = splnet();
1952 	mii_tick(&sc->sc_mii);
1953 	splx(s);
1954 
1955 	timeout_add_sec(&sc->sc_tick, 1);
1956 }
1957 
1958 int
1959 mvpp2_link_intr(void *arg)
1960 {
1961 	struct mvpp2_port *sc = arg;
1962 	uint32_t reg;
1963 	int event = 0;
1964 
1965 	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
1966 	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
1967 		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_CAUSE_REG);
1968 		if (reg & MV_XLG_INTERRUPT_LINK_CHANGE_MASK)
1969 			event = 1;
1970 	} else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
1971 	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
1972 	    sc->sc_phy_mode == PHY_MODE_SGMII ||
1973 	    sc->sc_phy_mode == PHY_MODE_RGMII ||
1974 	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
1975 	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
1976 	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
1977 		reg = mvpp2_gmac_read(sc, MV_GMAC_INTERRUPT_CAUSE_REG);
1978 		if (reg & MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_MASK)
1979 			event = 1;
1980 	}
1981 
1982 	if (event && sc->sc_inband_status)
1983 		mvpp2_inband_statchg(sc);
1984 
1985 	return (1);
1986 }
1987 
/*
 * Per-port rx/tx interrupt handler: read the combined cause register
 * and dispatch tx completion and rx processing.
 */
int
mvpp2_intr(void *arg)
{
	struct mvpp2_port *sc = arg;
	uint32_t reg;

	reg = mvpp2_read(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id));
	if (reg & MVPP2_CAUSE_MISC_SUM_MASK) {
		/* Acknowledge misc causes and clear the summary bit. */
		mvpp2_write(sc->sc, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id),
		    reg & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}
	/* Tx: pass the per-queue bitmap extracted from the cause word. */
	if (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_tx_proc(sc,
		    (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK) >>
		    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET);

	/* Rx: the queue bitmap sits in the low bits of the cause word. */
	if (reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_rx_proc(sc,
		    reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK);

	return (1);
}
2011 
2012 void
2013 mvpp2_tx_proc(struct mvpp2_port *sc, uint8_t queues)
2014 {
2015 	struct mvpp2_tx_queue *txq;
2016 	int i;
2017 
2018 	for (i = 0; i < sc->sc_ntxq; i++) {
2019 		txq = &sc->sc_txqs[i];
2020 		if ((queues & (1 << i)) == 0)
2021 			continue;
2022 		mvpp2_txq_proc(sc, txq);
2023 	}
2024 }
2025 
/*
 * Reclaim completed transmit descriptors for one tx queue: ask the
 * hardware how many frames were sent, then free the corresponding
 * mbufs from the shared aggregated queue and advance its consumer.
 */
void
mvpp2_txq_proc(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvpp2_tx_queue *aggr_txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_buf *txb;
	int i, idx, nsent;

	/* Reading the SENT register also clears the hardware counter. */
	nsent = (mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id)) &
	    MVPP2_TRANSMITTED_COUNT_MASK) >>
	    MVPP2_TRANSMITTED_COUNT_OFFSET;

	for (i = 0; i < nsent; i++) {
		idx = aggr_txq->cons;
		KASSERT(idx < MVPP2_AGGR_TXQ_SIZE);

		/* Only the frame's last slot carries the mbuf (see encap). */
		txb = &aggr_txq->buf[idx];
		if (txb->mb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
			    txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->mb_map);

			m_freem(txb->mb_m);
			txb->mb_m = NULL;
		}

		aggr_txq->cnt--;
		aggr_txq->cons = (aggr_txq->cons + 1) % MVPP2_AGGR_TXQ_SIZE;
	}

	/* All tx work drained: disarm the watchdog. */
	if (aggr_txq->cnt == 0)
		ifp->if_timer = 0;

	/* Descriptors freed up; let the stack queue more packets. */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
2062 
2063 void
2064 mvpp2_rx_proc(struct mvpp2_port *sc, uint8_t queues)
2065 {
2066 	struct mvpp2_rx_queue *rxq;
2067 	int i;
2068 
2069 	for (i = 0; i < sc->sc_nrxq; i++) {
2070 		rxq = &sc->sc_rxqs[i];
2071 		if ((queues & (1 << i)) == 0)
2072 			continue;
2073 		mvpp2_rxq_proc(sc, rxq);
2074 	}
2075 
2076 	mvpp2_rx_refill(sc);
2077 }
2078 
/*
 * Drain one rx queue: for each received descriptor, hand the mbuf up
 * the stack and record its buffer-manager slot on the per-pool
 * freelist so mvpp2_rx_refill() can re-arm it later.
 */
void
mvpp2_rxq_proc(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mvpp2_rx_desc *rxd;
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	struct mbuf *m;
	uint64_t virt;
	uint32_t i, nrecv, pool;

	nrecv = mvpp2_rxq_received(sc, rxq->id);
	if (!nrecv)
		return;

	/* One buffer-manager pool per CPU. */
	pool = curcpu()->ci_cpuid;
	KASSERT(pool < sc->sc->sc_npools);
	bm = &sc->sc->sc_bm_pools[pool];

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < nrecv; i++) {
		rxd = &rxq->descs[rxq->cons];
		/*
		 * The "virtual address" cookie encodes the pool in bits
		 * 16-31 and the rxbuf[] index in bits 0-15.
		 */
		virt = rxd->buf_cookie_bm_qset_cls_info;
		KASSERT(((virt >> 16) & 0xffff) == pool);
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m != NULL);

		bus_dmamap_sync(sc->sc_dmat, rxb->mb_map, 0,
		    rxd->data_size, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->mb_map);

		/* Take ownership of the mbuf away from the pool slot. */
		m = rxb->mb_m;
		rxb->mb_m = NULL;

		/* Strip the Marvell header prepended by the hardware. */
		m->m_pkthdr.len = m->m_len = rxd->data_size;
		m_adj(m, MVPP2_MH_SIZE);
		ml_enqueue(&ml, m);

		/* Remember this slot needs a fresh buffer. */
		KASSERT(bm->freelist[bm->free_prod] == -1);
		bm->freelist[bm->free_prod] = virt & 0xffffffff;
		bm->free_prod = (bm->free_prod + 1) % MVPP2_BM_SIZE;

		rxq->cons = (rxq->cons + 1) % MVPP2_NRXDESC;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Tell the hardware how many descriptors we processed/freed. */
	mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);

	if_input(ifp, &ml);
}
2137 
2138 /*
2139  * We have a pool per core, and since we should not assume that
2140  * RX buffers are always used in order, keep a list of rxbuf[]
2141  * indices that should be filled with an mbuf, if possible.
2142  */
/*
 * Re-arm the per-CPU buffer-manager pool: for every slot recorded on
 * the freelist by mvpp2_rxq_proc(), allocate a fresh mbuf and hand its
 * physical/virtual addresses back to the hardware pool.  Stops early
 * (leaving the freelist entry in place) when allocation fails.
 */
void
mvpp2_rx_refill(struct mvpp2_port *sc)
{
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	uint64_t phys, virt;
	int pool;

	pool = curcpu()->ci_cpuid;
	KASSERT(pool < sc->sc->sc_npools);
	bm = &sc->sc->sc_bm_pools[pool];

	while (bm->free_cons != bm->free_prod) {
		KASSERT(bm->freelist[bm->free_cons] != -1);
		virt = bm->freelist[bm->free_cons];
		/* Cookie layout: pool in bits 16-31, slot in bits 0-15. */
		KASSERT(((virt >> 16) & 0xffff) == pool);
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m == NULL);

		rxb->mb_m = mvpp2_alloc_mbuf(sc->sc, rxb->mb_map);
		if (rxb->mb_m == NULL)
			break;

		bm->freelist[bm->free_cons] = -1;
		bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;

		/*
		 * Release the buffer to the pool: high address bits go
		 * in first, then the 32-bit virt/phys release registers.
		 */
		phys = rxb->mb_map->dm_segs[0].ds_addr;
		mvpp2_write(sc->sc, MVPP22_BM_PHY_VIRT_HIGH_RLS_REG,
		    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
		    << MVPP22_BM_VIRT_HIGH_RLS_OFFST) |
		    (((phys >> 32) & MVPP22_ADDR_HIGH_MASK)
		    << MVPP22_BM_PHY_HIGH_RLS_OFFSET));
		mvpp2_write(sc->sc, MVPP2_BM_VIRT_RLS_REG,
		    virt & 0xffffffff);
		mvpp2_write(sc->sc, MVPP2_BM_PHY_RLS_REG(pool),
		    phys & 0xffffffff);
	}
}
2182 
/*
 * Bring the interface up: program the parser for our MAC address,
 * initialize the tx/rx queues, configure media and the MAC, unmask
 * interrupts and start the periodic timer.
 */
void
mvpp2_up(struct mvpp2_port *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	memcpy(sc->sc_cur_lladdr, sc->sc_lladdr, ETHER_ADDR_LEN);
	mvpp2_prs_mac_da_accept(sc->sc, sc->sc_id, etherbroadcastaddr, 1);
	mvpp2_prs_mac_da_accept(sc->sc, sc->sc_id, sc->sc_cur_lladdr, 1);
	/* FIXME: not promisc!!! */
	mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id, 1);
	mvpp2_prs_tag_mode_set(sc->sc, sc->sc_id, MVPP2_TAG_TYPE_MH);
	mvpp2_prs_def_flow(sc);

	for (i = 0; i < sc->sc_ntxq; i++)
		mvpp2_txq_hw_init(sc, &sc->sc_txqs[i]);

	mvpp2_tx_time_coal_set(sc, sc->sc_tx_time_coal);

	for (i = 0; i < sc->sc_nrxq; i++)
		mvpp2_rxq_hw_init(sc, &sc->sc_rxqs[i]);

	/* FIXME: rx buffer fill */

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	mvpp2_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	mvpp2_txp_max_tx_size_set(sc);

	/* XXX: single vector */
	mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id),
	    MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_MISC_SUM_MASK);
	mvpp2_interrupts_enable(sc, (1 << 0));

	mvpp2_mac_config(sc);
	mvpp2_egress_enable(sc);
	mvpp2_ingress_enable(sc);

	/* Kick off the one-second MII poll. */
	timeout_add_sec(&sc->sc_tick, 1);
}
2232 
/*
 * Allocate and program one aggregated (per-CPU) tx queue: DMA ring,
 * per-slot DMA maps, and the hardware base/size registers.
 */
void
mvpp2_aggr_txq_hw_init(struct mvpp2_softc *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int i;

	/* Descriptor ring must be 32-byte aligned for the hardware. */
	txq->ring = mvpp2_dmamem_alloc(sc,
	    MVPP2_AGGR_TXQ_SIZE * sizeof(struct mvpp2_tx_desc), 32);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_AGGR_TXQ_SIZE, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVPP2_AGGR_TXQ_SIZE; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	/* Resume from wherever the hardware producer index stands. */
	txq->prod = mvpp2_read(sc, MVPP2_AGGR_TXQ_INDEX_REG(txq->id));
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_ADDR_REG(txq->id),
	    MVPP2_DMA_DVA(txq->ring) >> MVPP22_DESC_ADDR_SHIFT);
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_SIZE_REG(txq->id),
	    MVPP2_AGGR_TXQ_SIZE);
}
2262 
/*
 * Allocate and program one per-port tx queue: DMA ring, per-slot DMA
 * maps, descriptor prefetch, and the WRR/EJP scheduler settings.
 */
void
mvpp2_txq_hw_init(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int desc, desc_per_txq;
	uint32_t reg;
	int i;

	txq->prod = txq->cons = txq->cnt = 0;

	/* Descriptor ring must be 32-byte aligned for the hardware. */
	txq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NTXDESC * sizeof(struct mvpp2_tx_desc), 32);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_NTXDESC, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVPP2_NTXDESC; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	/* Queue registers are accessed indirectly via TXQ_NUM. */
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(txq->ring));
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG,
	    MVPP2_NTXDESC & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(sc->sc, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_RSVD_CLR_REG,
	    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	reg = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG);
	reg &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PENDING_REG, reg);

	/* Carve out this queue's slice of the descriptor prefetch buffer. */
	desc_per_txq = 16;
	desc = (sc->sc_id * MVPP2_MAX_TXQ * desc_per_txq) +
	    (txq->log_id * desc_per_txq);

	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG,
	    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
	    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	mvpp2_write(sc->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(sc));

	reg = mvpp2_read(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	reg &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	reg |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	reg |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), reg);

	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
	    MVPP2_TXQ_TOKEN_SIZE_MAX);

	mvpp2_tx_pkts_coal_set(sc, txq, txq->done_pkts_coal);
}
2326 
/*
 * Allocate and program one rx queue: DMA ring, hardware base/size
 * registers and the rx coalescing parameters.
 */
void
mvpp2_rxq_hw_init(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	rxq->prod = rxq->cons = 0;

	/* Descriptor ring must be 32-byte aligned for the hardware. */
	rxq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NRXDESC * sizeof(struct mvpp2_rx_desc), 32);
	rxq->descs = MVPP2_DMA_KVA(rxq->ring);

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring),
	    0, MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Queue registers are accessed indirectly via RXQ_NUM. */
	mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(rxq->ring) >> MVPP22_DESC_ADDR_SHIFT);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, MVPP2_NRXDESC);
	mvpp2_write(sc->sc, MVPP2_RXQ_INDEX_REG, 0);
	mvpp2_rxq_offset_set(sc, rxq->id, 0);
	mvpp2_rx_pkts_coal_set(sc, rxq, rxq->pkts_coal);
	mvpp2_rx_time_coal_set(sc, rxq, rxq->time_coal);
	/* Make the whole ring available to the hardware. */
	mvpp2_rxq_status_update(sc, rxq->id, 0, MVPP2_NRXDESC);
}
2351 
/*
 * (Re)configure the MAC for the current phy mode: reset the GMAC (and
 * on GOP 0 the XLG MAC and PCS blocks), set up the comphy and GOP,
 * release the relevant resets, program the max frame size and finally
 * apply the mode-specific MAC configuration.
 */
void
mvpp2_mac_config(struct mvpp2_port *sc)
{
	uint32_t reg;

	mvpp2_port_disable(sc);

	/* Put the GMAC into reset while we reconfigure. */
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
	    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) |
	    MVPP2_PORT_CTRL2_PORTMACRESET_MASK);
	if (sc->sc_gop_id == 0) {
		/* GOP 0 also carries the XLG MAC and the MPCS/XPCS. */
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG,
		    mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
		    ~MV_XLG_MAC_CTRL0_MACRESETN_MASK);
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
		reg |= MVPP22_MPCS_CLK_DIV_PHASE_SET_MASK;
		reg &= ~MVPP22_MPCS_TX_SD_CLK_RESET_MASK;
		reg &= ~MVPP22_MPCS_RX_SD_CLK_RESET_MASK;
		reg &= ~MVPP22_MPCS_MAC_CLK_RESET_MASK;
		mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
		reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
		reg &= ~MVPP22_XPCS_PCSRESET;
		mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
	}

	mvpp2_comphy_config(sc);
	mvpp2_gop_config(sc);

	if (sc->sc_gop_id == 0) {
		/* Release the PCS matching the selected phy mode. */
		if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
			reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
			reg &= ~MVPP22_MPCS_CLK_DIV_PHASE_SET_MASK;
			reg |= MVPP22_MPCS_TX_SD_CLK_RESET_MASK;
			reg |= MVPP22_MPCS_RX_SD_CLK_RESET_MASK;
			reg |= MVPP22_MPCS_MAC_CLK_RESET_MASK;
			mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
		} else if (sc->sc_phy_mode == PHY_MODE_XAUI) {
			reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
			reg |= MVPP22_XPCS_PCSRESET;
			mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
		}

		/* Select 10G (XLG) or 1G (GMAC) MAC mode. */
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL3_REG);
		reg &= ~MV_XLG_MAC_CTRL3_MACMODESELECT_MASK;
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI)
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_10G;
		else
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_GMAC;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL3_REG, reg);
	}

	/* Max frame size; registers want the value in units of two. */
	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL1_REG);
		reg &= ~MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL1_REG, reg);
	} else {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_CTRL_0_REG);
		reg &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MVPP2_GMAC_MAX_RX_SIZE_OFFS;
		mvpp2_gmac_write(sc, MVPP2_GMAC_CTRL_0_REG, reg);
	}

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI))
		mvpp2_xlg_config(sc);
	else
		mvpp2_gmac_config(sc);

	mvpp2_port_enable(sc);
}
2427 
2428 void
2429 mvpp2_xlg_config(struct mvpp2_port *sc)
2430 {
2431 	uint32_t ctl0, ctl4;
2432 
2433 	ctl0 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
2434 	ctl4 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL4_REG);
2435 
2436 	ctl0 |= MV_XLG_MAC_CTRL0_MACRESETN_MASK;
2437 	ctl4 &= ~MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK;
2438 	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_OFFS;
2439 	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_MASK;
2440 
2441 	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, ctl0);
2442 	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL4_REG, ctl0);
2443 
2444 	/* Port reset */
2445 	while ((mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
2446 	    MV_XLG_MAC_CTRL0_MACRESETN_MASK) == 0)
2447 		;
2448 }
2449 
/*
 * Configure the 1G (GMAC) MAC and its PCS for the current phy mode,
 * optionally enabling in-band auto-negotiation, then take the port out
 * of reset.
 */
void
mvpp2_gmac_config(struct mvpp2_port *sc)
{
	uint32_t ctl0, ctl2, ctl4, panc;

	/* Setup phy. */
	ctl0 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL0_REG);
	ctl2 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG);
	ctl4 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL4_REG);
	panc = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);

	/* Force link down to change in-band settings. */
	panc &= ~MVPP2_GMAC_FORCE_LINK_PASS;
	panc |= MVPP2_GMAC_FORCE_LINK_DOWN;
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, panc);

	/* Start from a clean slate: clear mode and autoneg bits. */
	ctl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK |
	    MVPP2_GMAC_INBAND_AN_MASK);
	panc &= ~(MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
	    MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FC_ADV_EN |
	    MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
	    MVPP2_GMAC_IN_BAND_AUTONEG);

	switch (sc->sc_phy_mode) {
	case PHY_MODE_XAUI:
	case PHY_MODE_10GBASER:
		/* Handled by the XLG MAC, nothing to do here. */
		break;
	case PHY_MODE_2500BASEX:
	case PHY_MODE_1000BASEX:
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
		break;
	case PHY_MODE_SGMII:
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl2 |= MVPP2_GMAC_INBAND_AN_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
		break;
	case PHY_MODE_RGMII:
	case PHY_MODE_RGMII_ID:
	case PHY_MODE_RGMII_RXID:
	case PHY_MODE_RGMII_TXID:
		ctl4 &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
		break;
	}

	/* Use Auto-Negotiation for Inband Status only */
	if (sc->sc_inband_status) {
		panc &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
		panc &= ~MVPP2_GMAC_FORCE_LINK_PASS;
		panc &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		panc |= MVPP2_GMAC_IN_BAND_AUTONEG;
		/* TODO: read mode from SFP */
		if (1) {
			/* 802.3z: speed/duplex fixed at 1G full duplex. */
			ctl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
			panc |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			panc |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		} else {
			/* SGMII: speed/duplex negotiated in-band. */
			panc |= MVPP2_GMAC_AN_SPEED_EN;
			panc |= MVPP2_GMAC_AN_DUPLEX_EN;
		}
	}

	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL0_REG, ctl0);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG, ctl2);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL4_REG, ctl4);
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, panc);

	/* Wait until the port comes out of reset. */
	while (mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) &
	    MVPP2_PORT_CTRL2_PORTMACRESET_MASK)
		;
}
2536 
2537 #define COMPHY_BASE		0x120000
2538 #define COMPHY_SIP_POWER_ON	0x82000001
2539 #define COMPHY_SIP_POWER_OFF	0x82000002
2540 #define COMPHY_SPEED(x)		((x) << 2)
2541 #define  COMPHY_SPEED_1_25G		0 /* SGMII 1G */
2542 #define  COMPHY_SPEED_2_5G		1
2543 #define  COMPHY_SPEED_3_125G		2 /* SGMII 2.5G */
2544 #define  COMPHY_SPEED_5G		3
2545 #define  COMPHY_SPEED_5_15625G		4 /* XFI 5G */
2546 #define  COMPHY_SPEED_6G		5
2547 #define  COMPHY_SPEED_10_3125G		6 /* XFI 10G */
2548 #define COMPHY_UNIT(x)		((x) << 8)
2549 #define COMPHY_MODE(x)		((x) << 12)
2550 #define  COMPHY_MODE_SATA		1
2551 #define  COMPHY_MODE_SGMII		2 /* SGMII 1G */
2552 #define  COMPHY_MODE_HS_SGMII		3 /* SGMII 2.5G */
2553 #define  COMPHY_MODE_USB3H		4
2554 #define  COMPHY_MODE_USB3D		5
2555 #define  COMPHY_MODE_PCIE		6
2556 #define  COMPHY_MODE_RXAUI		7
2557 #define  COMPHY_MODE_XFI		8
2558 #define  COMPHY_MODE_SFI		9
2559 #define  COMPHY_MODE_USB3		10
2560 #define  COMPHY_MODE_AP			11
2561 
2562 void
2563 mvpp2_comphy_config(struct mvpp2_port *sc)
2564 {
2565 	int node, phys[2], lane, unit;
2566 	uint32_t mode;
2567 
2568 	if (OF_getpropintarray(sc->sc_node, "phys", phys, sizeof(phys)) !=
2569 	    sizeof(phys))
2570 		return;
2571 	node = OF_getnodebyphandle(phys[0]);
2572 	if (!node)
2573 		return;
2574 
2575 	lane = OF_getpropint(node, "reg", 0);
2576 	unit = phys[1];
2577 
2578 	switch (sc->sc_phy_mode) {
2579 	case PHY_MODE_XAUI:
2580 		mode = COMPHY_MODE(COMPHY_MODE_RXAUI) |
2581 		    COMPHY_UNIT(unit);
2582 		break;
2583 	case PHY_MODE_10GBASER:
2584 		mode = COMPHY_MODE(COMPHY_MODE_XFI) |
2585 		    COMPHY_SPEED(COMPHY_SPEED_10_3125G) |
2586 		    COMPHY_UNIT(unit);
2587 		break;
2588 	case PHY_MODE_2500BASEX:
2589 		mode = COMPHY_MODE(COMPHY_MODE_HS_SGMII) |
2590 		    COMPHY_SPEED(COMPHY_SPEED_3_125G) |
2591 		    COMPHY_UNIT(unit);
2592 		break;
2593 	case PHY_MODE_1000BASEX:
2594 	case PHY_MODE_SGMII:
2595 		mode = COMPHY_MODE(COMPHY_MODE_SGMII) |
2596 		    COMPHY_SPEED(COMPHY_SPEED_1_25G) |
2597 		    COMPHY_UNIT(unit);
2598 		break;
2599 	default:
2600 		return;
2601 	}
2602 
2603 	smc_call(COMPHY_SIP_POWER_ON, sc->sc->sc_ioh_paddr + COMPHY_BASE,
2604 	    lane, mode);
2605 }
2606 
/*
 * Configure the group-of-ports (GOP) glue logic via the system
 * controller regmap for the current phy mode, then reset and enable
 * this port's GOP block.  Does nothing when no regmap is present.
 */
void
mvpp2_gop_config(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc->sc_rm == NULL)
		return;

	if (sc->sc_phy_mode == PHY_MODE_RGMII ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
		/* GOP 0 has no RGMII support. */
		if (sc->sc_gop_id == 0)
			return;
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
		reg |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
		regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_CTRL0);
		if (sc->sc_gop_id == 2)
			reg |= GENCONF_CTRL0_PORT0_RGMII |
			    GENCONF_CTRL0_PORT1_RGMII;
		else if (sc->sc_gop_id == 3)
			reg |= GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write_4(sc->sc->sc_rm, GENCONF_CTRL0, reg);
	} else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
	    sc->sc_phy_mode == PHY_MODE_SGMII) {
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
		reg |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
		    GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
		regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);
		if (sc->sc_gop_id > 1) {
			/* Ports 2/3 share pins with RGMII; deselect it. */
			reg = regmap_read_4(sc->sc->sc_rm, GENCONF_CTRL0);
			if (sc->sc_gop_id == 2)
				reg &= ~GENCONF_CTRL0_PORT0_RGMII;
			else if (sc->sc_gop_id == 3)
				reg &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
			regmap_write_4(sc->sc->sc_rm, GENCONF_CTRL0, reg);
		}
	} else if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
		/* 10GBASE-R is only wired up on GOP 0. */
		if (sc->sc_gop_id != 0)
			return;
		reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
		reg &= ~MVPP22_XPCS_PCSMODE_MASK;
		reg &= ~MVPP22_XPCS_LANEACTIVE_MASK;
		reg |= 2 << MVPP22_XPCS_LANEACTIVE_OFFS;
		mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS40G_COMMON_CONTROL);
		reg &= ~MVPP22_MPCS_FORWARD_ERROR_CORRECTION_MASK;
		mvpp2_mpcs_write(sc, MVPP22_MPCS40G_COMMON_CONTROL, reg);
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
		reg &= ~MVPP22_MPCS_CLK_DIVISION_RATIO_MASK;
		reg |= MVPP22_MPCS_CLK_DIVISION_RATIO_DEFAULT;
		mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	} else
		return;

	/* Reset and enable this port's GOP block. */
	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL1);
	reg |= GENCONF_PORT_CTRL1_RESET(sc->sc_gop_id) |
	    GENCONF_PORT_CTRL1_EN(sc->sc_gop_id);
	regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL1, reg);

	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
	reg |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);

	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_SOFT_RESET1);
	reg |= GENCONF_SOFT_RESET1_GOP;
	regmap_write_4(sc->sc->sc_rm, GENCONF_SOFT_RESET1, reg);
}
2677 
/*
 * Bring the port down: stop the interface, quiesce the datapath,
 * flush and tear down all TX/RX queues and withdraw our MAC address
 * from the parser.
 */
void
mvpp2_down(struct mvpp2_port *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg;
	int i;

	/* Stop the periodic tick for this port. */
	timeout_del(&sc->sc_tick);

	/* Mark the interface down before touching the hardware. */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Quiesce egress, ingress and the MAC itself. */
	mvpp2_egress_disable(sc);
	mvpp2_ingress_disable(sc);
	mvpp2_port_disable(sc);

	/* XXX: single vector */
	mvpp2_interrupts_disable(sc, (1 << 0));
	mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id), 0);

	/* Assert the per-port TX flush while the TX queues are torn down. */
	reg = mvpp2_read(sc->sc, MVPP2_TX_PORT_FLUSH_REG);
	reg |= MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
	mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);

	for (i = 0; i < sc->sc_ntxq; i++)
		mvpp2_txq_hw_deinit(sc, &sc->sc_txqs[i]);

	/* Deassert the flush bit again now that the queues are gone. */
	reg &= ~MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
	mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);

	for (i = 0; i < sc->sc_nrxq; i++)
		mvpp2_rxq_hw_deinit(sc, &sc->sc_rxqs[i]);

	/* Stop accepting frames destined to our current MAC address. */
	mvpp2_prs_mac_da_accept(sc->sc, sc->sc_id, sc->sc_cur_lladdr, 0);
}
2714 
/*
 * Drain and release one TX queue: wait (bounded) for pending frames to
 * leave, clear the hardware ring registers, then free the descriptor
 * ring, the per-descriptor DMA maps and any mbufs still in flight.
 */
void
mvpp2_txq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int i, pending;
	uint32_t reg;

	/* Select the queue (indirect register access) and enable drain. */
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	reg = mvpp2_read(sc->sc, MVPP2_TXQ_PREF_BUF_REG);
	reg |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);

	/*
	 * the queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	i = 0;
	do {
		if (i >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			printf("%s: port %d: cleaning queue %d timed out\n",
			    sc->sc_dev.dv_xname, sc->sc_id, txq->log_id);
			break;
		}
		/* Poll roughly once per millisecond. */
		delay(1000);
		i++;

		pending = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG) &
		    MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	/* Leave drain mode again. */
	reg &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);

	/* Clear the scheduler counter and the ring address/size. */
	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG, 0);

	/* Release mbufs still held by the ring, then their DMA maps. */
	for (i = 0; i < MVPP2_NTXDESC; i++) {
		txb = &txq->buf[i];
		if (txb->mb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
			    txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->mb_map);
			m_freem(txb->mb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->mb_map);
	}

	mvpp2_dmamem_free(sc->sc, txq->ring);
	free(txq->buf, M_DEVBUF, sizeof(struct mvpp2_buf) *
	    MVPP2_NTXDESC);
}
2768 
/*
 * Tear down one RX queue: hand any received-but-unprocessed buffers
 * back to the hardware, clear the ring registers and free the ring.
 */
void
mvpp2_rxq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	uint32_t nrecv;

	/* Acknowledge outstanding descriptors so the queue is empty. */
	nrecv = mvpp2_rxq_received(sc, rxq->id);
	if (nrecv)
		mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);

	/* Clear status, then the ring setup (indirect via RXQ_NUM). */
	mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, 0);

	mvpp2_dmamem_free(sc->sc, rxq->ring);
}
2785 
2786 void
2787 mvpp2_rxq_long_pool_set(struct mvpp2_port *port, int lrxq, int pool)
2788 {
2789 	uint32_t val;
2790 	int prxq;
2791 
2792 	/* get queue physical ID */
2793 	prxq = port->sc_rxqs[lrxq].id;
2794 
2795 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
2796 	val &= ~MVPP2_RXQ_POOL_LONG_MASK;
2797 	val |= ((pool << MVPP2_RXQ_POOL_LONG_OFFS) & MVPP2_RXQ_POOL_LONG_MASK);
2798 
2799 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
2800 }
2801 
2802 void
2803 mvpp2_rxq_short_pool_set(struct mvpp2_port *port, int lrxq, int pool)
2804 {
2805 	uint32_t val;
2806 	int prxq;
2807 
2808 	/* get queue physical ID */
2809 	prxq = port->sc_rxqs[lrxq].id;
2810 
2811 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
2812 	val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
2813 	val |= ((pool << MVPP2_RXQ_POOL_SHORT_OFFS) & MVPP2_RXQ_POOL_SHORT_MASK);
2814 
2815 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
2816 }
2817 
/*
 * Update the RX filter: swap the parser's accept entry from the old
 * MAC address to the interface's current one.
 */
void
mvpp2_iff(struct mvpp2_port *sc)
{
	/* FIXME: multicast handling */

	/* Drop the old address first, then install the new one. */
	mvpp2_prs_mac_da_accept(sc->sc, sc->sc_id, sc->sc_cur_lladdr, 0);
	memcpy(sc->sc_cur_lladdr, sc->sc_lladdr, ETHER_ADDR_LEN);
	mvpp2_prs_mac_da_accept(sc->sc, sc->sc_id, sc->sc_cur_lladdr, 1);
}
2827 
/*
 * Allocate a single-segment, kernel-mapped, zeroed DMA buffer of
 * "size" bytes with the requested alignment.  Returns NULL on
 * failure.  The caller owns the result and releases it with
 * mvpp2_dmamem_free().
 */
struct mvpp2_dmamem *
mvpp2_dmamem_alloc(struct mvpp2_softc *sc, bus_size_t size, bus_size_t align)
{
	struct mvpp2_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(mdm->mdm_kva, size);

	return (mdm);

	/* Unwind in reverse order of acquisition. */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, 0);

	return (NULL);
}
2868 
/*
 * Release a buffer obtained from mvpp2_dmamem_alloc(): unmap the KVA,
 * free the DMA segment, destroy the map, then free the descriptor.
 */
void
mvpp2_dmamem_free(struct mvpp2_softc *sc, struct mvpp2_dmamem *mdm)
{
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, 0);
}
2877 
2878 struct mbuf *
2879 mvpp2_alloc_mbuf(struct mvpp2_softc *sc, bus_dmamap_t map)
2880 {
2881 	struct mbuf *m = NULL;
2882 
2883 	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
2884 	if (!m)
2885 		return (NULL);
2886 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2887 
2888 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
2889 		printf("%s: could not load mbuf DMA map", DEVNAME(sc));
2890 		m_freem(m);
2891 		return (NULL);
2892 	}
2893 
2894 	bus_dmamap_sync(sc->sc_dmat, map, 0,
2895 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
2896 
2897 	return (m);
2898 }
2899 
/*
 * Enable this port's RX/TX interrupts for the CPUs in "cpu_mask".
 */
void
mvpp2_interrupts_enable(struct mvpp2_port *port, int cpu_mask)
{
	mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
	    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
}
2906 
/*
 * Disable this port's RX/TX interrupts for the CPUs in "cpu_mask".
 */
void
mvpp2_interrupts_disable(struct mvpp2_port *port, int cpu_mask)
{
	mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
	    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
}
2913 
2914 int
2915 mvpp2_egress_port(struct mvpp2_port *port)
2916 {
2917 	return MVPP2_MAX_TCONT + port->sc_id;
2918 }
2919 
2920 int
2921 mvpp2_txq_phys(int port, int txq)
2922 {
2923 	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
2924 }
2925 
/*
 * Program the port's default egress scheduler and RX control settings.
 */
void
mvpp2_defaults_set(struct mvpp2_port *port)
{
	int val, queue;

	/* Select this port's scheduler bank (indirect access). */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Zero all per-queue token counters. */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Scheduler period in core clock ticks per microsecond. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PERIOD_REG, port->sc->sc_tclk /
	    (1000 * 1000));
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* set maximum_low_latency_packet_size value to 256 */
	mvpp2_write(port->sc, MVPP2_RX_CTRL_REG(port->sc_id),
	    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
	    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port, (0xf << 0));
}
2956 
2957 void
2958 mvpp2_ingress_enable(struct mvpp2_port *port)
2959 {
2960 	uint32_t val;
2961 	int lrxq, queue;
2962 
2963 	for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
2964 		queue = port->sc_rxqs[lrxq].id;
2965 		val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
2966 		val &= ~MVPP2_RXQ_DISABLE_MASK;
2967 		mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
2968 	}
2969 }
2970 
2971 void
2972 mvpp2_ingress_disable(struct mvpp2_port *port)
2973 {
2974 	uint32_t val;
2975 	int lrxq, queue;
2976 
2977 	for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
2978 		queue = port->sc_rxqs[lrxq].id;
2979 		val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
2980 		val |= MVPP2_RXQ_DISABLE_MASK;
2981 		mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
2982 	}
2983 }
2984 
2985 void
2986 mvpp2_egress_enable(struct mvpp2_port *port)
2987 {
2988 	struct mvpp2_tx_queue *txq;
2989 	uint32_t qmap;
2990 	int queue;
2991 
2992 	qmap = 0;
2993 	for (queue = 0; queue < port->sc_ntxq; queue++) {
2994 		txq = &port->sc_txqs[queue];
2995 
2996 		if (txq->descs != NULL) {
2997 			qmap |= (1 << queue);
2998 		}
2999 	}
3000 
3001 	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3002 	    mvpp2_egress_port(port));
3003 	mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
3004 }
3005 
/*
 * Disable egress scheduling for all active TX queues of this port and
 * wait (bounded) until the scheduler reports them stopped.
 */
void
mvpp2_egress_disable(struct mvpp2_port *port)
{
	uint32_t reg_data;
	int i;

	/* Select this port's scheduler bank (indirect access). */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));
	/* Issue a disable command for every currently enabled queue. */
	reg_data = (mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG)) &
	    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data)
		mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG,(reg_data <<
		    MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Poll roughly once per millisecond until no queue is enabled. */
	i = 0;
	do {
		if (i >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			printf("%s: tx stop timed out, status=0x%08x\n",
			    port->sc_dev.dv_xname, reg_data);
			break;
		}
		delay(1000);
		i++;
		reg_data = mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
3032 
/*
 * Enable the port's MAC.  GOP port 0 in 10GBASE-R/XAUI mode uses the
 * XLG MAC; everything else uses the GMAC.
 */
void
mvpp2_port_enable(struct mvpp2_port *port)
{
	uint32_t val;

	if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
	    port->sc_phy_mode == PHY_MODE_XAUI)) {
		val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
		val |= MV_XLG_MAC_CTRL0_PORTEN_MASK;
		/* Also enable MIB counters (clear the disable bit). */
		val &= ~MV_XLG_MAC_CTRL0_MIBCNTDIS_MASK;
		mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
	} else {
		val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
	}
}
3051 
/*
 * Disable the port's MAC.  The XLG MAC is disabled first when in use;
 * the GMAC enable bit is always cleared.
 */
void
mvpp2_port_disable(struct mvpp2_port *port)
{
	uint32_t val;

	if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
	    port->sc_phy_mode == PHY_MODE_XAUI)) {
		val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
		val &= ~MV_XLG_MAC_CTRL0_PORTEN_MASK;
		mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
	}

	val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_PORT_EN_MASK;
	mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
}
3068 
3069 int
3070 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
3071 {
3072 	uint32_t val = mvpp2_read(port->sc, MVPP2_RXQ_STATUS_REG(rxq_id));
3073 
3074 	return val & MVPP2_RXQ_OCCUPIED_MASK;
3075 }
3076 
3077 void
3078 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
3079     int used_count, int free_count)
3080 {
3081 	uint32_t val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
3082 	mvpp2_write(port->sc, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
3083 }
3084 
3085 void
3086 mvpp2_rxq_offset_set(struct mvpp2_port *port, int prxq, int offset)
3087 {
3088 	uint32_t val;
3089 
3090 	offset = offset >> 5;
3091 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
3092 	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
3093 	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
3094 	    MVPP2_RXQ_PACKET_OFFSET_MASK);
3095 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
3096 }
3097 
/*
 * Program the egress scheduler MTU for this port and make sure the
 * port and per-queue token bucket sizes are at least as large.
 */
void
mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	uint32_t val, size, mtu;
	int txq;

	mtu = MCLBYTES * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong token bucket update: set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* indirect access to registers */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));

	/* set MTU */
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXqs token size must be larger that MTU */
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	/* Grow each queue's token size as needed, leaving larger ones be. */
	for (txq = 0; txq < port->sc_ntxq; txq++) {
		val = mvpp2_read(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), val);
		}
	}
}
3143 
3144 void
3145 mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3146     uint32_t pkts)
3147 {
3148 	rxq->pkts_coal =
3149 	    pkts <= MVPP2_OCCUPIED_THRESH_MASK ?
3150 	    pkts : MVPP2_OCCUPIED_THRESH_MASK;
3151 
3152 	mvpp2_write(port->sc, MVPP2_RXQ_NUM_REG, rxq->id);
3153 	mvpp2_write(port->sc, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal);
3154 
3155 }
3156 
3157 void
3158 mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
3159     uint32_t pkts)
3160 {
3161 	txq->done_pkts_coal =
3162 	    pkts <= MVPP2_TRANSMITTED_THRESH_MASK ?
3163 	    pkts : MVPP2_TRANSMITTED_THRESH_MASK;
3164 
3165 	mvpp2_write(port->sc, MVPP2_TXQ_NUM_REG, txq->id);
3166 	mvpp2_write(port->sc, MVPP2_TXQ_THRESH_REG,
3167 	    txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET);
3168 }
3169 
3170 void
3171 mvpp2_rx_time_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3172     uint32_t usec)
3173 {
3174 	uint32_t val;
3175 
3176 	val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
3177 	mvpp2_write(port->sc, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
3178 
3179 	rxq->time_coal = usec;
3180 }
3181 
3182 void
3183 mvpp2_tx_time_coal_set(struct mvpp2_port *port, uint32_t usec)
3184 {
3185 	uint32_t val;
3186 
3187 	val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
3188 	mvpp2_write(port->sc, MVPP2_ISR_TX_THRESHOLD_REG(port->sc_id), val);
3189 
3190 	port->sc_tx_time_coal = usec;
3191 }
3192 
3193 void
3194 mvpp2_prs_shadow_ri_set(struct mvpp2_softc *sc, int index,
3195     uint32_t ri, uint32_t ri_mask)
3196 {
3197 	sc->sc_prs_shadow[index].ri_mask = ri_mask;
3198 	sc->sc_prs_shadow[index].ri = ri;
3199 }
3200 
3201 void
3202 mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
3203 {
3204 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
3205 
3206 	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
3207 	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
3208 }
3209 
3210 void
3211 mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, uint32_t port, int add)
3212 {
3213 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3214 
3215 	if (add)
3216 		pe->tcam.byte[enable_off] &= ~(1 << port);
3217 	else
3218 		pe->tcam.byte[enable_off] |= (1 << port);
3219 }
3220 
3221 void
3222 mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, uint32_t port_mask)
3223 {
3224 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3225 	uint8_t mask = MVPP2_PRS_PORT_MASK;
3226 
3227 	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
3228 	pe->tcam.byte[enable_off] &= ~mask;
3229 	pe->tcam.byte[enable_off] |= ~port_mask & MVPP2_PRS_PORT_MASK;
3230 }
3231 
3232 uint32_t
3233 mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
3234 {
3235 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3236 
3237 	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
3238 }
3239 
3240 void
3241 mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, uint32_t offs,
3242     uint8_t byte, uint8_t enable)
3243 {
3244 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
3245 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
3246 }
3247 
3248 void
3249 mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, uint32_t offs,
3250     uint8_t *byte, uint8_t *enable)
3251 {
3252 	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
3253 	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
3254 }
3255 
3256 int
3257 mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offset, uint16_t data)
3258 {
3259 	int byte_offset = MVPP2_PRS_TCAM_DATA_BYTE(offset);
3260 	uint16_t tcam_data;
3261 
3262 	tcam_data = (pe->tcam.byte[byte_offset + 1] << 8) |
3263 	    pe->tcam.byte[byte_offset];
3264 	if (tcam_data != data)
3265 		return 0;
3266 
3267 	return 1;
3268 }
3269 
3270 void
3271 mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t enable)
3272 {
3273 	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
3274 
3275 	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
3276 		if (!(enable & (i << 1)))
3277 			continue;
3278 
3279 		if (bits & (i << 1))
3280 			pe->tcam.byte[ai_idx] |= 1 << i;
3281 		else
3282 			pe->tcam.byte[ai_idx] &= ~(1 << i);
3283 	}
3284 
3285 	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
3286 }
3287 
3288 int
3289 mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
3290 {
3291 	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
3292 }
3293 
3294 void
3295 mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *pe, uint32_t data_offset,
3296     uint32_t *word, uint32_t *enable)
3297 {
3298 	int index, position;
3299 	uint8_t byte, mask;
3300 
3301 	for (index = 0; index < 4; index++) {
3302 		position = (data_offset * sizeof(int)) + index;
3303 		mvpp2_prs_tcam_data_byte_get(pe, position, &byte, &mask);
3304 		((uint8_t *)word)[index] = byte;
3305 		((uint8_t *)enable)[index] = mask;
3306 	}
3307 }
3308 
3309 void
3310 mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, uint32_t offs,
3311     uint16_t ether_type)
3312 {
3313 	mvpp2_prs_tcam_data_byte_set(pe, offs + 0, ether_type >> 8, 0xff);
3314 	mvpp2_prs_tcam_data_byte_set(pe, offs + 1, ether_type & 0xff, 0xff);
3315 }
3316 
3317 void
3318 mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
3319 {
3320 	pe->sram.byte[bit / 8] |= (val << (bit % 8));
3321 }
3322 
3323 void
3324 mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
3325 {
3326 	pe->sram.byte[bit / 8] &= ~(val << (bit % 8));
3327 }
3328 
3329 void
3330 mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
3331 {
3332 	int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
3333 	int i;
3334 
3335 	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
3336 		if (!(mask & BIT(i)))
3337 			continue;
3338 
3339 		if (bits & BIT(i))
3340 			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
3341 		else
3342 			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
3343 
3344 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
3345 	}
3346 }
3347 
3348 int
3349 mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
3350 {
3351 	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
3352 }
3353 
3354 void
3355 mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
3356 {
3357 	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
3358 	int i;
3359 
3360 	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
3361 		if (!(mask & BIT(i)))
3362 			continue;
3363 
3364 		if (bits & BIT(i))
3365 			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
3366 		else
3367 			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
3368 
3369 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
3370 	}
3371 }
3372 
3373 int
3374 mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
3375 {
3376 	uint8_t bits;
3377 	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
3378 	int ai_en_off = ai_off + 1;
3379 	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
3380 
3381 	bits = (pe->sram.byte[ai_off] >> ai_shift) |
3382 	    (pe->sram.byte[ai_en_off] << (8 - ai_shift));
3383 
3384 	return bits;
3385 }
3386 
/*
 * Program the SRAM shift operation: a signed shift of "shift" bytes
 * with operation selector "op".  The sign is stored in a separate
 * bit; the shift field holds the magnitude.
 */
void
mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, uint32_t op)
{
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = -shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
	    shift & MVPP2_PRS_SRAM_SHIFT_MASK;
	/* Replace the shift op-select field, then clear the base-select bit. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
3404 
/*
 * Program the SRAM user-defined-field (UDF) offset: a signed byte
 * offset of type "type" with operation selector "op".  Both the UDF
 * and op-select fields straddle byte boundaries, so the high parts
 * are patched in by hand after the bit helpers set the low parts.
 */
void
mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, uint32_t type, int offset,
    uint32_t op)
{
	uint8_t udf_byte, udf_byte_offset;
	uint8_t op_sel_udf_byte, op_sel_udf_byte_offset;

	/* Byte index and bit shift of the spill-over (high) parts. */
	udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
	    MVPP2_PRS_SRAM_UDF_BITS);
	udf_byte_offset = (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8));
	op_sel_udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
	    MVPP2_PRS_SRAM_OP_SEL_UDF_BITS);
	op_sel_udf_byte_offset = (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8));

	/* Sign bit is separate; the field holds the magnitude. */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = -offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Low part via the bit helpers, high part patched directly. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
	    MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[udf_byte] &= ~(MVPP2_PRS_SRAM_UDF_MASK >> udf_byte_offset);
	pe->sram.byte[udf_byte] |= (offset >> udf_byte_offset);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
	    MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
	    MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
	pe->sram.byte[op_sel_udf_byte] &= ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
	    op_sel_udf_byte_offset);
	pe->sram.byte[op_sel_udf_byte] |= (op >> op_sel_udf_byte_offset);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
3442 
3443 void
3444 mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
3445 {
3446 	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
3447 
3448 	mvpp2_prs_sram_bits_clear(pe, sram_next_off, MVPP2_PRS_SRAM_NEXT_LU_MASK);
3449 	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
3450 }
3451 
3452 void
3453 mvpp2_prs_shadow_set(struct mvpp2_softc *sc, int index, uint32_t lu)
3454 {
3455 	sc->sc_prs_shadow[index].valid = 1;
3456 	sc->sc_prs_shadow[index].lu = lu;
3457 }
3458 
/*
 * Write parser entry "pe" to hardware: all TCAM words followed by all
 * SRAM words.  The invalid bit is cleared first so the entry becomes
 * active.  Returns EINVAL for an out-of-range index, 0 on success.
 */
int
mvpp2_prs_hw_write(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return EINVAL;

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
	/* Indirect access: select the index, then burst out the words. */
	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
	mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
3477 
/*
 * Read parser entry "tid" from hardware into "pe".  Returns EINVAL if
 * the index is out of range or the entry is marked invalid, 0 on
 * success.
 */
int
mvpp2_prs_hw_read(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe, int tid)
{
	int i;

	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return EINVAL;

	memset(pe, 0, sizeof(*pe));
	pe->index = tid;

	/* Check the invalid bit before bothering with the full read. */
	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] =
	    mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return EINVAL;
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] =
		    mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(i));

	mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] =
		    mvpp2_read(sc, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
3505 
/*
 * Find the parser entry whose SRAM AI field carries flow ID "flow".
 * Scans the FLOWS lookup entries from the top down.  Returns the
 * entry index, or -1 if not found.
 */
int
mvpp2_prs_flow_find(struct mvpp2_softc *sc, int flow)
{
	struct mvpp2_prs_entry pe;
	uint8_t bits;
	int tid;

	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		if (!sc->sc_prs_shadow[tid].valid ||
		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		mvpp2_prs_hw_read(sc, &pe, tid);
		bits = mvpp2_prs_sram_ai_get(&pe);

		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return tid;
	}

	return -1;
}
3527 
3528 int
3529 mvpp2_prs_tcam_first_free(struct mvpp2_softc *sc, uint8_t start, uint8_t end)
3530 {
3531 	uint8_t tmp;
3532 	int tid;
3533 
3534 	if (start > end) {
3535 		tmp = end;
3536 		end = start;
3537 		start = tmp;
3538 	}
3539 
3540 	for (tid = start; tid <= end; tid++) {
3541 		if (!sc->sc_prs_shadow[tid].valid)
3542 			return tid;
3543 	}
3544 
3545 	return -1;
3546 }
3547 
/*
 * Add or remove "port" from the drop-all parser entry, creating the
 * entry (with an empty port map) on first use.
 */
void
mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *sc, uint32_t port, int add)
{
	struct mvpp2_prs_entry pe;

	if (sc->sc_prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		mvpp2_prs_hw_read(sc, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* First use: build the entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
		    MVPP2_PRS_RI_DROP_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3570 
/*
 * Add or remove "port" from the unicast promiscuous parser entry,
 * creating the entry (with an empty port map) on first use.
 */
void
mvpp2_prs_mac_promisc_set(struct mvpp2_softc *sc, uint32_t port, int add)
{
	struct mvpp2_prs_entry pe;

	if (sc->sc_prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		mvpp2_prs_hw_read(sc, &pe, MVPP2_PE_MAC_PROMISCUOUS);
	} else {
		/* First use: build the entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
		    MVPP2_PRS_RI_L2_CAST_MASK);
		/* Skip over both MAC addresses for the next lookup. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_tcam_port_map_set(&pe, 0);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3594 
/*
 * Add or remove "port" from one of the multicast parser entries
 * ("index" selects the all-multicast or IP-multicast entry), creating
 * the entry on first use.
 */
void
mvpp2_prs_mac_multi_set(struct mvpp2_softc *sc, uint32_t port, uint32_t index, int add)
{
	struct mvpp2_prs_entry pe;
	uint8_t da_mc;

	/* First DA byte to match: 0x01 for MC_ALL, 0x33 otherwise. */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (sc->sc_prs_shadow[index].valid) {
		mvpp2_prs_hw_read(sc, &pe, index);
	} else {
		/* First use: build the entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
		    MVPP2_PRS_RI_L2_CAST_MASK);
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
		/* Skip over both MAC addresses for the next lookup. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_tcam_port_map_set(&pe, 0);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3622 
/*
 * Add or remove "port" from the DSA/EDSA tag parser entry selected by
 * "tagged"/"extend", creating the entry on first use.  EDSA tags are
 * 8 bytes, DSA tags 4.
 */
void
mvpp2_prs_dsa_tag_set(struct mvpp2_softc *sc, uint32_t port, int add,
    int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (sc->sc_prs_shadow[tid].valid) {
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		/* First use: build the entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		/* Skip over the tag itself for the next lookup. */
		mvpp2_prs_sram_shift_set(&pe, shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* Match the tagged bit and continue at VLAN. */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Untagged: record "no VLAN" and continue at L2. */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3664 
/*
 * Add or remove "port" from the ethertype-prefixed DSA/EDSA tag
 * parser entry selected by "tagged"/"extend", creating the entry on
 * first use.  These entries match an 0xdada ethertype in front of the
 * tag.
 */
void
mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *sc, uint32_t port,
    int add, int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (sc->sc_prs_shadow[tid].valid) {
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		/* First use: build the entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		/* Match the 0xdada ethertype followed by a zero word. */
		mvpp2_prs_match_etype(&pe, 0, 0xdada);
		mvpp2_prs_match_etype(&pe, 2, 0);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
		    MVPP2_PRS_RI_DSA_MASK);
		/* Skip the MAC addresses plus the tag for the next lookup. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN + shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* Match the tagged bit and continue at VLAN. */
			mvpp2_prs_tcam_data_byte_set(&pe,
			    MVPP2_ETH_TYPE_LEN + 2 + 3,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Untagged: record "no VLAN" and continue at L2. */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3713 
3714 struct mvpp2_prs_entry *
3715 mvpp2_prs_vlan_find(struct mvpp2_softc *sc, uint16_t tpid, int ai)
3716 {
3717 	struct mvpp2_prs_entry *pe;
3718 	uint32_t ri_bits, ai_bits;
3719 	int match, tid;
3720 
3721 	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
3722 	if (pe == NULL)
3723 		return NULL;
3724 
3725 	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
3726 
3727 	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3728 		if (!sc->sc_prs_shadow[tid].valid ||
3729 		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
3730 			continue;
3731 		mvpp2_prs_hw_read(sc, pe, tid);
3732 		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid));
3733 		if (!match)
3734 			continue;
3735 		ri_bits = mvpp2_prs_sram_ri_get(pe);
3736 		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
3737 		ai_bits = mvpp2_prs_tcam_ai_get(pe);
3738 		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
3739 		if (ai != ai_bits)
3740 			continue;
3741 		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
3742 		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
3743 			return pe;
3744 	}
3745 
3746 	free(pe, M_TEMP, sizeof(*pe));
3747 	return NULL;
3748 }
3749 
/*
 * Add a single/triple-VLAN parser entry for the given tpid and AI, or
 * update the port map of an existing one.  Returns 0 on success, a
 * negative value if no free TCAM entry is available, ENOMEM on
 * allocation failure, or EINVAL when the entry cannot be placed before
 * the double-VLAN entries.
 */
int
mvpp2_prs_vlan_add(struct mvpp2_softc *sc, uint16_t tpid, int ai, uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_bits;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(sc, tpid, ai);
	if (pe == NULL) {
		/* allocate a TID scanning downwards from the end */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_LAST_FREE_TID,
		    MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		/* get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		    tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			mvpp2_prs_hw_read(sc, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		/* single/triple entries must come after all double entries */
		if (tid <= tid_aux) {
			ret = EINVAL;
			goto error;
		}

		/* pe was clobbered by the hw reads above; start clean */
		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		mvpp2_prs_match_etype(pe, 0, tpid);
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* skip over the VLAN tag for the next lookup */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
				   MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		} else {
			/* entry chained from a double-vlan entry: triple */
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* new or existing entry: set its port map and commit */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
3814 
3815 int
3816 mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *sc)
3817 {
3818 	int i;
3819 
3820 	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++)
3821 		if (!sc->sc_prs_double_vlans[i])
3822 			return i;
3823 
3824 	return -1;
3825 }
3826 
3827 struct mvpp2_prs_entry *
3828 mvpp2_prs_double_vlan_find(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2)
3829 {
3830 	struct mvpp2_prs_entry *pe;
3831 	uint32_t ri_mask;
3832 	int match, tid;
3833 
3834 	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
3835 	if (pe == NULL)
3836 		return NULL;
3837 
3838 	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
3839 
3840 	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3841 		if (!sc->sc_prs_shadow[tid].valid ||
3842 		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
3843 			continue;
3844 
3845 		mvpp2_prs_hw_read(sc, pe, tid);
3846 		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid1)) &&
3847 		    mvpp2_prs_tcam_data_cmp(pe, 4, swap16(tpid2));
3848 		if (!match)
3849 			continue;
3850 		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
3851 		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
3852 			return pe;
3853 	}
3854 
3855 	free(pe, M_TEMP, sizeof(*pe));
3856 	return NULL;
3857 }
3858 
/*
 * Add a double-VLAN parser entry for the given tpid pair, or update
 * the port map of an existing one.  Returns 0 on success, a negative
 * value if no free TCAM entry or AI value is available, ENOMEM on
 * allocation failure, or ERANGE when the entry cannot be placed before
 * the single/triple-VLAN entries.
 */
int
mvpp2_prs_double_vlan_add(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2,
    uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;
	uint32_t ri_bits;

	pe = mvpp2_prs_double_vlan_find(sc, tpid1, tpid2);
	if (pe == NULL) {
		/* allocate a TID scanning upwards from the start */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
		    MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		ai = mvpp2_prs_double_vlan_ai_free_get(sc);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* find the first single/triple vlan entry, if any */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		    tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			mvpp2_prs_hw_read(sc, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		/* double entries must come before single/triple entries */
		if (tid >= tid_aux) {
			ret = ERANGE;
			goto error;
		}

		/* pe was clobbered by the hw reads above; start clean */
		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		sc->sc_prs_double_vlans[ai] = 1;
		/* match the outer tpid at offset 0 and inner at offset 4 */
		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* skip both VLAN tags for the next lookup */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
		    MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
		    MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* new or existing entry: set its port map and commit */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
3925 
/*
 * Install the two IPv4 L4-protocol parser entries for the given
 * protocol: a first entry tagged with the IP-fragmentation result bits
 * and a second one that additionally requires TCAM data bytes 2 and 3
 * to be zero.  Only TCP, UDP and IGMP are accepted.  Returns 0 on
 * success, EINVAL for other protocols, or a negative value if no free
 * TCAM entry is available.
 */
int
mvpp2_prs_ip4_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
    uint32_t ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/* first entry: matched protocol with the frag result bits set */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* point the L4 offset at the end of the fixed IPv4 header */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK, ri_mask |
	    MVPP2_PRS_RI_IP_FRAG_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/*
	 * Second entry: reuse pe, clearing only the result-info words
	 * and additionally matching zero in data bytes 2 and 3
	 * (presumably the fragment-offset field — the frag result bits
	 * are not set here).
	 */
	pe.index = tid;
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
3975 
3976 int
3977 mvpp2_prs_ip4_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
3978 {
3979 	struct mvpp2_prs_entry pe;
3980 	int mask, tid;
3981 
3982 	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
3983 	    MVPP2_PE_LAST_FREE_TID);
3984 	if (tid < 0)
3985 		return tid;
3986 
3987 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3988 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3989 	pe.index = tid;
3990 
3991 	switch (l3_cast) {
3992 	case MVPP2_PRS_L3_MULTI_CAST:
3993 		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
3994 		    MVPP2_PRS_IPV4_MC_MASK);
3995 		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
3996 		    MVPP2_PRS_RI_L3_ADDR_MASK);
3997 		break;
3998 	case  MVPP2_PRS_L3_BROAD_CAST:
3999 		mask = MVPP2_PRS_IPV4_BC_MASK;
4000 		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
4001 		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
4002 		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
4003 		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
4004 		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
4005 		    MVPP2_PRS_RI_L3_ADDR_MASK);
4006 		break;
4007 	default:
4008 		return EINVAL;
4009 	}
4010 
4011 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
4012 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
4013 	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
4014 	    MVPP2_PRS_IPV4_DIP_AI_BIT);
4015 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
4016 	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
4017 	mvpp2_prs_hw_write(sc, &pe);
4018 
4019 	return 0;
4020 }
4021 
/*
 * Install the IPv6 L4-protocol parser entry for the given protocol.
 * Only TCP, UDP, ICMPv6 and IP-in-IP are accepted.  Returns 0 on
 * success, EINVAL for other protocols, or a negative value if no free
 * TCAM entry is available.
 */
int
mvpp2_prs_ip6_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
    uint32_t ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	/* terminate the lookup chain at the flows LU */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	/* point the L4 offset relative to the fixed IPv6 header */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	/* match the next-header byte; only without extension headers */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4055 
/*
 * Install the IPv6 multicast classification parser entry (the only
 * cast type supported for IPv6).  Returns 0 on success, EINVAL for any
 * other cast type, or a negative value if no free TCAM entry is
 * available.
 */
int
mvpp2_prs_ip6_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	/* loop back into the IP6 LU for further parsing */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* shift backwards so the next lookup re-reads the header */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* match the IPv6 multicast prefix in the first byte */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
	    MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4087 
4088 int
4089 mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, const uint8_t *da,
4090     uint8_t *mask)
4091 {
4092 	uint8_t tcam_byte, tcam_mask;
4093 	int index;
4094 
4095 	for (index = 0; index < ETHER_ADDR_LEN; index++) {
4096 		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte,
4097 		    &tcam_mask);
4098 		if (tcam_mask != mask[index])
4099 			return 0;
4100 		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
4101 			return 0;
4102 	}
4103 
4104 	return 1;
4105 }
4106 
4107 struct mvpp2_prs_entry *
4108 mvpp2_prs_mac_da_range_find(struct mvpp2_softc *sc, int pmap, const uint8_t *da,
4109     uint8_t *mask, int udf_type)
4110 {
4111 	struct mvpp2_prs_entry *pe;
4112 	int tid;
4113 
4114 	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
4115 	if (pe == NULL)
4116 		return NULL;
4117 
4118 	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
4119 	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID;
4120 	    tid++) {
4121 		uint32_t entry_pmap;
4122 
4123 		if (!sc->sc_prs_shadow[tid].valid ||
4124 		    (sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
4125 		    (sc->sc_prs_shadow[tid].udf != udf_type))
4126 			continue;
4127 
4128 		mvpp2_prs_hw_read(sc, pe, tid);
4129 		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
4130 		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
4131 		    entry_pmap == pmap)
4132 			return pe;
4133 	}
4134 
4135 	free(pe, M_TEMP, sizeof(*pe));
4136 	return NULL;
4137 }
4138 
4139 int
4140 mvpp2_prs_mac_da_accept(struct mvpp2_softc *sc, int port_id, const uint8_t *da,
4141     int add)
4142 {
4143 	struct mvpp2_prs_entry *pe;
4144 	uint32_t pmap, len, ri;
4145 	uint8_t mask[ETHER_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
4146 	int tid;
4147 
4148 	pe = mvpp2_prs_mac_da_range_find(sc, (1 << port_id), da, mask,
4149 	    MVPP2_PRS_UDF_MAC_DEF);
4150 	if (pe == NULL) {
4151 		if (!add)
4152 			return 0;
4153 
4154 		for (tid = MVPP2_PE_FIRST_FREE_TID; tid <=
4155 		    MVPP2_PE_LAST_FREE_TID; tid++) {
4156 			if (sc->sc_prs_shadow[tid].valid &&
4157 			    (sc->sc_prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
4158 			    (sc->sc_prs_shadow[tid].udf == MVPP2_PRS_UDF_MAC_RANGE))
4159 				break;
4160 		}
4161 
4162 		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID, tid - 1);
4163 		if (tid < 0)
4164 			return tid;
4165 
4166 		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
4167 		if (pe == NULL)
4168 			return ENOMEM;
4169 
4170 		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
4171 		pe->index = tid;
4172 		mvpp2_prs_tcam_port_map_set(pe, 0);
4173 	}
4174 
4175 	mvpp2_prs_tcam_port_set(pe, port_id, add);
4176 
4177 	/* invalidate the entry if no ports are left enabled */
4178 	pmap = mvpp2_prs_tcam_port_map_get(pe);
4179 	if (pmap == 0) {
4180 		if (add) {
4181 			free(pe, M_TEMP, sizeof(*pe));
4182 			return -1;
4183 		}
4184 		mvpp2_prs_hw_inv(sc, pe->index);
4185 		sc->sc_prs_shadow[pe->index].valid = 0;
4186 		free(pe, M_TEMP, sizeof(*pe));
4187 		return 0;
4188 	}
4189 
4190 	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
4191 
4192 	len = ETHER_ADDR_LEN;
4193 	while (len--)
4194 		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
4195 
4196 	if (ETHER_IS_BROADCAST(da))
4197 		ri = MVPP2_PRS_RI_L2_BCAST;
4198 	else if (ETHER_IS_MULTICAST(da))
4199 		ri = MVPP2_PRS_RI_L2_MCAST;
4200 	else
4201 		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
4202 
4203 	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
4204 	    MVPP2_PRS_RI_MAC_ME_MASK);
4205 	mvpp2_prs_shadow_ri_set(sc, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
4206 	    MVPP2_PRS_RI_MAC_ME_MASK);
4207 	mvpp2_prs_sram_shift_set(pe, 2 * ETHER_ADDR_LEN,
4208 	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
4209 	sc->sc_prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
4210 	mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_MAC);
4211 	mvpp2_prs_hw_write(sc, pe);
4212 
4213 	free(pe, M_TEMP, sizeof(*pe));
4214 	return 0;
4215 }
4216 
4217 int
4218 mvpp2_prs_tag_mode_set(struct mvpp2_softc *sc, int port_id, int type)
4219 {
4220 	switch (type) {
4221 	case MVPP2_TAG_TYPE_EDSA:
4222 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
4223 		    MVPP2_PRS_EDSA);
4224 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
4225 		    MVPP2_PRS_EDSA);
4226 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4227 		    MVPP2_PRS_DSA);
4228 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4229 		    MVPP2_PRS_DSA);
4230 		break;
4231 	case MVPP2_TAG_TYPE_DSA:
4232 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
4233 		    MVPP2_PRS_DSA);
4234 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
4235 		    MVPP2_PRS_DSA);
4236 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4237 		    MVPP2_PRS_EDSA);
4238 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4239 		    MVPP2_PRS_EDSA);
4240 		break;
4241 	case MVPP2_TAG_TYPE_MH:
4242 	case MVPP2_TAG_TYPE_NONE:
4243 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4244 		    MVPP2_PRS_DSA);
4245 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4246 		    MVPP2_PRS_DSA);
4247 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4248 		    MVPP2_PRS_EDSA);
4249 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4250 		    MVPP2_PRS_EDSA);
4251 		break;
4252 	default:
4253 		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
4254 			return EINVAL;
4255 		break;
4256 	}
4257 
4258 	return 0;
4259 }
4260 
/*
 * Install (or refresh) the default flow parser entry for a port,
 * tagging its traffic with the port's flow ID and enabling the port in
 * the entry's map.  Returns 0 on success or a negative value if no
 * free TCAM entry is available.
 */
int
mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_flow_find(port->sc, port->sc_id);
	if (tid < 0) {
		/* no entry yet: allocate one from the end of the range */
		tid = mvpp2_prs_tcam_first_free(port->sc,
		    MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe.index = tid;
		/* record the port id as the flow id and finish the lookup */
		mvpp2_prs_sram_ai_update(&pe, port->sc_id,
		    MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
		mvpp2_prs_shadow_set(port->sc, pe.index, MVPP2_PRS_LU_FLOWS);
	} else {
		/* reuse the existing entry */
		mvpp2_prs_hw_read(port->sc, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->sc_id));
	mvpp2_prs_hw_write(port->sc, &pe);
	return 0;
}
4290 
/*
 * Write one classifier flow table entry: select the row via the index
 * register, then store its three data words.
 */
void
mvpp2_cls_flow_write(struct mvpp2_softc *sc, struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(sc, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
4299 
4300 void
4301 mvpp2_cls_lookup_write(struct mvpp2_softc *sc, struct mvpp2_cls_lookup_entry *le)
4302 {
4303 	uint32_t val;
4304 
4305 	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
4306 	mvpp2_write(sc, MVPP2_CLS_LKP_INDEX_REG, val);
4307 	mvpp2_write(sc, MVPP2_CLS_LKP_TBL_REG, le->data);
4308 }
4309 
4310 void
4311 mvpp2_cls_init(struct mvpp2_softc *sc)
4312 {
4313 	struct mvpp2_cls_lookup_entry le;
4314 	struct mvpp2_cls_flow_entry fe;
4315 	int index;
4316 
4317 	mvpp2_write(sc, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
4318 	memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS);
4319 	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
4320 		fe.index = index;
4321 		mvpp2_cls_flow_write(sc, &fe);
4322 	}
4323 	le.data = 0;
4324 	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
4325 		le.lkpid = index;
4326 		le.way = 0;
4327 		mvpp2_cls_lookup_write(sc, &le);
4328 		le.way = 1;
4329 		mvpp2_cls_lookup_write(sc, &le);
4330 	}
4331 }
4332 
/*
 * Configure the classifier lookup entry for a port: select way 0,
 * steer packets to the port's first receive queue and disable the
 * classification engines for it.
 */
void
mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	uint32_t val;

	/* set way for the port */
	val = mvpp2_read(port->sc, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->sc_id);
	mvpp2_write(port->sc, MVPP2_CLS_PORT_WAY_REG, val);

	/*
	 * pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->sc_id;
	le.way = 0;
	le.data = 0;

	/* set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= (port->sc_id * 32);

	/* disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* update lookup ID table entry */
	mvpp2_cls_lookup_write(port->sc, &le);
}
4362 
4363 void
4364 mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
4365 {
4366 	mvpp2_write(port->sc, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->sc_id),
4367 	    (port->sc_id * 32) & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
4368 }
4369