xref: /openbsd-src/sys/dev/fdt/if_mvpp.c (revision fcde59b201a29a2b4570b00b71e7aa25d61cb5c1)
1 /*	$OpenBSD: if_mvpp.c,v 1.44 2020/12/12 11:48:52 jan Exp $	*/
2 /*
3  * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
4  * Copyright (c) 2017, 2020 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 /*
19  * Copyright (C) 2016 Marvell International Ltd.
20  *
21  * Marvell BSD License Option
22  *
23  * If you received this File from Marvell, you may opt to use, redistribute
24  * and/or modify this File under the following licensing terms.
25  * Redistribution and use in source and binary forms, with or without
26  * modification, are permitted provided that the following conditions are met:
27  *
28  *   * Redistributions of source code must retain the above copyright notice,
29  *     this list of conditions and the following disclaimer.
30  *
31  *   * Redistributions in binary form must reproduce the above copyright
32  *     notice, this list of conditions and the following disclaimer in the
33  *     documentation and/or other materials provided with the distribution.
34  *
35  *   * Neither the name of Marvell nor the names of its contributors may be
36  *     used to endorse or promote products derived from this software without
37  *     specific prior written permission.
38  *
39  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
40  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
43  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
44  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
45  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
46  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
47  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
48  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
49  * POSSIBILITY OF SUCH DAMAGE.
50  */
51 
52 #include "bpfilter.h"
53 
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/device.h>
57 #include <sys/kernel.h>
58 #include <sys/malloc.h>
59 #include <sys/mbuf.h>
60 #include <sys/queue.h>
61 #include <sys/socket.h>
62 #include <sys/sockio.h>
63 #include <sys/timeout.h>
64 
65 #include <uvm/uvm_extern.h>
66 
67 #include <machine/cpufunc.h>
68 #include <machine/bus.h>
69 #include <machine/fdt.h>
70 
71 #include <net/if.h>
72 #include <net/if_media.h>
73 #include <net/ppp_defs.h>
74 
75 #include <dev/ofw/openfirm.h>
76 #include <dev/ofw/ofw_clock.h>
77 #include <dev/ofw/ofw_gpio.h>
78 #include <dev/ofw/ofw_misc.h>
79 #include <dev/ofw/ofw_pinctrl.h>
80 #include <dev/ofw/ofw_regulator.h>
81 #include <dev/ofw/fdt.h>
82 
83 #include <dev/mii/mii.h>
84 #include <dev/mii/miivar.h>
85 
86 #if NBPFILTER > 0
87 #include <net/bpf.h>
88 #endif
89 
90 #include <netinet/in.h>
91 #include <netinet/ip.h>
92 #include <netinet/if_ether.h>
93 
94 #include <netinet6/in6_var.h>
95 #include <netinet/ip6.h>
96 
97 #include <dev/fdt/if_mvppreg.h>
98 
99 struct mvpp2_buf {
100 	bus_dmamap_t		mb_map;
101 	struct mbuf		*mb_m;
102 };
103 
104 #define MVPP2_NTXDESC	512
105 #define MVPP2_NTXSEGS	16
106 #define MVPP2_NRXDESC	512
107 
108 struct mvpp2_bm_pool {
109 	struct mvpp2_dmamem	*bm_mem;
110 	struct mvpp2_buf	*rxbuf;
111 	uint32_t		*freelist;
112 	int			free_prod;
113 	int			free_cons;
114 };
115 
116 #define MVPP2_BM_SIZE		64
117 #define MVPP2_BM_POOL_PTR_ALIGN	128
118 #define MVPP2_BM_POOLS_NUM	8
119 #define MVPP2_BM_ALIGN		32
120 
121 struct mvpp2_tx_queue {
122 	uint8_t			id;
123 	uint8_t			log_id;
124 	struct mvpp2_dmamem	*ring;
125 	struct mvpp2_buf	*buf;
126 	struct mvpp2_tx_desc	*descs;
127 	int			prod;
128 	int			cnt;
129 	int			cons;
130 
131 	uint32_t		done_pkts_coal;
132 };
133 
134 struct mvpp2_rx_queue {
135 	uint8_t			id;
136 	struct mvpp2_dmamem	*ring;
137 	struct mvpp2_rx_desc	*descs;
138 	int			prod;
139 	struct if_rxring	rxring;
140 	int			cons;
141 
142 	uint32_t		pkts_coal;
143 	uint32_t		time_coal;
144 };
145 
146 struct mvpp2_dmamem {
147 	bus_dmamap_t		mdm_map;
148 	bus_dma_segment_t	mdm_seg;
149 	size_t			mdm_size;
150 	caddr_t			mdm_kva;
151 };
152 #define MVPP2_DMA_MAP(_mdm)	((_mdm)->mdm_map)
153 #define MVPP2_DMA_LEN(_mdm)	((_mdm)->mdm_size)
154 #define MVPP2_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
155 #define MVPP2_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
156 
157 struct mvpp2_port;
158 struct mvpp2_softc {
159 	struct device		sc_dev;
160 	int			sc_node;
161 	bus_space_tag_t		sc_iot;
162 	bus_space_handle_t	sc_ioh_base;
163 	bus_space_handle_t	sc_ioh_iface;
164 	paddr_t			sc_ioh_paddr;
165 	bus_size_t		sc_iosize_base;
166 	bus_size_t		sc_iosize_iface;
167 	bus_dma_tag_t		sc_dmat;
168 	struct regmap		*sc_rm;
169 
170 	uint32_t		sc_tclk;
171 
172 	struct mvpp2_bm_pool	*sc_bm_pools;
173 	int			sc_npools;
174 
175 	struct mvpp2_prs_shadow	*sc_prs_shadow;
176 	uint8_t			*sc_prs_double_vlans;
177 
178 	int			sc_aggr_ntxq;
179 	struct mvpp2_tx_queue	*sc_aggr_txqs;
180 
181 	struct mvpp2_port	**sc_ports;
182 };
183 
184 struct mvpp2_port {
185 	struct device		sc_dev;
186 	struct mvpp2_softc	*sc;
187 	int			sc_node;
188 	bus_dma_tag_t		sc_dmat;
189 	int			sc_id;
190 	int			sc_gop_id;
191 
192 	struct arpcom		sc_ac;
193 #define sc_lladdr	sc_ac.ac_enaddr
194 	struct mii_data		sc_mii;
195 #define sc_media	sc_mii.mii_media
196 	struct mii_bus		*sc_mdio;
197 	char			sc_cur_lladdr[ETHER_ADDR_LEN];
198 
199 	enum {
200 		PHY_MODE_XAUI,
201 		PHY_MODE_10GBASER,
202 		PHY_MODE_2500BASEX,
203 		PHY_MODE_1000BASEX,
204 		PHY_MODE_SGMII,
205 		PHY_MODE_RGMII,
206 		PHY_MODE_RGMII_ID,
207 		PHY_MODE_RGMII_RXID,
208 		PHY_MODE_RGMII_TXID,
209 	}			sc_phy_mode;
210 	int			sc_fixed_link;
211 	int			sc_inband_status;
212 	int			sc_link;
213 	int			sc_phyloc;
214 	int			sc_sfp;
215 
216 	int			sc_ntxq;
217 	int			sc_nrxq;
218 
219 	struct mvpp2_tx_queue	*sc_txqs;
220 	struct mvpp2_rx_queue	*sc_rxqs;
221 
222 	struct timeout		sc_tick;
223 
224 	uint32_t		sc_tx_time_coal;
225 };
226 
227 #define MVPP2_MAX_PORTS		4
228 
229 struct mvpp2_attach_args {
230 	int			ma_node;
231 	bus_dma_tag_t		ma_dmat;
232 };
233 
234 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
235 
236 static struct rwlock mvpp2_sff_lock = RWLOCK_INITIALIZER("mvpp2sff");
237 
238 int	mvpp2_match(struct device *, void *, void *);
239 void	mvpp2_attach(struct device *, struct device *, void *);
240 void	mvpp2_attach_deferred(struct device *);
241 
242 struct cfattach mvppc_ca = {
243 	sizeof(struct mvpp2_softc), mvpp2_match, mvpp2_attach
244 };
245 
246 struct cfdriver mvppc_cd = {
247 	NULL, "mvppc", DV_DULL
248 };
249 
250 int	mvpp2_port_match(struct device *, void *, void *);
251 void	mvpp2_port_attach(struct device *, struct device *, void *);
252 
253 struct cfattach mvpp_ca = {
254 	sizeof(struct mvpp2_port), mvpp2_port_match, mvpp2_port_attach
255 };
256 
257 struct cfdriver mvpp_cd = {
258 	NULL, "mvpp", DV_IFNET
259 };
260 
261 void	mvpp2_port_attach_sfp(struct device *);
262 
263 uint32_t mvpp2_read(struct mvpp2_softc *, bus_addr_t);
264 void	mvpp2_write(struct mvpp2_softc *, bus_addr_t, uint32_t);
265 uint32_t mvpp2_gmac_read(struct mvpp2_port *, bus_addr_t);
266 void	mvpp2_gmac_write(struct mvpp2_port *, bus_addr_t, uint32_t);
267 uint32_t mvpp2_xlg_read(struct mvpp2_port *, bus_addr_t);
268 void	mvpp2_xlg_write(struct mvpp2_port *, bus_addr_t, uint32_t);
269 uint32_t mvpp2_xpcs_read(struct mvpp2_port *, bus_addr_t);
270 void	mvpp2_xpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
271 uint32_t mvpp2_mpcs_read(struct mvpp2_port *, bus_addr_t);
272 void	mvpp2_mpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
273 
274 int	mvpp2_ioctl(struct ifnet *, u_long, caddr_t);
275 void	mvpp2_start(struct ifnet *);
276 int	mvpp2_rxrinfo(struct mvpp2_port *, struct if_rxrinfo *);
277 void	mvpp2_watchdog(struct ifnet *);
278 
279 int	mvpp2_media_change(struct ifnet *);
280 void	mvpp2_media_status(struct ifnet *, struct ifmediareq *);
281 
282 int	mvpp2_mii_readreg(struct device *, int, int);
283 void	mvpp2_mii_writereg(struct device *, int, int, int);
284 void	mvpp2_mii_statchg(struct device *);
285 void	mvpp2_inband_statchg(struct mvpp2_port *);
286 void	mvpp2_port_change(struct mvpp2_port *);
287 
288 void	mvpp2_tick(void *);
289 void	mvpp2_rxtick(void *);
290 
291 int	mvpp2_link_intr(void *);
292 int	mvpp2_intr(void *);
293 void	mvpp2_tx_proc(struct mvpp2_port *, uint8_t);
294 void	mvpp2_txq_proc(struct mvpp2_port *, struct mvpp2_tx_queue *);
295 void	mvpp2_rx_proc(struct mvpp2_port *, uint8_t);
296 void	mvpp2_rxq_proc(struct mvpp2_port *, struct mvpp2_rx_queue *);
297 void	mvpp2_rx_refill(struct mvpp2_port *);
298 
299 void	mvpp2_up(struct mvpp2_port *);
300 void	mvpp2_down(struct mvpp2_port *);
301 void	mvpp2_iff(struct mvpp2_port *);
302 int	mvpp2_encap(struct mvpp2_port *, struct mbuf *, int *);
303 
304 void	mvpp2_aggr_txq_hw_init(struct mvpp2_softc *, struct mvpp2_tx_queue *);
305 void	mvpp2_txq_hw_init(struct mvpp2_port *, struct mvpp2_tx_queue *);
306 void	mvpp2_rxq_hw_init(struct mvpp2_port *, struct mvpp2_rx_queue *);
307 void	mvpp2_txq_hw_deinit(struct mvpp2_port *, struct mvpp2_tx_queue *);
308 void	mvpp2_rxq_hw_drop(struct mvpp2_port *, struct mvpp2_rx_queue *);
309 void	mvpp2_rxq_hw_deinit(struct mvpp2_port *, struct mvpp2_rx_queue *);
310 void	mvpp2_rxq_long_pool_set(struct mvpp2_port *, int, int);
311 void	mvpp2_rxq_short_pool_set(struct mvpp2_port *, int, int);
312 
313 void	mvpp2_mac_reset_assert(struct mvpp2_port *);
314 void	mvpp2_pcs_reset_assert(struct mvpp2_port *);
315 void	mvpp2_pcs_reset_deassert(struct mvpp2_port *);
316 void	mvpp2_mac_config(struct mvpp2_port *);
317 void	mvpp2_xlg_config(struct mvpp2_port *);
318 void	mvpp2_gmac_config(struct mvpp2_port *);
319 void	mvpp2_comphy_config(struct mvpp2_port *, int);
320 void	mvpp2_gop_config(struct mvpp2_port *);
321 void	mvpp2_gop_intr_mask(struct mvpp2_port *);
322 void	mvpp2_gop_intr_unmask(struct mvpp2_port *);
323 
324 struct mvpp2_dmamem *
325 	mvpp2_dmamem_alloc(struct mvpp2_softc *, bus_size_t, bus_size_t);
326 void	mvpp2_dmamem_free(struct mvpp2_softc *, struct mvpp2_dmamem *);
327 struct mbuf *mvpp2_alloc_mbuf(struct mvpp2_softc *, bus_dmamap_t);
328 void	mvpp2_fill_rx_ring(struct mvpp2_softc *);
329 
330 void	mvpp2_interrupts_enable(struct mvpp2_port *, int);
331 void	mvpp2_interrupts_disable(struct mvpp2_port *, int);
332 int	mvpp2_egress_port(struct mvpp2_port *);
333 int	mvpp2_txq_phys(int, int);
334 void	mvpp2_defaults_set(struct mvpp2_port *);
335 void	mvpp2_ingress_enable(struct mvpp2_port *);
336 void	mvpp2_ingress_disable(struct mvpp2_port *);
337 void	mvpp2_egress_enable(struct mvpp2_port *);
338 void	mvpp2_egress_disable(struct mvpp2_port *);
339 void	mvpp2_port_enable(struct mvpp2_port *);
340 void	mvpp2_port_disable(struct mvpp2_port *);
341 void	mvpp2_rxq_status_update(struct mvpp2_port *, int, int, int);
342 int	mvpp2_rxq_received(struct mvpp2_port *, int);
343 void	mvpp2_rxq_offset_set(struct mvpp2_port *, int, int);
344 void	mvpp2_txp_max_tx_size_set(struct mvpp2_port *);
345 void	mvpp2_rx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
346 	    uint32_t);
347 void	mvpp2_tx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_tx_queue *,
348 	    uint32_t);
349 void	mvpp2_rx_time_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
350 	    uint32_t);
351 void	mvpp2_tx_time_coal_set(struct mvpp2_port *, uint32_t);
352 
353 void	mvpp2_axi_config(struct mvpp2_softc *);
354 void	mvpp2_bm_pool_init(struct mvpp2_softc *);
355 void	mvpp2_rx_fifo_init(struct mvpp2_softc *);
356 void	mvpp2_tx_fifo_init(struct mvpp2_softc *);
357 int	mvpp2_prs_default_init(struct mvpp2_softc *);
358 void	mvpp2_prs_hw_inv(struct mvpp2_softc *, int);
359 void	mvpp2_prs_hw_port_init(struct mvpp2_softc *, int, int, int, int);
360 void	mvpp2_prs_def_flow_init(struct mvpp2_softc *);
361 void	mvpp2_prs_mh_init(struct mvpp2_softc *);
362 void	mvpp2_prs_mac_init(struct mvpp2_softc *);
363 void	mvpp2_prs_dsa_init(struct mvpp2_softc *);
364 int	mvpp2_prs_etype_init(struct mvpp2_softc *);
365 int	mvpp2_prs_vlan_init(struct mvpp2_softc *);
366 int	mvpp2_prs_pppoe_init(struct mvpp2_softc *);
367 int	mvpp2_prs_ip6_init(struct mvpp2_softc *);
368 int	mvpp2_prs_ip4_init(struct mvpp2_softc *);
369 void	mvpp2_prs_shadow_ri_set(struct mvpp2_softc *, int,
370 	    uint32_t, uint32_t);
371 void	mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *, uint32_t);
372 void	mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *, uint32_t, int);
373 void	mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *, uint32_t);
374 uint32_t mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *);
375 void	mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *, uint32_t,
376 	    uint8_t, uint8_t);
377 void	mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *, uint32_t,
378 	    uint8_t *, uint8_t *);
379 int	mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *, int, uint16_t);
380 void	mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
381 int	mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *);
382 int	mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *);
383 void	mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *, uint32_t,
384 	    uint32_t *, uint32_t *);
385 void	mvpp2_prs_match_etype(struct mvpp2_prs_entry *, uint32_t, uint16_t);
386 int	mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *);
387 void	mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
388 void	mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
389 void	mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *, uint32_t, uint32_t);
390 void	mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *, uint32_t, uint32_t);
391 void	mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *, int, uint32_t);
392 void	mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *, uint32_t, int,
393 	    uint32_t);
394 void	mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *, uint32_t);
395 void	mvpp2_prs_shadow_set(struct mvpp2_softc *, int, uint32_t);
396 int	mvpp2_prs_hw_write(struct mvpp2_softc *, struct mvpp2_prs_entry *);
397 int	mvpp2_prs_hw_read(struct mvpp2_softc *, struct mvpp2_prs_entry *, int);
398 int	mvpp2_prs_flow_find(struct mvpp2_softc *, int);
399 int	mvpp2_prs_tcam_first_free(struct mvpp2_softc *, uint8_t, uint8_t);
400 void	mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *, uint32_t, int);
401 void	mvpp2_prs_mac_promisc_set(struct mvpp2_softc *, uint32_t, int, int);
402 void	mvpp2_prs_dsa_tag_set(struct mvpp2_softc *, uint32_t, int, int, int);
403 void	mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *, uint32_t,
404 	    int, int, int);
405 struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2_softc *, uint16_t,
406 	    int);
407 int	mvpp2_prs_vlan_add(struct mvpp2_softc *, uint16_t, int, uint32_t);
408 int	mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *);
409 struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2_softc *,
410 	    uint16_t, uint16_t);
411 int	mvpp2_prs_double_vlan_add(struct mvpp2_softc *, uint16_t, uint16_t,
412 	    uint32_t);
413 int	mvpp2_prs_ip4_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
414 int	mvpp2_prs_ip4_cast(struct mvpp2_softc *, uint16_t);
415 int	mvpp2_prs_ip6_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
416 int	mvpp2_prs_ip6_cast(struct mvpp2_softc *, uint16_t);
417 int	mvpp2_prs_mac_da_range_find(struct mvpp2_softc *, int, const uint8_t *,
418 	    uint8_t *, int);
419 int	mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *, const uint8_t *,
420 	    uint8_t *);
421 int	mvpp2_prs_mac_da_accept(struct mvpp2_port *, const uint8_t *, int);
422 int	mvpp2_prs_tag_mode_set(struct mvpp2_softc *, int, int);
423 int	mvpp2_prs_def_flow(struct mvpp2_port *);
424 void	mvpp2_cls_flow_write(struct mvpp2_softc *, struct mvpp2_cls_flow_entry *);
425 void	mvpp2_cls_lookup_write(struct mvpp2_softc *, struct mvpp2_cls_lookup_entry *);
426 void	mvpp2_cls_init(struct mvpp2_softc *);
427 void	mvpp2_cls_port_config(struct mvpp2_port *);
428 void	mvpp2_cls_oversize_rxq_set(struct mvpp2_port *);
429 
430 int
431 mvpp2_match(struct device *parent, void *cfdata, void *aux)
432 {
433 	struct fdt_attach_args *faa = aux;
434 
435 	return OF_is_compatible(faa->fa_node, "marvell,armada-7k-pp22");
436 }
437 
/*
 * Attach the packet processor controller.  Maps the two register
 * windows ("base" and "iface"), records the physical address of the
 * base window, enables the clocks and defers the remaining setup so
 * that other drivers (e.g. MDIO controllers) can attach first.
 */
void
mvpp2_attach(struct device *parent, struct device *self, void *aux)
{
	struct mvpp2_softc *sc = (void *)self;
	struct fdt_attach_args *faa = aux;

	/* Both register ranges are required. */
	if (faa->fa_nreg < 2) {
		printf(": no registers\n");
		return;
	}

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;

	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh_base)) {
		printf(": can't map registers\n");
		return;
	}
	sc->sc_iosize_base = faa->fa_reg[0].size;

	/*
	 * Keep the physical address of the base window; PMAP_PA_MASK
	 * strips the pmap attribute bits encoded in the mmap cookie.
	 */
	sc->sc_ioh_paddr = bus_space_mmap(sc->sc_iot, faa->fa_reg[0].addr,
	    0, PROT_READ | PROT_WRITE, 0);
	KASSERT(sc->sc_ioh_paddr != -1);
	sc->sc_ioh_paddr &= PMAP_PA_MASK;

	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
	    faa->fa_reg[1].size, 0, &sc->sc_ioh_iface)) {
		printf(": can't map registers\n");
		/* Unwind the first mapping on failure. */
		bus_space_unmap(sc->sc_iot, sc->sc_ioh_base,
		    sc->sc_iosize_base);
		return;
	}
	sc->sc_iosize_iface = faa->fa_reg[1].size;

	/* Optional system-controller regmap; may be NULL if absent. */
	sc->sc_rm = regmap_byphandle(OF_getpropint(faa->fa_node,
	    "marvell,system-controller", 0));

	clock_enable_all(faa->fa_node);
	sc->sc_tclk = clock_get_frequency(faa->fa_node, "pp_clk");

	printf("\n");

	/* Finish initialization after all other devices attached. */
	config_defer(self, mvpp2_attach_deferred);
}
484 
/*
 * Deferred part of controller attach: program the global hardware
 * state (AXI attributes, aggregated TX queues, FIFOs, buffer manager
 * pools, parser, classifier), then attach one port child per FDT
 * sub-node.
 */
void
mvpp2_attach_deferred(struct device *self)
{
	struct mvpp2_softc *sc = (void *)self;
	struct mvpp2_attach_args maa;
	struct mvpp2_tx_queue *txq;
	int i, node;

	mvpp2_axi_config(sc);

	/* Turn off hardware SMI polling. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh_iface, MVPP22_SMI_MISC_CFG_REG,
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh_iface,
	    MVPP22_SMI_MISC_CFG_REG) & ~MVPP22_SMI_POLLING_EN);

	/* One aggregated TX queue, shared by all ports. */
	sc->sc_aggr_ntxq = 1;
	sc->sc_aggr_txqs = mallocarray(sc->sc_aggr_ntxq,
	    sizeof(*sc->sc_aggr_txqs), M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->sc_aggr_ntxq; i++) {
		txq = &sc->sc_aggr_txqs[i];
		txq->id = i;
		mvpp2_aggr_txq_hw_init(sc, txq);
	}

	mvpp2_rx_fifo_init(sc);
	mvpp2_tx_fifo_init(sc);

	mvpp2_write(sc, MVPP2_TX_SNOOP_REG, 0x1);

	mvpp2_bm_pool_init(sc);

	/* Software shadow of the parser TCAM/SRAM entries. */
	sc->sc_prs_shadow = mallocarray(MVPP2_PRS_TCAM_SRAM_SIZE,
	    sizeof(*sc->sc_prs_shadow), M_DEVBUF, M_WAITOK | M_ZERO);

	mvpp2_prs_default_init(sc);
	mvpp2_cls_init(sc);

	/* Attach a port (mvpp) instance for each child node. */
	memset(&maa, 0, sizeof(maa));
	for (node = OF_child(sc->sc_node); node; node = OF_peer(node)) {
		maa.ma_node = node;
		maa.ma_dmat = sc->sc_dmat;
		config_found(self, &maa, NULL);
	}
}
529 
/*
 * Program the AXI cache/domain attributes used by the controller's
 * DMA masters (buffer manager, descriptor and packet data accesses).
 */
void
mvpp2_axi_config(struct mvpp2_softc *sc)
{
	uint32_t reg;

	/* Clear the high bits used when releasing buffers to the BM. */
	mvpp2_write(sc, MVPP22_BM_ADDR_HIGH_RLS_REG, 0);

	/* Write attributes: cacheable writes, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_BM_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_RX_DATA_WR_ATTR_REG, reg);

	/* Read attributes: cacheable reads, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_BM_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TX_DATA_RD_ATTR_REG, reg);

	/* Normal (non-snooped) accesses: non-cacheable, system domain. */
	reg = (MVPP22_AXI_CODE_CACHE_NON_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_SYSTEM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_RD_NORMAL_CODE_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_WR_NORMAL_CODE_REG, reg);

	/* Snooped reads: cacheable, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_RD_SNOOP_CODE_REG, reg);

	/* Snooped writes: cacheable, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_WR_SNOOP_CODE_REG, reg);
}
564 
/*
 * Initialize the buffer manager (BM) pools: allocate the pointer ring
 * for each pool, point the hardware at it, drain any stale buffers
 * left behind by the firmware, and fill the pool with fresh mbuf
 * clusters.  One pool per CPU, capped at MVPP2_BM_POOLS_NUM.
 */
void
mvpp2_bm_pool_init(struct mvpp2_softc *sc)
{
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	uint64_t phys, virt;
	int i, j, inuse;

	/* Mask and clear BM interrupts for all pools. */
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		mvpp2_write(sc, MVPP2_BM_INTR_MASK_REG(i), 0);
		mvpp2_write(sc, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	sc->sc_npools = ncpus;
	sc->sc_npools = min(sc->sc_npools, MVPP2_BM_POOLS_NUM);

	sc->sc_bm_pools = mallocarray(sc->sc_npools, sizeof(*sc->sc_bm_pools),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->sc_npools; i++) {
		bm = &sc->sc_bm_pools[i];
		/*
		 * DMA memory for the pool's pointer ring; each entry
		 * holds a physical and a virtual (cookie) word.
		 */
		bm->bm_mem = mvpp2_dmamem_alloc(sc,
		    MVPP2_BM_SIZE * sizeof(uint64_t) * 2,
		    MVPP2_BM_POOL_PTR_ALIGN);
		KASSERT(bm->bm_mem != NULL);
		bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(bm->bm_mem), 0,
		    MVPP2_DMA_LEN(bm->bm_mem),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Stop the pool before reprogramming its base. */
		mvpp2_write(sc, MVPP2_BM_POOL_CTRL_REG(i),
		    mvpp2_read(sc, MVPP2_BM_POOL_CTRL_REG(i)) |
		    MVPP2_BM_STOP_MASK);

		/* Program the 64-bit ring base (split low/high) and size. */
		mvpp2_write(sc, MVPP2_BM_POOL_BASE_REG(i),
		    (uint64_t)MVPP2_DMA_DVA(bm->bm_mem) & 0xffffffff);
		mvpp2_write(sc, MVPP22_BM_POOL_BASE_HIGH_REG,
		    ((uint64_t)MVPP2_DMA_DVA(bm->bm_mem) >> 32)
		    & MVPP22_BM_POOL_BASE_HIGH_MASK);
		mvpp2_write(sc, MVPP2_BM_POOL_SIZE_REG(i),
		    MVPP2_BM_SIZE);

		mvpp2_write(sc, MVPP2_BM_POOL_CTRL_REG(i),
		    mvpp2_read(sc, MVPP2_BM_POOL_CTRL_REG(i)) |
		    MVPP2_BM_START_MASK);

		/*
		 * U-Boot might not have cleaned its pools.  The pool needs
		 * to be empty before we fill it, otherwise our packets are
		 * written to wherever U-Boot allocated memory.  Cleaning it
		 * up ourselves is worrying as well, since the BM's pages are
		 * probably in our own memory.  Best we can do is stop the BM,
		 * set new memory and drain the pool.
		 */
		inuse = mvpp2_read(sc, MVPP2_BM_POOL_PTRS_NUM_REG(i)) &
		    MVPP2_BM_POOL_PTRS_NUM_MASK;
		inuse += mvpp2_read(sc, MVPP2_BM_BPPI_PTRS_NUM_REG(i)) &
		    MVPP2_BM_BPPI_PTRS_NUM_MASK;
		/* NOTE(review): one extra alloc, presumably for an
		 * in-flight pointer the counters miss -- TODO confirm. */
		if (inuse)
			inuse++;
		for (j = 0; j < inuse; j++)
			mvpp2_read(sc, MVPP2_BM_PHY_ALLOC_REG(i));

		/* Buffer size the hardware may use from this pool. */
		mvpp2_write(sc, MVPP2_POOL_BUF_SIZE_REG(i),
		    roundup(MCLBYTES, 1 << MVPP2_POOL_BUF_SIZE_OFFSET));

		bm->rxbuf = mallocarray(MVPP2_BM_SIZE, sizeof(struct mvpp2_buf),
		    M_DEVBUF, M_WAITOK);
		bm->freelist = mallocarray(MVPP2_BM_SIZE, sizeof(*bm->freelist),
		    M_DEVBUF, M_WAITOK | M_ZERO);

		/* One single-segment DMA map per receive buffer. */
		for (j = 0; j < MVPP2_BM_SIZE; j++) {
			rxb = &bm->rxbuf[j];
			bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->mb_map);
			rxb->mb_m = NULL;
		}

		/* Use pool-id and rxbuf index as cookie. */
		for (j = 0; j < MVPP2_BM_SIZE; j++)
			bm->freelist[j] = (i << 16) | (j << 0);

		/* Fill the pool by releasing one buffer per cookie. */
		for (j = 0; j < MVPP2_BM_SIZE; j++) {
			rxb = &bm->rxbuf[j];
			rxb->mb_m = mvpp2_alloc_mbuf(sc, rxb->mb_map);
			if (rxb->mb_m == NULL)
				break;

			/* Consume the cookie from the freelist. */
			KASSERT(bm->freelist[bm->free_cons] != -1);
			virt = bm->freelist[bm->free_cons];
			bm->freelist[bm->free_cons] = -1;
			bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;

			/*
			 * Release to the BM: high halves of both the
			 * physical address and the cookie go into the
			 * shared high register, then the low words.
			 */
			phys = rxb->mb_map->dm_segs[0].ds_addr;
			mvpp2_write(sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
			    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
			    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
			    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
			mvpp2_write(sc, MVPP2_BM_VIRT_RLS_REG,
			    virt & 0xffffffff);
			mvpp2_write(sc, MVPP2_BM_PHY_RLS_REG(i),
			    phys & 0xffffffff);
		}
	}
}
669 
670 void
671 mvpp2_rx_fifo_init(struct mvpp2_softc *sc)
672 {
673 	int i;
674 
675 	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
676 	    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
677 	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
678 	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
679 
680 	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
681 	    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
682 	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
683 	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
684 
685 	for (i = 2; i < MVPP2_MAX_PORTS; i++) {
686 		mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(i),
687 		    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
688 		mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(i),
689 		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
690 	}
691 
692 	mvpp2_write(sc, MVPP2_RX_MIN_PKT_SIZE_REG, MVPP2_RX_FIFO_PORT_MIN_PKT);
693 	mvpp2_write(sc, MVPP2_RX_FIFO_INIT_REG, 0x1);
694 }
695 
696 void
697 mvpp2_tx_fifo_init(struct mvpp2_softc *sc)
698 {
699 	int i;
700 
701 	mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(0),
702 	    MVPP22_TX_FIFO_DATA_SIZE_10KB);
703 	mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(0),
704 	    MVPP2_TX_FIFO_THRESHOLD_10KB);
705 
706 	for (i = 1; i < MVPP2_MAX_PORTS; i++) {
707 		mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(i),
708 		    MVPP22_TX_FIFO_DATA_SIZE_3KB);
709 		mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(i),
710 		    MVPP2_TX_FIFO_THRESHOLD_3KB);
711 	}
712 }
713 
714 int
715 mvpp2_prs_default_init(struct mvpp2_softc *sc)
716 {
717 	int i, j, ret;
718 
719 	mvpp2_write(sc, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
720 
721 	for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
722 		mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, i);
723 		for (j = 0; j < MVPP2_PRS_TCAM_WORDS; j++)
724 			mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(j), 0);
725 
726 		mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, i);
727 		for (j = 0; j < MVPP2_PRS_SRAM_WORDS; j++)
728 			mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(j), 0);
729 	}
730 
731 	for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++)
732 		mvpp2_prs_hw_inv(sc, i);
733 
734 	for (i = 0; i < MVPP2_MAX_PORTS; i++)
735 		mvpp2_prs_hw_port_init(sc, i, MVPP2_PRS_LU_MH,
736 		    MVPP2_PRS_PORT_LU_MAX, 0);
737 
738 	mvpp2_prs_def_flow_init(sc);
739 	mvpp2_prs_mh_init(sc);
740 	mvpp2_prs_mac_init(sc);
741 	mvpp2_prs_dsa_init(sc);
742 	ret = mvpp2_prs_etype_init(sc);
743 	if (ret)
744 		return ret;
745 	ret = mvpp2_prs_vlan_init(sc);
746 	if (ret)
747 		return ret;
748 	ret = mvpp2_prs_pppoe_init(sc);
749 	if (ret)
750 		return ret;
751 	ret = mvpp2_prs_ip6_init(sc);
752 	if (ret)
753 		return ret;
754 	ret = mvpp2_prs_ip4_init(sc);
755 	if (ret)
756 		return ret;
757 
758 	return 0;
759 }
760 
/*
 * Invalidate parser entry `index' by setting the invalid bit in its
 * TCAM invalid word.
 */
void
mvpp2_prs_hw_inv(struct mvpp2_softc *sc, int index)
{
	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
	    MVPP2_PRS_TCAM_INV_MASK);
}
768 
/*
 * Program a port's parser entry point: the first lookup id
 * (lu_first), the maximum number of lookup iterations (lu_max) and
 * the initial header offset.  Each register is read-modify-written to
 * touch only this port's field.
 */
void
mvpp2_prs_hw_port_init(struct mvpp2_softc *sc, int port,
    int lu_first, int lu_max, int offset)
{
	uint32_t reg;

	/* Initial lookup for this port. */
	reg = mvpp2_read(sc, MVPP2_PRS_INIT_LOOKUP_REG);
	reg &= ~MVPP2_PRS_PORT_LU_MASK(port);
	reg |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(sc, MVPP2_PRS_INIT_LOOKUP_REG, reg);

	/* Maximum lookup iterations. */
	reg = mvpp2_read(sc, MVPP2_PRS_MAX_LOOP_REG(port));
	reg &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	reg |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(sc, MVPP2_PRS_MAX_LOOP_REG(port), reg);

	/* Initial offset into the packet header. */
	reg = mvpp2_read(sc, MVPP2_PRS_INIT_OFFS_REG(port));
	reg &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	reg |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(sc, MVPP2_PRS_INIT_OFFS_REG(port), reg);
}
790 
/*
 * Install one default flow entry per port.  Each entry sets the
 * port's flow id in the SRAM AI field and terminates the lookup
 * (LU_DONE).  The TCAM port map is cleared to 0; presumably the
 * per-port entry is enabled later via mvpp2_prs_def_flow() -- see
 * callers to confirm.
 */
void
mvpp2_prs_def_flow_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int i;

	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		/* Entries are allocated downward from the first slot. */
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - i;
		mvpp2_prs_tcam_port_map_set(&pe, 0);
		mvpp2_prs_sram_ai_update(&pe, i, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(sc, &pe);
	}
}
808 
/*
 * Default Marvell-header entry: for all ports, skip MVPP2_MH_SIZE
 * bytes and continue parsing at the MAC lookup.
 */
void
mvpp2_prs_mh_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));
	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	/* Shift past the Marvell header. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(sc, &pe);
}
824 
/*
 * Default MAC lookup entries.  The non-promiscuous default drops
 * packets that matched no MAC entry; drop-all is disabled and both
 * unicast and multicast promiscuous modes start disabled for all
 * ports.
 */
void
mvpp2_prs_mac_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
	/* Unmatched destination MAC: set the drop result bit. */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
	    MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(sc, &pe);
	/* Start with drop-all and promiscuous modes off, port 0. */
	mvpp2_prs_mac_drop_all_set(sc, 0, 0);
	mvpp2_prs_mac_promisc_set(sc, 0, MVPP2_PRS_L2_UNI_CAST, 0);
	mvpp2_prs_mac_promisc_set(sc, 0, MVPP2_PRS_L2_MULTI_CAST, 0);
}
844 
/*
 * Default DSA (distributed switch architecture) entries: disable
 * tagged and untagged DSA/EDSA handling for all combinations, then
 * install the default entry that falls through to the VLAN lookup
 * without shifting.
 */
void
mvpp2_prs_dsa_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/*
	 * NOTE(review): shadow lookup recorded as LU_MAC rather than
	 * LU_DSA -- looks intentional (shared default handling), but
	 * verify against the shadow's consumers.
	 */
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_hw_write(sc, &pe);
}
868 
/*
 * Install the parser entries for the L2 ethertype lookup stage:
 * PPPoE, ARP, LBTD, IPv4 (with and without options), IPv6 and a
 * catch-all entry for unknown ethertypes.  Returns 0 on success or a
 * negative value if no free TCAM entry is available.
 */
int
mvpp2_prs_etype_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_PPPOE);
	/* Skip the PPPoE header and continue with the PPPoE lookup. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_ARP);
	/* ARP terminates the lookup; generate the flow id. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Mark as a special frame for the CPU. */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_IP);
	/* Require version 4 and IHL == 5 (no options). */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/*
	 * Ethertype: IPv4 with options.  Deliberately no memset here:
	 * this entry re-uses the previous IPv4 entry's contents and
	 * only clears the IHL match and rewrites the result info.
	 */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	pe.index = tid;

	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
	/* Only require version 4; any IHL is accepted. */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD, MVPP2_PRS_IPV4_HEAD_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_IPV6);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
	    MVPP2_MAX_L3_ADDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1033 
/*
 * Install the parser entries for the VLAN lookup stage: allocate the
 * double-VLAN tracking table (owned by the softc, never freed here),
 * add the supported single and double tag combinations, and install
 * the default double-VLAN and no-VLAN entries.  Returns 0 on success
 * or the error from a failed entry addition.
 */
int
mvpp2_prs_vlan_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int ret;

	sc->sc_prs_double_vlans = mallocarray(MVPP2_PRS_DBL_VLANS_MAX,
	    sizeof(*sc->sc_prs_double_vlans), M_DEVBUF, M_WAITOK | M_ZERO);

	/* Double VLAN: 0x8100 followed by 0x88a8 or another 0x8100. */
	ret = mvpp2_prs_double_vlan_add(sc, ETHERTYPE_VLAN, ETHERTYPE_QINQ,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_double_vlan_add(sc, ETHERTYPE_VLAN, ETHERTYPE_VLAN,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	/* Single VLAN: 0x88a8 or 0x8100. */
	ret = mvpp2_prs_vlan_add(sc, ETHERTYPE_QINQ, MVPP2_PRS_SINGLE_VLAN_AI,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_vlan_add(sc, ETHERTYPE_VLAN, MVPP2_PRS_SINGLE_VLAN_AI,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;

	/* Default double-VLAN entry: continue with the L2 lookup. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
	    MVPP2_PRS_RI_VLAN_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
	    MVPP2_PRS_DBL_VLAN_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default no-VLAN entry: continue with the L2 lookup. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
	    MVPP2_PRS_RI_VLAN_MASK);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1085 
/*
 * Install the parser entries for the PPPoE lookup stage: IPv4 with
 * and without options, IPv6, and a default entry for non-IP PPP
 * protocols.  Returns 0 on success or a negative value if no free
 * TCAM entry is available.
 */
int
mvpp2_prs_pppoe_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE, with options (any IHL). */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, PPP_IP);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/*
	 * IPv4 over PPPoE, without options.  Deliberately no memset:
	 * this entry re-uses the previous one and only adds the
	 * version/IHL match and rewrites the result info.
	 */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* IPv6 over PPPoE. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* Non-IP over PPPoE: mark L3 as unknown and finish the lookup. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1164 
/*
 * Install the parser entries for the IPv6 lookup stage: TCP, UDP,
 * ICMPv6 and IP-in-IP protocol entries, multicast, a drop entry for
 * zero hop limit, default entries for unknown (extension) protocols
 * and a default entry for unknown addresses.  Returns 0 on success
 * or a negative value on failure.
 */
int
mvpp2_prs_ip6_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid, ret;

	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	/* ICMPv6 is flagged as a special frame for the CPU. */
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_ICMPV6,
	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_IPIP, MVPP2_PRS_RI_UDF7_IP6_LITE,
	    MVPP2_PRS_RI_UDF7_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
	if (ret)
		return ret;

	/* Drop packets whose hop limit (header byte 1 after shift) is 0. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe,
	    MVPP2_PRS_RI_L3_UN | MVPP2_PRS_RI_DROP_MASK,
	    MVPP2_PRS_RI_L3_PROTO_MASK | MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* NOTE(review): shadow LU is LU_IP4 for these IPv6 entries; this
	 * matches the reference driver but looks odd — confirm. */
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry: unknown protocol without extension headers. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry: unknown protocol with extension headers. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry: unknown address, treat as unicast, re-shift. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to the IPv6 next-header field. */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1255 
/*
 * Install the parser entries for the IPv4 lookup stage: TCP, UDP and
 * IGMP protocol entries, broadcast and multicast address entries,
 * plus default entries for unknown protocols and unknown addresses.
 * Returns 0 on success or a negative value on failure.
 */
int
mvpp2_prs_ip4_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int ret;

	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	/* IGMP is flagged as a special frame for the CPU. */
	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_IGMP,
	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_BROAD_CAST);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
	if (ret)
		return ret;

	/* Default entry: unknown protocol, continue with address lookup. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry: unknown address, treat as unicast and finish. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1313 
1314 int
1315 mvpp2_port_match(struct device *parent, void *cfdata, void *aux)
1316 {
1317 	struct mvpp2_attach_args *maa = aux;
1318 	char buf[32];
1319 
1320 	if (OF_getprop(maa->ma_node, "status", buf, sizeof(buf)) > 0 &&
1321 	    strcmp(buf, "disabled") == 0)
1322 		return 0;
1323 
1324 	return 1;
1325 }
1326 
1327 void
1328 mvpp2_port_attach(struct device *parent, struct device *self, void *aux)
1329 {
1330 	struct mvpp2_port *sc = (void *)self;
1331 	struct mvpp2_attach_args *maa = aux;
1332 	struct mvpp2_tx_queue *txq;
1333 	struct mvpp2_rx_queue *rxq;
1334 	struct ifnet *ifp;
1335 	uint32_t phy, reg;
1336 	int i, idx, len, node;
1337 	int mii_flags = 0;
1338 	char *phy_mode;
1339 	char *managed;
1340 
1341 	sc->sc = (void *)parent;
1342 	sc->sc_node = maa->ma_node;
1343 	sc->sc_dmat = maa->ma_dmat;
1344 
1345 	sc->sc_id = OF_getpropint(sc->sc_node, "port-id", 0);
1346 	sc->sc_gop_id = OF_getpropint(sc->sc_node, "gop-port-id", 0);
1347 	sc->sc_sfp = OF_getpropint(sc->sc_node, "sfp", 0);
1348 
1349 	len = OF_getproplen(sc->sc_node, "phy-mode");
1350 	if (len <= 0) {
1351 		printf("%s: cannot extract phy-mode\n", self->dv_xname);
1352 		return;
1353 	}
1354 
1355 	phy_mode = malloc(len, M_TEMP, M_WAITOK);
1356 	OF_getprop(sc->sc_node, "phy-mode", phy_mode, len);
1357 	if (!strncmp(phy_mode, "10gbase-kr", strlen("10gbase-kr")))
1358 		sc->sc_phy_mode = PHY_MODE_10GBASER;
1359 	else if (!strncmp(phy_mode, "2500base-x", strlen("2500base-x")))
1360 		sc->sc_phy_mode = PHY_MODE_2500BASEX;
1361 	else if (!strncmp(phy_mode, "1000base-x", strlen("1000base-x")))
1362 		sc->sc_phy_mode = PHY_MODE_1000BASEX;
1363 	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
1364 		sc->sc_phy_mode = PHY_MODE_SGMII;
1365 	else if (!strncmp(phy_mode, "rgmii-rxid", strlen("rgmii-rxid")))
1366 		sc->sc_phy_mode = PHY_MODE_RGMII_RXID;
1367 	else if (!strncmp(phy_mode, "rgmii-txid", strlen("rgmii-txid")))
1368 		sc->sc_phy_mode = PHY_MODE_RGMII_TXID;
1369 	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
1370 		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
1371 	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
1372 		sc->sc_phy_mode = PHY_MODE_RGMII;
1373 	else {
1374 		printf("%s: cannot use phy-mode %s\n", self->dv_xname,
1375 		    phy_mode);
1376 		return;
1377 	}
1378 	free(phy_mode, M_TEMP, len);
1379 
1380 	/* Lookup PHY. */
1381 	phy = OF_getpropint(sc->sc_node, "phy", 0);
1382 	if (phy) {
1383 		node = OF_getnodebyphandle(phy);
1384 		if (!node) {
1385 			printf(": no phy\n");
1386 			return;
1387 		}
1388 		sc->sc_mdio = mii_byphandle(phy);
1389 		sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
1390 		sc->sc_sfp = OF_getpropint(node, "sfp", sc->sc_sfp);
1391 	}
1392 
1393 	if (sc->sc_sfp)
1394 		config_mountroot(self, mvpp2_port_attach_sfp);
1395 
1396 	if ((len = OF_getproplen(sc->sc_node, "managed")) >= 0) {
1397 		managed = malloc(len, M_TEMP, M_WAITOK);
1398 		OF_getprop(sc->sc_node, "managed", managed, len);
1399 		if (!strncmp(managed, "in-band-status",
1400 		    strlen("in-band-status")))
1401 			sc->sc_inband_status = 1;
1402 		free(managed, M_TEMP, len);
1403 	}
1404 
1405 	if (OF_getprop(sc->sc_node, "local-mac-address",
1406 	    &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
1407 		memset(sc->sc_lladdr, 0xff, sizeof(sc->sc_lladdr));
1408 	printf(": address %s\n", ether_sprintf(sc->sc_lladdr));
1409 
1410 	sc->sc_ntxq = sc->sc_nrxq = 1;
1411 	sc->sc_txqs = mallocarray(sc->sc_ntxq, sizeof(*sc->sc_txqs),
1412 	    M_DEVBUF, M_WAITOK | M_ZERO);
1413 	sc->sc_rxqs = mallocarray(sc->sc_nrxq, sizeof(*sc->sc_rxqs),
1414 	    M_DEVBUF, M_WAITOK | M_ZERO);
1415 
1416 	for (i = 0; i < sc->sc_ntxq; i++) {
1417 		txq = &sc->sc_txqs[i];
1418 		txq->id = mvpp2_txq_phys(sc->sc_id, i);
1419 		txq->log_id = i;
1420 		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
1421 	}
1422 
1423 	sc->sc_tx_time_coal = MVPP2_TXDONE_COAL_USEC;
1424 
1425 	for (i = 0; i < sc->sc_nrxq; i++) {
1426 		rxq = &sc->sc_rxqs[i];
1427 		rxq->id = sc->sc_id * 32 + i;
1428 		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
1429 		rxq->time_coal = MVPP2_RX_COAL_USEC;
1430 	}
1431 
1432 	mvpp2_egress_disable(sc);
1433 	mvpp2_port_disable(sc);
1434 
1435 	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_GROUP_INDEX_REG,
1436 	    sc->sc_id << MVPP2_ISR_RXQ_GROUP_INDEX_GROUP_SHIFT |
1437 	    0 /* queue vector id */);
1438 	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_REG,
1439 	    sc->sc_nrxq << MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_SIZE_SHIFT |
1440 	    0 /* first rxq */);
1441 
1442 	mvpp2_ingress_disable(sc);
1443 	mvpp2_defaults_set(sc);
1444 
1445 	mvpp2_cls_oversize_rxq_set(sc);
1446 	mvpp2_cls_port_config(sc);
1447 
1448 	/*
1449 	 * We have one pool per core, so all RX queues on a specific
1450 	 * core share that pool.  Also long and short uses the same
1451 	 * pool.
1452 	 */
1453 	for (i = 0; i < sc->sc_nrxq; i++) {
1454 		mvpp2_rxq_long_pool_set(sc, i, i);
1455 		mvpp2_rxq_short_pool_set(sc, i, i);
1456 	}
1457 
1458 	mvpp2_mac_reset_assert(sc);
1459 	mvpp2_pcs_reset_assert(sc);
1460 
1461 	timeout_set(&sc->sc_tick, mvpp2_tick, sc);
1462 
1463 	ifp = &sc->sc_ac.ac_if;
1464 	ifp->if_softc = sc;
1465 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1466 	ifp->if_ioctl = mvpp2_ioctl;
1467 	ifp->if_start = mvpp2_start;
1468 	ifp->if_watchdog = mvpp2_watchdog;
1469 	ifq_set_maxlen(&ifp->if_snd, MVPP2_NTXDESC - 1);
1470 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1471 
1472 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1473 
1474 	sc->sc_mii.mii_ifp = ifp;
1475 	sc->sc_mii.mii_readreg = mvpp2_mii_readreg;
1476 	sc->sc_mii.mii_writereg = mvpp2_mii_writereg;
1477 	sc->sc_mii.mii_statchg = mvpp2_mii_statchg;
1478 
1479 	ifmedia_init(&sc->sc_media, 0, mvpp2_media_change, mvpp2_media_status);
1480 
1481 	if (sc->sc_mdio) {
1482 		switch (sc->sc_phy_mode) {
1483 		case PHY_MODE_1000BASEX:
1484 			mii_flags |= MIIF_IS_1000X;
1485 			break;
1486 		case PHY_MODE_SGMII:
1487 			mii_flags |= MIIF_SGMII;
1488 			break;
1489 		case PHY_MODE_RGMII_ID:
1490 			mii_flags |= MIIF_RXID | MIIF_TXID;
1491 			break;
1492 		case PHY_MODE_RGMII_RXID:
1493 			mii_flags |= MIIF_RXID;
1494 			break;
1495 		case PHY_MODE_RGMII_TXID:
1496 			mii_flags |= MIIF_TXID;
1497 			break;
1498 		default:
1499 			break;
1500 		}
1501 		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
1502 		    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY,
1503 		    mii_flags);
1504 		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1505 			printf("%s: no PHY found!\n", self->dv_xname);
1506 			ifmedia_add(&sc->sc_mii.mii_media,
1507 			    IFM_ETHER|IFM_MANUAL, 0, NULL);
1508 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
1509 		} else
1510 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1511 	} else {
1512 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
1513 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1514 
1515 		if (sc->sc_inband_status) {
1516 			switch (sc->sc_phy_mode) {
1517 			case PHY_MODE_1000BASEX:
1518 				sc->sc_mii.mii_media_active =
1519 				    IFM_ETHER|IFM_1000_KX|IFM_FDX;
1520 				break;
1521 			case PHY_MODE_2500BASEX:
1522 				sc->sc_mii.mii_media_active =
1523 				    IFM_ETHER|IFM_2500_KX|IFM_FDX;
1524 				break;
1525 			case PHY_MODE_10GBASER:
1526 				sc->sc_mii.mii_media_active =
1527 				    IFM_ETHER|IFM_10G_KR|IFM_FDX;
1528 				break;
1529 			default:
1530 				break;
1531 			}
1532 			mvpp2_inband_statchg(sc);
1533 		} else {
1534 			sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
1535 			sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
1536 			mvpp2_mii_statchg(self);
1537 		}
1538 
1539 		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
1540 		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
1541 	}
1542 
1543 	if_attach(ifp);
1544 	ether_ifattach(ifp);
1545 
1546 	if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
1547 	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
1548 	    sc->sc_phy_mode == PHY_MODE_SGMII ||
1549 	    sc->sc_phy_mode == PHY_MODE_RGMII ||
1550 	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
1551 	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
1552 	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
1553 		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_MASK_REG);
1554 		reg |= MVPP2_GMAC_INT_CAUSE_LINK_CHANGE;
1555 		mvpp2_gmac_write(sc, MVPP2_GMAC_INT_MASK_REG, reg);
1556 	}
1557 
1558 	if (sc->sc_gop_id == 0) {
1559 		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_MASK_REG);
1560 		reg |= MV_XLG_INTERRUPT_LINK_CHANGE;
1561 		mvpp2_xlg_write(sc, MV_XLG_INTERRUPT_MASK_REG, reg);
1562 	}
1563 
1564 	mvpp2_gop_intr_unmask(sc);
1565 
1566 	idx = OF_getindex(sc->sc_node, "link", "interrupt-names");
1567 	if (idx >= 0)
1568 		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1569 		    mvpp2_link_intr, sc, sc->sc_dev.dv_xname);
1570 	idx = OF_getindex(sc->sc_node, "hif0", "interrupt-names");
1571 	if (idx < 0)
1572 		idx = OF_getindex(sc->sc_node, "tx-cpu0", "interrupt-names");
1573 	if (idx >= 0)
1574 		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1575 		    mvpp2_intr, sc, sc->sc_dev.dv_xname);
1576 }
1577 
/*
 * Deferred SFP setup, run via config_mountroot() once the root
 * filesystem is mounted.  Queries the SFP module for its media and,
 * for the recognized serdes media types, switches the port's PHY
 * mode and enables in-band status reporting; other media leave the
 * device-tree phy-mode untouched.
 */
void
mvpp2_port_attach_sfp(struct device *self)
{
	struct mvpp2_port *sc = (struct mvpp2_port *)self;
	uint32_t reg;

	rw_enter(&mvpp2_sff_lock, RW_WRITE);
	sfp_disable(sc->sc_sfp);
	sfp_add_media(sc->sc_sfp, &sc->sc_mii);
	rw_exit(&mvpp2_sff_lock);

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_10G_SR:
	case IFM_10G_LR:
	case IFM_10G_LRM:
	case IFM_10G_ER:
	case IFM_10G_SFP_CU:
		sc->sc_phy_mode = PHY_MODE_10GBASER;
		sc->sc_mii.mii_media_status = IFM_AVALID;
		sc->sc_inband_status = 1;
		break;
	case IFM_2500_SX:
		sc->sc_phy_mode = PHY_MODE_2500BASEX;
		sc->sc_mii.mii_media_status = IFM_AVALID;
		sc->sc_inband_status = 1;
		break;
	case IFM_1000_CX:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->sc_phy_mode = PHY_MODE_1000BASEX;
		sc->sc_mii.mii_media_status = IFM_AVALID;
		sc->sc_inband_status = 1;
		break;
	}

	/* Enable link-change interrupts when running with in-band status. */
	if (sc->sc_inband_status) {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_MASK_REG);
		reg |= MVPP2_GMAC_INT_CAUSE_LINK_CHANGE;
		mvpp2_gmac_write(sc, MVPP2_GMAC_INT_MASK_REG, reg);
	}
}
1620 
/* Read a 32-bit register from the packet-processor base window. */
uint32_t
mvpp2_read(struct mvpp2_softc *sc, bus_addr_t addr)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh_base, addr);
}
1626 
/* Write a 32-bit register in the packet-processor base window. */
void
mvpp2_write(struct mvpp2_softc *sc, bus_addr_t addr, uint32_t data)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh_base, addr, data);
}
1632 
/* Read a per-port GMAC register (indexed by the port's GOP id). */
uint32_t
mvpp2_gmac_read(struct mvpp2_port *sc, bus_addr_t addr)
{
	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
	    MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr);
}
1639 
/* Write a per-port GMAC register (indexed by the port's GOP id). */
void
mvpp2_gmac_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
{
	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
	    MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr,
	    data);
}
1647 
/* Read a per-port XLG MAC register (indexed by the port's GOP id). */
uint32_t
mvpp2_xlg_read(struct mvpp2_port *sc, bus_addr_t addr)
{
	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
	    MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr);
}
1654 
/* Write a per-port XLG MAC register (indexed by the port's GOP id). */
void
mvpp2_xlg_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
{
	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
	    MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr,
	    data);
}
1662 
/* Read a per-port MPCS register (indexed by the port's GOP id). */
uint32_t
mvpp2_mpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
{
	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
	    MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr);
}
1669 
/* Write a per-port MPCS register (indexed by the port's GOP id). */
void
mvpp2_mpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
{
	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
	    MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr,
	    data);
}
1677 
/* Read a per-port XPCS register (indexed by the port's GOP id). */
uint32_t
mvpp2_xpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
{
	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
	    MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr);
}
1684 
/* Write a per-port XPCS register (indexed by the port's GOP id). */
void
mvpp2_xpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
{
	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
	    MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr,
	    data);
}
1692 
/*
 * Transmit start routine: drain the interface send queue into the
 * shared (per-softc) aggregated TX queue 0.
 */
void
mvpp2_start(struct ifnet *ifp)
{
	struct mvpp2_port *sc = ifp->if_softc;
	struct mvpp2_tx_queue *txq = &sc->sc->sc_aggr_txqs[0];
	struct mbuf *m;
	int error, idx;

	/* Nothing to do unless we are running, not stalled, and have link. */
	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	/* Track the producer index locally; commit it only at the end. */
	idx = txq->prod;
	while (txq->cnt < MVPP2_AGGR_TXQ_SIZE) {
		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		error = mvpp2_encap(sc, m, &idx);
		if (error == ENOBUFS) {
			/* Ring (or map) full: stop until TX completions. */
			m_freem(m); /* give up: drop it */
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		if (error == EFBIG) {
			/*
			 * Defensive: mvpp2_encap() does not currently
			 * return EFBIG -- TODO confirm if this path is
			 * reachable.
			 */
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

		/*
		 * On success the mbuf is owned by the TX ring until the
		 * completion path frees it, so it is still valid here.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (txq->prod != idx) {
		txq->prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
1741 
/*
 * Map an mbuf for DMA and fill TX descriptors in the aggregated queue.
 * On success, advances *idx to the new producer position and tells the
 * hardware how many descriptors were appended.  Returns ENOBUFS if the
 * mbuf cannot be mapped or the ring lacks room; the caller owns (and
 * frees) the mbuf on error.
 */
int
mvpp2_encap(struct mvpp2_port *sc, struct mbuf *m, int *idx)
{
	struct mvpp2_tx_queue *txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_tx_desc *txd;
	bus_dmamap_t map;
	uint32_t command;
	int i, current, first, last;

	first = last = current = *idx;
	map = txq->buf[current].mb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
		return ENOBUFS;

	/* Keep two descriptors of slack so prod never catches cons. */
	if (map->dm_nsegs > (MVPP2_AGGR_TXQ_SIZE - txq->cnt - 2)) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Checksum offload is not used; tell the hardware so. */
	command = MVPP2_TXD_L4_CSUM_NOT |
	    MVPP2_TXD_IP_CSUM_DISABLE;
	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &txq->descs[current];
		memset(txd, 0, sizeof(*txd));
		/*
		 * The buffer address is split into a 32-byte-aligned
		 * base and a 5-bit offset field.
		 */
		txd->buf_phys_addr_hw_cmd2 =
		    map->dm_segs[i].ds_addr & ~0x1f;
		txd->packet_offset =
		    map->dm_segs[i].ds_addr & 0x1f;
		txd->data_size = map->dm_segs[i].ds_len;
		txd->phys_txq = sc->sc_txqs[0].id;
		txd->command = command |
		    MVPP2_TXD_PADDING_DISABLE;
		/* Mark the first and last descriptor of the frame. */
		if (i == 0)
			txd->command |= MVPP2_TXD_F_DESC;
		if (i == (map->dm_nsegs - 1))
			txd->command |= MVPP2_TXD_L_DESC;

		bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring),
		    current * sizeof(*txd), sizeof(*txd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		last = current;
		current = (current + 1) % MVPP2_AGGR_TXQ_SIZE;
		KASSERT(current != txq->cons);
	}

	/*
	 * Park the mbuf and its loaded map on the last slot so the
	 * completion path frees everything when the final descriptor is
	 * done; the first slot inherits the last slot's idle map.
	 */
	KASSERT(txq->buf[last].mb_m == NULL);
	txq->buf[first].mb_map = txq->buf[last].mb_map;
	txq->buf[last].mb_map = map;
	txq->buf[last].mb_m = m;

	txq->cnt += map->dm_nsegs;
	*idx = current;

	/* Hand the new descriptors to the hardware. */
	mvpp2_write(sc->sc, MVPP2_AGGR_TXQ_UPDATE_REG, map->dm_nsegs);

	return 0;
}
1804 
/*
 * Interface ioctl handler.  Runs at splnet; unknown requests fall
 * through to ether_ioctl().
 */
int
mvpp2_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct mvpp2_port *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: just reprogram filters below. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				mvpp2_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				mvpp2_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = mvpp2_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCGIFSFFPAGE:
		/* Serialize SFP page access across all mvpp ports. */
		error = rw_enter(&mvpp2_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
		rw_exit(&mvpp2_sff_lock);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	/* ENETRESET means "reload the RX filter", not a real error. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			mvpp2_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
1862 
/*
 * Report RX ring information (SIOCGIFRXR): one if_rxring_info entry
 * per RX queue.  Returns ENOMEM if the temporary array cannot be
 * allocated, otherwise the result of if_rxr_info_ioctl().
 */
int
mvpp2_rxrinfo(struct mvpp2_port *sc, struct if_rxrinfo *ifri)
{
	struct mvpp2_rx_queue *rxq;
	struct if_rxring_info *ifrs, *ifr;
	unsigned int i;
	int error;

	ifrs = mallocarray(sc->sc_nrxq, sizeof(*ifrs), M_TEMP,
	    M_WAITOK|M_ZERO|M_CANFAIL);
	if (ifrs == NULL)
		return (ENOMEM);

	for (i = 0; i < sc->sc_nrxq; i++) {
		rxq = &sc->sc_rxqs[i];
		ifr = &ifrs[i];

		snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), "%u", i);
		ifr->ifr_size = MCLBYTES;
		ifr->ifr_info = rxq->rxring;
	}

	/* i == sc_nrxq here, matching the allocation above. */
	error = if_rxr_info_ioctl(ifri, i, ifrs);
	free(ifrs, M_TEMP, i * sizeof(*ifrs));

	return (error);
}
1890 
/*
 * TX watchdog: fires when if_timer expires with TX work outstanding.
 * Currently a stub that only logs; no reset/recovery is attempted.
 */
void
mvpp2_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}
1896 
1897 int
1898 mvpp2_media_change(struct ifnet *ifp)
1899 {
1900 	struct mvpp2_port *sc = ifp->if_softc;
1901 
1902 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1903 		mii_mediachg(&sc->sc_mii);
1904 
1905 	return (0);
1906 }
1907 
1908 void
1909 mvpp2_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1910 {
1911 	struct mvpp2_port *sc = ifp->if_softc;
1912 
1913 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1914 		mii_pollstat(&sc->sc_mii);
1915 
1916 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1917 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1918 }
1919 
/*
 * MII read: delegate to the shared MDIO controller backing this port.
 */
int
mvpp2_mii_readreg(struct device *self, int phy, int reg)
{
	struct mvpp2_port *sc = (void *)self;
	return sc->sc_mdio->md_readreg(sc->sc_mdio->md_cookie, phy, reg);
}
1926 
1927 void
1928 mvpp2_mii_writereg(struct device *self, int phy, int reg, int val)
1929 {
1930 	struct mvpp2_port *sc = (void *)self;
1931 	return sc->sc_mdio->md_writereg(sc->sc_mdio->md_cookie, phy, reg, val);
1932 }
1933 
/*
 * MII status-change callback: push the new link state into the MAC.
 */
void
mvpp2_mii_statchg(struct device *self)
{
	struct mvpp2_port *sc = (void *)self;
	mvpp2_port_change(sc);
}
1940 
/*
 * Derive link status from the MAC's in-band (SGMII/1000baseX/XLG)
 * status registers instead of an MII PHY, then apply it via
 * mvpp2_port_change().
 */
void
mvpp2_inband_statchg(struct mvpp2_port *sc)
{
	/* Preserve the configured subtype before rebuilding the word. */
	uint64_t subtype = IFM_SUBTYPE(sc->sc_mii.mii_media_active);
	uint32_t reg;

	sc->sc_mii.mii_media_status = IFM_AVALID;
	sc->sc_mii.mii_media_active = IFM_ETHER;

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		/* 10G path: XLG MAC status, always full duplex. */
		reg = mvpp2_xlg_read(sc, MV_XLG_MAC_PORT_STATUS_REG);
		if (reg & MV_XLG_MAC_PORT_STATUS_LINKSTATUS)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		sc->sc_mii.mii_media_active |= IFM_FDX;
		sc->sc_mii.mii_media_active |= subtype;
	} else {
		/* 1G path: GMAC status reports link, duplex and speed. */
		reg = mvpp2_gmac_read(sc, MVPP2_PORT_STATUS0_REG);
		if (reg & MVPP2_PORT_STATUS0_LINKUP)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		if (reg & MVPP2_PORT_STATUS0_FULLDX)
			sc->sc_mii.mii_media_active |= IFM_FDX;
		if (sc->sc_phy_mode == PHY_MODE_2500BASEX)
			sc->sc_mii.mii_media_active |= subtype;
		else if (sc->sc_phy_mode == PHY_MODE_1000BASEX)
			sc->sc_mii.mii_media_active |= subtype;
		else if (reg & MVPP2_PORT_STATUS0_GMIISPEED)
			sc->sc_mii.mii_media_active |= IFM_1000_T;
		else if (reg & MVPP2_PORT_STATUS0_MIISPEED)
			sc->sc_mii.mii_media_active |= IFM_100_TX;
		else
			sc->sc_mii.mii_media_active |= IFM_10_T;
	}

	mvpp2_port_change(sc);
}
1977 
/*
 * Apply the current media status to the MAC: cache the link state and,
 * unless the hardware tracks link in-band itself, force the link
 * up/down bits and speed/duplex configuration accordingly.
 */
void
mvpp2_port_change(struct mvpp2_port *sc)
{
	uint32_t reg;

	sc->sc_link = !!(sc->sc_mii.mii_media_status & IFM_ACTIVE);

	/* With in-band status the MAC follows the link by itself. */
	if (sc->sc_inband_status)
		return;

	if (sc->sc_link) {
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			/* Force link up on the XLG MAC. */
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKPASS;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			/* Force link up and program speed/duplex on GMAC. */
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
			reg |= MVPP2_GMAC_FORCE_LINK_PASS;
			reg &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			/* All gigabit-class subtypes use GMII speed. */
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_2500_KX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_2500_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_CX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_LX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_KX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T)
				reg |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX)
				reg |= MVPP2_GMAC_CONFIG_MII_SPEED;
			if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
				reg |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	} else {
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			/* Force link down on the XLG MAC. */
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			/* Force link down on the GMAC. */
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
			reg |= MVPP2_GMAC_FORCE_LINK_DOWN;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	}
}
2031 
/*
 * One-second periodic callout: drive the MII state machine and
 * reschedule ourselves.
 */
void
mvpp2_tick(void *arg)
{
	struct mvpp2_port *sc = arg;
	int s;

	/* mii_tick() must be called at splnet. */
	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}
2044 
/*
 * Link-change interrupt handler.  Reads the appropriate MAC's cause
 * register (XLG for 10G on GoP 0, GMAC otherwise) and, when in-band
 * status tracking is enabled, refreshes the link state.
 */
int
mvpp2_link_intr(void *arg)
{
	struct mvpp2_port *sc = arg;
	uint32_t reg;
	int event = 0;

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		/* Reading the cause register also acknowledges it --
		 * TODO confirm against the PPv2 datasheet. */
		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_CAUSE_REG);
		if (reg & MV_XLG_INTERRUPT_LINK_CHANGE)
			event = 1;
	} else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
	    sc->sc_phy_mode == PHY_MODE_SGMII ||
	    sc->sc_phy_mode == PHY_MODE_RGMII ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_CAUSE_REG);
		if (reg & MVPP2_GMAC_INT_CAUSE_LINK_CHANGE)
			event = 1;
	}

	if (event && sc->sc_inband_status)
		mvpp2_inband_statchg(sc);

	return (1);
}
2074 
/*
 * Main RX/TX interrupt handler: dispatch to the TX and RX processing
 * paths based on the per-port cause register.
 */
int
mvpp2_intr(void *arg)
{
	struct mvpp2_port *sc = arg;
	uint32_t reg;

	reg = mvpp2_read(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id));
	if (reg & MVPP2_CAUSE_MISC_SUM_MASK) {
		/* Acknowledge misc causes and clear the summary bit. */
		mvpp2_write(sc->sc, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id),
		    reg & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}
	/* TX completions: a bitmask of queues with done descriptors. */
	if (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_tx_proc(sc,
		    (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK) >>
		    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET);

	/* RX: a bitmask of queues with received descriptors. */
	if (reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_rx_proc(sc,
		    reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK);

	return (1);
}
2098 
2099 void
2100 mvpp2_tx_proc(struct mvpp2_port *sc, uint8_t queues)
2101 {
2102 	struct mvpp2_tx_queue *txq;
2103 	int i;
2104 
2105 	for (i = 0; i < sc->sc_ntxq; i++) {
2106 		txq = &sc->sc_txqs[i];
2107 		if ((queues & (1 << i)) == 0)
2108 			continue;
2109 		mvpp2_txq_proc(sc, txq);
2110 	}
2111 }
2112 
/*
 * Reclaim completed TX descriptors for one queue: free transmitted
 * mbufs from the aggregated ring, clear the watchdog when the ring
 * drains, and restart the send queue if it was stalled.
 */
void
mvpp2_txq_proc(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvpp2_tx_queue *aggr_txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_buf *txb;
	int i, idx, nsent;

	/* Reading the sent register returns the count since last read. */
	nsent = (mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id)) &
	    MVPP2_TRANSMITTED_COUNT_MASK) >>
	    MVPP2_TRANSMITTED_COUNT_OFFSET;

	for (i = 0; i < nsent; i++) {
		idx = aggr_txq->cons;
		KASSERT(idx < MVPP2_AGGR_TXQ_SIZE);

		txb = &aggr_txq->buf[idx];
		/* Only the last slot of a frame carries the mbuf. */
		if (txb->mb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
			    txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->mb_map);

			m_freem(txb->mb_m);
			txb->mb_m = NULL;
		}

		aggr_txq->cnt--;
		aggr_txq->cons = (aggr_txq->cons + 1) % MVPP2_AGGR_TXQ_SIZE;
	}

	/* No outstanding descriptors: cancel the TX watchdog. */
	if (aggr_txq->cnt == 0)
		ifp->if_timer = 0;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
2149 
2150 void
2151 mvpp2_rx_proc(struct mvpp2_port *sc, uint8_t queues)
2152 {
2153 	struct mvpp2_rx_queue *rxq;
2154 	int i;
2155 
2156 	for (i = 0; i < sc->sc_nrxq; i++) {
2157 		rxq = &sc->sc_rxqs[i];
2158 		if ((queues & (1 << i)) == 0)
2159 			continue;
2160 		mvpp2_rxq_proc(sc, rxq);
2161 	}
2162 
2163 	mvpp2_rx_refill(sc);
2164 }
2165 
/*
 * Drain received frames from one RX queue: hand the mbufs to the
 * network stack and queue the freed buffer indices for refill.  The
 * descriptor's buf_cookie encodes the BM pool in bits 16-31 and the
 * rxbuf[] index in the low 16 bits (see the KASSERTs below).
 */
void
mvpp2_rxq_proc(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mvpp2_rx_desc *rxd;
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	struct mbuf *m;
	uint64_t virt;
	uint32_t i, nrecv, pool;

	nrecv = mvpp2_rxq_received(sc, rxq->id);
	if (!nrecv)
		return;

	/* One buffer-manager pool per CPU. */
	pool = curcpu()->ci_cpuid;
	KASSERT(pool < sc->sc->sc_npools);
	bm = &sc->sc->sc_bm_pools[pool];

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < nrecv; i++) {
		rxd = &rxq->descs[rxq->cons];
		virt = rxd->buf_cookie_bm_qset_cls_info;
		KASSERT(((virt >> 16) & 0xffff) == pool);
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m != NULL);

		bus_dmamap_sync(sc->sc_dmat, rxb->mb_map, 0,
		    rxd->data_size, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->mb_map);

		/* Take ownership of the mbuf away from the pool slot. */
		m = rxb->mb_m;
		rxb->mb_m = NULL;

		m->m_pkthdr.len = m->m_len = rxd->data_size;
		/* Strip the Marvell header prepended by the hardware. */
		m_adj(m, MVPP2_MH_SIZE);
		ml_enqueue(&ml, m);

		/* Remember this slot so mvpp2_rx_refill() can restock it. */
		KASSERT(bm->freelist[bm->free_prod] == -1);
		bm->freelist[bm->free_prod] = virt & 0xffffffff;
		bm->free_prod = (bm->free_prod + 1) % MVPP2_BM_SIZE;

		rxq->cons = (rxq->cons + 1) % MVPP2_NRXDESC;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Tell the hardware how many descriptors we consumed/returned. */
	mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);

	if_input(ifp, &ml);
}
2224 
2225 /*
2226  * We have a pool per core, and since we should not assume that
2227  * RX buffers are always used in order, keep a list of rxbuf[]
2228  * indices that should be filled with an mbuf, if possible.
2229  */
void
mvpp2_rx_refill(struct mvpp2_port *sc)
{
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	uint64_t phys, virt;
	int pool;

	/* Refill only this CPU's pool; see comment above. */
	pool = curcpu()->ci_cpuid;
	KASSERT(pool < sc->sc->sc_npools);
	bm = &sc->sc->sc_bm_pools[pool];

	while (bm->free_cons != bm->free_prod) {
		KASSERT(bm->freelist[bm->free_cons] != -1);
		virt = bm->freelist[bm->free_cons];
		KASSERT(((virt >> 16) & 0xffff) == pool);
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m == NULL);

		rxb->mb_m = mvpp2_alloc_mbuf(sc->sc, rxb->mb_map);
		if (rxb->mb_m == NULL)
			/* Out of mbufs: retry on the next refill pass. */
			break;

		bm->freelist[bm->free_cons] = -1;
		bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;

		/*
		 * Return the buffer to the hardware BM pool.  The high
		 * address bits must be latched first, then the virtual
		 * cookie, then the physical address release register.
		 */
		phys = rxb->mb_map->dm_segs[0].ds_addr;
		mvpp2_write(sc->sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
		    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
		    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
		    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
		mvpp2_write(sc->sc, MVPP2_BM_VIRT_RLS_REG,
		    virt & 0xffffffff);
		mvpp2_write(sc->sc, MVPP2_BM_PHY_RLS_REG(pool),
		    phys & 0xffffffff);
	}
}
2268 
/*
 * Bring the interface up: program the parser with our MAC addresses,
 * initialize TX/RX queue hardware, configure media and the MAC, and
 * enable interrupts and traffic.
 */
void
mvpp2_up(struct mvpp2_port *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	if (sc->sc_sfp) {
		rw_enter(&mvpp2_sff_lock, RW_WRITE);
		sfp_enable(sc->sc_sfp);
		rw_exit(&mvpp2_sff_lock);
	}

	/* Accept broadcast and our own unicast address in the parser. */
	memcpy(sc->sc_cur_lladdr, sc->sc_lladdr, ETHER_ADDR_LEN);
	mvpp2_prs_mac_da_accept(sc, etherbroadcastaddr, 1);
	mvpp2_prs_mac_da_accept(sc, sc->sc_cur_lladdr, 1);
	mvpp2_prs_tag_mode_set(sc->sc, sc->sc_id, MVPP2_TAG_TYPE_MH);
	mvpp2_prs_def_flow(sc);

	for (i = 0; i < sc->sc_ntxq; i++)
		mvpp2_txq_hw_init(sc, &sc->sc_txqs[i]);

	mvpp2_tx_time_coal_set(sc, sc->sc_tx_time_coal);

	for (i = 0; i < sc->sc_nrxq; i++)
		mvpp2_rxq_hw_init(sc, &sc->sc_rxqs[i]);

	/* FIXME: rx buffer fill */

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	mvpp2_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	mvpp2_txp_max_tx_size_set(sc);

	/* XXX: single vector */
	mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id),
	    MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_MISC_SUM_MASK);
	mvpp2_interrupts_enable(sc, (1 << 0));

	mvpp2_mac_config(sc);
	mvpp2_egress_enable(sc);
	mvpp2_ingress_enable(sc);

	/* Kick off the 1-second MII tick. */
	timeout_add_sec(&sc->sc_tick, 1);
}
2322 
/*
 * Allocate and program one aggregated (per-softc) TX queue: DMA ring,
 * per-slot DMA maps, and the hardware descriptor base/size registers.
 */
void
mvpp2_aggr_txq_hw_init(struct mvpp2_softc *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int i;

	/* Descriptor ring must be 32-byte aligned for the hardware. */
	txq->ring = mvpp2_dmamem_alloc(sc,
	    MVPP2_AGGR_TXQ_SIZE * sizeof(struct mvpp2_tx_desc), 32);
	KASSERT(txq->ring != NULL);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_AGGR_TXQ_SIZE, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVPP2_AGGR_TXQ_SIZE; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	/* The hardware dictates the initial producer index. */
	txq->prod = mvpp2_read(sc, MVPP2_AGGR_TXQ_INDEX_REG(txq->id));
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_ADDR_REG(txq->id),
	    MVPP2_DMA_DVA(txq->ring) >> MVPP22_DESC_ADDR_OFFS);
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_SIZE_REG(txq->id),
	    MVPP2_AGGR_TXQ_SIZE);
}
2353 
/*
 * Allocate and program one per-port TX queue: descriptor ring, DMA
 * maps, indirect queue registers, prefetch buffer and WRR/EJP
 * scheduler configuration.
 */
void
mvpp2_txq_hw_init(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int desc, desc_per_txq;
	uint32_t reg;
	int i;

	txq->prod = txq->cons = txq->cnt = 0;

	/* Descriptor ring must be 32-byte aligned for the hardware. */
	txq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NTXDESC * sizeof(struct mvpp2_tx_desc), 32);
	KASSERT(txq->ring != NULL);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_NTXDESC, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVPP2_NTXDESC; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	/* Indirect access: select the queue, then program its registers. */
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(txq->ring));
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG,
	    MVPP2_NTXDESC & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(sc->sc, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_RSVD_CLR_REG,
	    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	reg = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG);
	reg &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PENDING_REG, reg);

	/* Carve out this queue's slice of the descriptor prefetch buffer. */
	desc_per_txq = 16;
	desc = (sc->sc_id * MVPP2_MAX_TXQ * desc_per_txq) +
	    (txq->log_id * desc_per_txq);

	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG,
	    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
	    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	mvpp2_write(sc->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(sc));

	reg = mvpp2_read(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	reg &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	reg |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	reg |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), reg);

	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
	    MVPP2_TXQ_TOKEN_SIZE_MAX);

	mvpp2_tx_pkts_coal_set(sc, txq, txq->done_pkts_coal);

	/* Clear the latched sent counter by reading it once. */
	mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id));
}
2420 
/*
 * Allocate and program one RX queue: descriptor ring, indirect queue
 * registers, coalescing parameters, and the initial descriptor count.
 */
void
mvpp2_rxq_hw_init(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	rxq->prod = rxq->cons = 0;

	/* Descriptor ring must be 32-byte aligned for the hardware. */
	rxq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NRXDESC * sizeof(struct mvpp2_rx_desc), 32);
	KASSERT(rxq->ring != NULL);
	rxq->descs = MVPP2_DMA_KVA(rxq->ring);

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring),
	    0, MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Indirect access: select the queue, then program its registers. */
	mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(rxq->ring) >> MVPP22_DESC_ADDR_OFFS);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, MVPP2_NRXDESC);
	mvpp2_write(sc->sc, MVPP2_RXQ_INDEX_REG, 0);
	mvpp2_rxq_offset_set(sc, rxq->id, 0);
	mvpp2_rx_pkts_coal_set(sc, rxq, rxq->pkts_coal);
	mvpp2_rx_time_coal_set(sc, rxq, rxq->time_coal);
	/* Hand all descriptors to the hardware. */
	mvpp2_rxq_status_update(sc, rxq->id, 0, MVPP2_NRXDESC);
}
2446 
/*
 * Put the port MACs into reset: always the GMAC, and additionally the
 * XLG MAC on GoP 0 (the only GoP with an XLG instance).
 */
void
mvpp2_mac_reset_assert(struct mvpp2_port *sc)
{
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
	    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) |
	    MVPP2_PORT_CTRL2_PORTMACRESET);
	if (sc->sc_gop_id == 0)
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG,
		    mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
		    ~MV_XLG_MAC_CTRL0_MACRESETN);
}
2458 
/*
 * Put the MPCS and XPCS blocks into reset.  Only GoP 0 has these
 * 10G PCS blocks; other GoPs are a no-op.
 */
void
mvpp2_pcs_reset_assert(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc_gop_id != 0)
		return;

	/* MPCS: set divider phase and hold all clock domains in reset. */
	reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
	reg |= MVPP22_MPCS_CLK_DIV_PHASE_SET;
	reg &= ~MVPP22_MPCS_TX_SD_CLK_RESET;
	reg &= ~MVPP22_MPCS_RX_SD_CLK_RESET;
	reg &= ~MVPP22_MPCS_MAC_CLK_RESET;
	mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	/* XPCS: assert the PCS reset. */
	reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
	reg &= ~MVPP22_XPCS_PCSRESET;
	mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
}
2477 
/*
 * Release the PCS block matching the configured PHY mode from reset:
 * MPCS for 10GBASE-R, XPCS for XAUI.  GoP 0 only.
 */
void
mvpp2_pcs_reset_deassert(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc_gop_id != 0)
		return;

	if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
		reg &= ~MVPP22_MPCS_CLK_DIV_PHASE_SET;
		reg |= MVPP22_MPCS_TX_SD_CLK_RESET;
		reg |= MVPP22_MPCS_RX_SD_CLK_RESET;
		reg |= MVPP22_MPCS_MAC_CLK_RESET;
		mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	} else if (sc->sc_phy_mode == PHY_MODE_XAUI) {
		reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
		reg |= MVPP22_XPCS_PCSRESET;
		mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
	}
}
2499 
/*
 * Full MAC (re)configuration sequence: force link down, disable and
 * reset the port, reconfigure the COMPHY/GoP/MAC for the current PHY
 * mode, then bring everything back up.  The ordering of these steps
 * follows the hardware bring-up sequence and must not be rearranged.
 */
void
mvpp2_mac_config(struct mvpp2_port *sc)
{
	uint32_t reg;

	/* Force link down on the GMAC (and XLG on GoP 0) first. */
	reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
	reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
	reg |= MVPP2_GMAC_FORCE_LINK_DOWN;
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
	if (sc->sc_gop_id == 0) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
		reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
		reg |= MV_XLG_MAC_CTRL0_FORCELINKDOWN;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
	}

	mvpp2_port_disable(sc);

	mvpp2_mac_reset_assert(sc);
	mvpp2_pcs_reset_assert(sc);

	/* Reconfigure the SerDes with the port quiesced. */
	mvpp2_gop_intr_mask(sc);
	mvpp2_comphy_config(sc, 0);

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI))
		mvpp2_xlg_config(sc);
	else
		mvpp2_gmac_config(sc);

	mvpp2_comphy_config(sc, 1);
	mvpp2_gop_config(sc);

	mvpp2_pcs_reset_deassert(sc);

	/* Select the active MAC (10G XLG vs. GMAC) on GoP 0. */
	if (sc->sc_gop_id == 0) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL3_REG);
		reg &= ~MV_XLG_MAC_CTRL3_MACMODESELECT_MASK;
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI)
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_10G;
		else
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_GMAC;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL3_REG, reg);
	}

	/* Maximum RX frame size (the hardware field is in half-words). */
	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL1_REG);
		reg &= ~MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL1_REG, reg);
	} else {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_CTRL_0_REG);
		reg &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MVPP2_GMAC_MAX_RX_SIZE_OFFS;
		mvpp2_gmac_write(sc, MVPP2_GMAC_CTRL_0_REG, reg);
	}

	mvpp2_gop_intr_unmask(sc);

	/* Release the GMAC from reset and wait for it to come out. */
	if (!(sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
		    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) &
		    ~MVPP2_PORT_CTRL2_PORTMACRESET);
		while (mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) &
		    MVPP2_PORT_CTRL2_PORTMACRESET)
			;
	}

	mvpp2_port_enable(sc);

	if (sc->sc_inband_status) {
		/* Let the MAC track link itself: clear both force bits. */
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
		reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
		reg &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
		mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		if (sc->sc_gop_id == 0) {
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		}
	} else
		mvpp2_port_change(sc);
}
2589 
2590 void
2591 mvpp2_xlg_config(struct mvpp2_port *sc)
2592 {
2593 	uint32_t ctl0, ctl4;
2594 
2595 	ctl0 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
2596 	ctl4 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL4_REG);
2597 
2598 	ctl0 |= MV_XLG_MAC_CTRL0_MACRESETN;
2599 	ctl4 &= ~MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK;
2600 	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_PFC_EN;
2601 	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN;
2602 
2603 	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, ctl0);
2604 	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL4_REG, ctl0);
2605 
2606 	/* Port reset */
2607 	while ((mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
2608 	    MV_XLG_MAC_CTRL0_MACRESETN) == 0)
2609 		;
2610 }
2611 
/*
 * Configure the GMAC (<= 2.5G) for the current PHY mode: PCS enable,
 * pin/clock selection, and in-band autonegotiation settings.
 */
void
mvpp2_gmac_config(struct mvpp2_port *sc)
{
	uint32_t ctl0, ctl2, ctl4, panc;

	/* Setup phy. */
	ctl0 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL0_REG);
	ctl2 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG);
	ctl4 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL4_REG);
	panc = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);

	/* Start from a clean slate for mode-dependent bits. */
	ctl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK |
	    MVPP2_GMAC_INBAND_AN_MASK);
	panc &= ~(MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
	    MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FC_ADV_EN |
	    MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
	    MVPP2_GMAC_IN_BAND_AUTONEG);

	switch (sc->sc_phy_mode) {
	case PHY_MODE_XAUI:
	case PHY_MODE_10GBASER:
		/* Handled by the XLG MAC; nothing to do here. */
		break;
	case PHY_MODE_2500BASEX:
	case PHY_MODE_1000BASEX:
		/* Serial PCS without in-band AN bit in ctl2. */
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	case PHY_MODE_SGMII:
		/* Serial PCS with in-band autonegotiation. */
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl2 |= MVPP2_GMAC_INBAND_AN_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	case PHY_MODE_RGMII:
	case PHY_MODE_RGMII_ID:
	case PHY_MODE_RGMII_RXID:
	case PHY_MODE_RGMII_TXID:
		/* Parallel GMII pins, no PCS. */
		ctl4 &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	}

	/* Use Auto-Negotiation for Inband Status only */
	if (sc->sc_inband_status) {
		panc &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		panc |= MVPP2_GMAC_IN_BAND_AUTONEG;
		/* TODO: read mode from SFP */
		if (sc->sc_phy_mode == PHY_MODE_SGMII) {
			/* SGMII */
			panc |= MVPP2_GMAC_AN_SPEED_EN;
			panc |= MVPP2_GMAC_AN_DUPLEX_EN;
		} else {
			/* 802.3z */
			ctl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
			panc |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			panc |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		}
	}

	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL0_REG, ctl0);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG, ctl2);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL4_REG, ctl4);
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, panc);
}
2686 
2687 #define COMPHY_BASE		0x120000
2688 #define COMPHY_SIP_POWER_ON	0x82000001
2689 #define COMPHY_SIP_POWER_OFF	0x82000002
2690 #define COMPHY_SPEED(x)		((x) << 2)
2691 #define  COMPHY_SPEED_1_25G		0 /* SGMII 1G */
2692 #define  COMPHY_SPEED_2_5G		1
2693 #define  COMPHY_SPEED_3_125G		2 /* SGMII 2.5G */
2694 #define  COMPHY_SPEED_5G		3
2695 #define  COMPHY_SPEED_5_15625G		4 /* XFI 5G */
2696 #define  COMPHY_SPEED_6G		5
2697 #define  COMPHY_SPEED_10_3125G		6 /* XFI 10G */
2698 #define COMPHY_UNIT(x)		((x) << 8)
2699 #define COMPHY_MODE(x)		((x) << 12)
2700 #define  COMPHY_MODE_SATA		1
2701 #define  COMPHY_MODE_SGMII		2 /* SGMII 1G */
2702 #define  COMPHY_MODE_HS_SGMII		3 /* SGMII 2.5G */
2703 #define  COMPHY_MODE_USB3H		4
2704 #define  COMPHY_MODE_USB3D		5
2705 #define  COMPHY_MODE_PCIE		6
2706 #define  COMPHY_MODE_RXAUI		7
2707 #define  COMPHY_MODE_XFI		8
2708 #define  COMPHY_MODE_SFI		9
2709 #define  COMPHY_MODE_USB3		10
2710 #define  COMPHY_MODE_AP			11
2711 
/*
 * Power the port's COMPHY SerDes lane on or off via a secure-monitor
 * (SMC) call, with mode/speed derived from the configured PHY mode.
 * Ports without a "phys" property, or with an unsupported mode, are
 * left untouched.
 */
void
mvpp2_comphy_config(struct mvpp2_port *sc, int on)
{
	int node, phys[2], lane, unit;
	uint32_t mode;

	/* "phys" is a <phandle, unit> pair in the device tree. */
	if (OF_getpropintarray(sc->sc_node, "phys", phys, sizeof(phys)) !=
	    sizeof(phys))
		return;
	node = OF_getnodebyphandle(phys[0]);
	if (!node)
		return;

	lane = OF_getpropint(node, "reg", 0);
	unit = phys[1];

	/* Map the PHY mode to a COMPHY mode/speed word. */
	switch (sc->sc_phy_mode) {
	case PHY_MODE_XAUI:
		mode = COMPHY_MODE(COMPHY_MODE_RXAUI) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_10GBASER:
		mode = COMPHY_MODE(COMPHY_MODE_XFI) |
		    COMPHY_SPEED(COMPHY_SPEED_10_3125G) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_2500BASEX:
		mode = COMPHY_MODE(COMPHY_MODE_HS_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_3_125G) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_1000BASEX:
	case PHY_MODE_SGMII:
		mode = COMPHY_MODE(COMPHY_MODE_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_1_25G) |
		    COMPHY_UNIT(unit);
		break;
	default:
		/* No COMPHY handling for other modes. */
		return;
	}

	if (on)
		smc_call(COMPHY_SIP_POWER_ON, sc->sc->sc_ioh_paddr + COMPHY_BASE,
		    lane, mode);
	else
		smc_call(COMPHY_SIP_POWER_OFF, sc->sc->sc_ioh_paddr + COMPHY_BASE,
		    lane, 0);
}
2760 
/*
 * Configure the Group Of Ports (GOP) glue logic in the system
 * controller for this port's PHY mode, then reset and enable the
 * port's GOP block.  Requires the "system controller" regmap; ports
 * without one (sc_rm == NULL) are left untouched.
 */
void
mvpp2_gop_config(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc->sc_rm == NULL)
		return;

	if (sc->sc_phy_mode == PHY_MODE_RGMII ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
		/* GOP 0 has no RGMII support; nothing to do for it. */
		if (sc->sc_gop_id == 0)
			return;
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
		reg |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
		regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_CTRL0);
		if (sc->sc_gop_id == 2)
			reg |= GENCONF_CTRL0_PORT0_RGMII |
			    GENCONF_CTRL0_PORT1_RGMII;
		else if (sc->sc_gop_id == 3)
			reg |= GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write_4(sc->sc->sc_rm, GENCONF_CTRL0, reg);
	} else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
	    sc->sc_phy_mode == PHY_MODE_SGMII) {
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
		reg |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
		    GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
		regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);
		/* GOP 2/3 default to RGMII; switch them to the serial mode. */
		if (sc->sc_gop_id > 1) {
			reg = regmap_read_4(sc->sc->sc_rm, GENCONF_CTRL0);
			if (sc->sc_gop_id == 2)
				reg &= ~GENCONF_CTRL0_PORT0_RGMII;
			else if (sc->sc_gop_id == 3)
				reg &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
			regmap_write_4(sc->sc->sc_rm, GENCONF_CTRL0, reg);
		}
	} else if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
		/* Only GOP 0 has the XPCS/MPCS blocks for 10G. */
		if (sc->sc_gop_id != 0)
			return;
		reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
		reg &= ~MVPP22_XPCS_PCSMODE_MASK;
		reg &= ~MVPP22_XPCS_LANEACTIVE_MASK;
		reg |= 2 << MVPP22_XPCS_LANEACTIVE_OFFS;
		mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS40G_COMMON_CONTROL);
		reg &= ~MVPP22_MPCS_FORWARD_ERROR_CORRECTION_MASK;
		mvpp2_mpcs_write(sc, MVPP22_MPCS40G_COMMON_CONTROL, reg);
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
		reg &= ~MVPP22_MPCS_CLK_DIVISION_RATIO_MASK;
		reg |= MVPP22_MPCS_CLK_DIVISION_RATIO_DEFAULT;
		mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	} else
		return;

	/* Reset and enable the GOP block for this port. */
	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL1);
	reg |= GENCONF_PORT_CTRL1_RESET(sc->sc_gop_id) |
	    GENCONF_PORT_CTRL1_EN(sc->sc_gop_id);
	regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL1, reg);

	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
	reg |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);

	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_SOFT_RESET1);
	reg |= GENCONF_SOFT_RESET1_GOP;
	regmap_write_4(sc->sc->sc_rm, GENCONF_SOFT_RESET1, reg);
}
2831 
2832 void
2833 mvpp2_gop_intr_mask(struct mvpp2_port *sc)
2834 {
2835 	uint32_t reg;
2836 
2837 	if (sc->sc_gop_id == 0) {
2838 		reg = mvpp2_xlg_read(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
2839 		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG;
2840 		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG;
2841 		mvpp2_xlg_write(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg);
2842 	}
2843 
2844 	reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_SUM_MASK_REG);
2845 	reg &= ~MVPP2_GMAC_INT_SUM_CAUSE_LINK_CHANGE;
2846 	mvpp2_gmac_write(sc, MVPP2_GMAC_INT_SUM_MASK_REG, reg);
2847 }
2848 
2849 void
2850 mvpp2_gop_intr_unmask(struct mvpp2_port *sc)
2851 {
2852 	uint32_t reg;
2853 
2854 	reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_SUM_MASK_REG);
2855 	reg |= MVPP2_GMAC_INT_SUM_CAUSE_LINK_CHANGE;
2856 	mvpp2_gmac_write(sc, MVPP2_GMAC_INT_SUM_MASK_REG, reg);
2857 
2858 	if (sc->sc_gop_id == 0) {
2859 		reg = mvpp2_xlg_read(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
2860 		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
2861 		    sc->sc_phy_mode == PHY_MODE_XAUI)
2862 			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG;
2863 		else
2864 			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG;
2865 		mvpp2_xlg_write(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg);
2866 	}
2867 }
2868 
/*
 * Bring the interface down: stop the MAC/PCS, drain and tear down all
 * TX and RX queues, drop the station address from the parser and
 * disable the SFP cage if present.  The teardown order mirrors the
 * bring-up path and should not be reordered.
 */
void
mvpp2_down(struct mvpp2_port *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg;
	int i;

	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	mvpp2_egress_disable(sc);
	mvpp2_ingress_disable(sc);

	mvpp2_mac_reset_assert(sc);
	mvpp2_pcs_reset_assert(sc);

	/* XXX: single vector */
	mvpp2_interrupts_disable(sc, (1 << 0));
	mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id), 0);

	/* Flush pending TX traffic while the queues are drained. */
	reg = mvpp2_read(sc->sc, MVPP2_TX_PORT_FLUSH_REG);
	reg |= MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
	mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);

	for (i = 0; i < sc->sc_ntxq; i++)
		mvpp2_txq_hw_deinit(sc, &sc->sc_txqs[i]);

	reg &= ~MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
	mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);

	for (i = 0; i < sc->sc_nrxq; i++)
		mvpp2_rxq_hw_deinit(sc, &sc->sc_rxqs[i]);

	/* Stop accepting frames for our station address. */
	mvpp2_prs_mac_da_accept(sc, sc->sc_cur_lladdr, 0);

	if (sc->sc_sfp) {
		rw_enter(&mvpp2_sff_lock, RW_WRITE);
		sfp_disable(sc->sc_sfp);
		rw_exit(&mvpp2_sff_lock);
	}
}
2913 
/*
 * Tear down one hardware TX queue: drain it, wait (bounded) for all
 * pending packets to leave, clear its registers, free all in-flight
 * mbufs and DMA maps, and release the descriptor ring.
 */
void
mvpp2_txq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int i, pending;
	uint32_t reg;

	/* Select the queue (indirect access) and enable drain mode. */
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	reg = mvpp2_read(sc->sc, MVPP2_TXQ_PREF_BUF_REG);
	reg |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);

	/*
	 * the queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	i = 0;
	do {
		if (i >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			printf("%s: port %d: cleaning queue %d timed out\n",
			    sc->sc_dev.dv_xname, sc->sc_id, txq->log_id);
			break;
		}
		delay(1000);
		i++;

		pending = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG) &
		    MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	reg &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);

	/* Clear the queue's registers; reading SENT clears the counter. */
	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG, 0);
	mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id));

	/* Release any mbufs still attached to the ring. */
	for (i = 0; i < MVPP2_NTXDESC; i++) {
		txb = &txq->buf[i];
		if (txb->mb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
			    txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->mb_map);
			m_freem(txb->mb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->mb_map);
	}

	mvpp2_dmamem_free(sc->sc, txq->ring);
	free(txq->buf, M_DEVBUF, sizeof(struct mvpp2_buf) *
	    MVPP2_NTXDESC);
}
2968 
/*
 * Drop every received frame still pending on the RX queue by handing
 * each buffer straight back to its buffer-manager pool, then tell the
 * hardware all of them were processed.
 */
void
mvpp2_rxq_hw_drop(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	struct mvpp2_rx_desc *rxd;
	struct mvpp2_bm_pool *bm;
	uint64_t phys, virt;
	uint32_t i, nrecv, pool;
	struct mvpp2_buf *rxb;

	nrecv = mvpp2_rxq_received(sc, rxq->id);
	if (!nrecv)
		return;

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < nrecv; i++) {
		rxd = &rxq->descs[rxq->cons];
		/* The cookie encodes pool (bits 31:16) and buffer index. */
		virt = rxd->buf_cookie_bm_qset_cls_info;
		pool = (virt >> 16) & 0xffff;
		KASSERT(pool < sc->sc->sc_npools);
		bm = &sc->sc->sc_bm_pools[pool];
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m != NULL);
		virt &= 0xffffffff;
		phys = rxb->mb_map->dm_segs[0].ds_addr;
		/*
		 * Release the buffer back to its pool: high address bits
		 * first, then virtual and physical low words.
		 * NOTE(review): virt was just masked to 32 bits, so the
		 * "virt >> 32" term is always 0 here — presumably fine
		 * because the cookie carries no high bits; confirm against
		 * the refill path.
		 */
		mvpp2_write(sc->sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
		    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
		    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
		    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
		mvpp2_write(sc->sc, MVPP2_BM_VIRT_RLS_REG,
		    virt & 0xffffffff);
		mvpp2_write(sc->sc, MVPP2_BM_PHY_RLS_REG(pool),
		    phys & 0xffffffff);
		rxq->cons = (rxq->cons + 1) % MVPP2_NRXDESC;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Report all nrecv descriptors as both processed and refilled. */
	mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);
}
3014 
/*
 * Tear down one hardware RX queue: return all pending buffers to the
 * buffer manager, clear the queue's registers and free the ring.
 */
void
mvpp2_rxq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_hw_drop(sc, rxq);

	/* Indirect access: select the queue, then clear its state. */
	mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, 0);

	mvpp2_dmamem_free(sc->sc, rxq->ring);
}
3027 
3028 void
3029 mvpp2_rxq_long_pool_set(struct mvpp2_port *port, int lrxq, int pool)
3030 {
3031 	uint32_t val;
3032 	int prxq;
3033 
3034 	/* get queue physical ID */
3035 	prxq = port->sc_rxqs[lrxq].id;
3036 
3037 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
3038 	val &= ~MVPP2_RXQ_POOL_LONG_MASK;
3039 	val |= ((pool << MVPP2_RXQ_POOL_LONG_OFFS) & MVPP2_RXQ_POOL_LONG_MASK);
3040 
3041 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
3042 }
3043 
3044 void
3045 mvpp2_rxq_short_pool_set(struct mvpp2_port *port, int lrxq, int pool)
3046 {
3047 	uint32_t val;
3048 	int prxq;
3049 
3050 	/* get queue physical ID */
3051 	prxq = port->sc_rxqs[lrxq].id;
3052 
3053 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
3054 	val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
3055 	val |= ((pool << MVPP2_RXQ_POOL_SHORT_OFFS) & MVPP2_RXQ_POOL_SHORT_MASK);
3056 
3057 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
3058 }
3059 
3060 void
3061 mvpp2_iff(struct mvpp2_port *sc)
3062 {
3063 	/* FIXME: multicast handling */
3064 
3065 	if (memcmp(sc->sc_cur_lladdr, sc->sc_lladdr, ETHER_ADDR_LEN) != 0) {
3066 		mvpp2_prs_mac_da_accept(sc, sc->sc_cur_lladdr, 0);
3067 		memcpy(sc->sc_cur_lladdr, sc->sc_lladdr, ETHER_ADDR_LEN);
3068 		mvpp2_prs_mac_da_accept(sc, sc->sc_cur_lladdr, 1);
3069 	}
3070 }
3071 
3072 struct mvpp2_dmamem *
3073 mvpp2_dmamem_alloc(struct mvpp2_softc *sc, bus_size_t size, bus_size_t align)
3074 {
3075 	struct mvpp2_dmamem *mdm;
3076 	int nsegs;
3077 
3078 	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
3079 	mdm->mdm_size = size;
3080 
3081 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
3082 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
3083 		goto mdmfree;
3084 
3085 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
3086 	    &nsegs, BUS_DMA_WAITOK) != 0)
3087 		goto destroy;
3088 
3089 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
3090 	    &mdm->mdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
3091 		goto free;
3092 
3093 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
3094 	    NULL, BUS_DMA_WAITOK) != 0)
3095 		goto unmap;
3096 
3097 	bzero(mdm->mdm_kva, size);
3098 
3099 	return (mdm);
3100 
3101 unmap:
3102 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
3103 free:
3104 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
3105 destroy:
3106 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
3107 mdmfree:
3108 	free(mdm, M_DEVBUF, 0);
3109 
3110 	return (NULL);
3111 }
3112 
3113 void
3114 mvpp2_dmamem_free(struct mvpp2_softc *sc, struct mvpp2_dmamem *mdm)
3115 {
3116 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
3117 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
3118 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
3119 	free(mdm, M_DEVBUF, 0);
3120 }
3121 
3122 struct mbuf *
3123 mvpp2_alloc_mbuf(struct mvpp2_softc *sc, bus_dmamap_t map)
3124 {
3125 	struct mbuf *m = NULL;
3126 
3127 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
3128 	if (!m)
3129 		return (NULL);
3130 	m->m_len = m->m_pkthdr.len = MCLBYTES;
3131 
3132 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
3133 		printf("%s: could not load mbuf DMA map", DEVNAME(sc));
3134 		m_freem(m);
3135 		return (NULL);
3136 	}
3137 
3138 	bus_dmamap_sync(sc->sc_dmat, map, 0,
3139 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
3140 
3141 	return (m);
3142 }
3143 
/* Enable RX/TX interrupts for this port on the CPUs in cpu_mask. */
void
mvpp2_interrupts_enable(struct mvpp2_port *port, int cpu_mask)
{
	mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
	    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
}
3150 
/* Disable RX/TX interrupts for this port on the CPUs in cpu_mask. */
void
mvpp2_interrupts_disable(struct mvpp2_port *port, int cpu_mask)
{
	mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
	    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
}
3157 
/* Egress port number: physical ports follow the MVPP2_MAX_TCONT TConts. */
int
mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->sc_id;
}
3163 
3164 int
3165 mvpp2_txq_phys(int port, int txq)
3166 {
3167 	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
3168 }
3169 
/*
 * Program the per-port egress scheduler and RX defaults, and mask all
 * port interrupts until the port is brought up.
 */
void
mvpp2_defaults_set(struct mvpp2_port *port)
{
	int val, queue;

	/* Indirect access: select this port's egress scheduler. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Clear all per-queue token-bucket counters. */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Refill period of 1 us expressed in core clock (tclk) cycles. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PERIOD_REG, port->sc->sc_tclk /
	    (1000 * 1000));
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* set maximum_low_latency_packet_size value to 256 */
	mvpp2_write(port->sc, MVPP2_RX_CTRL_REG(port->sc_id),
	    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
	    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port, (0xf << 0));
}
3200 
3201 void
3202 mvpp2_ingress_enable(struct mvpp2_port *port)
3203 {
3204 	uint32_t val;
3205 	int lrxq, queue;
3206 
3207 	for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
3208 		queue = port->sc_rxqs[lrxq].id;
3209 		val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
3210 		val &= ~MVPP2_RXQ_DISABLE_MASK;
3211 		mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
3212 	}
3213 }
3214 
3215 void
3216 mvpp2_ingress_disable(struct mvpp2_port *port)
3217 {
3218 	uint32_t val;
3219 	int lrxq, queue;
3220 
3221 	for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
3222 		queue = port->sc_rxqs[lrxq].id;
3223 		val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
3224 		val |= MVPP2_RXQ_DISABLE_MASK;
3225 		mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
3226 	}
3227 }
3228 
3229 void
3230 mvpp2_egress_enable(struct mvpp2_port *port)
3231 {
3232 	struct mvpp2_tx_queue *txq;
3233 	uint32_t qmap;
3234 	int queue;
3235 
3236 	qmap = 0;
3237 	for (queue = 0; queue < port->sc_ntxq; queue++) {
3238 		txq = &port->sc_txqs[queue];
3239 
3240 		if (txq->descs != NULL) {
3241 			qmap |= (1 << queue);
3242 		}
3243 	}
3244 
3245 	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3246 	    mvpp2_egress_port(port));
3247 	mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
3248 }
3249 
/*
 * Disable transmission on all of the port's TX queues and wait
 * (bounded) for the scheduler to report them drained.
 */
void
mvpp2_egress_disable(struct mvpp2_port *port)
{
	uint32_t reg_data;
	int i;

	/* Indirect access: select this port's egress scheduler. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));
	/* Writing the enabled-queue bitmap shifted disables those queues. */
	reg_data = (mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG)) &
	    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data)
		mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG,
		    reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET);

	/* Poll until no queue is enabled anymore, 1 ms per iteration. */
	i = 0;
	do {
		if (i >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			printf("%s: tx stop timed out, status=0x%08x\n",
			    port->sc_dev.dv_xname, reg_data);
			break;
		}
		delay(1000);
		i++;
		reg_data = mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
3276 
3277 void
3278 mvpp2_port_enable(struct mvpp2_port *port)
3279 {
3280 	uint32_t val;
3281 
3282 	if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
3283 	    port->sc_phy_mode == PHY_MODE_XAUI)) {
3284 		val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
3285 		val |= MV_XLG_MAC_CTRL0_PORTEN;
3286 		val &= ~MV_XLG_MAC_CTRL0_MIBCNTDIS;
3287 		mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
3288 	} else {
3289 		val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
3290 		val |= MVPP2_GMAC_PORT_EN_MASK;
3291 		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3292 		mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
3293 	}
3294 }
3295 
3296 void
3297 mvpp2_port_disable(struct mvpp2_port *port)
3298 {
3299 	uint32_t val;
3300 
3301 	if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
3302 	    port->sc_phy_mode == PHY_MODE_XAUI)) {
3303 		val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
3304 		val &= ~MV_XLG_MAC_CTRL0_PORTEN;
3305 		mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
3306 	}
3307 
3308 	val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
3309 	val &= ~MVPP2_GMAC_PORT_EN_MASK;
3310 	mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
3311 }
3312 
3313 int
3314 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
3315 {
3316 	uint32_t val = mvpp2_read(port->sc, MVPP2_RXQ_STATUS_REG(rxq_id));
3317 
3318 	return val & MVPP2_RXQ_OCCUPIED_MASK;
3319 }
3320 
3321 void
3322 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
3323     int used_count, int free_count)
3324 {
3325 	uint32_t val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
3326 	mvpp2_write(port->sc, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
3327 }
3328 
3329 void
3330 mvpp2_rxq_offset_set(struct mvpp2_port *port, int prxq, int offset)
3331 {
3332 	uint32_t val;
3333 
3334 	offset = offset >> 5;
3335 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
3336 	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
3337 	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
3338 	    MVPP2_RXQ_PACKET_OFFSET_MASK);
3339 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
3340 }
3341 
/*
 * Program the egress scheduler's MTU and make sure the port and
 * per-queue token bucket sizes are at least that large.
 */
void
mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	uint32_t val, size, mtu;
	int txq;

	mtu = MCLBYTES * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong token bucket update: set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* indirect access to registers */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));

	/* set MTU */
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXqs token size must be larger than MTU */
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	/* Grow each queue's token size to the (tripled) MTU if needed. */
	for (txq = 0; txq < port->sc_ntxq; txq++) {
		val = mvpp2_read(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), val);
		}
	}
}
3387 
3388 void
3389 mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3390     uint32_t pkts)
3391 {
3392 	rxq->pkts_coal =
3393 	    pkts <= MVPP2_OCCUPIED_THRESH_MASK ?
3394 	    pkts : MVPP2_OCCUPIED_THRESH_MASK;
3395 
3396 	mvpp2_write(port->sc, MVPP2_RXQ_NUM_REG, rxq->id);
3397 	mvpp2_write(port->sc, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal);
3398 
3399 }
3400 
3401 void
3402 mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
3403     uint32_t pkts)
3404 {
3405 	txq->done_pkts_coal =
3406 	    pkts <= MVPP2_TRANSMITTED_THRESH_MASK ?
3407 	    pkts : MVPP2_TRANSMITTED_THRESH_MASK;
3408 
3409 	mvpp2_write(port->sc, MVPP2_TXQ_NUM_REG, txq->id);
3410 	mvpp2_write(port->sc, MVPP2_TXQ_THRESH_REG,
3411 	    txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET);
3412 }
3413 
3414 void
3415 mvpp2_rx_time_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3416     uint32_t usec)
3417 {
3418 	uint32_t val;
3419 
3420 	val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
3421 	mvpp2_write(port->sc, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
3422 
3423 	rxq->time_coal = usec;
3424 }
3425 
3426 void
3427 mvpp2_tx_time_coal_set(struct mvpp2_port *port, uint32_t usec)
3428 {
3429 	uint32_t val;
3430 
3431 	val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
3432 	mvpp2_write(port->sc, MVPP2_ISR_TX_THRESHOLD_REG(port->sc_id), val);
3433 
3434 	port->sc_tx_time_coal = usec;
3435 }
3436 
3437 void
3438 mvpp2_prs_shadow_ri_set(struct mvpp2_softc *sc, int index,
3439     uint32_t ri, uint32_t ri_mask)
3440 {
3441 	sc->sc_prs_shadow[index].ri_mask = ri_mask;
3442 	sc->sc_prs_shadow[index].ri = ri;
3443 }
3444 
3445 void
3446 mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
3447 {
3448 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
3449 
3450 	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
3451 	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
3452 }
3453 
3454 void
3455 mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, uint32_t port, int add)
3456 {
3457 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3458 
3459 	if (add)
3460 		pe->tcam.byte[enable_off] &= ~(1 << port);
3461 	else
3462 		pe->tcam.byte[enable_off] |= (1 << port);
3463 }
3464 
3465 void
3466 mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, uint32_t port_mask)
3467 {
3468 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3469 	uint8_t mask = MVPP2_PRS_PORT_MASK;
3470 
3471 	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
3472 	pe->tcam.byte[enable_off] &= ~mask;
3473 	pe->tcam.byte[enable_off] |= ~port_mask & MVPP2_PRS_PORT_MASK;
3474 }
3475 
3476 uint32_t
3477 mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
3478 {
3479 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3480 
3481 	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
3482 }
3483 
3484 void
3485 mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, uint32_t offs,
3486     uint8_t byte, uint8_t enable)
3487 {
3488 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
3489 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
3490 }
3491 
3492 void
3493 mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, uint32_t offs,
3494     uint8_t *byte, uint8_t *enable)
3495 {
3496 	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
3497 	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
3498 }
3499 
3500 int
3501 mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offset, uint16_t data)
3502 {
3503 	int byte_offset = MVPP2_PRS_TCAM_DATA_BYTE(offset);
3504 	uint16_t tcam_data;
3505 
3506 	tcam_data = (pe->tcam.byte[byte_offset + 1] << 8) |
3507 	    pe->tcam.byte[byte_offset];
3508 	return tcam_data == data;
3509 }
3510 
3511 void
3512 mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t enable)
3513 {
3514 	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
3515 
3516 	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
3517 		if (!(enable & BIT(i)))
3518 			continue;
3519 
3520 		if (bits & BIT(i))
3521 			pe->tcam.byte[ai_idx] |= BIT(i);
3522 		else
3523 			pe->tcam.byte[ai_idx] &= ~BIT(i);
3524 	}
3525 
3526 	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
3527 }
3528 
/* Return the TCAM additional-info (AI) byte of a parser entry. */
int
mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}
3534 
3535 void
3536 mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *pe, uint32_t data_offset,
3537     uint32_t *word, uint32_t *enable)
3538 {
3539 	int index, position;
3540 	uint8_t byte, mask;
3541 
3542 	for (index = 0; index < 4; index++) {
3543 		position = (data_offset * sizeof(int)) + index;
3544 		mvpp2_prs_tcam_data_byte_get(pe, position, &byte, &mask);
3545 		((uint8_t *)word)[index] = byte;
3546 		((uint8_t *)enable)[index] = mask;
3547 	}
3548 }
3549 
3550 void
3551 mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, uint32_t offs,
3552     uint16_t ether_type)
3553 {
3554 	mvpp2_prs_tcam_data_byte_set(pe, offs + 0, ether_type >> 8, 0xff);
3555 	mvpp2_prs_tcam_data_byte_set(pe, offs + 1, ether_type & 0xff, 0xff);
3556 }
3557 
/* OR 'val' into the SRAM bitfield starting at bit position 'bit'. */
void
mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
{
	pe->sram.byte[bit / 8] |= (val << (bit % 8));
}
3563 
/* Clear the bits of 'val' in the SRAM bitfield starting at 'bit'. */
void
mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
{
	pe->sram.byte[bit / 8] &= ~(val << (bit % 8));
}
3569 
3570 void
3571 mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
3572 {
3573 	int i;
3574 
3575 	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
3576 		if (!(mask & BIT(i)))
3577 			continue;
3578 
3579 		if (bits & BIT(i))
3580 			mvpp2_prs_sram_bits_set(pe,
3581 			    MVPP2_PRS_SRAM_RI_OFFS + i, 1);
3582 		else
3583 			mvpp2_prs_sram_bits_clear(pe,
3584 			    MVPP2_PRS_SRAM_RI_OFFS + i, 1);
3585 
3586 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
3587 	}
3588 }
3589 
/* Return the SRAM result-info (RI) word of a parser entry. */
int
mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
}
3595 
3596 void
3597 mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
3598 {
3599 	int i;
3600 
3601 	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
3602 		if (!(mask & BIT(i)))
3603 			continue;
3604 
3605 		if (bits & BIT(i))
3606 			mvpp2_prs_sram_bits_set(pe,
3607 			    MVPP2_PRS_SRAM_AI_OFFS + i, 1);
3608 		else
3609 			mvpp2_prs_sram_bits_clear(pe,
3610 			    MVPP2_PRS_SRAM_AI_OFFS + i, 1);
3611 
3612 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
3613 	}
3614 }
3615 
3616 int
3617 mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
3618 {
3619 	uint8_t bits;
3620 	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
3621 	int ai_en_off = ai_off + 1;
3622 	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
3623 
3624 	bits = (pe->sram.byte[ai_off] >> ai_shift) |
3625 	    (pe->sram.byte[ai_en_off] << (8 - ai_shift));
3626 
3627 	return bits;
3628 }
3629 
3630 void
3631 mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, uint32_t op)
3632 {
3633 	if (shift < 0) {
3634 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
3635 		shift = -shift;
3636 	} else {
3637 		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
3638 	}
3639 
3640 	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
3641 	    shift & MVPP2_PRS_SRAM_SHIFT_MASK;
3642 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
3643 	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
3644 	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
3645 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
3646 }
3647 
/*
 * Program the SRAM user-defined-field (UDF) offset and operation.
 * Both the UDF and the op-select fields straddle byte boundaries, so
 * the overflow bits are written into the following byte by hand.
 */
void
mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, uint32_t type, int offset,
    uint32_t op)
{
	uint8_t udf_byte, udf_byte_offset;
	uint8_t op_sel_udf_byte, op_sel_udf_byte_offset;

	/* Byte index and bit shift of the spill-over parts of each field. */
	udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
	    MVPP2_PRS_SRAM_UDF_BITS);
	udf_byte_offset = (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8));
	op_sel_udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
	    MVPP2_PRS_SRAM_OP_SEL_UDF_BITS);
	op_sel_udf_byte_offset = (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8));

	/* The sign is stored separately; the field holds the magnitude. */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = -offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
	    MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	/* Write the bits that spilled into the next byte. */
	pe->sram.byte[udf_byte] &= ~(MVPP2_PRS_SRAM_UDF_MASK >> udf_byte_offset);
	pe->sram.byte[udf_byte] |= (offset >> udf_byte_offset);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
	    MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
	    MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
	/* Same spill-over handling for the op-select field. */
	pe->sram.byte[op_sel_udf_byte] &= ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
	    op_sel_udf_byte_offset);
	pe->sram.byte[op_sel_udf_byte] |= (op >> op_sel_udf_byte_offset);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
3685 
3686 void
3687 mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
3688 {
3689 	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
3690 
3691 	mvpp2_prs_sram_bits_clear(pe, sram_next_off, MVPP2_PRS_SRAM_NEXT_LU_MASK);
3692 	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
3693 }
3694 
3695 void
3696 mvpp2_prs_shadow_set(struct mvpp2_softc *sc, int index, uint32_t lu)
3697 {
3698 	sc->sc_prs_shadow[index].valid = 1;
3699 	sc->sc_prs_shadow[index].lu = lu;
3700 }
3701 
/*
 * Write a parser entry into the hardware TCAM and SRAM.  The entry is
 * marked valid (the invalid bit is cleared) before being written.
 * Returns EINVAL if the index is out of range.
 */
int
mvpp2_prs_hw_write(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return EINVAL;

	/* Clear the invalid bit so the entry takes effect. */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
	/* Indirect access: select the index, then write all words. */
	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
	mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
3720 
3721 int
3722 mvpp2_prs_hw_read(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe, int tid)
3723 {
3724 	int i;
3725 
3726 	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
3727 		return EINVAL;
3728 
3729 	memset(pe, 0, sizeof(*pe));
3730 	pe->index = tid;
3731 
3732 	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
3733 	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] =
3734 	    mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
3735 	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
3736 		return EINVAL;
3737 	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3738 		pe->tcam.word[i] =
3739 		    mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(i));
3740 
3741 	mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
3742 	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3743 		pe->sram.word[i] =
3744 		    mvpp2_read(sc, MVPP2_PRS_SRAM_DATA_REG(i));
3745 
3746 	return 0;
3747 }
3748 
3749 int
3750 mvpp2_prs_flow_find(struct mvpp2_softc *sc, int flow)
3751 {
3752 	struct mvpp2_prs_entry pe;
3753 	uint8_t bits;
3754 	int tid;
3755 
3756 	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
3757 		if (!sc->sc_prs_shadow[tid].valid ||
3758 		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
3759 			continue;
3760 
3761 		mvpp2_prs_hw_read(sc, &pe, tid);
3762 		bits = mvpp2_prs_sram_ai_get(&pe);
3763 
3764 		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
3765 			return tid;
3766 	}
3767 
3768 	return -1;
3769 }
3770 
3771 int
3772 mvpp2_prs_tcam_first_free(struct mvpp2_softc *sc, uint8_t start, uint8_t end)
3773 {
3774 	uint8_t tmp;
3775 	int tid;
3776 
3777 	if (start > end) {
3778 		tmp = end;
3779 		end = start;
3780 		start = tmp;
3781 	}
3782 
3783 	for (tid = start; tid <= end; tid++) {
3784 		if (!sc->sc_prs_shadow[tid].valid)
3785 			return tid;
3786 	}
3787 
3788 	return -1;
3789 }
3790 
/*
 * Add or remove a port from the shared "drop all" MAC parser entry.
 * The entry is created on first use; afterwards only its port
 * membership bitmap is changed.
 */
void
mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *sc, uint32_t port, int add)
{
	struct mvpp2_prs_entry pe;

	if (sc->sc_prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry already exists; fetch it from the hardware. */
		mvpp2_prs_hw_read(sc, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* First use: build the entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;
		/* Mark matching frames for dropping. */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
		    MVPP2_PRS_RI_DROP_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
		/* Start with no ports; the caller's port is added below. */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Toggle only this port's membership and write the entry back. */
	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3813 
/*
 * Add or remove a port from the shared unicast or multicast
 * promiscuous parser entry, selected by l2_cast.  The entry is
 * created on first use.
 */
void
mvpp2_prs_mac_promisc_set(struct mvpp2_softc *sc, uint32_t port, int l2_cast,
    int add)
{
	struct mvpp2_prs_entry pe;
	uint8_t cast_match;
	uint32_t ri;
	int tid;

	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	if (sc->sc_prs_shadow[tid].valid) {
		/* Entry already exists; fetch it from the hardware. */
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		/* First use: build the entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
		/* Match the cast bit in the first header byte. */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
		    MVPP2_PRS_CAST_MASK);
		/* Skip the destination and source MAC addresses. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_tcam_port_map_set(&pe, 0);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Toggle only this port's membership and write the entry back. */
	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3852 
/*
 * Add or remove a port from one of the four shared DSA parser entries
 * (tagged/untagged x DSA/EDSA).  Entries are created on first use.
 */
void
mvpp2_prs_dsa_tag_set(struct mvpp2_softc *sc, uint32_t port, int add,
    int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;	/* skip 8 bytes of EDSA tag */
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;	/* skip 4 bytes of DSA tag */
	}

	if (sc->sc_prs_shadow[tid].valid) {
		/* Entry already exists; fetch it from the hardware. */
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		/* First use: build the entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		mvpp2_prs_sram_shift_set(&pe, shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* Match the tagged bit and continue at VLAN parsing. */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* No VLAN present; continue at L2 parsing. */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Toggle only this port's membership and write the entry back. */
	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3894 
/*
 * Add or remove a port from the ethertype-based DSA parser entries.
 * These match an 0xdada ethertype in the frame rather than relying
 * on the port mode.  Entries are created on first use.
 */
void
mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *sc, uint32_t port,
    int add, int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;	/* skip 8 bytes of EDSA tag */
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;	/* skip 4 bytes of DSA tag */
	}

	if (sc->sc_prs_shadow[tid].valid) {
		/* Entry already exists; fetch it from the hardware. */
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		/* First use: build the entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		/* Match ethertype 0xdada followed by zero. */
		mvpp2_prs_match_etype(&pe, 0, 0xdada);
		mvpp2_prs_match_etype(&pe, 2, 0);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
		    MVPP2_PRS_RI_DSA_MASK);
		/* Skip both MAC addresses plus the DSA/EDSA tag. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN + shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* Match the tagged bit and continue at VLAN parsing. */
			mvpp2_prs_tcam_data_byte_set(&pe,
			    MVPP2_ETH_TYPE_LEN + 2 + 3,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* No VLAN present; continue at L2 parsing. */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Toggle only this port's membership and write the entry back. */
	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3943 
/*
 * Search the parser for a single- or triple-VLAN entry matching the
 * given TPID and AI bits.  On success returns a malloc(9)'ed copy of
 * the entry which the caller must free with free(pe, M_TEMP,
 * sizeof(*pe)); returns NULL when no match exists or allocation fails.
 */
struct mvpp2_prs_entry *
mvpp2_prs_vlan_find(struct mvpp2_softc *sc, uint16_t tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_bits, ai_bits;
	int match, tid;

	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
	if (pe == NULL)
		return NULL;

	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		if (!sc->sc_prs_shadow[tid].valid ||
		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;
		mvpp2_prs_hw_read(sc, pe, tid);
		/* The TPID is stored byte-swapped in the TCAM data. */
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid));
		if (!match)
			continue;
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		/* Ignore the double-VLAN flag when comparing AI bits. */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
		if (ai != ai_bits)
			continue;
		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}

	free(pe, M_TEMP, sizeof(*pe));
	return NULL;
}
3979 
/*
 * Install (or update the port map of) a single/triple VLAN parser
 * entry for the given TPID and AI bits.  New entries are placed above
 * all existing double-VLAN entries so lookup ordering is preserved.
 *
 * NOTE(review): error returns are mixed — mvpp2_prs_tcam_first_free()
 * failures propagate as -1 while other failures return positive errno
 * values; callers should only test for non-zero.
 */
int
mvpp2_prs_vlan_add(struct mvpp2_softc *sc, uint16_t tpid, int ai, uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_bits;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(sc, tpid, ai);
	if (pe == NULL) {
		/* No existing entry; allocate a TCAM slot from the top. */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_LAST_FREE_TID,
		    MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		/* get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		    tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			mvpp2_prs_hw_read(sc, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		/* The new entry must sit above every double-VLAN entry. */
		if (tid <= tid_aux) {
			ret = EINVAL;
			goto error;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		mvpp2_prs_match_etype(pe, 0, tpid);
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* Skip the VLAN tag before the next lookup stage. */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
				   MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		} else {
			/* Third tag on top of a double-VLAN frame. */
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
4044 
4045 int
4046 mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *sc)
4047 {
4048 	int i;
4049 
4050 	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++)
4051 		if (!sc->sc_prs_double_vlans[i])
4052 			return i;
4053 
4054 	return -1;
4055 }
4056 
/*
 * Search the parser for a double-VLAN entry matching the outer
 * (tpid1) and inner (tpid2) TPIDs.  On success returns a malloc(9)'ed
 * copy of the entry which the caller must free with free(pe, M_TEMP,
 * sizeof(*pe)); returns NULL when no match exists or allocation fails.
 */
struct mvpp2_prs_entry *
mvpp2_prs_double_vlan_find(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_mask;
	int match, tid;

	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
	if (pe == NULL)
		return NULL;

	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		if (!sc->sc_prs_shadow[tid].valid ||
		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_hw_read(sc, pe, tid);
		/* Both TPIDs are stored byte-swapped in the TCAM data. */
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid1)) &&
		    mvpp2_prs_tcam_data_cmp(pe, 4, swap16(tpid2));
		if (!match)
			continue;
		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}

	free(pe, M_TEMP, sizeof(*pe));
	return NULL;
}
4088 
/*
 * Install (or update the port map of) a double-VLAN parser entry for
 * the given outer/inner TPID pair.  New entries are placed below all
 * existing single/triple VLAN entries so lookup ordering is preserved.
 *
 * NOTE(review): error returns are mixed — mvpp2_prs_tcam_first_free()
 * and mvpp2_prs_double_vlan_ai_free_get() failures propagate as -1
 * while other failures return positive errno values; callers should
 * only test for non-zero.
 */
int
mvpp2_prs_double_vlan_add(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2,
    uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;
	uint32_t ri_bits;

	pe = mvpp2_prs_double_vlan_find(sc, tpid1, tpid2);
	if (pe == NULL) {
		/* No existing entry; allocate a TCAM slot from the bottom. */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
		    MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		ai = mvpp2_prs_double_vlan_ai_free_get(sc);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* Find the first single/triple VLAN entry. */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		    tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			mvpp2_prs_hw_read(sc, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		/* The new entry must sit below every single/triple entry. */
		if (tid >= tid_aux) {
			ret = ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		sc->sc_prs_double_vlans[ai] = 1;
		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Skip both VLAN tags before the next lookup stage. */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
		    MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
		    MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
4155 
/*
 * Install the two IPv4 parser entries for an L4 protocol (TCP, UDP or
 * IGMP): one matching non-fragmented packets and one matching
 * fragments (which additionally sets the IP_FRAG result bit).
 * Returns EINVAL for unsupported protocols, a negative value when no
 * free TCAM slot exists, 0 on success.
 */
int
mvpp2_prs_ip4_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
    uint32_t ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/* First entry: non-fragmented packets. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Point the L4 offset just past the fixed IPv4 header. */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
	/* Bytes 2-3 must be zero: no fragment offset / MF flag set. */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/* Second entry: fragments — reuse pe, redo RI and relax bytes 2-3. */
	pe.index = tid;
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
	    ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4208 
/*
 * Install the IPv4 multicast or broadcast address-classification
 * parser entry.  Returns EINVAL for any other l3_cast value, a
 * negative value when no free TCAM slot exists, 0 on success.
 */
int
mvpp2_prs_ip4_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		/* Match the multicast prefix in the first DIP byte. */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
		    MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
		    MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case  MVPP2_PRS_L3_BROAD_CAST:
		/* All four DIP bytes must be all-ones (255.255.255.255). */
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
		    MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return EINVAL;
	}

	/* Classification done; finish parsing after this stage. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4254 
/*
 * Install the IPv6 parser entry for an L4 protocol (TCP, UDP, ICMPv6
 * or IP-in-IP).  Returns EINVAL for unsupported protocols, a negative
 * value when no free TCAM slot exists, 0 on success.
 */
int
mvpp2_prs_ip6_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
    uint32_t ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	/* Classification done; finish parsing after this stage. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	/* Point the L4 offset just past the fixed IPv6 header. */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	/* Match the next-header byte against the protocol. */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4288 
/*
 * Install the IPv6 multicast address-classification parser entry.
 * Only multicast is supported; returns EINVAL otherwise, a negative
 * value when no free TCAM slot exists, 0 on success.
 */
int
mvpp2_prs_ip6_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	/* Stay in the IP6 stage for further classification. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back so the next stage re-reads the header. */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Match the multicast prefix in the first DIP byte. */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
	    MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4321 
4322 int
4323 mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, const uint8_t *da,
4324     uint8_t *mask)
4325 {
4326 	uint8_t tcam_byte, tcam_mask;
4327 	int index;
4328 
4329 	for (index = 0; index < ETHER_ADDR_LEN; index++) {
4330 		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte,
4331 		    &tcam_mask);
4332 		if (tcam_mask != mask[index])
4333 			return 0;
4334 		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
4335 			return 0;
4336 	}
4337 
4338 	return 1;
4339 }
4340 
/*
 * Find the TCAM index of a MAC parser entry matching the given port
 * map, address/mask pair and UDF type.  Returns the index, or -1 when
 * no such entry exists.
 */
int
mvpp2_prs_mac_da_range_find(struct mvpp2_softc *sc, int pmap, const uint8_t *da,
    uint8_t *mask, int udf_type)
{
	struct mvpp2_prs_entry pe;
	int tid;

	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID;
	    tid++) {
		uint32_t entry_pmap;

		if (!sc->sc_prs_shadow[tid].valid ||
		    (sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (sc->sc_prs_shadow[tid].udf != udf_type))
			continue;

		mvpp2_prs_hw_read(sc, &pe, tid);
		entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
		/* Address, mask and port map must all match. */
		if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
		    entry_pmap == pmap)
			return tid;
	}

	return -1;
}
4366 
/*
 * Add (add != 0) or remove this port from the parser entry that
 * accepts unicast/multicast/broadcast destination address "da".
 * Entries are created on demand and invalidated once no port uses
 * them.
 *
 * NOTE(review): error returns are mixed — "no free TCAM slot" and the
 * add-with-empty-portmap case return -1 while other callers in this
 * file return positive errno values; callers should only test for
 * non-zero.
 */
int
mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const uint8_t *da, int add)
{
	struct mvpp2_softc *sc = port->sc;
	struct mvpp2_prs_entry pe;
	uint32_t pmap, len, ri;
	uint8_t mask[ETHER_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Look for an existing entry matching this exact address. */
	tid = mvpp2_prs_mac_da_range_find(sc, BIT(port->sc_id), da, mask,
	    MVPP2_PRS_UDF_MAC_DEF);
	if (tid < 0) {
		/* Nothing to remove. */
		if (!add)
			return 0;

		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
		    MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe.index = tid;
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_hw_read(sc, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_tcam_port_set(&pe, port->sc_id, add);

	/* invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		if (add)
			return -1;
		mvpp2_prs_hw_inv(sc, pe.index);
		sc->sc_prs_shadow[pe.index].valid = 0;
		return 0;
	}

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Match every byte of the destination address exactly. */
	len = ETHER_ADDR_LEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Classify the address type in the result info. */
	if (ETHER_IS_BROADCAST(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (ETHER_IS_MULTICAST(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
	    MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(sc, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
	    MVPP2_PRS_RI_MAC_ME_MASK);
	/* Skip the destination and source MAC addresses. */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4434 
/*
 * Configure the DSA tag mode for a port: enable the port in the
 * parser entries matching the requested mode and disable it in the
 * others.  Returns EINVAL for an out-of-range type, 0 otherwise.
 */
int
mvpp2_prs_tag_mode_set(struct mvpp2_softc *sc, int port_id, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries, remove from DSA entries. */
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_DSA);
		break;
	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries, remove from EDSA entries. */
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_EDSA);
		break;
	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* No DSA tagging: remove port from all four entries. */
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_EDSA);
		break;
	default:
		/* Other in-range tag types are accepted without changes. */
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return EINVAL;
		break;
	}

	return 0;
}
4478 
/*
 * Install (or update) the default flow entry for a port, using the
 * port id as the flow id.  Returns a negative value when no flow
 * entry exists and no free TCAM slot can be found, 0 on success.
 */
int
mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_flow_find(port->sc, port->sc_id);
	if (tid < 0) {
		/* No entry yet; allocate a TCAM slot from the top. */
		tid = mvpp2_prs_tcam_first_free(port->sc,
		    MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe.index = tid;
		/* Store the port id as the flow id and finish parsing. */
		mvpp2_prs_sram_ai_update(&pe, port->sc_id,
		    MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
		mvpp2_prs_shadow_set(port->sc, pe.index, MVPP2_PRS_LU_FLOWS);
	} else {
		mvpp2_prs_hw_read(port->sc, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	/* Only this port matches the entry. */
	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->sc_id));
	mvpp2_prs_hw_write(port->sc, &pe);
	return 0;
}
4508 
/*
 * Write a classifier flow table entry: select the row via the index
 * register, then store the three data words.  The index write must
 * precede the data writes.
 */
void
mvpp2_cls_flow_write(struct mvpp2_softc *sc, struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(sc, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
4517 
/*
 * Write a classifier lookup table entry.  The row is addressed by the
 * combination of way and lookup id; the index write must precede the
 * data write.
 */
void
mvpp2_cls_lookup_write(struct mvpp2_softc *sc, struct mvpp2_cls_lookup_entry *le)
{
	uint32_t val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(sc, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(sc, MVPP2_CLS_LKP_TBL_REG, le->data);
}
4527 
4528 void
4529 mvpp2_cls_init(struct mvpp2_softc *sc)
4530 {
4531 	struct mvpp2_cls_lookup_entry le;
4532 	struct mvpp2_cls_flow_entry fe;
4533 	int index;
4534 
4535 	mvpp2_write(sc, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
4536 	memset(&fe.data, 0, sizeof(fe.data));
4537 	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
4538 		fe.index = index;
4539 		mvpp2_cls_flow_write(sc, &fe);
4540 	}
4541 	le.data = 0;
4542 	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
4543 		le.lkpid = index;
4544 		le.way = 0;
4545 		mvpp2_cls_lookup_write(sc, &le);
4546 		le.way = 1;
4547 		mvpp2_cls_lookup_write(sc, &le);
4548 	}
4549 }
4550 
/*
 * Per-port classifier setup: assign the port to way 0, point its
 * lookup entry at the port's first RX queue and leave all
 * classification engines disabled.
 */
void
mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	uint32_t val;

	/* set way for the port */
	val = mvpp2_read(port->sc, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->sc_id);
	mvpp2_write(port->sc, MVPP2_CLS_PORT_WAY_REG, val);

	/*
	 * pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->sc_id;
	le.way = 0;
	le.data = 0;

	/* set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= (port->sc_id * 32);

	/* disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* update lookup ID table entry */
	mvpp2_cls_lookup_write(port->sc, &le);
}
4580 
/*
 * Route oversize frames for this port to its first RX queue
 * (port id * 32, split across the low and high registers) and clear
 * the port's bit in the software-forwarding control register.
 */
void
mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	uint32_t val;

	/* Low bits of the oversize RX queue number. */
	mvpp2_write(port->sc, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->sc_id),
	    (port->sc_id * 32) & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
	/* Remaining high bits go into the P2HQ register. */
	mvpp2_write(port->sc, MVPP2_CLS_SWFWD_P2HQ_REG(port->sc_id),
	    (port->sc_id * 32) >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS);
	val = mvpp2_read(port->sc, MVPP2_CLS_SWFWD_PCTRL_REG);
	val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->sc_id);
	mvpp2_write(port->sc, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
4594