xref: /openbsd-src/sys/dev/fdt/if_mvpp.c (revision 824adb5411e4389b29bae28eba5c2c2bbd147f34)
1 /*	$OpenBSD: if_mvpp.c,v 1.48 2021/07/07 21:21:48 patrick Exp $	*/
2 /*
3  * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
4  * Copyright (c) 2017, 2020 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 /*
19  * Copyright (C) 2016 Marvell International Ltd.
20  *
21  * Marvell BSD License Option
22  *
23  * If you received this File from Marvell, you may opt to use, redistribute
24  * and/or modify this File under the following licensing terms.
25  * Redistribution and use in source and binary forms, with or without
26  * modification, are permitted provided that the following conditions are met:
27  *
28  *   * Redistributions of source code must retain the above copyright notice,
29  *     this list of conditions and the following disclaimer.
30  *
31  *   * Redistributions in binary form must reproduce the above copyright
32  *     notice, this list of conditions and the following disclaimer in the
33  *     documentation and/or other materials provided with the distribution.
34  *
35  *   * Neither the name of Marvell nor the names of its contributors may be
36  *     used to endorse or promote products derived from this software without
37  *     specific prior written permission.
38  *
39  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
40  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
43  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
44  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
45  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
46  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
47  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
48  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
49  * POSSIBILITY OF SUCH DAMAGE.
50  */
51 
52 #include "bpfilter.h"
53 
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/device.h>
57 #include <sys/kernel.h>
58 #include <sys/malloc.h>
59 #include <sys/mbuf.h>
60 #include <sys/queue.h>
61 #include <sys/socket.h>
62 #include <sys/sockio.h>
63 #include <sys/timeout.h>
64 
65 #include <uvm/uvm_extern.h>
66 
67 #include <machine/cpufunc.h>
68 #include <machine/bus.h>
69 #include <machine/fdt.h>
70 
71 #include <net/if.h>
72 #include <net/if_media.h>
73 #include <net/ppp_defs.h>
74 
75 #include <dev/ofw/openfirm.h>
76 #include <dev/ofw/ofw_clock.h>
77 #include <dev/ofw/ofw_gpio.h>
78 #include <dev/ofw/ofw_misc.h>
79 #include <dev/ofw/ofw_pinctrl.h>
80 #include <dev/ofw/ofw_regulator.h>
81 #include <dev/ofw/fdt.h>
82 
83 #include <dev/mii/mii.h>
84 #include <dev/mii/miivar.h>
85 
86 #if NBPFILTER > 0
87 #include <net/bpf.h>
88 #endif
89 
90 #include <netinet/in.h>
91 #include <netinet/ip.h>
92 #include <netinet/if_ether.h>
93 
94 #include <netinet6/in6_var.h>
95 #include <netinet/ip6.h>
96 
97 #include <dev/fdt/if_mvppreg.h>
98 
/* One receive/transmit buffer: its DMA map and the mbuf it carries. */
struct mvpp2_buf {
	bus_dmamap_t		mb_map;		/* DMA map for mb_m */
	struct mbuf		*mb_m;		/* attached mbuf, NULL if free */
};
103 
104 #define MVPP2_NTXDESC	512
105 #define MVPP2_NTXSEGS	16
106 #define MVPP2_NRXDESC	512
107 
/*
 * Software state for one hardware Buffer Manager (BM) pool.
 * The freelist holds (pool-id << 16 | rxbuf-index) cookies; slots that
 * are handed to the hardware are marked -1 (see mvpp2_bm_pool_init).
 */
struct mvpp2_bm_pool {
	struct mvpp2_dmamem	*bm_mem;	/* pool pointer ring given to hw */
	struct mvpp2_buf	*rxbuf;		/* MVPP2_BM_SIZE buffer slots */
	uint32_t		*freelist;	/* cookie ring, -1 == in hw */
	int			free_prod;	/* freelist producer index */
	int			free_cons;	/* freelist consumer index */
};
115 
116 #define MVPP2_BM_SIZE		64
117 #define MVPP2_BM_POOL_PTR_ALIGN	128
118 #define MVPP2_BM_POOLS_NUM	8
119 #define MVPP2_BM_ALIGN		32
120 
/* Software state for one transmit queue (per-port or aggregated). */
struct mvpp2_tx_queue {
	uint8_t			id;		/* global hw queue id */
	uint8_t			log_id;		/* per-port logical id */
	struct mvpp2_dmamem	*ring;		/* descriptor ring memory */
	struct mvpp2_buf	*buf;		/* per-descriptor buffer state */
	struct mvpp2_tx_desc	*descs;		/* KVA of the descriptor ring */
	int			prod;		/* producer index */
	int			cons;		/* consumer index */

	uint32_t		done_pkts_coal;	/* tx-done coalescing threshold */
};
132 
/* Software state for one receive queue. */
struct mvpp2_rx_queue {
	uint8_t			id;		/* global hw queue id */
	struct mvpp2_dmamem	*ring;		/* descriptor ring memory */
	struct mvpp2_rx_desc	*descs;		/* KVA of the descriptor ring */
	int			prod;		/* producer index */
	struct if_rxring	rxring;		/* rx ring accounting */
	int			cons;		/* consumer index */

	uint32_t		pkts_coal;	/* rx packet coalescing */
	uint32_t		time_coal;	/* rx time coalescing */
};
144 
/* A single contiguous DMA-able memory allocation (one segment). */
struct mvpp2_dmamem {
	bus_dmamap_t		mdm_map;	/* loaded DMA map */
	bus_dma_segment_t	mdm_seg;	/* backing segment */
	size_t			mdm_size;	/* size in bytes */
	caddr_t			mdm_kva;	/* kernel virtual address */
};
/* Convenience accessors for a struct mvpp2_dmamem. */
#define MVPP2_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MVPP2_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MVPP2_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MVPP2_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
155 
struct mvpp2_port;
/*
 * Per-controller state (the "mvppc" device).  One controller carries
 * up to MVPP2_MAX_PORTS ports, attached as child "mvpp" devices.
 */
struct mvpp2_softc {
	struct device		sc_dev;
	int			sc_node;	/* OF node of the controller */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh_base;	/* reg[0]: packet processor */
	bus_space_handle_t	sc_ioh_iface;	/* reg[1]: interface registers */
	paddr_t			sc_ioh_paddr;	/* physical addr of reg[0] */
	bus_size_t		sc_iosize_base;
	bus_size_t		sc_iosize_iface;
	bus_dma_tag_t		sc_dmat;
	struct regmap		*sc_rm;		/* "marvell,system-controller" */

	uint32_t		sc_tclk;	/* "pp_clk" frequency */

	struct mvpp2_bm_pool	*sc_bm_pools;	/* buffer manager pools */
	int			sc_npools;

	struct mvpp2_prs_shadow	*sc_prs_shadow;	/* parser TCAM shadow copy */
	uint8_t			*sc_prs_double_vlans;

	int			sc_aggr_ntxq;	/* number of aggregated txqs */
	struct mvpp2_tx_queue	*sc_aggr_txqs;

	struct mvpp2_port	**sc_ports;
};
182 
/* Per-port state (the "mvpp" network interface device). */
struct mvpp2_port {
	struct device		sc_dev;
	struct mvpp2_softc	*sc;		/* parent controller */
	int			sc_node;	/* OF node of this port */
	bus_dma_tag_t		sc_dmat;
	int			sc_id;		/* port index */
	int			sc_gop_id;	/* group-of-ports (GOP) index */

	struct arpcom		sc_ac;		/* ifnet + ethernet address */
#define sc_lladdr	sc_ac.ac_enaddr
	struct mii_data		sc_mii;
#define sc_media	sc_mii.mii_media
	struct mii_bus		*sc_mdio;

	/* PHY connection type, from the device tree "phy-mode" property. */
	enum {
		PHY_MODE_XAUI,
		PHY_MODE_10GBASER,
		PHY_MODE_2500BASEX,
		PHY_MODE_1000BASEX,
		PHY_MODE_SGMII,
		PHY_MODE_RGMII,
		PHY_MODE_RGMII_ID,
		PHY_MODE_RGMII_RXID,
		PHY_MODE_RGMII_TXID,
	}			sc_phy_mode;
	int			sc_fixed_link;	/* fixed-link, no PHY */
	int			sc_inband_status; /* in-band link status */
	int			sc_link;	/* current link state */
	int			sc_phyloc;	/* PHY address */
	int			sc_sfp;		/* SFP cage phandle, if any */

	int			sc_ntxq;	/* number of tx queues */
	int			sc_nrxq;	/* number of rx queues */

	struct mvpp2_tx_queue	*sc_txqs;
	struct mvpp2_rx_queue	*sc_rxqs;

	struct timeout		sc_tick;	/* periodic tick timeout */

	uint32_t		sc_tx_time_coal; /* tx time coalescing */
};
224 
225 #define MVPP2_MAX_PORTS		4
226 
/* Attach arguments passed from the controller to each child port. */
struct mvpp2_attach_args {
	int			ma_node;	/* OF node of the port */
	bus_dma_tag_t		ma_dmat;	/* controller's DMA tag */
};
231 
#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

/* Serializes SFP access across all mvpp ports. */
static struct rwlock mvpp2_sff_lock = RWLOCK_INITIALIZER("mvpp2sff");
235 
236 int	mvpp2_match(struct device *, void *, void *);
237 void	mvpp2_attach(struct device *, struct device *, void *);
238 void	mvpp2_attach_deferred(struct device *);
239 
/* Autoconf glue for the controller ("mvppc") device. */
struct cfattach mvppc_ca = {
	sizeof(struct mvpp2_softc), mvpp2_match, mvpp2_attach
};

struct cfdriver mvppc_cd = {
	NULL, "mvppc", DV_DULL
};
247 
248 int	mvpp2_port_match(struct device *, void *, void *);
249 void	mvpp2_port_attach(struct device *, struct device *, void *);
250 
/* Autoconf glue for the per-port ("mvpp") network interface device. */
struct cfattach mvpp_ca = {
	sizeof(struct mvpp2_port), mvpp2_port_match, mvpp2_port_attach
};

struct cfdriver mvpp_cd = {
	NULL, "mvpp", DV_IFNET
};
258 
259 void	mvpp2_port_attach_sfp(struct device *);
260 
261 uint32_t mvpp2_read(struct mvpp2_softc *, bus_addr_t);
262 void	mvpp2_write(struct mvpp2_softc *, bus_addr_t, uint32_t);
263 uint32_t mvpp2_gmac_read(struct mvpp2_port *, bus_addr_t);
264 void	mvpp2_gmac_write(struct mvpp2_port *, bus_addr_t, uint32_t);
265 uint32_t mvpp2_xlg_read(struct mvpp2_port *, bus_addr_t);
266 void	mvpp2_xlg_write(struct mvpp2_port *, bus_addr_t, uint32_t);
267 uint32_t mvpp2_xpcs_read(struct mvpp2_port *, bus_addr_t);
268 void	mvpp2_xpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
269 uint32_t mvpp2_mpcs_read(struct mvpp2_port *, bus_addr_t);
270 void	mvpp2_mpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
271 
272 int	mvpp2_ioctl(struct ifnet *, u_long, caddr_t);
273 void	mvpp2_start(struct ifnet *);
274 int	mvpp2_rxrinfo(struct mvpp2_port *, struct if_rxrinfo *);
275 void	mvpp2_watchdog(struct ifnet *);
276 
277 int	mvpp2_media_change(struct ifnet *);
278 void	mvpp2_media_status(struct ifnet *, struct ifmediareq *);
279 
280 int	mvpp2_mii_readreg(struct device *, int, int);
281 void	mvpp2_mii_writereg(struct device *, int, int, int);
282 void	mvpp2_mii_statchg(struct device *);
283 void	mvpp2_inband_statchg(struct mvpp2_port *);
284 void	mvpp2_port_change(struct mvpp2_port *);
285 
286 void	mvpp2_tick(void *);
287 void	mvpp2_rxtick(void *);
288 
289 int	mvpp2_link_intr(void *);
290 int	mvpp2_intr(void *);
291 void	mvpp2_tx_proc(struct mvpp2_port *, uint8_t);
292 void	mvpp2_txq_proc(struct mvpp2_port *, struct mvpp2_tx_queue *);
293 void	mvpp2_rx_proc(struct mvpp2_port *, uint8_t);
294 void	mvpp2_rxq_proc(struct mvpp2_port *, struct mvpp2_rx_queue *);
295 void	mvpp2_rx_refill(struct mvpp2_port *);
296 
297 void	mvpp2_up(struct mvpp2_port *);
298 void	mvpp2_down(struct mvpp2_port *);
299 void	mvpp2_iff(struct mvpp2_port *);
300 
301 void	mvpp2_aggr_txq_hw_init(struct mvpp2_softc *, struct mvpp2_tx_queue *);
302 void	mvpp2_txq_hw_init(struct mvpp2_port *, struct mvpp2_tx_queue *);
303 void	mvpp2_rxq_hw_init(struct mvpp2_port *, struct mvpp2_rx_queue *);
304 void	mvpp2_txq_hw_deinit(struct mvpp2_port *, struct mvpp2_tx_queue *);
305 void	mvpp2_rxq_hw_drop(struct mvpp2_port *, struct mvpp2_rx_queue *);
306 void	mvpp2_rxq_hw_deinit(struct mvpp2_port *, struct mvpp2_rx_queue *);
307 void	mvpp2_rxq_long_pool_set(struct mvpp2_port *, int, int);
308 void	mvpp2_rxq_short_pool_set(struct mvpp2_port *, int, int);
309 
310 void	mvpp2_mac_reset_assert(struct mvpp2_port *);
311 void	mvpp2_pcs_reset_assert(struct mvpp2_port *);
312 void	mvpp2_pcs_reset_deassert(struct mvpp2_port *);
313 void	mvpp2_mac_config(struct mvpp2_port *);
314 void	mvpp2_xlg_config(struct mvpp2_port *);
315 void	mvpp2_gmac_config(struct mvpp2_port *);
316 void	mvpp2_comphy_config(struct mvpp2_port *, int);
317 void	mvpp2_gop_config(struct mvpp2_port *);
318 void	mvpp2_gop_intr_mask(struct mvpp2_port *);
319 void	mvpp2_gop_intr_unmask(struct mvpp2_port *);
320 
321 struct mvpp2_dmamem *
322 	mvpp2_dmamem_alloc(struct mvpp2_softc *, bus_size_t, bus_size_t);
323 void	mvpp2_dmamem_free(struct mvpp2_softc *, struct mvpp2_dmamem *);
324 struct mbuf *mvpp2_alloc_mbuf(struct mvpp2_softc *, bus_dmamap_t);
325 void	mvpp2_fill_rx_ring(struct mvpp2_softc *);
326 
327 void	mvpp2_interrupts_enable(struct mvpp2_port *, int);
328 void	mvpp2_interrupts_disable(struct mvpp2_port *, int);
329 int	mvpp2_egress_port(struct mvpp2_port *);
330 int	mvpp2_txq_phys(int, int);
331 void	mvpp2_defaults_set(struct mvpp2_port *);
332 void	mvpp2_ingress_enable(struct mvpp2_port *);
333 void	mvpp2_ingress_disable(struct mvpp2_port *);
334 void	mvpp2_egress_enable(struct mvpp2_port *);
335 void	mvpp2_egress_disable(struct mvpp2_port *);
336 void	mvpp2_port_enable(struct mvpp2_port *);
337 void	mvpp2_port_disable(struct mvpp2_port *);
338 void	mvpp2_rxq_status_update(struct mvpp2_port *, int, int, int);
339 int	mvpp2_rxq_received(struct mvpp2_port *, int);
340 void	mvpp2_rxq_offset_set(struct mvpp2_port *, int, int);
341 void	mvpp2_txp_max_tx_size_set(struct mvpp2_port *);
342 void	mvpp2_rx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
343 	    uint32_t);
344 void	mvpp2_tx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_tx_queue *,
345 	    uint32_t);
346 void	mvpp2_rx_time_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
347 	    uint32_t);
348 void	mvpp2_tx_time_coal_set(struct mvpp2_port *, uint32_t);
349 
350 void	mvpp2_axi_config(struct mvpp2_softc *);
351 void	mvpp2_bm_pool_init(struct mvpp2_softc *);
352 void	mvpp2_rx_fifo_init(struct mvpp2_softc *);
353 void	mvpp2_tx_fifo_init(struct mvpp2_softc *);
354 int	mvpp2_prs_default_init(struct mvpp2_softc *);
355 void	mvpp2_prs_hw_inv(struct mvpp2_softc *, int);
356 void	mvpp2_prs_hw_port_init(struct mvpp2_softc *, int, int, int, int);
357 void	mvpp2_prs_def_flow_init(struct mvpp2_softc *);
358 void	mvpp2_prs_mh_init(struct mvpp2_softc *);
359 void	mvpp2_prs_mac_init(struct mvpp2_softc *);
360 void	mvpp2_prs_dsa_init(struct mvpp2_softc *);
361 int	mvpp2_prs_etype_init(struct mvpp2_softc *);
362 int	mvpp2_prs_vlan_init(struct mvpp2_softc *);
363 int	mvpp2_prs_pppoe_init(struct mvpp2_softc *);
364 int	mvpp2_prs_ip6_init(struct mvpp2_softc *);
365 int	mvpp2_prs_ip4_init(struct mvpp2_softc *);
366 void	mvpp2_prs_shadow_ri_set(struct mvpp2_softc *, int,
367 	    uint32_t, uint32_t);
368 void	mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *, uint32_t);
369 void	mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *, uint32_t, int);
370 void	mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *, uint32_t);
371 uint32_t mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *);
372 void	mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *, uint32_t,
373 	    uint8_t, uint8_t);
374 void	mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *, uint32_t,
375 	    uint8_t *, uint8_t *);
376 int	mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *, int, uint16_t);
377 void	mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
378 int	mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *);
379 int	mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *);
380 void	mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *, uint32_t,
381 	    uint32_t *, uint32_t *);
382 void	mvpp2_prs_match_etype(struct mvpp2_prs_entry *, uint32_t, uint16_t);
383 int	mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *);
384 void	mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
385 void	mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
386 void	mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *, uint32_t, uint32_t);
387 void	mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *, uint32_t, uint32_t);
388 void	mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *, int, uint32_t);
389 void	mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *, uint32_t, int,
390 	    uint32_t);
391 void	mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *, uint32_t);
392 void	mvpp2_prs_shadow_set(struct mvpp2_softc *, int, uint32_t);
393 int	mvpp2_prs_hw_write(struct mvpp2_softc *, struct mvpp2_prs_entry *);
394 int	mvpp2_prs_hw_read(struct mvpp2_softc *, struct mvpp2_prs_entry *, int);
395 int	mvpp2_prs_flow_find(struct mvpp2_softc *, int);
396 int	mvpp2_prs_tcam_first_free(struct mvpp2_softc *, uint8_t, uint8_t);
397 void	mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *, uint32_t, int);
398 void	mvpp2_prs_mac_promisc_set(struct mvpp2_softc *, uint32_t, int, int);
399 void	mvpp2_prs_dsa_tag_set(struct mvpp2_softc *, uint32_t, int, int, int);
400 void	mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *, uint32_t,
401 	    int, int, int);
402 struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2_softc *, uint16_t,
403 	    int);
404 int	mvpp2_prs_vlan_add(struct mvpp2_softc *, uint16_t, int, uint32_t);
405 int	mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *);
406 struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2_softc *,
407 	    uint16_t, uint16_t);
408 int	mvpp2_prs_double_vlan_add(struct mvpp2_softc *, uint16_t, uint16_t,
409 	    uint32_t);
410 int	mvpp2_prs_ip4_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
411 int	mvpp2_prs_ip4_cast(struct mvpp2_softc *, uint16_t);
412 int	mvpp2_prs_ip6_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
413 int	mvpp2_prs_ip6_cast(struct mvpp2_softc *, uint16_t);
414 int	mvpp2_prs_mac_da_range_find(struct mvpp2_softc *, int, const uint8_t *,
415 	    uint8_t *, int);
416 int	mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *, const uint8_t *,
417 	    uint8_t *);
418 int	mvpp2_prs_mac_da_accept(struct mvpp2_port *, const uint8_t *, int);
419 void	mvpp2_prs_mac_del_all(struct mvpp2_port *);
420 int	mvpp2_prs_tag_mode_set(struct mvpp2_softc *, int, int);
421 int	mvpp2_prs_def_flow(struct mvpp2_port *);
422 void	mvpp2_cls_flow_write(struct mvpp2_softc *, struct mvpp2_cls_flow_entry *);
423 void	mvpp2_cls_lookup_write(struct mvpp2_softc *, struct mvpp2_cls_lookup_entry *);
424 void	mvpp2_cls_init(struct mvpp2_softc *);
425 void	mvpp2_cls_port_config(struct mvpp2_port *);
426 void	mvpp2_cls_oversize_rxq_set(struct mvpp2_port *);
427 
428 int
429 mvpp2_match(struct device *parent, void *cfdata, void *aux)
430 {
431 	struct fdt_attach_args *faa = aux;
432 
433 	return OF_is_compatible(faa->fa_node, "marvell,armada-7k-pp22");
434 }
435 
/*
 * Autoconf attach: map the two register windows (packet processor base
 * and interface registers), look up the system controller regmap, enable
 * clocks, and defer the remaining (interrupt-driven) initialization.
 */
void
mvpp2_attach(struct device *parent, struct device *self, void *aux)
{
	struct mvpp2_softc *sc = (void *)self;
	struct fdt_attach_args *faa = aux;

	/* We need both the base and the interface register windows. */
	if (faa->fa_nreg < 2) {
		printf(": no registers\n");
		return;
	}

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;

	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh_base)) {
		printf(": can't map registers\n");
		return;
	}
	sc->sc_iosize_base = faa->fa_reg[0].size;

	/*
	 * Recover the physical address of the base window via
	 * bus_space_mmap(); PMAP_PA_MASK strips mapping flag bits.
	 */
	sc->sc_ioh_paddr = bus_space_mmap(sc->sc_iot, faa->fa_reg[0].addr,
	    0, PROT_READ | PROT_WRITE, 0);
	KASSERT(sc->sc_ioh_paddr != -1);
	sc->sc_ioh_paddr &= PMAP_PA_MASK;

	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
	    faa->fa_reg[1].size, 0, &sc->sc_ioh_iface)) {
		printf(": can't map registers\n");
		bus_space_unmap(sc->sc_iot, sc->sc_ioh_base,
		    sc->sc_iosize_base);
		return;
	}
	sc->sc_iosize_iface = faa->fa_reg[1].size;

	/* Optional system-controller regmap (used for GOP configuration). */
	sc->sc_rm = regmap_byphandle(OF_getpropint(faa->fa_node,
	    "marvell,system-controller", 0));

	clock_enable_all(faa->fa_node);
	sc->sc_tclk = clock_get_frequency(faa->fa_node, "pp_clk");

	printf("\n");

	/* Finish initialization once all other devices have attached. */
	config_defer(self, mvpp2_attach_deferred);
}
482 
/*
 * Deferred controller initialization: program the AXI attributes,
 * disable SMI polling, set up the aggregated tx queues, FIFOs, buffer
 * manager pools, parser and classifier, then attach the child ports.
 */
void
mvpp2_attach_deferred(struct device *self)
{
	struct mvpp2_softc *sc = (void *)self;
	struct mvpp2_attach_args maa;
	struct mvpp2_tx_queue *txq;
	int i, node;

	mvpp2_axi_config(sc);

	/* Turn off hardware SMI (MDIO) polling. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh_iface, MVPP22_SMI_MISC_CFG_REG,
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh_iface,
	    MVPP22_SMI_MISC_CFG_REG) & ~MVPP22_SMI_POLLING_EN);

	/* A single aggregated tx queue shared by all ports. */
	sc->sc_aggr_ntxq = 1;
	sc->sc_aggr_txqs = mallocarray(sc->sc_aggr_ntxq,
	    sizeof(*sc->sc_aggr_txqs), M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->sc_aggr_ntxq; i++) {
		txq = &sc->sc_aggr_txqs[i];
		txq->id = i;
		mvpp2_aggr_txq_hw_init(sc, txq);
	}

	mvpp2_rx_fifo_init(sc);
	mvpp2_tx_fifo_init(sc);

	mvpp2_write(sc, MVPP2_TX_SNOOP_REG, 0x1);

	mvpp2_bm_pool_init(sc);

	/* Software shadow of the parser TCAM, one entry per TCAM row. */
	sc->sc_prs_shadow = mallocarray(MVPP2_PRS_TCAM_SRAM_SIZE,
	    sizeof(*sc->sc_prs_shadow), M_DEVBUF, M_WAITOK | M_ZERO);

	mvpp2_prs_default_init(sc);
	mvpp2_cls_init(sc);

	/* Attach one mvpp(4) instance per child node. */
	memset(&maa, 0, sizeof(maa));
	for (node = OF_child(sc->sc_node); node; node = OF_peer(node)) {
		maa.ma_node = node;
		maa.ma_dmat = sc->sc_dmat;
		config_found(self, &maa, NULL);
	}
}
527 
/*
 * Program the AXI attributes (cache and domain codes) used by the
 * packet processor's DMA masters for descriptor and data accesses.
 */
void
mvpp2_axi_config(struct mvpp2_softc *sc)
{
	uint32_t reg;

	mvpp2_write(sc, MVPP22_BM_ADDR_HIGH_RLS_REG, 0);

	/* Write attributes: cacheable writes, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_BM_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_RX_DATA_WR_ATTR_REG, reg);

	/* Read attributes: cacheable reads, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_BM_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TX_DATA_RD_ATTR_REG, reg);

	/* Normal (non-snooped) accesses: non-cacheable, system domain. */
	reg = (MVPP22_AXI_CODE_CACHE_NON_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_SYSTEM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_RD_NORMAL_CODE_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_WR_NORMAL_CODE_REG, reg);

	/* Snooped reads: cacheable, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_RD_SNOOP_CODE_REG, reg);

	/* Snooped writes: cacheable, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_WR_SNOOP_CODE_REG, reg);
}
562 
/*
 * Initialize the hardware Buffer Manager (BM): one pool per CPU (capped
 * at MVPP2_BM_POOLS_NUM), each with MVPP2_BM_SIZE mbuf-cluster buffers.
 * Any buffers left over by U-Boot are drained before the pool is filled.
 */
void
mvpp2_bm_pool_init(struct mvpp2_softc *sc)
{
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	uint64_t phys, virt;
	int i, j, inuse;

	/* Mask and clear all BM interrupts. */
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		mvpp2_write(sc, MVPP2_BM_INTR_MASK_REG(i), 0);
		mvpp2_write(sc, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	sc->sc_npools = ncpus;
	sc->sc_npools = min(sc->sc_npools, MVPP2_BM_POOLS_NUM);

	sc->sc_bm_pools = mallocarray(sc->sc_npools, sizeof(*sc->sc_bm_pools),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->sc_npools; i++) {
		bm = &sc->sc_bm_pools[i];
		/* Pointer ring: two 64-bit words (phys + virt) per buffer. */
		bm->bm_mem = mvpp2_dmamem_alloc(sc,
		    MVPP2_BM_SIZE * sizeof(uint64_t) * 2,
		    MVPP2_BM_POOL_PTR_ALIGN);
		KASSERT(bm->bm_mem != NULL);
		bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(bm->bm_mem), 0,
		    MVPP2_DMA_LEN(bm->bm_mem),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Stop the pool before reprogramming its base address. */
		mvpp2_write(sc, MVPP2_BM_POOL_CTRL_REG(i),
		    mvpp2_read(sc, MVPP2_BM_POOL_CTRL_REG(i)) |
		    MVPP2_BM_STOP_MASK);

		mvpp2_write(sc, MVPP2_BM_POOL_BASE_REG(i),
		    (uint64_t)MVPP2_DMA_DVA(bm->bm_mem) & 0xffffffff);
		mvpp2_write(sc, MVPP22_BM_POOL_BASE_HIGH_REG,
		    ((uint64_t)MVPP2_DMA_DVA(bm->bm_mem) >> 32)
		    & MVPP22_BM_POOL_BASE_HIGH_MASK);
		mvpp2_write(sc, MVPP2_BM_POOL_SIZE_REG(i),
		    MVPP2_BM_SIZE);

		mvpp2_write(sc, MVPP2_BM_POOL_CTRL_REG(i),
		    mvpp2_read(sc, MVPP2_BM_POOL_CTRL_REG(i)) |
		    MVPP2_BM_START_MASK);

		/*
		 * U-Boot might not have cleaned its pools.  The pool needs
		 * to be empty before we fill it, otherwise our packets are
		 * written to wherever U-Boot allocated memory.  Cleaning it
		 * up ourselves is worrying as well, since the BM's pages are
		 * probably in our own memory.  Best we can do is stop the BM,
		 * set new memory and drain the pool.
		 */
		inuse = mvpp2_read(sc, MVPP2_BM_POOL_PTRS_NUM_REG(i)) &
		    MVPP2_BM_POOL_PTRS_NUM_MASK;
		inuse += mvpp2_read(sc, MVPP2_BM_BPPI_PTRS_NUM_REG(i)) &
		    MVPP2_BM_BPPI_PTRS_NUM_MASK;
		if (inuse)
			inuse++;
		/* Each read of the alloc register pops one buffer. */
		for (j = 0; j < inuse; j++)
			mvpp2_read(sc, MVPP2_BM_PHY_ALLOC_REG(i));

		mvpp2_write(sc, MVPP2_POOL_BUF_SIZE_REG(i),
		    roundup(MCLBYTES, 1 << MVPP2_POOL_BUF_SIZE_OFFSET));

		bm->rxbuf = mallocarray(MVPP2_BM_SIZE, sizeof(struct mvpp2_buf),
		    M_DEVBUF, M_WAITOK);
		bm->freelist = mallocarray(MVPP2_BM_SIZE, sizeof(*bm->freelist),
		    M_DEVBUF, M_WAITOK | M_ZERO);

		for (j = 0; j < MVPP2_BM_SIZE; j++) {
			rxb = &bm->rxbuf[j];
			bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->mb_map);
			rxb->mb_m = NULL;
		}

		/* Use pool-id and rxbuf index as cookie. */
		for (j = 0; j < MVPP2_BM_SIZE; j++)
			bm->freelist[j] = (i << 16) | (j << 0);

		/* Fill the pool: release one buffer per freelist slot. */
		for (j = 0; j < MVPP2_BM_SIZE; j++) {
			rxb = &bm->rxbuf[j];
			rxb->mb_m = mvpp2_alloc_mbuf(sc, rxb->mb_map);
			if (rxb->mb_m == NULL)
				break;

			/* Take a cookie off the freelist; -1 marks "in hw". */
			KASSERT(bm->freelist[bm->free_cons] != -1);
			virt = bm->freelist[bm->free_cons];
			bm->freelist[bm->free_cons] = -1;
			bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;

			/*
			 * Release to the hardware: high bits first, then the
			 * low virt word, then the low phys word which commits
			 * the release.
			 */
			phys = rxb->mb_map->dm_segs[0].ds_addr;
			mvpp2_write(sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
			    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
			    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
			    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
			mvpp2_write(sc, MVPP2_BM_VIRT_RLS_REG,
			    virt & 0xffffffff);
			mvpp2_write(sc, MVPP2_BM_PHY_RLS_REG(i),
			    phys & 0xffffffff);
		}
	}
}
667 
/*
 * Partition the receive FIFO: 32KB for port 0, 8KB for port 1 and
 * 4KB for the remaining ports, then kick the FIFO initialization.
 */
void
mvpp2_rx_fifo_init(struct mvpp2_softc *sc)
{
	int i;

	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
	    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);

	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
	    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);

	for (i = 2; i < MVPP2_MAX_PORTS; i++) {
		mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(i),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(i),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(sc, MVPP2_RX_MIN_PKT_SIZE_REG, MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(sc, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
693 
694 void
695 mvpp2_tx_fifo_init(struct mvpp2_softc *sc)
696 {
697 	int i;
698 
699 	mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(0),
700 	    MVPP22_TX_FIFO_DATA_SIZE_10KB);
701 	mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(0),
702 	    MVPP2_TX_FIFO_THRESHOLD_10KB);
703 
704 	for (i = 1; i < MVPP2_MAX_PORTS; i++) {
705 		mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(i),
706 		    MVPP22_TX_FIFO_DATA_SIZE_3KB);
707 		mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(i),
708 		    MVPP2_TX_FIFO_THRESHOLD_3KB);
709 	}
710 }
711 
/*
 * Bring the header parser to a known state: clear all TCAM/SRAM rows,
 * invalidate every entry, set up the per-port lookup entry points and
 * install the default entries for each protocol stage.
 * Returns 0 on success or a negative value if a TCAM slot could not
 * be allocated by one of the protocol init routines.
 */
int
mvpp2_prs_default_init(struct mvpp2_softc *sc)
{
	int i, j, ret;

	mvpp2_write(sc, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Zero all TCAM and SRAM words. */
	for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
		mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, i);
		for (j = 0; j < MVPP2_PRS_TCAM_WORDS; j++)
			mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(j), 0);

		mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, i);
		for (j = 0; j < MVPP2_PRS_SRAM_WORDS; j++)
			mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(j), 0);
	}

	/* Mark every entry invalid. */
	for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++)
		mvpp2_prs_hw_inv(sc, i);

	/* All ports start parsing at the Marvell-header lookup. */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_prs_hw_port_init(sc, i, MVPP2_PRS_LU_MH,
		    MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(sc);
	mvpp2_prs_mh_init(sc);
	mvpp2_prs_mac_init(sc);
	mvpp2_prs_dsa_init(sc);
	ret = mvpp2_prs_etype_init(sc);
	if (ret)
		return ret;
	ret = mvpp2_prs_vlan_init(sc);
	if (ret)
		return ret;
	ret = mvpp2_prs_pppoe_init(sc);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_init(sc);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_init(sc);
	if (ret)
		return ret;

	return 0;
}
758 
/* Invalidate the parser TCAM entry at the given index. */
void
mvpp2_prs_hw_inv(struct mvpp2_softc *sc, int index)
{
	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
	    MVPP2_PRS_TCAM_INV_MASK);
}
766 
767 void
768 mvpp2_prs_hw_port_init(struct mvpp2_softc *sc, int port,
769     int lu_first, int lu_max, int offset)
770 {
771 	uint32_t reg;
772 
773 	reg = mvpp2_read(sc, MVPP2_PRS_INIT_LOOKUP_REG);
774 	reg &= ~MVPP2_PRS_PORT_LU_MASK(port);
775 	reg |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
776 	mvpp2_write(sc, MVPP2_PRS_INIT_LOOKUP_REG, reg);
777 
778 	reg = mvpp2_read(sc, MVPP2_PRS_MAX_LOOP_REG(port));
779 	reg &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
780 	reg |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
781 	mvpp2_write(sc, MVPP2_PRS_MAX_LOOP_REG(port), reg);
782 
783 	reg = mvpp2_read(sc, MVPP2_PRS_INIT_OFFS_REG(port));
784 	reg &= ~MVPP2_PRS_INIT_OFF_MASK(port);
785 	reg |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
786 	mvpp2_write(sc, MVPP2_PRS_INIT_OFFS_REG(port), reg);
787 }
788 
789 void
790 mvpp2_prs_def_flow_init(struct mvpp2_softc *sc)
791 {
792 	struct mvpp2_prs_entry pe;
793 	int i;
794 
795 	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
796 		memset(&pe, 0, sizeof(pe));
797 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
798 		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - i;
799 		mvpp2_prs_tcam_port_map_set(&pe, 0);
800 		mvpp2_prs_sram_ai_update(&pe, i, MVPP2_PRS_FLOW_ID_MASK);
801 		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
802 		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_FLOWS);
803 		mvpp2_prs_hw_write(sc, &pe);
804 	}
805 }
806 
/*
 * Default Marvell-header (MH) entry: skip MVPP2_MH_SIZE bytes and
 * continue with the MAC lookup, for all ports.
 */
void
mvpp2_prs_mh_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));
	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(sc, &pe);
}
822 
/*
 * Default MAC entries: drop frames that match nothing in the MAC stage
 * (non-promiscuous default), and install disabled drop-all/promiscuous
 * entries for unicast and multicast.
 */
void
mvpp2_prs_mac_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	/* Catch-all MAC entry: mark the frame to be dropped. */
	memset(&pe, 0, sizeof(pe));
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
	    MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(sc, &pe);
	/* Start with drop-all and promiscuous modes disabled. */
	mvpp2_prs_mac_drop_all_set(sc, 0, 0);
	mvpp2_prs_mac_promisc_set(sc, 0, MVPP2_PRS_L2_UNI_CAST, 0);
	mvpp2_prs_mac_promisc_set(sc, 0, MVPP2_PRS_L2_MULTI_CAST, 0);
}
842 
/*
 * Default DSA (distributed switch architecture) entries: disable all
 * tagged/untagged DSA and EDSA matching, and install a default entry
 * that falls through to the VLAN lookup when no DSA tag is found.
 */
void
mvpp2_prs_dsa_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	/* Default: no DSA tag, shift nothing, go on to VLAN parsing. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/*
	 * NOTE(review): the shadow records LU_MAC while the TCAM entry is
	 * LU_DSA; the Linux mvpp2 driver does the same — presumably
	 * intentional, but worth confirming.
	 */
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_hw_write(sc, &pe);
}
866 
/*
 * Populate the L2 (ethertype) parser stage: one TCAM entry per
 * recognized ethertype plus a catch-all default.  Returns 0 on
 * success or a negative value when no free TCAM slot is available.
 */
int
mvpp2_prs_etype_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_PPPOE);
	/* Skip the PPPoE header before the next lookup stage. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_ARP);
	/* ARP terminates parsing: generation bit + flows stage. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Flag as a CPU-destined special frame. */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_IP);
	/* Additionally match version 4 / IHL 5 in the first IP byte. */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	pe.index = tid;

	/*
	 * Intentionally no memset here: this entry reuses the previous
	 * IPv4 entry and only relaxes the IHL match (options allowed)
	 * while clearing and rewriting the result-info words.
	 */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD, MVPP2_PRS_IPV4_HEAD_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_IPV6);
	/* Skip ethertype + fixed header start + both L3 addresses. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
	    MVPP2_MAX_L3_ADDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1031 
/*
 * Populate the VLAN parser stage: double-tagged and single-tagged
 * entries plus defaults for double-VLAN continuation and untagged
 * frames.  Returns 0 on success or an error from the helpers.
 */
int
mvpp2_prs_vlan_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int ret;

	/* M_WAITOK allocation: cannot fail, no NULL check needed. */
	sc->sc_prs_double_vlans = mallocarray(MVPP2_PRS_DBL_VLANS_MAX,
	    sizeof(*sc->sc_prs_double_vlans), M_DEVBUF, M_WAITOK | M_ZERO);

	ret = mvpp2_prs_double_vlan_add(sc, ETHERTYPE_VLAN, ETHERTYPE_QINQ,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_double_vlan_add(sc, ETHERTYPE_VLAN, ETHERTYPE_VLAN,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_vlan_add(sc, ETHERTYPE_QINQ, MVPP2_PRS_SINGLE_VLAN_AI,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_vlan_add(sc, ETHERTYPE_VLAN, MVPP2_PRS_SINGLE_VLAN_AI,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;

	/* Default entry for double-VLAN frames: continue at the L2 stage. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
	    MVPP2_PRS_RI_VLAN_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
	    MVPP2_PRS_DBL_VLAN_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry for untagged frames. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
	    MVPP2_PRS_RI_VLAN_MASK);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1083 
/*
 * Populate the PPPoE parser stage: IPv4 (with/without options), IPv6
 * and a default entry for any other PPP protocol.  Returns 0 on
 * success or a negative value when no free TCAM slot is available.
 */
int
mvpp2_prs_pppoe_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE, with options. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, PPP_IP);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* IPv4 over PPPoE, without options (version 4, IHL 5). */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/*
	 * Intentionally no memset: this entry reuses the previous one
	 * and only tightens the match plus rewrites the result words.
	 */
	pe.index = tid;
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* IPv6 over PPPoE. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* Non-IP over PPPoE: mark L3 unknown and terminate the lookup. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1162 
/*
 * Populate the IPv6 parser stage: L4 protocol entries, multicast,
 * and default entries for unknown protocols/addresses.  Returns 0
 * on success or an error from the helpers.
 */
int
mvpp2_prs_ip6_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid, ret;

	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	/* ICMPv6 is flagged as a CPU-destined special frame. */
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_ICMPV6,
	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_IPIP, MVPP2_PRS_RI_UDF7_IP6_LITE,
	    MVPP2_PRS_RI_UDF7_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
	if (ret)
		return ret;

	/* Entry matching hop-limit 0: mark unknown L3 and drop. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe,
	    MVPP2_PRS_RI_L3_UN | MVPP2_PRS_RI_DROP_MASK,
	    MVPP2_PRS_RI_L3_PROTO_MASK | MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/*
	 * NOTE(review): shadow lookup recorded as LU_IP4 for the three
	 * entries below as well; matches the reference driver — confirm.
	 */
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown L4 protocol, no extension headers. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown L4 protocol, with extension headers. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown address type, treat as unicast and re-lookup. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back (negative) to re-examine the header. */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1253 
/*
 * Populate the IPv4 parser stage: L4 protocol entries, broadcast and
 * multicast, and default entries for unknown protocols/addresses.
 * Returns 0 on success or an error from the helpers.
 */
int
mvpp2_prs_ip4_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int ret;

	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	/* IGMP is flagged as a CPU-destined special frame. */
	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_IGMP,
	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_BROAD_CAST);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
	if (ret)
		return ret;

	/* Default: unknown L4 protocol, go around for the DIP lookup. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown address type, treat as unicast and finish. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1311 
1312 int
1313 mvpp2_port_match(struct device *parent, void *cfdata, void *aux)
1314 {
1315 	struct mvpp2_attach_args *maa = aux;
1316 	char buf[32];
1317 
1318 	if (OF_getprop(maa->ma_node, "status", buf, sizeof(buf)) > 0 &&
1319 	    strcmp(buf, "disabled") == 0)
1320 		return 0;
1321 
1322 	return 1;
1323 }
1324 
1325 void
1326 mvpp2_port_attach(struct device *parent, struct device *self, void *aux)
1327 {
1328 	struct mvpp2_port *sc = (void *)self;
1329 	struct mvpp2_attach_args *maa = aux;
1330 	struct mvpp2_tx_queue *txq;
1331 	struct mvpp2_rx_queue *rxq;
1332 	struct ifnet *ifp;
1333 	uint32_t phy, reg;
1334 	int i, idx, len, node;
1335 	int mii_flags = 0;
1336 	char *phy_mode;
1337 	char *managed;
1338 
1339 	sc->sc = (void *)parent;
1340 	sc->sc_node = maa->ma_node;
1341 	sc->sc_dmat = maa->ma_dmat;
1342 
1343 	sc->sc_id = OF_getpropint(sc->sc_node, "port-id", 0);
1344 	sc->sc_gop_id = OF_getpropint(sc->sc_node, "gop-port-id", 0);
1345 	sc->sc_sfp = OF_getpropint(sc->sc_node, "sfp", 0);
1346 
1347 	len = OF_getproplen(sc->sc_node, "phy-mode");
1348 	if (len <= 0) {
1349 		printf("%s: cannot extract phy-mode\n", self->dv_xname);
1350 		return;
1351 	}
1352 
1353 	phy_mode = malloc(len, M_TEMP, M_WAITOK);
1354 	OF_getprop(sc->sc_node, "phy-mode", phy_mode, len);
1355 	if (!strncmp(phy_mode, "10gbase-r", strlen("10gbase-r")))
1356 		sc->sc_phy_mode = PHY_MODE_10GBASER;
1357 	else if (!strncmp(phy_mode, "10gbase-kr", strlen("10gbase-kr")))
1358 		sc->sc_phy_mode = PHY_MODE_10GBASER;
1359 	else if (!strncmp(phy_mode, "2500base-x", strlen("2500base-x")))
1360 		sc->sc_phy_mode = PHY_MODE_2500BASEX;
1361 	else if (!strncmp(phy_mode, "1000base-x", strlen("1000base-x")))
1362 		sc->sc_phy_mode = PHY_MODE_1000BASEX;
1363 	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
1364 		sc->sc_phy_mode = PHY_MODE_SGMII;
1365 	else if (!strncmp(phy_mode, "rgmii-rxid", strlen("rgmii-rxid")))
1366 		sc->sc_phy_mode = PHY_MODE_RGMII_RXID;
1367 	else if (!strncmp(phy_mode, "rgmii-txid", strlen("rgmii-txid")))
1368 		sc->sc_phy_mode = PHY_MODE_RGMII_TXID;
1369 	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
1370 		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
1371 	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
1372 		sc->sc_phy_mode = PHY_MODE_RGMII;
1373 	else {
1374 		printf("%s: cannot use phy-mode %s\n", self->dv_xname,
1375 		    phy_mode);
1376 		return;
1377 	}
1378 	free(phy_mode, M_TEMP, len);
1379 
1380 	/* Lookup PHY. */
1381 	phy = OF_getpropint(sc->sc_node, "phy", 0);
1382 	if (phy) {
1383 		node = OF_getnodebyphandle(phy);
1384 		if (!node) {
1385 			printf(": no phy\n");
1386 			return;
1387 		}
1388 		sc->sc_mdio = mii_byphandle(phy);
1389 		sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
1390 		sc->sc_sfp = OF_getpropint(node, "sfp", sc->sc_sfp);
1391 	}
1392 
1393 	if (sc->sc_sfp)
1394 		config_mountroot(self, mvpp2_port_attach_sfp);
1395 
1396 	if ((len = OF_getproplen(sc->sc_node, "managed")) >= 0) {
1397 		managed = malloc(len, M_TEMP, M_WAITOK);
1398 		OF_getprop(sc->sc_node, "managed", managed, len);
1399 		if (!strncmp(managed, "in-band-status",
1400 		    strlen("in-band-status")))
1401 			sc->sc_inband_status = 1;
1402 		free(managed, M_TEMP, len);
1403 	}
1404 
1405 	if (OF_getprop(sc->sc_node, "local-mac-address",
1406 	    &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
1407 		memset(sc->sc_lladdr, 0xff, sizeof(sc->sc_lladdr));
1408 	printf(": address %s\n", ether_sprintf(sc->sc_lladdr));
1409 
1410 	sc->sc_ntxq = sc->sc_nrxq = 1;
1411 	sc->sc_txqs = mallocarray(sc->sc_ntxq, sizeof(*sc->sc_txqs),
1412 	    M_DEVBUF, M_WAITOK | M_ZERO);
1413 	sc->sc_rxqs = mallocarray(sc->sc_nrxq, sizeof(*sc->sc_rxqs),
1414 	    M_DEVBUF, M_WAITOK | M_ZERO);
1415 
1416 	for (i = 0; i < sc->sc_ntxq; i++) {
1417 		txq = &sc->sc_txqs[i];
1418 		txq->id = mvpp2_txq_phys(sc->sc_id, i);
1419 		txq->log_id = i;
1420 		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
1421 	}
1422 
1423 	sc->sc_tx_time_coal = MVPP2_TXDONE_COAL_USEC;
1424 
1425 	for (i = 0; i < sc->sc_nrxq; i++) {
1426 		rxq = &sc->sc_rxqs[i];
1427 		rxq->id = sc->sc_id * 32 + i;
1428 		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
1429 		rxq->time_coal = MVPP2_RX_COAL_USEC;
1430 	}
1431 
1432 	mvpp2_egress_disable(sc);
1433 	mvpp2_port_disable(sc);
1434 
1435 	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_GROUP_INDEX_REG,
1436 	    sc->sc_id << MVPP2_ISR_RXQ_GROUP_INDEX_GROUP_SHIFT |
1437 	    0 /* queue vector id */);
1438 	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_REG,
1439 	    sc->sc_nrxq << MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_SIZE_SHIFT |
1440 	    0 /* first rxq */);
1441 
1442 	mvpp2_ingress_disable(sc);
1443 	mvpp2_defaults_set(sc);
1444 
1445 	mvpp2_cls_oversize_rxq_set(sc);
1446 	mvpp2_cls_port_config(sc);
1447 
1448 	/*
1449 	 * We have one pool per core, so all RX queues on a specific
1450 	 * core share that pool.  Also long and short uses the same
1451 	 * pool.
1452 	 */
1453 	for (i = 0; i < sc->sc_nrxq; i++) {
1454 		mvpp2_rxq_long_pool_set(sc, i, i);
1455 		mvpp2_rxq_short_pool_set(sc, i, i);
1456 	}
1457 
1458 	mvpp2_mac_reset_assert(sc);
1459 	mvpp2_pcs_reset_assert(sc);
1460 
1461 	timeout_set(&sc->sc_tick, mvpp2_tick, sc);
1462 
1463 	ifp = &sc->sc_ac.ac_if;
1464 	ifp->if_softc = sc;
1465 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1466 	ifp->if_ioctl = mvpp2_ioctl;
1467 	ifp->if_start = mvpp2_start;
1468 	ifp->if_watchdog = mvpp2_watchdog;
1469 	ifq_set_maxlen(&ifp->if_snd, MVPP2_NTXDESC - 1);
1470 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1471 
1472 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1473 
1474 	sc->sc_mii.mii_ifp = ifp;
1475 	sc->sc_mii.mii_readreg = mvpp2_mii_readreg;
1476 	sc->sc_mii.mii_writereg = mvpp2_mii_writereg;
1477 	sc->sc_mii.mii_statchg = mvpp2_mii_statchg;
1478 
1479 	ifmedia_init(&sc->sc_media, 0, mvpp2_media_change, mvpp2_media_status);
1480 
1481 	if (sc->sc_mdio) {
1482 		switch (sc->sc_phy_mode) {
1483 		case PHY_MODE_1000BASEX:
1484 			mii_flags |= MIIF_IS_1000X;
1485 			break;
1486 		case PHY_MODE_SGMII:
1487 			mii_flags |= MIIF_SGMII;
1488 			break;
1489 		case PHY_MODE_RGMII_ID:
1490 			mii_flags |= MIIF_RXID | MIIF_TXID;
1491 			break;
1492 		case PHY_MODE_RGMII_RXID:
1493 			mii_flags |= MIIF_RXID;
1494 			break;
1495 		case PHY_MODE_RGMII_TXID:
1496 			mii_flags |= MIIF_TXID;
1497 			break;
1498 		default:
1499 			break;
1500 		}
1501 		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
1502 		    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY,
1503 		    mii_flags);
1504 		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1505 			printf("%s: no PHY found!\n", self->dv_xname);
1506 			ifmedia_add(&sc->sc_mii.mii_media,
1507 			    IFM_ETHER|IFM_MANUAL, 0, NULL);
1508 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
1509 		} else
1510 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1511 	} else {
1512 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
1513 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1514 
1515 		if (sc->sc_inband_status) {
1516 			switch (sc->sc_phy_mode) {
1517 			case PHY_MODE_1000BASEX:
1518 				sc->sc_mii.mii_media_active =
1519 				    IFM_ETHER|IFM_1000_KX|IFM_FDX;
1520 				break;
1521 			case PHY_MODE_2500BASEX:
1522 				sc->sc_mii.mii_media_active =
1523 				    IFM_ETHER|IFM_2500_KX|IFM_FDX;
1524 				break;
1525 			case PHY_MODE_10GBASER:
1526 				sc->sc_mii.mii_media_active =
1527 				    IFM_ETHER|IFM_10G_KR|IFM_FDX;
1528 				break;
1529 			default:
1530 				break;
1531 			}
1532 			mvpp2_inband_statchg(sc);
1533 		} else {
1534 			sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
1535 			sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
1536 			mvpp2_mii_statchg(self);
1537 		}
1538 
1539 		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
1540 		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
1541 	}
1542 
1543 	if_attach(ifp);
1544 	ether_ifattach(ifp);
1545 
1546 	if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
1547 	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
1548 	    sc->sc_phy_mode == PHY_MODE_SGMII ||
1549 	    sc->sc_phy_mode == PHY_MODE_RGMII ||
1550 	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
1551 	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
1552 	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
1553 		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_MASK_REG);
1554 		reg |= MVPP2_GMAC_INT_CAUSE_LINK_CHANGE;
1555 		mvpp2_gmac_write(sc, MVPP2_GMAC_INT_MASK_REG, reg);
1556 	}
1557 
1558 	if (sc->sc_gop_id == 0) {
1559 		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_MASK_REG);
1560 		reg |= MV_XLG_INTERRUPT_LINK_CHANGE;
1561 		mvpp2_xlg_write(sc, MV_XLG_INTERRUPT_MASK_REG, reg);
1562 	}
1563 
1564 	mvpp2_gop_intr_unmask(sc);
1565 
1566 	idx = OF_getindex(sc->sc_node, "link", "interrupt-names");
1567 	if (idx >= 0)
1568 		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1569 		    mvpp2_link_intr, sc, sc->sc_dev.dv_xname);
1570 	idx = OF_getindex(sc->sc_node, "hif0", "interrupt-names");
1571 	if (idx < 0)
1572 		idx = OF_getindex(sc->sc_node, "tx-cpu0", "interrupt-names");
1573 	if (idx >= 0)
1574 		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1575 		    mvpp2_intr, sc, sc->sc_dev.dv_xname);
1576 }
1577 
/*
 * Deferred (mountroot) attachment step for SFP-connected ports:
 * probe the module, add its media to the MII structure, and derive
 * the PHY mode from the detected media subtype.
 */
void
mvpp2_port_attach_sfp(struct device *self)
{
	struct mvpp2_port *sc = (struct mvpp2_port *)self;
	uint32_t reg;

	rw_enter(&mvpp2_sff_lock, RW_WRITE);
	sfp_disable(sc->sc_sfp);
	sfp_add_media(sc->sc_sfp, &sc->sc_mii);
	rw_exit(&mvpp2_sff_lock);

	/* Unrecognized media leaves the existing phy-mode untouched. */
	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_10G_SR:
	case IFM_10G_LR:
	case IFM_10G_LRM:
	case IFM_10G_ER:
	case IFM_10G_SFP_CU:
		sc->sc_phy_mode = PHY_MODE_10GBASER;
		sc->sc_mii.mii_media_status = IFM_AVALID;
		sc->sc_inband_status = 1;
		break;
	case IFM_2500_SX:
		sc->sc_phy_mode = PHY_MODE_2500BASEX;
		sc->sc_mii.mii_media_status = IFM_AVALID;
		sc->sc_inband_status = 1;
		break;
	case IFM_1000_CX:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->sc_phy_mode = PHY_MODE_1000BASEX;
		sc->sc_mii.mii_media_status = IFM_AVALID;
		sc->sc_inband_status = 1;
		break;
	}

	/* In-band status needs GMAC link-change interrupts. */
	if (sc->sc_inband_status) {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_MASK_REG);
		reg |= MVPP2_GMAC_INT_CAUSE_LINK_CHANGE;
		mvpp2_gmac_write(sc, MVPP2_GMAC_INT_MASK_REG, reg);
	}
}
1620 
1621 uint32_t
1622 mvpp2_read(struct mvpp2_softc *sc, bus_addr_t addr)
1623 {
1624 	return bus_space_read_4(sc->sc_iot, sc->sc_ioh_base, addr);
1625 }
1626 
1627 void
1628 mvpp2_write(struct mvpp2_softc *sc, bus_addr_t addr, uint32_t data)
1629 {
1630 	bus_space_write_4(sc->sc_iot, sc->sc_ioh_base, addr, data);
1631 }
1632 
1633 uint32_t
1634 mvpp2_gmac_read(struct mvpp2_port *sc, bus_addr_t addr)
1635 {
1636 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1637 	    MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr);
1638 }
1639 
1640 void
1641 mvpp2_gmac_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1642 {
1643 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1644 	    MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr,
1645 	    data);
1646 }
1647 
1648 uint32_t
1649 mvpp2_xlg_read(struct mvpp2_port *sc, bus_addr_t addr)
1650 {
1651 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1652 	    MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr);
1653 }
1654 
1655 void
1656 mvpp2_xlg_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1657 {
1658 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1659 	    MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr,
1660 	    data);
1661 }
1662 
1663 uint32_t
1664 mvpp2_mpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
1665 {
1666 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1667 	    MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr);
1668 }
1669 
1670 void
1671 mvpp2_mpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1672 {
1673 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1674 	    MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr,
1675 	    data);
1676 }
1677 
1678 uint32_t
1679 mvpp2_xpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
1680 {
1681 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1682 	    MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr);
1683 }
1684 
1685 void
1686 mvpp2_xpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1687 {
1688 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1689 	    MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr,
1690 	    data);
1691 }
1692 
1693 static inline int
1694 mvpp2_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
1695 {
1696 	int error;
1697 
1698 	error = bus_dmamap_load_mbuf(dmat, map, m, BUS_DMA_NOWAIT);
1699 	if (error != EFBIG)
1700 		return (error);
1701 
1702 	error = m_defrag(m, M_DONTWAIT);
1703 	if (error != 0)
1704 		return (error);
1705 
1706 	return bus_dmamap_load_mbuf(dmat, map, m, BUS_DMA_NOWAIT);
1707 }
1708 
/*
 * Transmit start routine.  Dequeues packets from the interface send
 * queue and places them on the shared aggregated TX ring (queue 0),
 * then kicks the hardware with the number of descriptors added.
 */
void
mvpp2_start(struct ifnet *ifp)
{
	struct mvpp2_port *sc = ifp->if_softc;
	struct mvpp2_tx_queue *txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_tx_desc *txd;
	struct mbuf *m;
	bus_dmamap_t map;
	uint32_t command;
	int i, current, first, last;
	int free, prod, used;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	/* Compute the number of free descriptors on the circular ring. */
	used = 0;
	prod = txq->prod;
	free = txq->cons;
	if (free <= prod)
		free += MVPP2_AGGR_TXQ_SIZE;
	free -= prod;

	for (;;) {
		/*
		 * Stop early if a maximally-fragmented packet might not
		 * fit; restart happens from mvpp2_txq_proc().
		 */
		if (free <= MVPP2_NTXSEGS) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		first = last = current = prod;
		map = txq->buf[current].mb_map;

		if (mvpp2_load_mbuf(sc->sc_dmat, map, m) != 0) {
			ifp->if_oerrors++;
			m_freem(m);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		command = MVPP2_TXD_L4_CSUM_NOT |
		    MVPP2_TXD_IP_CSUM_DISABLE;
		/* One descriptor per DMA segment. */
		for (i = 0; i < map->dm_nsegs; i++) {
			txd = &txq->descs[current];
			memset(txd, 0, sizeof(*txd));
			/*
			 * The buffer address is split into a 32-byte
			 * aligned part and a small packet offset.
			 */
			txd->buf_phys_addr_hw_cmd2 =
			    map->dm_segs[i].ds_addr & ~0x1f;
			txd->packet_offset =
			    map->dm_segs[i].ds_addr & 0x1f;
			txd->data_size = map->dm_segs[i].ds_len;
			txd->phys_txq = sc->sc_txqs[0].id;
			txd->command = command |
			    MVPP2_TXD_PADDING_DISABLE;
			if (i == 0)
				txd->command |= MVPP2_TXD_F_DESC;
			if (i == (map->dm_nsegs - 1))
				txd->command |= MVPP2_TXD_L_DESC;

			bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring),
			    current * sizeof(*txd), sizeof(*txd),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

			last = current;
			current = (current + 1) % MVPP2_AGGR_TXQ_SIZE;
			KASSERT(current != txq->cons);
		}

		/*
		 * Bookkeeping on the last descriptor of the packet, so
		 * completion (which sees the last slot) frees map and mbuf;
		 * the first slot keeps a spare map for reuse.
		 */
		KASSERT(txq->buf[last].mb_m == NULL);
		txq->buf[first].mb_map = txq->buf[last].mb_map;
		txq->buf[last].mb_map = map;
		txq->buf[last].mb_m = m;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		free -= map->dm_nsegs;
		used += map->dm_nsegs;
		prod = current;
	}

	/* Tell hardware how many descriptors were added. */
	if (used)
		mvpp2_write(sc->sc, MVPP2_AGGR_TXQ_UPDATE_REG, used);

	if (txq->prod != prod)
		txq->prod = prod;
}
1807 
/*
 * Interface ioctl handler.  Handles up/down transitions, media,
 * RX ring info, SFP page access; everything else is passed to
 * ether_ioctl().  ENETRESET from any path reprograms the RX filter.
 */
int
mvpp2_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct mvpp2_port *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				mvpp2_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				mvpp2_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = mvpp2_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCGIFSFFPAGE:
		/* Serialize SFP EEPROM access across all mvpp ports. */
		error = rw_enter(&mvpp2_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
		rw_exit(&mvpp2_sff_lock);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			mvpp2_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
1865 
/*
 * SIOCGIFRXR: report per-RX-queue ring information to userland.
 * Returns ENOMEM if the temporary array cannot be allocated.
 */
int
mvpp2_rxrinfo(struct mvpp2_port *sc, struct if_rxrinfo *ifri)
{
	struct mvpp2_rx_queue *rxq;
	struct if_rxring_info *ifrs, *ifr;
	unsigned int i;
	int error;

	ifrs = mallocarray(sc->sc_nrxq, sizeof(*ifrs), M_TEMP,
	    M_WAITOK|M_ZERO|M_CANFAIL);
	if (ifrs == NULL)
		return (ENOMEM);

	for (i = 0; i < sc->sc_nrxq; i++) {
		rxq = &sc->sc_rxqs[i];
		ifr = &ifrs[i];

		snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), "%u", i);
		ifr->ifr_size = MCLBYTES;
		ifr->ifr_info = rxq->rxring;
	}

	/* i == sc_nrxq here, so the full array is reported and freed. */
	error = if_rxr_info_ioctl(ifri, i, ifrs);
	free(ifrs, M_TEMP, i * sizeof(*ifrs));

	return (error);
}
1893 
/*
 * Transmit watchdog.  Stub: only logs that it fired; no reset or
 * recovery is implemented yet.
 */
void
mvpp2_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}
1899 
1900 int
1901 mvpp2_media_change(struct ifnet *ifp)
1902 {
1903 	struct mvpp2_port *sc = ifp->if_softc;
1904 
1905 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1906 		mii_mediachg(&sc->sc_mii);
1907 
1908 	return (0);
1909 }
1910 
1911 void
1912 mvpp2_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1913 {
1914 	struct mvpp2_port *sc = ifp->if_softc;
1915 
1916 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1917 		mii_pollstat(&sc->sc_mii);
1918 
1919 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1920 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1921 }
1922 
1923 int
1924 mvpp2_mii_readreg(struct device *self, int phy, int reg)
1925 {
1926 	struct mvpp2_port *sc = (void *)self;
1927 	return sc->sc_mdio->md_readreg(sc->sc_mdio->md_cookie, phy, reg);
1928 }
1929 
1930 void
1931 mvpp2_mii_writereg(struct device *self, int phy, int reg, int val)
1932 {
1933 	struct mvpp2_port *sc = (void *)self;
1934 	return sc->sc_mdio->md_writereg(sc->sc_mdio->md_cookie, phy, reg, val);
1935 }
1936 
/* MII status-change callback: propagate new link state to the MAC. */
void
mvpp2_mii_statchg(struct device *self)
{
	struct mvpp2_port *sc = (void *)self;

	mvpp2_port_change(sc);
}
1943 
/*
 * Update media status from in-band (hardware) link state and apply
 * it via mvpp2_port_change().  Used when no PHY drives the status.
 */
void
mvpp2_inband_statchg(struct mvpp2_port *sc)
{
	/* Preserve the configured subtype before resetting the word. */
	uint64_t subtype = IFM_SUBTYPE(sc->sc_mii.mii_media_active);
	uint32_t reg;

	sc->sc_mii.mii_media_status = IFM_AVALID;
	sc->sc_mii.mii_media_active = IFM_ETHER;

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		/* 10G path: link bit from the XLG MAC, always full duplex. */
		reg = mvpp2_xlg_read(sc, MV_XLG_MAC_PORT_STATUS_REG);
		if (reg & MV_XLG_MAC_PORT_STATUS_LINKSTATUS)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		sc->sc_mii.mii_media_active |= IFM_FDX;
		sc->sc_mii.mii_media_active |= subtype;
	} else {
		/* GMAC path: link, duplex and speed from port status. */
		reg = mvpp2_gmac_read(sc, MVPP2_PORT_STATUS0_REG);
		if (reg & MVPP2_PORT_STATUS0_LINKUP)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		if (reg & MVPP2_PORT_STATUS0_FULLDX)
			sc->sc_mii.mii_media_active |= IFM_FDX;
		if (sc->sc_phy_mode == PHY_MODE_2500BASEX)
			sc->sc_mii.mii_media_active |= subtype;
		else if (sc->sc_phy_mode == PHY_MODE_1000BASEX)
			sc->sc_mii.mii_media_active |= subtype;
		else if (reg & MVPP2_PORT_STATUS0_GMIISPEED)
			sc->sc_mii.mii_media_active |= IFM_1000_T;
		else if (reg & MVPP2_PORT_STATUS0_MIISPEED)
			sc->sc_mii.mii_media_active |= IFM_100_TX;
		else
			sc->sc_mii.mii_media_active |= IFM_10_T;
	}

	mvpp2_port_change(sc);
}
1980 
/*
 * React to a link state change: cache the new link state and, when
 * link state is software-forced (no in-band status), force the MAC's
 * link, speed and duplex bits to match the negotiated media.
 */
void
mvpp2_port_change(struct mvpp2_port *sc)
{
	uint32_t reg;

	sc->sc_link = !!(sc->sc_mii.mii_media_status & IFM_ACTIVE);

	/* With in-band status the hardware tracks link by itself. */
	if (sc->sc_inband_status)
		return;

	if (sc->sc_link) {
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			/* Force link up on the XLG MAC. */
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKPASS;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			/* Force link up and program speed/duplex on GMAC. */
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
			reg |= MVPP2_GMAC_FORCE_LINK_PASS;
			reg &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_2500_KX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_2500_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_CX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_LX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_KX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T)
				reg |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX)
				reg |= MVPP2_GMAC_CONFIG_MII_SPEED;
			if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
				reg |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	} else {
		/* Link lost: force link down on the relevant MAC. */
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
			reg |= MVPP2_GMAC_FORCE_LINK_DOWN;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	}
}
2034 
2035 void
2036 mvpp2_tick(void *arg)
2037 {
2038 	struct mvpp2_port *sc = arg;
2039 	int s;
2040 
2041 	s = splnet();
2042 	mii_tick(&sc->sc_mii);
2043 	splx(s);
2044 
2045 	timeout_add_sec(&sc->sc_tick, 1);
2046 }
2047 
/*
 * Link change interrupt handler.  Reads (and thereby clears) the
 * MAC's interrupt cause register and, if a link change is signalled
 * while using in-band status, refreshes the media state.
 */
int
mvpp2_link_intr(void *arg)
{
	struct mvpp2_port *sc = arg;
	uint32_t reg;
	int event = 0;

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_CAUSE_REG);
		if (reg & MV_XLG_INTERRUPT_LINK_CHANGE)
			event = 1;
	} else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
	    sc->sc_phy_mode == PHY_MODE_SGMII ||
	    sc->sc_phy_mode == PHY_MODE_RGMII ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_CAUSE_REG);
		if (reg & MVPP2_GMAC_INT_CAUSE_LINK_CHANGE)
			event = 1;
	}

	if (event && sc->sc_inband_status)
		mvpp2_inband_statchg(sc);

	/* Always claim the interrupt. */
	return (1);
}
2077 
/*
 * Main RX/TX interrupt handler.  Acknowledges the misc cause and
 * dispatches the per-queue TX and RX bitmasks to the queue handlers.
 */
int
mvpp2_intr(void *arg)
{
	struct mvpp2_port *sc = arg;
	uint32_t reg;

	reg = mvpp2_read(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id));
	if (reg & MVPP2_CAUSE_MISC_SUM_MASK) {
		/* Clear the misc cause and its summary bit. */
		mvpp2_write(sc->sc, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id),
		    reg & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}
	if (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_tx_proc(sc,
		    (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK) >>
		    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET);

	if (reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_rx_proc(sc,
		    reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK);

	return (1);
}
2101 
2102 void
2103 mvpp2_tx_proc(struct mvpp2_port *sc, uint8_t queues)
2104 {
2105 	struct mvpp2_tx_queue *txq;
2106 	int i;
2107 
2108 	for (i = 0; i < sc->sc_ntxq; i++) {
2109 		txq = &sc->sc_txqs[i];
2110 		if ((queues & (1 << i)) == 0)
2111 			continue;
2112 		mvpp2_txq_proc(sc, txq);
2113 	}
2114 }
2115 
/*
 * TX completion for one queue: read the hardware's sent counter,
 * then release that many consumed slots from the shared aggregated
 * ring (unload DMA maps, free mbufs) and restart a stalled ifq.
 */
void
mvpp2_txq_proc(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvpp2_tx_queue *aggr_txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_buf *txb;
	int i, idx, nsent;

	/* XXX: this is a percpu register! */
	nsent = (mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id)) &
	    MVPP2_TRANSMITTED_COUNT_MASK) >>
	    MVPP2_TRANSMITTED_COUNT_OFFSET;

	for (i = 0; i < nsent; i++) {
		idx = aggr_txq->cons;
		KASSERT(idx < MVPP2_AGGR_TXQ_SIZE);

		txb = &aggr_txq->buf[idx];
		/* Only the packet's last slot holds an mbuf to free. */
		if (txb->mb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
			    txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->mb_map);

			m_freem(txb->mb_m);
			txb->mb_m = NULL;
		}

		aggr_txq->cons = (aggr_txq->cons + 1) % MVPP2_AGGR_TXQ_SIZE;
	}

	/* Descriptors were freed; allow mvpp2_start() to run again. */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
2149 
2150 void
2151 mvpp2_rx_proc(struct mvpp2_port *sc, uint8_t queues)
2152 {
2153 	struct mvpp2_rx_queue *rxq;
2154 	int i;
2155 
2156 	for (i = 0; i < sc->sc_nrxq; i++) {
2157 		rxq = &sc->sc_rxqs[i];
2158 		if ((queues & (1 << i)) == 0)
2159 			continue;
2160 		mvpp2_rxq_proc(sc, rxq);
2161 	}
2162 
2163 	mvpp2_rx_refill(sc);
2164 }
2165 
/*
 * Receive processing for one RX queue: for each received descriptor,
 * recover the rxbuf from the cookie the BM returned, pass the mbuf up
 * the stack, and queue the buffer index for refill.
 *
 * The descriptor's buf_cookie encodes pool id (bits 16-31) and rxbuf
 * index (bits 0-15) as written by mvpp2_rx_refill().
 */
void
mvpp2_rxq_proc(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mvpp2_rx_desc *rxd;
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	struct mbuf *m;
	uint64_t virt;
	uint32_t i, nrecv, pool;

	nrecv = mvpp2_rxq_received(sc, rxq->id);
	if (!nrecv)
		return;

	/* One BM pool per CPU; we run on the CPU that owns this pool. */
	pool = curcpu()->ci_cpuid;
	KASSERT(pool < sc->sc->sc_npools);
	bm = &sc->sc->sc_bm_pools[pool];

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < nrecv; i++) {
		rxd = &rxq->descs[rxq->cons];
		virt = rxd->buf_cookie_bm_qset_cls_info;
		KASSERT(((virt >> 16) & 0xffff) == pool);
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m != NULL);

		bus_dmamap_sync(sc->sc_dmat, rxb->mb_map, 0,
		    rxd->data_size, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->mb_map);

		m = rxb->mb_m;
		rxb->mb_m = NULL;

		m->m_pkthdr.len = m->m_len = rxd->data_size;
		/* Strip the Marvell header prepended by the hardware. */
		m_adj(m, MVPP2_MH_SIZE);
		ml_enqueue(&ml, m);

		/* Remember this slot needs a fresh mbuf (see refill). */
		KASSERT(bm->freelist[bm->free_prod] == -1);
		bm->freelist[bm->free_prod] = virt & 0xffffffff;
		bm->free_prod = (bm->free_prod + 1) % MVPP2_BM_SIZE;

		rxq->cons = (rxq->cons + 1) % MVPP2_NRXDESC;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Return the processed descriptors to the hardware. */
	mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);

	if_input(ifp, &ml);
}
2224 
2225 /*
2226  * We have a pool per core, and since we should not assume that
2227  * RX buffers are always used in order, keep a list of rxbuf[]
2228  * indices that should be filled with an mbuf, if possible.
2229  */
void
mvpp2_rx_refill(struct mvpp2_port *sc)
{
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	uint64_t phys, virt;
	int pool;

	/* Refill only this CPU's pool; see comment above. */
	pool = curcpu()->ci_cpuid;
	KASSERT(pool < sc->sc->sc_npools);
	bm = &sc->sc->sc_bm_pools[pool];

	/* Walk the pending-refill list until empty or out of mbufs. */
	while (bm->freelist[bm->free_cons] != -1) {
		virt = bm->freelist[bm->free_cons];
		KASSERT(((virt >> 16) & 0xffff) == pool);
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m == NULL);

		rxb->mb_m = mvpp2_alloc_mbuf(sc->sc, rxb->mb_map);
		if (rxb->mb_m == NULL)
			break;

		bm->freelist[bm->free_cons] = -1;
		bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;

		/*
		 * Release the buffer back to the BM: high address bits
		 * first, then virt (cookie) and phys low words.  The
		 * final PHY_RLS write triggers the release.
		 */
		phys = rxb->mb_map->dm_segs[0].ds_addr;
		mvpp2_write(sc->sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
		    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
		    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
		    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
		mvpp2_write(sc->sc, MVPP2_BM_VIRT_RLS_REG,
		    virt & 0xffffffff);
		mvpp2_write(sc->sc, MVPP2_BM_PHY_RLS_REG(pool),
		    phys & 0xffffffff);
	}
}
2267 
/*
 * Bring the interface up: program parser entries for our addresses,
 * initialize TX/RX queues, configure media and RX filter, unmask
 * interrupts, configure and enable the MAC, and start the PHY tick.
 */
void
mvpp2_up(struct mvpp2_port *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	if (sc->sc_sfp) {
		rw_enter(&mvpp2_sff_lock, RW_WRITE);
		sfp_enable(sc->sc_sfp);
		rw_exit(&mvpp2_sff_lock);
	}

	/* Accept broadcast and our own MAC address in the parser. */
	mvpp2_prs_mac_da_accept(sc, etherbroadcastaddr, 1);
	mvpp2_prs_mac_da_accept(sc, sc->sc_lladdr, 1);
	mvpp2_prs_tag_mode_set(sc->sc, sc->sc_id, MVPP2_TAG_TYPE_MH);
	mvpp2_prs_def_flow(sc);

	for (i = 0; i < sc->sc_ntxq; i++)
		mvpp2_txq_hw_init(sc, &sc->sc_txqs[i]);

	mvpp2_tx_time_coal_set(sc, sc->sc_tx_time_coal);

	for (i = 0; i < sc->sc_nrxq; i++)
		mvpp2_rxq_hw_init(sc, &sc->sc_rxqs[i]);

	/* FIXME: rx buffer fill */

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	mvpp2_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	mvpp2_txp_max_tx_size_set(sc);

	/* XXX: single vector */
	mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id),
	    MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_MISC_SUM_MASK);
	mvpp2_interrupts_enable(sc, (1 << 0));

	mvpp2_mac_config(sc);
	mvpp2_egress_enable(sc);
	mvpp2_ingress_enable(sc);

	timeout_add_sec(&sc->sc_tick, 1);
}
2320 
/*
 * Set up the shared aggregated TX queue: allocate the descriptor ring
 * and per-slot DMA maps, and program the ring address and size into
 * the hardware.
 */
void
mvpp2_aggr_txq_hw_init(struct mvpp2_softc *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int i;

	txq->ring = mvpp2_dmamem_alloc(sc,
	    MVPP2_AGGR_TXQ_SIZE * sizeof(struct mvpp2_tx_desc), 32);
	KASSERT(txq->ring != NULL);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_AGGR_TXQ_SIZE, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	/* One DMA map per ring slot; no mbuf attached yet. */
	for (i = 0; i < MVPP2_AGGR_TXQ_SIZE; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	/* The hardware dictates the initial producer index. */
	txq->prod = mvpp2_read(sc, MVPP2_AGGR_TXQ_INDEX_REG(txq->id));
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_ADDR_REG(txq->id),
	    MVPP2_DMA_DVA(txq->ring) >> MVPP22_DESC_ADDR_OFFS);
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_SIZE_REG(txq->id),
	    MVPP2_AGGR_TXQ_SIZE);
}
2351 
/*
 * Set up one per-port TX queue: allocate the descriptor ring and DMA
 * maps, program ring address/size, descriptor prefetch, and the WRR/EJP
 * scheduler parameters (all via indirect register access).
 */
void
mvpp2_txq_hw_init(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int desc, desc_per_txq;
	uint32_t reg;
	int i;

	txq->prod = txq->cons = 0;

	txq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NTXDESC * sizeof(struct mvpp2_tx_desc), 32);
	KASSERT(txq->ring != NULL);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_NTXDESC, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVPP2_NTXDESC; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	/* Select the queue, then program it through indirect registers. */
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(txq->ring));
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG,
	    MVPP2_NTXDESC & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(sc->sc, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_RSVD_CLR_REG,
	    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	reg = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG);
	reg &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PENDING_REG, reg);

	/* Carve out this queue's slice of the descriptor prefetch buffer. */
	desc_per_txq = 16;
	desc = (sc->sc_id * MVPP2_MAX_TXQ * desc_per_txq) +
	    (txq->log_id * desc_per_txq);

	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG,
	    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
	    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	mvpp2_write(sc->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(sc));

	reg = mvpp2_read(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	reg &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	reg |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	reg |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), reg);

	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
	    MVPP2_TXQ_TOKEN_SIZE_MAX);

	mvpp2_tx_pkts_coal_set(sc, txq, txq->done_pkts_coal);

	/* Dummy read to clear the hardware's sent-packet counter. */
	mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id));
}
2418 
/*
 * Set up one RX queue: allocate the descriptor ring, program ring
 * address/size through the indirect RXQ registers, and configure
 * packet offset and interrupt coalescing.
 */
void
mvpp2_rxq_hw_init(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	rxq->prod = rxq->cons = 0;

	rxq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NRXDESC * sizeof(struct mvpp2_rx_desc), 32);
	KASSERT(rxq->ring != NULL);
	rxq->descs = MVPP2_DMA_KVA(rxq->ring);

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring),
	    0, MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	/* Select the queue, then program it through indirect registers. */
	mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(rxq->ring) >> MVPP22_DESC_ADDR_OFFS);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, MVPP2_NRXDESC);
	mvpp2_write(sc->sc, MVPP2_RXQ_INDEX_REG, 0);
	mvpp2_rxq_offset_set(sc, rxq->id, 0);
	mvpp2_rx_pkts_coal_set(sc, rxq, rxq->pkts_coal);
	mvpp2_rx_time_coal_set(sc, rxq, rxq->time_coal);
	/* Hand all descriptors to the hardware. */
	mvpp2_rxq_status_update(sc, rxq->id, 0, MVPP2_NRXDESC);
}
2444 
2445 void
2446 mvpp2_mac_reset_assert(struct mvpp2_port *sc)
2447 {
2448 	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
2449 	    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) |
2450 	    MVPP2_PORT_CTRL2_PORTMACRESET);
2451 	if (sc->sc_gop_id == 0)
2452 		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG,
2453 		    mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
2454 		    ~MV_XLG_MAC_CTRL0_MACRESETN);
2455 }
2456 
/*
 * Put the MPCS and XPCS blocks into reset.  Only GOP 0 has these
 * blocks, so other ports return immediately.
 */
void
mvpp2_pcs_reset_assert(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc_gop_id != 0)
		return;

	/* MPCS: hold clocks in reset with the divider phase set. */
	reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
	reg |= MVPP22_MPCS_CLK_DIV_PHASE_SET;
	reg &= ~MVPP22_MPCS_TX_SD_CLK_RESET;
	reg &= ~MVPP22_MPCS_RX_SD_CLK_RESET;
	reg &= ~MVPP22_MPCS_MAC_CLK_RESET;
	mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	/* XPCS: assert reset (PCSRESET is active-low). */
	reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
	reg &= ~MVPP22_XPCS_PCSRESET;
	mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
}
2475 
/*
 * Take the PCS block matching the current PHY mode out of reset:
 * MPCS for 10GBASE-R, XPCS for XAUI.  GOP 0 only.
 */
void
mvpp2_pcs_reset_deassert(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc_gop_id != 0)
		return;

	if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
		reg &= ~MVPP22_MPCS_CLK_DIV_PHASE_SET;
		reg |= MVPP22_MPCS_TX_SD_CLK_RESET;
		reg |= MVPP22_MPCS_RX_SD_CLK_RESET;
		reg |= MVPP22_MPCS_MAC_CLK_RESET;
		mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	} else if (sc->sc_phy_mode == PHY_MODE_XAUI) {
		reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
		reg |= MVPP22_XPCS_PCSRESET;
		mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
	}
}
2497 
/*
 * Full MAC (re)configuration sequence: force link down, disable and
 * reset the port, reconfigure COMPHY/GOP and the XLG or GMAC MAC for
 * the current PHY mode, then release resets and re-enable the port.
 * The ordering of these steps follows the hardware's requirements.
 */
void
mvpp2_mac_config(struct mvpp2_port *sc)
{
	uint32_t reg;

	/* Force link down on both MACs while reconfiguring. */
	reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
	reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
	reg |= MVPP2_GMAC_FORCE_LINK_DOWN;
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
	if (sc->sc_gop_id == 0) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
		reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
		reg |= MV_XLG_MAC_CTRL0_FORCELINKDOWN;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
	}

	mvpp2_port_disable(sc);

	mvpp2_mac_reset_assert(sc);
	mvpp2_pcs_reset_assert(sc);

	/* Reconfigure the SerDes with the MAC quiesced. */
	mvpp2_gop_intr_mask(sc);
	mvpp2_comphy_config(sc, 0);

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI))
		mvpp2_xlg_config(sc);
	else
		mvpp2_gmac_config(sc);

	mvpp2_comphy_config(sc, 1);
	mvpp2_gop_config(sc);

	mvpp2_pcs_reset_deassert(sc);

	/* GOP 0 can mux between the 10G (XLG) and 1G (GMAC) MACs. */
	if (sc->sc_gop_id == 0) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL3_REG);
		reg &= ~MV_XLG_MAC_CTRL3_MACMODESELECT_MASK;
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI)
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_10G;
		else
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_GMAC;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL3_REG, reg);
	}

	/* Maximum RX frame size, expressed in 16-bit units. */
	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL1_REG);
		reg &= ~MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL1_REG, reg);
	} else {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_CTRL_0_REG);
		reg &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MVPP2_GMAC_MAX_RX_SIZE_OFFS;
		mvpp2_gmac_write(sc, MVPP2_GMAC_CTRL_0_REG, reg);
	}

	mvpp2_gop_intr_unmask(sc);

	/* Release the GMAC from reset and wait for completion. */
	if (!(sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
		    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) &
		    ~MVPP2_PORT_CTRL2_PORTMACRESET);
		while (mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) &
		    MVPP2_PORT_CTRL2_PORTMACRESET)
			;
	}

	mvpp2_port_enable(sc);

	if (sc->sc_inband_status) {
		/* Let the hardware track link state again. */
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
		reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
		reg &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
		mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		if (sc->sc_gop_id == 0) {
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		}
	} else
		mvpp2_port_change(sc);
}
2587 
2588 void
2589 mvpp2_xlg_config(struct mvpp2_port *sc)
2590 {
2591 	uint32_t ctl0, ctl4;
2592 
2593 	ctl0 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
2594 	ctl4 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL4_REG);
2595 
2596 	ctl0 |= MV_XLG_MAC_CTRL0_MACRESETN;
2597 	ctl4 &= ~MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK;
2598 	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_PFC_EN;
2599 	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN;
2600 
2601 	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, ctl0);
2602 	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL4_REG, ctl0);
2603 
2604 	/* Port reset */
2605 	while ((mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
2606 	    MV_XLG_MAC_CTRL0_MACRESETN) == 0)
2607 		;
2608 }
2609 
/*
 * Configure the GMAC (1G/2.5G) for the current PHY mode: select
 * PCS/GMII paths per mode and program auto-negotiation behavior
 * when in-band status is used.
 */
void
mvpp2_gmac_config(struct mvpp2_port *sc)
{
	uint32_t ctl0, ctl2, ctl4, panc;

	/* Setup phy. */
	ctl0 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL0_REG);
	ctl2 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG);
	ctl4 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL4_REG);
	panc = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);

	/* Start from a clean slate for mode- and AN-related bits. */
	ctl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK |
	    MVPP2_GMAC_INBAND_AN_MASK);
	panc &= ~(MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
	    MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FC_ADV_EN |
	    MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
	    MVPP2_GMAC_IN_BAND_AUTONEG);

	switch (sc->sc_phy_mode) {
	case PHY_MODE_XAUI:
	case PHY_MODE_10GBASER:
		/* Handled by the XLG MAC; nothing to do here. */
		break;
	case PHY_MODE_2500BASEX:
	case PHY_MODE_1000BASEX:
		/* Serial modes: PCS on, GMII pins bypassed. */
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	case PHY_MODE_SGMII:
		/* Like 1000BASE-X but with in-band AN enabled. */
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl2 |= MVPP2_GMAC_INBAND_AN_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	case PHY_MODE_RGMII:
	case PHY_MODE_RGMII_ID:
	case PHY_MODE_RGMII_RXID:
	case PHY_MODE_RGMII_TXID:
		/* Parallel modes: drive the external GMII pins. */
		ctl4 &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	}

	/* Use Auto-Negotiation for Inband Status only */
	if (sc->sc_inband_status) {
		panc &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		panc |= MVPP2_GMAC_IN_BAND_AUTONEG;
		/* TODO: read mode from SFP */
		if (sc->sc_phy_mode == PHY_MODE_SGMII) {
			/* SGMII */
			panc |= MVPP2_GMAC_AN_SPEED_EN;
			panc |= MVPP2_GMAC_AN_DUPLEX_EN;
		} else {
			/* 802.3z */
			ctl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
			panc |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			panc |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		}
	}

	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL0_REG, ctl0);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG, ctl2);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL4_REG, ctl4);
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, panc);
}
2684 
/*
 * Parameters for the firmware (SMC) COMPHY power-on/off calls used
 * by mvpp2_comphy_config() below.
 */
#define COMPHY_BASE		0x120000
#define COMPHY_SIP_POWER_ON	0x82000001
#define COMPHY_SIP_POWER_OFF	0x82000002
#define COMPHY_SPEED(x)		((x) << 2)
#define  COMPHY_SPEED_1_25G		0 /* SGMII 1G */
#define  COMPHY_SPEED_2_5G		1
#define  COMPHY_SPEED_3_125G		2 /* SGMII 2.5G */
#define  COMPHY_SPEED_5G		3
#define  COMPHY_SPEED_5_15625G		4 /* XFI 5G */
#define  COMPHY_SPEED_6G		5
#define  COMPHY_SPEED_10_3125G		6 /* XFI 10G */
#define COMPHY_UNIT(x)		((x) << 8)
#define COMPHY_MODE(x)		((x) << 12)
#define  COMPHY_MODE_SATA		1
#define  COMPHY_MODE_SGMII		2 /* SGMII 1G */
#define  COMPHY_MODE_HS_SGMII		3 /* SGMII 2.5G */
#define  COMPHY_MODE_USB3H		4
#define  COMPHY_MODE_USB3D		5
#define  COMPHY_MODE_PCIE		6
#define  COMPHY_MODE_RXAUI		7
#define  COMPHY_MODE_XFI		8
#define  COMPHY_MODE_SFI		9
#define  COMPHY_MODE_USB3		10
#define  COMPHY_MODE_AP			11
2709 
/*
 * Power the port's COMPHY SerDes lane on or off through a secure
 * firmware call, with mode and speed derived from the PHY mode.
 * Silently does nothing if the device tree has no usable "phys"
 * property or the mode needs no COMPHY setup.
 */
void
mvpp2_comphy_config(struct mvpp2_port *sc, int on)
{
	int node, phys[2], lane, unit;
	uint32_t mode;

	if (OF_getpropintarray(sc->sc_node, "phys", phys, sizeof(phys)) !=
	    sizeof(phys))
		return;
	node = OF_getnodebyphandle(phys[0]);
	if (!node)
		return;

	/* phys[0] names the lane node, phys[1] carries the unit index. */
	lane = OF_getpropint(node, "reg", 0);
	unit = phys[1];

	switch (sc->sc_phy_mode) {
	case PHY_MODE_XAUI:
		mode = COMPHY_MODE(COMPHY_MODE_RXAUI) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_10GBASER:
		mode = COMPHY_MODE(COMPHY_MODE_XFI) |
		    COMPHY_SPEED(COMPHY_SPEED_10_3125G) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_2500BASEX:
		mode = COMPHY_MODE(COMPHY_MODE_HS_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_3_125G) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_1000BASEX:
	case PHY_MODE_SGMII:
		mode = COMPHY_MODE(COMPHY_MODE_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_1_25G) |
		    COMPHY_UNIT(unit);
		break;
	default:
		return;
	}

	if (on)
		smc_call(COMPHY_SIP_POWER_ON, sc->sc->sc_ioh_paddr + COMPHY_BASE,
		    lane, mode);
	else
		smc_call(COMPHY_SIP_POWER_OFF, sc->sc->sc_ioh_paddr + COMPHY_BASE,
		    lane, 0);
}
2758 
/*
 * One-time "group of ports" (GOP) configuration for this port's MAC
 * mode.  Only effective on variants that expose the system-controller
 * regmap (sc_rm); otherwise a no-op.
 */
void
mvpp2_gop_config(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc->sc_rm == NULL)
		return;

	if (sc->sc_phy_mode == PHY_MODE_RGMII ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
		/* RGMII is not handled on GOP 0; bail out. */
		if (sc->sc_gop_id == 0)
			return;
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
		reg |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
		regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);
		/* Select RGMII muxing for GOP 2 or 3. */
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_CTRL0);
		if (sc->sc_gop_id == 2)
			reg |= GENCONF_CTRL0_PORT0_RGMII |
			    GENCONF_CTRL0_PORT1_RGMII;
		else if (sc->sc_gop_id == 3)
			reg |= GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write_4(sc->sc->sc_rm, GENCONF_CTRL0, reg);
	} else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
	    sc->sc_phy_mode == PHY_MODE_SGMII) {
		/* Serial modes: bus width select plus RX data sampling. */
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
		reg |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
		    GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
		regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);
		if (sc->sc_gop_id > 1) {
			/* Undo any RGMII muxing for GOP 2/3. */
			reg = regmap_read_4(sc->sc->sc_rm, GENCONF_CTRL0);
			if (sc->sc_gop_id == 2)
				reg &= ~GENCONF_CTRL0_PORT0_RGMII;
			else if (sc->sc_gop_id == 3)
				reg &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
			regmap_write_4(sc->sc->sc_rm, GENCONF_CTRL0, reg);
		}
	} else if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
		/* 10GBASE-R is only configured on GOP 0. */
		if (sc->sc_gop_id != 0)
			return;
		/* XPCS: clear PCS mode, use two active lanes. */
		reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
		reg &= ~MVPP22_XPCS_PCSMODE_MASK;
		reg &= ~MVPP22_XPCS_LANEACTIVE_MASK;
		reg |= 2 << MVPP22_XPCS_LANEACTIVE_OFFS;
		mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
		/* MPCS: disable FEC, set the default clock divider. */
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS40G_COMMON_CONTROL);
		reg &= ~MVPP22_MPCS_FORWARD_ERROR_CORRECTION_MASK;
		mvpp2_mpcs_write(sc, MVPP22_MPCS40G_COMMON_CONTROL, reg);
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
		reg &= ~MVPP22_MPCS_CLK_DIVISION_RATIO_MASK;
		reg |= MVPP22_MPCS_CLK_DIVISION_RATIO_DEFAULT;
		mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	} else
		return;

	/* Common tail: take the GOP out of reset and enable it. */
	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL1);
	reg |= GENCONF_PORT_CTRL1_RESET(sc->sc_gop_id) |
	    GENCONF_PORT_CTRL1_EN(sc->sc_gop_id);
	regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL1, reg);

	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
	reg |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);

	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_SOFT_RESET1);
	reg |= GENCONF_SOFT_RESET1_GOP;
	regmap_write_4(sc->sc->sc_rm, GENCONF_SOFT_RESET1, reg);
}
2829 
2830 void
2831 mvpp2_gop_intr_mask(struct mvpp2_port *sc)
2832 {
2833 	uint32_t reg;
2834 
2835 	if (sc->sc_gop_id == 0) {
2836 		reg = mvpp2_xlg_read(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
2837 		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG;
2838 		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG;
2839 		mvpp2_xlg_write(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg);
2840 	}
2841 
2842 	reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_SUM_MASK_REG);
2843 	reg &= ~MVPP2_GMAC_INT_SUM_CAUSE_LINK_CHANGE;
2844 	mvpp2_gmac_write(sc, MVPP2_GMAC_INT_SUM_MASK_REG, reg);
2845 }
2846 
2847 void
2848 mvpp2_gop_intr_unmask(struct mvpp2_port *sc)
2849 {
2850 	uint32_t reg;
2851 
2852 	reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_SUM_MASK_REG);
2853 	reg |= MVPP2_GMAC_INT_SUM_CAUSE_LINK_CHANGE;
2854 	mvpp2_gmac_write(sc, MVPP2_GMAC_INT_SUM_MASK_REG, reg);
2855 
2856 	if (sc->sc_gop_id == 0) {
2857 		reg = mvpp2_xlg_read(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
2858 		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
2859 		    sc->sc_phy_mode == PHY_MODE_XAUI)
2860 			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG;
2861 		else
2862 			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG;
2863 		mvpp2_xlg_write(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg);
2864 	}
2865 }
2866 
/*
 * Stop the interface: quiesce egress/ingress, reset MAC and PCS,
 * disable interrupts, flush and tear down all TX/RX queues, and
 * power down the SFP cage if present.
 */
void
mvpp2_down(struct mvpp2_port *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg;
	int i;

	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	mvpp2_egress_disable(sc);
	mvpp2_ingress_disable(sc);

	mvpp2_mac_reset_assert(sc);
	mvpp2_pcs_reset_assert(sc);

	/* XXX: single vector */
	mvpp2_interrupts_disable(sc, (1 << 0));
	mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id), 0);

	/* Keep the port's TX flush bit set while draining the queues. */
	reg = mvpp2_read(sc->sc, MVPP2_TX_PORT_FLUSH_REG);
	reg |= MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
	mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);

	for (i = 0; i < sc->sc_ntxq; i++)
		mvpp2_txq_hw_deinit(sc, &sc->sc_txqs[i]);

	reg &= ~MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
	mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);

	for (i = 0; i < sc->sc_nrxq; i++)
		mvpp2_rxq_hw_deinit(sc, &sc->sc_rxqs[i]);

	if (sc->sc_sfp) {
		rw_enter(&mvpp2_sff_lock, RW_WRITE);
		sfp_disable(sc->sc_sfp);
		rw_exit(&mvpp2_sff_lock);
	}
}
2908 
/*
 * Tear down one TX queue: drain pending packets (bounded by
 * MVPP2_TX_PENDING_TIMEOUT_MSEC), detach the descriptor ring from
 * the hardware, free all queued mbufs and their DMA maps, and
 * release the ring memory.
 */
void
mvpp2_txq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int i, pending;
	uint32_t reg;

	/* Indirect access: select the queue, then turn on drain mode. */
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	reg = mvpp2_read(sc->sc, MVPP2_TXQ_PREF_BUF_REG);
	reg |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);

	/*
	 * the queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	i = 0;
	do {
		if (i >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			printf("%s: port %d: cleaning queue %d timed out\n",
			    sc->sc_dev.dv_xname, sc->sc_id, txq->log_id);
			break;
		}
		delay(1000);
		i++;

		pending = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG) &
		    MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	reg &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);

	/*
	 * Detach the descriptor ring.  The final read of the SENT
	 * register presumably clears the sent counter (clear-on-read)
	 * — NOTE(review): confirm against the PPv2 documentation.
	 */
	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG, 0);
	mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id));

	/* Free any mbufs still attached to the ring. */
	for (i = 0; i < MVPP2_NTXDESC; i++) {
		txb = &txq->buf[i];
		if (txb->mb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
			    txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->mb_map);
			m_freem(txb->mb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->mb_map);
	}

	mvpp2_dmamem_free(sc->sc, txq->ring);
	free(txq->buf, M_DEVBUF, sizeof(struct mvpp2_buf) *
	    MVPP2_NTXDESC);
}
2963 
/*
 * Return all received but unprocessed buffers of an RX queue to
 * their buffer-manager (BM) pools and acknowledge them to the
 * hardware.  Used while tearing down a queue.
 */
void
mvpp2_rxq_hw_drop(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	struct mvpp2_rx_desc *rxd;
	struct mvpp2_bm_pool *bm;
	uint64_t phys, virt;
	uint32_t i, nrecv, pool;
	struct mvpp2_buf *rxb;

	nrecv = mvpp2_rxq_received(sc, rxq->id);
	if (!nrecv)
		return;

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < nrecv; i++) {
		rxd = &rxq->descs[rxq->cons];
		/* Cookie layout: pool in bits 16-31, buffer index in 0-15. */
		virt = rxd->buf_cookie_bm_qset_cls_info;
		pool = (virt >> 16) & 0xffff;
		KASSERT(pool < sc->sc->sc_npools);
		bm = &sc->sc->sc_bm_pools[pool];
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m != NULL);
		virt &= 0xffffffff;
		phys = rxb->mb_map->dm_segs[0].ds_addr;
		/*
		 * Release sequence: high address bits, virtual cookie,
		 * then physical address.  NOTE(review): virt was masked
		 * to 32 bits above, so its high-bit contribution here is
		 * always zero — presumably intentional since the cookie
		 * only ever holds pool/index bits; confirm.
		 */
		mvpp2_write(sc->sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
		    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
		    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
		    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
		mvpp2_write(sc->sc, MVPP2_BM_VIRT_RLS_REG,
		    virt & 0xffffffff);
		mvpp2_write(sc->sc, MVPP2_BM_PHY_RLS_REG(pool),
		    phys & 0xffffffff);
		rxq->cons = (rxq->cons + 1) % MVPP2_NRXDESC;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Report all nrecv descriptors as both processed and refilled. */
	mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);
}
3009 
/*
 * Tear down one RX queue: return outstanding buffers to their
 * pools, detach the descriptor ring from the hardware, and free
 * the ring memory.
 */
void
mvpp2_rxq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_hw_drop(sc, rxq);

	mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, 0);

	mvpp2_dmamem_free(sc->sc, rxq->ring);
}
3022 
3023 void
3024 mvpp2_rxq_long_pool_set(struct mvpp2_port *port, int lrxq, int pool)
3025 {
3026 	uint32_t val;
3027 	int prxq;
3028 
3029 	/* get queue physical ID */
3030 	prxq = port->sc_rxqs[lrxq].id;
3031 
3032 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
3033 	val &= ~MVPP2_RXQ_POOL_LONG_MASK;
3034 	val |= ((pool << MVPP2_RXQ_POOL_LONG_OFFS) & MVPP2_RXQ_POOL_LONG_MASK);
3035 
3036 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
3037 }
3038 
3039 void
3040 mvpp2_rxq_short_pool_set(struct mvpp2_port *port, int lrxq, int pool)
3041 {
3042 	uint32_t val;
3043 	int prxq;
3044 
3045 	/* get queue physical ID */
3046 	prxq = port->sc_rxqs[lrxq].id;
3047 
3048 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
3049 	val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
3050 	val |= ((pool << MVPP2_RXQ_POOL_SHORT_OFFS) & MVPP2_RXQ_POOL_SHORT_MASK);
3051 
3052 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
3053 }
3054 
/*
 * Program the receive filter (parser MAC entries) to match the
 * interface state: promiscuous, all-multicast, or an explicit
 * multicast list.
 */
void
mvpp2_iff(struct mvpp2_port *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Removes all but broadcast and (new) lladdr */
	mvpp2_prs_mac_del_all(sc);

	if (ifp->if_flags & IFF_PROMISC) {
		/* Promiscuous: accept all unicast and multicast. */
		mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
		    MVPP2_PRS_L2_UNI_CAST, 1);
		mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
		    MVPP2_PRS_L2_MULTI_CAST, 1);
		return;
	}

	mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
	    MVPP2_PRS_L2_UNI_CAST, 0);
	mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
	    MVPP2_PRS_L2_MULTI_CAST, 0);

	/* Fall back to all-multicast when the filter cannot hold the list. */
	if (ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > MVPP2_PRS_MAC_MC_FILT_MAX) {
		ifp->if_flags |= IFF_ALLMULTI;
		mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
		    MVPP2_PRS_L2_MULTI_CAST, 1);
	} else {
		/* Install one parser entry per multicast address. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			mvpp2_prs_mac_da_accept(sc, enm->enm_addrlo, 1);
			ETHER_NEXT_MULTI(step, enm);
		}
	}
}
3094 
/*
 * Allocate a single-segment, DMA-addressable memory chunk of
 * "size" bytes aligned to "align", mapped into kernel VA and
 * zeroed.  Returns NULL on failure; on success the caller owns
 * the chunk and releases it with mvpp2_dmamem_free().
 */
struct mvpp2_dmamem *
mvpp2_dmamem_alloc(struct mvpp2_softc *sc, bus_size_t size, bus_size_t align)
{
	struct mvpp2_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(mdm->mdm_kva, size);

	return (mdm);

	/* Error unwind, in reverse order of acquisition. */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, 0);

	return (NULL);
}
3135 
/*
 * Release a chunk allocated with mvpp2_dmamem_alloc(): unmap the
 * KVA, free the segment, destroy the map, and free the descriptor.
 * NOTE(review): the map is destroyed without an explicit
 * bus_dmamap_unload — confirm this is safe on the supported targets.
 */
void
mvpp2_dmamem_free(struct mvpp2_softc *sc, struct mvpp2_dmamem *mdm)
{
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, 0);
}
3144 
3145 struct mbuf *
3146 mvpp2_alloc_mbuf(struct mvpp2_softc *sc, bus_dmamap_t map)
3147 {
3148 	struct mbuf *m = NULL;
3149 
3150 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
3151 	if (!m)
3152 		return (NULL);
3153 	m->m_len = m->m_pkthdr.len = MCLBYTES;
3154 
3155 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
3156 		printf("%s: could not load mbuf DMA map", DEVNAME(sc));
3157 		m_freem(m);
3158 		return (NULL);
3159 	}
3160 
3161 	bus_dmamap_sync(sc->sc_dmat, map, 0,
3162 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
3163 
3164 	return (m);
3165 }
3166 
3167 void
3168 mvpp2_interrupts_enable(struct mvpp2_port *port, int cpu_mask)
3169 {
3170 	mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
3171 	    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
3172 }
3173 
3174 void
3175 mvpp2_interrupts_disable(struct mvpp2_port *port, int cpu_mask)
3176 {
3177 	mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
3178 	    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
3179 }
3180 
3181 int
3182 mvpp2_egress_port(struct mvpp2_port *port)
3183 {
3184 	return MVPP2_MAX_TCONT + port->sc_id;
3185 }
3186 
3187 int
3188 mvpp2_txq_phys(int port, int txq)
3189 {
3190 	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
3191 }
3192 
/*
 * Program per-port TX scheduler defaults and mask all interrupts.
 */
void
mvpp2_defaults_set(struct mvpp2_port *port)
{
	int val, queue;

	/* Indirect access: select this port's egress scheduler. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Clear all per-queue token counters. */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Scheduler period: tclk cycles per microsecond. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PERIOD_REG, port->sc->sc_tclk /
	    (1000 * 1000));
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* set maximum_low_latency_packet_size value to 256 */
	mvpp2_write(port->sc, MVPP2_RX_CTRL_REG(port->sc_id),
	    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
	    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port, (0xf << 0));
}
3223 
3224 void
3225 mvpp2_ingress_enable(struct mvpp2_port *port)
3226 {
3227 	uint32_t val;
3228 	int lrxq, queue;
3229 
3230 	for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
3231 		queue = port->sc_rxqs[lrxq].id;
3232 		val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
3233 		val &= ~MVPP2_RXQ_DISABLE_MASK;
3234 		mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
3235 	}
3236 }
3237 
3238 void
3239 mvpp2_ingress_disable(struct mvpp2_port *port)
3240 {
3241 	uint32_t val;
3242 	int lrxq, queue;
3243 
3244 	for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
3245 		queue = port->sc_rxqs[lrxq].id;
3246 		val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
3247 		val |= MVPP2_RXQ_DISABLE_MASK;
3248 		mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
3249 	}
3250 }
3251 
3252 void
3253 mvpp2_egress_enable(struct mvpp2_port *port)
3254 {
3255 	struct mvpp2_tx_queue *txq;
3256 	uint32_t qmap;
3257 	int queue;
3258 
3259 	qmap = 0;
3260 	for (queue = 0; queue < port->sc_ntxq; queue++) {
3261 		txq = &port->sc_txqs[queue];
3262 
3263 		if (txq->descs != NULL) {
3264 			qmap |= (1 << queue);
3265 		}
3266 	}
3267 
3268 	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3269 	    mvpp2_egress_port(port));
3270 	mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
3271 }
3272 
/*
 * Stop egress on all enabled TX queues of this port and wait (up
 * to MVPP2_TX_DISABLE_TIMEOUT_MSEC) for the scheduler to go idle.
 */
void
mvpp2_egress_disable(struct mvpp2_port *port)
{
	uint32_t reg_data;
	int i;

	/* Indirect access: select this port's egress scheduler. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));
	reg_data = (mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG)) &
	    MVPP2_TXP_SCHED_ENQ_MASK;
	/* Issue a disable command for every currently enabled queue. */
	if (reg_data)
		mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG,
		    reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET);

	/* Poll once per millisecond until the enable bits clear. */
	i = 0;
	do {
		if (i >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			printf("%s: tx stop timed out, status=0x%08x\n",
			    port->sc_dev.dv_xname, reg_data);
			break;
		}
		delay(1000);
		i++;
		reg_data = mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
3299 
3300 void
3301 mvpp2_port_enable(struct mvpp2_port *port)
3302 {
3303 	uint32_t val;
3304 
3305 	if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
3306 	    port->sc_phy_mode == PHY_MODE_XAUI)) {
3307 		val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
3308 		val |= MV_XLG_MAC_CTRL0_PORTEN;
3309 		val &= ~MV_XLG_MAC_CTRL0_MIBCNTDIS;
3310 		mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
3311 	} else {
3312 		val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
3313 		val |= MVPP2_GMAC_PORT_EN_MASK;
3314 		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3315 		mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
3316 	}
3317 }
3318 
3319 void
3320 mvpp2_port_disable(struct mvpp2_port *port)
3321 {
3322 	uint32_t val;
3323 
3324 	if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
3325 	    port->sc_phy_mode == PHY_MODE_XAUI)) {
3326 		val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
3327 		val &= ~MV_XLG_MAC_CTRL0_PORTEN;
3328 		mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
3329 	}
3330 
3331 	val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
3332 	val &= ~MVPP2_GMAC_PORT_EN_MASK;
3333 	mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
3334 }
3335 
3336 int
3337 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
3338 {
3339 	uint32_t val = mvpp2_read(port->sc, MVPP2_RXQ_STATUS_REG(rxq_id));
3340 
3341 	return val & MVPP2_RXQ_OCCUPIED_MASK;
3342 }
3343 
3344 void
3345 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
3346     int used_count, int free_count)
3347 {
3348 	uint32_t val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
3349 	mvpp2_write(port->sc, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
3350 }
3351 
3352 void
3353 mvpp2_rxq_offset_set(struct mvpp2_port *port, int prxq, int offset)
3354 {
3355 	uint32_t val;
3356 
3357 	offset = offset >> 5;
3358 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
3359 	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
3360 	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
3361 	    MVPP2_RXQ_PACKET_OFFSET_MASK);
3362 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
3363 }
3364 
/*
 * Make sure the TX port and per-queue token bucket sizes are at
 * least the (tripled, see workaround below) maximum frame size.
 */
void
mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	uint32_t val, size, mtu;
	int txq;

	mtu = MCLBYTES * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong token bucket update: set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* indirect access to registers */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));

	/* set MTU */
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXqs token size must be larger than MTU */
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	/* Grow any per-queue token size that is below the MTU. */
	for (txq = 0; txq < port->sc_ntxq; txq++) {
		val = mvpp2_read(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), val);
		}
	}
}
3410 
3411 void
3412 mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3413     uint32_t pkts)
3414 {
3415 	rxq->pkts_coal =
3416 	    pkts <= MVPP2_OCCUPIED_THRESH_MASK ?
3417 	    pkts : MVPP2_OCCUPIED_THRESH_MASK;
3418 
3419 	mvpp2_write(port->sc, MVPP2_RXQ_NUM_REG, rxq->id);
3420 	mvpp2_write(port->sc, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal);
3421 
3422 }
3423 
3424 void
3425 mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
3426     uint32_t pkts)
3427 {
3428 	txq->done_pkts_coal =
3429 	    pkts <= MVPP2_TRANSMITTED_THRESH_MASK ?
3430 	    pkts : MVPP2_TRANSMITTED_THRESH_MASK;
3431 
3432 	mvpp2_write(port->sc, MVPP2_TXQ_NUM_REG, txq->id);
3433 	mvpp2_write(port->sc, MVPP2_TXQ_THRESH_REG,
3434 	    txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET);
3435 }
3436 
3437 void
3438 mvpp2_rx_time_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3439     uint32_t usec)
3440 {
3441 	uint32_t val;
3442 
3443 	val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
3444 	mvpp2_write(port->sc, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
3445 
3446 	rxq->time_coal = usec;
3447 }
3448 
3449 void
3450 mvpp2_tx_time_coal_set(struct mvpp2_port *port, uint32_t usec)
3451 {
3452 	uint32_t val;
3453 
3454 	val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
3455 	mvpp2_write(port->sc, MVPP2_ISR_TX_THRESHOLD_REG(port->sc_id), val);
3456 
3457 	port->sc_tx_time_coal = usec;
3458 }
3459 
3460 void
3461 mvpp2_prs_shadow_ri_set(struct mvpp2_softc *sc, int index,
3462     uint32_t ri, uint32_t ri_mask)
3463 {
3464 	sc->sc_prs_shadow[index].ri_mask = ri_mask;
3465 	sc->sc_prs_shadow[index].ri = ri;
3466 }
3467 
3468 void
3469 mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
3470 {
3471 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
3472 
3473 	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
3474 	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
3475 }
3476 
3477 void
3478 mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, uint32_t port, int add)
3479 {
3480 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3481 
3482 	if (add)
3483 		pe->tcam.byte[enable_off] &= ~(1 << port);
3484 	else
3485 		pe->tcam.byte[enable_off] |= (1 << port);
3486 }
3487 
3488 void
3489 mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, uint32_t port_mask)
3490 {
3491 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3492 	uint8_t mask = MVPP2_PRS_PORT_MASK;
3493 
3494 	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
3495 	pe->tcam.byte[enable_off] &= ~mask;
3496 	pe->tcam.byte[enable_off] |= ~port_mask & MVPP2_PRS_PORT_MASK;
3497 }
3498 
3499 uint32_t
3500 mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
3501 {
3502 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3503 
3504 	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
3505 }
3506 
3507 void
3508 mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, uint32_t offs,
3509     uint8_t byte, uint8_t enable)
3510 {
3511 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
3512 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
3513 }
3514 
3515 void
3516 mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, uint32_t offs,
3517     uint8_t *byte, uint8_t *enable)
3518 {
3519 	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
3520 	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
3521 }
3522 
3523 int
3524 mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offset, uint16_t data)
3525 {
3526 	int byte_offset = MVPP2_PRS_TCAM_DATA_BYTE(offset);
3527 	uint16_t tcam_data;
3528 
3529 	tcam_data = (pe->tcam.byte[byte_offset + 1] << 8) |
3530 	    pe->tcam.byte[byte_offset];
3531 	return tcam_data == data;
3532 }
3533 
3534 void
3535 mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t enable)
3536 {
3537 	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
3538 
3539 	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
3540 		if (!(enable & BIT(i)))
3541 			continue;
3542 
3543 		if (bits & BIT(i))
3544 			pe->tcam.byte[ai_idx] |= BIT(i);
3545 		else
3546 			pe->tcam.byte[ai_idx] &= ~BIT(i);
3547 	}
3548 
3549 	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
3550 }
3551 
3552 int
3553 mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
3554 {
3555 	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
3556 }
3557 
/*
 * Gather four TCAM data/enable byte pairs into the caller's word
 * and enable buffers, byte by byte.
 */
void
mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *pe, uint32_t data_offset,
    uint32_t *word, uint32_t *enable)
{
	uint8_t *wp = (uint8_t *)word;
	uint8_t *ep = (uint8_t *)enable;
	int i;

	for (i = 0; i < 4; i++)
		mvpp2_prs_tcam_data_byte_get(pe,
		    data_offset * sizeof(int) + i, &wp[i], &ep[i]);
}
3572 
/*
 * Match an ethertype at byte offset "offs": high byte first, as
 * it appears on the wire, with all bits enabled.
 */
void
mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, uint32_t offs,
    uint16_t ether_type)
{
	mvpp2_prs_tcam_data_byte_set(pe, offs, ether_type >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offs + 1, ether_type & 0xff, 0xff);
}
3580 
3581 void
3582 mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
3583 {
3584 	pe->sram.byte[bit / 8] |= (val << (bit % 8));
3585 }
3586 
3587 void
3588 mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
3589 {
3590 	pe->sram.byte[bit / 8] &= ~(val << (bit % 8));
3591 }
3592 
3593 void
3594 mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
3595 {
3596 	int i;
3597 
3598 	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
3599 		if (!(mask & BIT(i)))
3600 			continue;
3601 
3602 		if (bits & BIT(i))
3603 			mvpp2_prs_sram_bits_set(pe,
3604 			    MVPP2_PRS_SRAM_RI_OFFS + i, 1);
3605 		else
3606 			mvpp2_prs_sram_bits_clear(pe,
3607 			    MVPP2_PRS_SRAM_RI_OFFS + i, 1);
3608 
3609 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
3610 	}
3611 }
3612 
3613 int
3614 mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
3615 {
3616 	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
3617 }
3618 
3619 void
3620 mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
3621 {
3622 	int i;
3623 
3624 	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
3625 		if (!(mask & BIT(i)))
3626 			continue;
3627 
3628 		if (bits & BIT(i))
3629 			mvpp2_prs_sram_bits_set(pe,
3630 			    MVPP2_PRS_SRAM_AI_OFFS + i, 1);
3631 		else
3632 			mvpp2_prs_sram_bits_clear(pe,
3633 			    MVPP2_PRS_SRAM_AI_OFFS + i, 1);
3634 
3635 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
3636 	}
3637 }
3638 
3639 int
3640 mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
3641 {
3642 	uint8_t bits;
3643 	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
3644 	int ai_en_off = ai_off + 1;
3645 	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
3646 
3647 	bits = (pe->sram.byte[ai_off] >> ai_shift) |
3648 	    (pe->sram.byte[ai_en_off] << (8 - ai_shift));
3649 
3650 	return bits;
3651 }
3652 
/*
 * Set the SRAM shift value (stored as sign bit + magnitude) and
 * select the shift operation for a parser entry.
 */
void
mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, uint32_t op)
{
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = -shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
	    shift & MVPP2_PRS_SRAM_SHIFT_MASK;
	/* Select the shift operation, clearing any previous selection. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
3670 
/*
 * Program the SRAM user-defined field (UDF) offset, its type, and
 * the offset operation for a parser entry.  Both the UDF offset
 * and the op-select field straddle byte boundaries, so their
 * high-order bits are patched into the following byte by hand.
 */
void
mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, uint32_t type, int offset,
    uint32_t op)
{
	uint8_t udf_byte, udf_byte_offset;
	uint8_t op_sel_udf_byte, op_sel_udf_byte_offset;

	/* Byte index and bit shift of the spill-over part of each field. */
	udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
	    MVPP2_PRS_SRAM_UDF_BITS);
	udf_byte_offset = (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8));
	op_sel_udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
	    MVPP2_PRS_SRAM_OP_SEL_UDF_BITS);
	op_sel_udf_byte_offset = (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8));

	/* Negative offsets are stored as sign bit + magnitude. */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = -offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
	    MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[udf_byte] &= ~(MVPP2_PRS_SRAM_UDF_MASK >> udf_byte_offset);
	pe->sram.byte[udf_byte] |= (offset >> udf_byte_offset);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
	    MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
	    MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
	pe->sram.byte[op_sel_udf_byte] &= ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
	    op_sel_udf_byte_offset);
	pe->sram.byte[op_sel_udf_byte] |= (op >> op_sel_udf_byte_offset);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
3708 
3709 void
3710 mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
3711 {
3712 	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
3713 
3714 	mvpp2_prs_sram_bits_clear(pe, sram_next_off, MVPP2_PRS_SRAM_NEXT_LU_MASK);
3715 	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
3716 }
3717 
3718 void
3719 mvpp2_prs_shadow_set(struct mvpp2_softc *sc, int index, uint32_t lu)
3720 {
3721 	sc->sc_prs_shadow[index].valid = 1;
3722 	sc->sc_prs_shadow[index].lu = lu;
3723 }
3724 
3725 int
3726 mvpp2_prs_hw_write(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe)
3727 {
3728 	int i;
3729 
3730 	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
3731 		return EINVAL;
3732 
3733 	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
3734 	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
3735 	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3736 		mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
3737 	mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
3738 	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3739 		mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
3740 
3741 	return 0;
3742 }
3743 
3744 int
3745 mvpp2_prs_hw_read(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe, int tid)
3746 {
3747 	int i;
3748 
3749 	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
3750 		return EINVAL;
3751 
3752 	memset(pe, 0, sizeof(*pe));
3753 	pe->index = tid;
3754 
3755 	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
3756 	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] =
3757 	    mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
3758 	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
3759 		return EINVAL;
3760 	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3761 		pe->tcam.word[i] =
3762 		    mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(i));
3763 
3764 	mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
3765 	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3766 		pe->sram.word[i] =
3767 		    mvpp2_read(sc, MVPP2_PRS_SRAM_DATA_REG(i));
3768 
3769 	return 0;
3770 }
3771 
3772 int
3773 mvpp2_prs_flow_find(struct mvpp2_softc *sc, int flow)
3774 {
3775 	struct mvpp2_prs_entry pe;
3776 	uint8_t bits;
3777 	int tid;
3778 
3779 	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
3780 		if (!sc->sc_prs_shadow[tid].valid ||
3781 		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
3782 			continue;
3783 
3784 		mvpp2_prs_hw_read(sc, &pe, tid);
3785 		bits = mvpp2_prs_sram_ai_get(&pe);
3786 
3787 		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
3788 			return tid;
3789 	}
3790 
3791 	return -1;
3792 }
3793 
3794 int
3795 mvpp2_prs_tcam_first_free(struct mvpp2_softc *sc, uint8_t start, uint8_t end)
3796 {
3797 	uint8_t tmp;
3798 	int tid;
3799 
3800 	if (start > end) {
3801 		tmp = end;
3802 		end = start;
3803 		start = tmp;
3804 	}
3805 
3806 	for (tid = start; tid <= end; tid++) {
3807 		if (!sc->sc_prs_shadow[tid].valid)
3808 			return tid;
3809 	}
3810 
3811 	return -1;
3812 }
3813 
3814 void
3815 mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *sc, uint32_t port, int add)
3816 {
3817 	struct mvpp2_prs_entry pe;
3818 
3819 	if (sc->sc_prs_shadow[MVPP2_PE_DROP_ALL].valid) {
3820 		mvpp2_prs_hw_read(sc, &pe, MVPP2_PE_DROP_ALL);
3821 	} else {
3822 		memset(&pe, 0, sizeof(pe));
3823 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
3824 		pe.index = MVPP2_PE_DROP_ALL;
3825 		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
3826 		    MVPP2_PRS_RI_DROP_MASK);
3827 		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3828 		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3829 		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
3830 		mvpp2_prs_tcam_port_map_set(&pe, 0);
3831 	}
3832 
3833 	mvpp2_prs_tcam_port_set(&pe, port, add);
3834 	mvpp2_prs_hw_write(sc, &pe);
3835 }
3836 
/*
 * Enable or disable a port in the unicast or multicast promiscuous
 * TCAM entry.  "l2_cast" selects which of the two entries is touched;
 * "add" non-zero adds the port to the entry's port map, zero removes it.
 */
void
mvpp2_prs_mac_promisc_set(struct mvpp2_softc *sc, uint32_t port, int l2_cast,
    int add)
{
	struct mvpp2_prs_entry pe;
	uint8_t cast_match;
	uint32_t ri;
	int tid;

	/* Pick TCAM match value, entry index and result info per cast type. */
	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	/* Load the existing entry, or build it on first use. */
	if (sc->sc_prs_shadow[tid].valid) {
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
		/* Match the cast bit in the first destination-address byte. */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
		    MVPP2_PRS_CAST_MASK);
		/* Skip past both MAC addresses. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		/* Start with no ports selected. */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3875 
/*
 * Enable or disable a port in one of the four DSA tag recognition
 * entries.  "tagged" selects the VLAN-tagged vs. untagged variant,
 * "extend" the EDSA (8-byte) vs. DSA (4-byte) tag format.
 */
void
mvpp2_prs_dsa_tag_set(struct mvpp2_softc *sc, uint32_t port, int add,
    int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	/* Load the existing entry, or build it on first use. */
	if (sc->sc_prs_shadow[tid].valid) {
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		/* Shift past the DSA/EDSA tag. */
		mvpp2_prs_sram_shift_set(&pe, shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* Match the tagged bit; continue with VLAN parsing. */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* No VLAN present: flag it and go straight to L2. */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Start with no ports selected. */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3917 
/*
 * Enable or disable a port in the ethertype-DSA recognition entries:
 * a DSA/EDSA tag introduced by ethertype 0xdada instead of sitting
 * directly after the source address.
 */
void
mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *sc, uint32_t port,
    int add, int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	/* Load the existing entry, or build it on first use. */
	if (sc->sc_prs_shadow[tid].valid) {
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		/* Match the 0xdada ethertype followed by a zero word. */
		mvpp2_prs_match_etype(&pe, 0, 0xdada);
		mvpp2_prs_match_etype(&pe, 2, 0);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
		    MVPP2_PRS_RI_DSA_MASK);
		/* Shift past both MAC addresses and the DSA/EDSA tag. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN + shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* Match the tagged bit; continue with VLAN parsing. */
			mvpp2_prs_tcam_data_byte_set(&pe,
			    MVPP2_ETH_TYPE_LEN + 2 + 3,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* No VLAN present: flag it and go straight to L2. */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3966 
/*
 * Find the single- or triple-VLAN TCAM entry matching the given TPID
 * and additional-info bits.  Returns a malloc(9)'d copy of the entry
 * (the caller must free it) or NULL if no match was found or memory
 * allocation failed.
 */
struct mvpp2_prs_entry *
mvpp2_prs_vlan_find(struct mvpp2_softc *sc, uint16_t tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_bits, ai_bits;
	int match, tid;

	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
	if (pe == NULL)
		return NULL;

	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		/* Only consider in-use VLAN entries. */
		if (!sc->sc_prs_shadow[tid].valid ||
		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;
		mvpp2_prs_hw_read(sc, pe, tid);
		/* TPID is stored in network byte order in the TCAM. */
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid));
		if (!match)
			continue;
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
		/* Compare AI with the double-VLAN flag masked off. */
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
		if (ai != ai_bits)
			continue;
		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}

	free(pe, M_TEMP, sizeof(*pe));
	return NULL;
}
4002 
/*
 * Add (or update the port map of) a single/triple VLAN entry for the
 * given TPID and AI bits.  Returns 0 on success, -1 if the TCAM range
 * is full (note: not an errno), EINVAL or ENOMEM on other failures.
 */
int
mvpp2_prs_vlan_add(struct mvpp2_softc *sc, uint16_t tpid, int ai, uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_bits;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(sc, tpid, ai);
	if (pe == NULL) {
		/* Allocate from the top of the free range downwards. */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_LAST_FREE_TID,
		    MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		/* get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		    tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			mvpp2_prs_hw_read(sc, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		/*
		 * Single/triple entries must sit after every double-VLAN
		 * entry in the table; refuse placements that break this.
		 */
		if (tid <= tid_aux) {
			ret = EINVAL;
			goto error;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		mvpp2_prs_match_etype(pe, 0, tpid);
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* Shift past the VLAN tag. */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
				   MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		} else {
			/* Inner tag of a double-tagged frame: triple result. */
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	/* Reached on success as well; pe is always freed here. */
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
4067 
4068 int
4069 mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *sc)
4070 {
4071 	int i;
4072 
4073 	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++)
4074 		if (!sc->sc_prs_double_vlans[i])
4075 			return i;
4076 
4077 	return -1;
4078 }
4079 
/*
 * Find the double-VLAN TCAM entry matching the given outer/inner TPID
 * pair.  Returns a malloc(9)'d copy of the entry (the caller must free
 * it) or NULL if no match was found or memory allocation failed.
 */
struct mvpp2_prs_entry *
mvpp2_prs_double_vlan_find(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_mask;
	int match, tid;

	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
	if (pe == NULL)
		return NULL;

	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		/* Only consider in-use VLAN entries. */
		if (!sc->sc_prs_shadow[tid].valid ||
		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_hw_read(sc, pe, tid);
		/* Outer TPID at offset 0, inner TPID at offset 4. */
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid1)) &&
		    mvpp2_prs_tcam_data_cmp(pe, 4, swap16(tpid2));
		if (!match)
			continue;
		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}

	free(pe, M_TEMP, sizeof(*pe));
	return NULL;
}
4111 
/*
 * Add (or update the port map of) a double-VLAN entry for the given
 * outer/inner TPID pair.  Returns 0 on success, -1 if no free TCAM
 * entry or AI value is available (note: not an errno), ENOMEM or
 * ERANGE on other failures.
 */
int
mvpp2_prs_double_vlan_add(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2,
    uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;
	uint32_t ri_bits;

	pe = mvpp2_prs_double_vlan_find(sc, tpid1, tpid2);
	if (pe == NULL) {
		/* Allocate from the bottom of the free range upwards. */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
		    MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		ai = mvpp2_prs_double_vlan_ai_free_get(sc);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* Locate the first single/triple VLAN entry. */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		    tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			mvpp2_prs_hw_read(sc, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		/*
		 * Double-VLAN entries must sit before every single/triple
		 * entry in the table; refuse placements that break this.
		 */
		if (tid >= tid_aux) {
			ret = ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		/* Claim the AI value before programming the entry. */
		sc->sc_prs_double_vlans[ai] = 1;
		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Skip the outer tag and keep parsing at the inner one. */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
		    MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
		    MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	/* Reached on success as well; pe is always freed here. */
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
4178 
/*
 * Install the two IPv4 L4-protocol parser entries for TCP, UDP or
 * IGMP: one matching non-fragmented packets (fragment-offset bytes
 * zero) and one matching everything else.  Returns 0 on success,
 * EINVAL for unsupported protocols, -1 if the TCAM is full.
 */
int
mvpp2_prs_ip4_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
    uint32_t ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/* First entry: the non-fragmented case. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Record the L4 header offset relative to the IPv4 header. */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
	/* Match fragment-offset bytes equal to zero: not a fragment. */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/*
	 * Second entry: fragmented case.  Reuses the TCAM/SRAM contents
	 * built above; only the index, result info and the fragment-
	 * offset match are changed.
	 */
	pe.index = tid;
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
	    ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
	/* Don't care about the fragment-offset bytes any more. */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4231 
/*
 * Install the IPv4 multicast or broadcast address-classification
 * parser entry.  Returns 0 on success, EINVAL for unsupported cast
 * types, -1 if the TCAM is full.
 */
int
mvpp2_prs_ip4_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		/* Match the IPv4 multicast prefix in the first DIP byte. */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
		    MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
		    MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case  MVPP2_PRS_L3_BROAD_CAST:
		/* Broadcast: all four destination-address bytes all-ones. */
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
		    MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		/* Nothing was written to the hardware yet; safe to bail. */
		return EINVAL;
	}

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4277 
/*
 * Install the IPv6 L4-protocol parser entry for TCP, UDP, ICMPv6 or
 * IP-in-IP.  Returns 0 on success, EINVAL for unsupported protocols,
 * -1 if the TCAM is full.
 */
int
mvpp2_prs_ip6_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
    uint32_t ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	/* Terminal entry: generate the lookup-done flag. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	/* Record the L4 header offset relative to the IPv6 header. */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	/* Match the next-header byte. */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4311 
/*
 * Install the IPv6 multicast address-classification parser entry.
 * Only multicast is supported for IPv6.  Returns 0 on success, EINVAL
 * for other cast types, -1 if the TCAM is full.
 */
int
mvpp2_prs_ip6_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back so the IPv6 header is re-parsed by the next entry. */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Match the IPv6 multicast prefix in the first DIP byte. */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
	    MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4344 
4345 int
4346 mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, const uint8_t *da,
4347     uint8_t *mask)
4348 {
4349 	uint8_t tcam_byte, tcam_mask;
4350 	int index;
4351 
4352 	for (index = 0; index < ETHER_ADDR_LEN; index++) {
4353 		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte,
4354 		    &tcam_mask);
4355 		if (tcam_mask != mask[index])
4356 			return 0;
4357 		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
4358 			return 0;
4359 	}
4360 
4361 	return 1;
4362 }
4363 
4364 int
4365 mvpp2_prs_mac_da_range_find(struct mvpp2_softc *sc, int pmap, const uint8_t *da,
4366     uint8_t *mask, int udf_type)
4367 {
4368 	struct mvpp2_prs_entry pe;
4369 	int tid;
4370 
4371 	for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END;
4372 	    tid++) {
4373 		uint32_t entry_pmap;
4374 
4375 		if (!sc->sc_prs_shadow[tid].valid ||
4376 		    (sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
4377 		    (sc->sc_prs_shadow[tid].udf != udf_type))
4378 			continue;
4379 
4380 		mvpp2_prs_hw_read(sc, &pe, tid);
4381 		entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
4382 		if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
4383 		    entry_pmap == pmap)
4384 			return tid;
4385 	}
4386 
4387 	return -1;
4388 }
4389 
/*
 * Add or remove a destination MAC address match entry for this port.
 * When the last port is removed the TCAM entry is invalidated.
 * Returns 0 on success, -1 if the MAC range is full or the entry
 * became empty on an add (note: -1, not an errno).
 */
int
mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const uint8_t *da, int add)
{
	struct mvpp2_softc *sc = port->sc;
	struct mvpp2_prs_entry pe;
	uint32_t pmap, len, ri;
	uint8_t mask[ETHER_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Look for an existing exact-match entry for this port. */
	tid = mvpp2_prs_mac_da_range_find(sc, BIT(port->sc_id), da, mask,
	    MVPP2_PRS_UDF_MAC_DEF);
	if (tid < 0) {
		/* Nothing to delete. */
		if (!add)
			return 0;

		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_MAC_RANGE_START,
		    MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe.index = tid;
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_hw_read(sc, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_tcam_port_set(&pe, port->sc_id, add);

	/* invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		if (add)
			return -1;
		mvpp2_prs_hw_inv(sc, pe.index);
		sc->sc_prs_shadow[pe.index].valid = 0;
		return 0;
	}

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Match the full destination address. */
	len = ETHER_ADDR_LEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Classify the address: broadcast, multicast, or our unicast. */
	if (ETHER_IS_BROADCAST(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (ETHER_IS_MULTICAST(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
	    MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(sc, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
	    MVPP2_PRS_RI_MAC_ME_MASK);
	/* Shift past both MAC addresses. */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4457 
/*
 * Remove this port from every MAC match entry it participates in,
 * except the broadcast entry and the port's own station address.
 */
void
mvpp2_prs_mac_del_all(struct mvpp2_port *port)
{
	struct mvpp2_softc *sc = port->sc;
	struct mvpp2_prs_entry pe;
	uint32_t pmap;
	int index, tid;

	for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END;
	    tid++) {
		uint8_t da[ETHER_ADDR_LEN], da_mask[ETHER_ADDR_LEN];

		/* Only consider in-use default-UDF MAC entries. */
		if (!sc->sc_prs_shadow[tid].valid ||
		    (sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (sc->sc_prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		mvpp2_prs_hw_read(sc, &pe, tid);
		pmap = mvpp2_prs_tcam_port_map_get(&pe);

		/* Skip entries that don't involve this port. */
		if (!(pmap & (1 << port->sc_id)))
			continue;

		/* Extract the matched address from the TCAM data bytes. */
		for (index = 0; index < ETHER_ADDR_LEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
			    &da_mask[index]);

		/* Keep broadcast and the port's own address. */
		if (ETHER_IS_BROADCAST(da) || ETHER_IS_EQ(da, port->sc_lladdr))
			continue;

		mvpp2_prs_mac_da_accept(port, da, 0);
	}
}
4491 
/*
 * Configure which DSA tagging mode (if any) is recognized on a port
 * by adding/removing it from the DSA and EDSA parser entries.
 * Returns 0 on success, EINVAL for out-of-range tag types.
 */
int
mvpp2_prs_tag_mode_set(struct mvpp2_softc *sc, int port_id, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* EDSA on, DSA off. */
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_DSA);
		break;
	case MVPP2_TAG_TYPE_DSA:
		/* DSA on, EDSA off. */
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_EDSA);
		break;
	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* No DSA tagging: both modes off. */
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_EDSA);
		break;
	default:
		/* Unknown but in-range types are silently accepted. */
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return EINVAL;
		break;
	}

	return 0;
}
4535 
4536 int
4537 mvpp2_prs_def_flow(struct mvpp2_port *port)
4538 {
4539 	struct mvpp2_prs_entry pe;
4540 	int tid;
4541 
4542 	memset(&pe, 0, sizeof(pe));
4543 
4544 	tid = mvpp2_prs_flow_find(port->sc, port->sc_id);
4545 	if (tid < 0) {
4546 		tid = mvpp2_prs_tcam_first_free(port->sc,
4547 		    MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID);
4548 		if (tid < 0)
4549 			return tid;
4550 
4551 		pe.index = tid;
4552 		mvpp2_prs_sram_ai_update(&pe, port->sc_id,
4553 		    MVPP2_PRS_FLOW_ID_MASK);
4554 		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
4555 		mvpp2_prs_shadow_set(port->sc, pe.index, MVPP2_PRS_LU_FLOWS);
4556 	} else {
4557 		mvpp2_prs_hw_read(port->sc, &pe, tid);
4558 	}
4559 
4560 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
4561 	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->sc_id));
4562 	mvpp2_prs_hw_write(port->sc, &pe);
4563 	return 0;
4564 }
4565 
/*
 * Program one classifier flow table entry: select the row by index,
 * then write its three data words.
 */
void
mvpp2_cls_flow_write(struct mvpp2_softc *sc, struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(sc, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
4574 
4575 void
4576 mvpp2_cls_lookup_write(struct mvpp2_softc *sc, struct mvpp2_cls_lookup_entry *le)
4577 {
4578 	uint32_t val;
4579 
4580 	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
4581 	mvpp2_write(sc, MVPP2_CLS_LKP_INDEX_REG, val);
4582 	mvpp2_write(sc, MVPP2_CLS_LKP_TBL_REG, le->data);
4583 }
4584 
4585 void
4586 mvpp2_cls_init(struct mvpp2_softc *sc)
4587 {
4588 	struct mvpp2_cls_lookup_entry le;
4589 	struct mvpp2_cls_flow_entry fe;
4590 	int index;
4591 
4592 	mvpp2_write(sc, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
4593 	memset(&fe.data, 0, sizeof(fe.data));
4594 	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
4595 		fe.index = index;
4596 		mvpp2_cls_flow_write(sc, &fe);
4597 	}
4598 	le.data = 0;
4599 	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
4600 		le.lkpid = index;
4601 		le.way = 0;
4602 		mvpp2_cls_lookup_write(sc, &le);
4603 		le.way = 1;
4604 		mvpp2_cls_lookup_write(sc, &le);
4605 	}
4606 }
4607 
/*
 * Per-port classifier setup: clear the port's way selection and
 * program its lookup ID decode entry (initial RX queue, lookup
 * engines disabled).
 */
void
mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	uint32_t val;

	/* set way for the port */
	val = mvpp2_read(port->sc, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->sc_id);
	mvpp2_write(port->sc, MVPP2_CLS_PORT_WAY_REG, val);

	/*
	 * pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->sc_id;
	le.way = 0;
	le.data = 0;

	/* set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= (port->sc_id * 32);

	/* disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* update lookup ID table entry */
	mvpp2_cls_lookup_write(port->sc, &le);
}
4637 
/*
 * Configure the RX queue used for oversize packets on this port and
 * clear the port's bit in the software forwarding port control
 * register.  The queue number is split into low and high parts
 * across two registers.
 */
void
mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	uint32_t val;

	mvpp2_write(port->sc, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->sc_id),
	    (port->sc_id * 32) & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
	mvpp2_write(port->sc, MVPP2_CLS_SWFWD_P2HQ_REG(port->sc_id),
	    (port->sc_id * 32) >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS);
	val = mvpp2_read(port->sc, MVPP2_CLS_SWFWD_PCTRL_REG);
	val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->sc_id);
	mvpp2_write(port->sc, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
4651