xref: /openbsd-src/sys/dev/fdt/if_mvpp.c (revision d0fc3bb68efd6c434b4053cd7adb29023cbec341)
1 /*	$OpenBSD: if_mvpp.c,v 1.46 2021/06/03 21:42:23 patrick Exp $	*/
2 /*
3  * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
4  * Copyright (c) 2017, 2020 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 /*
19  * Copyright (C) 2016 Marvell International Ltd.
20  *
21  * Marvell BSD License Option
22  *
23  * If you received this File from Marvell, you may opt to use, redistribute
24  * and/or modify this File under the following licensing terms.
25  * Redistribution and use in source and binary forms, with or without
26  * modification, are permitted provided that the following conditions are met:
27  *
28  *   * Redistributions of source code must retain the above copyright notice,
29  *     this list of conditions and the following disclaimer.
30  *
31  *   * Redistributions in binary form must reproduce the above copyright
32  *     notice, this list of conditions and the following disclaimer in the
33  *     documentation and/or other materials provided with the distribution.
34  *
35  *   * Neither the name of Marvell nor the names of its contributors may be
36  *     used to endorse or promote products derived from this software without
37  *     specific prior written permission.
38  *
39  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
40  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
43  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
44  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
45  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
46  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
47  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
48  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
49  * POSSIBILITY OF SUCH DAMAGE.
50  */
51 
52 #include "bpfilter.h"
53 
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/device.h>
57 #include <sys/kernel.h>
58 #include <sys/malloc.h>
59 #include <sys/mbuf.h>
60 #include <sys/queue.h>
61 #include <sys/socket.h>
62 #include <sys/sockio.h>
63 #include <sys/timeout.h>
64 
65 #include <uvm/uvm_extern.h>
66 
67 #include <machine/cpufunc.h>
68 #include <machine/bus.h>
69 #include <machine/fdt.h>
70 
71 #include <net/if.h>
72 #include <net/if_media.h>
73 #include <net/ppp_defs.h>
74 
75 #include <dev/ofw/openfirm.h>
76 #include <dev/ofw/ofw_clock.h>
77 #include <dev/ofw/ofw_gpio.h>
78 #include <dev/ofw/ofw_misc.h>
79 #include <dev/ofw/ofw_pinctrl.h>
80 #include <dev/ofw/ofw_regulator.h>
81 #include <dev/ofw/fdt.h>
82 
83 #include <dev/mii/mii.h>
84 #include <dev/mii/miivar.h>
85 
86 #if NBPFILTER > 0
87 #include <net/bpf.h>
88 #endif
89 
90 #include <netinet/in.h>
91 #include <netinet/ip.h>
92 #include <netinet/if_ether.h>
93 
94 #include <netinet6/in6_var.h>
95 #include <netinet/ip6.h>
96 
97 #include <dev/fdt/if_mvppreg.h>
98 
/*
 * One receive/transmit buffer slot: the DMA map plus the mbuf
 * currently loaded into it (NULL when the slot is empty).
 */
struct mvpp2_buf {
	bus_dmamap_t		mb_map;
	struct mbuf		*mb_m;
};

/* Ring geometry. */
#define MVPP2_NTXDESC	512	/* TX descriptors per queue */
#define MVPP2_NTXSEGS	16	/* max DMA segments per TX packet */
#define MVPP2_NRXDESC	512	/* RX descriptors per queue */
107 
/*
 * Software state for one hardware buffer-manager (BM) pool.  The
 * freelist holds cookies ((pool << 16) | rxbuf index, see
 * mvpp2_bm_pool_init()) that map a buffer handed back by the BM
 * to its rxbuf slot.
 */
struct mvpp2_bm_pool {
	struct mvpp2_dmamem	*bm_mem;	/* pointer ring for the BM */
	struct mvpp2_buf	*rxbuf;		/* per-slot mbuf/DMA map */
	uint32_t		*freelist;	/* cookies of free slots */
	int			free_prod;	/* freelist producer index */
	int			free_cons;	/* freelist consumer index */
};

#define MVPP2_BM_SIZE		64	/* buffers per pool */
#define MVPP2_BM_POOL_PTR_ALIGN	128	/* alignment of the pointer ring */
#define MVPP2_BM_POOLS_NUM	8	/* pools implemented in hardware */
#define MVPP2_BM_ALIGN		32
120 
/* Software state for one transmit queue (aggregated or per-port). */
struct mvpp2_tx_queue {
	uint8_t			id;		/* global queue id */
	uint8_t			log_id;		/* per-port logical id */
	struct mvpp2_dmamem	*ring;		/* descriptor ring memory */
	struct mvpp2_buf	*buf;		/* per-descriptor mbuf state */
	struct mvpp2_tx_desc	*descs;		/* KVA of the descriptor ring */
	int			prod;		/* producer index */
	int			cnt;		/* descriptors in flight */
	int			cons;		/* consumer index */

	uint32_t		done_pkts_coal;	/* tx-done coalescing threshold */
};
133 
/* Software state for one receive queue. */
struct mvpp2_rx_queue {
	uint8_t			id;		/* global queue id */
	struct mvpp2_dmamem	*ring;		/* descriptor ring memory */
	struct mvpp2_rx_desc	*descs;		/* KVA of the descriptor ring */
	int			prod;		/* producer index */
	struct if_rxring	rxring;		/* rx ring accounting */
	int			cons;		/* consumer index */

	uint32_t		pkts_coal;	/* rx packet coalescing */
	uint32_t		time_coal;	/* rx time coalescing */
};
145 
/* A chunk of bus_dma(9) memory: map, segment, size and its KVA. */
struct mvpp2_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;
};
/* Accessors: DMA map, byte length, device address and kernel address. */
#define MVPP2_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MVPP2_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MVPP2_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MVPP2_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
156 
struct mvpp2_port;
/*
 * Per-controller (mvppc) state: register windows, clock, buffer
 * manager pools, parser shadow state, the aggregated TX queues
 * and the attached ports.
 */
struct mvpp2_softc {
	struct device		sc_dev;
	int			sc_node;	/* FDT node */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh_base;	/* packet processor registers */
	bus_space_handle_t	sc_ioh_iface;	/* interface registers */
	paddr_t			sc_ioh_paddr;	/* phys addr of base window */
	bus_size_t		sc_iosize_base;
	bus_size_t		sc_iosize_iface;
	bus_dma_tag_t		sc_dmat;
	struct regmap		*sc_rm;		/* system-controller regmap */

	uint32_t		sc_tclk;	/* "pp_clk" frequency */

	struct mvpp2_bm_pool	*sc_bm_pools;	/* buffer manager pools */
	int			sc_npools;

	struct mvpp2_prs_shadow	*sc_prs_shadow;	/* parser TCAM shadow */
	uint8_t			*sc_prs_double_vlans;

	int			sc_aggr_ntxq;	/* # of aggregated TX queues */
	struct mvpp2_tx_queue	*sc_aggr_txqs;

	struct mvpp2_port	**sc_ports;
};
183 
/*
 * Per-port (mvpp) state: one instance per ethernet port child
 * node of the controller.
 */
struct mvpp2_port {
	struct device		sc_dev;
	struct mvpp2_softc	*sc;		/* parent controller */
	int			sc_node;	/* FDT node */
	bus_dma_tag_t		sc_dmat;
	int			sc_id;		/* port id */
	int			sc_gop_id;	/* group-of-ports (MAC) id */

	struct arpcom		sc_ac;
#define sc_lladdr	sc_ac.ac_enaddr
	struct mii_data		sc_mii;
#define sc_media	sc_mii.mii_media
	struct mii_bus		*sc_mdio;

	/* PHY connection type; presumably parsed from the FDT
	 * "phy-mode" property -- confirm in mvpp2_port_attach(). */
	enum {
		PHY_MODE_XAUI,
		PHY_MODE_10GBASER,
		PHY_MODE_2500BASEX,
		PHY_MODE_1000BASEX,
		PHY_MODE_SGMII,
		PHY_MODE_RGMII,
		PHY_MODE_RGMII_ID,
		PHY_MODE_RGMII_RXID,
		PHY_MODE_RGMII_TXID,
	}			sc_phy_mode;
	int			sc_fixed_link;	/* fixed-link, no PHY */
	int			sc_inband_status; /* use in-band link status */
	int			sc_link;	/* current link state */
	int			sc_phyloc;
	int			sc_sfp;

	int			sc_ntxq;	/* # of TX queues */
	int			sc_nrxq;	/* # of RX queues */

	struct mvpp2_tx_queue	*sc_txqs;
	struct mvpp2_rx_queue	*sc_rxqs;

	struct timeout		sc_tick;

	uint32_t		sc_tx_time_coal;
};

/* Number of ports the packet processor supports. */
#define MVPP2_MAX_PORTS		4
227 
/* Arguments handed from the controller to each port on attach. */
struct mvpp2_attach_args {
	int			ma_node;	/* port's FDT node */
	bus_dma_tag_t		ma_dmat;
};

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)

/* NOTE(review): presumably serializes SFP ("sff") module access
 * across all ports -- confirm at the use sites. */
static struct rwlock mvpp2_sff_lock = RWLOCK_INITIALIZER("mvpp2sff");
236 
237 int	mvpp2_match(struct device *, void *, void *);
238 void	mvpp2_attach(struct device *, struct device *, void *);
239 void	mvpp2_attach_deferred(struct device *);
240 
/* Autoconf glue for the mvppc controller device. */
struct cfattach mvppc_ca = {
	sizeof(struct mvpp2_softc), mvpp2_match, mvpp2_attach
};

struct cfdriver mvppc_cd = {
	NULL, "mvppc", DV_DULL
};
248 
249 int	mvpp2_port_match(struct device *, void *, void *);
250 void	mvpp2_port_attach(struct device *, struct device *, void *);
251 
/* Autoconf glue for the mvpp network-interface (port) device. */
struct cfattach mvpp_ca = {
	sizeof(struct mvpp2_port), mvpp2_port_match, mvpp2_port_attach
};

struct cfdriver mvpp_cd = {
	NULL, "mvpp", DV_IFNET
};
259 
260 void	mvpp2_port_attach_sfp(struct device *);
261 
262 uint32_t mvpp2_read(struct mvpp2_softc *, bus_addr_t);
263 void	mvpp2_write(struct mvpp2_softc *, bus_addr_t, uint32_t);
264 uint32_t mvpp2_gmac_read(struct mvpp2_port *, bus_addr_t);
265 void	mvpp2_gmac_write(struct mvpp2_port *, bus_addr_t, uint32_t);
266 uint32_t mvpp2_xlg_read(struct mvpp2_port *, bus_addr_t);
267 void	mvpp2_xlg_write(struct mvpp2_port *, bus_addr_t, uint32_t);
268 uint32_t mvpp2_xpcs_read(struct mvpp2_port *, bus_addr_t);
269 void	mvpp2_xpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
270 uint32_t mvpp2_mpcs_read(struct mvpp2_port *, bus_addr_t);
271 void	mvpp2_mpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
272 
273 int	mvpp2_ioctl(struct ifnet *, u_long, caddr_t);
274 void	mvpp2_start(struct ifnet *);
275 int	mvpp2_rxrinfo(struct mvpp2_port *, struct if_rxrinfo *);
276 void	mvpp2_watchdog(struct ifnet *);
277 
278 int	mvpp2_media_change(struct ifnet *);
279 void	mvpp2_media_status(struct ifnet *, struct ifmediareq *);
280 
281 int	mvpp2_mii_readreg(struct device *, int, int);
282 void	mvpp2_mii_writereg(struct device *, int, int, int);
283 void	mvpp2_mii_statchg(struct device *);
284 void	mvpp2_inband_statchg(struct mvpp2_port *);
285 void	mvpp2_port_change(struct mvpp2_port *);
286 
287 void	mvpp2_tick(void *);
288 void	mvpp2_rxtick(void *);
289 
290 int	mvpp2_link_intr(void *);
291 int	mvpp2_intr(void *);
292 void	mvpp2_tx_proc(struct mvpp2_port *, uint8_t);
293 void	mvpp2_txq_proc(struct mvpp2_port *, struct mvpp2_tx_queue *);
294 void	mvpp2_rx_proc(struct mvpp2_port *, uint8_t);
295 void	mvpp2_rxq_proc(struct mvpp2_port *, struct mvpp2_rx_queue *);
296 void	mvpp2_rx_refill(struct mvpp2_port *);
297 
298 void	mvpp2_up(struct mvpp2_port *);
299 void	mvpp2_down(struct mvpp2_port *);
300 void	mvpp2_iff(struct mvpp2_port *);
301 int	mvpp2_encap(struct mvpp2_port *, struct mbuf *, int *);
302 
303 void	mvpp2_aggr_txq_hw_init(struct mvpp2_softc *, struct mvpp2_tx_queue *);
304 void	mvpp2_txq_hw_init(struct mvpp2_port *, struct mvpp2_tx_queue *);
305 void	mvpp2_rxq_hw_init(struct mvpp2_port *, struct mvpp2_rx_queue *);
306 void	mvpp2_txq_hw_deinit(struct mvpp2_port *, struct mvpp2_tx_queue *);
307 void	mvpp2_rxq_hw_drop(struct mvpp2_port *, struct mvpp2_rx_queue *);
308 void	mvpp2_rxq_hw_deinit(struct mvpp2_port *, struct mvpp2_rx_queue *);
309 void	mvpp2_rxq_long_pool_set(struct mvpp2_port *, int, int);
310 void	mvpp2_rxq_short_pool_set(struct mvpp2_port *, int, int);
311 
312 void	mvpp2_mac_reset_assert(struct mvpp2_port *);
313 void	mvpp2_pcs_reset_assert(struct mvpp2_port *);
314 void	mvpp2_pcs_reset_deassert(struct mvpp2_port *);
315 void	mvpp2_mac_config(struct mvpp2_port *);
316 void	mvpp2_xlg_config(struct mvpp2_port *);
317 void	mvpp2_gmac_config(struct mvpp2_port *);
318 void	mvpp2_comphy_config(struct mvpp2_port *, int);
319 void	mvpp2_gop_config(struct mvpp2_port *);
320 void	mvpp2_gop_intr_mask(struct mvpp2_port *);
321 void	mvpp2_gop_intr_unmask(struct mvpp2_port *);
322 
323 struct mvpp2_dmamem *
324 	mvpp2_dmamem_alloc(struct mvpp2_softc *, bus_size_t, bus_size_t);
325 void	mvpp2_dmamem_free(struct mvpp2_softc *, struct mvpp2_dmamem *);
326 struct mbuf *mvpp2_alloc_mbuf(struct mvpp2_softc *, bus_dmamap_t);
327 void	mvpp2_fill_rx_ring(struct mvpp2_softc *);
328 
329 void	mvpp2_interrupts_enable(struct mvpp2_port *, int);
330 void	mvpp2_interrupts_disable(struct mvpp2_port *, int);
331 int	mvpp2_egress_port(struct mvpp2_port *);
332 int	mvpp2_txq_phys(int, int);
333 void	mvpp2_defaults_set(struct mvpp2_port *);
334 void	mvpp2_ingress_enable(struct mvpp2_port *);
335 void	mvpp2_ingress_disable(struct mvpp2_port *);
336 void	mvpp2_egress_enable(struct mvpp2_port *);
337 void	mvpp2_egress_disable(struct mvpp2_port *);
338 void	mvpp2_port_enable(struct mvpp2_port *);
339 void	mvpp2_port_disable(struct mvpp2_port *);
340 void	mvpp2_rxq_status_update(struct mvpp2_port *, int, int, int);
341 int	mvpp2_rxq_received(struct mvpp2_port *, int);
342 void	mvpp2_rxq_offset_set(struct mvpp2_port *, int, int);
343 void	mvpp2_txp_max_tx_size_set(struct mvpp2_port *);
344 void	mvpp2_rx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
345 	    uint32_t);
346 void	mvpp2_tx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_tx_queue *,
347 	    uint32_t);
348 void	mvpp2_rx_time_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
349 	    uint32_t);
350 void	mvpp2_tx_time_coal_set(struct mvpp2_port *, uint32_t);
351 
352 void	mvpp2_axi_config(struct mvpp2_softc *);
353 void	mvpp2_bm_pool_init(struct mvpp2_softc *);
354 void	mvpp2_rx_fifo_init(struct mvpp2_softc *);
355 void	mvpp2_tx_fifo_init(struct mvpp2_softc *);
356 int	mvpp2_prs_default_init(struct mvpp2_softc *);
357 void	mvpp2_prs_hw_inv(struct mvpp2_softc *, int);
358 void	mvpp2_prs_hw_port_init(struct mvpp2_softc *, int, int, int, int);
359 void	mvpp2_prs_def_flow_init(struct mvpp2_softc *);
360 void	mvpp2_prs_mh_init(struct mvpp2_softc *);
361 void	mvpp2_prs_mac_init(struct mvpp2_softc *);
362 void	mvpp2_prs_dsa_init(struct mvpp2_softc *);
363 int	mvpp2_prs_etype_init(struct mvpp2_softc *);
364 int	mvpp2_prs_vlan_init(struct mvpp2_softc *);
365 int	mvpp2_prs_pppoe_init(struct mvpp2_softc *);
366 int	mvpp2_prs_ip6_init(struct mvpp2_softc *);
367 int	mvpp2_prs_ip4_init(struct mvpp2_softc *);
368 void	mvpp2_prs_shadow_ri_set(struct mvpp2_softc *, int,
369 	    uint32_t, uint32_t);
370 void	mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *, uint32_t);
371 void	mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *, uint32_t, int);
372 void	mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *, uint32_t);
373 uint32_t mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *);
374 void	mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *, uint32_t,
375 	    uint8_t, uint8_t);
376 void	mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *, uint32_t,
377 	    uint8_t *, uint8_t *);
378 int	mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *, int, uint16_t);
379 void	mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
380 int	mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *);
381 int	mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *);
382 void	mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *, uint32_t,
383 	    uint32_t *, uint32_t *);
384 void	mvpp2_prs_match_etype(struct mvpp2_prs_entry *, uint32_t, uint16_t);
385 int	mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *);
386 void	mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
387 void	mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
388 void	mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *, uint32_t, uint32_t);
389 void	mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *, uint32_t, uint32_t);
390 void	mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *, int, uint32_t);
391 void	mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *, uint32_t, int,
392 	    uint32_t);
393 void	mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *, uint32_t);
394 void	mvpp2_prs_shadow_set(struct mvpp2_softc *, int, uint32_t);
395 int	mvpp2_prs_hw_write(struct mvpp2_softc *, struct mvpp2_prs_entry *);
396 int	mvpp2_prs_hw_read(struct mvpp2_softc *, struct mvpp2_prs_entry *, int);
397 int	mvpp2_prs_flow_find(struct mvpp2_softc *, int);
398 int	mvpp2_prs_tcam_first_free(struct mvpp2_softc *, uint8_t, uint8_t);
399 void	mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *, uint32_t, int);
400 void	mvpp2_prs_mac_promisc_set(struct mvpp2_softc *, uint32_t, int, int);
401 void	mvpp2_prs_dsa_tag_set(struct mvpp2_softc *, uint32_t, int, int, int);
402 void	mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *, uint32_t,
403 	    int, int, int);
404 struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2_softc *, uint16_t,
405 	    int);
406 int	mvpp2_prs_vlan_add(struct mvpp2_softc *, uint16_t, int, uint32_t);
407 int	mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *);
408 struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2_softc *,
409 	    uint16_t, uint16_t);
410 int	mvpp2_prs_double_vlan_add(struct mvpp2_softc *, uint16_t, uint16_t,
411 	    uint32_t);
412 int	mvpp2_prs_ip4_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
413 int	mvpp2_prs_ip4_cast(struct mvpp2_softc *, uint16_t);
414 int	mvpp2_prs_ip6_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
415 int	mvpp2_prs_ip6_cast(struct mvpp2_softc *, uint16_t);
416 int	mvpp2_prs_mac_da_range_find(struct mvpp2_softc *, int, const uint8_t *,
417 	    uint8_t *, int);
418 int	mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *, const uint8_t *,
419 	    uint8_t *);
420 int	mvpp2_prs_mac_da_accept(struct mvpp2_port *, const uint8_t *, int);
421 void	mvpp2_prs_mac_del_all(struct mvpp2_port *);
422 int	mvpp2_prs_tag_mode_set(struct mvpp2_softc *, int, int);
423 int	mvpp2_prs_def_flow(struct mvpp2_port *);
424 void	mvpp2_cls_flow_write(struct mvpp2_softc *, struct mvpp2_cls_flow_entry *);
425 void	mvpp2_cls_lookup_write(struct mvpp2_softc *, struct mvpp2_cls_lookup_entry *);
426 void	mvpp2_cls_init(struct mvpp2_softc *);
427 void	mvpp2_cls_port_config(struct mvpp2_port *);
428 void	mvpp2_cls_oversize_rxq_set(struct mvpp2_port *);
429 
430 int
431 mvpp2_match(struct device *parent, void *cfdata, void *aux)
432 {
433 	struct fdt_attach_args *faa = aux;
434 
435 	return OF_is_compatible(faa->fa_node, "marvell,armada-7k-pp22");
436 }
437 
/*
 * Attach the packet processor controller: map its two register
 * windows, look up the optional system-controller regmap and
 * enable the clocks.  The remaining initialization is deferred
 * via config_defer().
 */
void
mvpp2_attach(struct device *parent, struct device *self, void *aux)
{
	struct mvpp2_softc *sc = (void *)self;
	struct fdt_attach_args *faa = aux;

	/* We need both the base and the interface register windows. */
	if (faa->fa_nreg < 2) {
		printf(": no registers\n");
		return;
	}

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;

	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh_base)) {
		printf(": can't map registers\n");
		return;
	}
	sc->sc_iosize_base = faa->fa_reg[0].size;

	/*
	 * Also record the physical address of the base window;
	 * presumably needed to point other hardware at our
	 * registers -- confirm against the users of sc_ioh_paddr.
	 */
	sc->sc_ioh_paddr = bus_space_mmap(sc->sc_iot, faa->fa_reg[0].addr,
	    0, PROT_READ | PROT_WRITE, 0);
	KASSERT(sc->sc_ioh_paddr != -1);
	sc->sc_ioh_paddr &= PMAP_PA_MASK;

	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
	    faa->fa_reg[1].size, 0, &sc->sc_ioh_iface)) {
		printf(": can't map registers\n");
		/* Undo the first mapping before bailing out. */
		bus_space_unmap(sc->sc_iot, sc->sc_ioh_base,
		    sc->sc_iosize_base);
		return;
	}
	sc->sc_iosize_iface = faa->fa_reg[1].size;

	/* Optional "marvell,system-controller" regmap. */
	sc->sc_rm = regmap_byphandle(OF_getpropint(faa->fa_node,
	    "marvell,system-controller", 0));

	clock_enable_all(faa->fa_node);
	sc->sc_tclk = clock_get_frequency(faa->fa_node, "pp_clk");

	printf("\n");

	/* Finish once all other drivers had a chance to attach. */
	config_defer(self, mvpp2_attach_deferred);
}
484 
/*
 * Deferred controller initialization: configure the AXI fabric,
 * disable hardware SMI polling, set up the aggregated TX queue,
 * the RX/TX FIFOs, the buffer manager pools, the parser and the
 * classifier, then attach an mvpp(4) port for every child node.
 */
void
mvpp2_attach_deferred(struct device *self)
{
	struct mvpp2_softc *sc = (void *)self;
	struct mvpp2_attach_args maa;
	struct mvpp2_tx_queue *txq;
	int i, node;

	mvpp2_axi_config(sc);

	/* Disable hardware polling of the SMI (MDIO) bus. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh_iface, MVPP22_SMI_MISC_CFG_REG,
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh_iface,
	    MVPP22_SMI_MISC_CFG_REG) & ~MVPP22_SMI_POLLING_EN);

	/* A single aggregated TX queue is used. */
	sc->sc_aggr_ntxq = 1;
	sc->sc_aggr_txqs = mallocarray(sc->sc_aggr_ntxq,
	    sizeof(*sc->sc_aggr_txqs), M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->sc_aggr_ntxq; i++) {
		txq = &sc->sc_aggr_txqs[i];
		txq->id = i;
		mvpp2_aggr_txq_hw_init(sc, txq);
	}

	mvpp2_rx_fifo_init(sc);
	mvpp2_tx_fifo_init(sc);

	mvpp2_write(sc, MVPP2_TX_SNOOP_REG, 0x1);

	mvpp2_bm_pool_init(sc);

	/* Software shadow of the parser TCAM, one slot per entry. */
	sc->sc_prs_shadow = mallocarray(MVPP2_PRS_TCAM_SRAM_SIZE,
	    sizeof(*sc->sc_prs_shadow), M_DEVBUF, M_WAITOK | M_ZERO);

	mvpp2_prs_default_init(sc);
	mvpp2_cls_init(sc);

	/* Attach one mvpp(4) port per child node. */
	memset(&maa, 0, sizeof(maa));
	for (node = OF_child(sc->sc_node); node; node = OF_peer(node)) {
		maa.ma_node = node;
		maa.ma_dmat = sc->sc_dmat;
		config_found(self, &maa, NULL);
	}
}
529 
530 void
531 mvpp2_axi_config(struct mvpp2_softc *sc)
532 {
533 	uint32_t reg;
534 
535 	mvpp2_write(sc, MVPP22_BM_ADDR_HIGH_RLS_REG, 0);
536 
537 	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
538 	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
539 	mvpp2_write(sc, MVPP22_AXI_BM_WR_ATTR_REG, reg);
540 	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, reg);
541 	mvpp2_write(sc, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, reg);
542 	mvpp2_write(sc, MVPP22_AXI_RX_DATA_WR_ATTR_REG, reg);
543 
544 	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
545 	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
546 	mvpp2_write(sc, MVPP22_AXI_BM_RD_ATTR_REG, reg);
547 	mvpp2_write(sc, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, reg);
548 	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, reg);
549 	mvpp2_write(sc, MVPP22_AXI_TX_DATA_RD_ATTR_REG, reg);
550 
551 	reg = (MVPP22_AXI_CODE_CACHE_NON_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
552 	    (MVPP22_AXI_CODE_DOMAIN_SYSTEM << MVPP22_AXI_CODE_DOMAIN_OFFS);
553 	mvpp2_write(sc, MVPP22_AXI_RD_NORMAL_CODE_REG, reg);
554 	mvpp2_write(sc, MVPP22_AXI_WR_NORMAL_CODE_REG, reg);
555 
556 	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
557 	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
558 	mvpp2_write(sc, MVPP22_AXI_RD_SNOOP_CODE_REG, reg);
559 
560 	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
561 	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
562 	mvpp2_write(sc, MVPP22_AXI_WR_SNOOP_CODE_REG, reg);
563 }
564 
/*
 * Set up the hardware buffer manager: one pool per CPU (capped
 * at the number of pools the hardware provides), each pre-filled
 * with MVPP2_BM_SIZE mbuf clusters.
 */
void
mvpp2_bm_pool_init(struct mvpp2_softc *sc)
{
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	uint64_t phys, virt;
	int i, j, inuse;

	/* Mask and clear all BM interrupts. */
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		mvpp2_write(sc, MVPP2_BM_INTR_MASK_REG(i), 0);
		mvpp2_write(sc, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	sc->sc_npools = ncpus;
	sc->sc_npools = min(sc->sc_npools, MVPP2_BM_POOLS_NUM);

	sc->sc_bm_pools = mallocarray(sc->sc_npools, sizeof(*sc->sc_bm_pools),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->sc_npools; i++) {
		bm = &sc->sc_bm_pools[i];
		/* Room for MVPP2_BM_SIZE (phys, virt) pointer pairs. */
		bm->bm_mem = mvpp2_dmamem_alloc(sc,
		    MVPP2_BM_SIZE * sizeof(uint64_t) * 2,
		    MVPP2_BM_POOL_PTR_ALIGN);
		KASSERT(bm->bm_mem != NULL);
		bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(bm->bm_mem), 0,
		    MVPP2_DMA_LEN(bm->bm_mem),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Stop the pool before re-pointing its memory. */
		mvpp2_write(sc, MVPP2_BM_POOL_CTRL_REG(i),
		    mvpp2_read(sc, MVPP2_BM_POOL_CTRL_REG(i)) |
		    MVPP2_BM_STOP_MASK);

		mvpp2_write(sc, MVPP2_BM_POOL_BASE_REG(i),
		    (uint64_t)MVPP2_DMA_DVA(bm->bm_mem) & 0xffffffff);
		mvpp2_write(sc, MVPP22_BM_POOL_BASE_HIGH_REG,
		    ((uint64_t)MVPP2_DMA_DVA(bm->bm_mem) >> 32)
		    & MVPP22_BM_POOL_BASE_HIGH_MASK);
		mvpp2_write(sc, MVPP2_BM_POOL_SIZE_REG(i),
		    MVPP2_BM_SIZE);

		mvpp2_write(sc, MVPP2_BM_POOL_CTRL_REG(i),
		    mvpp2_read(sc, MVPP2_BM_POOL_CTRL_REG(i)) |
		    MVPP2_BM_START_MASK);

		/*
		 * U-Boot might not have cleaned its pools.  The pool needs
		 * to be empty before we fill it, otherwise our packets are
		 * written to wherever U-Boot allocated memory.  Cleaning it
		 * up ourselves is worrying as well, since the BM's pages are
		 * probably in our own memory.  Best we can do is stop the BM,
		 * set new memory and drain the pool.
		 */
		inuse = mvpp2_read(sc, MVPP2_BM_POOL_PTRS_NUM_REG(i)) &
		    MVPP2_BM_POOL_PTRS_NUM_MASK;
		inuse += mvpp2_read(sc, MVPP2_BM_BPPI_PTRS_NUM_REG(i)) &
		    MVPP2_BM_BPPI_PTRS_NUM_MASK;
		/* NOTE(review): the extra allocation presumably flushes an
		 * in-flight pointer the counters miss -- confirm. */
		if (inuse)
			inuse++;
		/* Drain: each read allocates (removes) one buffer. */
		for (j = 0; j < inuse; j++)
			mvpp2_read(sc, MVPP2_BM_PHY_ALLOC_REG(i));

		mvpp2_write(sc, MVPP2_POOL_BUF_SIZE_REG(i),
		    roundup(MCLBYTES, 1 << MVPP2_POOL_BUF_SIZE_OFFSET));

		bm->rxbuf = mallocarray(MVPP2_BM_SIZE, sizeof(struct mvpp2_buf),
		    M_DEVBUF, M_WAITOK);
		bm->freelist = mallocarray(MVPP2_BM_SIZE, sizeof(*bm->freelist),
		    M_DEVBUF, M_WAITOK | M_ZERO);

		/* One single-segment DMA map per buffer slot. */
		for (j = 0; j < MVPP2_BM_SIZE; j++) {
			rxb = &bm->rxbuf[j];
			bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->mb_map);
			rxb->mb_m = NULL;
		}

		/* Use pool-id and rxbuf index as cookie. */
		for (j = 0; j < MVPP2_BM_SIZE; j++)
			bm->freelist[j] = (i << 16) | (j << 0);

		/* Fill the pool: release each buffer to the BM, passing
		 * the cookie as the "virtual address". */
		for (j = 0; j < MVPP2_BM_SIZE; j++) {
			rxb = &bm->rxbuf[j];
			rxb->mb_m = mvpp2_alloc_mbuf(sc, rxb->mb_map);
			if (rxb->mb_m == NULL)
				break;

			KASSERT(bm->freelist[bm->free_cons] != -1);
			virt = bm->freelist[bm->free_cons];
			bm->freelist[bm->free_cons] = -1;
			bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;

			/* High halves must be set up before the low-word
			 * release write triggers the transaction. */
			phys = rxb->mb_map->dm_segs[0].ds_addr;
			mvpp2_write(sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
			    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
			    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
			    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
			mvpp2_write(sc, MVPP2_BM_VIRT_RLS_REG,
			    virt & 0xffffffff);
			mvpp2_write(sc, MVPP2_BM_PHY_RLS_REG(i),
			    phys & 0xffffffff);
		}
	}
}
669 
670 void
671 mvpp2_rx_fifo_init(struct mvpp2_softc *sc)
672 {
673 	int i;
674 
675 	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
676 	    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
677 	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
678 	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
679 
680 	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
681 	    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
682 	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
683 	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
684 
685 	for (i = 2; i < MVPP2_MAX_PORTS; i++) {
686 		mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(i),
687 		    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
688 		mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(i),
689 		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
690 	}
691 
692 	mvpp2_write(sc, MVPP2_RX_MIN_PKT_SIZE_REG, MVPP2_RX_FIFO_PORT_MIN_PKT);
693 	mvpp2_write(sc, MVPP2_RX_FIFO_INIT_REG, 0x1);
694 }
695 
696 void
697 mvpp2_tx_fifo_init(struct mvpp2_softc *sc)
698 {
699 	int i;
700 
701 	mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(0),
702 	    MVPP22_TX_FIFO_DATA_SIZE_10KB);
703 	mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(0),
704 	    MVPP2_TX_FIFO_THRESHOLD_10KB);
705 
706 	for (i = 1; i < MVPP2_MAX_PORTS; i++) {
707 		mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(i),
708 		    MVPP22_TX_FIFO_DATA_SIZE_3KB);
709 		mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(i),
710 		    MVPP2_TX_FIFO_THRESHOLD_3KB);
711 	}
712 }
713 
714 int
715 mvpp2_prs_default_init(struct mvpp2_softc *sc)
716 {
717 	int i, j, ret;
718 
719 	mvpp2_write(sc, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
720 
721 	for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
722 		mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, i);
723 		for (j = 0; j < MVPP2_PRS_TCAM_WORDS; j++)
724 			mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(j), 0);
725 
726 		mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, i);
727 		for (j = 0; j < MVPP2_PRS_SRAM_WORDS; j++)
728 			mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(j), 0);
729 	}
730 
731 	for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++)
732 		mvpp2_prs_hw_inv(sc, i);
733 
734 	for (i = 0; i < MVPP2_MAX_PORTS; i++)
735 		mvpp2_prs_hw_port_init(sc, i, MVPP2_PRS_LU_MH,
736 		    MVPP2_PRS_PORT_LU_MAX, 0);
737 
738 	mvpp2_prs_def_flow_init(sc);
739 	mvpp2_prs_mh_init(sc);
740 	mvpp2_prs_mac_init(sc);
741 	mvpp2_prs_dsa_init(sc);
742 	ret = mvpp2_prs_etype_init(sc);
743 	if (ret)
744 		return ret;
745 	ret = mvpp2_prs_vlan_init(sc);
746 	if (ret)
747 		return ret;
748 	ret = mvpp2_prs_pppoe_init(sc);
749 	if (ret)
750 		return ret;
751 	ret = mvpp2_prs_ip6_init(sc);
752 	if (ret)
753 		return ret;
754 	ret = mvpp2_prs_ip4_init(sc);
755 	if (ret)
756 		return ret;
757 
758 	return 0;
759 }
760 
/*
 * Invalidate the parser TCAM entry at 'index' by writing its
 * invalid-bit word.
 */
void
mvpp2_prs_hw_inv(struct mvpp2_softc *sc, int index)
{
	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
	    MVPP2_PRS_TCAM_INV_MASK);
}
768 
769 void
770 mvpp2_prs_hw_port_init(struct mvpp2_softc *sc, int port,
771     int lu_first, int lu_max, int offset)
772 {
773 	uint32_t reg;
774 
775 	reg = mvpp2_read(sc, MVPP2_PRS_INIT_LOOKUP_REG);
776 	reg &= ~MVPP2_PRS_PORT_LU_MASK(port);
777 	reg |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
778 	mvpp2_write(sc, MVPP2_PRS_INIT_LOOKUP_REG, reg);
779 
780 	reg = mvpp2_read(sc, MVPP2_PRS_MAX_LOOP_REG(port));
781 	reg &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
782 	reg |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
783 	mvpp2_write(sc, MVPP2_PRS_MAX_LOOP_REG(port), reg);
784 
785 	reg = mvpp2_read(sc, MVPP2_PRS_INIT_OFFS_REG(port));
786 	reg &= ~MVPP2_PRS_INIT_OFF_MASK(port);
787 	reg |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
788 	mvpp2_write(sc, MVPP2_PRS_INIT_OFFS_REG(port), reg);
789 }
790 
791 void
792 mvpp2_prs_def_flow_init(struct mvpp2_softc *sc)
793 {
794 	struct mvpp2_prs_entry pe;
795 	int i;
796 
797 	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
798 		memset(&pe, 0, sizeof(pe));
799 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
800 		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - i;
801 		mvpp2_prs_tcam_port_map_set(&pe, 0);
802 		mvpp2_prs_sram_ai_update(&pe, i, MVPP2_PRS_FLOW_ID_MASK);
803 		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
804 		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_FLOWS);
805 		mvpp2_prs_hw_write(sc, &pe);
806 	}
807 }
808 
809 void
810 mvpp2_prs_mh_init(struct mvpp2_softc *sc)
811 {
812 	struct mvpp2_prs_entry pe;
813 
814 	memset(&pe, 0, sizeof(pe));
815 	pe.index = MVPP2_PE_MH_DEFAULT;
816 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
817 	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
818 	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
819 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
820 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
821 	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MH);
822 	mvpp2_prs_hw_write(sc, &pe);
823 }
824 
825 void
826 mvpp2_prs_mac_init(struct mvpp2_softc *sc)
827 {
828 	struct mvpp2_prs_entry pe;
829 
830 	memset(&pe, 0, sizeof(pe));
831 	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
832 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
833 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
834 	    MVPP2_PRS_RI_DROP_MASK);
835 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
836 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
837 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
838 	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
839 	mvpp2_prs_hw_write(sc, &pe);
840 	mvpp2_prs_mac_drop_all_set(sc, 0, 0);
841 	mvpp2_prs_mac_promisc_set(sc, 0, MVPP2_PRS_L2_UNI_CAST, 0);
842 	mvpp2_prs_mac_promisc_set(sc, 0, MVPP2_PRS_L2_MULTI_CAST, 0);
843 }
844 
void
mvpp2_prs_dsa_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	/*
	 * Program the DSA/EDSA tag recognition entries for both tagged
	 * and untagged frames, with and without an outer ethertype.
	 */
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	/*
	 * Default DSA entry in case no DSA/EDSA tag was found: shift
	 * nothing and continue at the VLAN lookup.  The shadow is
	 * recorded under LU_MAC (mirrors the reference implementation).
	 */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_hw_write(sc, &pe);
}
868 
/*
 * Populate the L2 ethertype lookup entries of the parser TCAM:
 * PPPoE, ARP, LBTD, IPv4 (with/without options), IPv6, plus a
 * catch-all for unknown ethertypes.  Returns 0 on success or a
 * negative value if no free TCAM entry is available.
 */
int
mvpp2_prs_etype_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_PPPOE);
	/* Skip over the PPPoE header before the next lookup stage. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_ARP);
	/* ARP terminates parsing; mark L3 protocol and record offset. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
	/* Loopback detect frames are flagged as special for the CPU. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_IP);
	/* Also require version 4 and IHL == 5 in the first header byte. */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	pe.index = tid;

	/*
	 * Deliberately no memset here: this entry reuses the previous
	 * IPv4 entry (etype match, shift, offsets) and only relaxes the
	 * IHL requirement, keeping just the version check, and replaces
	 * the result info with "IPv4 with options".
	 */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD, MVPP2_PRS_IPV4_HEAD_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_IPV6);
	/* Skip ethertype + 8 fixed bytes + one L3 address. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
	    MVPP2_MAX_L3_ADDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1033 
/*
 * Initialize VLAN parser entries: single and double VLAN tag
 * recognition plus the default "double VLAN" and "no VLAN" entries.
 * Returns 0 on success, or the negative error from an add helper.
 */
int
mvpp2_prs_vlan_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int ret;

	/* M_WAITOK: this allocation sleeps rather than failing. */
	sc->sc_prs_double_vlans = mallocarray(MVPP2_PRS_DBL_VLANS_MAX,
	    sizeof(*sc->sc_prs_double_vlans), M_DEVBUF, M_WAITOK | M_ZERO);

	/* Double-tagged combinations: 0x8100/0x88a8 and 0x8100/0x8100. */
	ret = mvpp2_prs_double_vlan_add(sc, ETHERTYPE_VLAN, ETHERTYPE_QINQ,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_double_vlan_add(sc, ETHERTYPE_VLAN, ETHERTYPE_VLAN,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	/* Single-tagged entries for 0x88a8 and 0x8100. */
	ret = mvpp2_prs_vlan_add(sc, ETHERTYPE_QINQ, MVPP2_PRS_SINGLE_VLAN_AI,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_vlan_add(sc, ETHERTYPE_VLAN, MVPP2_PRS_SINGLE_VLAN_AI,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;

	/* Default double-VLAN entry: flagged via the AI bit. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
	    MVPP2_PRS_RI_VLAN_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
	    MVPP2_PRS_DBL_VLAN_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry for untagged frames. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
	    MVPP2_PRS_RI_VLAN_MASK);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1085 
/*
 * Initialize PPPoE parser entries: IPv4 (with and without options),
 * IPv6, and a catch-all for unknown PPP protocols.  Returns 0 on
 * success or a negative value if no free TCAM entry is available.
 */
int
mvpp2_prs_pppoe_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* PPP protocol IPv4: default entry, assumes header options. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, PPP_IP);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/*
	 * Deliberately no memset here: this entry reuses the previous
	 * IPv4-over-PPPoE entry and adds a version/IHL check so that
	 * option-less IPv4 headers get MVPP2_PRS_RI_L3_IP4 instead.
	 */
	pe.index = tid;
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* PPP protocol IPv6. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* Non-IP PPP protocols: mark L3 unknown and finish parsing. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1164 
/*
 * Initialize IPv6 parser entries: L4 protocol recognition (TCP, UDP,
 * ICMPv6, IP-in-IP), multicast, a hop-limit-zero drop entry, and
 * default entries for unknown protocols/extensions/addresses.
 * Returns 0 on success or a negative value on TCAM exhaustion.
 */
int
mvpp2_prs_ip6_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid, ret;

	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	/* ICMPv6 is punted to the CPU as a special frame. */
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_ICMPV6,
	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_IPIP, MVPP2_PRS_RI_UDF7_IP6_LITE,
	    MVPP2_PRS_RI_UDF7_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
	if (ret)
		return ret;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/*
	 * Entry matching hop limit == 0 (header byte 1 under the hop
	 * mask): mark the frame for drop and terminate parsing.
	 */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe,
	    MVPP2_PRS_RI_L3_UN | MVPP2_PRS_RI_DROP_MASK,
	    MVPP2_PRS_RI_L3_PROTO_MASK | MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/*
	 * NOTE(review): shadow recorded as MVPP2_PRS_LU_IP4 for this and
	 * the next two IP6 entries — mirrors the reference driver, but
	 * verify that this is intended.
	 */
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry for unknown L4 protocol (no extension headers). */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry for unknown protocol with extension headers. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/*
	 * Default unknown-address entry: treat as unicast, rewind 18
	 * bytes and go through the IP6 lookup again.
	 */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1255 
/*
 * Initialize IPv4 parser entries: L4 protocol recognition (TCP, UDP,
 * IGMP), broadcast/multicast classification, plus defaults for
 * unknown protocols and addresses.  Returns 0 on success or a
 * negative value on TCAM exhaustion.
 */
int
mvpp2_prs_ip4_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int ret;

	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	/* IGMP is punted to the CPU as a special frame. */
	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_IGMP,
	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_BROAD_CAST);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
	if (ret)
		return ret;

	/*
	 * Default unknown-protocol entry: shift 12 bytes and re-enter
	 * the IP4 lookup with the DIP AI bit set for the address pass.
	 */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default unknown-address entry: classify as unicast, finish. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1313 
1314 int
1315 mvpp2_port_match(struct device *parent, void *cfdata, void *aux)
1316 {
1317 	struct mvpp2_attach_args *maa = aux;
1318 	char buf[32];
1319 
1320 	if (OF_getprop(maa->ma_node, "status", buf, sizeof(buf)) > 0 &&
1321 	    strcmp(buf, "disabled") == 0)
1322 		return 0;
1323 
1324 	return 1;
1325 }
1326 
1327 void
1328 mvpp2_port_attach(struct device *parent, struct device *self, void *aux)
1329 {
1330 	struct mvpp2_port *sc = (void *)self;
1331 	struct mvpp2_attach_args *maa = aux;
1332 	struct mvpp2_tx_queue *txq;
1333 	struct mvpp2_rx_queue *rxq;
1334 	struct ifnet *ifp;
1335 	uint32_t phy, reg;
1336 	int i, idx, len, node;
1337 	int mii_flags = 0;
1338 	char *phy_mode;
1339 	char *managed;
1340 
1341 	sc->sc = (void *)parent;
1342 	sc->sc_node = maa->ma_node;
1343 	sc->sc_dmat = maa->ma_dmat;
1344 
1345 	sc->sc_id = OF_getpropint(sc->sc_node, "port-id", 0);
1346 	sc->sc_gop_id = OF_getpropint(sc->sc_node, "gop-port-id", 0);
1347 	sc->sc_sfp = OF_getpropint(sc->sc_node, "sfp", 0);
1348 
1349 	len = OF_getproplen(sc->sc_node, "phy-mode");
1350 	if (len <= 0) {
1351 		printf("%s: cannot extract phy-mode\n", self->dv_xname);
1352 		return;
1353 	}
1354 
1355 	phy_mode = malloc(len, M_TEMP, M_WAITOK);
1356 	OF_getprop(sc->sc_node, "phy-mode", phy_mode, len);
1357 	if (!strncmp(phy_mode, "10gbase-r", strlen("10gbase-r")))
1358 		sc->sc_phy_mode = PHY_MODE_10GBASER;
1359 	else if (!strncmp(phy_mode, "10gbase-kr", strlen("10gbase-kr")))
1360 		sc->sc_phy_mode = PHY_MODE_10GBASER;
1361 	else if (!strncmp(phy_mode, "2500base-x", strlen("2500base-x")))
1362 		sc->sc_phy_mode = PHY_MODE_2500BASEX;
1363 	else if (!strncmp(phy_mode, "1000base-x", strlen("1000base-x")))
1364 		sc->sc_phy_mode = PHY_MODE_1000BASEX;
1365 	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
1366 		sc->sc_phy_mode = PHY_MODE_SGMII;
1367 	else if (!strncmp(phy_mode, "rgmii-rxid", strlen("rgmii-rxid")))
1368 		sc->sc_phy_mode = PHY_MODE_RGMII_RXID;
1369 	else if (!strncmp(phy_mode, "rgmii-txid", strlen("rgmii-txid")))
1370 		sc->sc_phy_mode = PHY_MODE_RGMII_TXID;
1371 	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
1372 		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
1373 	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
1374 		sc->sc_phy_mode = PHY_MODE_RGMII;
1375 	else {
1376 		printf("%s: cannot use phy-mode %s\n", self->dv_xname,
1377 		    phy_mode);
1378 		return;
1379 	}
1380 	free(phy_mode, M_TEMP, len);
1381 
1382 	/* Lookup PHY. */
1383 	phy = OF_getpropint(sc->sc_node, "phy", 0);
1384 	if (phy) {
1385 		node = OF_getnodebyphandle(phy);
1386 		if (!node) {
1387 			printf(": no phy\n");
1388 			return;
1389 		}
1390 		sc->sc_mdio = mii_byphandle(phy);
1391 		sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
1392 		sc->sc_sfp = OF_getpropint(node, "sfp", sc->sc_sfp);
1393 	}
1394 
1395 	if (sc->sc_sfp)
1396 		config_mountroot(self, mvpp2_port_attach_sfp);
1397 
1398 	if ((len = OF_getproplen(sc->sc_node, "managed")) >= 0) {
1399 		managed = malloc(len, M_TEMP, M_WAITOK);
1400 		OF_getprop(sc->sc_node, "managed", managed, len);
1401 		if (!strncmp(managed, "in-band-status",
1402 		    strlen("in-band-status")))
1403 			sc->sc_inband_status = 1;
1404 		free(managed, M_TEMP, len);
1405 	}
1406 
1407 	if (OF_getprop(sc->sc_node, "local-mac-address",
1408 	    &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
1409 		memset(sc->sc_lladdr, 0xff, sizeof(sc->sc_lladdr));
1410 	printf(": address %s\n", ether_sprintf(sc->sc_lladdr));
1411 
1412 	sc->sc_ntxq = sc->sc_nrxq = 1;
1413 	sc->sc_txqs = mallocarray(sc->sc_ntxq, sizeof(*sc->sc_txqs),
1414 	    M_DEVBUF, M_WAITOK | M_ZERO);
1415 	sc->sc_rxqs = mallocarray(sc->sc_nrxq, sizeof(*sc->sc_rxqs),
1416 	    M_DEVBUF, M_WAITOK | M_ZERO);
1417 
1418 	for (i = 0; i < sc->sc_ntxq; i++) {
1419 		txq = &sc->sc_txqs[i];
1420 		txq->id = mvpp2_txq_phys(sc->sc_id, i);
1421 		txq->log_id = i;
1422 		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
1423 	}
1424 
1425 	sc->sc_tx_time_coal = MVPP2_TXDONE_COAL_USEC;
1426 
1427 	for (i = 0; i < sc->sc_nrxq; i++) {
1428 		rxq = &sc->sc_rxqs[i];
1429 		rxq->id = sc->sc_id * 32 + i;
1430 		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
1431 		rxq->time_coal = MVPP2_RX_COAL_USEC;
1432 	}
1433 
1434 	mvpp2_egress_disable(sc);
1435 	mvpp2_port_disable(sc);
1436 
1437 	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_GROUP_INDEX_REG,
1438 	    sc->sc_id << MVPP2_ISR_RXQ_GROUP_INDEX_GROUP_SHIFT |
1439 	    0 /* queue vector id */);
1440 	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_REG,
1441 	    sc->sc_nrxq << MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_SIZE_SHIFT |
1442 	    0 /* first rxq */);
1443 
1444 	mvpp2_ingress_disable(sc);
1445 	mvpp2_defaults_set(sc);
1446 
1447 	mvpp2_cls_oversize_rxq_set(sc);
1448 	mvpp2_cls_port_config(sc);
1449 
1450 	/*
1451 	 * We have one pool per core, so all RX queues on a specific
1452 	 * core share that pool.  Also long and short uses the same
1453 	 * pool.
1454 	 */
1455 	for (i = 0; i < sc->sc_nrxq; i++) {
1456 		mvpp2_rxq_long_pool_set(sc, i, i);
1457 		mvpp2_rxq_short_pool_set(sc, i, i);
1458 	}
1459 
1460 	mvpp2_mac_reset_assert(sc);
1461 	mvpp2_pcs_reset_assert(sc);
1462 
1463 	timeout_set(&sc->sc_tick, mvpp2_tick, sc);
1464 
1465 	ifp = &sc->sc_ac.ac_if;
1466 	ifp->if_softc = sc;
1467 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1468 	ifp->if_ioctl = mvpp2_ioctl;
1469 	ifp->if_start = mvpp2_start;
1470 	ifp->if_watchdog = mvpp2_watchdog;
1471 	ifq_set_maxlen(&ifp->if_snd, MVPP2_NTXDESC - 1);
1472 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1473 
1474 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1475 
1476 	sc->sc_mii.mii_ifp = ifp;
1477 	sc->sc_mii.mii_readreg = mvpp2_mii_readreg;
1478 	sc->sc_mii.mii_writereg = mvpp2_mii_writereg;
1479 	sc->sc_mii.mii_statchg = mvpp2_mii_statchg;
1480 
1481 	ifmedia_init(&sc->sc_media, 0, mvpp2_media_change, mvpp2_media_status);
1482 
1483 	if (sc->sc_mdio) {
1484 		switch (sc->sc_phy_mode) {
1485 		case PHY_MODE_1000BASEX:
1486 			mii_flags |= MIIF_IS_1000X;
1487 			break;
1488 		case PHY_MODE_SGMII:
1489 			mii_flags |= MIIF_SGMII;
1490 			break;
1491 		case PHY_MODE_RGMII_ID:
1492 			mii_flags |= MIIF_RXID | MIIF_TXID;
1493 			break;
1494 		case PHY_MODE_RGMII_RXID:
1495 			mii_flags |= MIIF_RXID;
1496 			break;
1497 		case PHY_MODE_RGMII_TXID:
1498 			mii_flags |= MIIF_TXID;
1499 			break;
1500 		default:
1501 			break;
1502 		}
1503 		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
1504 		    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY,
1505 		    mii_flags);
1506 		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1507 			printf("%s: no PHY found!\n", self->dv_xname);
1508 			ifmedia_add(&sc->sc_mii.mii_media,
1509 			    IFM_ETHER|IFM_MANUAL, 0, NULL);
1510 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
1511 		} else
1512 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1513 	} else {
1514 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
1515 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1516 
1517 		if (sc->sc_inband_status) {
1518 			switch (sc->sc_phy_mode) {
1519 			case PHY_MODE_1000BASEX:
1520 				sc->sc_mii.mii_media_active =
1521 				    IFM_ETHER|IFM_1000_KX|IFM_FDX;
1522 				break;
1523 			case PHY_MODE_2500BASEX:
1524 				sc->sc_mii.mii_media_active =
1525 				    IFM_ETHER|IFM_2500_KX|IFM_FDX;
1526 				break;
1527 			case PHY_MODE_10GBASER:
1528 				sc->sc_mii.mii_media_active =
1529 				    IFM_ETHER|IFM_10G_KR|IFM_FDX;
1530 				break;
1531 			default:
1532 				break;
1533 			}
1534 			mvpp2_inband_statchg(sc);
1535 		} else {
1536 			sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
1537 			sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
1538 			mvpp2_mii_statchg(self);
1539 		}
1540 
1541 		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
1542 		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
1543 	}
1544 
1545 	if_attach(ifp);
1546 	ether_ifattach(ifp);
1547 
1548 	if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
1549 	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
1550 	    sc->sc_phy_mode == PHY_MODE_SGMII ||
1551 	    sc->sc_phy_mode == PHY_MODE_RGMII ||
1552 	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
1553 	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
1554 	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
1555 		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_MASK_REG);
1556 		reg |= MVPP2_GMAC_INT_CAUSE_LINK_CHANGE;
1557 		mvpp2_gmac_write(sc, MVPP2_GMAC_INT_MASK_REG, reg);
1558 	}
1559 
1560 	if (sc->sc_gop_id == 0) {
1561 		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_MASK_REG);
1562 		reg |= MV_XLG_INTERRUPT_LINK_CHANGE;
1563 		mvpp2_xlg_write(sc, MV_XLG_INTERRUPT_MASK_REG, reg);
1564 	}
1565 
1566 	mvpp2_gop_intr_unmask(sc);
1567 
1568 	idx = OF_getindex(sc->sc_node, "link", "interrupt-names");
1569 	if (idx >= 0)
1570 		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1571 		    mvpp2_link_intr, sc, sc->sc_dev.dv_xname);
1572 	idx = OF_getindex(sc->sc_node, "hif0", "interrupt-names");
1573 	if (idx < 0)
1574 		idx = OF_getindex(sc->sc_node, "tx-cpu0", "interrupt-names");
1575 	if (idx >= 0)
1576 		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1577 		    mvpp2_intr, sc, sc->sc_dev.dv_xname);
1578 }
1579 
/*
 * Deferred (mountroot) attach hook for SFP-connected ports: probe
 * the module's media, derive the phy-mode from it, and enable
 * link-change interrupts when in-band status is used.
 */
void
mvpp2_port_attach_sfp(struct device *self)
{
	struct mvpp2_port *sc = (struct mvpp2_port *)self;
	uint32_t reg;

	/* Serialize SFP bus access against other ports. */
	rw_enter(&mvpp2_sff_lock, RW_WRITE);
	sfp_disable(sc->sc_sfp);
	sfp_add_media(sc->sc_sfp, &sc->sc_mii);
	rw_exit(&mvpp2_sff_lock);

	/* Map the detected media onto a phy-mode with in-band status. */
	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_10G_SR:
	case IFM_10G_LR:
	case IFM_10G_LRM:
	case IFM_10G_ER:
	case IFM_10G_SFP_CU:
		sc->sc_phy_mode = PHY_MODE_10GBASER;
		sc->sc_mii.mii_media_status = IFM_AVALID;
		sc->sc_inband_status = 1;
		break;
	case IFM_2500_SX:
		sc->sc_phy_mode = PHY_MODE_2500BASEX;
		sc->sc_mii.mii_media_status = IFM_AVALID;
		sc->sc_inband_status = 1;
		break;
	case IFM_1000_CX:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->sc_phy_mode = PHY_MODE_1000BASEX;
		sc->sc_mii.mii_media_status = IFM_AVALID;
		sc->sc_inband_status = 1;
		break;
	}

	/* Unmask GMAC link-change interrupts for in-band status. */
	if (sc->sc_inband_status) {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_MASK_REG);
		reg |= MVPP2_GMAC_INT_CAUSE_LINK_CHANGE;
		mvpp2_gmac_write(sc, MVPP2_GMAC_INT_MASK_REG, reg);
	}
}
1622 
1623 uint32_t
1624 mvpp2_read(struct mvpp2_softc *sc, bus_addr_t addr)
1625 {
1626 	return bus_space_read_4(sc->sc_iot, sc->sc_ioh_base, addr);
1627 }
1628 
1629 void
1630 mvpp2_write(struct mvpp2_softc *sc, bus_addr_t addr, uint32_t data)
1631 {
1632 	bus_space_write_4(sc->sc_iot, sc->sc_ioh_base, addr, data);
1633 }
1634 
1635 uint32_t
1636 mvpp2_gmac_read(struct mvpp2_port *sc, bus_addr_t addr)
1637 {
1638 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1639 	    MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr);
1640 }
1641 
1642 void
1643 mvpp2_gmac_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1644 {
1645 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1646 	    MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr,
1647 	    data);
1648 }
1649 
1650 uint32_t
1651 mvpp2_xlg_read(struct mvpp2_port *sc, bus_addr_t addr)
1652 {
1653 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1654 	    MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr);
1655 }
1656 
1657 void
1658 mvpp2_xlg_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1659 {
1660 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1661 	    MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr,
1662 	    data);
1663 }
1664 
1665 uint32_t
1666 mvpp2_mpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
1667 {
1668 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1669 	    MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr);
1670 }
1671 
1672 void
1673 mvpp2_mpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1674 {
1675 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1676 	    MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr,
1677 	    data);
1678 }
1679 
1680 uint32_t
1681 mvpp2_xpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
1682 {
1683 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1684 	    MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr);
1685 }
1686 
1687 void
1688 mvpp2_xpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1689 {
1690 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1691 	    MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr,
1692 	    data);
1693 }
1694 
/*
 * Interface start routine: drain the send queue into the shared
 * aggregated TX queue until it is full or the queue is empty.
 */
void
mvpp2_start(struct ifnet *ifp)
{
	struct mvpp2_port *sc = ifp->if_softc;
	/* All ports share aggregated TX queue 0 of the controller. */
	struct mvpp2_tx_queue *txq = &sc->sc->sc_aggr_txqs[0];
	struct mbuf *m;
	int error, idx;

	/* Nothing to do unless running, not backed up, and link is up. */
	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	/* Track the producer index locally; commit once at the end. */
	idx = txq->prod;
	while (txq->cnt < MVPP2_AGGR_TXQ_SIZE) {
		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		error = mvpp2_encap(sc, m, &idx);
		if (error == ENOBUFS) {
			/*
			 * NOTE(review): the dequeued mbuf is dropped here
			 * rather than requeued before stalling the queue.
			 */
			m_freem(m); /* give up: drop it */
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		if (error == EFBIG) {
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (txq->prod != idx) {
		txq->prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
1743 
/*
 * DMA-load an mbuf and append its segments as descriptors to
 * aggregated TX queue 0.  On success *idx is advanced to the new
 * producer position (the caller publishes it to txq->prod).
 * Returns ENOBUFS if the mbuf cannot be mapped or the ring is full.
 */
int
mvpp2_encap(struct mvpp2_port *sc, struct mbuf *m, int *idx)
{
	struct mvpp2_tx_queue *txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_tx_desc *txd;
	bus_dmamap_t map;
	uint32_t command;
	int i, current, first, last;

	first = last = current = *idx;
	map = txq->buf[current].mb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
		return ENOBUFS;

	/* Keep two descriptors of slack in the ring. */
	if (map->dm_nsegs > (MVPP2_AGGR_TXQ_SIZE - txq->cnt - 2)) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* No checksum offload: tell the hardware to leave packets alone. */
	command = MVPP2_TXD_L4_CSUM_NOT |
	    MVPP2_TXD_IP_CSUM_DISABLE;
	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &txq->descs[current];
		memset(txd, 0, sizeof(*txd));
		/* Buffer address split: 32-byte aligned base + offset. */
		txd->buf_phys_addr_hw_cmd2 =
		    map->dm_segs[i].ds_addr & ~0x1f;
		txd->packet_offset =
		    map->dm_segs[i].ds_addr & 0x1f;
		txd->data_size = map->dm_segs[i].ds_len;
		txd->phys_txq = sc->sc_txqs[0].id;
		txd->command = command |
		    MVPP2_TXD_PADDING_DISABLE;
		/* Flag the first and last descriptor of the packet. */
		if (i == 0)
			txd->command |= MVPP2_TXD_F_DESC;
		if (i == (map->dm_nsegs - 1))
			txd->command |= MVPP2_TXD_L_DESC;

		bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring),
		    current * sizeof(*txd), sizeof(*txd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		last = current;
		current = (current + 1) % MVPP2_AGGR_TXQ_SIZE;
		KASSERT(current != txq->cons);
	}

	/*
	 * Park the loaded map and the mbuf in the LAST descriptor's
	 * slot (where mvpp2_txq_proc() unloads and frees them), and
	 * move that slot's spare map back to the first slot.
	 */
	KASSERT(txq->buf[last].mb_m == NULL);
	txq->buf[first].mb_map = txq->buf[last].mb_map;
	txq->buf[last].mb_map = map;
	txq->buf[last].mb_m = m;

	txq->cnt += map->dm_nsegs;
	*idx = current;

	/* Tell the hardware how many descriptors were appended. */
	mvpp2_write(sc->sc, MVPP2_AGGR_TXQ_UPDATE_REG, map->dm_nsegs);

	return 0;
}
1806 
/*
 * Interface ioctl handler.  Runs at splnet to serialize against the
 * interrupt path.
 */
int
mvpp2_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct mvpp2_port *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: just redo the filters below. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				mvpp2_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				mvpp2_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = mvpp2_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCGIFSFFPAGE:
		/* Serialize SFP EEPROM access; interruptible sleep. */
		error = rw_enter(&mvpp2_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
		rw_exit(&mvpp2_sff_lock);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		/* Reprogram promiscuous/multicast filters. */
		if (ifp->if_flags & IFF_RUNNING)
			mvpp2_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
1864 
1865 int
1866 mvpp2_rxrinfo(struct mvpp2_port *sc, struct if_rxrinfo *ifri)
1867 {
1868 	struct mvpp2_rx_queue *rxq;
1869 	struct if_rxring_info *ifrs, *ifr;
1870 	unsigned int i;
1871 	int error;
1872 
1873 	ifrs = mallocarray(sc->sc_nrxq, sizeof(*ifrs), M_TEMP,
1874 	    M_WAITOK|M_ZERO|M_CANFAIL);
1875 	if (ifrs == NULL)
1876 		return (ENOMEM);
1877 
1878 	for (i = 0; i < sc->sc_nrxq; i++) {
1879 		rxq = &sc->sc_rxqs[i];
1880 		ifr = &ifrs[i];
1881 
1882 		snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), "%u", i);
1883 		ifr->ifr_size = MCLBYTES;
1884 		ifr->ifr_info = rxq->rxring;
1885 	}
1886 
1887 	error = if_rxr_info_ioctl(ifri, i, ifrs);
1888 	free(ifrs, M_TEMP, i * sizeof(*ifrs));
1889 
1890 	return (error);
1891 }
1892 
/*
 * TX watchdog; armed via if_timer in mvpp2_start() and cleared in
 * mvpp2_txq_proc() when the aggregated ring drains.  Currently only
 * logs -- it performs no reset or recovery.
 */
void
mvpp2_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}
1898 
1899 int
1900 mvpp2_media_change(struct ifnet *ifp)
1901 {
1902 	struct mvpp2_port *sc = ifp->if_softc;
1903 
1904 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1905 		mii_mediachg(&sc->sc_mii);
1906 
1907 	return (0);
1908 }
1909 
1910 void
1911 mvpp2_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1912 {
1913 	struct mvpp2_port *sc = ifp->if_softc;
1914 
1915 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1916 		mii_pollstat(&sc->sc_mii);
1917 
1918 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1919 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1920 }
1921 
1922 int
1923 mvpp2_mii_readreg(struct device *self, int phy, int reg)
1924 {
1925 	struct mvpp2_port *sc = (void *)self;
1926 	return sc->sc_mdio->md_readreg(sc->sc_mdio->md_cookie, phy, reg);
1927 }
1928 
1929 void
1930 mvpp2_mii_writereg(struct device *self, int phy, int reg, int val)
1931 {
1932 	struct mvpp2_port *sc = (void *)self;
1933 	return sc->sc_mdio->md_writereg(sc->sc_mdio->md_cookie, phy, reg, val);
1934 }
1935 
/* MII status-change callback: propagate the new link state to the MAC. */
void
mvpp2_mii_statchg(struct device *self)
{
	mvpp2_port_change((struct mvpp2_port *)self);
}
1942 
/*
 * Derive link status and active media from the MAC status registers
 * when in-band (serdes) status reporting is in use, then push the
 * result to the MAC via mvpp2_port_change().
 */
void
mvpp2_inband_statchg(struct mvpp2_port *sc)
{
	/* Capture the previously selected subtype before resetting. */
	uint64_t subtype = IFM_SUBTYPE(sc->sc_mii.mii_media_active);
	uint32_t reg;

	sc->sc_mii.mii_media_status = IFM_AVALID;
	sc->sc_mii.mii_media_active = IFM_ETHER;

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		/* 10G/XAUI: link bit lives in the XLG MAC. */
		reg = mvpp2_xlg_read(sc, MV_XLG_MAC_PORT_STATUS_REG);
		if (reg & MV_XLG_MAC_PORT_STATUS_LINKSTATUS)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		sc->sc_mii.mii_media_active |= IFM_FDX;
		sc->sc_mii.mii_media_active |= subtype;
	} else {
		/* GMAC: link, duplex and speed from port status 0. */
		reg = mvpp2_gmac_read(sc, MVPP2_PORT_STATUS0_REG);
		if (reg & MVPP2_PORT_STATUS0_LINKUP)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		if (reg & MVPP2_PORT_STATUS0_FULLDX)
			sc->sc_mii.mii_media_active |= IFM_FDX;
		/* Fixed-subtype serdes modes keep the previous subtype. */
		if (sc->sc_phy_mode == PHY_MODE_2500BASEX)
			sc->sc_mii.mii_media_active |= subtype;
		else if (sc->sc_phy_mode == PHY_MODE_1000BASEX)
			sc->sc_mii.mii_media_active |= subtype;
		else if (reg & MVPP2_PORT_STATUS0_GMIISPEED)
			sc->sc_mii.mii_media_active |= IFM_1000_T;
		else if (reg & MVPP2_PORT_STATUS0_MIISPEED)
			sc->sc_mii.mii_media_active |= IFM_100_TX;
		else
			sc->sc_mii.mii_media_active |= IFM_10_T;
	}

	mvpp2_port_change(sc);
}
1979 
/*
 * Update sc_link from the current MII status and, unless in-band
 * status reporting is in use (then nothing is forced here), force the
 * MAC's link state up or down to match.
 */
void
mvpp2_port_change(struct mvpp2_port *sc)
{
	uint32_t reg;

	sc->sc_link = !!(sc->sc_mii.mii_media_status & IFM_ACTIVE);

	if (sc->sc_inband_status)
		return;

	if (sc->sc_link) {
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			/* XLG MAC: force link up. */
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKPASS;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			/* GMAC: force link up and mirror speed/duplex. */
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
			reg |= MVPP2_GMAC_FORCE_LINK_PASS;
			reg &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			/* All gigabit-class media select GMII speed. */
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_2500_KX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_2500_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_CX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_LX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_KX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T)
				reg |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX)
				reg |= MVPP2_GMAC_CONFIG_MII_SPEED;
			if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
				reg |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	} else {
		/* Link lost: force it down on whichever MAC is active. */
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
			reg |= MVPP2_GMAC_FORCE_LINK_DOWN;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	}
}
2033 
2034 void
2035 mvpp2_tick(void *arg)
2036 {
2037 	struct mvpp2_port *sc = arg;
2038 	int s;
2039 
2040 	s = splnet();
2041 	mii_tick(&sc->sc_mii);
2042 	splx(s);
2043 
2044 	timeout_add_sec(&sc->sc_tick, 1);
2045 }
2046 
/*
 * Link-change interrupt handler: read the cause register of whichever
 * MAC owns this port and, if a link-change event fired while in-band
 * status is in use, refresh the media state from the hardware.
 */
int
mvpp2_link_intr(void *arg)
{
	struct mvpp2_port *sc = arg;
	uint32_t reg;
	int event = 0;

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_CAUSE_REG);
		if (reg & MV_XLG_INTERRUPT_LINK_CHANGE)
			event = 1;
	} else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
	    sc->sc_phy_mode == PHY_MODE_SGMII ||
	    sc->sc_phy_mode == PHY_MODE_RGMII ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_CAUSE_REG);
		if (reg & MVPP2_GMAC_INT_CAUSE_LINK_CHANGE)
			event = 1;
	}

	if (event && sc->sc_inband_status)
		mvpp2_inband_statchg(sc);

	return (1);
}
2076 
/*
 * Per-port RX/TX interrupt handler.  The cause register multiplexes
 * misc, TX-done (one bit per TX queue) and RX-occupied (one bit per
 * RX queue) events.
 */
int
mvpp2_intr(void *arg)
{
	struct mvpp2_port *sc = arg;
	uint32_t reg;

	reg = mvpp2_read(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id));
	if (reg & MVPP2_CAUSE_MISC_SUM_MASK) {
		/* Acknowledge misc events and clear the summary bit. */
		mvpp2_write(sc->sc, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id),
		    reg & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}
	if (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_tx_proc(sc,
		    (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK) >>
		    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET);

	if (reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_rx_proc(sc,
		    reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK);

	return (1);
}
2100 
2101 void
2102 mvpp2_tx_proc(struct mvpp2_port *sc, uint8_t queues)
2103 {
2104 	struct mvpp2_tx_queue *txq;
2105 	int i;
2106 
2107 	for (i = 0; i < sc->sc_ntxq; i++) {
2108 		txq = &sc->sc_txqs[i];
2109 		if ((queues & (1 << i)) == 0)
2110 			continue;
2111 		mvpp2_txq_proc(sc, txq);
2112 	}
2113 }
2114 
/*
 * Reclaim completed transmissions.  The hardware reports completion
 * per port TX queue, but all packets live in aggregated queue 0, so
 * buffers are consumed from there in order.
 */
void
mvpp2_txq_proc(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvpp2_tx_queue *aggr_txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_buf *txb;
	int i, idx, nsent;

	/*
	 * NOTE(review): this read presumably also clears the hardware
	 * sent count (mvpp2_txq_hw_init() ends with a bare read of the
	 * same register) -- confirm read-to-clear semantics.
	 */
	nsent = (mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id)) &
	    MVPP2_TRANSMITTED_COUNT_MASK) >>
	    MVPP2_TRANSMITTED_COUNT_OFFSET;

	for (i = 0; i < nsent; i++) {
		idx = aggr_txq->cons;
		KASSERT(idx < MVPP2_AGGR_TXQ_SIZE);

		/* The packet's mbuf/map were parked in its last slot. */
		txb = &aggr_txq->buf[idx];
		if (txb->mb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
			    txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->mb_map);

			m_freem(txb->mb_m);
			txb->mb_m = NULL;
		}

		aggr_txq->cnt--;
		aggr_txq->cons = (aggr_txq->cons + 1) % MVPP2_AGGR_TXQ_SIZE;
	}

	/* Ring empty: disarm the TX watchdog. */
	if (aggr_txq->cnt == 0)
		ifp->if_timer = 0;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
2151 
2152 void
2153 mvpp2_rx_proc(struct mvpp2_port *sc, uint8_t queues)
2154 {
2155 	struct mvpp2_rx_queue *rxq;
2156 	int i;
2157 
2158 	for (i = 0; i < sc->sc_nrxq; i++) {
2159 		rxq = &sc->sc_rxqs[i];
2160 		if ((queues & (1 << i)) == 0)
2161 			continue;
2162 		mvpp2_rxq_proc(sc, rxq);
2163 	}
2164 
2165 	mvpp2_rx_refill(sc);
2166 }
2167 
/*
 * Drain received packets from one RX queue into the network stack.
 * Buffers come from this CPU's buffer-manager pool; consumed slot
 * indices are parked on the pool freelist for mvpp2_rx_refill().
 */
void
mvpp2_rxq_proc(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mvpp2_rx_desc *rxd;
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	struct mbuf *m;
	uint64_t virt;
	uint32_t i, nrecv, pool;

	nrecv = mvpp2_rxq_received(sc, rxq->id);
	if (!nrecv)
		return;

	/* One buffer pool per CPU. */
	pool = curcpu()->ci_cpuid;
	KASSERT(pool < sc->sc->sc_npools);
	bm = &sc->sc->sc_bm_pools[pool];

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < nrecv; i++) {
		rxd = &rxq->descs[rxq->cons];
		/*
		 * The cookie encodes the pool in bits 16-31 and the
		 * rxbuf index in bits 0-15.
		 */
		virt = rxd->buf_cookie_bm_qset_cls_info;
		KASSERT(((virt >> 16) & 0xffff) == pool);
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m != NULL);

		bus_dmamap_sync(sc->sc_dmat, rxb->mb_map, 0,
		    rxd->data_size, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->mb_map);

		m = rxb->mb_m;
		rxb->mb_m = NULL;

		/* Strip the Marvell header (MVPP2_MH_SIZE bytes). */
		m->m_pkthdr.len = m->m_len = rxd->data_size;
		m_adj(m, MVPP2_MH_SIZE);
		ml_enqueue(&ml, m);

		/* Queue the now-empty slot for refill. */
		KASSERT(bm->freelist[bm->free_prod] == -1);
		bm->freelist[bm->free_prod] = virt & 0xffffffff;
		bm->free_prod = (bm->free_prod + 1) % MVPP2_BM_SIZE;

		rxq->cons = (rxq->cons + 1) % MVPP2_NRXDESC;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Return the processed descriptors to the hardware. */
	mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);

	if_input(ifp, &ml);
}
2226 
2227 /*
2228  * We have a pool per core, and since we should not assume that
2229  * RX buffers are always used in order, keep a list of rxbuf[]
2230  * indices that should be filled with an mbuf, if possible.
2231  */
void
mvpp2_rx_refill(struct mvpp2_port *sc)
{
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	uint64_t phys, virt;
	int pool;

	/* Refill only this CPU's pool. */
	pool = curcpu()->ci_cpuid;
	KASSERT(pool < sc->sc->sc_npools);
	bm = &sc->sc->sc_bm_pools[pool];

	while (bm->free_cons != bm->free_prod) {
		KASSERT(bm->freelist[bm->free_cons] != -1);
		virt = bm->freelist[bm->free_cons];
		KASSERT(((virt >> 16) & 0xffff) == pool);
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m == NULL);

		/* mbuf shortage: leave the slot queued; retry later. */
		rxb->mb_m = mvpp2_alloc_mbuf(sc->sc, rxb->mb_map);
		if (rxb->mb_m == NULL)
			break;

		bm->freelist[bm->free_cons] = -1;
		bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;

		/*
		 * Release the buffer to the buffer manager: high
		 * address bits first, then the virt and phys low words.
		 */
		phys = rxb->mb_map->dm_segs[0].ds_addr;
		mvpp2_write(sc->sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
		    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
		    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
		    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
		mvpp2_write(sc->sc, MVPP2_BM_VIRT_RLS_REG,
		    virt & 0xffffffff);
		mvpp2_write(sc->sc, MVPP2_BM_PHY_RLS_REG(pool),
		    phys & 0xffffffff);
	}
}
2270 
/*
 * Bring the interface up: program parser entries, initialize TX/RX
 * queues, configure media, filters and the MAC, unmask interrupts
 * and start the PHY tick.
 */
void
mvpp2_up(struct mvpp2_port *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	if (sc->sc_sfp) {
		rw_enter(&mvpp2_sff_lock, RW_WRITE);
		sfp_enable(sc->sc_sfp);
		rw_exit(&mvpp2_sff_lock);
	}

	/* Accept broadcast and our own station address. */
	mvpp2_prs_mac_da_accept(sc, etherbroadcastaddr, 1);
	mvpp2_prs_mac_da_accept(sc, sc->sc_lladdr, 1);
	mvpp2_prs_tag_mode_set(sc->sc, sc->sc_id, MVPP2_TAG_TYPE_MH);
	mvpp2_prs_def_flow(sc);

	for (i = 0; i < sc->sc_ntxq; i++)
		mvpp2_txq_hw_init(sc, &sc->sc_txqs[i]);

	mvpp2_tx_time_coal_set(sc, sc->sc_tx_time_coal);

	for (i = 0; i < sc->sc_nrxq; i++)
		mvpp2_rxq_hw_init(sc, &sc->sc_rxqs[i]);

	/* FIXME: rx buffer fill */

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	mvpp2_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	mvpp2_txp_max_tx_size_set(sc);

	/* XXX: single vector */
	mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id),
	    MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_MISC_SUM_MASK);
	mvpp2_interrupts_enable(sc, (1 << 0));

	mvpp2_mac_config(sc);
	mvpp2_egress_enable(sc);
	mvpp2_ingress_enable(sc);

	timeout_add_sec(&sc->sc_tick, 1);
}
2323 
/*
 * Allocate and program one aggregated TX queue: the descriptor ring
 * DMA memory plus one software DMA map per descriptor slot.
 */
void
mvpp2_aggr_txq_hw_init(struct mvpp2_softc *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int i;

	/* 32-byte aligned descriptor ring. */
	txq->ring = mvpp2_dmamem_alloc(sc,
	    MVPP2_AGGR_TXQ_SIZE * sizeof(struct mvpp2_tx_desc), 32);
	KASSERT(txq->ring != NULL);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_AGGR_TXQ_SIZE, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVPP2_AGGR_TXQ_SIZE; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	/* Resume from the hardware's current producer index. */
	txq->prod = mvpp2_read(sc, MVPP2_AGGR_TXQ_INDEX_REG(txq->id));
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_ADDR_REG(txq->id),
	    MVPP2_DMA_DVA(txq->ring) >> MVPP22_DESC_ADDR_OFFS);
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_SIZE_REG(txq->id),
	    MVPP2_AGGR_TXQ_SIZE);
}
2354 
/*
 * Allocate and program one per-port TX queue: ring memory, indirect
 * queue registers, prefetch buffer and WRR/EJP scheduler parameters.
 */
void
mvpp2_txq_hw_init(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int desc, desc_per_txq;
	uint32_t reg;
	int i;

	txq->prod = txq->cons = txq->cnt = 0;

	txq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NTXDESC * sizeof(struct mvpp2_tx_desc), 32);
	KASSERT(txq->ring != NULL);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_NTXDESC, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVPP2_NTXDESC; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	/* Indirect access: select the queue, then program it. */
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(txq->ring));
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG,
	    MVPP2_NTXDESC & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(sc->sc, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_RSVD_CLR_REG,
	    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	reg = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG);
	reg &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PENDING_REG, reg);

	/* Carve out this queue's slice of the descriptor prefetch buffer. */
	desc_per_txq = 16;
	desc = (sc->sc_id * MVPP2_MAX_TXQ * desc_per_txq) +
	    (txq->log_id * desc_per_txq);

	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG,
	    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
	    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	mvpp2_write(sc->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(sc));

	reg = mvpp2_read(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	reg &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	reg |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	reg |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), reg);

	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
	    MVPP2_TXQ_TOKEN_SIZE_MAX);

	mvpp2_tx_pkts_coal_set(sc, txq, txq->done_pkts_coal);

	/*
	 * NOTE(review): bare read appears intended to discard a stale
	 * sent count -- confirm read-to-clear semantics of this register.
	 */
	mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id));
}
2421 
/*
 * Allocate and program one RX queue: descriptor ring memory, indirect
 * queue registers and coalescing parameters, then hand all descriptors
 * to the hardware.
 */
void
mvpp2_rxq_hw_init(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	rxq->prod = rxq->cons = 0;

	/* 32-byte aligned descriptor ring. */
	rxq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NRXDESC * sizeof(struct mvpp2_rx_desc), 32);
	KASSERT(rxq->ring != NULL);
	rxq->descs = MVPP2_DMA_KVA(rxq->ring);

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring),
	    0, MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Indirect access: select the queue, then program it. */
	mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(rxq->ring) >> MVPP22_DESC_ADDR_OFFS);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, MVPP2_NRXDESC);
	mvpp2_write(sc->sc, MVPP2_RXQ_INDEX_REG, 0);
	mvpp2_rxq_offset_set(sc, rxq->id, 0);
	mvpp2_rx_pkts_coal_set(sc, rxq, rxq->pkts_coal);
	mvpp2_rx_time_coal_set(sc, rxq, rxq->time_coal);
	/* Make all MVPP2_NRXDESC descriptors available to the hardware. */
	mvpp2_rxq_status_update(sc, rxq->id, 0, MVPP2_NRXDESC);
}
2447 
/*
 * Put the port MACs into reset: the GMAC via PORTMACRESET, and on
 * GoP 0 also the XLG MAC by clearing MACRESETN (active-low, per the
 * "N" suffix).
 */
void
mvpp2_mac_reset_assert(struct mvpp2_port *sc)
{
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
	    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) |
	    MVPP2_PORT_CTRL2_PORTMACRESET);
	if (sc->sc_gop_id == 0)
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG,
		    mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
		    ~MV_XLG_MAC_CTRL0_MACRESETN);
}
2459 
/*
 * Hold the MPCS and XPCS blocks in reset.  Only GoP 0 has these.
 * NOTE(review): the *_RESET bits here appear to be active-low --
 * asserting reset CLEARS them (mvpp2_pcs_reset_deassert() sets
 * them) -- confirm against the datasheet.
 */
void
mvpp2_pcs_reset_assert(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc_gop_id != 0)
		return;

	reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
	reg |= MVPP22_MPCS_CLK_DIV_PHASE_SET;
	reg &= ~MVPP22_MPCS_TX_SD_CLK_RESET;
	reg &= ~MVPP22_MPCS_RX_SD_CLK_RESET;
	reg &= ~MVPP22_MPCS_MAC_CLK_RESET;
	mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
	reg &= ~MVPP22_XPCS_PCSRESET;
	mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
}
2478 
/*
 * Release from reset the PCS block that matches the configured phy
 * mode: the MPCS for 10GBase-R, the XPCS for XAUI.  Only GoP 0 has
 * these blocks.
 */
void
mvpp2_pcs_reset_deassert(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc_gop_id != 0)
		return;

	if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
		reg &= ~MVPP22_MPCS_CLK_DIV_PHASE_SET;
		reg |= MVPP22_MPCS_TX_SD_CLK_RESET;
		reg |= MVPP22_MPCS_RX_SD_CLK_RESET;
		reg |= MVPP22_MPCS_MAC_CLK_RESET;
		mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	} else if (sc->sc_phy_mode == PHY_MODE_XAUI) {
		reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
		reg |= MVPP22_XPCS_PCSRESET;
		mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
	}
}
2500 
/*
 * Full MAC (re)configuration sequence: force the link down, reset
 * MAC and PCS, reprogram serdes + MAC + GoP, release the resets and
 * re-enable the port.  Called from mvpp2_up().
 */
void
mvpp2_mac_config(struct mvpp2_port *sc)
{
	uint32_t reg;

	/* Force the link down while reconfiguring. */
	reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
	reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
	reg |= MVPP2_GMAC_FORCE_LINK_DOWN;
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
	if (sc->sc_gop_id == 0) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
		reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
		reg |= MV_XLG_MAC_CTRL0_FORCELINKDOWN;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
	}

	mvpp2_port_disable(sc);

	mvpp2_mac_reset_assert(sc);
	mvpp2_pcs_reset_assert(sc);

	mvpp2_gop_intr_mask(sc);
	/* Power the serdes lane down, reconfigure, power it back up. */
	mvpp2_comphy_config(sc, 0);

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI))
		mvpp2_xlg_config(sc);
	else
		mvpp2_gmac_config(sc);

	mvpp2_comphy_config(sc, 1);
	mvpp2_gop_config(sc);

	mvpp2_pcs_reset_deassert(sc);

	/* Select 10G or GMAC mode on the GoP 0 XLG mux. */
	if (sc->sc_gop_id == 0) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL3_REG);
		reg &= ~MV_XLG_MAC_CTRL3_MACMODESELECT_MASK;
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI)
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_10G;
		else
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_GMAC;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL3_REG, reg);
	}

	/* Program the maximum receive frame size. */
	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL1_REG);
		reg &= ~MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL1_REG, reg);
	} else {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_CTRL_0_REG);
		reg &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MVPP2_GMAC_MAX_RX_SIZE_OFFS;
		mvpp2_gmac_write(sc, MVPP2_GMAC_CTRL_0_REG, reg);
	}

	mvpp2_gop_intr_unmask(sc);

	/* Release the GMAC from reset and wait for it to come out. */
	if (!(sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
		    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) &
		    ~MVPP2_PORT_CTRL2_PORTMACRESET);
		while (mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) &
		    MVPP2_PORT_CTRL2_PORTMACRESET)
			;
	}

	mvpp2_port_enable(sc);

	if (sc->sc_inband_status) {
		/* In-band status: stop forcing the link either way. */
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
		reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
		reg &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
		mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		if (sc->sc_gop_id == 0) {
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		}
	} else
		mvpp2_port_change(sc);
}
2590 
2591 void
2592 mvpp2_xlg_config(struct mvpp2_port *sc)
2593 {
2594 	uint32_t ctl0, ctl4;
2595 
2596 	ctl0 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
2597 	ctl4 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL4_REG);
2598 
2599 	ctl0 |= MV_XLG_MAC_CTRL0_MACRESETN;
2600 	ctl4 &= ~MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK;
2601 	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_PFC_EN;
2602 	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN;
2603 
2604 	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, ctl0);
2605 	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL4_REG, ctl0);
2606 
2607 	/* Port reset */
2608 	while ((mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
2609 	    MV_XLG_MAC_CTRL0_MACRESETN) == 0)
2610 		;
2611 }
2612 
/*
 * Configure the GMAC control registers according to the configured
 * phy mode, and set up auto-negotiation when in-band status is used.
 */
void
mvpp2_gmac_config(struct mvpp2_port *sc)
{
	uint32_t ctl0, ctl2, ctl4, panc;

	/* Setup phy. */
	ctl0 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL0_REG);
	ctl2 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG);
	ctl4 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL4_REG);
	panc = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);

	/* Start from a clean slate for the mode-dependent bits. */
	ctl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK |
	    MVPP2_GMAC_INBAND_AN_MASK);
	panc &= ~(MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
	    MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FC_ADV_EN |
	    MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
	    MVPP2_GMAC_IN_BAND_AUTONEG);

	switch (sc->sc_phy_mode) {
	case PHY_MODE_XAUI:
	case PHY_MODE_10GBASER:
		/* Handled by the XLG MAC; nothing to do here. */
		break;
	case PHY_MODE_2500BASEX:
	case PHY_MODE_1000BASEX:
		/* Serdes without in-band AN at the PCS level. */
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	case PHY_MODE_SGMII:
		/* Serdes with in-band AN. */
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl2 |= MVPP2_GMAC_INBAND_AN_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	case PHY_MODE_RGMII:
	case PHY_MODE_RGMII_ID:
	case PHY_MODE_RGMII_RXID:
	case PHY_MODE_RGMII_TXID:
		/* External GMII/RGMII pins, no PCS. */
		ctl4 &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	}

	/* Use Auto-Negotiation for Inband Status only */
	if (sc->sc_inband_status) {
		panc &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		panc |= MVPP2_GMAC_IN_BAND_AUTONEG;
		/* TODO: read mode from SFP */
		if (sc->sc_phy_mode == PHY_MODE_SGMII) {
			/* SGMII */
			panc |= MVPP2_GMAC_AN_SPEED_EN;
			panc |= MVPP2_GMAC_AN_DUPLEX_EN;
		} else {
			/* 802.3z */
			ctl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
			panc |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			panc |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		}
	}

	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL0_REG, ctl0);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG, ctl2);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL4_REG, ctl4);
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, panc);
}
2687 
/*
 * Arguments for the secure firmware (SMC) service that powers the
 * Marvell COMPHY serdes lanes on and off; used by
 * mvpp2_comphy_config() below.
 */
#define COMPHY_BASE		0x120000
#define COMPHY_SIP_POWER_ON	0x82000001
#define COMPHY_SIP_POWER_OFF	0x82000002
#define COMPHY_SPEED(x)		((x) << 2)
#define  COMPHY_SPEED_1_25G		0 /* SGMII 1G */
#define  COMPHY_SPEED_2_5G		1
#define  COMPHY_SPEED_3_125G		2 /* SGMII 2.5G */
#define  COMPHY_SPEED_5G		3
#define  COMPHY_SPEED_5_15625G		4 /* XFI 5G */
#define  COMPHY_SPEED_6G		5
#define  COMPHY_SPEED_10_3125G		6 /* XFI 10G */
#define COMPHY_UNIT(x)		((x) << 8)
#define COMPHY_MODE(x)		((x) << 12)
#define  COMPHY_MODE_SATA		1
#define  COMPHY_MODE_SGMII		2 /* SGMII 1G */
#define  COMPHY_MODE_HS_SGMII		3 /* SGMII 2.5G */
#define  COMPHY_MODE_USB3H		4
#define  COMPHY_MODE_USB3D		5
#define  COMPHY_MODE_PCIE		6
#define  COMPHY_MODE_RXAUI		7
#define  COMPHY_MODE_XFI		8
#define  COMPHY_MODE_SFI		9
#define  COMPHY_MODE_USB3		10
#define  COMPHY_MODE_AP			11
2712 
/*
 * Power a COMPHY serdes lane on or off through a secure firmware
 * call.  Lane and unit come from the device tree "phys" property;
 * speed and mode are derived from the configured phy mode.
 */
void
mvpp2_comphy_config(struct mvpp2_port *sc, int on)
{
	int node, phys[2], lane, unit;
	uint32_t mode;

	if (OF_getpropintarray(sc->sc_node, "phys", phys, sizeof(phys)) !=
	    sizeof(phys))
		return;
	node = OF_getnodebyphandle(phys[0]);
	if (!node)
		return;

	lane = OF_getpropint(node, "reg", 0);
	unit = phys[1];

	switch (sc->sc_phy_mode) {
	case PHY_MODE_XAUI:
		mode = COMPHY_MODE(COMPHY_MODE_RXAUI) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_10GBASER:
		mode = COMPHY_MODE(COMPHY_MODE_XFI) |
		    COMPHY_SPEED(COMPHY_SPEED_10_3125G) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_2500BASEX:
		mode = COMPHY_MODE(COMPHY_MODE_HS_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_3_125G) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_1000BASEX:
	case PHY_MODE_SGMII:
		mode = COMPHY_MODE(COMPHY_MODE_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_1_25G) |
		    COMPHY_UNIT(unit);
		break;
	default:
		/* Remaining modes (e.g. RGMII) use no serdes lane. */
		return;
	}

	if (on)
		smc_call(COMPHY_SIP_POWER_ON, sc->sc->sc_ioh_paddr + COMPHY_BASE,
		    lane, mode);
	else
		smc_call(COMPHY_SIP_POWER_OFF, sc->sc->sc_ioh_paddr + COMPHY_BASE,
		    lane, 0);
}
2761 
/*
 * Per-mode "group of ports" (GOP) configuration in the system
 * controller: selects the RGMII, SGMII/1000baseX or 10GBase-R data
 * path for this port, then resets and enables its GOP block.
 * No-op when the system-controller regmap is not available.
 */
void
mvpp2_gop_config(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc->sc_rm == NULL)
		return;

	if (sc->sc_phy_mode == PHY_MODE_RGMII ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
		/* No RGMII configuration is done for GOP 0. */
		if (sc->sc_gop_id == 0)
			return;
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
		reg |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
		regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);
		/* Select RGMII for the GOP this port belongs to. */
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_CTRL0);
		if (sc->sc_gop_id == 2)
			reg |= GENCONF_CTRL0_PORT0_RGMII |
			    GENCONF_CTRL0_PORT1_RGMII;
		else if (sc->sc_gop_id == 3)
			reg |= GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write_4(sc->sc->sc_rm, GENCONF_CTRL0, reg);
	} else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
	    sc->sc_phy_mode == PHY_MODE_SGMII) {
		reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
		reg |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
		    GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
		regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);
		if (sc->sc_gop_id > 1) {
			/* GOP 2/3: make sure the RGMII selection is off. */
			reg = regmap_read_4(sc->sc->sc_rm, GENCONF_CTRL0);
			if (sc->sc_gop_id == 2)
				reg &= ~GENCONF_CTRL0_PORT0_RGMII;
			else if (sc->sc_gop_id == 3)
				reg &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
			regmap_write_4(sc->sc->sc_rm, GENCONF_CTRL0, reg);
		}
	} else if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
		/* Only GOP 0 is configured for 10GBase-R. */
		if (sc->sc_gop_id != 0)
			return;
		/* XPCS: clear PCS mode, activate two lanes. */
		reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
		reg &= ~MVPP22_XPCS_PCSMODE_MASK;
		reg &= ~MVPP22_XPCS_LANEACTIVE_MASK;
		reg |= 2 << MVPP22_XPCS_LANEACTIVE_OFFS;
		mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
		/* MPCS: disable FEC, restore default clock division. */
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS40G_COMMON_CONTROL);
		reg &= ~MVPP22_MPCS_FORWARD_ERROR_CORRECTION_MASK;
		mvpp2_mpcs_write(sc, MVPP22_MPCS40G_COMMON_CONTROL, reg);
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
		reg &= ~MVPP22_MPCS_CLK_DIVISION_RATIO_MASK;
		reg |= MVPP22_MPCS_CLK_DIVISION_RATIO_DEFAULT;
		mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	} else
		return;

	/* Common tail: reset and enable this port's GOP block. */
	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL1);
	reg |= GENCONF_PORT_CTRL1_RESET(sc->sc_gop_id) |
	    GENCONF_PORT_CTRL1_EN(sc->sc_gop_id);
	regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL1, reg);

	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
	reg |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);

	reg = regmap_read_4(sc->sc->sc_rm, GENCONF_SOFT_RESET1);
	reg |= GENCONF_SOFT_RESET1_GOP;
	regmap_write_4(sc->sc->sc_rm, GENCONF_SOFT_RESET1, reg);
}
2832 
2833 void
2834 mvpp2_gop_intr_mask(struct mvpp2_port *sc)
2835 {
2836 	uint32_t reg;
2837 
2838 	if (sc->sc_gop_id == 0) {
2839 		reg = mvpp2_xlg_read(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
2840 		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG;
2841 		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG;
2842 		mvpp2_xlg_write(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg);
2843 	}
2844 
2845 	reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_SUM_MASK_REG);
2846 	reg &= ~MVPP2_GMAC_INT_SUM_CAUSE_LINK_CHANGE;
2847 	mvpp2_gmac_write(sc, MVPP2_GMAC_INT_SUM_MASK_REG, reg);
2848 }
2849 
2850 void
2851 mvpp2_gop_intr_unmask(struct mvpp2_port *sc)
2852 {
2853 	uint32_t reg;
2854 
2855 	reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_SUM_MASK_REG);
2856 	reg |= MVPP2_GMAC_INT_SUM_CAUSE_LINK_CHANGE;
2857 	mvpp2_gmac_write(sc, MVPP2_GMAC_INT_SUM_MASK_REG, reg);
2858 
2859 	if (sc->sc_gop_id == 0) {
2860 		reg = mvpp2_xlg_read(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
2861 		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
2862 		    sc->sc_phy_mode == PHY_MODE_XAUI)
2863 			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG;
2864 		else
2865 			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG;
2866 		mvpp2_xlg_write(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg);
2867 	}
2868 }
2869 
/*
 * Stop the port: halt TX/RX, flush and tear down all queues, and
 * power down the attached SFP, if any.
 */
void
mvpp2_down(struct mvpp2_port *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg;
	int i;

	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	mvpp2_egress_disable(sc);
	mvpp2_ingress_disable(sc);

	/* Hold MAC and PCS in reset while the queues are dismantled. */
	mvpp2_mac_reset_assert(sc);
	mvpp2_pcs_reset_assert(sc);

	/* XXX: single vector */
	mvpp2_interrupts_disable(sc, (1 << 0));
	mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id), 0);

	/* Enable TX flush for this port while its queues go away. */
	reg = mvpp2_read(sc->sc, MVPP2_TX_PORT_FLUSH_REG);
	reg |= MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
	mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);

	for (i = 0; i < sc->sc_ntxq; i++)
		mvpp2_txq_hw_deinit(sc, &sc->sc_txqs[i]);

	/* Flush done; clear the flush bit again. */
	reg &= ~MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
	mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);

	for (i = 0; i < sc->sc_nrxq; i++)
		mvpp2_rxq_hw_deinit(sc, &sc->sc_rxqs[i]);

	/* Power down the SFP cage under the shared SFF lock. */
	if (sc->sc_sfp) {
		rw_enter(&mvpp2_sff_lock, RW_WRITE);
		sfp_disable(sc->sc_sfp);
		rw_exit(&mvpp2_sff_lock);
	}
}
2912 
/*
 * Drain and dismantle one TX queue: enable drain mode, wait for the
 * pending count to reach zero, detach the descriptor ring from the
 * hardware, and free all software resources (mbufs, DMA maps, ring).
 */
void
mvpp2_txq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int i, pending;
	uint32_t reg;

	/* Select the queue and put it into drain mode. */
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	reg = mvpp2_read(sc->sc, MVPP2_TXQ_PREF_BUF_REG);
	reg |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);

	/*
	 * the queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	i = 0;
	do {
		if (i >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			printf("%s: port %d: cleaning queue %d timed out\n",
			    sc->sc_dev.dv_xname, sc->sc_id, txq->log_id);
			break;
		}
		delay(1000);
		i++;

		pending = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG) &
		    MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	/* Leave drain mode again. */
	reg &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);

	/* Detach the descriptor ring from the hardware. */
	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG, 0);
	/* Result intentionally discarded; presumably read-to-clear. */
	mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id));

	/* Free any mbufs still attached and destroy every DMA map. */
	for (i = 0; i < MVPP2_NTXDESC; i++) {
		txb = &txq->buf[i];
		if (txb->mb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
			    txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->mb_map);
			m_freem(txb->mb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->mb_map);
	}

	mvpp2_dmamem_free(sc->sc, txq->ring);
	free(txq->buf, M_DEVBUF, sizeof(struct mvpp2_buf) *
	    MVPP2_NTXDESC);
}
2967 
/*
 * Return every received-but-unprocessed buffer in "rxq" to its
 * buffer-manager (BM) pool so the queue can be torn down without
 * leaking BM buffers.
 */
void
mvpp2_rxq_hw_drop(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	struct mvpp2_rx_desc *rxd;
	struct mvpp2_bm_pool *bm;
	uint64_t phys, virt;
	uint32_t i, nrecv, pool;
	struct mvpp2_buf *rxb;

	nrecv = mvpp2_rxq_received(sc, rxq->id);
	if (!nrecv)
		return;

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < nrecv; i++) {
		rxd = &rxq->descs[rxq->cons];
		/* Cookie: pool ID in bits 16-31, buffer index in 0-15. */
		virt = rxd->buf_cookie_bm_qset_cls_info;
		pool = (virt >> 16) & 0xffff;
		KASSERT(pool < sc->sc->sc_npools);
		bm = &sc->sc->sc_bm_pools[pool];
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m != NULL);
		virt &= 0xffffffff;
		phys = rxb->mb_map->dm_segs[0].ds_addr;
		/*
		 * NOTE(review): virt was masked to 32 bits just above, so
		 * the high-virt field written here is always zero —
		 * presumably cookies fit in 32 bits; confirm against the
		 * BM pool setup code.
		 */
		mvpp2_write(sc->sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
		    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
		    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
		    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
		mvpp2_write(sc->sc, MVPP2_BM_VIRT_RLS_REG,
		    virt & 0xffffffff);
		/* Writing the physical address performs the release. */
		mvpp2_write(sc->sc, MVPP2_BM_PHY_RLS_REG(pool),
		    phys & 0xffffffff);
		rxq->cons = (rxq->cons + 1) % MVPP2_NRXDESC;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Report all nrecv descriptors as both processed and refilled. */
	mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);
}
3013 
/*
 * Tear down one RX queue: release any pending buffers back to their
 * BM pools, clear the queue status, detach the descriptor ring from
 * the hardware and free the ring memory.
 */
void
mvpp2_rxq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_hw_drop(sc, rxq);

	mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, 0);

	mvpp2_dmamem_free(sc->sc, rxq->ring);
}
3026 
3027 void
3028 mvpp2_rxq_long_pool_set(struct mvpp2_port *port, int lrxq, int pool)
3029 {
3030 	uint32_t val;
3031 	int prxq;
3032 
3033 	/* get queue physical ID */
3034 	prxq = port->sc_rxqs[lrxq].id;
3035 
3036 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
3037 	val &= ~MVPP2_RXQ_POOL_LONG_MASK;
3038 	val |= ((pool << MVPP2_RXQ_POOL_LONG_OFFS) & MVPP2_RXQ_POOL_LONG_MASK);
3039 
3040 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
3041 }
3042 
3043 void
3044 mvpp2_rxq_short_pool_set(struct mvpp2_port *port, int lrxq, int pool)
3045 {
3046 	uint32_t val;
3047 	int prxq;
3048 
3049 	/* get queue physical ID */
3050 	prxq = port->sc_rxqs[lrxq].id;
3051 
3052 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
3053 	val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
3054 	val |= ((pool << MVPP2_RXQ_POOL_SHORT_OFFS) & MVPP2_RXQ_POOL_SHORT_MASK);
3055 
3056 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
3057 }
3058 
3059 void
3060 mvpp2_iff(struct mvpp2_port *sc)
3061 {
3062 	struct arpcom *ac = &sc->sc_ac;
3063 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3064 	struct ether_multi *enm;
3065 	struct ether_multistep step;
3066 
3067 	ifp->if_flags &= ~IFF_ALLMULTI;
3068 
3069 	/* Removes all but broadcast and (new) lladdr */
3070 	mvpp2_prs_mac_del_all(sc);
3071 
3072 	if (ifp->if_flags & IFF_PROMISC) {
3073 		mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
3074 		    MVPP2_PRS_L2_UNI_CAST, 1);
3075 		mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
3076 		    MVPP2_PRS_L2_MULTI_CAST, 1);
3077 		return;
3078 	}
3079 
3080 	mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
3081 	    MVPP2_PRS_L2_UNI_CAST, 0);
3082 	mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
3083 	    MVPP2_PRS_L2_MULTI_CAST, 0);
3084 
3085 	if (ac->ac_multirangecnt > 0 ||
3086 	    ac->ac_multicnt > MVPP2_PRS_MAC_MC_FILT_MAX) {
3087 		ifp->if_flags |= IFF_ALLMULTI;
3088 		mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
3089 		    MVPP2_PRS_L2_MULTI_CAST, 1);
3090 	} else {
3091 		ETHER_FIRST_MULTI(step, ac, enm);
3092 		while (enm != NULL) {
3093 			mvpp2_prs_mac_da_accept(sc, enm->enm_addrlo, 1);
3094 			ETHER_NEXT_MULTI(step, enm);
3095 		}
3096 	}
3097 }
3098 
3099 struct mvpp2_dmamem *
3100 mvpp2_dmamem_alloc(struct mvpp2_softc *sc, bus_size_t size, bus_size_t align)
3101 {
3102 	struct mvpp2_dmamem *mdm;
3103 	int nsegs;
3104 
3105 	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
3106 	mdm->mdm_size = size;
3107 
3108 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
3109 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
3110 		goto mdmfree;
3111 
3112 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
3113 	    &nsegs, BUS_DMA_WAITOK) != 0)
3114 		goto destroy;
3115 
3116 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
3117 	    &mdm->mdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
3118 		goto free;
3119 
3120 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
3121 	    NULL, BUS_DMA_WAITOK) != 0)
3122 		goto unmap;
3123 
3124 	bzero(mdm->mdm_kva, size);
3125 
3126 	return (mdm);
3127 
3128 unmap:
3129 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
3130 free:
3131 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
3132 destroy:
3133 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
3134 mdmfree:
3135 	free(mdm, M_DEVBUF, 0);
3136 
3137 	return (NULL);
3138 }
3139 
3140 void
3141 mvpp2_dmamem_free(struct mvpp2_softc *sc, struct mvpp2_dmamem *mdm)
3142 {
3143 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
3144 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
3145 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
3146 	free(mdm, M_DEVBUF, 0);
3147 }
3148 
3149 struct mbuf *
3150 mvpp2_alloc_mbuf(struct mvpp2_softc *sc, bus_dmamap_t map)
3151 {
3152 	struct mbuf *m = NULL;
3153 
3154 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
3155 	if (!m)
3156 		return (NULL);
3157 	m->m_len = m->m_pkthdr.len = MCLBYTES;
3158 
3159 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
3160 		printf("%s: could not load mbuf DMA map", DEVNAME(sc));
3161 		m_freem(m);
3162 		return (NULL);
3163 	}
3164 
3165 	bus_dmamap_sync(sc->sc_dmat, map, 0,
3166 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
3167 
3168 	return (m);
3169 }
3170 
3171 void
3172 mvpp2_interrupts_enable(struct mvpp2_port *port, int cpu_mask)
3173 {
3174 	mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
3175 	    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
3176 }
3177 
3178 void
3179 mvpp2_interrupts_disable(struct mvpp2_port *port, int cpu_mask)
3180 {
3181 	mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
3182 	    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
3183 }
3184 
3185 int
3186 mvpp2_egress_port(struct mvpp2_port *port)
3187 {
3188 	return MVPP2_MAX_TCONT + port->sc_id;
3189 }
3190 
3191 int
3192 mvpp2_txq_phys(int port, int txq)
3193 {
3194 	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
3195 }
3196 
/*
 * Program scheduler and RX defaults for this egress port: zero the
 * per-queue token counters, set refill period/tokens and maximum
 * token size, configure RX control, and mask the port's interrupts.
 */
void
mvpp2_defaults_set(struct mvpp2_port *port)
{
	int val, queue;

	/* All TXP scheduler accesses below are indirect via this index. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Scheduler period: core-clock ticks per microsecond. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PERIOD_REG, port->sc->sc_tclk /
	    (1000 * 1000));
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* set maximum_low_latency_packet_size value to 256 */
	mvpp2_write(port->sc, MVPP2_RX_CTRL_REG(port->sc_id),
	    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
	    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port, (0xf << 0));
}
3227 
3228 void
3229 mvpp2_ingress_enable(struct mvpp2_port *port)
3230 {
3231 	uint32_t val;
3232 	int lrxq, queue;
3233 
3234 	for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
3235 		queue = port->sc_rxqs[lrxq].id;
3236 		val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
3237 		val &= ~MVPP2_RXQ_DISABLE_MASK;
3238 		mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
3239 	}
3240 }
3241 
3242 void
3243 mvpp2_ingress_disable(struct mvpp2_port *port)
3244 {
3245 	uint32_t val;
3246 	int lrxq, queue;
3247 
3248 	for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
3249 		queue = port->sc_rxqs[lrxq].id;
3250 		val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
3251 		val |= MVPP2_RXQ_DISABLE_MASK;
3252 		mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
3253 	}
3254 }
3255 
3256 void
3257 mvpp2_egress_enable(struct mvpp2_port *port)
3258 {
3259 	struct mvpp2_tx_queue *txq;
3260 	uint32_t qmap;
3261 	int queue;
3262 
3263 	qmap = 0;
3264 	for (queue = 0; queue < port->sc_ntxq; queue++) {
3265 		txq = &port->sc_txqs[queue];
3266 
3267 		if (txq->descs != NULL) {
3268 			qmap |= (1 << queue);
3269 		}
3270 	}
3271 
3272 	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3273 	    mvpp2_egress_port(port));
3274 	mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
3275 }
3276 
/*
 * Stop egress on this port: issue a disable command for all enabled
 * TX queues and poll until the scheduler reports them quiesced,
 * with a millisecond-granularity timeout.
 */
void
mvpp2_egress_disable(struct mvpp2_port *port)
{
	uint32_t reg_data;
	int i;

	/* Select this egress port for the indirect scheduler access. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));
	/* Request a disable for every queue currently enabled. */
	reg_data = (mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG)) &
	    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data)
		mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG,
		    reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET);

	/* Wait up to MVPP2_TX_DISABLE_TIMEOUT_MSEC ms for quiescence. */
	i = 0;
	do {
		if (i >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			printf("%s: tx stop timed out, status=0x%08x\n",
			    port->sc_dev.dv_xname, reg_data);
			break;
		}
		delay(1000);
		i++;
		reg_data = mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
3303 
3304 void
3305 mvpp2_port_enable(struct mvpp2_port *port)
3306 {
3307 	uint32_t val;
3308 
3309 	if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
3310 	    port->sc_phy_mode == PHY_MODE_XAUI)) {
3311 		val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
3312 		val |= MV_XLG_MAC_CTRL0_PORTEN;
3313 		val &= ~MV_XLG_MAC_CTRL0_MIBCNTDIS;
3314 		mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
3315 	} else {
3316 		val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
3317 		val |= MVPP2_GMAC_PORT_EN_MASK;
3318 		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3319 		mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
3320 	}
3321 }
3322 
3323 void
3324 mvpp2_port_disable(struct mvpp2_port *port)
3325 {
3326 	uint32_t val;
3327 
3328 	if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
3329 	    port->sc_phy_mode == PHY_MODE_XAUI)) {
3330 		val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
3331 		val &= ~MV_XLG_MAC_CTRL0_PORTEN;
3332 		mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
3333 	}
3334 
3335 	val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
3336 	val &= ~MVPP2_GMAC_PORT_EN_MASK;
3337 	mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
3338 }
3339 
3340 int
3341 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
3342 {
3343 	uint32_t val = mvpp2_read(port->sc, MVPP2_RXQ_STATUS_REG(rxq_id));
3344 
3345 	return val & MVPP2_RXQ_OCCUPIED_MASK;
3346 }
3347 
3348 void
3349 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
3350     int used_count, int free_count)
3351 {
3352 	uint32_t val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
3353 	mvpp2_write(port->sc, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
3354 }
3355 
3356 void
3357 mvpp2_rxq_offset_set(struct mvpp2_port *port, int prxq, int offset)
3358 {
3359 	uint32_t val;
3360 
3361 	offset = offset >> 5;
3362 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
3363 	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
3364 	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
3365 	    MVPP2_RXQ_PACKET_OFFSET_MASK);
3366 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
3367 }
3368 
/*
 * Program the scheduler MTU and make sure both the port-level and
 * per-queue token sizes are at least as large as that MTU.
 */
void
mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	uint32_t val, size, mtu;
	int txq;

	mtu = MCLBYTES * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong token bucket update: set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* indirect access to registers */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));

	/* set MTU */
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs' token sizes must be larger than MTU */
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		/* Grow the port-level token size up to the new MTU. */
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < port->sc_ntxq; txq++) {
		val = mvpp2_read(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			/* Grow this queue's token size as well. */
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), val);
		}
	}
}
3414 
3415 void
3416 mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3417     uint32_t pkts)
3418 {
3419 	rxq->pkts_coal =
3420 	    pkts <= MVPP2_OCCUPIED_THRESH_MASK ?
3421 	    pkts : MVPP2_OCCUPIED_THRESH_MASK;
3422 
3423 	mvpp2_write(port->sc, MVPP2_RXQ_NUM_REG, rxq->id);
3424 	mvpp2_write(port->sc, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal);
3425 
3426 }
3427 
3428 void
3429 mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
3430     uint32_t pkts)
3431 {
3432 	txq->done_pkts_coal =
3433 	    pkts <= MVPP2_TRANSMITTED_THRESH_MASK ?
3434 	    pkts : MVPP2_TRANSMITTED_THRESH_MASK;
3435 
3436 	mvpp2_write(port->sc, MVPP2_TXQ_NUM_REG, txq->id);
3437 	mvpp2_write(port->sc, MVPP2_TXQ_THRESH_REG,
3438 	    txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET);
3439 }
3440 
3441 void
3442 mvpp2_rx_time_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3443     uint32_t usec)
3444 {
3445 	uint32_t val;
3446 
3447 	val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
3448 	mvpp2_write(port->sc, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
3449 
3450 	rxq->time_coal = usec;
3451 }
3452 
3453 void
3454 mvpp2_tx_time_coal_set(struct mvpp2_port *port, uint32_t usec)
3455 {
3456 	uint32_t val;
3457 
3458 	val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
3459 	mvpp2_write(port->sc, MVPP2_ISR_TX_THRESHOLD_REG(port->sc_id), val);
3460 
3461 	port->sc_tx_time_coal = usec;
3462 }
3463 
3464 void
3465 mvpp2_prs_shadow_ri_set(struct mvpp2_softc *sc, int index,
3466     uint32_t ri, uint32_t ri_mask)
3467 {
3468 	sc->sc_prs_shadow[index].ri_mask = ri_mask;
3469 	sc->sc_prs_shadow[index].ri = ri;
3470 }
3471 
3472 void
3473 mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
3474 {
3475 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
3476 
3477 	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
3478 	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
3479 }
3480 
3481 void
3482 mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, uint32_t port, int add)
3483 {
3484 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3485 
3486 	if (add)
3487 		pe->tcam.byte[enable_off] &= ~(1 << port);
3488 	else
3489 		pe->tcam.byte[enable_off] |= (1 << port);
3490 }
3491 
3492 void
3493 mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, uint32_t port_mask)
3494 {
3495 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3496 	uint8_t mask = MVPP2_PRS_PORT_MASK;
3497 
3498 	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
3499 	pe->tcam.byte[enable_off] &= ~mask;
3500 	pe->tcam.byte[enable_off] |= ~port_mask & MVPP2_PRS_PORT_MASK;
3501 }
3502 
3503 uint32_t
3504 mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
3505 {
3506 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3507 
3508 	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
3509 }
3510 
3511 void
3512 mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, uint32_t offs,
3513     uint8_t byte, uint8_t enable)
3514 {
3515 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
3516 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
3517 }
3518 
3519 void
3520 mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, uint32_t offs,
3521     uint8_t *byte, uint8_t *enable)
3522 {
3523 	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
3524 	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
3525 }
3526 
3527 int
3528 mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offset, uint16_t data)
3529 {
3530 	int byte_offset = MVPP2_PRS_TCAM_DATA_BYTE(offset);
3531 	uint16_t tcam_data;
3532 
3533 	tcam_data = (pe->tcam.byte[byte_offset + 1] << 8) |
3534 	    pe->tcam.byte[byte_offset];
3535 	return tcam_data == data;
3536 }
3537 
3538 void
3539 mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t enable)
3540 {
3541 	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
3542 
3543 	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
3544 		if (!(enable & BIT(i)))
3545 			continue;
3546 
3547 		if (bits & BIT(i))
3548 			pe->tcam.byte[ai_idx] |= BIT(i);
3549 		else
3550 			pe->tcam.byte[ai_idx] &= ~BIT(i);
3551 	}
3552 
3553 	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
3554 }
3555 
3556 int
3557 mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
3558 {
3559 	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
3560 }
3561 
3562 void
3563 mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *pe, uint32_t data_offset,
3564     uint32_t *word, uint32_t *enable)
3565 {
3566 	int index, position;
3567 	uint8_t byte, mask;
3568 
3569 	for (index = 0; index < 4; index++) {
3570 		position = (data_offset * sizeof(int)) + index;
3571 		mvpp2_prs_tcam_data_byte_get(pe, position, &byte, &mask);
3572 		((uint8_t *)word)[index] = byte;
3573 		((uint8_t *)enable)[index] = mask;
3574 	}
3575 }
3576 
3577 void
3578 mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, uint32_t offs,
3579     uint16_t ether_type)
3580 {
3581 	mvpp2_prs_tcam_data_byte_set(pe, offs + 0, ether_type >> 8, 0xff);
3582 	mvpp2_prs_tcam_data_byte_set(pe, offs + 1, ether_type & 0xff, 0xff);
3583 }
3584 
3585 void
3586 mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
3587 {
3588 	pe->sram.byte[bit / 8] |= (val << (bit % 8));
3589 }
3590 
3591 void
3592 mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
3593 {
3594 	pe->sram.byte[bit / 8] &= ~(val << (bit % 8));
3595 }
3596 
3597 void
3598 mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
3599 {
3600 	int i;
3601 
3602 	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
3603 		if (!(mask & BIT(i)))
3604 			continue;
3605 
3606 		if (bits & BIT(i))
3607 			mvpp2_prs_sram_bits_set(pe,
3608 			    MVPP2_PRS_SRAM_RI_OFFS + i, 1);
3609 		else
3610 			mvpp2_prs_sram_bits_clear(pe,
3611 			    MVPP2_PRS_SRAM_RI_OFFS + i, 1);
3612 
3613 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
3614 	}
3615 }
3616 
3617 int
3618 mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
3619 {
3620 	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
3621 }
3622 
3623 void
3624 mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
3625 {
3626 	int i;
3627 
3628 	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
3629 		if (!(mask & BIT(i)))
3630 			continue;
3631 
3632 		if (bits & BIT(i))
3633 			mvpp2_prs_sram_bits_set(pe,
3634 			    MVPP2_PRS_SRAM_AI_OFFS + i, 1);
3635 		else
3636 			mvpp2_prs_sram_bits_clear(pe,
3637 			    MVPP2_PRS_SRAM_AI_OFFS + i, 1);
3638 
3639 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
3640 	}
3641 }
3642 
3643 int
3644 mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
3645 {
3646 	uint8_t bits;
3647 	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
3648 	int ai_en_off = ai_off + 1;
3649 	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
3650 
3651 	bits = (pe->sram.byte[ai_off] >> ai_shift) |
3652 	    (pe->sram.byte[ai_en_off] << (8 - ai_shift));
3653 
3654 	return bits;
3655 }
3656 
3657 void
3658 mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, uint32_t op)
3659 {
3660 	if (shift < 0) {
3661 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
3662 		shift = -shift;
3663 	} else {
3664 		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
3665 	}
3666 
3667 	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
3668 	    shift & MVPP2_PRS_SRAM_SHIFT_MASK;
3669 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
3670 	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
3671 	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
3672 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
3673 }
3674 
/*
 * Program the SRAM user-defined-field (UDF) offset: store the sign
 * and magnitude of "offset", its "type", and the UDF operation
 * "op".  Both the UDF offset and the op-select fields straddle a
 * byte boundary, so after the bit-string helpers set the low bits
 * the spill-over high bits are patched into the next byte by hand.
 */
void
mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, uint32_t type, int offset,
    uint32_t op)
{
	uint8_t udf_byte, udf_byte_offset;
	uint8_t op_sel_udf_byte, op_sel_udf_byte_offset;

	/* Byte holding the UDF high bits and the shift into it. */
	udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
	    MVPP2_PRS_SRAM_UDF_BITS);
	udf_byte_offset = (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8));
	/* Same for the op-select field. */
	op_sel_udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
	    MVPP2_PRS_SRAM_OP_SEL_UDF_BITS);
	op_sel_udf_byte_offset = (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8));

	/* Sign/magnitude encoding of the offset. */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = -offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
	    MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	/* Patch the offset bits that spilled into the following byte. */
	pe->sram.byte[udf_byte] &= ~(MVPP2_PRS_SRAM_UDF_MASK >> udf_byte_offset);
	pe->sram.byte[udf_byte] |= (offset >> udf_byte_offset);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
	    MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
	    MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
	/* Patch the op-select spill-over bits likewise. */
	pe->sram.byte[op_sel_udf_byte] &= ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
	    op_sel_udf_byte_offset);
	pe->sram.byte[op_sel_udf_byte] |= (op >> op_sel_udf_byte_offset);
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
3712 
3713 void
3714 mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
3715 {
3716 	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
3717 
3718 	mvpp2_prs_sram_bits_clear(pe, sram_next_off, MVPP2_PRS_SRAM_NEXT_LU_MASK);
3719 	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
3720 }
3721 
3722 void
3723 mvpp2_prs_shadow_set(struct mvpp2_softc *sc, int index, uint32_t lu)
3724 {
3725 	sc->sc_prs_shadow[index].valid = 1;
3726 	sc->sc_prs_shadow[index].lu = lu;
3727 }
3728 
3729 int
3730 mvpp2_prs_hw_write(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe)
3731 {
3732 	int i;
3733 
3734 	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
3735 		return EINVAL;
3736 
3737 	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
3738 	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
3739 	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3740 		mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
3741 	mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
3742 	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3743 		mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
3744 
3745 	return 0;
3746 }
3747 
3748 int
3749 mvpp2_prs_hw_read(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe, int tid)
3750 {
3751 	int i;
3752 
3753 	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
3754 		return EINVAL;
3755 
3756 	memset(pe, 0, sizeof(*pe));
3757 	pe->index = tid;
3758 
3759 	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
3760 	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] =
3761 	    mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
3762 	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
3763 		return EINVAL;
3764 	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3765 		pe->tcam.word[i] =
3766 		    mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(i));
3767 
3768 	mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
3769 	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3770 		pe->sram.word[i] =
3771 		    mvpp2_read(sc, MVPP2_PRS_SRAM_DATA_REG(i));
3772 
3773 	return 0;
3774 }
3775 
3776 int
3777 mvpp2_prs_flow_find(struct mvpp2_softc *sc, int flow)
3778 {
3779 	struct mvpp2_prs_entry pe;
3780 	uint8_t bits;
3781 	int tid;
3782 
3783 	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
3784 		if (!sc->sc_prs_shadow[tid].valid ||
3785 		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
3786 			continue;
3787 
3788 		mvpp2_prs_hw_read(sc, &pe, tid);
3789 		bits = mvpp2_prs_sram_ai_get(&pe);
3790 
3791 		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
3792 			return tid;
3793 	}
3794 
3795 	return -1;
3796 }
3797 
3798 int
3799 mvpp2_prs_tcam_first_free(struct mvpp2_softc *sc, uint8_t start, uint8_t end)
3800 {
3801 	uint8_t tmp;
3802 	int tid;
3803 
3804 	if (start > end) {
3805 		tmp = end;
3806 		end = start;
3807 		start = tmp;
3808 	}
3809 
3810 	for (tid = start; tid <= end; tid++) {
3811 		if (!sc->sc_prs_shadow[tid].valid)
3812 			return tid;
3813 	}
3814 
3815 	return -1;
3816 }
3817 
/*
 * Add or remove (per "add") a port in the port map of the drop-all
 * MAC parser entry.  The entry is created on first use: it flags the
 * frame for dropping and terminates the lookup.
 */
void
mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *sc, uint32_t port, int add)
{
	struct mvpp2_prs_entry pe;

	if (sc->sc_prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists; fetch it so we can edit the port map. */
		mvpp2_prs_hw_read(sc, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* First use: build the drop-all entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;
		/* Flag the frame to be dropped. */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
		    MVPP2_PRS_RI_DROP_MASK);
		/* Terminate the lookup: generate bit set, continue at flows. */
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
		/* Start with no ports mapped; the caller's port is set below. */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Enable or disable the entry for this port and flush to hardware. */
	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3840 
/*
 * Add or remove a port in the unicast or multicast promiscuous parser
 * entry (selected by l2_cast).  The entry is created on first use and
 * matches on the cast bit in the first byte of the destination
 * address.
 */
void
mvpp2_prs_mac_promisc_set(struct mvpp2_softc *sc, uint32_t port, int l2_cast,
    int add)
{
	struct mvpp2_prs_entry pe;
	uint8_t cast_match;
	uint32_t ri;
	int tid;

	/* Pick TCAM index, match value and result info per cast type. */
	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	if (sc->sc_prs_shadow[tid].valid) {
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		/* First use: build the promiscuous entry. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
		/* Match the cast bit in the first byte of the DA. */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
		    MVPP2_PRS_CAST_MASK);
		/* Skip past both MAC addresses. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		/* No ports yet; the caller's port is set below. */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3879 
/*
 * Add or remove a port in one of the four DSA parser entries
 * (tagged/untagged x DSA/EDSA).  The entry is created on first use:
 * it skips the 4-byte (DSA) or 8-byte (EDSA) header and hands tagged
 * frames to the VLAN unit, untagged frames directly to the L2 unit.
 */
void
mvpp2_prs_dsa_tag_set(struct mvpp2_softc *sc, uint32_t port, int add,
    int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift;

	/* EDSA headers are 8 bytes, plain DSA headers 4. */
	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (sc->sc_prs_shadow[tid].valid) {
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		/* First use: build the entry. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		/* Shift past the DSA header. */
		mvpp2_prs_sram_shift_set(&pe, shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* Match the tagged bit of the DSA tag. */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all AI bits and continue at the VLAN unit. */
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* No VLAN present; continue at the L2 unit. */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* No ports yet; the caller's port is set below. */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3921 
/*
 * Add or remove a port in one of the ethertype-DSA parser entries
 * (tagged/untagged x DSA/EDSA).  These match frames carrying the
 * 0xdada/0x0000 ethertype marker before the DSA tag, so the skip also
 * covers the two MAC addresses.
 */
void
mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *sc, uint32_t port,
    int add, int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (sc->sc_prs_shadow[tid].valid) {
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		/* First use: build the entry. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		/* Match the 0xdada/0x0000 ethertype-DSA marker. */
		mvpp2_prs_match_etype(&pe, 0, 0xdada);
		mvpp2_prs_match_etype(&pe, 2, 0);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
		    MVPP2_PRS_RI_DSA_MASK);
		/* Skip both MAC addresses plus the DSA header. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN + shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* Match the tagged bit inside the DSA tag. */
			mvpp2_prs_tcam_data_byte_set(&pe,
			    MVPP2_ETH_TYPE_LEN + 2 + 3,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all AI bits and continue at the VLAN unit. */
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* No VLAN present; continue at the L2 unit. */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3970 
3971 struct mvpp2_prs_entry *
3972 mvpp2_prs_vlan_find(struct mvpp2_softc *sc, uint16_t tpid, int ai)
3973 {
3974 	struct mvpp2_prs_entry *pe;
3975 	uint32_t ri_bits, ai_bits;
3976 	int match, tid;
3977 
3978 	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
3979 	if (pe == NULL)
3980 		return NULL;
3981 
3982 	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
3983 
3984 	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3985 		if (!sc->sc_prs_shadow[tid].valid ||
3986 		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
3987 			continue;
3988 		mvpp2_prs_hw_read(sc, pe, tid);
3989 		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid));
3990 		if (!match)
3991 			continue;
3992 		ri_bits = mvpp2_prs_sram_ri_get(pe);
3993 		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
3994 		ai_bits = mvpp2_prs_tcam_ai_get(pe);
3995 		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
3996 		if (ai != ai_bits)
3997 			continue;
3998 		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
3999 		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
4000 			return pe;
4001 	}
4002 
4003 	free(pe, M_TEMP, sizeof(*pe));
4004 	return NULL;
4005 }
4006 
/*
 * Ensure a single-VLAN parser entry for the given TPID and AI value
 * exists and enable it for the ports in port_map.  A new entry must
 * land at a higher TID than every double-VLAN entry so double-tagged
 * frames keep matching first.  Returns 0 on success, -1 when the TCAM
 * is full, or an errno.
 */
int
mvpp2_prs_vlan_add(struct mvpp2_softc *sc, uint16_t tpid, int ai, uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_bits;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(sc, tpid, ai);
	if (pe == NULL) {
		/* Not present yet: find a free TID for a new entry. */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_LAST_FREE_TID,
		    MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		/* get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		    tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			mvpp2_prs_hw_read(sc, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		/* Single-VLAN entries must sit after all double-VLAN ones. */
		if (tid <= tid_aux) {
			ret = EINVAL;
			goto error;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		mvpp2_prs_match_etype(pe, 0, tpid);
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* Skip the VLAN tag that was just matched. */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
				   MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		} else {
			/* Inner tag of a double-tagged frame. */
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
4071 
4072 int
4073 mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *sc)
4074 {
4075 	int i;
4076 
4077 	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++)
4078 		if (!sc->sc_prs_double_vlans[i])
4079 			return i;
4080 
4081 	return -1;
4082 }
4083 
4084 struct mvpp2_prs_entry *
4085 mvpp2_prs_double_vlan_find(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2)
4086 {
4087 	struct mvpp2_prs_entry *pe;
4088 	uint32_t ri_mask;
4089 	int match, tid;
4090 
4091 	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
4092 	if (pe == NULL)
4093 		return NULL;
4094 
4095 	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
4096 
4097 	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
4098 		if (!sc->sc_prs_shadow[tid].valid ||
4099 		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
4100 			continue;
4101 
4102 		mvpp2_prs_hw_read(sc, pe, tid);
4103 		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid1)) &&
4104 		    mvpp2_prs_tcam_data_cmp(pe, 4, swap16(tpid2));
4105 		if (!match)
4106 			continue;
4107 		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
4108 		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
4109 			return pe;
4110 	}
4111 
4112 	free(pe, M_TEMP, sizeof(*pe));
4113 	return NULL;
4114 }
4115 
/*
 * Ensure a double-VLAN parser entry for outer TPID tpid1 and inner
 * TPID tpid2 exists and enable it for the ports in port_map.  The
 * pair is assigned a free AI value, and the entry must land at a
 * lower TID than every single/triple-VLAN entry so double-tagged
 * frames match it first.  Returns 0 on success, -1 when no TID or AI
 * is free, or an errno.
 */
int
mvpp2_prs_double_vlan_add(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2,
    uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;
	uint32_t ri_bits;

	pe = mvpp2_prs_double_vlan_find(sc, tpid1, tpid2);
	if (pe == NULL) {
		/* Not present yet: allocate a free TID and AI value. */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
		    MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		ai = mvpp2_prs_double_vlan_ai_free_get(sc);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* Find the first single/triple-VLAN entry. */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		    tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			mvpp2_prs_hw_read(sc, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		/* Double-VLAN entries must precede single/triple ones. */
		if (tid >= tid_aux) {
			ret = ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		/* Claim the AI value for this TPID pair. */
		sc->sc_prs_double_vlans[ai] = 1;
		/* Outer TPID at offset 0, inner TPID at offset 4. */
		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Skip both VLAN tags before the next lookup. */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
		    MVPP2_PRS_RI_VLAN_MASK);
		/* Pass the AI value on for the inner-tag lookup. */
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
		    MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
4182 
/*
 * Install the IPv4 L4-protocol parser entries for proto (TCP, UDP or
 * IGMP).  Two entries are written: the first requires TCAM data bytes
 * 2 and 3 to be zero (presumably the IPv4 flags/fragment-offset field
 * at this parse position -- confirm against the datasheet) and
 * reports ri with the fragment bit clear; the second drops that
 * requirement and reports ri with MVPP2_PRS_RI_IP_FRAG_MASK set.
 */
int
mvpp2_prs_ip4_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
    uint32_t ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Only these protocols get dedicated entries. */
	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return EINVAL;

	/* First entry: non-fragmented packets. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Record the L4 offset relative to the IPv4 header. */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Report ri with the fragmentation bit forced clear. */
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Second entry: fragmented packets, reusing the state built above. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Rebuild the result-info words from scratch. */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
	    ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
	/* Stop matching on bytes 2/3 so fragments also hit this entry. */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4235 
/*
 * Install the IPv4 multicast or broadcast classification entry
 * (selected by l3_cast).  Multicast matches the MVPP2_PRS_IPV4_MC
 * prefix in the first address byte; broadcast requires four all-ones
 * data bytes.  Both set the matching L3 result bits and terminate the
 * lookup.  Returns 0 on success.
 */
int
mvpp2_prs_ip4_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		/* Match the multicast prefix in the first address byte. */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
		    MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
		    MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case  MVPP2_PRS_L3_BROAD_CAST:
		/* Broadcast: all four TCAM data bytes must be 0xff. */
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
		    MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return EINVAL;
	}

	/* Terminate the lookup at the flows unit. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4281 
/*
 * Install the IPv6 L4-protocol parser entry for proto (TCP, UDP,
 * ICMPv6 or IPv4-in-IPv6).  The entry records the L4 offset relative
 * to the IPv6 header and only matches when the no-extension-headers
 * AI bit is set.  Returns 0 on success.
 */
int
mvpp2_prs_ip6_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
    uint32_t ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Only these protocols get dedicated entries. */
	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	/* Terminate the lookup at the flows unit. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	/* Record the L4 offset relative to the IPv6 header. */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	/* Match byte 0 -- presumably the next-header field; confirm. */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	/* Only when no extension headers were seen. */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4315 
/*
 * Install the IPv6 multicast classification entry: matches the
 * multicast prefix in the first address byte, sets the L3 multicast
 * result bit, and continues parsing in the IPv6 unit.  Only
 * MVPP2_PRS_L3_MULTI_CAST is supported.  Returns 0 on success.
 */
int
mvpp2_prs_ip6_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/*
	 * Back up 18 bytes.  NOTE(review): presumably rewinds the parse
	 * position to the start of the IPv6 header -- confirm against
	 * the datasheet.
	 */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Match the multicast prefix in the first address byte. */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
	    MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4348 
4349 int
4350 mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, const uint8_t *da,
4351     uint8_t *mask)
4352 {
4353 	uint8_t tcam_byte, tcam_mask;
4354 	int index;
4355 
4356 	for (index = 0; index < ETHER_ADDR_LEN; index++) {
4357 		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte,
4358 		    &tcam_mask);
4359 		if (tcam_mask != mask[index])
4360 			return 0;
4361 		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
4362 			return 0;
4363 	}
4364 
4365 	return 1;
4366 }
4367 
4368 int
4369 mvpp2_prs_mac_da_range_find(struct mvpp2_softc *sc, int pmap, const uint8_t *da,
4370     uint8_t *mask, int udf_type)
4371 {
4372 	struct mvpp2_prs_entry pe;
4373 	int tid;
4374 
4375 	for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END;
4376 	    tid++) {
4377 		uint32_t entry_pmap;
4378 
4379 		if (!sc->sc_prs_shadow[tid].valid ||
4380 		    (sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
4381 		    (sc->sc_prs_shadow[tid].udf != udf_type))
4382 			continue;
4383 
4384 		mvpp2_prs_hw_read(sc, &pe, tid);
4385 		entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
4386 		if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
4387 		    entry_pmap == pmap)
4388 			return tid;
4389 	}
4390 
4391 	return -1;
4392 }
4393 
/*
 * Add (add != 0) or remove this port from the parser entry that
 * exact-matches the MAC address da.  An existing entry for the same
 * address is reused; otherwise a free entry in the MAC range is
 * allocated.  When the last port is removed the entry itself is
 * invalidated.  Returns 0 on success, a negative value when the MAC
 * range is full.
 */
int
mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const uint8_t *da, int add)
{
	struct mvpp2_softc *sc = port->sc;
	struct mvpp2_prs_entry pe;
	uint32_t pmap, len, ri;
	uint8_t mask[ETHER_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Is there already an exact-match entry for this address? */
	tid = mvpp2_prs_mac_da_range_find(sc, BIT(port->sc_id), da, mask,
	    MVPP2_PRS_UDF_MAC_DEF);
	if (tid < 0) {
		/* Nothing to remove. */
		if (!add)
			return 0;

		/* Allocate a fresh entry in the MAC range. */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_MAC_RANGE_START,
		    MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe.index = tid;
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_hw_read(sc, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_tcam_port_set(&pe, port->sc_id, add);

	/* invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		if (add)
			return -1;
		mvpp2_prs_hw_inv(sc, pe.index);
		sc->sc_prs_shadow[pe.index].valid = 0;
		return 0;
	}

	/* Continue parsing at the DSA unit. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Exact-match all six bytes of the destination address. */
	len = ETHER_ADDR_LEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Classify the address for the result-info word. */
	if (ETHER_IS_BROADCAST(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (ETHER_IS_MULTICAST(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
	    MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(sc, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
	    MVPP2_PRS_RI_MAC_ME_MASK);
	/* Skip past both MAC addresses. */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4461 
4462 void
4463 mvpp2_prs_mac_del_all(struct mvpp2_port *port)
4464 {
4465 	struct mvpp2_softc *sc = port->sc;
4466 	struct mvpp2_prs_entry pe;
4467 	uint32_t pmap;
4468 	int index, tid;
4469 
4470 	for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END;
4471 	    tid++) {
4472 		uint8_t da[ETHER_ADDR_LEN], da_mask[ETHER_ADDR_LEN];
4473 
4474 		if (!sc->sc_prs_shadow[tid].valid ||
4475 		    (sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
4476 		    (sc->sc_prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
4477 			continue;
4478 
4479 		mvpp2_prs_hw_read(sc, &pe, tid);
4480 		pmap = mvpp2_prs_tcam_port_map_get(&pe);
4481 
4482 		if (!(pmap & (1 << port->sc_id)))
4483 			continue;
4484 
4485 		for (index = 0; index < ETHER_ADDR_LEN; index++)
4486 			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
4487 			    &da_mask[index]);
4488 
4489 		if (ETHER_IS_BROADCAST(da) || ETHER_IS_EQ(da, port->sc_lladdr))
4490 			continue;
4491 
4492 		mvpp2_prs_mac_da_accept(port, da, 0);
4493 	}
4494 }
4495 
4496 int
4497 mvpp2_prs_tag_mode_set(struct mvpp2_softc *sc, int port_id, int type)
4498 {
4499 	switch (type) {
4500 	case MVPP2_TAG_TYPE_EDSA:
4501 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
4502 		    MVPP2_PRS_EDSA);
4503 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
4504 		    MVPP2_PRS_EDSA);
4505 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4506 		    MVPP2_PRS_DSA);
4507 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4508 		    MVPP2_PRS_DSA);
4509 		break;
4510 	case MVPP2_TAG_TYPE_DSA:
4511 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
4512 		    MVPP2_PRS_DSA);
4513 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
4514 		    MVPP2_PRS_DSA);
4515 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4516 		    MVPP2_PRS_EDSA);
4517 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4518 		    MVPP2_PRS_EDSA);
4519 		break;
4520 	case MVPP2_TAG_TYPE_MH:
4521 	case MVPP2_TAG_TYPE_NONE:
4522 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4523 		    MVPP2_PRS_DSA);
4524 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4525 		    MVPP2_PRS_DSA);
4526 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4527 		    MVPP2_PRS_EDSA);
4528 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4529 		    MVPP2_PRS_EDSA);
4530 		break;
4531 	default:
4532 		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
4533 			return EINVAL;
4534 		break;
4535 	}
4536 
4537 	return 0;
4538 }
4539 
/*
 * Ensure the flows unit has a default entry for this port: the port ID
 * is stored in the SRAM AI field (what mvpp2_prs_flow_find() scans
 * for) and the done bit ends the lookup.  The entry is restricted to
 * this port's map.  Returns 0 on success.
 */
int
mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_flow_find(port->sc, port->sc_id);
	if (tid < 0) {
		/* No entry yet: allocate a free TID and build one. */
		tid = mvpp2_prs_tcam_first_free(port->sc,
		    MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe.index = tid;
		mvpp2_prs_sram_ai_update(&pe, port->sc_id,
		    MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
		mvpp2_prs_shadow_set(port->sc, pe.index, MVPP2_PRS_LU_FLOWS);
	} else {
		mvpp2_prs_hw_read(port->sc, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->sc_id));
	mvpp2_prs_hw_write(port->sc, &pe);
	return 0;
}
4569 
/*
 * Write a classifier flow-table entry: select the row via the index
 * register, then store the three data words.
 */
void
mvpp2_cls_flow_write(struct mvpp2_softc *sc, struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(sc, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
4578 
/*
 * Write a classifier lookup-ID table entry: the index register takes
 * the combined (way, lkpid) selector, then the data word is stored.
 */
void
mvpp2_cls_lookup_write(struct mvpp2_softc *sc, struct mvpp2_cls_lookup_entry *le)
{
	uint32_t val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(sc, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(sc, MVPP2_CLS_LKP_TBL_REG, le->data);
}
4588 
4589 void
4590 mvpp2_cls_init(struct mvpp2_softc *sc)
4591 {
4592 	struct mvpp2_cls_lookup_entry le;
4593 	struct mvpp2_cls_flow_entry fe;
4594 	int index;
4595 
4596 	mvpp2_write(sc, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
4597 	memset(&fe.data, 0, sizeof(fe.data));
4598 	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
4599 		fe.index = index;
4600 		mvpp2_cls_flow_write(sc, &fe);
4601 	}
4602 	le.data = 0;
4603 	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
4604 		le.lkpid = index;
4605 		le.way = 0;
4606 		mvpp2_cls_lookup_write(sc, &le);
4607 		le.way = 1;
4608 		mvpp2_cls_lookup_write(sc, &le);
4609 	}
4610 }
4611 
/*
 * Initialize the classifier for one port: clear the port's way
 * selection and install a lookup-ID entry that steers packets to the
 * port's first RX queue with the classification engines disabled.
 */
void
mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	uint32_t val;

	/* set way for the port */
	val = mvpp2_read(port->sc, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->sc_id);
	mvpp2_write(port->sc, MVPP2_CLS_PORT_WAY_REG, val);

	/*
	 * pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->sc_id;
	le.way = 0;
	le.data = 0;

	/* set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= (port->sc_id * 32);

	/* disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* update lookup ID table entry */
	mvpp2_cls_lookup_write(port->sc, &le);
}
4641 
/*
 * Program the RX queue used for oversize packets on this port.  The
 * queue number (the port's first queue, sc_id * 32) is split across
 * the low-bits register and the P2HQ register.  The port's bit in the
 * SWFWD port-control register is cleared (NOTE(review): presumably
 * disables software forwarding for the port -- confirm against the
 * datasheet).
 */
void
mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	uint32_t val;

	mvpp2_write(port->sc, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->sc_id),
	    (port->sc_id * 32) & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
	mvpp2_write(port->sc, MVPP2_CLS_SWFWD_P2HQ_REG(port->sc_id),
	    (port->sc_id * 32) >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS);
	val = mvpp2_read(port->sc, MVPP2_CLS_SWFWD_PCTRL_REG);
	val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->sc_id);
	mvpp2_write(port->sc, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
4655