xref: /openbsd-src/sys/dev/fdt/if_mvpp.c (revision 50857149ad9113f0addb6fdf5f88bf9894eb1d0e)
1 /*	$OpenBSD: if_mvpp.c,v 1.7 2020/06/26 21:02:36 patrick Exp $	*/
2 /*
3  * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
4  * Copyright (c) 2017, 2020 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 /*
19  * Copyright (C) 2016 Marvell International Ltd.
20  *
21  * Marvell BSD License Option
22  *
23  * If you received this File from Marvell, you may opt to use, redistribute
24  * and/or modify this File under the following licensing terms.
25  * Redistribution and use in source and binary forms, with or without
26  * modification, are permitted provided that the following conditions are met:
27  *
28  *   * Redistributions of source code must retain the above copyright notice,
29  *     this list of conditions and the following disclaimer.
30  *
31  *   * Redistributions in binary form must reproduce the above copyright
32  *     notice, this list of conditions and the following disclaimer in the
33  *     documentation and/or other materials provided with the distribution.
34  *
35  *   * Neither the name of Marvell nor the names of its contributors may be
36  *     used to endorse or promote products derived from this software without
37  *     specific prior written permission.
38  *
39  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
40  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
43  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
44  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
45  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
46  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
47  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
48  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
49  * POSSIBILITY OF SUCH DAMAGE.
50  */
51 
52 #include "bpfilter.h"
53 
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/device.h>
57 #include <sys/kernel.h>
58 #include <sys/malloc.h>
59 #include <sys/mbuf.h>
60 #include <sys/queue.h>
61 #include <sys/socket.h>
62 #include <sys/sockio.h>
63 #include <sys/timeout.h>
64 
65 #include <machine/bus.h>
66 #include <machine/fdt.h>
67 
68 #include <net/if.h>
69 #include <net/if_media.h>
70 
71 #include <dev/ofw/openfirm.h>
72 #include <dev/ofw/ofw_clock.h>
73 #include <dev/ofw/ofw_gpio.h>
74 #include <dev/ofw/ofw_misc.h>
75 #include <dev/ofw/ofw_pinctrl.h>
76 #include <dev/ofw/ofw_regulator.h>
77 #include <dev/ofw/fdt.h>
78 
79 #include <dev/mii/mii.h>
80 #include <dev/mii/miivar.h>
81 
82 #if NBPFILTER > 0
83 #include <net/bpf.h>
84 #endif
85 
86 #include <netinet/in.h>
87 #include <netinet/ip.h>
88 #include <netinet/if_ether.h>
89 
90 #include <netinet6/in6_var.h>
91 #include <netinet/ip6.h>
92 
93 #include <dev/fdt/if_mvppreg.h>
94 
/* One DMA-mapped packet buffer (used for both RX and TX slots). */
struct mvpp2_buf {
	bus_dmamap_t		mb_map;	/* DMA map for the buffer data */
	struct mbuf		*mb_m;	/* mbuf loaded into mb_map, or NULL */
};
99 
100 #define MVPP2_NTXDESC	512
101 #define MVPP2_NTXSEGS	16
102 #define MVPP2_NRXDESC	512
103 
/*
 * Software state for one hardware buffer manager (BM) pool: the DMA
 * memory backing the hardware pointer ring, the receive buffers that
 * populate it, and a software ring of free cookies.
 */
struct mvpp2_bm_pool {
	struct mvpp2_dmamem	*bm_mem;	/* hw pointer ring (2 u64 per buf) */
	struct mvpp2_buf	*rxbuf;		/* MVPP2_BM_SIZE receive buffers */
	uint32_t		*freelist;	/* cookies: (pool << 16) | index */
	int			free_prod;	/* freelist producer index */
	int			free_cons;	/* freelist consumer index */
};
111 
112 #define MVPP2_BM_SIZE		64
113 #define MVPP2_BM_POOL_PTR_ALIGN	128
114 #define MVPP2_BM_POOLS_NUM	8
115 #define MVPP2_BM_ALIGN		32
116 
/* Software state for one transmit queue. */
struct mvpp2_tx_queue {
	uint8_t			id;		/* physical (hw) queue id */
	uint8_t			log_id;		/* logical per-port queue id */
	struct mvpp2_dmamem	*ring;		/* TX descriptor ring memory */
	struct mvpp2_buf	*buf;		/* per-descriptor mbuf/map state */
	struct mvpp2_tx_desc	*descs;		/* KVA of the descriptor ring */
	int			prod;		/* next descriptor to fill */
	int			cnt;		/* descriptors currently in use */
	int			cons;		/* next descriptor to reclaim */

	uint32_t		done_pkts_coal;	/* tx-done packet coalescing level */
};
129 
/* Software state for one receive queue. */
struct mvpp2_rx_queue {
	uint8_t			id;		/* physical (hw) queue id */
	struct mvpp2_dmamem	*ring;		/* RX descriptor ring memory */
	struct mvpp2_rx_desc	*descs;		/* KVA of the descriptor ring */
	int			prod;		/* producer index */
	struct if_rxring	rxring;		/* rx ring accounting */
	int			cons;		/* consumer index */

	uint32_t		pkts_coal;	/* rx packet coalescing level */
	uint32_t		time_coal;	/* rx time coalescing level */
};
141 
/* A contiguous chunk of DMA-able memory with its map and KVA mapping. */
struct mvpp2_dmamem {
	bus_dmamap_t		mdm_map;	/* DMA map (dm_segs[0] = device addr) */
	bus_dma_segment_t	mdm_seg;	/* backing physical segment */
	size_t			mdm_size;	/* size of the allocation in bytes */
	caddr_t			mdm_kva;	/* kernel virtual mapping */
};
148 #define MVPP2_DMA_MAP(_mdm)	((_mdm)->mdm_map)
149 #define MVPP2_DMA_LEN(_mdm)	((_mdm)->mdm_size)
150 #define MVPP2_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
151 #define MVPP2_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
152 
struct mvpp2_port;
/*
 * Per-controller (mvppc) softc.  One packet processor instance serves
 * up to MVPP2_MAX_PORTS ports, which attach as child mvpp(4) devices.
 */
struct mvpp2_softc {
	struct device		sc_dev;
	int			sc_node;	/* OF node of the controller */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh_base;	/* packet processor registers */
	bus_space_handle_t	sc_ioh_iface;	/* interface (GOP) registers */
	bus_size_t		sc_iosize_base;
	bus_size_t		sc_iosize_iface;
	bus_dma_tag_t		sc_dmat;

	uint32_t		sc_tclk;	/* "pp_clk" frequency in Hz */

	struct mvpp2_bm_pool	*sc_bm_pools;	/* buffer manager pools */
	int			sc_npools;

	struct mvpp2_prs_shadow	*sc_prs_shadow;	/* shadow of parser TCAM state */
	uint8_t			*sc_prs_double_vlans;

	int			sc_aggr_ntxq;	/* number of aggregated TX queues */
	struct mvpp2_tx_queue	*sc_aggr_txqs;

	struct mvpp2_port	**sc_ports;
};
177 
/* Per-port (mvpp) softc; one network interface per packet processor port. */
struct mvpp2_port {
	struct device		sc_dev;
	struct mvpp2_softc	*sc;		/* parent controller */
	int			sc_node;	/* OF node of the port */
	bus_dma_tag_t		sc_dmat;
	int			sc_id;		/* port index within controller */
	int			sc_gop_id;	/* group-of-ports (GOP) index */

	struct arpcom		sc_ac;
#define sc_lladdr	sc_ac.ac_enaddr
	struct mii_data		sc_mii;
#define sc_media	sc_mii.mii_media
	struct mii_bus		*sc_mdio;
	char			sc_cur_lladdr[ETHER_ADDR_LEN];

	/* PHY connection type, from the "phy-mode" device tree property. */
	enum {
		PHY_MODE_XAUI,
		PHY_MODE_10GBASER,
		PHY_MODE_2500BASEX,
		PHY_MODE_1000BASEX,
		PHY_MODE_SGMII,
		PHY_MODE_RGMII,
		PHY_MODE_RGMII_ID,
		PHY_MODE_RGMII_RXID,
		PHY_MODE_RGMII_TXID,
	}			sc_phy_mode;
	int			sc_fixed_link;	/* fixed-link, no MII autoneg */
	int			sc_inband_status; /* use in-band link status */
	int			sc_link;	/* current link state */
	int			sc_phyloc;	/* PHY address on the MDIO bus */
	int			sc_sfp;		/* SFP cage handle, if any */

	int			sc_ntxq;	/* number of TX queues */
	int			sc_nrxq;	/* number of RX queues */

	struct mvpp2_tx_queue	*sc_txqs;
	struct mvpp2_rx_queue	*sc_rxqs;

	struct timeout		sc_tick;

	uint32_t		sc_tx_time_coal; /* tx-done time coalescing */
};
220 
221 #define MVPP2_MAX_PORTS		4
222 
/* Attach arguments handed from the controller to each child port. */
struct mvpp2_attach_args {
	int			ma_node;	/* OF node of the port */
	bus_dma_tag_t		ma_dmat;	/* controller's DMA tag */
};
227 
228 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
229 
230 static struct rwlock mvpp2_sff_lock = RWLOCK_INITIALIZER("mvpp2sff");
231 
232 int	mvpp2_match(struct device *, void *, void *);
233 void	mvpp2_attach(struct device *, struct device *, void *);
234 void	mvpp2_attach_deferred(struct device *);
235 
236 struct cfattach mvppc_ca = {
237 	sizeof(struct mvpp2_softc), mvpp2_match, mvpp2_attach
238 };
239 
240 struct cfdriver mvppc_cd = {
241 	NULL, "mvppc", DV_DULL
242 };
243 
244 int	mvpp2_port_match(struct device *, void *, void *);
245 void	mvpp2_port_attach(struct device *, struct device *, void *);
246 
247 struct cfattach mvpp_ca = {
248 	sizeof(struct mvpp2_port), mvpp2_port_match, mvpp2_port_attach
249 };
250 
251 struct cfdriver mvpp_cd = {
252 	NULL, "mvpp", DV_IFNET
253 };
254 
255 uint32_t mvpp2_read(struct mvpp2_softc *, bus_addr_t);
256 void	mvpp2_write(struct mvpp2_softc *, bus_addr_t, uint32_t);
257 uint32_t mvpp2_gmac_read(struct mvpp2_port *, bus_addr_t);
258 void	mvpp2_gmac_write(struct mvpp2_port *, bus_addr_t, uint32_t);
259 uint32_t mvpp2_xlg_read(struct mvpp2_port *, bus_addr_t);
260 void	mvpp2_xlg_write(struct mvpp2_port *, bus_addr_t, uint32_t);
261 uint32_t mvpp2_xpcs_read(struct mvpp2_port *, bus_addr_t);
262 void	mvpp2_xpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
263 uint32_t mvpp2_mpcs_read(struct mvpp2_port *, bus_addr_t);
264 void	mvpp2_mpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
265 
266 int	mvpp2_ioctl(struct ifnet *, u_long, caddr_t);
267 void	mvpp2_start(struct ifnet *);
268 int	mvpp2_rxrinfo(struct mvpp2_port *, struct if_rxrinfo *);
269 void	mvpp2_watchdog(struct ifnet *);
270 
271 int	mvpp2_media_change(struct ifnet *);
272 void	mvpp2_media_status(struct ifnet *, struct ifmediareq *);
273 
274 int	mvpp2_mii_readreg(struct device *, int, int);
275 void	mvpp2_mii_writereg(struct device *, int, int, int);
276 void	mvpp2_mii_statchg(struct device *);
277 void	mvpp2_inband_statchg(struct mvpp2_port *);
278 void	mvpp2_port_change(struct mvpp2_port *);
279 
280 void	mvpp2_tick(void *);
281 void	mvpp2_rxtick(void *);
282 
283 int	mvpp2_link_intr(void *);
284 int	mvpp2_intr(void *);
285 void	mvpp2_tx_proc(struct mvpp2_port *, uint8_t);
286 void	mvpp2_txq_proc(struct mvpp2_port *, struct mvpp2_tx_queue *);
287 void	mvpp2_rx_proc(struct mvpp2_port *, uint8_t);
288 void	mvpp2_rxq_proc(struct mvpp2_port *, struct mvpp2_rx_queue *);
289 void	mvpp2_rx_refill(struct mvpp2_port *);
290 
291 void	mvpp2_up(struct mvpp2_port *);
292 void	mvpp2_down(struct mvpp2_port *);
293 void	mvpp2_iff(struct mvpp2_port *);
294 int	mvpp2_encap(struct mvpp2_port *, struct mbuf *, int *);
295 
296 void	mvpp2_aggr_txq_hw_init(struct mvpp2_softc *, struct mvpp2_tx_queue *);
297 void	mvpp2_txq_hw_init(struct mvpp2_port *, struct mvpp2_tx_queue *);
298 void	mvpp2_rxq_hw_init(struct mvpp2_port *, struct mvpp2_rx_queue *);
299 void	mvpp2_txq_hw_deinit(struct mvpp2_port *, struct mvpp2_tx_queue *);
300 void	mvpp2_rxq_hw_deinit(struct mvpp2_port *, struct mvpp2_rx_queue *);
301 void	mvpp2_rxq_long_pool_set(struct mvpp2_port *, int, int);
302 void	mvpp2_rxq_short_pool_set(struct mvpp2_port *, int, int);
303 
304 void	mvpp2_mac_config(struct mvpp2_port *);
305 void	mvpp2_xlg_config(struct mvpp2_port *);
306 void	mvpp2_gmac_config(struct mvpp2_port *);
307 
308 struct mvpp2_dmamem *
309 	mvpp2_dmamem_alloc(struct mvpp2_softc *, bus_size_t, bus_size_t);
310 void	mvpp2_dmamem_free(struct mvpp2_softc *, struct mvpp2_dmamem *);
311 struct mbuf *mvpp2_alloc_mbuf(struct mvpp2_softc *, bus_dmamap_t);
312 void	mvpp2_fill_rx_ring(struct mvpp2_softc *);
313 
314 void	mvpp2_interrupts_enable(struct mvpp2_port *, int);
315 void	mvpp2_interrupts_disable(struct mvpp2_port *, int);
316 int	mvpp2_egress_port(struct mvpp2_port *);
317 int	mvpp2_txq_phys(int, int);
318 void	mvpp2_defaults_set(struct mvpp2_port *);
319 void	mvpp2_ingress_enable(struct mvpp2_port *);
320 void	mvpp2_ingress_disable(struct mvpp2_port *);
321 void	mvpp2_egress_enable(struct mvpp2_port *);
322 void	mvpp2_egress_disable(struct mvpp2_port *);
323 void	mvpp2_port_enable(struct mvpp2_port *);
324 void	mvpp2_port_disable(struct mvpp2_port *);
325 void	mvpp2_rxq_status_update(struct mvpp2_port *, int, int, int);
326 int	mvpp2_rxq_received(struct mvpp2_port *, int);
327 void	mvpp2_rxq_offset_set(struct mvpp2_port *, int, int);
328 void	mvpp2_txp_max_tx_size_set(struct mvpp2_port *);
329 void	mvpp2_rx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
330 	    uint32_t);
331 void	mvpp2_tx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_tx_queue *,
332 	    uint32_t);
333 void	mvpp2_rx_time_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
334 	    uint32_t);
335 void	mvpp2_tx_time_coal_set(struct mvpp2_port *, uint32_t);
336 
337 void	mvpp2_axi_config(struct mvpp2_softc *);
338 void	mvpp2_bm_pool_init(struct mvpp2_softc *);
339 void	mvpp2_rx_fifo_init(struct mvpp2_softc *);
340 void	mvpp2_tx_fifo_init(struct mvpp2_softc *);
341 int	mvpp2_prs_default_init(struct mvpp2_softc *);
342 void	mvpp2_prs_hw_inv(struct mvpp2_softc *, int);
343 void	mvpp2_prs_hw_port_init(struct mvpp2_softc *, int, int, int, int);
344 void	mvpp2_prs_def_flow_init(struct mvpp2_softc *);
345 void	mvpp2_prs_mh_init(struct mvpp2_softc *);
346 void	mvpp2_prs_mac_init(struct mvpp2_softc *);
347 void	mvpp2_prs_dsa_init(struct mvpp2_softc *);
348 int	mvpp2_prs_etype_init(struct mvpp2_softc *);
349 int	mvpp2_prs_vlan_init(struct mvpp2_softc *);
350 int	mvpp2_prs_pppoe_init(struct mvpp2_softc *);
351 int	mvpp2_prs_ip6_init(struct mvpp2_softc *);
352 int	mvpp2_prs_ip4_init(struct mvpp2_softc *);
353 void	mvpp2_prs_shadow_ri_set(struct mvpp2_softc *, int,
354 	    uint32_t, uint32_t);
355 void	mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *, uint32_t);
356 void	mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *, uint32_t, int);
357 void	mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *, uint32_t);
358 uint32_t mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *);
359 void	mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *, uint32_t,
360 	    uint8_t, uint8_t);
361 void	mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *, uint32_t,
362 	    uint8_t *, uint8_t *);
363 int	mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *, int, uint16_t);
364 void	mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
365 int	mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *);
366 int	mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *);
367 void	mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *, uint32_t,
368 	    uint32_t *, uint32_t *);
369 void	mvpp2_prs_match_etype(struct mvpp2_prs_entry *, uint32_t, uint16_t);
370 int	mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *);
371 void	mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
372 void	mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
373 void	mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *, uint32_t, uint32_t);
374 void	mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *, uint32_t, uint32_t);
375 void	mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *, int, uint32_t);
376 void	mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *, uint32_t, int,
377 	    uint32_t);
378 void	mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *, uint32_t);
379 void	mvpp2_prs_shadow_set(struct mvpp2_softc *, int, uint32_t);
380 int	mvpp2_prs_hw_write(struct mvpp2_softc *, struct mvpp2_prs_entry *);
381 int	mvpp2_prs_hw_read(struct mvpp2_softc *, struct mvpp2_prs_entry *);
382 struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2_softc *, int);
383 int	mvpp2_prs_tcam_first_free(struct mvpp2_softc *, uint8_t, uint8_t);
384 void	mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *, uint32_t, int);
385 void	mvpp2_prs_mac_promisc_set(struct mvpp2_softc *, uint32_t, int);
386 void	mvpp2_prs_mac_multi_set(struct mvpp2_softc *, uint32_t, uint32_t, int);
387 void	mvpp2_prs_dsa_tag_set(struct mvpp2_softc *, uint32_t, int, int, int);
388 void	mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *, uint32_t,
389 	    int, int, int);
390 struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2_softc *, uint16_t,
391 	    int);
392 int	mvpp2_prs_vlan_add(struct mvpp2_softc *, uint16_t, int, uint32_t);
393 int	mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *);
394 struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2_softc *,
395 	    uint16_t, uint16_t);
396 int	mvpp2_prs_double_vlan_add(struct mvpp2_softc *, uint16_t, uint16_t,
397 	    uint32_t);
398 int	mvpp2_prs_ip4_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
399 int	mvpp2_prs_ip4_cast(struct mvpp2_softc *, uint16_t);
400 int	mvpp2_prs_ip6_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
401 int	mvpp2_prs_ip6_cast(struct mvpp2_softc *, uint16_t);
402 struct mvpp2_prs_entry *mvpp2_prs_mac_da_range_find(struct mvpp2_softc *, int,
403 	    const uint8_t *, uint8_t *, int);
404 int	mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *, const uint8_t *,
405 	    uint8_t *);
406 int	mvpp2_prs_mac_da_accept(struct mvpp2_softc *, int, const uint8_t *, int);
407 int	mvpp2_prs_tag_mode_set(struct mvpp2_softc *, int, int);
408 int	mvpp2_prs_def_flow(struct mvpp2_port *);
409 void	mvpp2_cls_flow_write(struct mvpp2_softc *, struct mvpp2_cls_flow_entry *);
410 void	mvpp2_cls_lookup_write(struct mvpp2_softc *, struct mvpp2_cls_lookup_entry *);
411 void	mvpp2_cls_init(struct mvpp2_softc *);
412 void	mvpp2_cls_port_config(struct mvpp2_port *);
413 void	mvpp2_cls_oversize_rxq_set(struct mvpp2_port *);
414 
415 int
416 mvpp2_match(struct device *parent, void *cfdata, void *aux)
417 {
418 	struct fdt_attach_args *faa = aux;
419 
420 	return OF_is_compatible(faa->fa_node, "marvell,armada-7k-pp22");
421 }
422 
423 void
424 mvpp2_attach(struct device *parent, struct device *self, void *aux)
425 {
426 	struct mvpp2_softc *sc = (void *)self;
427 	struct fdt_attach_args *faa = aux;
428 
429 	if (faa->fa_nreg < 2) {
430 		printf(": no registers\n");
431 		return;
432 	}
433 
434 	sc->sc_node = faa->fa_node;
435 	sc->sc_iot = faa->fa_iot;
436 	sc->sc_dmat = faa->fa_dmat;
437 
438 	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
439 	    faa->fa_reg[0].size, 0, &sc->sc_ioh_base)) {
440 		printf(": can't map registers\n");
441 		return;
442 	}
443 	sc->sc_iosize_base = faa->fa_reg[0].size;
444 
445 	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
446 	    faa->fa_reg[1].size, 0, &sc->sc_ioh_iface)) {
447 		printf(": can't map registers\n");
448 		bus_space_unmap(sc->sc_iot, sc->sc_ioh_base,
449 		    sc->sc_iosize_base);
450 		return;
451 	}
452 	sc->sc_iosize_iface = faa->fa_reg[1].size;
453 
454 	clock_enable_all(faa->fa_node);
455 	sc->sc_tclk = clock_get_frequency(faa->fa_node, "pp_clk");
456 
457 	printf("\n");
458 
459 	config_defer(self, mvpp2_attach_deferred);
460 }
461 
/*
 * Deferred part of attach (via config_defer): one-time controller setup
 * — AXI attributes, aggregated TX queues, RX/TX FIFO carving, buffer
 * manager pools, parser and classifier defaults — then attach one
 * mvpp(4) port per child device tree node.
 */
void
mvpp2_attach_deferred(struct device *self)
{
	struct mvpp2_softc *sc = (void *)self;
	struct mvpp2_attach_args maa;
	struct mvpp2_tx_queue *txq;
	int i, node;

	mvpp2_axi_config(sc);

	/* A single aggregated TX queue, shared by all ports. */
	sc->sc_aggr_ntxq = 1;
	sc->sc_aggr_txqs = mallocarray(sc->sc_aggr_ntxq,
	    sizeof(*sc->sc_aggr_txqs), M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->sc_aggr_ntxq; i++) {
		txq = &sc->sc_aggr_txqs[i];
		txq->id = i;
		mvpp2_aggr_txq_hw_init(sc, txq);
	}

	mvpp2_rx_fifo_init(sc);
	mvpp2_tx_fifo_init(sc);

	/* NOTE(review): register name suggests this enables TX snooping. */
	mvpp2_write(sc, MVPP2_TX_SNOOP_REG, 0x1);

	mvpp2_bm_pool_init(sc);

	/* Software shadow of the parser TCAM/SRAM contents. */
	sc->sc_prs_shadow = mallocarray(MVPP2_PRS_TCAM_SRAM_SIZE,
	    sizeof(*sc->sc_prs_shadow), M_DEVBUF, M_WAITOK | M_ZERO);

	mvpp2_prs_default_init(sc);
	mvpp2_cls_init(sc);

	/* Attach a port device for each child node of the controller. */
	memset(&maa, 0, sizeof(maa));
	for (node = OF_child(sc->sc_node); node; node = OF_peer(node)) {
		maa.ma_node = node;
		maa.ma_dmat = sc->sc_dmat;
		config_found(self, &maa, NULL);
	}
}
502 
/*
 * Program the AXI attributes and address decoding codes used for the
 * controller's DMA accesses (cacheability and coherency domain per
 * traffic class, per the MVPP22_AXI_* register names).
 */
void
mvpp2_axi_config(struct mvpp2_softc *sc)
{
	uint32_t reg;

	mvpp2_write(sc, MVPP22_BM_PHY_VIRT_HIGH_RLS_REG, 0);

	/* Write attributes: BM, TXQ/RXQ descriptors and RX data. */
	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_BM_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_RX_DATA_WR_ATTR_REG, reg);

	/* Read attributes: BM, aggregated/TXQ descriptors and TX data. */
	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_BM_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TX_DATA_RD_ATTR_REG, reg);

	/* Normal (non-snooped) accesses: non-cacheable, system domain. */
	reg = (MVPP22_AXI_CODE_CACHE_NON_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_SYSTEM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_RD_NORMAL_CODE_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_WR_NORMAL_CODE_REG, reg);

	/* Snooped reads: cacheable, outer-shareable domain. */
	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_RD_SNOOP_CODE_REG, reg);

	/* Snooped writes: cacheable, outer-shareable domain. */
	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_WR_SNOOP_CODE_REG, reg);
}
537 
538 void
539 mvpp2_bm_pool_init(struct mvpp2_softc *sc)
540 {
541 	struct mvpp2_bm_pool *bm;
542 	struct mvpp2_buf *rxb;
543 	uint64_t phys, virt;
544 	int i, j;
545 
546 	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
547 		mvpp2_write(sc, MVPP2_BM_INTR_MASK_REG(i), 0);
548 		mvpp2_write(sc, MVPP2_BM_INTR_CAUSE_REG(i), 0);
549 	}
550 
551 	sc->sc_npools = ncpus;
552 	sc->sc_npools = min(sc->sc_npools, MVPP2_BM_POOLS_NUM);
553 
554 	sc->sc_bm_pools = mallocarray(sc->sc_npools, sizeof(*sc->sc_bm_pools),
555 	    M_DEVBUF, M_WAITOK | M_ZERO);
556 
557 	for (i = 0; i < sc->sc_npools; i++) {
558 		bm = &sc->sc_bm_pools[i];
559 		bm->bm_mem = mvpp2_dmamem_alloc(sc,
560 		    MVPP2_BM_SIZE * sizeof(uint64_t) * 2,
561 		    MVPP2_BM_POOL_PTR_ALIGN);
562 		memset(MVPP2_DMA_KVA(bm->bm_mem), 0, MVPP2_DMA_LEN(bm->bm_mem));
563 		bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(bm->bm_mem), 0,
564 		    MVPP2_DMA_LEN(bm->bm_mem),
565 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
566 
567 		mvpp2_write(sc, MVPP2_BM_POOL_BASE_REG(i),
568 		    (uint64_t)MVPP2_DMA_KVA(bm->bm_mem) & 0xffffffff);
569 		mvpp2_write(sc, MVPP22_BM_POOL_BASE_HIGH_REG,
570 		    ((uint64_t)MVPP2_DMA_KVA(bm->bm_mem) >> 32)
571 		    & MVPP22_BM_POOL_BASE_HIGH_MASK);
572 		mvpp2_write(sc, MVPP2_BM_POOL_SIZE_REG(i),
573 		    MVPP2_BM_SIZE);
574 
575 		mvpp2_write(sc, MVPP2_BM_POOL_CTRL_REG(i),
576 		    mvpp2_read(sc, MVPP2_BM_POOL_CTRL_REG(i)) |
577 		    MVPP2_BM_START_MASK);
578 
579 		mvpp2_write(sc, MVPP2_POOL_BUF_SIZE_REG(i),
580 		    roundup(MCLBYTES, 1 << MVPP2_POOL_BUF_SIZE_OFFSET));
581 
582 		bm->rxbuf = mallocarray(MVPP2_BM_SIZE, sizeof(struct mvpp2_buf),
583 		    M_DEVBUF, M_WAITOK);
584 		bm->freelist = mallocarray(MVPP2_BM_SIZE, sizeof(*bm->freelist),
585 		    M_DEVBUF, M_WAITOK | M_ZERO);
586 
587 		for (j = 0; j < MVPP2_BM_SIZE; j++) {
588 			rxb = &bm->rxbuf[j];
589 			bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
590 			    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->mb_map);
591 			rxb->mb_m = NULL;
592 		}
593 
594 		/* Use pool-id and rxbuf index as cookie. */
595 		for (j = 0; j < MVPP2_BM_SIZE; j++)
596 			bm->freelist[j] = (i << 16) | (j << 0);
597 
598 		for (j = 0; j < MVPP2_BM_SIZE; j++) {
599 			rxb = &bm->rxbuf[j];
600 			rxb->mb_m = mvpp2_alloc_mbuf(sc, rxb->mb_map);
601 			if (rxb->mb_m == NULL)
602 				break;
603 
604 			KASSERT(bm->freelist[bm->free_cons] != -1);
605 			virt = bm->freelist[bm->free_cons];
606 			bm->freelist[bm->free_cons] = -1;
607 			bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;
608 
609 			phys = rxb->mb_map->dm_segs[0].ds_addr;
610 			mvpp2_write(sc, MVPP22_BM_PHY_VIRT_HIGH_RLS_REG,
611 			    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
612 			    << MVPP22_BM_VIRT_HIGH_RLS_OFFST) |
613 			    (((phys >> 32) & MVPP22_ADDR_HIGH_MASK)
614 			    << MVPP22_BM_PHY_HIGH_RLS_OFFSET));
615 			mvpp2_write(sc, MVPP2_BM_VIRT_RLS_REG,
616 			    virt & 0xffffffff);
617 			mvpp2_write(sc, MVPP2_BM_PHY_RLS_REG(i),
618 			    phys & 0xffffffff);
619 		}
620 	}
621 }
622 
623 void
624 mvpp2_rx_fifo_init(struct mvpp2_softc *sc)
625 {
626 	int i;
627 
628 	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
629 	    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
630 	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
631 	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
632 
633 	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
634 	    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
635 	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
636 	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
637 
638 	for (i = 2; i < MVPP2_MAX_PORTS; i++) {
639 		mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(i),
640 		    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
641 		mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(i),
642 		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
643 	}
644 
645 	mvpp2_write(sc, MVPP2_RX_MIN_PKT_SIZE_REG, MVPP2_RX_FIFO_PORT_MIN_PKT);
646 	mvpp2_write(sc, MVPP2_RX_FIFO_INIT_REG, 0x1);
647 }
648 
649 void
650 mvpp2_tx_fifo_init(struct mvpp2_softc *sc)
651 {
652 	int i;
653 
654 	mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(0),
655 	    MVPP22_TX_FIFO_DATA_SIZE_10KB);
656 	mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(0),
657 	    MVPP2_TX_FIFO_THRESHOLD_10KB);
658 
659 	for (i = 1; i < MVPP2_MAX_PORTS; i++) {
660 		mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(i),
661 		    MVPP22_TX_FIFO_DATA_SIZE_3KB);
662 		mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(i),
663 		    MVPP2_TX_FIFO_THRESHOLD_3KB);
664 	}
665 }
666 
667 int
668 mvpp2_prs_default_init(struct mvpp2_softc *sc)
669 {
670 	int i, j, ret;
671 
672 	mvpp2_write(sc, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
673 
674 	for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
675 		mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, i);
676 		for (j = 0; j < MVPP2_PRS_TCAM_WORDS; j++)
677 			mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(j), 0);
678 
679 		mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, i);
680 		for (j = 0; j < MVPP2_PRS_SRAM_WORDS; j++)
681 			mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(j), 0);
682 	}
683 
684 	for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++)
685 		mvpp2_prs_hw_inv(sc, i);
686 
687 	for (i = 0; i < MVPP2_MAX_PORTS; i++)
688 		mvpp2_prs_hw_port_init(sc, i, MVPP2_PRS_LU_MH,
689 		    MVPP2_PRS_PORT_LU_MAX, 0);
690 
691 	mvpp2_prs_def_flow_init(sc);
692 	mvpp2_prs_mh_init(sc);
693 	mvpp2_prs_mac_init(sc);
694 	mvpp2_prs_dsa_init(sc);
695 	ret = mvpp2_prs_etype_init(sc);
696 	if (ret)
697 		return ret;
698 	ret = mvpp2_prs_vlan_init(sc);
699 	if (ret)
700 		return ret;
701 	ret = mvpp2_prs_pppoe_init(sc);
702 	if (ret)
703 		return ret;
704 	ret = mvpp2_prs_ip6_init(sc);
705 	if (ret)
706 		return ret;
707 	ret = mvpp2_prs_ip4_init(sc);
708 	if (ret)
709 		return ret;
710 
711 	return 0;
712 }
713 
/*
 * Invalidate parser TCAM entry `index' by setting its invalid bit.
 */
void
mvpp2_prs_hw_inv(struct mvpp2_softc *sc, int index)
{
	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
	    MVPP2_PRS_TCAM_INV_MASK);
}
721 
722 void
723 mvpp2_prs_hw_port_init(struct mvpp2_softc *sc, int port,
724     int lu_first, int lu_max, int offset)
725 {
726 	uint32_t reg;
727 
728 	reg = mvpp2_read(sc, MVPP2_PRS_INIT_LOOKUP_REG);
729 	reg &= ~MVPP2_PRS_PORT_LU_MASK(port);
730 	reg |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
731 	mvpp2_write(sc, MVPP2_PRS_INIT_LOOKUP_REG, reg);
732 
733 	reg = mvpp2_read(sc, MVPP2_PRS_MAX_LOOP_REG(port));
734 	reg &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
735 	reg |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
736 	mvpp2_write(sc, MVPP2_PRS_MAX_LOOP_REG(port), reg);
737 
738 	reg = mvpp2_read(sc, MVPP2_PRS_INIT_OFFS_REG(port));
739 	reg &= ~MVPP2_PRS_INIT_OFF_MASK(port);
740 	reg |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
741 	mvpp2_write(sc, MVPP2_PRS_INIT_OFFS_REG(port), reg);
742 }
743 
744 void
745 mvpp2_prs_def_flow_init(struct mvpp2_softc *sc)
746 {
747 	struct mvpp2_prs_entry pe;
748 	int i;
749 
750 	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
751 		memset(&pe, 0, sizeof(pe));
752 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
753 		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - i;
754 		mvpp2_prs_tcam_port_map_set(&pe, 0);
755 		mvpp2_prs_sram_ai_update(&pe, i, MVPP2_PRS_FLOW_ID_MASK);
756 		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
757 		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_FLOWS);
758 		mvpp2_prs_hw_write(sc, &pe);
759 	}
760 }
761 
/*
 * Default entry for the Marvell header (MH) stage: skip MVPP2_MH_SIZE
 * bytes at the start of the frame and continue with the MAC lookup,
 * for all ports.
 */
void
mvpp2_prs_mh_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));
	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(sc, &pe);
}
777 
/*
 * Default entries for the MAC lookup stage.  The non-promiscuous
 * default drops any frame that no more specific MAC entry matched,
 * then the drop-all, promiscuous and multicast helper entries are
 * created.  NOTE(review): assumes the trailing 0 argument leaves those
 * helper entries disabled initially — confirm in the helpers.
 */
void
mvpp2_prs_mac_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
	/* Unmatched unicast: drop. */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
	    MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(sc, &pe);
	mvpp2_prs_mac_drop_all_set(sc, 0, 0);
	mvpp2_prs_mac_promisc_set(sc, 0, 0);
	mvpp2_prs_mac_multi_set(sc, MVPP2_PE_MAC_MC_ALL, 0, 0);
	mvpp2_prs_mac_multi_set(sc, MVPP2_PE_MAC_MC_IP6, 0, 0);
}
798 
/*
 * Install the parser entries for DSA and EDSA tagged/untagged frames,
 * plus a default entry that continues with the VLAN lookup (with no
 * shift) when no DSA tag was matched.
 */
void
mvpp2_prs_dsa_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	/* Default: no DSA tag found, fall through to VLAN parsing. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* NOTE(review): shadow recorded as LU_MAC, not LU_DSA — confirm intent. */
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_hw_write(sc, &pe);
}
822 
/*
 * Populate the header parser with lookup entries keyed on ethertype:
 * PPPoE, ARP, LBTD, IPv4 (with and without options), IPv6, plus a
 * catch-all for unknown ethertypes.  Returns 0 on success or a
 * negative value when no free TCAM entry is available.
 */
int
mvpp2_prs_etype_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, MV_ETH_P_PPP_SES);
	/* Skip the PPPoE header and continue parsing at the PPPOE LU. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, MV_ETH_P_ARP);
	/* ARP ends L3 parsing: generate the lookup-done bit. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Mark as special RX so the classifier can redirect it. */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, MV_ETH_P_IP);
	/* Also match version 4 / IHL 5 in the first IP header byte. */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	pe.index = tid;

	/*
	 * Deliberately no memset here: this entry inherits the previous
	 * IPv4 entry, then relaxes the match to the version nibble only
	 * (IHL unchecked) and rewrites the result info to "IP4 options".
	 */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD, MVPP2_PRS_IPV4_HEAD_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, MV_ETH_P_IPV6);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
	    MVPP2_MAX_L3_ADDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
987 
988 int
989 mvpp2_prs_vlan_init(struct mvpp2_softc *sc)
990 {
991 	struct mvpp2_prs_entry pe;
992 	int ret;
993 
994 	sc->sc_prs_double_vlans = mallocarray(MVPP2_PRS_DBL_VLANS_MAX,
995 	    sizeof(*sc->sc_prs_double_vlans), M_DEVBUF, M_WAITOK | M_ZERO);
996 
997 	ret = mvpp2_prs_double_vlan_add(sc, MV_ETH_P_8021Q, MV_ETH_P_8021AD,
998 	    MVPP2_PRS_PORT_MASK);
999 	if (ret)
1000 		return ret;
1001 	ret = mvpp2_prs_double_vlan_add(sc, MV_ETH_P_8021Q, MV_ETH_P_8021Q,
1002 	    MVPP2_PRS_PORT_MASK);
1003 	if (ret)
1004 		return ret;
1005 	ret = mvpp2_prs_vlan_add(sc, MV_ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
1006 	    MVPP2_PRS_PORT_MASK);
1007 	if (ret)
1008 		return ret;
1009 	ret = mvpp2_prs_vlan_add(sc, MV_ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
1010 	    MVPP2_PRS_PORT_MASK);
1011 	if (ret)
1012 		return ret;
1013 
1014 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1015 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1016 	pe.index = MVPP2_PE_VLAN_DBL;
1017 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1018 	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1019 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
1020 	    MVPP2_PRS_RI_VLAN_MASK);
1021 	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
1022 	    MVPP2_PRS_DBL_VLAN_AI_BIT);
1023 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1024 	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
1025 	mvpp2_prs_hw_write(sc, &pe);
1026 
1027 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1028 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1029 	pe.index = MVPP2_PE_VLAN_NONE;
1030 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1031 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK);
1032 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1033 	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
1034 	mvpp2_prs_hw_write(sc, &pe);
1035 
1036 	return 0;
1037 }
1038 
/*
 * Set up the PPPoE lookup-unit entries: PPP-encapsulated IPv4 (with
 * and without options), IPv6, and a default for any other PPP
 * protocol.  Returns 0 on success or a negative value when no free
 * TCAM entry is available.
 */
int
mvpp2_prs_pppoe_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* PPP protocol IP: treated as IPv4-with-options by default. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, MV_PPP_IP);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/*
	 * IPv4 without options: deliberately no memset — this entry
	 * inherits the previous one (same PPP protocol match), adds a
	 * version-4/IHL-5 check on the first IP byte, and rewrites the
	 * result info to plain IPv4.
	 */
	pe.index = tid;
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* PPP protocol IPv6. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, MV_PPP_IPV6);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: any other PPP protocol is an unknown L3. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1117 
/*
 * Set up the IPv6 lookup-unit entries: known L4 protocols (TCP, UDP,
 * ICMPv6, IP-in-IP), multicast addresses, a drop entry, and defaults
 * for unknown protocols/addresses.  Returns 0 on success or a
 * negative value on failure.
 */
int
mvpp2_prs_ip6_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid, ret;

	ret = mvpp2_prs_ip6_proto(sc, MV_IPPR_TCP, MVPP2_PRS_RI_L4_TCP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_proto(sc, MV_IPPR_UDP, MVPP2_PRS_RI_L4_UDP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	/* ICMPv6 is marked special so the classifier can redirect it. */
	ret = mvpp2_prs_ip6_proto(sc, MV_IPPR_ICMPV6,
	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_proto(sc, MV_IPPR_IPIP, MVPP2_PRS_RI_UDF7_IP6_LITE,
	    MVPP2_PRS_RI_UDF7_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
	if (ret)
		return ret;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/*
	 * Drop entry: result info is set to "unknown L3 + drop" when the
	 * matched header byte is zero (presumably the hop limit — mirrors
	 * the reference driver; confirm against the PPv2 spec).
	 */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe,
	    MVPP2_PRS_RI_L3_UN | MVPP2_PRS_RI_DROP_MASK,
	    MVPP2_PRS_RI_L3_PROTO_MASK | MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* NOTE(review): shadow LU is IP4 here; matches the reference
	 * driver, but verify this is intentional. */
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown L4 protocol (no extension headers). */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown L4 protocol behind extension headers. */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown address, classified as unicast; shift back
	 * and re-enter the IP6 lookup unit. */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1208 
1209 int
1210 mvpp2_prs_ip4_init(struct mvpp2_softc *sc)
1211 {
1212 	struct mvpp2_prs_entry pe;
1213 	int ret;
1214 
1215 	ret = mvpp2_prs_ip4_proto(sc, MV_IPPR_TCP, MVPP2_PRS_RI_L4_TCP,
1216 			MVPP2_PRS_RI_L4_PROTO_MASK);
1217 	if (ret)
1218 		return ret;
1219 	ret = mvpp2_prs_ip4_proto(sc, MV_IPPR_UDP, MVPP2_PRS_RI_L4_UDP,
1220 			MVPP2_PRS_RI_L4_PROTO_MASK);
1221 	if (ret)
1222 		return ret;
1223 	ret = mvpp2_prs_ip4_proto(sc, MV_IPPR_IGMP,
1224 	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1225 	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
1226 	if (ret)
1227 		return ret;
1228 	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_BROAD_CAST);
1229 	if (ret)
1230 		return ret;
1231 	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
1232 	if (ret)
1233 		return ret;
1234 
1235 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1236 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1237 	pe.index = MVPP2_PE_IP4_PROTO_UN;
1238 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1239 	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1240 	mvpp2_prs_sram_offset_set( &pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1241 	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1242 	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1243 	    MVPP2_PRS_IPV4_DIP_AI_BIT);
1244 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
1245 	    MVPP2_PRS_RI_L4_PROTO_MASK);
1246 	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1247 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1248 	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
1249 	mvpp2_prs_hw_write(sc, &pe);
1250 
1251 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1252 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1253 	pe.index = MVPP2_PE_IP4_ADDR_UN;
1254 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1255 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1256 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
1257 	    MVPP2_PRS_RI_L3_ADDR_MASK);
1258 	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1259 	    MVPP2_PRS_IPV4_DIP_AI_BIT);
1260 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1261 	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
1262 	mvpp2_prs_hw_write(sc, &pe);
1263 
1264 	return 0;
1265 }
1266 
1267 int
1268 mvpp2_port_match(struct device *parent, void *cfdata, void *aux)
1269 {
1270 	struct mvpp2_attach_args *maa = aux;
1271 	char buf[32];
1272 
1273 	if (OF_getprop(maa->ma_node, "status", buf, sizeof(buf)) > 0 &&
1274 	    strcmp(buf, "disabled") == 0)
1275 		return 0;
1276 
1277 	return 1;
1278 }
1279 
1280 void
1281 mvpp2_port_attach(struct device *parent, struct device *self, void *aux)
1282 {
1283 	struct mvpp2_port *sc = (void *)self;
1284 	struct mvpp2_attach_args *maa = aux;
1285 	struct mvpp2_tx_queue *txq;
1286 	struct mvpp2_rx_queue *rxq;
1287 	struct ifnet *ifp;
1288 	uint32_t phy, reg;
1289 	int i, idx, len, node;
1290 	char *phy_mode;
1291 	char *managed;
1292 
1293 	sc->sc = (void *)parent;
1294 	sc->sc_node = maa->ma_node;
1295 	sc->sc_dmat = maa->ma_dmat;
1296 
1297 	sc->sc_id = OF_getpropint(sc->sc_node, "port-id", 0);
1298 	sc->sc_gop_id = OF_getpropint(sc->sc_node, "gop-port-id", 0);
1299 	sc->sc_sfp = OF_getpropint(sc->sc_node, "sfp", 0);
1300 
1301 	len = OF_getproplen(sc->sc_node, "phy-mode");
1302 	if (len <= 0) {
1303 		printf("%s: cannot extract phy-mode\n", self->dv_xname);
1304 		return;
1305 	}
1306 
1307 	phy_mode = malloc(len, M_TEMP, M_WAITOK);
1308 	OF_getprop(sc->sc_node, "phy-mode", phy_mode, len);
1309 	if (!strncmp(phy_mode, "10gbase-kr", strlen("10gbase-kr")))
1310 		sc->sc_phy_mode = PHY_MODE_10GBASER;
1311 	else if (!strncmp(phy_mode, "2500base-x", strlen("2500base-x")))
1312 		sc->sc_phy_mode = PHY_MODE_2500BASEX;
1313 	else if (!strncmp(phy_mode, "1000base-x", strlen("1000base-x")))
1314 		sc->sc_phy_mode = PHY_MODE_1000BASEX;
1315 	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
1316 		sc->sc_phy_mode = PHY_MODE_SGMII;
1317 	else if (!strncmp(phy_mode, "rgmii-rxid", strlen("rgmii-rxid")))
1318 		sc->sc_phy_mode = PHY_MODE_RGMII_RXID;
1319 	else if (!strncmp(phy_mode, "rgmii-txid", strlen("rgmii-txid")))
1320 		sc->sc_phy_mode = PHY_MODE_RGMII_TXID;
1321 	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
1322 		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
1323 	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
1324 		sc->sc_phy_mode = PHY_MODE_RGMII;
1325 	else {
1326 		printf("%s: cannot use phy-mode %s\n", self->dv_xname,
1327 		    phy_mode);
1328 		return;
1329 	}
1330 	free(phy_mode, M_TEMP, len);
1331 
1332 	/* Lookup PHY. */
1333 	phy = OF_getpropint(sc->sc_node, "phy", 0);
1334 	if (phy) {
1335 		node = OF_getnodebyphandle(phy);
1336 		if (!node) {
1337 			printf(": no phy\n");
1338 			return;
1339 		}
1340 		sc->sc_mdio = mii_byphandle(phy);
1341 		sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
1342 		sc->sc_sfp = OF_getpropint(node, "sfp", sc->sc_sfp);
1343 	}
1344 
1345 	if ((len = OF_getproplen(sc->sc_node, "managed")) >= 0) {
1346 		managed = malloc(len, M_TEMP, M_WAITOK);
1347 		OF_getprop(sc->sc_node, "managed", managed, len);
1348 		if (!strncmp(managed, "in-band-status",
1349 		    strlen("in-band-status")))
1350 			sc->sc_inband_status = 1;
1351 		free(managed, M_TEMP, len);
1352 	}
1353 
1354 	if (OF_getprop(sc->sc_node, "local-mac-address",
1355 	    &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
1356 		memset(sc->sc_lladdr, 0xff, sizeof(sc->sc_lladdr));
1357 	printf(": address %s\n", ether_sprintf(sc->sc_lladdr));
1358 
1359 	sc->sc_ntxq = sc->sc_nrxq = 1;
1360 	sc->sc_txqs = mallocarray(sc->sc_ntxq, sizeof(*sc->sc_txqs),
1361 	    M_DEVBUF, M_WAITOK | M_ZERO);
1362 	sc->sc_rxqs = mallocarray(sc->sc_nrxq, sizeof(*sc->sc_rxqs),
1363 	    M_DEVBUF, M_WAITOK | M_ZERO);
1364 
1365 	for (i = 0; i < sc->sc_ntxq; i++) {
1366 		txq = &sc->sc_txqs[i];
1367 		txq->id = mvpp2_txq_phys(sc->sc_id, i);
1368 		txq->log_id = i;
1369 		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
1370 	}
1371 
1372 	sc->sc_tx_time_coal = MVPP2_TXDONE_COAL_USEC;
1373 
1374 	for (i = 0; i < sc->sc_nrxq; i++) {
1375 		rxq = &sc->sc_rxqs[i];
1376 		rxq->id = sc->sc_id * 32 + i;
1377 		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
1378 		rxq->time_coal = MVPP2_RX_COAL_USEC;
1379 	}
1380 
1381 	mvpp2_egress_disable(sc);
1382 	mvpp2_port_disable(sc);
1383 
1384 	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_GROUP_INDEX_REG,
1385 	    sc->sc_id << MVPP2_ISR_RXQ_GROUP_INDEX_GROUP_SHIFT |
1386 	    0 /* queue vector id */);
1387 	mvpp2_write(sc->sc, MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_REG,
1388 	    sc->sc_nrxq << MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_SIZE_SHIFT |
1389 	    0 /* first rxq */);
1390 
1391 	mvpp2_ingress_disable(sc);
1392 	mvpp2_defaults_set(sc);
1393 
1394 	mvpp2_cls_oversize_rxq_set(sc);
1395 	mvpp2_cls_port_config(sc);
1396 
1397 	/*
1398 	 * We have one pool per core, so all RX queues on a specific
1399 	 * core share that pool.  Also long and short uses the same
1400 	 * pool.
1401 	 */
1402 	for (i = 0; i < sc->sc_nrxq; i++) {
1403 		mvpp2_rxq_long_pool_set(sc, i, i);
1404 		mvpp2_rxq_short_pool_set(sc, i, i);
1405 	}
1406 
1407 	/* Reset Mac */
1408 	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
1409 	    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) |
1410 	    MVPP2_PORT_CTRL2_PORTMACRESET_MASK);
1411 	if (sc->sc_gop_id == 0) {
1412 		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG,
1413 		    mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
1414 		    ~MV_XLG_MAC_CTRL0_MACRESETN_MASK);
1415 		if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
1416 			reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
1417 			reg |= MVPP22_MPCS_CLK_DIV_PHASE_SET_MASK;
1418 			reg &= ~MVPP22_MPCS_TX_SD_CLK_RESET_MASK;
1419 			reg &= ~MVPP22_MPCS_RX_SD_CLK_RESET_MASK;
1420 			reg &= ~MVPP22_MPCS_MAC_CLK_RESET_MASK;
1421 			mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
1422 		} else if (sc->sc_phy_mode == PHY_MODE_XAUI)
1423 			mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG,
1424 			    mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG) &
1425 			    ~MVPP22_XPCS_PCSRESET);
1426 	}
1427 
1428 	timeout_set(&sc->sc_tick, mvpp2_tick, sc);
1429 
1430 	ifp = &sc->sc_ac.ac_if;
1431 	ifp->if_softc = sc;
1432 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1433 	ifp->if_ioctl = mvpp2_ioctl;
1434 	ifp->if_start = mvpp2_start;
1435 	ifp->if_watchdog = mvpp2_watchdog;
1436 	IFQ_SET_MAXLEN(&ifp->if_snd, MVPP2_NTXDESC - 1);
1437 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1438 
1439 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1440 
1441 	sc->sc_mii.mii_ifp = ifp;
1442 	sc->sc_mii.mii_readreg = mvpp2_mii_readreg;
1443 	sc->sc_mii.mii_writereg = mvpp2_mii_writereg;
1444 	sc->sc_mii.mii_statchg = mvpp2_mii_statchg;
1445 
1446 	ifmedia_init(&sc->sc_media, 0, mvpp2_media_change, mvpp2_media_status);
1447 
1448 	if (sc->sc_mdio) {
1449 		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
1450 		    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY, 0);
1451 		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1452 			printf("%s: no PHY found!\n", self->dv_xname);
1453 			ifmedia_add(&sc->sc_mii.mii_media,
1454 			    IFM_ETHER|IFM_MANUAL, 0, NULL);
1455 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
1456 		} else
1457 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1458 	} else {
1459 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
1460 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1461 
1462 		if (sc->sc_inband_status) {
1463 			mvpp2_inband_statchg(sc);
1464 		} else {
1465 			sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
1466 			sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
1467 			mvpp2_mii_statchg(self);
1468 		}
1469 
1470 		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
1471 		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
1472 	}
1473 
1474 	if_attach(ifp);
1475 	ether_ifattach(ifp);
1476 
1477 	if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
1478 	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
1479 	    sc->sc_phy_mode == PHY_MODE_SGMII ||
1480 	    sc->sc_phy_mode == PHY_MODE_RGMII ||
1481 	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
1482 	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
1483 	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
1484 		reg = mvpp2_gmac_read(sc, MV_GMAC_INTERRUPT_MASK_REG);
1485 		reg |= MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_MASK;
1486 		mvpp2_gmac_write(sc, MV_GMAC_INTERRUPT_MASK_REG, reg);
1487 		reg = mvpp2_gmac_read(sc, MV_GMAC_INTERRUPT_SUM_MASK_REG);
1488 		reg |= MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_MASK;
1489 		mvpp2_gmac_write(sc, MV_GMAC_INTERRUPT_SUM_MASK_REG, reg);
1490 	}
1491 
1492 	if (sc->sc_gop_id == 0) {
1493 		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_MASK_REG);
1494 		reg |= MV_XLG_INTERRUPT_LINK_CHANGE_MASK;
1495 		mvpp2_xlg_write(sc, MV_XLG_INTERRUPT_MASK_REG, reg);
1496 		reg = mvpp2_xlg_read(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
1497 		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG_MASK;
1498 		reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG_MASK;
1499 		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
1500 		    sc->sc_phy_mode == PHY_MODE_XAUI)
1501 			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG_MASK;
1502 		else
1503 			reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG_MASK;
1504 		mvpp2_xlg_write(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg);
1505 	}
1506 
1507 	idx = OF_getindex(sc->sc_node, "link", "interrupt-names");
1508 	if (idx >= 0)
1509 		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1510 		    mvpp2_link_intr, sc, sc->sc_dev.dv_xname);
1511 	idx = OF_getindex(sc->sc_node, "hif0", "interrupt-names");
1512 	if (idx < 0)
1513 		idx = OF_getindex(sc->sc_node, "tx-cpu0", "interrupt-names");
1514 	if (idx >= 0)
1515 		fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1516 		    mvpp2_intr, sc, sc->sc_dev.dv_xname);
1517 }
1518 
1519 uint32_t
1520 mvpp2_read(struct mvpp2_softc *sc, bus_addr_t addr)
1521 {
1522 	return bus_space_read_4(sc->sc_iot, sc->sc_ioh_base, addr);
1523 }
1524 
/* Write a 32-bit register in the packet processor's base space. */
void
mvpp2_write(struct mvpp2_softc *sc, bus_addr_t addr, uint32_t data)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh_base, addr, data);
}
1530 
1531 uint32_t
1532 mvpp2_gmac_read(struct mvpp2_port *sc, bus_addr_t addr)
1533 {
1534 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1535 	    MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr);
1536 }
1537 
1538 void
1539 mvpp2_gmac_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1540 {
1541 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1542 	    MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr,
1543 	    data);
1544 }
1545 
1546 uint32_t
1547 mvpp2_xlg_read(struct mvpp2_port *sc, bus_addr_t addr)
1548 {
1549 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1550 	    MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr);
1551 }
1552 
1553 void
1554 mvpp2_xlg_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1555 {
1556 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1557 	    MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr,
1558 	    data);
1559 }
1560 
1561 uint32_t
1562 mvpp2_mpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
1563 {
1564 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1565 	    MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr);
1566 }
1567 
1568 void
1569 mvpp2_mpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1570 {
1571 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1572 	    MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr,
1573 	    data);
1574 }
1575 
1576 uint32_t
1577 mvpp2_xpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
1578 {
1579 	return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1580 	    MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr);
1581 }
1582 
1583 void
1584 mvpp2_xpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1585 {
1586 	bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1587 	    MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr,
1588 	    data);
1589 }
1590 
1591 void
1592 mvpp2_start(struct ifnet *ifp)
1593 {
1594 	struct mvpp2_port *sc = ifp->if_softc;
1595 	struct mvpp2_tx_queue *txq = &sc->sc->sc_aggr_txqs[0];
1596 	struct mbuf *m;
1597 	int error, idx;
1598 
1599 	if (!(ifp->if_flags & IFF_RUNNING))
1600 		return;
1601 	if (ifq_is_oactive(&ifp->if_snd))
1602 		return;
1603 	if (IFQ_IS_EMPTY(&ifp->if_snd))
1604 		return;
1605 	if (!sc->sc_link)
1606 		return;
1607 
1608 	idx = txq->prod;
1609 	while (txq->cnt < MVPP2_AGGR_TXQ_SIZE) {
1610 		m = ifq_dequeue(&ifp->if_snd);
1611 		if (m == NULL)
1612 			break;
1613 
1614 		error = mvpp2_encap(sc, m, &idx);
1615 		if (error == ENOBUFS) {
1616 			m_freem(m); /* give up: drop it */
1617 			ifq_set_oactive(&ifp->if_snd);
1618 			break;
1619 		}
1620 		if (error == EFBIG) {
1621 			m_freem(m); /* give up: drop it */
1622 			ifp->if_oerrors++;
1623 			continue;
1624 		}
1625 
1626 #if NBPFILTER > 0
1627 		if (ifp->if_bpf)
1628 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1629 #endif
1630 	}
1631 
1632 	if (txq->prod != idx) {
1633 		txq->prod = idx;
1634 
1635 		/* Set a timeout in case the chip goes out to lunch. */
1636 		ifp->if_timer = 5;
1637 	}
1638 }
1639 
/*
 * Map an mbuf chain for DMA and fill one TX descriptor per segment
 * into the aggregated TX ring starting at *idx.  On success *idx is
 * advanced past the used descriptors and the hardware is told how
 * many were added.  Returns ENOBUFS when the map or the ring is
 * exhausted (caller frees the mbuf).
 */
int
mvpp2_encap(struct mvpp2_port *sc, struct mbuf *m, int *idx)
{
	struct mvpp2_tx_queue *txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_tx_desc *txd;
	bus_dmamap_t map;
	uint32_t command;
	int i, current, first, last;

	first = last = current = *idx;
	map = txq->buf[current].mb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
		return ENOBUFS;

	/* Leave some slack so the producer never meets the consumer. */
	if (map->dm_nsegs > (MVPP2_AGGR_TXQ_SIZE - txq->cnt - 2)) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* No checksum offload on this path. */
	command = MVPP2_TXD_L4_CSUM_NOT |
	    MVPP2_TXD_IP_CSUM_DISABLE;
	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &txq->descs[current];
		memset(txd, 0, sizeof(*txd));
		/* Buffer address is split into a 32-byte-aligned base
		 * and a packet offset within that cacheline. */
		txd->buf_phys_addr_hw_cmd2 =
		    map->dm_segs[i].ds_addr & ~0x1f;
		txd->packet_offset =
		    map->dm_segs[i].ds_addr & 0x1f;
		txd->data_size = map->dm_segs[i].ds_len;
		txd->phys_txq = sc->sc_txqs[0].id;
		txd->command = command |
		    MVPP2_TXD_PADDING_DISABLE;
		/* Flag the first and last descriptor of the chain. */
		if (i == 0)
		    txd->command |= MVPP2_TXD_F_DESC;
		if (i == (map->dm_nsegs - 1))
		    txd->command |= MVPP2_TXD_L_DESC;

		bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring),
		    current * sizeof(*txd), sizeof(*txd),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		last = current;
		current = (current + 1) % MVPP2_AGGR_TXQ_SIZE;
		KASSERT(current != txq->cons);
	}

	/*
	 * Park the loaded map on the LAST slot (where completion is
	 * detected) and move that slot's idle map to the first slot,
	 * so every slot always owns exactly one map.
	 */
	KASSERT(txq->buf[last].mb_m == NULL);
	txq->buf[first].mb_map = txq->buf[last].mb_map;
	txq->buf[last].mb_map = map;
	txq->buf[last].mb_m = m;

	txq->cnt += map->dm_nsegs;
	*idx = current;

	/* Tell the hardware how many descriptors were appended. */
	mvpp2_write(sc->sc, MVPP2_AGGR_TXQ_UPDATE_REG, map->dm_nsegs);

	return 0;
}
1702 
1703 int
1704 mvpp2_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
1705 {
1706 	struct mvpp2_port *sc = ifp->if_softc;
1707 	struct ifreq *ifr = (struct ifreq *)addr;
1708 	int error = 0, s;
1709 
1710 	s = splnet();
1711 
1712 	switch (cmd) {
1713 	case SIOCSIFADDR:
1714 		ifp->if_flags |= IFF_UP;
1715 		/* FALLTHROUGH */
1716 	case SIOCSIFFLAGS:
1717 		if (ifp->if_flags & IFF_UP) {
1718 			if (ifp->if_flags & IFF_RUNNING)
1719 				error = ENETRESET;
1720 			else
1721 				mvpp2_up(sc);
1722 		} else {
1723 			if (ifp->if_flags & IFF_RUNNING)
1724 				mvpp2_down(sc);
1725 		}
1726 		break;
1727 
1728 	case SIOCGIFMEDIA:
1729 	case SIOCSIFMEDIA:
1730 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1731 		break;
1732 
1733 	case SIOCGIFRXR:
1734 		error = mvpp2_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1735 		break;
1736 
1737 	case SIOCGIFSFFPAGE:
1738 		error = rw_enter(&mvpp2_sff_lock, RW_WRITE|RW_INTR);
1739 		if (error != 0)
1740 			break;
1741 
1742 		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
1743 		rw_exit(&mvpp2_sff_lock);
1744 		break;
1745 
1746 	default:
1747 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
1748 		break;
1749 	}
1750 
1751 	if (error == ENETRESET) {
1752 		if (ifp->if_flags & IFF_RUNNING)
1753 			mvpp2_iff(sc);
1754 		error = 0;
1755 	}
1756 
1757 	splx(s);
1758 	return (error);
1759 }
1760 
1761 int
1762 mvpp2_rxrinfo(struct mvpp2_port *sc, struct if_rxrinfo *ifri)
1763 {
1764 	struct mvpp2_rx_queue *rxq;
1765 	struct if_rxring_info *ifrs, *ifr;
1766 	unsigned int i;
1767 	int error;
1768 
1769 	ifrs = mallocarray(sc->sc_nrxq, sizeof(*ifrs), M_TEMP,
1770 	    M_WAITOK|M_ZERO|M_CANFAIL);
1771 	if (ifrs == NULL)
1772 		return (ENOMEM);
1773 
1774 	for (i = 0; i < sc->sc_nrxq; i++) {
1775 		rxq = &sc->sc_rxqs[i];
1776 		ifr = &ifrs[i];
1777 
1778 		snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), "%u", i);
1779 		ifr->ifr_size = MCLBYTES;
1780 		ifr->ifr_info = rxq->rxring;
1781 	}
1782 
1783 	error = if_rxr_info_ioctl(ifri, i, ifrs);
1784 	free(ifrs, M_TEMP, i * sizeof(*ifrs));
1785 
1786 	return (error);
1787 }
1788 
/*
 * Transmit watchdog.  Currently a stub that only logs the expiry;
 * NOTE(review): no recovery (queue flush/reset) is attempted here.
 */
void
mvpp2_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}
1794 
1795 int
1796 mvpp2_media_change(struct ifnet *ifp)
1797 {
1798 	struct mvpp2_port *sc = ifp->if_softc;
1799 
1800 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1801 		mii_mediachg(&sc->sc_mii);
1802 
1803 	return (0);
1804 }
1805 
1806 void
1807 mvpp2_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1808 {
1809 	struct mvpp2_port *sc = ifp->if_softc;
1810 
1811 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1812 		mii_pollstat(&sc->sc_mii);
1813 
1814 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1815 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1816 }
1817 
1818 int
1819 mvpp2_mii_readreg(struct device *self, int phy, int reg)
1820 {
1821 	struct mvpp2_port *sc = (void *)self;
1822 	return sc->sc_mdio->md_readreg(sc->sc_mdio->md_cookie, phy, reg);
1823 }
1824 
1825 void
1826 mvpp2_mii_writereg(struct device *self, int phy, int reg, int val)
1827 {
1828 	struct mvpp2_port *sc = (void *)self;
1829 	return sc->sc_mdio->md_writereg(sc->sc_mdio->md_cookie, phy, reg, val);
1830 }
1831 
/*
 * MII status-change callback: propagate the new PHY state to the MAC.
 */
void
mvpp2_mii_statchg(struct device *self)
{
	struct mvpp2_port *sc = (struct mvpp2_port *)self;

	mvpp2_port_change(sc);
}
1838 
/*
 * Derive media status/active from the MAC's in-band status registers
 * (used when link state comes from the SerDes rather than an MDIO PHY),
 * then notify the MAC layer of any change.
 */
void
mvpp2_inband_statchg(struct mvpp2_port *sc)
{
	uint32_t reg;

	sc->sc_mii.mii_media_status = IFM_AVALID;
	sc->sc_mii.mii_media_active = IFM_ETHER;

	/* 10G modes are only wired up on GOP 0; use the XLG MAC there. */
	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		reg = mvpp2_xlg_read(sc, MV_XLG_MAC_PORT_STATUS_REG);
		if (reg & MV_XLG_MAC_PORT_STATUS_LINKSTATUS_MASK)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		/* 10G is always full duplex; report SR as the subtype. */
		sc->sc_mii.mii_media_active |= IFM_FDX;
		sc->sc_mii.mii_media_active |= IFM_10G_SR;
	} else {
		reg = mvpp2_gmac_read(sc, MVPP2_PORT_STATUS0_REG);
		if (reg & MVPP2_PORT_STATUS0_LINKUP_MASK)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		if (reg & MVPP2_PORT_STATUS0_FULLDX_MASK)
			sc->sc_mii.mii_media_active |= IFM_FDX;
		/* Fixed-speed SerDes modes override the reported speed bits. */
		if (sc->sc_phy_mode == PHY_MODE_2500BASEX)
			sc->sc_mii.mii_media_active |= IFM_2500_SX;
		else if (sc->sc_phy_mode == PHY_MODE_1000BASEX)
			sc->sc_mii.mii_media_active |= IFM_1000_SX;
		else if (reg & MVPP2_PORT_STATUS0_GMIISPEED_MASK)
			sc->sc_mii.mii_media_active |= IFM_1000_T;
		else if (reg & MVPP2_PORT_STATUS0_MIISPEED_MASK)
			sc->sc_mii.mii_media_active |= IFM_100_TX;
		else
			sc->sc_mii.mii_media_active |= IFM_10_T;
	}

	mvpp2_port_change(sc);
}
1874 
/*
 * React to a link state change: track sc_link and, when link state is
 * software-forced (no in-band status), force the MAC link up/down and
 * program speed/duplex to match the negotiated media.
 */
void
mvpp2_port_change(struct mvpp2_port *sc)
{
	uint32_t reg;

	/* No transition: nothing to do. */
	if (!!(sc->sc_mii.mii_media_status & IFM_ACTIVE) == sc->sc_link)
		return;

	sc->sc_link = !sc->sc_link;

	/* With in-band status the MAC tracks the link by itself. */
	if (sc->sc_inband_status)
		return;

	if (sc->sc_link) {
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKDOWN_MASK;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKPASS_MASK;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
			reg |= MVPP2_GMAC_FORCE_LINK_PASS;
			/* Clear speed/duplex, then set from active media. */
			reg &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_2500_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T)
				reg |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX)
				reg |= MVPP2_GMAC_CONFIG_MII_SPEED;
			if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
				reg |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	} else {
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS_MASK;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKDOWN_MASK;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
			reg |= MVPP2_GMAC_FORCE_LINK_DOWN;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	}
}
1927 
/*
 * Once-a-second timeout: drive the MII state machine and reschedule.
 */
void
mvpp2_tick(void *arg)
{
	struct mvpp2_port *sc = arg;
	int s;

	/* mii_tick() must run at IPL_NET. */
	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}
1940 
1941 int
1942 mvpp2_link_intr(void *arg)
1943 {
1944 	struct mvpp2_port *sc = arg;
1945 	uint32_t reg;
1946 	int event = 0;
1947 
1948 	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
1949 	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
1950 		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_CAUSE_REG);
1951 		if (reg & MV_XLG_INTERRUPT_LINK_CHANGE_MASK)
1952 			event = 1;
1953 	} else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
1954 	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
1955 	    sc->sc_phy_mode == PHY_MODE_SGMII ||
1956 	    sc->sc_phy_mode == PHY_MODE_RGMII ||
1957 	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
1958 	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
1959 	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
1960 		reg = mvpp2_gmac_read(sc, MV_GMAC_INTERRUPT_CAUSE_REG);
1961 		if (reg & MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_MASK)
1962 			event = 1;
1963 	}
1964 
1965 	if (event && sc->sc_inband_status)
1966 		mvpp2_inband_statchg(sc);
1967 
1968 	return (1);
1969 }
1970 
/*
 * Per-port rx/tx interrupt handler.  Reads the shared cause register,
 * acknowledges the "misc" summary, then services tx completions and
 * received packets for the queues whose cause bits are set.
 */
int
mvpp2_intr(void *arg)
{
	struct mvpp2_port *sc = arg;
	uint32_t reg;

	reg = mvpp2_read(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id));
	if (reg & MVPP2_CAUSE_MISC_SUM_MASK) {
		/* Clear the misc cause, then drop its summary bit. */
		mvpp2_write(sc->sc, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id),
		    reg & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}
	/* Tx queue bitmap lives in its own field of the cause register. */
	if (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_tx_proc(sc,
		    (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK) >>
		    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET);

	if (reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_rx_proc(sc,
		    reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK);

	return (1);
}
1994 
1995 void
1996 mvpp2_tx_proc(struct mvpp2_port *sc, uint8_t queues)
1997 {
1998 	struct mvpp2_tx_queue *txq;
1999 	int i;
2000 
2001 	for (i = 0; i < sc->sc_ntxq; i++) {
2002 		txq = &sc->sc_txqs[i];
2003 		if ((queues & (1 << i)) == 0)
2004 			continue;
2005 		mvpp2_txq_proc(sc, txq);
2006 	}
2007 }
2008 
/*
 * Reclaim transmitted descriptors for one tx queue.  Completions are
 * counted via the per-queue SENT register, but the buffers themselves
 * live on the shared aggregated tx queue, consumed in ring order.
 */
void
mvpp2_txq_proc(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvpp2_tx_queue *aggr_txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_buf *txb;
	int i, idx, nsent;

	/* Reading SENT clears the hardware's transmitted count. */
	nsent = (mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id)) &
	    MVPP2_TRANSMITTED_COUNT_MASK) >>
	    MVPP2_TRANSMITTED_COUNT_OFFSET;

	for (i = 0; i < nsent; i++) {
		idx = aggr_txq->cons;
		KASSERT(idx < MVPP2_AGGR_TXQ_SIZE);

		/* Only the last descriptor of a packet holds the mbuf. */
		txb = &aggr_txq->buf[idx];
		if (txb->mb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
			    txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->mb_map);

			m_freem(txb->mb_m);
			txb->mb_m = NULL;
		}

		aggr_txq->cnt--;
		aggr_txq->cons = (aggr_txq->cons + 1) % MVPP2_AGGR_TXQ_SIZE;
	}

	/* Everything reclaimed: disarm the watchdog. */
	if (aggr_txq->cnt == 0)
		ifp->if_timer = 0;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
2045 
2046 void
2047 mvpp2_rx_proc(struct mvpp2_port *sc, uint8_t queues)
2048 {
2049 	struct mvpp2_rx_queue *rxq;
2050 	int i;
2051 
2052 	for (i = 0; i < sc->sc_nrxq; i++) {
2053 		rxq = &sc->sc_rxqs[i];
2054 		if ((queues & (1 << i)) == 0)
2055 			continue;
2056 		mvpp2_rxq_proc(sc, rxq);
2057 	}
2058 
2059 	mvpp2_rx_refill(sc);
2060 }
2061 
/*
 * Drain one receive queue: hand completed mbufs to the network stack
 * and queue their buffer-manager slots for refill.  The descriptor
 * cookie encodes pool id (bits 16-31) and rxbuf index (bits 0-15);
 * the high word is checked against this CPU's pool.
 */
void
mvpp2_rxq_proc(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mvpp2_rx_desc *rxd;
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	struct mbuf *m;
	uint64_t virt;
	uint32_t i, nrecv, pool;

	nrecv = mvpp2_rxq_received(sc, rxq->id);
	if (!nrecv)
		return;

	/* One buffer-manager pool per CPU. */
	pool = curcpu()->ci_cpuid;
	KASSERT(pool < sc->sc->sc_npools);
	bm = &sc->sc->sc_bm_pools[pool];

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < nrecv; i++) {
		rxd = &rxq->descs[rxq->cons];
		virt = rxd->buf_cookie_bm_qset_cls_info;
		KASSERT(((virt >> 16) & 0xffff) == pool);
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m != NULL);

		bus_dmamap_sync(sc->sc_dmat, rxb->mb_map, 0,
		    rxd->data_size, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->mb_map);

		m = rxb->mb_m;
		rxb->mb_m = NULL;

		/* Strip the 2-byte Marvell header prepended by hardware. */
		m->m_pkthdr.len = m->m_len = rxd->data_size;
		m_adj(m, MVPP2_MH_SIZE);
		ml_enqueue(&ml, m);

		/* Remember this slot so mvpp2_rx_refill() can restock it. */
		KASSERT(bm->freelist[bm->free_prod] == -1);
		bm->freelist[bm->free_prod] = virt & 0xffffffff;
		bm->free_prod = (bm->free_prod + 1) % MVPP2_BM_SIZE;

		rxq->cons = (rxq->cons + 1) % MVPP2_NRXDESC;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Tell hardware how many descriptors we consumed/returned. */
	mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);

	if_input(ifp, &ml);
}
2120 
2121 /*
2122  * We have a pool per core, and since we should not assume that
2123  * RX buffers are always used in order, keep a list of rxbuf[]
2124  * indices that should be filled with an mbuf, if possible.
2125  */
/*
 * Restock this CPU's buffer-manager pool: for each freelist entry
 * recorded by mvpp2_rxq_proc(), allocate a fresh mbuf and release the
 * (phys, virt) pair back to the hardware pool.  Stops early if mbuf
 * allocation fails; the slot stays on the freelist for next time.
 */
void
mvpp2_rx_refill(struct mvpp2_port *sc)
{
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	uint64_t phys, virt;
	int pool;

	pool = curcpu()->ci_cpuid;
	KASSERT(pool < sc->sc->sc_npools);
	bm = &sc->sc->sc_bm_pools[pool];

	while (bm->free_cons != bm->free_prod) {
		KASSERT(bm->freelist[bm->free_cons] != -1);
		virt = bm->freelist[bm->free_cons];
		KASSERT(((virt >> 16) & 0xffff) == pool);
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m == NULL);

		rxb->mb_m = mvpp2_alloc_mbuf(sc->sc, rxb->mb_map);
		if (rxb->mb_m == NULL)
			break;

		bm->freelist[bm->free_cons] = -1;
		bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;

		/* High bits of both addresses go in one register... */
		phys = rxb->mb_map->dm_segs[0].ds_addr;
		mvpp2_write(sc->sc, MVPP22_BM_PHY_VIRT_HIGH_RLS_REG,
		    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
		    << MVPP22_BM_VIRT_HIGH_RLS_OFFST) |
		    (((phys >> 32) & MVPP22_ADDR_HIGH_MASK)
		    << MVPP22_BM_PHY_HIGH_RLS_OFFSET));
		mvpp2_write(sc->sc, MVPP2_BM_VIRT_RLS_REG,
		    virt & 0xffffffff);
		/* ...and writing the phys low word triggers the release. */
		mvpp2_write(sc->sc, MVPP2_BM_PHY_RLS_REG(pool),
		    phys & 0xffffffff);
	}
}
2165 
/*
 * Bring the interface up: program the parser for our MAC address,
 * initialize tx/rx queue hardware, configure media and filters,
 * unmask interrupts and enable the MAC datapath.
 */
void
mvpp2_up(struct mvpp2_port *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	memcpy(sc->sc_cur_lladdr, sc->sc_lladdr, ETHER_ADDR_LEN);
	mvpp2_prs_mac_da_accept(sc->sc, sc->sc_id, etherbroadcastaddr, 1);
	mvpp2_prs_mac_da_accept(sc->sc, sc->sc_id, sc->sc_cur_lladdr, 1);
	/* FIXME: not promisc!!! */
	mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id, 1);
	mvpp2_prs_tag_mode_set(sc->sc, sc->sc_id, MVPP2_TAG_TYPE_MH);
	mvpp2_prs_def_flow(sc);

	for (i = 0; i < sc->sc_ntxq; i++)
		mvpp2_txq_hw_init(sc, &sc->sc_txqs[i]);

	mvpp2_tx_time_coal_set(sc, sc->sc_tx_time_coal);

	for (i = 0; i < sc->sc_nrxq; i++)
		mvpp2_rxq_hw_init(sc, &sc->sc_rxqs[i]);

	/* FIXME: rx buffer fill */

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	mvpp2_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	mvpp2_txp_max_tx_size_set(sc);

	/* XXX: single vector */
	mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id),
	    MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_MISC_SUM_MASK);
	mvpp2_interrupts_enable(sc, (1 << 0));

	mvpp2_mac_config(sc);
	mvpp2_egress_enable(sc);
	mvpp2_ingress_enable(sc);

	/* Start the PHY tick; also rearms itself every second. */
	timeout_add_sec(&sc->sc_tick, 1);
}
2215 
/*
 * Allocate and register one aggregated (per-CPU) transmit queue with
 * the hardware: DMA ring, per-descriptor buffer bookkeeping, and the
 * ring base/size registers.
 */
void
mvpp2_aggr_txq_hw_init(struct mvpp2_softc *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int i;

	/* 32-byte alignment required by the descriptor engine. */
	txq->ring = mvpp2_dmamem_alloc(sc,
	    MVPP2_AGGR_TXQ_SIZE * sizeof(struct mvpp2_tx_desc), 32);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_AGGR_TXQ_SIZE, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVPP2_AGGR_TXQ_SIZE; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	/* Resume from wherever hardware's producer index currently is. */
	txq->prod = mvpp2_read(sc, MVPP2_AGGR_TXQ_INDEX_REG(txq->id));
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_ADDR_REG(txq->id),
	    MVPP2_DMA_DVA(txq->ring) >> MVPP22_DESC_ADDR_SHIFT);
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_SIZE_REG(txq->id),
	    MVPP2_AGGR_TXQ_SIZE);
}
2245 
/*
 * Allocate and program one per-port transmit queue: DMA ring and
 * buffer bookkeeping, indirect queue registers, descriptor prefetch
 * and WRR/EJP scheduler parameters.
 */
void
mvpp2_txq_hw_init(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int desc, desc_per_txq;
	uint32_t reg;
	int i;

	txq->prod = txq->cons = txq->cnt = 0;
//	txq->last_desc = txq->size - 1;

	/* 32-byte alignment required by the descriptor engine. */
	txq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NTXDESC * sizeof(struct mvpp2_tx_desc), 32);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_NTXDESC, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVPP2_NTXDESC; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	/* Queue registers are indirect: select the queue first. */
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(txq->ring));
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG,
	    MVPP2_NTXDESC & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(sc->sc, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_RSVD_CLR_REG,
	    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	reg = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG);
	reg &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PENDING_REG, reg);

	/* Carve out this queue's slice of the prefetch buffer. */
	desc_per_txq = 16;
	desc = (sc->sc_id * MVPP2_MAX_TXQ * desc_per_txq) +
	    (txq->log_id * desc_per_txq);

	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG,
	    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
	    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	mvpp2_write(sc->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(sc));

	reg = mvpp2_read(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	reg &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	reg |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	reg |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), reg);

	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
	    MVPP2_TXQ_TOKEN_SIZE_MAX);

	mvpp2_tx_pkts_coal_set(sc, txq, txq->done_pkts_coal);
}
2309 
/*
 * Allocate and program one receive queue: DMA descriptor ring,
 * indirect queue registers, packet offset and interrupt coalescing,
 * then hand all descriptors to the hardware.
 */
void
mvpp2_rxq_hw_init(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	rxq->prod = rxq->cons = 0;

	/* 32-byte alignment required by the descriptor engine. */
	rxq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NRXDESC * sizeof(struct mvpp2_rx_desc), 32);
	rxq->descs = MVPP2_DMA_KVA(rxq->ring);

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring),
	    0, MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Queue registers are indirect: select the queue first. */
	mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(rxq->ring) >> MVPP22_DESC_ADDR_SHIFT);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, MVPP2_NRXDESC);
	mvpp2_write(sc->sc, MVPP2_RXQ_INDEX_REG, 0);
	mvpp2_rxq_offset_set(sc, rxq->id, 0);
	mvpp2_rx_pkts_coal_set(sc, rxq, rxq->pkts_coal);
	mvpp2_rx_time_coal_set(sc, rxq, rxq->time_coal);
	/* Make all MVPP2_NRXDESC descriptors available to hardware. */
	mvpp2_rxq_status_update(sc, rxq->id, 0, MVPP2_NRXDESC);
}
2334 
/*
 * (Re)configure the MAC for the current PHY mode: put the GMAC (and,
 * on GOP 0, the XLG MAC and the MPCS/XPCS) through reset, select the
 * 10G or GMAC datapath, program the maximum frame size, then apply
 * the mode-specific configuration and re-enable the port.
 */
void
mvpp2_mac_config(struct mvpp2_port *sc)
{
	uint32_t reg;

	mvpp2_port_disable(sc);

	/* Assert resets: GMAC always, XLG/PCS only on GOP 0. */
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
	    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) |
	    MVPP2_PORT_CTRL2_PORTMACRESET_MASK);
	if (sc->sc_gop_id == 0) {
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG,
		    mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
		    ~MV_XLG_MAC_CTRL0_MACRESETN_MASK);
		if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
			reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
			reg |= MVPP22_MPCS_CLK_DIV_PHASE_SET_MASK;
			reg &= ~MVPP22_MPCS_TX_SD_CLK_RESET_MASK;
			reg &= ~MVPP22_MPCS_RX_SD_CLK_RESET_MASK;
			reg &= ~MVPP22_MPCS_MAC_CLK_RESET_MASK;
			mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
		} else if (sc->sc_phy_mode == PHY_MODE_XAUI)
			mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG,
			    mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG) &
			    ~MVPP22_XPCS_PCSRESET);
	}

	/* Release PCS resets and select the MAC mode on GOP 0. */
	if (sc->sc_gop_id == 0) {
		if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
			reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
			reg &= ~MVPP22_MPCS_CLK_DIV_PHASE_SET_MASK;
			reg |= MVPP22_MPCS_TX_SD_CLK_RESET_MASK;
			reg |= MVPP22_MPCS_RX_SD_CLK_RESET_MASK;
			reg |= MVPP22_MPCS_MAC_CLK_RESET_MASK;
			mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
		} else if (sc->sc_phy_mode == PHY_MODE_XAUI)
			mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG,
			    mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG) |
			    MVPP22_XPCS_PCSRESET);

		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL3_REG);
		reg &= ~MV_XLG_MAC_CTRL3_MACMODESELECT_MASK;
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI)
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_10G;
		else
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_GMAC;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL3_REG, reg);
	}

	/* Maximum frame size; the field counts in units of 2 bytes. */
	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL1_REG);
		reg &= ~MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL1_REG, reg);
	} else {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_CTRL_0_REG);
		reg &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MVPP2_GMAC_MAX_RX_SIZE_OFFS;
		mvpp2_gmac_write(sc, MVPP2_GMAC_CTRL_0_REG, reg);
	}

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI))
		mvpp2_xlg_config(sc);
	else
		mvpp2_gmac_config(sc);

	mvpp2_port_enable(sc);
}
2408 
2409 void
2410 mvpp2_xlg_config(struct mvpp2_port *sc)
2411 {
2412 	uint32_t ctl0, ctl4;
2413 
2414 	ctl0 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
2415 	ctl4 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL4_REG);
2416 
2417 	ctl0 |= MV_XLG_MAC_CTRL0_MACRESETN_MASK;
2418 	ctl4 &= ~MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK;
2419 	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_OFFS;
2420 	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_MASK;
2421 
2422 	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, ctl0);
2423 	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL4_REG, ctl0);
2424 
2425 	/* Port reset */
2426 	while ((mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
2427 	    MV_XLG_MAC_CTRL0_MACRESETN_MASK) == 0)
2428 		;
2429 }
2430 
/*
 * Configure the GMAC for the current PHY mode: force link down while
 * changing in-band settings, clear all autoneg/PCS bits, apply the
 * per-mode PCS and pin-select configuration, optionally enable
 * in-band autonegotiation, and wait for the port reset to clear.
 */
void
mvpp2_gmac_config(struct mvpp2_port *sc)
{
	uint32_t ctl0, ctl2, ctl4, panc;

	/* Setup phy. */
	ctl0 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL0_REG);
	ctl2 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG);
	ctl4 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL4_REG);
	panc = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);

	/* Force link down to change in-band settings. */
	panc &= ~MVPP2_GMAC_FORCE_LINK_PASS;
	panc |= MVPP2_GMAC_FORCE_LINK_DOWN;
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, panc);

	/* Start from a clean slate; mode-specific bits are set below. */
	ctl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK |
	    MVPP2_GMAC_INBAND_AN_MASK);
	panc &= ~(MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
	    MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FC_ADV_EN |
	    MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
	    MVPP2_GMAC_IN_BAND_AUTONEG);

	switch (sc->sc_phy_mode) {
	case PHY_MODE_XAUI:
	case PHY_MODE_10GBASER:
		/* Handled by the XLG MAC; nothing to do here. */
		break;
	case PHY_MODE_2500BASEX:
	case PHY_MODE_1000BASEX:
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
		break;
	case PHY_MODE_SGMII:
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl2 |= MVPP2_GMAC_INBAND_AN_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
		break;
	case PHY_MODE_RGMII:
	case PHY_MODE_RGMII_ID:
	case PHY_MODE_RGMII_RXID:
	case PHY_MODE_RGMII_TXID:
		ctl4 &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
		break;
	}

	/* Use Auto-Negotiation for Inband Status only */
	if (sc->sc_inband_status) {
		panc &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
		panc &= ~MVPP2_GMAC_FORCE_LINK_PASS;
		panc &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		panc |= MVPP2_GMAC_IN_BAND_AUTONEG;
		/* TODO: read mode from SFP */
		if (1) {
			/* 802.3z */
			ctl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
			panc |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			panc |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		} else {
			/* SGMII */
			panc |= MVPP2_GMAC_AN_SPEED_EN;
			panc |= MVPP2_GMAC_AN_DUPLEX_EN;
		}
	}

	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL0_REG, ctl0);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG, ctl2);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL4_REG, ctl4);
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, panc);

	/* Port reset */
	while (mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) &
	    MVPP2_PORT_CTRL2_PORTMACRESET_MASK)
		;
}
2517 
/*
 * Bring the interface down: stop the tick, disable the datapath and
 * interrupts, flush and tear down all tx/rx queues, and withdraw our
 * unicast address from the parser.
 */
void
mvpp2_down(struct mvpp2_port *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg;
	int i;

	timeout_del(&sc->sc_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	mvpp2_egress_disable(sc);
	mvpp2_ingress_disable(sc);
	mvpp2_port_disable(sc);

	/* XXX: single vector */
	mvpp2_interrupts_disable(sc, (1 << 0));
	mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id), 0);

	/* Flush the port's tx path while the queues are drained. */
	reg = mvpp2_read(sc->sc, MVPP2_TX_PORT_FLUSH_REG);
	reg |= MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
	mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);

	for (i = 0; i < sc->sc_ntxq; i++)
		mvpp2_txq_hw_deinit(sc, &sc->sc_txqs[i]);

	reg &= ~MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
	mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);

	for (i = 0; i < sc->sc_nrxq; i++)
		mvpp2_rxq_hw_deinit(sc, &sc->sc_rxqs[i]);

	mvpp2_prs_mac_da_accept(sc->sc, sc->sc_id, sc->sc_cur_lladdr, 0);
}
2554 
/*
 * Tear down one transmit queue: drain pending descriptors (with a
 * timeout), deregister the ring from hardware, free any mbufs still
 * attached to descriptors, and release the DMA resources.
 */
void
mvpp2_txq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int i, pending;
	uint32_t reg;

	/* Queue registers are indirect: select the queue first. */
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	reg = mvpp2_read(sc->sc, MVPP2_TXQ_PREF_BUF_REG);
	reg |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);

	/*
	 * the queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	i = 0;
	do {
		if (i >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			printf("%s: port %d: cleaning queue %d timed out\n",
			    sc->sc_dev.dv_xname, sc->sc_id, txq->log_id);
			break;
		}
		delay(1000);
		i++;

		pending = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG) &
		    MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	reg &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);

	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG, 0);

	/* Release any mbufs that were still in flight. */
	for (i = 0; i < MVPP2_NTXDESC; i++) {
		txb = &txq->buf[i];
		if (txb->mb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
			    txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->mb_map);
			m_freem(txb->mb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->mb_map);
	}

	mvpp2_dmamem_free(sc->sc, txq->ring);
	free(txq->buf, M_DEVBUF, sizeof(struct mvpp2_buf) *
	    MVPP2_NTXDESC);
}
2608 
2609 void
2610 mvpp2_rxq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
2611 {
2612 	uint32_t nrecv;
2613 
2614 	nrecv = mvpp2_rxq_received(sc, rxq->id);
2615 	if (nrecv)
2616 		mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);
2617 
2618 	mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2619 	mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
2620 	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG, 0);
2621 	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, 0);
2622 
2623 	mvpp2_dmamem_free(sc->sc, rxq->ring);
2624 }
2625 
2626 void
2627 mvpp2_rxq_long_pool_set(struct mvpp2_port *port, int lrxq, int pool)
2628 {
2629 	uint32_t val;
2630 	int prxq;
2631 
2632 	/* get queue physical ID */
2633 	prxq = port->sc_rxqs[lrxq].id;
2634 
2635 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
2636 	val &= ~MVPP2_RXQ_POOL_LONG_MASK;
2637 	val |= ((pool << MVPP2_RXQ_POOL_LONG_OFFS) & MVPP2_RXQ_POOL_LONG_MASK);
2638 
2639 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
2640 }
2641 
2642 void
2643 mvpp2_rxq_short_pool_set(struct mvpp2_port *port, int lrxq, int pool)
2644 {
2645 	uint32_t val;
2646 	int prxq;
2647 
2648 	/* get queue physical ID */
2649 	prxq = port->sc_rxqs[lrxq].id;
2650 
2651 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
2652 	val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
2653 	val |= ((pool << MVPP2_RXQ_POOL_SHORT_OFFS) & MVPP2_RXQ_POOL_SHORT_MASK);
2654 
2655 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
2656 }
2657 
/*
 * Program receive filters.  Withdraw the previously-programmed
 * unicast address from the parser, latch the current lladdr and
 * re-accept it; order matters so the old entry is removed first.
 */
void
mvpp2_iff(struct mvpp2_port *sc)
{
	/* FIXME: multicast handling */

	mvpp2_prs_mac_da_accept(sc->sc, sc->sc_id, sc->sc_cur_lladdr, 0);
	memcpy(sc->sc_cur_lladdr, sc->sc_lladdr, ETHER_ADDR_LEN);
	mvpp2_prs_mac_da_accept(sc->sc, sc->sc_id, sc->sc_cur_lladdr, 1);
}
2667 
2668 struct mvpp2_dmamem *
2669 mvpp2_dmamem_alloc(struct mvpp2_softc *sc, bus_size_t size, bus_size_t align)
2670 {
2671 	struct mvpp2_dmamem *mdm;
2672 	int nsegs;
2673 
2674 	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
2675 	mdm->mdm_size = size;
2676 
2677 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2678 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
2679 		goto mdmfree;
2680 
2681 	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
2682 	    &nsegs, BUS_DMA_WAITOK) != 0)
2683 		goto destroy;
2684 
2685 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
2686 	    &mdm->mdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
2687 		goto free;
2688 
2689 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
2690 	    NULL, BUS_DMA_WAITOK) != 0)
2691 		goto unmap;
2692 
2693 	bzero(mdm->mdm_kva, size);
2694 
2695 	return (mdm);
2696 
2697 unmap:
2698 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
2699 free:
2700 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
2701 destroy:
2702 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
2703 mdmfree:
2704 	free(mdm, M_DEVBUF, 0);
2705 
2706 	return (NULL);
2707 }
2708 
2709 void
2710 mvpp2_dmamem_free(struct mvpp2_softc *sc, struct mvpp2_dmamem *mdm)
2711 {
2712 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
2713 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
2714 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
2715 	free(mdm, M_DEVBUF, 0);
2716 }
2717 
2718 struct mbuf *
2719 mvpp2_alloc_mbuf(struct mvpp2_softc *sc, bus_dmamap_t map)
2720 {
2721 	struct mbuf *m = NULL;
2722 
2723 	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
2724 	if (!m)
2725 		return (NULL);
2726 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2727 
2728 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
2729 		printf("%s: could not load mbuf DMA map", DEVNAME(sc));
2730 		m_freem(m);
2731 		return (NULL);
2732 	}
2733 
2734 	bus_dmamap_sync(sc->sc_dmat, map, 0,
2735 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
2736 
2737 	return (m);
2738 }
2739 
2740 void
2741 mvpp2_interrupts_enable(struct mvpp2_port *port, int cpu_mask)
2742 {
2743 	mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
2744 	    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
2745 }
2746 
2747 void
2748 mvpp2_interrupts_disable(struct mvpp2_port *port, int cpu_mask)
2749 {
2750 	mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
2751 	    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
2752 }
2753 
2754 int
2755 mvpp2_egress_port(struct mvpp2_port *port)
2756 {
2757 	return MVPP2_MAX_TCONT + port->sc_id;
2758 }
2759 
2760 int
2761 mvpp2_txq_phys(int port, int txq)
2762 {
2763 	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
2764 }
2765 
void
mvpp2_defaults_set(struct mvpp2_port *port)
{
	int val, queue;

	/* Select this port for the indirect TX scheduler registers. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Zero the per-queue token counters. */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Scheduler period in core clock ticks per microsecond. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PERIOD_REG, port->sc->sc_tclk /
	    (1000 * 1000));
	/* Refill period 1, refill tokens at the maximum rate. */
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* set maximum_low_latency_packet_size value to 256 */
	mvpp2_write(port->sc, MVPP2_RX_CTRL_REG(port->sc_id),
	    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
	    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port, (0xf << 0));
}
2796 
2797 void
2798 mvpp2_ingress_enable(struct mvpp2_port *port)
2799 {
2800 	uint32_t val;
2801 	int lrxq, queue;
2802 
2803 	for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
2804 		queue = port->sc_rxqs[lrxq].id;
2805 		val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
2806 		val &= ~MVPP2_RXQ_DISABLE_MASK;
2807 		mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
2808 	}
2809 }
2810 
2811 void
2812 mvpp2_ingress_disable(struct mvpp2_port *port)
2813 {
2814 	uint32_t val;
2815 	int lrxq, queue;
2816 
2817 	for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
2818 		queue = port->sc_rxqs[lrxq].id;
2819 		val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
2820 		val |= MVPP2_RXQ_DISABLE_MASK;
2821 		mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
2822 	}
2823 }
2824 
2825 void
2826 mvpp2_egress_enable(struct mvpp2_port *port)
2827 {
2828 	struct mvpp2_tx_queue *txq;
2829 	uint32_t qmap;
2830 	int queue;
2831 
2832 	qmap = 0;
2833 	for (queue = 0; queue < port->sc_ntxq; queue++) {
2834 		txq = &port->sc_txqs[queue];
2835 
2836 		if (txq->descs != NULL) {
2837 			qmap |= (1 << queue);
2838 		}
2839 	}
2840 
2841 	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
2842 	    mvpp2_egress_port(port));
2843 	mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2844 }
2845 
void
mvpp2_egress_disable(struct mvpp2_port *port)
{
	uint32_t reg_data;
	int i;

	/* Select this port for the indirect TX scheduler registers. */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));
	/* Issue a disable command for every queue currently enabled. */
	reg_data = (mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG)) &
	    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data)
		mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG,(reg_data <<
		    MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Poll (1ms steps) until no queue is enabled, or time out. */
	i = 0;
	do {
		if (i >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			printf("%s: tx stop timed out, status=0x%08x\n",
			    port->sc_dev.dv_xname, reg_data);
			break;
		}
		delay(1000);
		i++;
		reg_data = mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
2872 
2873 void
2874 mvpp2_port_enable(struct mvpp2_port *port)
2875 {
2876 	uint32_t val;
2877 
2878 	if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
2879 	    port->sc_phy_mode == PHY_MODE_XAUI)) {
2880 		val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
2881 		val |= MV_XLG_MAC_CTRL0_PORTEN_MASK;
2882 		val &= ~MV_XLG_MAC_CTRL0_MIBCNTDIS_MASK;
2883 		mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
2884 	} else {
2885 		val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
2886 		val |= MVPP2_GMAC_PORT_EN_MASK;
2887 		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
2888 		mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
2889 	}
2890 }
2891 
2892 void
2893 mvpp2_port_disable(struct mvpp2_port *port)
2894 {
2895 	uint32_t val;
2896 
2897 	if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
2898 	    port->sc_phy_mode == PHY_MODE_XAUI)) {
2899 		val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
2900 		val &= ~MV_XLG_MAC_CTRL0_PORTEN_MASK;
2901 		mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
2902 	}
2903 
2904 	val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
2905 	val &= ~MVPP2_GMAC_PORT_EN_MASK;
2906 	mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
2907 }
2908 
2909 int
2910 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2911 {
2912 	uint32_t val = mvpp2_read(port->sc, MVPP2_RXQ_STATUS_REG(rxq_id));
2913 
2914 	return val & MVPP2_RXQ_OCCUPIED_MASK;
2915 }
2916 
2917 void
2918 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2919     int used_count, int free_count)
2920 {
2921 	uint32_t val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2922 	mvpp2_write(port->sc, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2923 }
2924 
2925 void
2926 mvpp2_rxq_offset_set(struct mvpp2_port *port, int prxq, int offset)
2927 {
2928 	uint32_t val;
2929 
2930 	offset = offset >> 5;
2931 	val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
2932 	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2933 	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2934 	    MVPP2_RXQ_PACKET_OFFSET_MASK);
2935 	mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
2936 }
2937 
void
mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	uint32_t val, size, mtu;
	int txq;

	/* Start from the largest frame the driver can hand to hardware. */
	mtu = MCLBYTES * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong token bucket update: set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* indirect access to registers */
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(port));

	/* set MTU */
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->sc, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXqs token size must be larger than MTU */
	val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	/* Grow each TX queue's token size to at least the MTU as well. */
	for (txq = 0; txq < port->sc_ntxq; txq++) {
		val = mvpp2_read(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), val);
		}
	}
}
2983 
2984 void
2985 mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
2986     uint32_t pkts)
2987 {
2988 	rxq->pkts_coal =
2989 	    pkts <= MVPP2_OCCUPIED_THRESH_MASK ?
2990 	    pkts : MVPP2_OCCUPIED_THRESH_MASK;
2991 
2992 	mvpp2_write(port->sc, MVPP2_RXQ_NUM_REG, rxq->id);
2993 	mvpp2_write(port->sc, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal);
2994 
2995 }
2996 
2997 void
2998 mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
2999     uint32_t pkts)
3000 {
3001 	txq->done_pkts_coal =
3002 	    pkts <= MVPP2_TRANSMITTED_THRESH_MASK ?
3003 	    pkts : MVPP2_TRANSMITTED_THRESH_MASK;
3004 
3005 	mvpp2_write(port->sc, MVPP2_TXQ_NUM_REG, txq->id);
3006 	mvpp2_write(port->sc, MVPP2_TXQ_THRESH_REG,
3007 	    txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET);
3008 }
3009 
3010 void
3011 mvpp2_rx_time_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3012     uint32_t usec)
3013 {
3014 	uint32_t val;
3015 
3016 	val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
3017 	mvpp2_write(port->sc, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
3018 
3019 	rxq->time_coal = usec;
3020 }
3021 
3022 void
3023 mvpp2_tx_time_coal_set(struct mvpp2_port *port, uint32_t usec)
3024 {
3025 	uint32_t val;
3026 
3027 	val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
3028 	mvpp2_write(port->sc, MVPP2_ISR_TX_THRESHOLD_REG(port->sc_id), val);
3029 
3030 	port->sc_tx_time_coal = usec;
3031 }
3032 
3033 void
3034 mvpp2_prs_shadow_ri_set(struct mvpp2_softc *sc, int index,
3035     uint32_t ri, uint32_t ri_mask)
3036 {
3037 	sc->sc_prs_shadow[index].ri_mask = ri_mask;
3038 	sc->sc_prs_shadow[index].ri = ri;
3039 }
3040 
3041 void
3042 mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
3043 {
3044 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
3045 
3046 	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
3047 	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
3048 }
3049 
3050 void
3051 mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, uint32_t port, int add)
3052 {
3053 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3054 
3055 	if (add)
3056 		pe->tcam.byte[enable_off] &= ~(1 << port);
3057 	else
3058 		pe->tcam.byte[enable_off] |= (1 << port);
3059 }
3060 
3061 void
3062 mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, uint32_t port_mask)
3063 {
3064 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3065 	uint8_t mask = MVPP2_PRS_PORT_MASK;
3066 
3067 	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
3068 	pe->tcam.byte[enable_off] &= ~mask;
3069 	pe->tcam.byte[enable_off] |= ~port_mask & MVPP2_PRS_PORT_MASK;
3070 }
3071 
3072 uint32_t
3073 mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
3074 {
3075 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3076 
3077 	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
3078 }
3079 
3080 void
3081 mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, uint32_t offs,
3082     uint8_t byte, uint8_t enable)
3083 {
3084 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
3085 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
3086 }
3087 
3088 void
3089 mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, uint32_t offs,
3090     uint8_t *byte, uint8_t *enable)
3091 {
3092 	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
3093 	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
3094 }
3095 
3096 int
3097 mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offset, uint16_t data)
3098 {
3099 	int byte_offset = MVPP2_PRS_TCAM_DATA_BYTE(offset);
3100 	uint16_t tcam_data;
3101 
3102 	tcam_data = (pe->tcam.byte[byte_offset + 1] << 8) |
3103 	    pe->tcam.byte[byte_offset];
3104 	if (tcam_data != data)
3105 		return 0;
3106 
3107 	return 1;
3108 }
3109 
3110 void
3111 mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t enable)
3112 {
3113 	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
3114 
3115 	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
3116 		if (!(enable & (i << 1)))
3117 			continue;
3118 
3119 		if (bits & (i << 1))
3120 			pe->tcam.byte[ai_idx] |= 1 << i;
3121 		else
3122 			pe->tcam.byte[ai_idx] &= ~(1 << i);
3123 	}
3124 
3125 	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
3126 }
3127 
3128 int
3129 mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
3130 {
3131 	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
3132 }
3133 
3134 void
3135 mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *pe, uint32_t data_offset,
3136     uint32_t *word, uint32_t *enable)
3137 {
3138 	int index, position;
3139 	uint8_t byte, mask;
3140 
3141 	for (index = 0; index < 4; index++) {
3142 		position = (data_offset * sizeof(int)) + index;
3143 		mvpp2_prs_tcam_data_byte_get(pe, position, &byte, &mask);
3144 		((uint8_t *)word)[index] = byte;
3145 		((uint8_t *)enable)[index] = mask;
3146 	}
3147 }
3148 
3149 void
3150 mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, uint32_t offs,
3151     uint16_t ether_type)
3152 {
3153 	mvpp2_prs_tcam_data_byte_set(pe, offs + 0, ether_type >> 8, 0xff);
3154 	mvpp2_prs_tcam_data_byte_set(pe, offs + 1, ether_type & 0xff, 0xff);
3155 }
3156 
3157 void
3158 mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
3159 {
3160 	pe->sram.byte[bit / 8] |= (val << (bit % 8));
3161 }
3162 
3163 void
3164 mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
3165 {
3166 	pe->sram.byte[bit / 8] &= ~(val << (bit % 8));
3167 }
3168 
3169 void
3170 mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
3171 {
3172 	int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
3173 	int i;
3174 
3175 	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
3176 		if (!(mask & (1 << i)))
3177 			continue;
3178 
3179 		if (bits & (1 << i))
3180 			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
3181 		else
3182 			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
3183 
3184 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
3185 	}
3186 }
3187 
3188 int
3189 mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
3190 {
3191 	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
3192 }
3193 
3194 void
3195 mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
3196 {
3197 	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
3198 	int i;
3199 
3200 	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
3201 		if (!(mask & (1 << i)))
3202 			continue;
3203 
3204 		if (bits & (1 << i))
3205 			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
3206 		else
3207 			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
3208 
3209 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
3210 	}
3211 }
3212 
3213 int
3214 mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
3215 {
3216 	uint8_t bits;
3217 	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
3218 	int ai_en_off = ai_off + 1;
3219 	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
3220 
3221 	bits = (pe->sram.byte[ai_off] >> ai_shift) |
3222 	    (pe->sram.byte[ai_en_off] << (8 - ai_shift));
3223 
3224 	return bits;
3225 }
3226 
3227 void
3228 mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, uint32_t op)
3229 {
3230 	if (shift < 0) {
3231 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
3232 		shift = -shift;
3233 	} else {
3234 		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
3235 	}
3236 
3237 	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
3238 	    shift & MVPP2_PRS_SRAM_SHIFT_MASK;
3239 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
3240 	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
3241 	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
3242 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
3243 }
3244 
void
mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, uint32_t type, int offset,
    uint32_t op)
{
	uint8_t udf_byte, udf_byte_offset;
	uint8_t op_sel_udf_byte, op_sel_udf_byte_offset;

	/*
	 * The UDF offset and op-sel fields straddle byte boundaries in
	 * the SRAM bit array; compute the spill-over byte index and the
	 * number of bits that carry into it for each field.
	 */
	udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
	    MVPP2_PRS_SRAM_UDF_BITS);
	udf_byte_offset = (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8));
	op_sel_udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
	    MVPP2_PRS_SRAM_OP_SEL_UDF_BITS);
	op_sel_udf_byte_offset = (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8));

	/* The offset is stored as a sign bit plus magnitude. */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = -offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Write the offset, including the bits in the spill-over byte. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
	    MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[udf_byte] &= ~(MVPP2_PRS_SRAM_UDF_MASK >> udf_byte_offset);
	pe->sram.byte[udf_byte] |= (offset >> udf_byte_offset);
	/* Record the offset type. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
	    MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
	/* Select the operation, again spilling into the following byte. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
	    MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
	pe->sram.byte[op_sel_udf_byte] &= ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
	    op_sel_udf_byte_offset);
	pe->sram.byte[op_sel_udf_byte] |= (op >> op_sel_udf_byte_offset);
	/* Clear the op-sel base bit. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
3282 
3283 void
3284 mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
3285 {
3286 	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
3287 
3288 	mvpp2_prs_sram_bits_clear(pe, sram_next_off, MVPP2_PRS_SRAM_NEXT_LU_MASK);
3289 	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
3290 }
3291 
3292 void
3293 mvpp2_prs_shadow_set(struct mvpp2_softc *sc, int index, uint32_t lu)
3294 {
3295 	sc->sc_prs_shadow[index].valid = 1;
3296 	sc->sc_prs_shadow[index].lu = lu;
3297 }
3298 
int
mvpp2_prs_hw_write(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe)
{
	int i;

	/*
	 * Write the software copy of entry pe->index into the parser
	 * TCAM and SRAM via the indirect index/data registers.
	 * Returns EINVAL for an out-of-range index.
	 */
	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return EINVAL;

	/* Clear the invalid bit so the entry becomes active. */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
	mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
3317 
int
mvpp2_prs_hw_read(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe)
{
	int i;

	/*
	 * Read the TCAM and SRAM contents of entry pe->index into pe.
	 * Returns EINVAL for an out-of-range index or an invalidated
	 * entry.
	 */
	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return EINVAL;

	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	/* Check the invalid bit before reading the whole entry. */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] =
	    mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return EINVAL;
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] =
		    mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(i));

	mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] =
		    mvpp2_read(sc, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
3342 
struct mvpp2_prs_entry *
mvpp2_prs_flow_find(struct mvpp2_softc *sc, int flow)
{
	struct mvpp2_prs_entry *pe;
	uint32_t word, enable;
	uint8_t bits;
	int tid;

	/*
	 * Scan the flow entries (top of the TCAM downwards) for one
	 * whose SRAM AI field carries the given flow ID.  On a match
	 * the entry is returned and ownership passes to the caller,
	 * who must free it; returns NULL otherwise (or on ENOMEM).
	 */
	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
	if (pe == NULL)
		return NULL;

	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		if (!sc->sc_prs_shadow[tid].valid ||
		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(sc, pe);

		/* Skip entries that match on any TCAM data. */
		mvpp2_prs_tcam_data_word_get(pe, 0, &word, &enable);
		if ((word != 0) || (enable != 0))
			continue;

		bits = mvpp2_prs_sram_ai_get(pe);
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}

	free(pe, M_TEMP, sizeof(*pe));
	return NULL;
}
3376 
3377 int
3378 mvpp2_prs_tcam_first_free(struct mvpp2_softc *sc, uint8_t start, uint8_t end)
3379 {
3380 	uint8_t tmp;
3381 	int tid;
3382 
3383 	if (start > end) {
3384 		tmp = end;
3385 		end = start;
3386 		start = tmp;
3387 	}
3388 
3389 	for (tid = start; tid <= end; tid++) {
3390 		if (!sc->sc_prs_shadow[tid].valid)
3391 			return tid;
3392 	}
3393 
3394 	return -1;
3395 }
3396 
void
mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *sc, uint32_t port, int add)
{
	struct mvpp2_prs_entry pe;

	/*
	 * Add (or remove) this port in the shared drop-all MAC entry,
	 * creating the entry on first use.
	 */
	if (sc->sc_prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(sc, &pe);
	} else {
		/* First use: build the drop-all entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
		    MVPP2_PRS_RI_DROP_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
		/* Start with an empty port map. */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3420 
void
mvpp2_prs_mac_promisc_set(struct mvpp2_softc *sc, uint32_t port, int add)
{
	struct mvpp2_prs_entry pe;

	/*
	 * Add (or remove) this port in the shared promiscuous MAC
	 * entry, creating the entry on first use.
	 */
	if (sc->sc_prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(sc, &pe);
	} else {
		/* First use: accept any DA as unicast, continue at DSA. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
		    MVPP2_PRS_RI_L2_CAST_MASK);
		/* Skip past the two MAC addresses. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_tcam_port_map_set(&pe, 0);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3445 
void
mvpp2_prs_mac_multi_set(struct mvpp2_softc *sc, uint32_t port, uint32_t index, int add)
{
	struct mvpp2_prs_entry pe;
	uint8_t da_mc;

	/*
	 * Add (or remove) this port in one of the shared multicast MAC
	 * entries, creating it on first use.  The entry matches on the
	 * first DA byte: 0x01 for the MC_ALL entry, 0x33 otherwise.
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (sc->sc_prs_shadow[index].valid) {
		pe.index = index;
		mvpp2_prs_hw_read(sc, &pe);
	} else {
		/* First use: classify as L2 multicast, continue at DSA. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
		    MVPP2_PRS_RI_L2_CAST_MASK);
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
		/* Skip past the two MAC addresses. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_tcam_port_map_set(&pe, 0);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3474 
void
mvpp2_prs_dsa_tag_set(struct mvpp2_softc *sc, uint32_t port, int add,
    int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift;

	/*
	 * Add (or remove) this port in the DSA/EDSA entry selected by
	 * (tagged, extend), creating the entry on first use.  EDSA
	 * tags are 8 bytes, plain DSA tags 4.
	 */
	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (sc->sc_prs_shadow[tid].valid) {
		pe.index = tid;
		mvpp2_prs_hw_read(sc, &pe);
	} else {
		/* First use: build the entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		/* Shift past the DSA tag. */
		mvpp2_prs_sram_shift_set(&pe, shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* Match the tagged bit and continue at VLAN. */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* No VLAN: mark it and continue at L2. */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3517 
void
mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *sc, uint32_t port,
    int add, int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift, port_mask;

	/*
	 * Like mvpp2_prs_dsa_tag_set(), but for DSA/EDSA tags that are
	 * preceded by the EDSA ethertype.
	 */
	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (sc->sc_prs_shadow[tid].valid) {
		pe.index = tid;
		mvpp2_prs_hw_read(sc, &pe);
	} else {
		/* First use: build the entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		/* Match the EDSA ethertype followed by a zero word. */
		mvpp2_prs_match_etype(&pe, 0, MV_ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
		    MVPP2_PRS_RI_DSA_MASK);
		/* Shift past the two MAC addresses and the tag. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN + shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* Match the tagged bit and continue at VLAN. */
			mvpp2_prs_tcam_data_byte_set(&pe,
			    MVPP2_ETH_TYPE_LEN + 2 + 3,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* No VLAN: mark it and continue at L2. */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3567 
struct mvpp2_prs_entry *
mvpp2_prs_vlan_find(struct mvpp2_softc *sc, uint16_t tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_bits, ai_bits;
	int match, tid;

	/*
	 * Look up the single/triple VLAN entry matching the given TPID
	 * and AI.  On a match the entry is returned and the caller
	 * must free it; returns NULL otherwise (or on ENOMEM).
	 */
	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
	if (pe == NULL)
		return NULL;

	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		if (!sc->sc_prs_shadow[tid].valid ||
		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;
		pe->index = tid;
		mvpp2_prs_hw_read(sc, pe);
		/* The TPID is stored byte-swapped in the TCAM data. */
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid));
		if (!match)
			continue;
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
		/* Compare the AI with the double-VLAN marker masked off. */
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
		if (ai != ai_bits)
			continue;
		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}

	free(pe, M_TEMP, sizeof(*pe));
	return NULL;
}
3604 
int
mvpp2_prs_vlan_add(struct mvpp2_softc *sc, uint16_t tpid, int ai, uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_bits;
	int tid_aux, tid;
	int ret = 0;

	/*
	 * Install (or update) a single/triple VLAN entry for the given
	 * TPID and AI, limited to the ports in port_map.  Returns 0 on
	 * success, a negative value if the TCAM is full, or an errno.
	 */
	pe = mvpp2_prs_vlan_find(sc, tpid, ai);
	if (pe == NULL) {
		/* No existing entry: allocate a tid from the top down. */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_LAST_FREE_TID,
		    MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		/* get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		    tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			pe->index = tid_aux;
			mvpp2_prs_hw_read(sc, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		/* Single/triple entries must sit above all double entries. */
		if (tid <= tid_aux) {
			ret = EINVAL;
			goto error;
		}

		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		mvpp2_prs_match_etype(pe, 0, tpid);
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* Shift past the VLAN tag. */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
				   MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		} else {
			/* Mark the AI as belonging to a double-VLAN chain. */
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
3670 
3671 int
3672 mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *sc)
3673 {
3674 	int i;
3675 
3676 	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++)
3677 		if (!sc->sc_prs_double_vlans[i])
3678 			return i;
3679 
3680 	return -1;
3681 }
3682 
struct mvpp2_prs_entry *
mvpp2_prs_double_vlan_find(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_mask;
	int match, tid;

	/*
	 * Look up the double VLAN entry matching the given inner and
	 * outer TPIDs.  On a match the entry is returned and the
	 * caller must free it; returns NULL otherwise (or on ENOMEM).
	 */
	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
	if (pe == NULL)
		return NULL;

	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		if (!sc->sc_prs_shadow[tid].valid ||
		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(sc, pe);
		/* Both TPIDs are stored byte-swapped in the TCAM data. */
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid1)) &&
		    mvpp2_prs_tcam_data_cmp(pe, 4, swap16(tpid2));
		if (!match)
			continue;
		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}

	free(pe, M_TEMP, sizeof(*pe));
	return NULL;
}
3715 
/*
 * Install (or update) the parser entry matching a double-tagged frame
 * with outer TPID "tpid1" and inner TPID "tpid2", and enable it for
 * the ports in "port_map".  If a matching entry already exists only
 * its port map is rewritten.  Returns 0 on success, ENOMEM/ERANGE on
 * allocation or ordering failures, or a negative value when no free
 * TCAM slot / AI value is available.
 */
int
mvpp2_prs_double_vlan_add(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2,
    uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;
	uint32_t ri_bits;

	pe = mvpp2_prs_double_vlan_find(sc, tpid1, tpid2);
	if (pe == NULL) {
		/* no existing entry; allocate a TCAM slot for a new one */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
		    MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		/* reserve an AI value to tag this double-VLAN pair */
		ai = mvpp2_prs_double_vlan_ai_free_get(sc);
		if (ai < 0) {
			/*
			 * NOTE(review): this failure returns the negative
			 * -1 while the other failures above/below return
			 * positive errnos — confirm callers handle both.
			 */
			ret = ai;
			goto error;
		}

		/*
		 * Find the first single/triple VLAN entry; the new
		 * double-VLAN entry must sit before it in the TCAM
		 * (lower index) so it is matched first.
		 */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		    tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			pe->index = tid_aux;
			mvpp2_prs_hw_read(sc, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		/* free slot lies after a single/triple entry: can't order */
		if (tid >= tid_aux) {
			ret = ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		sc->sc_prs_double_vlans[ai] = 1;
		/* match the outer TPID at offset 0, the inner at offset 4 */
		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* skip both tags before the next lookup stage */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
		    MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
		    MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* enable the entry for the requested ports and commit it */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
3783 
/*
 * Install the pair of IPv4 parser entries for protocol "proto" (only
 * TCP, UDP and IGMP are accepted): a first entry that reports the
 * protocol with the "fragmented" result bit set, and a second entry,
 * derived from the first, that matches non-fragmented packets and
 * reports "ri"/"ri_mask" unchanged.  Returns 0 on success, EINVAL for
 * an unsupported protocol, or a negative value when no free TCAM slot
 * is available.
 */
int
mvpp2_prs_ip4_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
    uint32_t ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != MV_IPPR_TCP) && (proto != MV_IPPR_UDP) &&
	    (proto != MV_IPPR_IGMP))
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/* first entry: fragmented packets (frag result bit forced on) */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* L4 offset points just before the end of the fixed IPv4 header */
	mvpp2_prs_sram_offset_set( &pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK, ri_mask |
	    MVPP2_PRS_RI_IP_FRAG_MASK);
	/* TCAM byte 5 — presumably the IP protocol field at this parse
	 * offset; TODO confirm against the tcam data layout */
	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/*
	 * Second entry: reuse the entry built above, clear its result
	 * words, and additionally require TCAM bytes 2/3 to be zero —
	 * presumably the IPv4 flags/fragment-offset field, so this
	 * entry matches only non-fragmented packets.
	 */
	pe.index = tid;
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
3833 
3834 int
3835 mvpp2_prs_ip4_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
3836 {
3837 	struct mvpp2_prs_entry pe;
3838 	int mask, tid;
3839 
3840 	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
3841 	    MVPP2_PE_LAST_FREE_TID);
3842 	if (tid < 0)
3843 		return tid;
3844 
3845 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3846 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3847 	pe.index = tid;
3848 
3849 	switch (l3_cast) {
3850 	case MVPP2_PRS_L3_MULTI_CAST:
3851 		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
3852 		    MVPP2_PRS_IPV4_MC_MASK);
3853 		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
3854 		    MVPP2_PRS_RI_L3_ADDR_MASK);
3855 		break;
3856 	case  MVPP2_PRS_L3_BROAD_CAST:
3857 		mask = MVPP2_PRS_IPV4_BC_MASK;
3858 		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
3859 		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
3860 		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
3861 		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
3862 		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
3863 		    MVPP2_PRS_RI_L3_ADDR_MASK);
3864 		break;
3865 	default:
3866 		return EINVAL;
3867 	}
3868 
3869 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3870 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3871 	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3872 	    MVPP2_PRS_IPV4_DIP_AI_BIT);
3873 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3874 	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
3875 	mvpp2_prs_hw_write(sc, &pe);
3876 
3877 	return 0;
3878 }
3879 
3880 int
3881 mvpp2_prs_ip6_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
3882     uint32_t ri_mask)
3883 {
3884 	struct mvpp2_prs_entry pe;
3885 	int tid;
3886 
3887 	if ((proto != MV_IPPR_TCP) && (proto != MV_IPPR_UDP) &&
3888 	    (proto != MV_IPPR_ICMPV6) && (proto != MV_IPPR_IPIP))
3889 		return EINVAL;
3890 
3891 	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
3892 	    MVPP2_PE_LAST_FREE_TID);
3893 	if (tid < 0)
3894 		return tid;
3895 
3896 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3897 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3898 	pe.index = tid;
3899 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3900 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3901 	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
3902 	mvpp2_prs_sram_offset_set( &pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3903 	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3904 	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
3905 	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3906 	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3907 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3908 	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
3909 	mvpp2_prs_hw_write(sc, &pe);
3910 
3911 	return 0;
3912 }
3913 
/*
 * Install the IPv6 multicast destination-address entry (multicast is
 * the only cast type supported here).  Returns 0 on success, EINVAL
 * for any other cast type, or a negative value when no free TCAM
 * slot is available.
 */
int
mvpp2_prs_ip6_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* rewinds the parse position by 18 bytes before the next IP6
	 * lookup — NOTE(review): confirm rationale against the PPv2
	 * parser documentation */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* match the IPv6 multicast prefix in the first DA byte */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
	    MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
3945 
3946 int
3947 mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, const uint8_t *da,
3948     uint8_t *mask)
3949 {
3950 	uint8_t tcam_byte, tcam_mask;
3951 	int index;
3952 
3953 	for (index = 0; index < ETHER_ADDR_LEN; index++) {
3954 		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte,
3955 		    &tcam_mask);
3956 		if (tcam_mask != mask[index])
3957 			return 0;
3958 		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3959 			return 0;
3960 	}
3961 
3962 	return 1;
3963 }
3964 
3965 struct mvpp2_prs_entry *
3966 mvpp2_prs_mac_da_range_find(struct mvpp2_softc *sc, int pmap, const uint8_t *da,
3967     uint8_t *mask, int udf_type)
3968 {
3969 	struct mvpp2_prs_entry *pe;
3970 	int tid;
3971 
3972 	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
3973 	if (pe == NULL)
3974 		return NULL;
3975 
3976 	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3977 	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID;
3978 	    tid++) {
3979 		uint32_t entry_pmap;
3980 
3981 		if (!sc->sc_prs_shadow[tid].valid ||
3982 		    (sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3983 		    (sc->sc_prs_shadow[tid].udf != udf_type))
3984 			continue;
3985 
3986 		pe->index = tid;
3987 		mvpp2_prs_hw_read(sc, pe);
3988 		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3989 		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3990 		    entry_pmap == pmap)
3991 			return pe;
3992 	}
3993 
3994 	free(pe, M_TEMP, sizeof(*pe));
3995 	return NULL;
3996 }
3997 
/*
 * Add ("add" != 0) or remove a unicast/multicast/broadcast MAC DA
 * filter entry for port "port_id".  An existing entry for the same
 * address is updated in place; when the last port is removed from an
 * entry the entry is invalidated.  Returns 0 on success, ENOMEM on
 * allocation failure, -1 when the port map update fails on add, or a
 * negative value when no free TCAM slot is available.
 */
int
mvpp2_prs_mac_da_accept(struct mvpp2_softc *sc, int port_id, const uint8_t *da,
    int add)
{
	struct mvpp2_prs_entry *pe;
	uint32_t pmap, len, ri;
	uint8_t mask[ETHER_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* look for an exact-match entry already covering this port */
	pe = mvpp2_prs_mac_da_range_find(sc, (1 << port_id), da, mask,
	    MVPP2_PRS_UDF_MAC_DEF);
	if (pe == NULL) {
		/* nothing to remove */
		if (!add)
			return 0;

		/*
		 * New entries must stay below the first MAC-range entry;
		 * find it to bound the free-slot search.
		 */
		for (tid = MVPP2_PE_FIRST_FREE_TID; tid <=
		    MVPP2_PE_LAST_FREE_TID; tid++) {
			if (sc->sc_prs_shadow[tid].valid &&
			    (sc->sc_prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (sc->sc_prs_shadow[tid].udf == MVPP2_PRS_UDF_MAC_RANGE))
				break;
		}

		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID, tid - 1);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;
		/* start with an empty port map; the port is added below */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	mvpp2_prs_tcam_port_set(pe, port_id, add);

	/* invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			/* adding a port must never yield an empty map */
			free(pe, M_TEMP, sizeof(*pe));
			return -1;
		}
		mvpp2_prs_hw_inv(sc, pe->index);
		sc->sc_prs_shadow[pe->index].valid = 0;
		free(pe, M_TEMP, sizeof(*pe));
		return 0;
	}

	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* match all six DA bytes exactly */
	len = ETHER_ADDR_LEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* classify the address for the result word */
	if (ETHER_IS_BROADCAST(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (ETHER_IS_MULTICAST(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
	    MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(sc, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
	    MVPP2_PRS_RI_MAC_ME_MASK);
	/* skip source + destination MAC before the next lookup */
	mvpp2_prs_sram_shift_set(pe, 2 * ETHER_ADDR_LEN,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	sc->sc_prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(sc, pe);

	free(pe, M_TEMP, sizeof(*pe));
	return 0;
}
4075 
4076 int
4077 mvpp2_prs_tag_mode_set(struct mvpp2_softc *sc, int port_id, int type)
4078 {
4079 	switch (type) {
4080 	case MVPP2_TAG_TYPE_EDSA:
4081 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
4082 		    MVPP2_PRS_EDSA);
4083 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
4084 		    MVPP2_PRS_EDSA);
4085 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4086 		    MVPP2_PRS_DSA);
4087 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4088 		    MVPP2_PRS_DSA);
4089 		break;
4090 	case MVPP2_TAG_TYPE_DSA:
4091 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
4092 		    MVPP2_PRS_DSA);
4093 		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
4094 		    MVPP2_PRS_DSA);
4095 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4096 		    MVPP2_PRS_EDSA);
4097 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4098 		    MVPP2_PRS_EDSA);
4099 		break;
4100 	case MVPP2_TAG_TYPE_MH:
4101 	case MVPP2_TAG_TYPE_NONE:
4102 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4103 		    MVPP2_PRS_DSA);
4104 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4105 		    MVPP2_PRS_DSA);
4106 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
4107 		    MVPP2_PRS_EDSA);
4108 		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
4109 		    MVPP2_PRS_EDSA);
4110 		break;
4111 	default:
4112 		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
4113 			return EINVAL;
4114 		break;
4115 	}
4116 
4117 	return 0;
4118 }
4119 
4120 int
4121 mvpp2_prs_def_flow(struct mvpp2_port *port)
4122 {
4123 	struct mvpp2_prs_entry *pe;
4124 	int tid;
4125 
4126 	pe = mvpp2_prs_flow_find(port->sc, port->sc_id);
4127 	if (pe == NULL) {
4128 		tid = mvpp2_prs_tcam_first_free(port->sc,
4129 		    MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID);
4130 		if (tid < 0)
4131 			return tid;
4132 
4133 		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
4134 		if (pe == NULL)
4135 			return ENOMEM;
4136 
4137 		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
4138 		pe->index = tid;
4139 		mvpp2_prs_sram_ai_update(pe, port->sc_id,
4140 		    MVPP2_PRS_FLOW_ID_MASK);
4141 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
4142 		mvpp2_prs_shadow_set(port->sc, pe->index, MVPP2_PRS_LU_FLOWS);
4143 	}
4144 
4145 	mvpp2_prs_tcam_port_map_set (pe,(1 << port->sc_id));
4146 	mvpp2_prs_hw_write(port->sc, pe);
4147 	free(pe, M_TEMP, sizeof(*pe));
4148 	return 0;
4149 }
4150 
/*
 * Write a classifier flow table entry to hardware: the row is
 * selected through the index register first, then the three data
 * words are stored.
 */
void
mvpp2_cls_flow_write(struct mvpp2_softc *sc, struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(sc, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
4159 
4160 void
4161 mvpp2_cls_lookup_write(struct mvpp2_softc *sc, struct mvpp2_cls_lookup_entry *le)
4162 {
4163 	uint32_t val;
4164 
4165 	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
4166 	mvpp2_write(sc, MVPP2_CLS_LKP_INDEX_REG, val);
4167 	mvpp2_write(sc, MVPP2_CLS_LKP_TBL_REG, le->data);
4168 }
4169 
4170 void
4171 mvpp2_cls_init(struct mvpp2_softc *sc)
4172 {
4173 	struct mvpp2_cls_lookup_entry le;
4174 	struct mvpp2_cls_flow_entry fe;
4175 	int index;
4176 
4177 	mvpp2_write(sc, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
4178 	memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS);
4179 	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
4180 		fe.index = index;
4181 		mvpp2_cls_flow_write(sc, &fe);
4182 	}
4183 	le.data = 0;
4184 	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
4185 		le.lkpid = index;
4186 		le.way = 0;
4187 		mvpp2_cls_lookup_write(sc, &le);
4188 		le.way = 1;
4189 		mvpp2_cls_lookup_write(sc, &le);
4190 	}
4191 }
4192 
4193 void
4194 mvpp2_cls_port_config(struct mvpp2_port *port)
4195 {
4196 	struct mvpp2_cls_lookup_entry le;
4197 	uint32_t val;
4198 
4199 	/* set way for the port */
4200 	val = mvpp2_read(port->sc, MVPP2_CLS_PORT_WAY_REG);
4201 	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->sc_id);
4202 	mvpp2_write(port->sc, MVPP2_CLS_PORT_WAY_REG, val);
4203 
4204 	/*
4205 	 * pick the entry to be accessed in lookup ID decoding table
4206 	 * according to the way and lkpid.
4207 	 */
4208 	le.lkpid = port->sc_id;
4209 	le.way = 0;
4210 	le.data = 0;
4211 
4212 	/* set initial CPU queue for receiving packets */
4213 	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
4214 	le.data |= (port->sc_id * 32);
4215 
4216 	/* disable classification engines */
4217 	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
4218 
4219 	/* update lookup ID table entry */
4220 	mvpp2_cls_lookup_write(port->sc, &le);
4221 }
4222 
/*
 * Program the rx queue used for oversize packets on this port: the
 * port's base queue number (port id * 32), masked to the register's
 * low-queue field.
 */
void
mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	mvpp2_write(port->sc, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->sc_id),
	    (port->sc_id * 32) & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
}
4229