xref: /openbsd-src/sys/arch/octeon/dev/if_cnmac.c (revision 1a8dbaac879b9f3335ad7fb25429ce63ac1d6bac)
1 /*	$OpenBSD: if_cnmac.c,v 1.79 2020/09/04 15:18:05 visa Exp $	*/
2 
3 /*
4  * Copyright (c) 2007 Internet Initiative Japan, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include "bpfilter.h"
29 
30 /*
31  * XXXSEIL
32  * If no free send buffer is available, free all the sent buffer and bail out.
33  */
34 #define OCTEON_ETH_SEND_QUEUE_CHECK
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/pool.h>
39 #include <sys/proc.h>
40 #include <sys/mbuf.h>
41 #include <sys/malloc.h>
42 #include <sys/kernel.h>
43 #include <sys/socket.h>
44 #include <sys/ioctl.h>
45 #include <sys/errno.h>
46 #include <sys/device.h>
47 #include <sys/queue.h>
48 #include <sys/conf.h>
49 #include <sys/stdint.h> /* uintptr_t */
50 #include <sys/syslog.h>
51 #include <sys/endian.h>
52 #include <sys/atomic.h>
53 
54 #include <net/if.h>
55 #include <net/if_media.h>
56 #include <netinet/in.h>
57 #include <netinet/if_ether.h>
58 
59 #if NBPFILTER > 0
60 #include <net/bpf.h>
61 #endif
62 
63 #include <machine/bus.h>
64 #include <machine/intr.h>
65 #include <machine/octeonvar.h>
66 #include <machine/octeon_model.h>
67 
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70 
71 #include <octeon/dev/cn30xxciureg.h>
72 #include <octeon/dev/cn30xxnpireg.h>
73 #include <octeon/dev/cn30xxgmxreg.h>
74 #include <octeon/dev/cn30xxipdreg.h>
75 #include <octeon/dev/cn30xxpipreg.h>
76 #include <octeon/dev/cn30xxpowreg.h>
77 #include <octeon/dev/cn30xxfaureg.h>
78 #include <octeon/dev/cn30xxfpareg.h>
79 #include <octeon/dev/cn30xxbootbusreg.h>
80 #include <octeon/dev/cn30xxfpavar.h>
81 #include <octeon/dev/cn30xxgmxvar.h>
82 #include <octeon/dev/cn30xxfauvar.h>
83 #include <octeon/dev/cn30xxpowvar.h>
84 #include <octeon/dev/cn30xxipdvar.h>
85 #include <octeon/dev/cn30xxpipvar.h>
86 #include <octeon/dev/cn30xxpkovar.h>
87 #include <octeon/dev/cn30xxsmivar.h>
88 #include <octeon/dev/iobusvar.h>
89 #include <octeon/dev/if_cnmacvar.h>
90 
91 #ifdef OCTEON_ETH_DEBUG
92 #define	OCTEON_ETH_KASSERT(x)	KASSERT(x)
93 #define	OCTEON_ETH_KDASSERT(x)	KDASSERT(x)
94 #else
95 #define	OCTEON_ETH_KASSERT(x)
96 #define	OCTEON_ETH_KDASSERT(x)
97 #endif
98 
99 /*
100  * Set the PKO to think command buffers are an odd length.  This makes it so we
 * never have to divide a command across two buffers.
102  */
103 #define OCTEON_POOL_NWORDS_CMD	\
104 	    (((uint32_t)OCTEON_POOL_SIZE_CMD / sizeof(uint64_t)) - 1)
105 #define FPA_COMMAND_BUFFER_POOL_NWORDS	OCTEON_POOL_NWORDS_CMD	/* XXX */
106 
107 CTASSERT(MCLBYTES >= OCTEON_POOL_SIZE_PKT + CACHELINESIZE);
108 
109 void	cnmac_buf_init(struct cnmac_softc *);
110 
111 int	cnmac_match(struct device *, void *, void *);
112 void	cnmac_attach(struct device *, struct device *, void *);
113 void	cnmac_pip_init(struct cnmac_softc *);
114 void	cnmac_ipd_init(struct cnmac_softc *);
115 void	cnmac_pko_init(struct cnmac_softc *);
116 void	cnmac_smi_init(struct cnmac_softc *);
117 
118 void	cnmac_board_mac_addr(uint8_t *);
119 
120 int	cnmac_mii_readreg(struct device *, int, int);
121 void	cnmac_mii_writereg(struct device *, int, int, int);
122 void	cnmac_mii_statchg(struct device *);
123 
124 int	cnmac_mediainit(struct cnmac_softc *);
125 void	cnmac_mediastatus(struct ifnet *, struct ifmediareq *);
126 int	cnmac_mediachange(struct ifnet *);
127 
128 void	cnmac_send_queue_flush_prefetch(struct cnmac_softc *);
129 void	cnmac_send_queue_flush_fetch(struct cnmac_softc *);
130 void	cnmac_send_queue_flush(struct cnmac_softc *);
131 int	cnmac_send_queue_is_full(struct cnmac_softc *);
132 void	cnmac_send_queue_add(struct cnmac_softc *,
133 	    struct mbuf *, uint64_t *);
134 void	cnmac_send_queue_del(struct cnmac_softc *,
135 	    struct mbuf **, uint64_t **);
136 int	cnmac_buf_free_work(struct cnmac_softc *, uint64_t *);
137 void	cnmac_buf_ext_free(caddr_t, u_int, void *);
138 
139 int	cnmac_ioctl(struct ifnet *, u_long, caddr_t);
140 void	cnmac_watchdog(struct ifnet *);
141 int	cnmac_init(struct ifnet *);
142 int	cnmac_stop(struct ifnet *, int);
143 void	cnmac_start(struct ifqueue *);
144 
145 int	cnmac_send_cmd(struct cnmac_softc *, uint64_t, uint64_t);
146 uint64_t cnmac_send_makecmd_w1(int, paddr_t);
147 uint64_t cnmac_send_makecmd_w0(uint64_t, uint64_t, size_t, int, int);
148 int	cnmac_send_makecmd_gbuf(struct cnmac_softc *,
149 	    struct mbuf *, uint64_t *, int *);
150 int	cnmac_send_makecmd(struct cnmac_softc *,
151 	    struct mbuf *, uint64_t *, uint64_t *, uint64_t *);
152 int	cnmac_send_buf(struct cnmac_softc *,
153 	    struct mbuf *, uint64_t *);
154 int	cnmac_send(struct cnmac_softc *, struct mbuf *);
155 
156 int	cnmac_reset(struct cnmac_softc *);
157 int	cnmac_configure(struct cnmac_softc *);
158 int	cnmac_configure_common(struct cnmac_softc *);
159 
160 void	cnmac_free_task(void *);
161 void	cnmac_tick_free(void *arg);
162 void	cnmac_tick_misc(void *);
163 
164 int	cnmac_recv_mbuf(struct cnmac_softc *,
165 	    uint64_t *, struct mbuf **, int *);
166 int	cnmac_recv_check(struct cnmac_softc *, uint64_t);
167 int	cnmac_recv(struct cnmac_softc *, uint64_t *, struct mbuf_list *);
168 int	cnmac_intr(void *);
169 
170 int	cnmac_mbuf_alloc(int);
171 
172 #if NKSTAT > 0
173 void	cnmac_kstat_attach(struct cnmac_softc *);
174 int	cnmac_kstat_read(struct kstat *);
175 void	cnmac_kstat_tick(struct cnmac_softc *);
176 #endif
177 
/* device parameters */
int	cnmac_param_pko_cmd_w0_n2 = 1;	/* PKO WORD0[n2]: don't L2-cache the packet */

const struct cfattach cnmac_ca = {
	sizeof(struct cnmac_softc), cnmac_match, cnmac_attach
};

struct cfdriver cnmac_cd = { NULL, "cnmac", DV_IFNET };

/* ---- buffer management */

/* Parameters of the FPA pools shared by all cnmac ports. */
const struct cnmac_pool_param {
	int			poolno;
	size_t			size;
	size_t			nelems;
} cnmac_pool_params[] = {
#define	_ENTRY(x)	{ OCTEON_POOL_NO_##x, OCTEON_POOL_SIZE_##x, OCTEON_POOL_NELEMS_##x }
	_ENTRY(WQE),
	_ENTRY(CMD),
	_ENTRY(SG)
#undef	_ENTRY
};
struct cn30xxfpa_buf	*cnmac_pools[8];
#define	cnmac_fb_wqe	cnmac_pools[OCTEON_POOL_NO_WQE]
#define	cnmac_fb_cmd	cnmac_pools[OCTEON_POOL_NO_CMD]
#define	cnmac_fb_sg	cnmac_pools[OCTEON_POOL_NO_SG]

/* Next MAC address to hand out, kept in the low 48 bits; 0 = uninitialized. */
uint64_t cnmac_mac_addr = 0;
/* Index of the next address relative to the board's mac_addr_base. */
uint32_t cnmac_mac_addr_offset = 0;

/* Receive buffers that could not be allocated yet; retried from the tick. */
int	cnmac_mbufs_to_alloc;
/* Next POW group number to assign to an attaching port. */
int	cnmac_npowgroups = 0;
210 
211 void
212 cnmac_buf_init(struct cnmac_softc *sc)
213 {
214 	static int once;
215 	int i;
216 	const struct cnmac_pool_param *pp;
217 	struct cn30xxfpa_buf *fb;
218 
219 	if (once == 1)
220 		return;
221 	once = 1;
222 
223 	for (i = 0; i < (int)nitems(cnmac_pool_params); i++) {
224 		pp = &cnmac_pool_params[i];
225 		cn30xxfpa_buf_init(pp->poolno, pp->size, pp->nelems, &fb);
226 		cnmac_pools[pp->poolno] = fb;
227 	}
228 }
229 
230 /* ---- autoconf */
231 
232 int
233 cnmac_match(struct device *parent, void *match, void *aux)
234 {
235 	struct cfdata *cf = (struct cfdata *)match;
236 	struct cn30xxgmx_attach_args *ga = aux;
237 
238 	if (strcmp(cf->cf_driver->cd_name, ga->ga_name) != 0) {
239 		return 0;
240 	}
241 	return 1;
242 }
243 
/*
 * Attach one cnmac port: reserve a POW group, pre-fill the packet
 * buffer pool, initialize the PIP/IPD/PKO submodules and MII media,
 * then register the ifnet and the receive interrupt.
 */
void
cnmac_attach(struct device *parent, struct device *self, void *aux)
{
	struct cnmac_softc *sc = (void *)self;
	struct cn30xxgmx_attach_args *ga = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint8_t enaddr[ETHER_ADDR_LEN];

	if (cnmac_npowgroups >= OCTEON_POW_GROUP_MAX) {
		printf(": out of POW groups\n");
		return;
	}

	/*
	 * cnmac_mbuf_alloc() returns the number of buffers it could NOT
	 * allocate; remember them so the free tick can retry later.
	 */
	atomic_add_int(&cnmac_mbufs_to_alloc,
	    cnmac_mbuf_alloc(CNMAC_MBUFS_PER_PORT));

	/* Copy the per-port handles provided by the GMX parent. */
	sc->sc_regt = ga->ga_regt;
	sc->sc_dmat = ga->ga_dmat;
	sc->sc_port = ga->ga_portno;
	sc->sc_port_type = ga->ga_port_type;
	sc->sc_gmx = ga->ga_gmx;
	sc->sc_gmx_port = ga->ga_gmx_port;
	sc->sc_smi = ga->ga_smi;
	sc->sc_phy_addr = ga->ga_phy_addr;
	sc->sc_powgroup = cnmac_npowgroups++;

	sc->sc_init_flag = 0;

	/*
	 * XXX
	 * Setting PIP_IP_OFFSET[OFFSET] to 8 causes panic ... why???
	 */
	sc->sc_ip_offset = 0/* XXX */;

	cnmac_board_mac_addr(enaddr);
	printf(", address %s\n", ether_sprintf(enaddr));

	ml_init(&sc->sc_sendq);
	sc->sc_soft_req_thresh = 15/* XXX */;
	sc->sc_ext_callback_cnt = 0;

	task_set(&sc->sc_free_task, cnmac_free_task, sc);
	timeout_set(&sc->sc_tick_misc_ch, cnmac_tick_misc, sc);
	timeout_set(&sc->sc_tick_free_ch, cnmac_tick_free, sc);

	/* Per-unit FAU register that counts completed transmissions. */
	cn30xxfau_op_init(&sc->sc_fau_done,
	    OCTEON_CVMSEG_ETHER_OFFSET(sc->sc_dev.dv_unit, csm_ether_fau_done),
	    OCT_FAU_REG_ADDR_END - (8 * (sc->sc_dev.dv_unit + 1))/* XXX */);
	cn30xxfau_op_set_8(&sc->sc_fau_done, 0);

	cnmac_pip_init(sc);
	cnmac_ipd_init(sc);
	cnmac_pko_init(sc);

	cnmac_configure_common(sc);

	sc->sc_gmx_port->sc_ipd = sc->sc_ipd;
	sc->sc_gmx_port->sc_port_mii = &sc->sc_mii;
	sc->sc_gmx_port->sc_port_ac = &sc->sc_arpcom;

	/* XXX */
	sc->sc_pow = &cn30xxpow_softc;

	cnmac_mediainit(sc);

	strncpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = cnmac_ioctl;
	ifp->if_qstart = cnmac_start;
	ifp->if_watchdog = cnmac_watchdog;
	ifp->if_hardmtu = CNMAC_MAX_MTU;
	ifq_set_maxlen(&ifp->if_snd, max(GATHER_QUEUE_SIZE, IFQ_MAXLEN));

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;

	cn30xxgmx_set_mac_addr(sc->sc_gmx_port, enaddr);
	cn30xxgmx_set_filter(sc->sc_gmx_port);

	if_attach(ifp);

	memcpy(sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
	ether_ifattach(ifp);

	/* Shared pools; only the first attach actually creates them. */
	cnmac_buf_init(sc);

#if NKSTAT > 0
	cnmac_kstat_attach(sc);
#endif

	sc->sc_ih = octeon_intr_establish(POW_WORKQ_IRQ(sc->sc_powgroup),
	    IPL_NET | IPL_MPSAFE, cnmac_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL)
		panic("%s: could not set up interrupt", sc->sc_dev.dv_xname);
}
341 
342 /* ---- submodules */
343 
344 void
345 cnmac_pip_init(struct cnmac_softc *sc)
346 {
347 	struct cn30xxpip_attach_args pip_aa;
348 
349 	pip_aa.aa_port = sc->sc_port;
350 	pip_aa.aa_regt = sc->sc_regt;
351 	pip_aa.aa_tag_type = POW_TAG_TYPE_ORDERED/* XXX */;
352 	pip_aa.aa_receive_group = sc->sc_powgroup;
353 	pip_aa.aa_ip_offset = sc->sc_ip_offset;
354 	cn30xxpip_init(&pip_aa, &sc->sc_pip);
355 	cn30xxpip_port_config(sc->sc_pip);
356 }
357 
358 void
359 cnmac_ipd_init(struct cnmac_softc *sc)
360 {
361 	struct cn30xxipd_attach_args ipd_aa;
362 
363 	ipd_aa.aa_port = sc->sc_port;
364 	ipd_aa.aa_regt = sc->sc_regt;
365 	ipd_aa.aa_first_mbuff_skip = 0/* XXX */;
366 	ipd_aa.aa_not_first_mbuff_skip = 0/* XXX */;
367 	cn30xxipd_init(&ipd_aa, &sc->sc_ipd);
368 }
369 
370 void
371 cnmac_pko_init(struct cnmac_softc *sc)
372 {
373 	struct cn30xxpko_attach_args pko_aa;
374 
375 	pko_aa.aa_port = sc->sc_port;
376 	pko_aa.aa_regt = sc->sc_regt;
377 	pko_aa.aa_cmdptr = &sc->sc_cmdptr;
378 	pko_aa.aa_cmd_buf_pool = OCTEON_POOL_NO_CMD;
379 	pko_aa.aa_cmd_buf_size = OCTEON_POOL_NWORDS_CMD;
380 	cn30xxpko_init(&pko_aa, &sc->sc_pko);
381 }
382 
383 /* ---- XXX */
384 
385 void
386 cnmac_board_mac_addr(uint8_t *enaddr)
387 {
388 	int id;
389 
390 	/* Initialize MAC addresses from the global address base. */
391 	if (cnmac_mac_addr == 0) {
392 		memcpy((uint8_t *)&cnmac_mac_addr + 2,
393 		    octeon_boot_info->mac_addr_base, 6);
394 
395 		/*
396 		 * Should be allowed to fail hard if couldn't read the
397 		 * mac_addr_base address...
398 		 */
399 		if (cnmac_mac_addr == 0)
400 			return;
401 
402 		/*
403 		 * Calculate the offset from the mac_addr_base that will be used
404 		 * for the next sc->sc_port.
405 		 */
406 		id = octeon_get_chipid();
407 
408 		switch (octeon_model_family(id)) {
409 		case OCTEON_MODEL_FAMILY_CN56XX:
410 			cnmac_mac_addr_offset = 1;
411 			break;
412 		/*
413 		case OCTEON_MODEL_FAMILY_CN52XX:
414 		case OCTEON_MODEL_FAMILY_CN63XX:
415 			cnmac_mac_addr_offset = 2;
416 			break;
417 		*/
418 		default:
419 			cnmac_mac_addr_offset = 0;
420 			break;
421 		}
422 
423 		enaddr += cnmac_mac_addr_offset;
424 	}
425 
426 	/* No more MAC addresses to assign. */
427 	if (cnmac_mac_addr_offset >= octeon_boot_info->mac_addr_count)
428 		return;
429 
430 	if (enaddr)
431 		memcpy(enaddr, (uint8_t *)&cnmac_mac_addr + 2, 6);
432 
433 	cnmac_mac_addr++;
434 	cnmac_mac_addr_offset++;
435 }
436 
437 /* ---- media */
438 
439 int
440 cnmac_mii_readreg(struct device *self, int phy_no, int reg)
441 {
442 	struct cnmac_softc *sc = (struct cnmac_softc *)self;
443 	return cn30xxsmi_read(sc->sc_smi, phy_no, reg);
444 }
445 
446 void
447 cnmac_mii_writereg(struct device *self, int phy_no, int reg, int value)
448 {
449 	struct cnmac_softc *sc = (struct cnmac_softc *)self;
450 	cn30xxsmi_write(sc->sc_smi, phy_no, reg, value);
451 }
452 
/*
 * MII status-change callback: quiesce the PKO and GMX ports, apply the
 * new link parameters, then re-enable them in the same order.
 */
void
cnmac_mii_statchg(struct device *self)
{
	struct cnmac_softc *sc = (struct cnmac_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	cn30xxpko_port_enable(sc->sc_pko, 0);
	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	/* Reprogram speed, flow control and timing for the new link. */
	cnmac_reset(sc);

	if (ISSET(ifp->if_flags, IFF_RUNNING))
		cn30xxgmx_set_filter(sc->sc_gmx_port);

	cn30xxpko_port_enable(sc->sc_pko, 1);
	cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
}
470 
471 int
472 cnmac_mediainit(struct cnmac_softc *sc)
473 {
474 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
475 	struct mii_softc *child;
476 
477 	sc->sc_mii.mii_ifp = ifp;
478 	sc->sc_mii.mii_readreg = cnmac_mii_readreg;
479 	sc->sc_mii.mii_writereg = cnmac_mii_writereg;
480 	sc->sc_mii.mii_statchg = cnmac_mii_statchg;
481 	ifmedia_init(&sc->sc_mii.mii_media, 0, cnmac_mediachange,
482 	    cnmac_mediastatus);
483 
484 	mii_attach(&sc->sc_dev, &sc->sc_mii,
485 	    0xffffffff, sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);
486 
487 	child = LIST_FIRST(&sc->sc_mii.mii_phys);
488 	if (child == NULL) {
489                 /* No PHY attached. */
490 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
491 			    0, NULL);
492 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
493 	} else {
494 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
495 	}
496 
497 	return 0;
498 }
499 
500 void
501 cnmac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
502 {
503 	struct cnmac_softc *sc = ifp->if_softc;
504 
505 	mii_pollstat(&sc->sc_mii);
506 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
507 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
508 	ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
509 	    sc->sc_gmx_port->sc_port_flowflags;
510 }
511 
512 int
513 cnmac_mediachange(struct ifnet *ifp)
514 {
515 	struct cnmac_softc *sc = ifp->if_softc;
516 
517 	if ((ifp->if_flags & IFF_UP) == 0)
518 		return 0;
519 
520 	return mii_mediachg(&sc->sc_mii);
521 }
522 
523 /* ---- send buffer garbage collection */
524 
/*
 * Start an asynchronous (IOBDMA) fetch of the FAU transmit-done
 * counter.  Must be paired with cnmac_send_queue_flush_fetch(), which
 * consumes the result.
 */
void
cnmac_send_queue_flush_prefetch(struct cnmac_softc *sc)
{
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 0);
	cn30xxfau_op_inc_fetch_8(&sc->sc_fau_done, 0);
	sc->sc_prefetch = 1;
}
532 
/*
 * Complete the IOBDMA fetch started by
 * cnmac_send_queue_flush_prefetch() and latch the transmit-done count
 * into sc_hard_done_cnt (zero or negative).
 */
void
cnmac_send_queue_flush_fetch(struct cnmac_softc *sc)
{
#ifndef  OCTEON_ETH_DEBUG
	/* Tolerate a missing prefetch outside of debug builds. */
	if (!sc->sc_prefetch)
		return;
#endif
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 1);
	sc->sc_hard_done_cnt = cn30xxfau_op_inc_read_8(&sc->sc_fau_done);
	OCTEON_ETH_KASSERT(sc->sc_hard_done_cnt <= 0);
	sc->sc_prefetch = 0;
}
545 
/*
 * Reclaim mbufs and gather buffers for packets the hardware has
 * finished transmitting.  sc_hard_done_cnt is zero or negative; its
 * magnitude is the number of completed packets.
 */
void
cnmac_send_queue_flush(struct cnmac_softc *sc)
{
	const int64_t sent_count = sc->sc_hard_done_cnt;
	int i;

	OCTEON_ETH_KASSERT(sent_count <= 0);

	for (i = 0; i < 0 - sent_count; i++) {
		struct mbuf *m;
		uint64_t *gbuf;

		/* Completion is in FIFO order with the send queue. */
		cnmac_send_queue_del(sc, &m, &gbuf);

		/* Return the gather buffer to its FPA pool. */
		cn30xxfpa_buf_put_paddr(cnmac_fb_sg, XKPHYS_TO_PHYS(gbuf));

		m_freem(m);
	}

	/* Credit the FAU counter for the packets reclaimed above. */
	cn30xxfau_op_add_8(&sc->sc_fau_done, i);
}
567 
/*
 * Check whether another packet can be queued for transmission.
 * Returns 1 (and flushes completed sends) when the gather queue is
 * about to fill, 0 otherwise.
 */
int
cnmac_send_queue_is_full(struct cnmac_softc *sc)
{
#ifdef OCTEON_ETH_SEND_QUEUE_CHECK
	int64_t nofree_cnt;

	/* Queued packets minus completed ones (sc_hard_done_cnt <= 0). */
	nofree_cnt = ml_len(&sc->sc_sendq) + sc->sc_hard_done_cnt;

	if (__predict_false(nofree_cnt == GATHER_QUEUE_SIZE - 1)) {
		cnmac_send_queue_flush(sc);
		return 1;
	}

#endif
	return 0;
}
584 
585 void
586 cnmac_send_queue_add(struct cnmac_softc *sc, struct mbuf *m,
587     uint64_t *gbuf)
588 {
589 	OCTEON_ETH_KASSERT(m->m_flags & M_PKTHDR);
590 
591 	m->m_pkthdr.ph_cookie = gbuf;
592 	ml_enqueue(&sc->sc_sendq, m);
593 
594 	if (m->m_ext.ext_free_fn != 0)
595 		sc->sc_ext_callback_cnt++;
596 }
597 
598 void
599 cnmac_send_queue_del(struct cnmac_softc *sc, struct mbuf **rm,
600     uint64_t **rgbuf)
601 {
602 	struct mbuf *m;
603 	m = ml_dequeue(&sc->sc_sendq);
604 	OCTEON_ETH_KASSERT(m != NULL);
605 
606 	*rm = m;
607 	*rgbuf = m->m_pkthdr.ph_cookie;
608 
609 	if (m->m_ext.ext_free_fn != 0) {
610 		sc->sc_ext_callback_cnt--;
611 		OCTEON_ETH_KASSERT(sc->sc_ext_callback_cnt >= 0);
612 	}
613 }
614 
/*
 * Return every packet data buffer chained off a received work-queue
 * entry to the packet FPA pool, then free the WQE itself.
 */
int
cnmac_buf_free_work(struct cnmac_softc *sc, uint64_t *work)
{
	paddr_t addr, pktbuf;
	uint64_t word3;
	unsigned int back, nbufs;

	/* WORD2[IP_BUFS] is the number of chained data buffers. */
	nbufs = (work[2] & PIP_WQE_WORD2_IP_BUFS) >>
	    PIP_WQE_WORD2_IP_BUFS_SHIFT;
	word3 = work[3];
	while (nbufs-- > 0) {
		addr = word3 & PIP_WQE_WORD3_ADDR;
		/* Buffer start lies BACK cache lines before the data. */
		back = (word3 & PIP_WQE_WORD3_BACK) >>
		    PIP_WQE_WORD3_BACK_SHIFT;
		pktbuf = (addr & ~(CACHELINESIZE - 1)) - back * CACHELINESIZE;

		cn30xxfpa_store(pktbuf, OCTEON_POOL_NO_PKT,
		    OCTEON_POOL_SIZE_PKT / CACHELINESIZE);

		/* The next-buffer word is stored just before the data. */
		if (nbufs > 0)
			memcpy(&word3, (void *)PHYS_TO_XKPHYS(addr -
			    sizeof(word3), CCA_CACHED), sizeof(word3));
	}

	cn30xxfpa_buf_put_paddr(cnmac_fb_wqe, XKPHYS_TO_PHYS(work));

	return 0;
}
643 
644 /* ---- ifnet interfaces */
645 
646 int
647 cnmac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
648 {
649 	struct cnmac_softc *sc = ifp->if_softc;
650 	struct ifreq *ifr = (struct ifreq *)data;
651 	int s, error = 0;
652 
653 	s = splnet();
654 
655 	switch (cmd) {
656 	case SIOCSIFADDR:
657 		ifp->if_flags |= IFF_UP;
658 		if (!(ifp->if_flags & IFF_RUNNING))
659 			cnmac_init(ifp);
660 		break;
661 
662 	case SIOCSIFFLAGS:
663 		if (ifp->if_flags & IFF_UP) {
664 			if (ifp->if_flags & IFF_RUNNING)
665 				error = ENETRESET;
666 			else
667 				cnmac_init(ifp);
668 		} else {
669 			if (ifp->if_flags & IFF_RUNNING)
670 				cnmac_stop(ifp, 0);
671 		}
672 		break;
673 
674 	case SIOCSIFMEDIA:
675 		/* Flow control requires full-duplex mode. */
676 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
677 		    (ifr->ifr_media & IFM_FDX) == 0) {
678 			ifr->ifr_media &= ~IFM_ETH_FMASK;
679 		}
680 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
681 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
682 				ifr->ifr_media |=
683 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
684 			}
685 			sc->sc_gmx_port->sc_port_flowflags =
686 				ifr->ifr_media & IFM_ETH_FMASK;
687 		}
688 		/* FALLTHROUGH */
689 	case SIOCGIFMEDIA:
690 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
691 		break;
692 
693 	default:
694 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
695 	}
696 
697 	if (error == ENETRESET) {
698 		if (ISSET(ifp->if_flags, IFF_RUNNING))
699 			cn30xxgmx_set_filter(sc->sc_gmx_port);
700 		error = 0;
701 	}
702 
703 	splx(s);
704 	return (error);
705 }
706 
707 /* ---- send (output) */
708 
/*
 * Build PKO command WORD0: transmit-done is signalled by incrementing
 * FAU register fau0; ipoffp1 (IP offset plus one, or 0) enables
 * hardware TCP/UDP checksumming.  Gather mode is used when segs > 1.
 */
uint64_t
cnmac_send_makecmd_w0(uint64_t fau0, uint64_t fau1, size_t len, int segs,
    int ipoffp1)
{
	return cn30xxpko_cmd_word0(
		OCT_FAU_OP_SIZE_64,		/* sz1 */
		OCT_FAU_OP_SIZE_64,		/* sz0 */
		1, fau1, 1, fau0,		/* s1, reg1, s0, reg0 */
		0,				/* le */
		cnmac_param_pko_cmd_w0_n2,	/* n2 */
		1, 0,				/* q, r */
		(segs == 1) ? 0 : 1,		/* g */
		ipoffp1, 0, 1,			/* ipoffp1, ii, df */
		segs, (int)len);		/* segs, totalbytes */
}
724 
/*
 * Build PKO command WORD1 describing one buffer: physical address and
 * size, freed back to the scatter-gather pool by hardware.
 */
uint64_t
cnmac_send_makecmd_w1(int size, paddr_t addr)
{
	return cn30xxpko_cmd_word1(
		0, 0,				/* i, back */
		OCTEON_POOL_NO_SG,		/* pool */
		size, addr);			/* size, addr */
}
733 
/* Convert a kernel virtual address to physical; XKPHYS addresses only. */
#define KVTOPHYS(addr)	cnmac_kvtophys((vaddr_t)(addr))

static inline paddr_t
cnmac_kvtophys(vaddr_t kva)
{
	/* mbuf data always lives in direct-mapped XKPHYS here. */
	KASSERT(IS_XKPHYS(kva));
	return XKPHYS_TO_PHYS(kva);
}
742 
743 int
744 cnmac_send_makecmd_gbuf(struct cnmac_softc *sc, struct mbuf *m0,
745     uint64_t *gbuf, int *rsegs)
746 {
747 	struct mbuf *m;
748 	int segs = 0;
749 
750 	for (m = m0; m != NULL; m = m->m_next) {
751 		if (__predict_false(m->m_len == 0))
752 			continue;
753 
754 		if (segs >= OCTEON_POOL_SIZE_SG / sizeof(uint64_t))
755 			goto defrag;
756 		gbuf[segs] = cnmac_send_makecmd_w1(m->m_len,
757 		    KVTOPHYS(m->m_data));
758 		segs++;
759 	}
760 
761 	*rsegs = segs;
762 
763 	return 0;
764 
765 defrag:
766 	if (m_defrag(m0, M_DONTWAIT) != 0)
767 		return 1;
768 	gbuf[0] = cnmac_send_makecmd_w1(m0->m_len, KVTOPHYS(m0->m_data));
769 	*rsegs = 1;
770 	return 0;
771 }
772 
/*
 * Build the two-word PKO transmit command for an mbuf chain.
 * Returns 0 on success with *rpko_cmd_w0/*rpko_cmd_w1 filled in,
 * nonzero (after logging) on failure.
 */
int
cnmac_send_makecmd(struct cnmac_softc *sc, struct mbuf *m,
    uint64_t *gbuf, uint64_t *rpko_cmd_w0, uint64_t *rpko_cmd_w1)
{
	uint64_t pko_cmd_w0, pko_cmd_w1;
	int ipoffp1;
	int segs;
	int result = 0;

	if (cnmac_send_makecmd_gbuf(sc, m, gbuf, &segs)) {
		log(LOG_WARNING, "%s: large number of transmission"
		    " data segments", sc->sc_dev.dv_xname);
		result = 1;
		goto done;
	}

	/* Get the IP packet offset for TCP/UDP checksum offloading. */
	ipoffp1 = (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
	    ? (ETHER_HDR_LEN + 1) : 0;

	/*
	 * segs == 1	-> link mode (single continuous buffer)
	 *		   WORD1[size] is number of bytes pointed by segment
	 *
	 * segs > 1	-> gather mode (scatter-gather buffer)
	 *		   WORD1[size] is number of segments
	 */
	pko_cmd_w0 = cnmac_send_makecmd_w0(sc->sc_fau_done.fd_regno,
	    0, m->m_pkthdr.len, segs, ipoffp1);
	pko_cmd_w1 = cnmac_send_makecmd_w1(
	    (segs == 1) ? m->m_pkthdr.len : segs,
	    (segs == 1) ?
		KVTOPHYS(m->m_data) :
		XKPHYS_TO_PHYS(gbuf));

	*rpko_cmd_w0 = pko_cmd_w0;
	*rpko_cmd_w1 = pko_cmd_w1;

done:
	return result;
}
814 
/*
 * Write a two-word PKO command into the current command buffer and
 * ring the doorbell.  When the buffer is about to run out, chain a
 * fresh buffer from the CMD pool (the link pointer occupies the last
 * word, which is why the buffer is sized to an odd word count).
 */
int
cnmac_send_cmd(struct cnmac_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1)
{
	uint64_t *cmdptr;
	int result = 0;

	cmdptr = (uint64_t *)PHYS_TO_XKPHYS(sc->sc_cmdptr.cmdptr, CCA_CACHED);
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	OCTEON_ETH_KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <= FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

	if (sc->sc_cmdptr.cmdptr_idx + 2 == FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		paddr_t buf;

		/* Buffer full: link in a new command buffer. */
		buf = cn30xxfpa_buf_get_paddr(cnmac_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: cannot allocate command buffer from free pool allocator\n",
			    sc->sc_dev.dv_xname);
			result = 1;
			goto done;
		}
		*cmdptr++ = buf;
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	/* Tell the PKO two more words are available. */
	cn30xxpko_op_doorbell_write(sc->sc_port, sc->sc_port, 2);

done:
	return result;
}
855 
856 int
857 cnmac_send_buf(struct cnmac_softc *sc, struct mbuf *m, uint64_t *gbuf)
858 {
859 	int result = 0, error;
860 	uint64_t pko_cmd_w0, pko_cmd_w1;
861 
862 	error = cnmac_send_makecmd(sc, m, gbuf, &pko_cmd_w0, &pko_cmd_w1);
863 	if (error != 0) {
864 		/* already logging */
865 		result = error;
866 		goto done;
867 	}
868 
869 	error = cnmac_send_cmd(sc, pko_cmd_w0, pko_cmd_w1);
870 	if (error != 0) {
871 		/* already logging */
872 		result = error;
873 	}
874 
875 done:
876 	return result;
877 }
878 
879 int
880 cnmac_send(struct cnmac_softc *sc, struct mbuf *m)
881 {
882 	paddr_t gaddr = 0;
883 	uint64_t *gbuf = NULL;
884 	int result = 0, error;
885 
886 	gaddr = cn30xxfpa_buf_get_paddr(cnmac_fb_sg);
887 	if (gaddr == 0) {
888 		log(LOG_WARNING,
889 		    "%s: cannot allocate gather buffer from free pool allocator\n",
890 		    sc->sc_dev.dv_xname);
891 		result = 1;
892 		goto done;
893 	}
894 
895 	gbuf = (uint64_t *)(uintptr_t)PHYS_TO_XKPHYS(gaddr, CCA_CACHED);
896 
897 	error = cnmac_send_buf(sc, m, gbuf);
898 	if (error != 0) {
899 		/* already logging */
900 		cn30xxfpa_buf_put_paddr(cnmac_fb_sg, gaddr);
901 		result = error;
902 		goto done;
903 	}
904 
905 	cnmac_send_queue_add(sc, m, gbuf);
906 
907 done:
908 	return result;
909 }
910 
/*
 * ifq start routine: drain the interface send queue into the PKO.
 * A transmit-done counter prefetch is kept in flight across loop
 * iterations so the FAU read overlaps with command construction.
 */
void
cnmac_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct cnmac_softc *sc = ifp->if_softc;
	struct mbuf *m;

	/* Without link, transmitting is pointless; drop everything. */
	if (__predict_false(!cn30xxgmx_link_status(sc->sc_gmx_port))) {
		ifq_purge(ifq);
		return;
	}

	/*
	 * performance tuning
	 * presend iobdma request
	 */
	cnmac_send_queue_flush_prefetch(sc);

	for (;;) {
		cnmac_send_queue_flush_fetch(sc); /* XXX */

		/*
		 * XXXSEIL
		 * If no free send buffer is available, free all the sent buffer
		 * and bail out.
		 */
		if (cnmac_send_queue_is_full(sc)) {
			ifq_set_oactive(ifq);
			timeout_add(&sc->sc_tick_free_ch, 1);
			return;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			return;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* XXX */
		/* Reclaim completed sends before the queue grows too long. */
		if (ml_len(&sc->sc_sendq) > sc->sc_soft_req_thresh)
			cnmac_send_queue_flush(sc);
		if (cnmac_send(sc, m)) {
			ifp->if_oerrors++;
			m_freem(m);
			log(LOG_WARNING,
		  	  "%s: failed to transmit packet\n",
		    	  sc->sc_dev.dv_xname);
		}
		/* XXX */

		/*
		 * send next iobdma request
		 */
		cnmac_send_queue_flush_prefetch(sc);
	}

	cnmac_send_queue_flush_fetch(sc);
}
972 
973 void
974 cnmac_watchdog(struct ifnet *ifp)
975 {
976 	struct cnmac_softc *sc = ifp->if_softc;
977 
978 	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
979 
980 	cnmac_stop(ifp, 0);
981 
982 	cnmac_configure(sc);
983 
984 	SET(ifp->if_flags, IFF_RUNNING);
985 	ifp->if_timer = 0;
986 
987 	ifq_restart(&ifp->if_snd);
988 }
989 
/*
 * Bring the interface up.  Full hardware configuration happens only
 * the first time; later calls merely re-enable the GMX port because
 * the PKO/IPD units are shared across ports.
 */
int
cnmac_init(struct ifnet *ifp)
{
	struct cnmac_softc *sc = ifp->if_softc;

	/* XXX don't disable commonly used parts!!! XXX */
	if (sc->sc_init_flag == 0) {
		/* Cancel any pending I/O. */
		cnmac_stop(ifp, 0);

		/* Initialize the device */
		cnmac_configure(sc);

		cn30xxpko_enable(sc->sc_pko);
		cn30xxipd_enable(sc->sc_ipd);

		sc->sc_init_flag = 1;
	} else {
		cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
	}
	cnmac_mediachange(ifp);

	cn30xxpip_stats_init(sc->sc_pip);
	cn30xxgmx_stats_init(sc->sc_gmx_port);
	cn30xxgmx_set_mac_addr(sc->sc_gmx_port, sc->sc_arpcom.ac_enaddr);
	cn30xxgmx_set_filter(sc->sc_gmx_port);

	/* Periodic statistics and send-queue reclamation ticks. */
	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
	timeout_add_sec(&sc->sc_tick_free_ch, 1);

	SET(ifp->if_flags, IFF_RUNNING);
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
1025 
/*
 * Bring the interface down: stop the ticks, take the link down,
 * disable receive on the GMX port, and wait for the interrupt handler
 * and send queue to drain before returning.
 */
int
cnmac_stop(struct ifnet *ifp, int disable)
{
	struct cnmac_softc *sc = ifp->if_softc;

	CLR(ifp->if_flags, IFF_RUNNING);

	timeout_del(&sc->sc_tick_misc_ch);
	timeout_del(&sc->sc_tick_free_ch);

	mii_down(&sc->sc_mii);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	/* Wait for in-flight interrupt and ifq work to finish. */
	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	return 0;
}
1048 
1049 /* ---- misc */
1050 
1051 #define PKO_INDEX_MASK	((1ULL << 12/* XXX */) - 1)
1052 
1053 int
1054 cnmac_reset(struct cnmac_softc *sc)
1055 {
1056 	cn30xxgmx_reset_speed(sc->sc_gmx_port);
1057 	cn30xxgmx_reset_flowctl(sc->sc_gmx_port);
1058 	cn30xxgmx_reset_timing(sc->sc_gmx_port);
1059 
1060 	return 0;
1061 }
1062 
/*
 * (Re)configure this port's transmit and receive paths with the GMX
 * port disabled, then re-enable it.
 */
int
cnmac_configure(struct cnmac_softc *sc)
{
	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	cnmac_reset(sc);

	cn30xxpko_port_config(sc->sc_pko);
	cn30xxpko_port_enable(sc->sc_pko, 1);
	/* Route this port's receive traffic to our POW group. */
	cn30xxpow_config(sc->sc_pow, sc->sc_powgroup);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 1);

	return 0;
}
1078 
/*
 * One-time configuration of the IPD/PKO units and global PIP padding,
 * shared by all ports; subsequent calls are no-ops.
 */
int
cnmac_configure_common(struct cnmac_softc *sc)
{
	static int once;

	uint64_t reg;

	if (once == 1)
		return 0;
	once = 1;

	cn30xxipd_config(sc->sc_ipd);
	cn30xxpko_config(sc->sc_pko);

	/* Set padding for packets that Octeon does not recognize as IP. */
	reg = octeon_xkphys_read_8(PIP_GBL_CFG);
	reg &= ~PIP_GBL_CFG_NIP_SHF_MASK;
	reg |= ETHER_ALIGN << PIP_GBL_CFG_NIP_SHF_SHIFT;
	octeon_xkphys_write_8(PIP_GBL_CFG, reg);

	return 0;
}
1101 
/*
 * Donate up to n mbuf clusters to the hardware packet FPA pool.
 * Returns the number of buffers that could NOT be allocated (0 on
 * full success) so the caller can retry the remainder later.
 */
int
cnmac_mbuf_alloc(int n)
{
	struct mbuf *m;
	paddr_t pktbuf;

	while (n > 0) {
		m = MCLGETI(NULL, M_NOWAIT, NULL,
		    OCTEON_POOL_SIZE_PKT + CACHELINESIZE);
		if (m == NULL || !ISSET(m->m_flags, M_EXT)) {
			m_freem(m);
			break;
		}

		/*
		 * Cache-line-align the data area and stash the mbuf
		 * pointer in the word just before it, so the receive
		 * path can recover the mbuf from the buffer address.
		 */
		m->m_data = (void *)(((vaddr_t)m->m_data + CACHELINESIZE) &
		    ~(CACHELINESIZE - 1));
		((struct mbuf **)m->m_data)[-1] = m;

		/* Cookie holds the physical address for a later sanity check. */
		pktbuf = KVTOPHYS(m->m_data);
		m->m_pkthdr.ph_cookie = (void *)pktbuf;
		cn30xxfpa_store(pktbuf, OCTEON_POOL_NO_PKT,
		    OCTEON_POOL_SIZE_PKT / CACHELINESIZE);

		n--;
	}
	return n;
}
1129 
/*
 * Rebuild an mbuf chain from a received work-queue entry.  Each
 * hardware data buffer carries the owning mbuf pointer just before
 * its data area (planted by cnmac_mbuf_alloc()).  On return *rm is
 * the packet and *nmbuf the number of buffers consumed; the WQE
 * itself is returned to its pool here.
 */
int
cnmac_recv_mbuf(struct cnmac_softc *sc, uint64_t *work,
    struct mbuf **rm, int *nmbuf)
{
	struct mbuf *m, *m0, *mprev, **pm;
	paddr_t addr, pktbuf;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];
	unsigned int back, i, nbufs;
	unsigned int left, total, size;

	cn30xxfpa_buf_put_paddr(cnmac_fb_wqe, XKPHYS_TO_PHYS(work));

	nbufs = (word2 & PIP_WQE_WORD2_IP_BUFS) >> PIP_WQE_WORD2_IP_BUFS_SHIFT;
	if (nbufs == 0)
		panic("%s: dynamic short packet", __func__);

	m0 = mprev = NULL;
	/* WORD1[LEN] is the total packet length in bytes. */
	total = left = (word1 & PIP_WQE_WORD1_LEN) >> 48;
	for (i = 0; i < nbufs; i++) {
		addr = word3 & PIP_WQE_WORD3_ADDR;
		back = (word3 & PIP_WQE_WORD3_BACK) >> PIP_WQE_WORD3_BACK_SHIFT;
		pktbuf = (addr & ~(CACHELINESIZE - 1)) - back * CACHELINESIZE;
		/* Recover the owning mbuf stored just before the buffer. */
		pm = (struct mbuf **)PHYS_TO_XKPHYS(pktbuf, CCA_CACHED) - 1;
		m = *pm;
		*pm = NULL;
		if ((paddr_t)m->m_pkthdr.ph_cookie != pktbuf)
			panic("%s: packet pool is corrupted, mbuf cookie %p != "
			    "pktbuf %p", __func__, m->m_pkthdr.ph_cookie,
			    (void *)pktbuf);

		/*
		 * Because of a hardware bug in some Octeon models the size
		 * field of word3 can be wrong (erratum PKI-100).
		 * However, the hardware uses all space in a buffer before
		 * moving to the next one so it is possible to derive
		 * the size of this data segment from the size
		 * of packet data buffers.
		 */
		size = OCTEON_POOL_SIZE_PKT - (addr - pktbuf);
		if (size > left)
			size = left;

		m->m_pkthdr.ph_cookie = NULL;
		m->m_data += addr - pktbuf;
		m->m_len = size;
		left -= size;

		/* Link this buffer into the chain being rebuilt. */
		if (m0 == NULL)
			m0 = m;
		else {
			m->m_flags &= ~M_PKTHDR;
			mprev->m_next = m;
		}
		mprev = m;

		/* The next-buffer word is stored just before the data. */
		if (i + 1 < nbufs)
			memcpy(&word3, (void *)PHYS_TO_XKPHYS(addr -
			    sizeof(word3), CCA_CACHED), sizeof(word3));
	}

	m0->m_pkthdr.len = total;
	*rm = m0;
	*nmbuf = nbufs;

	return 0;
}
1198 
1199 int
1200 cnmac_recv_check(struct cnmac_softc *sc, uint64_t word2)
1201 {
1202 	static struct timeval rxerr_log_interval = { 0, 250000 };
1203 	uint64_t opecode;
1204 
1205 	if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_NOIP_RE)))
1206 		return 0;
1207 
1208 	opecode = word2 & PIP_WQE_WORD2_NOIP_OPECODE;
1209 	if ((sc->sc_arpcom.ac_if.if_flags & IFF_DEBUG) &&
1210 	    ratecheck(&sc->sc_rxerr_log_last, &rxerr_log_interval))
1211 		log(LOG_DEBUG, "%s: rx error (%lld)\n", sc->sc_dev.dv_xname,
1212 		    opecode);
1213 
1214 	/* XXX harmless error? */
1215 	if (opecode == PIP_WQE_WORD2_RE_OPCODE_OVRRUN)
1216 		return 0;
1217 
1218 	return 1;
1219 }
1220 
/*
 * Process one received work queue entry: validate it, convert it into
 * an mbuf chain and enqueue the packet on ml.  Returns the number of
 * packet buffers consumed (so the caller can replenish the pool), or
 * 0 if the packet was dropped.
 */
int
cnmac_recv(struct cnmac_softc *sc, uint64_t *work, struct mbuf_list *ml)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;
	uint64_t word2;
	int nmbuf = 0;

	word2 = work[2];

	if (!(ifp->if_flags & IFF_RUNNING))
		goto drop;

	/* Drop packets with receive errors. */
	if (__predict_false(cnmac_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	/* On success, this releases the work queue entry. */
	if (__predict_false(cnmac_recv_mbuf(sc, work, &m, &nmbuf) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	/* Translate the hardware checksum status into mbuf csum flags. */
	m->m_pkthdr.csum_flags = 0;
	if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_IP_NI))) {
		/* Check IP checksum status. */
		if (!ISSET(word2, PIP_WQE_WORD2_IP_V6) &&
		    !ISSET(word2, PIP_WQE_WORD2_IP_IE))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum status. */
		if (ISSET(word2, PIP_WQE_WORD2_IP_TU) &&
		    !ISSET(word2, PIP_WQE_WORD2_IP_FR) &&
		    !ISSET(word2, PIP_WQE_WORD2_IP_LE))
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
	}

	ml_enqueue(ml, m);

	return nmbuf;

drop:
	cnmac_buf_free_work(sc, work);
	return 0;
}
1268 
/*
 * Hardware interrupt handler.  Drains all pending work queue entries
 * for this port's POW group, feeds the resulting packets to the
 * network stack, and replenishes the packet buffer pool.
 */
int
cnmac_intr(void *arg)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct cnmac_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint64_t *work;
	uint64_t wqmask = 1ull << sc->sc_powgroup;
	uint32_t coreid = octeon_get_coreid();
	uint32_t port;
	int nmbuf = 0;

	/* Limit this core's work requests to this port's group. */
	_POW_WR8(sc->sc_pow, POW_PP_GRP_MSK_OFFSET(coreid), wqmask);

	/* Prime the first asynchronous work request. */
	cn30xxpow_tag_sw_wait();
	cn30xxpow_work_request_async(OCTEON_CVMSEG_OFFSET(csm_pow_intr),
	    POW_NO_WAIT);

	for (;;) {
		work = (uint64_t *)cn30xxpow_work_response_async(
		    OCTEON_CVMSEG_OFFSET(csm_pow_intr));
		if (work == NULL)
			break;

		/*
		 * Issue the next request before processing the current
		 * entry, so the fetch overlaps with packet processing.
		 */
		cn30xxpow_tag_sw_wait();
		cn30xxpow_work_request_async(
		    OCTEON_CVMSEG_OFFSET(csm_pow_intr), POW_NO_WAIT);

		/* Sanity check: the WQE must belong to this port. */
		port = (work[1] & PIP_WQE_WORD1_IPRT) >> 42;
		if (port != sc->sc_port) {
			printf("%s: unexpected wqe port %u, should be %u\n",
			    sc->sc_dev.dv_xname, port, sc->sc_port);
			goto wqe_error;
		}

		nmbuf += cnmac_recv(sc, work, &ml);
	}

	/* Acknowledge the work queue interrupt for this group. */
	_POW_WR8(sc->sc_pow, POW_WQ_INT_OFFSET, wqmask);

	if_input(ifp, &ml);

	/*
	 * Replenish the buffers consumed by received packets; any
	 * shortfall is recorded for the periodic tick to retry.
	 */
	nmbuf = cnmac_mbuf_alloc(nmbuf);
	if (nmbuf != 0)
		atomic_add_int(&cnmac_mbufs_to_alloc, nmbuf);

	return 1;

wqe_error:
	printf("word0: 0x%016llx\n", work[0]);
	printf("word1: 0x%016llx\n", work[1]);
	printf("word2: 0x%016llx\n", work[2]);
	printf("word3: 0x%016llx\n", work[3]);
	panic("wqe error");
}
1324 
1325 /* ---- tick */
1326 
1327 void
1328 cnmac_free_task(void *arg)
1329 {
1330 	struct cnmac_softc *sc = arg;
1331 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1332 	struct ifqueue *ifq = &ifp->if_snd;
1333 	int resched = 1;
1334 	int timeout;
1335 
1336 	if (ml_len(&sc->sc_sendq) > 0) {
1337 		cnmac_send_queue_flush_prefetch(sc);
1338 		cnmac_send_queue_flush_fetch(sc);
1339 		cnmac_send_queue_flush(sc);
1340 	}
1341 
1342 	if (ifq_is_oactive(ifq)) {
1343 		ifq_clr_oactive(ifq);
1344 		cnmac_start(ifq);
1345 
1346 		if (ifq_is_oactive(ifq)) {
1347 			/* The start routine did rescheduling already. */
1348 			resched = 0;
1349 		}
1350 	}
1351 
1352 	if (resched) {
1353 		timeout = (sc->sc_ext_callback_cnt > 0) ? 1 : hz;
1354 		timeout_add(&sc->sc_tick_free_ch, timeout);
1355 	}
1356 }
1357 
1358 /*
1359  * cnmac_tick_free
1360  *
1361  * => garbage collect send gather buffer / mbuf
1362  * => called at softclock
1363  */
1364 void
1365 cnmac_tick_free(void *arg)
1366 {
1367 	struct cnmac_softc *sc = arg;
1368 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1369 	int to_alloc;
1370 
1371 	ifq_serialize(&ifp->if_snd, &sc->sc_free_task);
1372 
1373 	if (cnmac_mbufs_to_alloc != 0) {
1374 		to_alloc = atomic_swap_uint(&cnmac_mbufs_to_alloc, 0);
1375 		to_alloc = cnmac_mbuf_alloc(to_alloc);
1376 		if (to_alloc != 0)
1377 			atomic_add_int(&cnmac_mbufs_to_alloc, to_alloc);
1378 	}
1379 }
1380 
/*
 * cnmac_tick_misc
 *
 * => collect statistics
 * => check link status
 * => called at softclock
 */
void
cnmac_tick_misc(void *arg)
{
	struct cnmac_softc *sc = arg;
	int s;

	/* Let the MII layer update PHY and link state. */
	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

#if NKSTAT > 0
	cnmac_kstat_tick(sc);
#endif

	/* Rearm: this tick runs once per second. */
	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
}
1404 
#if NKSTAT > 0
#define KVE(n, t) \
	KSTAT_KV_UNIT_INITIALIZER((n), KSTAT_KV_T_COUNTER64, (t))

/*
 * Template for the per-port statistics exported via kstat.
 * cnmac_kstat_attach() copies this into each instance's ks_data;
 * cn30xxpip_kstat_read()/cn30xxgmx_kstat_read() fill in the values.
 */
static const struct kstat_kv cnmac_kstat_tpl[cnmac_stat_count] = {
	[cnmac_stat_rx_toto_gmx]= KVE("rx total gmx",	KSTAT_KV_U_BYTES),
	[cnmac_stat_rx_totp_gmx]= KVE("rx total gmx",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_toto_pip]= KVE("rx total pip",	KSTAT_KV_U_BYTES),
	[cnmac_stat_rx_totp_pip]= KVE("rx total pip",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_h64]	= KVE("rx 64B",		KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_h127]	= KVE("rx 65-127B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_h255]	= KVE("rx 128-255B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_h511]	= KVE("rx 256-511B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_h1023]	= KVE("rx 512-1023B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_h1518]	= KVE("rx 1024-1518B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_hmax]	= KVE("rx 1519-maxB",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_bcast]	= KVE("rx bcast",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_mcast]	= KVE("rx mcast",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_qdpo]	= KVE("rx qos drop",	KSTAT_KV_U_BYTES),
	[cnmac_stat_rx_qdpp]	= KVE("rx qos drop",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_fcs]	= KVE("rx fcs err",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_frag]	= KVE("rx fcs undersize",KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_undersz]	= KVE("rx undersize",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_jabber]	= KVE("rx jabber",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_oversz]	= KVE("rx oversize",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_raw]	= KVE("rx raw",		KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_bad]	= KVE("rx bad",		KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_drop]	= KVE("rx drop",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_ctl]	= KVE("rx control",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_rx_dmac]	= KVE("rx dmac",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_toto]	= KVE("tx total",	KSTAT_KV_U_BYTES),
	[cnmac_stat_tx_totp]	= KVE("tx total",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_hmin]	= KVE("tx min-63B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_h64]	= KVE("tx 64B",		KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_h127]	= KVE("tx 65-127B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_h255]	= KVE("tx 128-255B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_h511]	= KVE("tx 256-511B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_h1023]	= KVE("tx 512-1023B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_h1518]	= KVE("tx 1024-1518B",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_hmax]	= KVE("tx 1519-maxB",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_bcast]	= KVE("tx bcast",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_mcast]	= KVE("tx mcast",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_coll]	= KVE("tx coll",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_defer]	= KVE("tx defer",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_scol]	= KVE("tx scoll",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_mcol]	= KVE("tx mcoll",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_ctl]	= KVE("tx control",	KSTAT_KV_U_PACKETS),
	[cnmac_stat_tx_uflow]	= KVE("tx underflow",	KSTAT_KV_U_PACKETS),
};
1454 
1455 void
1456 cnmac_kstat_attach(struct cnmac_softc *sc)
1457 {
1458 	struct kstat *ks;
1459 	struct kstat_kv *kvs;
1460 
1461 	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
1462 
1463 	ks = kstat_create(sc->sc_dev.dv_xname, 0, "cnmac-stats", 0,
1464 	    KSTAT_T_KV, 0);
1465 	if (ks == NULL)
1466 		return;
1467 
1468 	kvs = malloc(sizeof(cnmac_kstat_tpl), M_DEVBUF, M_WAITOK | M_ZERO);
1469 	memcpy(kvs, cnmac_kstat_tpl, sizeof(cnmac_kstat_tpl));
1470 
1471 	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
1472 	ks->ks_softc = sc;
1473 	ks->ks_data = kvs;
1474 	ks->ks_datalen = sizeof(cnmac_kstat_tpl);
1475 	ks->ks_read = cnmac_kstat_read;
1476 
1477 	sc->sc_kstat = ks;
1478 	kstat_install(ks);
1479 }
1480 
1481 int
1482 cnmac_kstat_read(struct kstat *ks)
1483 {
1484 	struct cnmac_softc *sc = ks->ks_softc;
1485 	struct kstat_kv *kvs = ks->ks_data;
1486 
1487 	cn30xxpip_kstat_read(sc->sc_pip, kvs);
1488 	cn30xxgmx_kstat_read(sc->sc_gmx_port, kvs);
1489 
1490 	getnanouptime(&ks->ks_updated);
1491 
1492 	return 0;
1493 }
1494 
1495 void
1496 cnmac_kstat_tick(struct cnmac_softc *sc)
1497 {
1498 	if (sc->sc_kstat == NULL)
1499 		return;
1500 	if (!mtx_enter_try(&sc->sc_kstat_mtx))
1501 		return;
1502 	cnmac_kstat_read(sc->sc_kstat);
1503 	mtx_leave(&sc->sc_kstat_mtx);
1504 }
1505 #endif
1506