/*	$NetBSD: if_mvxpevar.h,v 1.9 2022/04/04 19:33:45 andvar Exp $	*/
/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _IF_MVXPEVAR_H_
#define _IF_MVXPEVAR_H_
#include <net/if.h>
#include <dev/marvell/mvxpbmvar.h>

/*
 * Limit of packet sizes.
 */
#define MVXPE_HWHEADER_SIZE	2		/* Marvell Header */
#define MVXPE_MRU		2000		/* Max Receive Unit */
#define MVXPE_MTU		MVXPE_MRU	/* Max Transmit Unit */
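
/*
 * Illustrative note (assumption, not driver code): on receive the
 * 2-byte Marvell header presumably precedes the frame in the buffer,
 * so a packet buffer has to hold at least
 *
 *	MVXPE_HWHEADER_SIZE + MVXPE_MRU = 2 + 2000 = 2002 bytes
 */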

/*
 * Default limit of queue length
 *
 * queue 0 is lowest priority and queue 7 is highest priority.
 *
 * XXX: packet classifier is not implemented yet
 */
#define MVXPE_RX_QUEUE_LIMIT_0	IFQ_MAXLEN
#define MVXPE_RX_QUEUE_LIMIT_1	8
#define MVXPE_RX_QUEUE_LIMIT_2	8
#define MVXPE_RX_QUEUE_LIMIT_3	8
#define MVXPE_RX_QUEUE_LIMIT_4	8
#define MVXPE_RX_QUEUE_LIMIT_5	8
#define MVXPE_RX_QUEUE_LIMIT_6	8
#define MVXPE_RX_QUEUE_LIMIT_7	8

#define MVXPE_TX_QUEUE_LIMIT_0	IFQ_MAXLEN
#define MVXPE_TX_QUEUE_LIMIT_1	8
#define MVXPE_TX_QUEUE_LIMIT_2	8
#define MVXPE_TX_QUEUE_LIMIT_3	8
#define MVXPE_TX_QUEUE_LIMIT_4	8
#define MVXPE_TX_QUEUE_LIMIT_5	8
#define MVXPE_TX_QUEUE_LIMIT_6	8
#define MVXPE_TX_QUEUE_LIMIT_7	8

/* an interrupt is triggered when crossing (queue length / RATIO) */
#define MVXPE_RXTH_RATIO	8
#define MVXPE_RXTH_REFILL_RATIO	2
#define MVXPE_TXTH_RATIO	8
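
/*
 * Illustrative sketch (assumption, not driver code): the thresholds
 * are presumably derived from the queue length, e.g. for Rx
 *
 *	rx->rx_queue_th_received = rx->rx_queue_len / MVXPE_RXTH_RATIO;
 *	rx->rx_queue_th_free = rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
 *
 * With rx_queue_len == IFQ_MAXLEN (256 on NetBSD) that means an
 * interrupt after 32 received frames and a refill at 128 free buffers.
 */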

/*
 * Device Register access
 */
#define MVXPE_READ(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define MVXPE_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

#define MVXPE_READ_REGION(sc, reg, val, c) \
	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
#define MVXPE_WRITE_REGION(sc, reg, val, c) \
	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))

#define MVXPE_READ_MIB(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_mibh, (reg))

#define MVXPE_IS_LINKUP(sc) \
	(MVXPE_READ((sc), MVXPE_PSR) & MVXPE_PSR_LINKUP)

#define MVXPE_IS_QUEUE_BUSY(queues, q) \
	((((queues) >> (q)) & 0x1))
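
/*
 * Usage sketch (illustrative): polling the port status register from a
 * link-state check:
 *
 *	uint32_t psr = MVXPE_READ(sc, MVXPE_PSR);
 *	int linkup = (psr & MVXPE_PSR_LINKUP) != 0;
 *
 * which is what MVXPE_IS_LINKUP(sc) expands to, modulo the temporary.
 */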

/*
 * EEE: Low Power Idle config
 * Default timer is the duration of an MTU-sized frame transmission.
 * The timer can be negotiated via the LLDP protocol, but we do not
 * support that.
 */
#define MVXPE_LPI_TS		(MVXPE_MRU * 8 / 1000) /* [us] */
#define MVXPE_LPI_TW		(MVXPE_MRU * 8 / 1000) /* [us] */
#define MVXPE_LPI_LI		(MVXPE_MRU * 8 / 1000) /* [us] */
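
/*
 * Worked example: at 1 Gb/s one bit takes 1 ns, so an MRU-sized frame
 * takes MVXPE_MRU * 8 = 16000 bit times = 16000 ns, i.e.
 * MVXPE_MRU * 8 / 1000 = 16 us, which all three timers above
 * evaluate to.
 */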

/*
 * DMA Descriptor
 *
 * The ethernet device has 8 rx/tx DMA queues. Each queue has its own
 * descriptor list. Descriptors are simply indexed by a counter inside
 * the device.
 */
#define MVXPE_TX_RING_CNT	IFQ_MAXLEN
#define MVXPE_TX_RING_MSK	(MVXPE_TX_RING_CNT - 1)
#define MVXPE_TX_RING_NEXT(x)	(((x) + 1) & MVXPE_TX_RING_MSK)
#define MVXPE_RX_RING_CNT	IFQ_MAXLEN
#define MVXPE_RX_RING_MSK	(MVXPE_RX_RING_CNT - 1)
#define MVXPE_RX_RING_NEXT(x)	(((x) + 1) & MVXPE_RX_RING_MSK)
#define MVXPE_TX_SEGLIMIT	32
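
/*
 * Note: MVXPE_TX_RING_NEXT()/MVXPE_RX_RING_NEXT() wrap by masking,
 * which is only correct when the ring size is a power of two
 * (IFQ_MAXLEN is 256 on NetBSD). A compile-time check could read
 * (illustrative, not in the driver):
 *
 *	__CTASSERT((MVXPE_TX_RING_CNT & MVXPE_TX_RING_MSK) == 0);
 *	__CTASSERT((MVXPE_RX_RING_CNT & MVXPE_RX_RING_MSK) == 0);
 */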

struct mvxpe_rx_ring {
	/* Real descriptor array, shared by RxDMA */
	struct mvxpe_rx_desc		*rx_descriptors;
	bus_dmamap_t			rx_descriptors_map;

	/* Management entries, one for each descriptor */
	struct mvxpe_rx_handle {
		struct mvxpe_rx_desc	*rxdesc_va;
		off_t			rxdesc_off; /* from rx_descriptors[0] */
		struct mvxpbm_chunk	*chunk;
	} rx_handle[MVXPE_RX_RING_CNT];

	/* locks */
	kmutex_t			rx_ring_mtx;

	/* Index */
	int				rx_dma;
	int				rx_cpu;

	/* Limit */
	int				rx_queue_len;
	int				rx_queue_th_received;
	int				rx_queue_th_free;
	int				rx_queue_th_time; /* [Tclk] */
};

struct mvxpe_tx_ring {
	/* Real descriptor array, shared by TxDMA */
	struct mvxpe_tx_desc		*tx_descriptors;
	bus_dmamap_t			tx_descriptors_map;

	/* Management entries, one for each descriptor */
	struct mvxpe_tx_handle {
		struct mvxpe_tx_desc	*txdesc_va;
		off_t			txdesc_off; /* from tx_descriptors[0] */
		struct mbuf		*txdesc_mbuf;
		bus_dmamap_t		txdesc_mbuf_map;
	} tx_handle[MVXPE_TX_RING_CNT];

	/* locks */
	kmutex_t			tx_ring_mtx;

	/* Index */
	int				tx_used;
	int				tx_dma;
	int				tx_cpu;

	/* Limit */
	int				tx_queue_len;
	int				tx_queue_th_free;
};
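
/*
 * Illustrative sketch (assumption, not driver code): rx_dma/tx_dma
 * appear to track the next slot owned by the device and rx_cpu/tx_cpu
 * the next slot owned by the driver, so the number of descriptors in
 * flight on a Tx ring could be computed as
 *
 *	inflight = (tx->tx_cpu - tx->tx_dma) & MVXPE_TX_RING_MSK;
 */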

/* Advance a ring counter by n slots, wrapping at the Tx ring size. */
static __inline int
tx_counter_adv(int ctr, int n)
{
	/* XXX: lock or atomic */
	ctr += n;
	while (ctr >= MVXPE_TX_RING_CNT)
		ctr -= MVXPE_TX_RING_CNT;

	return ctr;
}

/* Advance a ring counter by n slots, wrapping at the Rx ring size. */
static __inline int
rx_counter_adv(int ctr, int n)
{
	/* XXX: lock or atomic */
	ctr += n;
	while (ctr >= MVXPE_RX_RING_CNT)
		ctr -= MVXPE_RX_RING_CNT;

	return ctr;
}
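
/*
 * Usage sketch (illustrative): after queueing nsegs Tx descriptors the
 * CPU index would be advanced with
 *
 *	tx->tx_cpu = tx_counter_adv(tx->tx_cpu, nsegs);
 */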

/*
 * Timeout control
 */
#define MVXPE_PHY_TIMEOUT	10000	/* msec */
#define RX_DISABLE_TIMEOUT	0x1000000 /* times */
#define TX_DISABLE_TIMEOUT	0x1000000 /* times */
#define TX_FIFO_EMPTY_TIMEOUT	0x1000000 /* times */

/*
 * Event counter
 */
#ifdef MVXPE_EVENT_COUNTERS
#define	MVXPE_EVCNT_INCR(ev)		(ev)->ev_count++
#define	MVXPE_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	MVXPE_EVCNT_INCR(ev)		/* nothing */
#define	MVXPE_EVCNT_ADD(ev, val)	/* nothing */
#endif
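
/*
 * Usage sketch (illustrative): counters are bumped with the macros
 * above so that the calls vanish when MVXPE_EVENT_COUNTERS is not
 * defined, e.g.
 *
 *	MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtxth);
 *	MVXPE_EVCNT_ADD(&sc->sc_ev.ev_drv_rxq[q], nframes);
 */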
struct mvxpe_evcnt {
	/*
	 * Master Interrupt Handler
	 */
	struct evcnt ev_i_rxtxth;
	struct evcnt ev_i_rxtx;
	struct evcnt ev_i_misc;

	/*
	 * RXTXTH Interrupt
	 */
	struct evcnt ev_rxtxth_txerr;

	/*
	 * MISC Interrupt
	 */
	struct evcnt ev_misc_phystatuschng;
	struct evcnt ev_misc_linkchange;
	struct evcnt ev_misc_iae;
	struct evcnt ev_misc_rxoverrun;
	struct evcnt ev_misc_rxcrc;
	struct evcnt ev_misc_rxlargepacket;
	struct evcnt ev_misc_txunderrun;
	struct evcnt ev_misc_prbserr;
	struct evcnt ev_misc_srse;
	struct evcnt ev_misc_txreq;

	/*
	 * RxTx Interrupt
	 */
	struct evcnt ev_rxtx_rreq;
	struct evcnt ev_rxtx_rpq;
	struct evcnt ev_rxtx_tbrq;
	struct evcnt ev_rxtx_rxtxth;
	struct evcnt ev_rxtx_txerr;
	struct evcnt ev_rxtx_misc;

	/*
	 * Link
	 */
	struct evcnt ev_link_up;
	struct evcnt ev_link_down;

	/*
	 * Rx Descriptor
	 */
	struct evcnt ev_rxd_ce;
	struct evcnt ev_rxd_or;
	struct evcnt ev_rxd_mf;
	struct evcnt ev_rxd_re;
	struct evcnt ev_rxd_scat;

	/*
	 * Tx Descriptor
	 */
	struct evcnt ev_txd_lc;
	struct evcnt ev_txd_ur;
	struct evcnt ev_txd_rl;
	struct evcnt ev_txd_oth;

	/*
	 * Status Registers
	 */
	struct evcnt ev_reg_pdfc;	/* Rx Port Discard Frame Counter */
	struct evcnt ev_reg_pofc;	/* Rx Port Overrun Frame Counter */
	struct evcnt ev_reg_txbadfcs;	/* Tx BAD FCS Counter */
	struct evcnt ev_reg_txdropped;	/* Tx Dropped Counter */
	struct evcnt ev_reg_lpic;

	/* Device Driver Errors */
	struct evcnt ev_drv_wdogsoft;
	struct evcnt ev_drv_txerr;
	struct evcnt ev_drv_rxq[MVXPE_QUEUE_SIZE];
	struct evcnt ev_drv_rxqe[MVXPE_QUEUE_SIZE];
	struct evcnt ev_drv_txq[MVXPE_QUEUE_SIZE];
	struct evcnt ev_drv_txqe[MVXPE_QUEUE_SIZE];
};

/*
 * Debug
 */
#ifdef MVXPE_DEBUG
#define DPRINTF(fmt, ...) \
	do { \
		if (mvxpe_debug >= 1) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTFN(level, fmt, ...) \
	do { \
		if (mvxpe_debug >= (level)) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTDEV(dev, level, fmt, ...) \
	do { \
		if (mvxpe_debug >= (level)) { \
			device_printf((dev), \
			    "%s: " fmt, __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTSC(sc, level, fmt, ...) \
	do { \
		device_t dev = (sc)->sc_dev; \
		if (mvxpe_debug >= (level)) { \
			device_printf(dev, \
			    "%s: " fmt, __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTIFNET(ifp, level, fmt, ...) \
	do { \
		const char *xname = (ifp)->if_xname; \
		if (mvxpe_debug >= (level)) { \
			printf("%s: %s: " fmt, xname, __func__, ##__VA_ARGS__);\
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTPRXS(level, q) \
	do { \
		uint32_t _reg = MVXPE_READ(sc, MVXPE_PRXS(q)); \
		if (mvxpe_debug >= (level)) { \
		   printf("PRXS(queue %d) %#x: Occupied %d, NoOccupied %d.\n", \
		    q, _reg, MVXPE_PRXS_GET_ODC(_reg), \
		    MVXPE_PRXS_GET_NODC(_reg)); \
		} \
	} while (/*CONSTCOND*/0)
#else
#define DPRINTF(fmt, ...)
#define DPRINTFN(level, fmt, ...)
#define DPRINTDEV(dev, level, fmt, ...)
#define DPRINTSC(sc, level, fmt, ...)
#define DPRINTIFNET(ifp, level, fmt, ...)
#define DPRINTPRXS(level, q)
#endif
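
/*
 * Usage sketch (illustrative; mvxpe_debug is presumably a tunable
 * defined in the .c file):
 *
 *	DPRINTSC(sc, 3, "tx queue %d: %d packets pending\n",
 *	    q, sc->sc_tx_pending);
 */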

#define KASSERT_SC_MTX(sc) \
    KASSERT(mutex_owned(&(sc)->sc_mtx))
#define KASSERT_BM_MTX(sc) \
    KASSERT(mutex_owned(&(sc)->sc_bm->bm_mtx))
#define KASSERT_RX_MTX(sc, q) \
    KASSERT(mutex_owned(&(sc)->sc_rx_ring[(q)].rx_ring_mtx))
#define KASSERT_TX_MTX(sc, q) \
    KASSERT(mutex_owned(&(sc)->sc_tx_ring[(q)].tx_ring_mtx))
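
/*
 * Usage sketch (illustrative): a ring handler takes its ring lock and
 * asserts ownership before touching ring state:
 *
 *	mutex_enter(&sc->sc_rx_ring[q].rx_ring_mtx);
 *	KASSERT_RX_MTX(sc, q);
 */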

/*
 * Configuration parameters
 */
struct mvxpe_conf {
	int cf_lpi;		/* EEE Low Power IDLE enable */
	int cf_fc;		/* Flow Control enable */
};

/*
 * sysctl(9) parameters
 */
struct mvxpe_softc;
struct mvxpe_sysctl_queue {
	struct mvxpe_softc	*sc;
	int			rxtx;
	int			queue;
};
#define MVXPE_SYSCTL_RX		0
#define MVXPE_SYSCTL_TX		1

struct mvxpe_sysctl_mib {
	struct mvxpe_softc	*sc;
	int			index;
	uint64_t		counter;
};

/*
 * Ethernet Device main context
 */
struct mvxpe_softc {
	device_t sc_dev;
	int sc_port;
	uint32_t sc_version;

	/*
	 * sc_mtx must be held by interface functions to/from
	 * other frameworks: interrupt handler, sysctl handler,
	 * ioctl handler, and so on.
	 */
	kmutex_t sc_mtx;

	/*
	 * Ethernet facilities
	 */
	struct ethercom sc_ethercom;
	struct mii_data sc_mii;
	u_int8_t sc_enaddr[ETHER_ADDR_LEN];	/* station addr */
	u_short sc_if_flags;
	int sc_wdogsoft;

	/*
	 * Configuration Parameters
	 */
	struct mvxpe_conf sc_cf;

	/*
	 * I/O Spaces
	 */
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;	/* all registers handle */
	bus_space_handle_t sc_mibh;	/* mib counter handle */

	/*
	 * DMA Spaces
	 */
	bus_dma_tag_t sc_dmat;
	struct mvxpe_rx_ring		sc_rx_ring[MVXPE_QUEUE_SIZE];
	struct mvxpe_tx_ring		sc_tx_ring[MVXPE_QUEUE_SIZE];
	int sc_tx_pending;		/* total number of tx packets */

	/*
	 * Software Buffer Manager
	 */
	struct mvxpbm_softc *sc_bm;

	/*
	 * Maintenance clock
	 */
	callout_t sc_tick_ch;		/* tick callout */

	/*
	 * Link State control
	 */
	uint32_t sc_linkstate;

	/*
	 * Acts as a random source
	 */
	krndsource_t sc_rnd_source;

	/*
	 * Sysctl interfaces
	 */
	struct sysctllog *sc_mvxpe_clog;
	struct mvxpe_sysctl_queue sc_sysctl_rx_queue[MVXPE_QUEUE_SIZE];
	struct mvxpe_sysctl_queue sc_sysctl_tx_queue[MVXPE_QUEUE_SIZE];

	/*
	 * MIB counter
	 */
	size_t sc_sysctl_mib_size;
	struct mvxpe_sysctl_mib *sc_sysctl_mib;

#ifdef MVXPE_EVENT_COUNTERS
	/*
	 * Event counter
	 */
	struct mvxpe_evcnt sc_ev;
#endif
};
#define MVXPE_RX_RING_MEM_VA(sc, q) \
    ((sc)->sc_rx_ring[(q)].rx_descriptors)
#define MVXPE_RX_RING_MEM_PA(sc, q) \
    ((sc)->sc_rx_ring[(q)].rx_descriptors_map->dm_segs[0].ds_addr)
#define MVXPE_RX_RING_MEM_MAP(sc, q) \
    ((sc)->sc_rx_ring[(q)].rx_descriptors_map)
#define MVXPE_RX_RING(sc, q) \
    (&(sc)->sc_rx_ring[(q)])
#define MVXPE_RX_HANDLE(sc, q, i) \
    (&(sc)->sc_rx_ring[(q)].rx_handle[(i)])
#define MVXPE_RX_DESC(sc, q, i) \
    ((sc)->sc_rx_ring[(q)].rx_handle[(i)].rxdesc_va)
#define MVXPE_RX_DESC_OFF(sc, q, i) \
    ((sc)->sc_rx_ring[(q)].rx_handle[(i)].rxdesc_off)
#define MVXPE_RX_PKTBUF(sc, q, i) \
    ((sc)->sc_rx_ring[(q)].rx_handle[(i)].chunk)

#define MVXPE_TX_RING_MEM_VA(sc, q) \
    ((sc)->sc_tx_ring[(q)].tx_descriptors)
#define MVXPE_TX_RING_MEM_PA(sc, q) \
    ((sc)->sc_tx_ring[(q)].tx_descriptors_map->dm_segs[0].ds_addr)
#define MVXPE_TX_RING_MEM_MAP(sc, q) \
    ((sc)->sc_tx_ring[(q)].tx_descriptors_map)
#define MVXPE_TX_RING(sc, q) \
    (&(sc)->sc_tx_ring[(q)])
#define MVXPE_TX_HANDLE(sc, q, i) \
    (&(sc)->sc_tx_ring[(q)].tx_handle[(i)])
#define MVXPE_TX_DESC(sc, q, i) \
    ((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_va)
#define MVXPE_TX_DESC_OFF(sc, q, i) \
    ((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_off)
#define MVXPE_TX_MBUF(sc, q, i) \
    ((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_mbuf)
#define MVXPE_TX_MAP(sc, q, i) \
    ((sc)->sc_tx_ring[(q)].tx_handle[(i)].txdesc_mbuf_map)
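
/*
 * Usage sketch (illustrative): completion code can walk a Tx ring
 * through the accessors above without touching the structs directly:
 *
 *	int i = MVXPE_TX_RING(sc, q)->tx_dma;
 *	struct mvxpe_tx_desc *txd = MVXPE_TX_DESC(sc, q, i);
 *	struct mbuf *m = MVXPE_TX_MBUF(sc, q, i);
 *	i = MVXPE_TX_RING_NEXT(i);
 */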

#endif /* _IF_MVXPEVAR_H_ */