/*	$NetBSD: if_bgevar.h,v 1.42 2024/08/28 05:58:11 skrll Exp $	*/
/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bgereg.h,v 1.1.2.7 2002/11/02 18:17:55 mp Exp $
 */

/*
 * BCM570x memory map. The internal memory layout varies somewhat
 * depending on whether or not we have external SSRAM attached.
 * The BCM5700 can have up to 16MB of external memory. The BCM5701
 * is apparently not designed to use external SSRAM. The mappings
 * up to the first 4 send rings are the same for both internal and
 * external memory configurations. Note that mini RX ring space is
 * only available with external SSRAM configurations, which means
 * the mini RX ring is not supported on the BCM5701.
 *
 * The NIC's memory can be accessed by the host in one of 3 ways:
 *
 * 1) Indirect register access. The MEMWIN_BASEADDR and MEMWIN_DATA
 *    registers in PCI config space can be used to read any 32-bit
 *    address within the NIC's memory.
 *
 * 2) Memory window access. The MEMWIN_BASEADDR register in PCI config
 *    space can be used in conjunction with the memory window in the
 *    device register space at offset 0x8000 to read any 32K chunk
 *    of NIC memory.
 *
 * 3) Flat mode. If the 'flat mode' bit in the PCI state register is
 *    set, the device I/O mapping consumes 32MB of host address space,
 *    allowing all of the registers and internal NIC memory to be
 *    accessed directly. NIC memory addresses are offset by 0x01000000.
 *    Flat mode consumes so much host address space that it is not
 *    recommended.
 */
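
/*
 * As an illustration of method 1, an indirect read can be done entirely
 * through PCI config space: write the target NIC address to the memory
 * window base address register, read the data register, then restore the
 * window.  A minimal sketch (register names assumed to be the
 * BGE_PCI_MEMWIN_* definitions from if_bgereg.h):
 *
 *	static uint32_t
 *	bge_readmem_ind(struct bge_softc *sc, int off)
 *	{
 *		uint32_t val;
 *
 *		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
 *		    BGE_PCI_MEMWIN_BASEADDR, off);
 *		val = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
 *		    BGE_PCI_MEMWIN_DATA);
 *		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
 *		    BGE_PCI_MEMWIN_BASEADDR, 0);
 *		return val;
 *	}
 */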

#ifndef _DEV_PCI_IF_BGEVAR_H_
#define _DEV_PCI_IF_BGEVAR_H_

#include <sys/bus.h>
#include <sys/rndsource.h>
#include <sys/time.h>

#include <net/if_ether.h>

#include <dev/pci/pcivar.h>

#define BGE_HOSTADDR(x, y)						      \
	do {								      \
		(x).bge_addr_lo = BUS_ADDR_LO32(y);			      \
		if (sizeof (bus_addr_t) == 8)				      \
			(x).bge_addr_hi = BUS_ADDR_HI32(y);		      \
		else							      \
			(x).bge_addr_hi = 0;				      \
	} while (0)
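
/*
 * BGE_HOSTADDR splits a bus_dma address into the two 32-bit halves of a
 * bge_hostaddr-style structure.  An illustrative use when loading a
 * receive BD (field names here mirror typical usage in if_bge.c):
 *
 *	struct bge_rx_bd *r = &sc->bge_rdata->bge_rx_std_ring[i];
 *
 *	BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
 *
 * On platforms with a 32-bit bus_addr_t the high word is simply zeroed.
 */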

#define RCB_WRITE_4(sc, rcb, offset, val)				      \
	bus_space_write_4(sc->bge_btag, sc->bge_bhandle,		      \
			  rcb + offsetof(struct bge_rcb, offset), val)

/*
 * Other utility macros.
 */
#define BGE_INC(x, y)	(x) = ((x) + 1) % (y)
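
/*
 * BGE_INC advances a ring index with wrap-around, e.g. stepping the
 * standard RX ring head (an illustrative call using names defined in
 * this file and if_bgereg.h):
 *
 *	BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
 */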

/*
 * Register access macros. The Tigon always uses memory mapped register
 * accesses and all registers must be accessed with 32 bit operations.
 */

#define CSR_WRITE_4(sc, reg, val)					      \
	bus_space_write_4(sc->bge_btag, sc->bge_bhandle, reg, val)

#define CSR_READ_4(sc, reg)						      \
	bus_space_read_4(sc->bge_btag, sc->bge_bhandle, reg)

#define CSR_WRITE_4_FLUSH(sc, reg, val)					      \
	do {								      \
		CSR_WRITE_4(sc, reg, val);				      \
		CSR_READ_4(sc, reg);					      \
	} while (0)

#define BGE_SETBIT(sc, reg, x)						      \
	CSR_WRITE_4(sc, reg, (CSR_READ_4(sc, reg) | (x)))
#define BGE_SETBIT_FLUSH(sc, reg, x)					      \
	do {								      \
		BGE_SETBIT(sc, reg, x);					      \
		CSR_READ_4(sc, reg);					      \
	} while (0)
#define BGE_CLRBIT(sc, reg, x)						      \
	CSR_WRITE_4(sc, reg, (CSR_READ_4(sc, reg) & ~(x)))
#define BGE_CLRBIT_FLUSH(sc, reg, x)					      \
	do {								      \
		BGE_CLRBIT(sc, reg, x);					      \
		CSR_READ_4(sc, reg);					      \
	} while (0)
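
/*
 * The *_FLUSH variants read the register back immediately after the
 * write; the read cannot complete until the posted write has reached
 * the device, so they are used where the write must take effect before
 * the driver proceeds.  Illustrative use (register name and delay are
 * examples, not a verbatim excerpt of if_bge.c):
 *
 *	CSR_WRITE_4_FLUSH(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
 *	DELAY(40);
 */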

/* BAR2 APE register access macros. */
#define	APE_WRITE_4(sc, reg, val)					      \
	bus_space_write_4(sc->bge_apetag, sc->bge_apehandle, reg, val)

#define	APE_READ_4(sc, reg)						      \
	bus_space_read_4(sc->bge_apetag, sc->bge_apehandle, reg)

#define	APE_WRITE_4_FLUSH(sc, reg, val)					      \
	do {								      \
		APE_WRITE_4(sc, reg, val);				      \
		APE_READ_4(sc, reg);					      \
	} while (0)

#define	APE_SETBIT(sc, reg, x)						      \
	APE_WRITE_4(sc, reg, (APE_READ_4(sc, reg) | (x)))
#define	APE_CLRBIT(sc, reg, x)						      \
	APE_WRITE_4(sc, reg, (APE_READ_4(sc, reg) & ~(x)))

#define PCI_SETBIT(pc, tag, reg, x)					      \
	pci_conf_write(pc, tag, reg, (pci_conf_read(pc, tag, reg) | (x)))
#define PCI_CLRBIT(pc, tag, reg, x)					      \
	pci_conf_write(pc, tag, reg, (pci_conf_read(pc, tag, reg) & ~(x)))

/*
 * Memory management stuff. Note: the SSLOTS, MSLOTS and JSLOTS
 * values are tuneable. They control the actual number of buffers
 * allocated for the standard, mini and jumbo receive rings.
 */

#define BGE_SSLOTS	256
#define BGE_MSLOTS	256
#define BGE_JSLOTS	384

#define BGE_JRAWLEN	(BGE_JUMBO_FRAMELEN + ETHER_ALIGN)
#define BGE_JLEN	(BGE_JRAWLEN + (sizeof(uint64_t) -		      \
			    (BGE_JRAWLEN % sizeof(uint64_t))))
#define BGE_JPAGESZ	PAGE_SIZE
#define BGE_RESID	(BGE_JPAGESZ - (BGE_JLEN * BGE_JSLOTS) % BGE_JPAGESZ)
#define BGE_JMEM	((BGE_JLEN * BGE_JSLOTS) + BGE_RESID)
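
/*
 * Rough sizing example (assumed values: BGE_JUMBO_FRAMELEN of 9018 from
 * if_bgereg.h, ETHER_ALIGN of 2, and a 4KB PAGE_SIZE; other
 * configurations will differ):
 *
 *	BGE_JRAWLEN = 9018 + 2              = 9020
 *	BGE_JLEN    = 9020 + (8 - 9020 % 8) = 9024	(8-byte aligned)
 *	BGE_JLEN * BGE_JSLOTS(384)          = 3465216	(exactly 846 pages)
 *	BGE_RESID   = 4096 - 3465216 % 4096 = 4096
 *	BGE_JMEM    = 3465216 + 4096        = 3469312	(~3.3MB)
 *
 * Note that when the product is already page aligned, BGE_RESID pads by
 * a full extra page rather than by zero.
 */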

/*
 * Ring structures. Most of these reside in host memory and we tell
 * the NIC where they are via the ring control blocks. The exceptions
 * are the tx and command rings, which live in NIC memory and which
 * we access via the shared memory window.
 */
struct bge_ring_data {
	struct bge_rx_bd	bge_rx_std_ring[BGE_STD_RX_RING_CNT];
	struct bge_rx_bd	bge_rx_jumbo_ring[BGE_JUMBO_RX_RING_CNT];
	struct bge_rx_bd	bge_rx_return_ring[BGE_RETURN_RING_CNT];
	struct bge_tx_bd	bge_tx_ring[BGE_TX_RING_CNT];
	struct bge_status_block	bge_status_block;
	struct bge_tx_desc	*bge_tx_ring_nic; /* pointer to shared mem */
	struct bge_cmd_desc	*bge_cmd_ring;	/* pointer to shared mem */
	struct bge_gib		bge_info;
};

#define BGE_RING_DMA_ADDR(sc, offset)					      \
	((sc)->bge_ring_map->dm_segs[0].ds_addr +			      \
	offsetof(struct bge_ring_data, offset))
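
/*
 * BGE_RING_DMA_ADDR yields the bus address of a member of the ring
 * structure above, suitable for handing to the chip.  A typical
 * (illustrative) use when initializing a ring control block:
 *
 *	BGE_HOSTADDR(rcb->bge_hostaddr,
 *	    BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
 */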

/*
 * Number of DMA segments in a TxCB. Note that this is carefully
 * chosen to make the total struct size an even power of two. It's
 * critical that no TxCB be split across a page boundary since
 * no attempt is made to allocate physically contiguous memory.
 */
#if 0	/* pre-TSO values */
#define BGE_TXDMA_MAX	ETHER_MAX_LEN_JUMBO
#ifdef _LP64
#define BGE_NTXSEG	30
#else
#define BGE_NTXSEG	31
#endif
#else	/* TSO values */
#define BGE_TXDMA_MAX	(round_page(IP_MAXPACKET))	/* for TSO */
#ifdef _LP64
#define BGE_NTXSEG	120	/* XXX just a guess */
#else
#define BGE_NTXSEG	124	/* XXX just a guess */
#endif
#endif	/* TSO values */

#define	BGE_STATUS_BLK_SZ	sizeof (struct bge_status_block)

/*
 * Mbuf pointers. We need these to keep track of the virtual addresses
 * of our mbuf chains since we can only convert from virtual to physical,
 * not the other way around.
 */
struct bge_chain_data {
	struct mbuf		*bge_tx_chain[BGE_TX_RING_CNT];
	struct mbuf		*bge_rx_std_chain[BGE_STD_RX_RING_CNT];
	struct mbuf		*bge_rx_jumbo_chain[BGE_JUMBO_RX_RING_CNT];
	bus_dmamap_t		bge_rx_std_map[BGE_STD_RX_RING_CNT];
	bus_dmamap_t		bge_rx_jumbo_map;
	bus_dma_segment_t	bge_rx_jumbo_seg;
	/* Stick the jumbo mem management stuff here too. */
	void *			bge_jslots[BGE_JSLOTS];
	void *			bge_jumbo_buf;
};

#define BGE_JUMBO_DMA_ADDR(sc, m) \
	((sc)->bge_cdata.bge_rx_jumbo_map->dm_segs[0].ds_addr + \
	 (mtod((m), char *) - (char *)(sc)->bge_cdata.bge_jumbo_buf))
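
/*
 * BGE_JUMBO_DMA_ADDR converts a jumbo mbuf's data pointer back into a
 * bus address by taking its offset within the single jumbo buffer
 * mapping.  Illustrative use when refilling a jumbo receive BD:
 *
 *	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m));
 */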

struct bge_type {
	uint16_t		bge_vid;
	uint16_t		bge_did;
	char			*bge_name;
};

#define BGE_TIMEOUT		100000
#define BGE_TXCONS_UNSET		0xFFFF	/* impossible value */

struct bge_jpool_entry {
	int				slot;
	SLIST_ENTRY(bge_jpool_entry)	jpool_entries;
};

struct bge_bcom_hack {
	int			reg;
	int			val;
};

struct txdmamap_pool_entry {
	bus_dmamap_t dmamap;
	bus_dmamap_t dmamap32;
	bool is_dma32;
	SLIST_ENTRY(txdmamap_pool_entry) link;
};

#define	ASF_ENABLE		1
#define	ASF_NEW_HANDSHAKE	2
#define	ASF_STACKUP		4

/*
 * Locking notes:
 *
 *	n		IFNET_LOCK
 *	m		sc_mcast_lock
 *	i		sc_intr_lock
 *	i/n		while down, IFNET_LOCK; while up, sc_intr_lock
 *
 * Otherwise, stable from attach to detach.
 *
 * Lock order:
 *
 *	IFNET_LOCK -> sc_intr_lock
 *	IFNET_LOCK -> sc_mcast_lock
 */
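
/*
 * For example, a path that runs with the interface up and needs to
 * touch fields tagged "i" below would take the locks in the documented
 * order (an illustrative sketch, not a verbatim excerpt of if_bge.c):
 *
 *	IFNET_LOCK(ifp);
 *	mutex_enter(sc->sc_intr_lock);
 *	... update "i" fields ...
 *	mutex_exit(sc->sc_intr_lock);
 *	IFNET_UNLOCK(ifp);
 */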
struct bge_softc {
	device_t		bge_dev;
	struct ethercom		ethercom;	/* interface info */
	bus_space_handle_t	bge_bhandle;
	bus_space_tag_t		bge_btag;
	bus_size_t		bge_bsize;
	bus_space_handle_t	bge_apehandle;
	bus_space_tag_t		bge_apetag;
	bus_size_t		bge_apesize;
	void			*bge_intrhand;
	pci_intr_handle_t	*bge_pihp;
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_pcitag;

	struct pci_attach_args	bge_pa;
	struct mii_data		bge_mii;	/* i: mii data */
	struct ifmedia		bge_ifmedia;	/* i: media info */
	uint32_t		bge_return_ring_cnt;
	uint32_t		bge_tx_prodidx; /* i: tx producer idx */
	bus_dma_tag_t		bge_dmatag;
	bus_dma_tag_t		bge_dmatag32;
	bool			bge_dma64;
	uint32_t		bge_pcixcap;
	uint32_t		bge_pciecap;
	uint16_t		bge_mps;
	int			bge_expmrq;
	uint32_t		bge_lasttag;	/* i: last status tag */
	uint32_t		bge_mfw_flags;	/* Management F/W flags */
#define	BGE_MFW_ON_RXCPU	__BIT(0)
#define	BGE_MFW_ON_APE		__BIT(1)
#define	BGE_MFW_TYPE_NCSI	__BIT(2)
#define	BGE_MFW_TYPE_DASH	__BIT(3)
	int			bge_phy_ape_lock;
	int			bge_phy_addr;
	uint32_t		bge_chipid;
	uint8_t			bge_asf_mode;
	uint8_t			bge_asf_count;	/* i: XXX ??? */
	struct bge_ring_data	*bge_rdata;	/* rings */
	struct bge_chain_data	bge_cdata;	/* mbufs */
	bus_dmamap_t		bge_ring_map;
	bus_dma_segment_t	bge_ring_seg;
	int			bge_ring_rseg;
	uint16_t		bge_tx_saved_considx; /* i: tx consumer idx */
	uint16_t		bge_rx_saved_considx; /* i: rx consumer idx */
	uint16_t		bge_std;	/* i: current std ring head */
	uint16_t		bge_std_cnt;	/* i: number of std mbufs */
	uint16_t		bge_jumbo;
					/* i: current jumbo ring head */
	SLIST_HEAD(__bge_jfreehead, bge_jpool_entry)	bge_jfree_listhead;
					/* i: list of free jumbo mbufs */
	SLIST_HEAD(__bge_jinusehead, bge_jpool_entry)	bge_jinuse_listhead;
					/* i: list of jumbo mbufs in use */
	uint32_t		bge_stat_ticks;
	uint32_t		bge_rx_coal_ticks;	/* i */
	uint32_t		bge_tx_coal_ticks;	/* i */
	uint32_t		bge_rx_max_coal_bds;	/* i */
	uint32_t		bge_tx_max_coal_bds;	/* i */
	uint32_t		bge_sts;	/* i/n: link status */
#define BGE_STS_LINK		__BIT(0)	/* MAC link status */
#define BGE_STS_LINK_EVT	__BIT(1)	/* pending link event */
#define BGE_STS_AUTOPOLL	__BIT(2)	/* PHY auto-polling */
#define BGE_STS_BIT(sc, x)	((sc)->bge_sts & (x))
#define BGE_STS_SETBIT(sc, x)	((sc)->bge_sts |= (x))
#define BGE_STS_CLRBIT(sc, x)	((sc)->bge_sts &= ~(x))
	u_short			bge_if_flags;	/* m: if_flags cache */
	uint32_t		bge_flags;	/* i/n */
	uint32_t		bge_phy_flags;
	int			bge_flowflags;	/* i */
	time_t			bge_tx_lastsent;
						/* i: time of last tx */
	bool			bge_txrx_stopping;
						/* i: true when going down */
	bool			bge_tx_sending;	/* i: true when tx inflight */

#ifdef BGE_EVENT_COUNTERS
	/*
	 * Event counters.
	 */
	struct evcnt bge_ev_intr;	/* i: interrupts */
	struct evcnt bge_ev_intr_spurious;
					/* i: spurious intr. (tagged status) */
	struct evcnt bge_ev_intr_spurious2; /* i: spurious interrupts */
	struct evcnt bge_ev_tx_xoff;	/* i: send PAUSE(len>0) packets */
	struct evcnt bge_ev_tx_xon;	/* i: send PAUSE(len=0) packets */
	struct evcnt bge_ev_rx_xoff;	/* i: receive PAUSE(len>0) packets */
	struct evcnt bge_ev_rx_xon;	/* i: receive PAUSE(len=0) packets */
	struct evcnt bge_ev_rx_macctl;	/* i: receive MAC control packets */
	struct evcnt bge_ev_xoffentered;/* i: XOFF state entered */
#endif /* BGE_EVENT_COUNTERS */
	uint64_t		bge_if_collisions;	/* i */
	int			bge_txcnt;	/* i: # tx descs in use */
	struct callout		bge_timeout;	/* i: tx timeout */
	bool			bge_pending_rxintr_change;
						/* i: change pending to
						 * rx_coal_ticks and
						 * rx_max_coal_bds */
	bool			bge_attached;
	bool			bge_detaching;	/* n */
	SLIST_HEAD(, txdmamap_pool_entry) txdma_list;		/* i */
	struct txdmamap_pool_entry *txdma[BGE_TX_RING_CNT];	/* i */

	struct sysctllog	*bge_log;

	krndsource_t	rnd_source;	/* random source */

	kmutex_t *sc_mcast_lock;	/* m: lock for SIOCADD/DELMULTI */
	kmutex_t *sc_intr_lock;		/* i: lock for interrupt operations */
	struct workqueue *sc_reset_wq;
	struct work sc_reset_work;	/* i */
	volatile unsigned sc_reset_pending;

	bool sc_trigger_reset;		/* i */
};

#endif /* _DEV_PCI_IF_BGEVAR_H_ */