xref: /netbsd-src/sys/dev/marvell/if_gfevar.h (revision 8b0f9554ff8762542c4defc4f70e1eb76fb508fa)
1 /*	$NetBSD: if_gfevar.h,v 1.8 2007/03/04 06:02:14 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *      This product includes software developed for the NetBSD Project by
18  *      Allegro Networks, Inc., and Wasabi Systems, Inc.
19  * 4. The name of Allegro Networks, Inc. may not be used to endorse
20  *    or promote products derived from this software without specific prior
21  *    written permission.
22  * 5. The name of Wasabi Systems, Inc. may not be used to endorse
23  *    or promote products derived from this software without specific prior
24  *    written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND
27  * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
28  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
29  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30  * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC.
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
/*
 * Receive DMA resource sizing.  The descriptor ring gets one page;
 * the buffer region is GE_RXDESC_MAX buffers of GE_RXBUF_SIZE bytes
 * each and may span up to GE_RXBUF_NSEGS DMA segments.
 */
#define	GE_RXDESC_MEMSIZE		(1 * PAGE_SIZE)	/* rx descriptor ring space */
#define	GE_RXDESC_MAX			64		/* rx descriptors per queue */
#define	GE_RXBUF_SIZE			2048		/* bytes per rx buffer */
#define	GE_RXBUF_MEMSIZE		(GE_RXDESC_MAX*GE_RXBUF_SIZE) /* total rx buffer bytes */
#define	GE_RXBUF_NSEGS			((GE_RXBUF_MEMSIZE/PAGE_SIZE)+1) /* worst-case segment count */
#define	GE_DMSEG_MAX			(GE_RXBUF_NSEGS) /* gdm_segs[] capacity below */
46 
/*
 * Bookkeeping for one bus_dma'able memory region.  Used for descriptor
 * rings, packet buffer memory, and the multicast hash table (see
 * gfe_txqueue, gfe_rxqueue, and gfe_softc below) -- not just tx memory.
 */
struct gfe_dmamem {
	bus_dmamap_t gdm_map;		/* dma map of the region */
	void *gdm_kva;			/* kernel va of the mapped memory */
	int gdm_nsegs;			/* # of segments in gdm_segs */
	int gdm_maxsegs;		/* maximum # of segments allowed */
	size_t gdm_size;		/* size of memory region */
	bus_dma_segment_t gdm_segs[GE_DMSEG_MAX]; /* dma segments backing the region */
};
55 
56 /* With a 4096 page size, we get 256 descriptors per page.
57  */
/* Transmit DMA resource sizing: one page of 16-byte descriptors
 * (256 with a 4096-byte page) plus four pages of packet buffer space. */
#define	GE_TXDESC_MEMSIZE		(1 * PAGE_SIZE)	/* tx descriptor ring space */
#define	GE_TXDESC_MAX			(GE_TXDESC_MEMSIZE / 16) /* tx descriptors per queue */
#define	GE_TXBUF_SIZE			(4 * PAGE_SIZE)	/* tx packet buffer space */
61 
/*
 * State for one hardware transmit queue (the softc has one per
 * priority).  Descriptors and packet data live in separate
 * gfe_dmamem regions.
 */
struct gfe_txqueue {
	struct ifqueue txq_pendq;	/* these are ready to go to the GT */
	struct gfe_dmamem txq_desc_mem;	/* transmit descriptor memory */
	struct gfe_dmamem txq_buf_mem;	/* transmit buffer memory */
	unsigned int txq_lo;		/* next to be given to GT */
	unsigned int txq_fi; 		/* next to be returned to CPU */
	unsigned int txq_ei_gapcount;	/* counter until next EI */
	unsigned int txq_nactive;	/* number of active descriptors */
	unsigned int txq_outptr;	/* where to put next transmit packet */
	unsigned int txq_inptr;		/* start of 1st queued tx packet */
	uint32_t txq_intrbits;		/* bits to write to EIMR */
	uint32_t txq_esdcmrbits;	/* bits to write to ESDCMR */
	uint32_t txq_epsrbits;		/* bits to test with EPSR */
	volatile struct gt_eth_desc *txq_descs; /* ptr to tx descriptors */
	bus_addr_t txq_ectdp;		/* offset to cur. tx desc ptr reg */
	bus_addr_t txq_desc_busaddr;	/* bus addr of tx descriptors */
	bus_addr_t txq_buf_busaddr;	/* bus addr of tx buffers */
};
80 
/* With a 4096 page size, we get 256 descriptors per page.  We want 1024
 * which will give us about 8ms of 64 byte packets (2ms for each priority
 * queue).
 * NOTE(review): this appears stale -- GE_RXDESC_MAX above is 64, not
 * 1024; confirm the intended rx ring size and the timing estimate.
 */
85 
/* One fixed-size (GE_RXBUF_SIZE byte) receive buffer. */
struct gfe_rxbuf {
	uint8_t	rb_data[GE_RXBUF_SIZE];	/* raw received data */
};
89 
/*
 * State for one hardware receive queue (the softc has one per
 * priority).
 */
struct gfe_rxqueue {
	struct gfe_dmamem rxq_desc_mem;	/* receive descriptor memory */
	struct gfe_dmamem rxq_buf_mem;	/* receive buffer memory */
	struct mbuf *rxq_curpkt;	/* mbuf for current packet */
	volatile struct gt_eth_desc *rxq_descs;	/* ptr to rx descriptors */
	struct gfe_rxbuf *rxq_bufs;	/* kva of the receive buffers */
	unsigned int rxq_fi; 		/* next to be returned to CPU */
	unsigned int rxq_active;	/* # of descriptors given to GT */
	uint32_t rxq_intrbits;		/* bits to write to EIMR */
	bus_addr_t rxq_desc_busaddr;	/* bus addr of rx descriptors */
	uint32_t rxq_cmdsts;		/* save cmdsts from first descriptor */
	bus_size_t rxq_efrdp;		/* this queue's EFRDP register offset */
	bus_size_t rxq_ecrdp;		/* this queue's ECRDP register offset */
};
104 
/*
 * Transmit priority selectors; LO/HI presumably index the two-entry
 * sc_txq[] array, NONE selects neither queue.
 */
enum gfe_txprio {
	GE_TXPRIO_LO = 0,	/* low priority transmit queue */
	GE_TXPRIO_HI = 1,	/* high priority transmit queue */
	GE_TXPRIO_NONE = 2	/* no transmit queue */
};
/*
 * Receive priority selectors; presumably index the four-entry
 * sc_rxq[] array (Hi/MedHi/MedLo/Lo).
 */
enum gfe_rxprio {
	GE_RXPRIO_LO = 0,	/* low priority receive queue */
	GE_RXPRIO_MEDLO = 1,	/* medium-low priority receive queue */
	GE_RXPRIO_MEDHI = 2,	/* medium-high priority receive queue */
	GE_RXPRIO_HI = 3	/* high priority receive queue */
};
116 
/*
 * Per-instance driver state for one GT ethernet MAC.
 */
struct gfe_softc {
	struct device sc_dev;		/* must be first */
	struct ethercom sc_ec;		/* common ethernet glue */
	struct callout sc_co;		/* resource recovery */
	mii_data_t sc_mii;		/* mii interface */

	/*
	 * Bus space and DMA resources for reaching the GT registers.
	 */
	bus_space_tag_t sc_gt_memt;	/* bus space tag for GT registers */
	bus_space_handle_t sc_gt_memh;	/* handle covering the GT */
	bus_space_handle_t sc_memh;	/* subregion for ethernet */
	bus_dma_tag_t sc_dmat;		/* dma tag for rings/buffers/hash */
	int sc_macno;			/* which mac? 0, 1, or 2 */

	unsigned int sc_tickflags;	/* work deferred to the tick callout */
#define	GE_TICK_TX_IFSTART	0x0001	/* restart transmit */
#define	GE_TICK_RX_RESTART	0x0002	/* restart receive */
	unsigned int sc_flags;		/* GE_* state flags below */
#define	GE_ALLMULTI	0x0001		/* receiving all multicast */
#define	GE_PHYSTSCHG	0x0002		/* phy status change handling */
#define	GE_RXACTIVE	0x0004		/* receive is active */
#define	GE_NOFREE	0x0008		/* Don't free on disable */
	uint32_t sc_pcr;		/* current EPCR value */
	uint32_t sc_pcxr;		/* current EPCXR value */
	uint32_t sc_intrmask;		/* current EIMR value */
	uint32_t sc_idlemask;		/* suspended EIMR bits */
	size_t sc_max_frame_length;	/* maximum frame length */

	/*
	 * Hash table related members
	 */
	struct gfe_dmamem sc_hash_mem;	/* dma'ble hash table */
	uint64_t *sc_hashtable;		/* kva of the hash table */
	unsigned int sc_hashmask;	/* 0x1ff or 0x1fff */

	/*
	 * Transmit related members
	 */
	struct gfe_txqueue sc_txq[2];	/* High & Low transmit queues */

	/*
	 * Receive related members
	 */
	struct gfe_rxqueue sc_rxq[4];	/* Hi/MedHi/MedLo/Lo receive queues */
};
163