/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2014-2017 Chelsio Communications.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Chelsio Communications nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __CHELSIO_COMMON_H
#define __CHELSIO_COMMON_H

#include "cxgbe_compat.h"
#include "t4_hw.h"
#include "t4_chip_type.h"
#include "t4fw_interface.h"

#ifdef __cplusplus
extern "C" {
#endif

#define CXGBE_PAGE_SIZE RTE_PGSIZE_4K

enum {
	MAX_NPORTS     = 4,     /* max # of ports */
};

enum {
	T5_REGMAP_SIZE = (332 * 1024),
};

enum {
	MEMWIN0_APERTURE = 2048,
	MEMWIN0_BASE     = 0x1b800,
};

enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST };

enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR };

enum {
	PAUSE_RX      = 1 << 0,
	PAUSE_TX      = 1 << 1,
	PAUSE_AUTONEG = 1 << 2
};
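
/*
 * Example (illustrative): the PAUSE_* flags above are OR'ed together in
 * the requested_fc/fc fields of struct link_config below; symmetric pause
 * with autonegotiation would be (PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG).
 */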

struct port_stats {
	u64 tx_octets;            /* total # of octets in good frames */
	u64 tx_frames;            /* all good frames */
	u64 tx_bcast_frames;      /* all broadcast frames */
	u64 tx_mcast_frames;      /* all multicast frames */
	u64 tx_ucast_frames;      /* all unicast frames */
	u64 tx_error_frames;      /* all error frames */

	u64 tx_frames_64;         /* # of Tx frames in a particular range */
	u64 tx_frames_65_127;
	u64 tx_frames_128_255;
	u64 tx_frames_256_511;
	u64 tx_frames_512_1023;
	u64 tx_frames_1024_1518;
	u64 tx_frames_1519_max;

	u64 tx_drop;              /* # of dropped Tx frames */
	u64 tx_pause;             /* # of transmitted pause frames */
	u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
	u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
	u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
	u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
	u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
	u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
	u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
	u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */

	u64 rx_octets;            /* total # of octets in good frames */
	u64 rx_frames;            /* all good frames */
	u64 rx_bcast_frames;      /* all broadcast frames */
	u64 rx_mcast_frames;      /* all multicast frames */
	u64 rx_ucast_frames;      /* all unicast frames */
	u64 rx_too_long;          /* # of frames exceeding MTU */
	u64 rx_jabber;            /* # of jabber frames */
	u64 rx_fcs_err;           /* # of received frames with bad FCS */
	u64 rx_len_err;           /* # of received frames with length error */
	u64 rx_symbol_err;        /* symbol errors */
	u64 rx_runt;              /* # of short frames */

	u64 rx_frames_64;         /* # of Rx frames in a particular range */
	u64 rx_frames_65_127;
	u64 rx_frames_128_255;
	u64 rx_frames_256_511;
	u64 rx_frames_512_1023;
	u64 rx_frames_1024_1518;
	u64 rx_frames_1519_max;

	u64 rx_pause;             /* # of received pause frames */
	u64 rx_ppp0;              /* # of received PPP prio 0 frames */
	u64 rx_ppp1;              /* # of received PPP prio 1 frames */
	u64 rx_ppp2;              /* # of received PPP prio 2 frames */
	u64 rx_ppp3;              /* # of received PPP prio 3 frames */
	u64 rx_ppp4;              /* # of received PPP prio 4 frames */
	u64 rx_ppp5;              /* # of received PPP prio 5 frames */
	u64 rx_ppp6;              /* # of received PPP prio 6 frames */
	u64 rx_ppp7;              /* # of received PPP prio 7 frames */

	u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
	u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
	u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
	u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
	u64 rx_trunc0;            /* buffer-group 0 truncated packets */
	u64 rx_trunc1;            /* buffer-group 1 truncated packets */
	u64 rx_trunc2;            /* buffer-group 2 truncated packets */
	u64 rx_trunc3;            /* buffer-group 3 truncated packets */
};

struct sge_params {
	u32 hps;                        /* host page size for our PF/VF */
	u32 eq_qpp;                     /* egress queues/page for our PF/VF */
	u32 iq_qpp;                     /* ingress queues/page for our PF/VF */
};

struct tp_params {
	unsigned int ntxchan;        /* # of Tx channels */
	unsigned int tre;            /* log2 of core clocks per TP tick */
	unsigned int dack_re;        /* DACK timer resolution */
	unsigned int la_mask;        /* what events are recorded by TP LA */
	unsigned short tx_modq[NCHAN];  /* channel to modulation queue map */

	u32 vlan_pri_map;               /* cached TP_VLAN_PRI_MAP */
	u32 ingress_config;             /* cached TP_INGRESS_CONFIG */

	/*
	 * TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
	 * subset of the set of fields which may be present in the Compressed
	 * Filter Tuple portion of filters and TCP TCB connections.  The
	 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
	 * Since a variable number of fields may or may not be present, their
	 * shifted field positions within the Compressed Filter Tuple may
	 * vary, or not even be present if the field isn't selected in
	 * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
	 * places we store their offsets here, or a -1 if the field isn't
	 * present.
	 */
	int vlan_shift;
	int vnic_shift;
	int port_shift;
	int protocol_shift;
};

struct vpd_params {
	unsigned int cclk;
};

struct pci_params {
	uint16_t        vendor_id;
	uint16_t        device_id;
	uint32_t        vpd_cap_addr;
	uint16_t        speed;
	uint8_t         width;
};

/*
 * Firmware device log.
 */
struct devlog_params {
	u32 memtype;                    /* which memory (EDC0, EDC1, MC) */
	u32 start;                      /* start of log in firmware memory */
	u32 size;                       /* size of log */
};

struct arch_specific_params {
	u8 nchan;
	u16 mps_rplc_size;
	u16 vfcount;
	u32 sge_fl_db;
	u16 mps_tcam_size;
};

struct adapter_params {
	struct sge_params sge;
	struct tp_params  tp;
	struct vpd_params vpd;
	struct pci_params pci;
	struct devlog_params devlog;
	enum pcie_memwin drv_memwin;

	unsigned int sf_size;             /* serial flash size in bytes */
	unsigned int sf_nsec;             /* # of flash sectors */

	unsigned int fw_vers;
	unsigned int tp_vers;

	unsigned short mtus[NMTUS];
	unsigned short a_wnd[NCCTRL_WIN];
	unsigned short b_wnd[NCCTRL_WIN];

	unsigned int mc_size;             /* MC memory size */
	unsigned int cim_la_size;

	unsigned char nports;             /* # of ethernet ports */
	unsigned char portvec;

	enum chip_type chip;              /* chip code */
	struct arch_specific_params arch; /* chip specific params */

	bool ulptx_memwrite_dsgl;         /* use of T5 DSGL allowed */
};

struct link_config {
	unsigned short supported;        /* link capabilities */
	unsigned short advertising;      /* advertised capabilities */
	unsigned short requested_speed;  /* speed user has requested */
	unsigned short speed;            /* actual link speed */
	unsigned char  requested_fc;     /* flow control user has requested */
	unsigned char  fc;               /* actual link flow control */
	unsigned char  autoneg;          /* autonegotiating? */
	unsigned char  link_ok;          /* link up? */
};

#include "adapter.h"

void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
		      u32 val);
int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity,
			int attempts, int delay, u32 *valp);

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
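
/*
 * Illustrative sketch of t4_wait_op_done() use (the register, mask and
 * timing values below are assumed, not taken from this header): poll up
 * to 10 times, 1us apart, until (reg & mask) becomes zero (polarity == 0)
 * or non-zero (polarity == 1); a non-zero return means the condition was
 * not met within the given attempts.
 *
 *	if (t4_wait_op_done(adap, reg, mask, 0, 10, 1))
 *		return -ETIMEDOUT;
 */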

#define for_each_port(adapter, iter) \
	for (iter = 0; iter < (adapter)->params.nports; ++iter)
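
/*
 * Example use of for_each_port() (a minimal sketch; the code that obtains
 * `adapter` is assumed):
 *
 *	int i;
 *
 *	for_each_port(adapter, i)
 *		t4_clr_port_stats(adapter, i);
 */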

void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val);
void t4_intr_enable(struct adapter *adapter);
void t4_intr_disable(struct adapter *adapter);
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta);
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset);
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size,
				unsigned int cache_line_size,
				enum chip_type chip_compat);
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size);
int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val);
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val, int timeout);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val);
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
		     unsigned int portfunc, unsigned int idstype);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size);
int t4_free_vi(struct adapter *adap, unsigned int mbox,
	       unsigned int pf, unsigned int vf,
	       unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt);
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en);
int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
		     unsigned int pf, unsigned int vf, unsigned int iqid,
		     unsigned int fl0id, unsigned int fl1id);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid);

static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
	return adap->params.vpd.cclk / 1000;
}

static inline unsigned int us_to_core_ticks(const struct adapter *adap,
					    unsigned int us)
{
	return (us * adap->params.vpd.cclk) / 1000;
}

static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
					    unsigned int ticks)
{
	/* add Core Clock / 2 to round ticks to nearest uS */
	return ((ticks * 1000 + adapter->params.vpd.cclk / 2) /
		adapter->params.vpd.cclk);
}
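
/*
 * Worked example for the conversions above (illustrative; cclk is the
 * core clock in kHz, as implied by the divisions by 1000): with
 * cclk = 250000 (250MHz), core_ticks_per_usec() returns 250,
 * us_to_core_ticks(adap, 10) returns 2500, and
 * core_ticks_to_us(adapter, 2500) rounds back to 10.
 */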

int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout);
int t4_wr_mbox_meat(struct adapter *adap, int mbox,
		    const void __attribute__((__may_alias__)) *cmd, int size,
		    void *rpl, bool sleep_ok);

static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
				     const void *cmd, int size, void *rpl,
				     int timeout)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
				       timeout);
}

int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p);

static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
			     int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
}

static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
				int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}
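
/*
 * Note on the mailbox helpers above (illustrative summary, based on the
 * sleep_ok flag they pass down): t4_wr_mbox() may block while waiting for
 * the firmware reply and so is only safe in a sleepable context, while
 * t4_wr_mbox_ns() is the "no sleep" variant for contexts that must not
 * block. Both send `size` bytes of `cmd` to mailbox `mbox` and, when
 * `rpl` is non-NULL, copy the firmware reply back into it.
 */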

void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx);
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx);

int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented);
int t4_flash_cfg_addr(struct adapter *adapter);
unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_get_port_stats_offset(struct adapter *adap, int idx,
			      struct port_stats *stats,
			      struct port_stats *offset);
void t4_clr_port_stats(struct adapter *adap, int idx);
void t4_reset_link_config(struct adapter *adap, int idx);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_get_flash_params(struct adapter *adapter);
int t4_get_chip_type(struct adapter *adap, int ver);
int t4_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
int t4_init_rss_mode(struct adapter *adap, int mbox);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq);

enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
		      unsigned int qtype, u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid);

int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
unsigned int t4_get_regs_len(struct adapter *adap);
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
int t4_seeprom_wp(struct adapter *adapter, int enable);

#ifdef __cplusplus
}
#endif

#endif /* __CHELSIO_COMMON_H */