/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#ifndef __ECORE_INT_API_H__
#define __ECORE_INT_API_H__

#ifndef __EXTRACT__LINUX__
#define ECORE_SB_IDX		0x0002

#define RX_PI		0
#define TX_PI(tc)	(RX_PI + 1 + tc)

#ifndef ECORE_INT_MODE
#define ECORE_INT_MODE
enum ecore_int_mode {
	ECORE_INT_MODE_INTA,
	ECORE_INT_MODE_MSIX,
	ECORE_INT_MODE_MSI,
	ECORE_INT_MODE_POLL,
};
#endif

struct ecore_sb_info {
	void *sb_virt; /* ptr to "struct status_block_e{4,5}" */
	u32 sb_size; /* size of "struct status_block_e{4,5}" */
	__le16 *sb_pi_array; /* ptr to "sb_virt->pi_array" */
	__le32 *sb_prod_index; /* ptr to "sb_virt->prod_index" */
#define STATUS_BLOCK_PROD_INDEX_MASK	0xFFFFFF

	dma_addr_t sb_phys;
	u32 sb_ack;		/* Last given ack */
	u16 igu_sb_id;
	void OSAL_IOMEM *igu_addr;
	u8 flags;
#define ECORE_SB_INFO_INIT	0x1
#define ECORE_SB_INFO_SETUP	0x2

#ifdef ECORE_CONFIG_DIRECT_HWFN
	struct ecore_hwfn *p_hwfn;
#endif
	struct ecore_dev *p_dev;
};

struct ecore_sb_info_dbg {
	u32 igu_prod;
	u32 igu_cons;
	u16 pi[PIS_PER_SB];
};

struct ecore_sb_cnt_info {
	/* Original, current, and free SBs for PF */
	int orig;
	int cnt;
	int free_cnt;

	/* Original, current and free SBs for child VFs */
	int iov_orig;
	int iov_cnt;
	int free_cnt_iov;
};

static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
{
	u32 prod = 0;
	u16 rc = 0;

	/* barrier(); status block is written to by the chip */
	/* FIXME: need some sort of barrier. */
	prod = OSAL_LE32_TO_CPU(*sb_info->sb_prod_index) &
	       STATUS_BLOCK_PROD_INDEX_MASK;
	if (sb_info->sb_ack != prod) {
		sb_info->sb_ack = prod;
		rc |= ECORE_SB_IDX;
	}

	OSAL_MMIOWB(sb_info->p_dev);
	return rc;
}

/**
 *
 * @brief This function creates an update command for interrupts that is
 *        written to the IGU.
 *
 * @param sb_info	- This is the structure allocated and
 *	   initialized per status block. Assumption is
 *	   that it was initialized using ecore_int_sb_init
 * @param int_cmd	- Enable/Disable/Nop
 * @param upd_flg	- whether the IGU consumer should be
 *	   updated.
 *
 * @return void
 */
static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info,
				     enum igu_int_cmd int_cmd, u8 upd_flg)
{
	struct igu_prod_cons_update igu_ack;

	OSAL_MEMSET(&igu_ack, 0, sizeof(struct igu_prod_cons_update));
	igu_ack.sb_id_and_flags =
	    ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
	     (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
	     (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
	     (IGU_SEG_ACCESS_REG << IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

#ifdef ECORE_CONFIG_DIRECT_HWFN
	DIRECT_REG_WR(sb_info->p_hwfn, sb_info->igu_addr,
		      igu_ack.sb_id_and_flags);
#else
	DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, igu_ack.sb_id_and_flags);
#endif
	/* Both segments (interrupts & acks) are written to the same address;
	 * need to guarantee all commands will be received (in-order) by HW.
	 */
	OSAL_MMIOWB(sb_info->p_dev);
	OSAL_BARRIER(sb_info->p_dev);
}
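
/* Illustrative usage (a minimal sketch, not part of this API): a fastpath
 * interrupt/poll handler typically checks whether the status block index
 * advanced, processes the corresponding completions, and then acks the IGU
 * while re-enabling the interrupt. example_fp_handler() and
 * example_process_completions() are hypothetical placeholders.
 *
 *	static void example_fp_handler(struct ecore_sb_info *sb_info)
 *	{
 *		u16 changes = ecore_sb_update_sb_idx(sb_info);
 *
 *		if (changes & ECORE_SB_IDX)
 *			example_process_completions(sb_info);
 *
 *		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
 *	}
 */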

#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn,
					  void OSAL_IOMEM *addr,
					  int size, u32 *data)
#else
static OSAL_INLINE void __internal_ram_wr(__rte_unused void *p_hwfn,
					  void OSAL_IOMEM *addr,
					  int size, u32 *data)
#endif
{
	unsigned int i;

	for (i = 0; i < size / sizeof(*data); i++)
		DIRECT_REG_WR(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i], data[i]);
}

#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void __internal_ram_wr_relaxed(struct ecore_hwfn *p_hwfn,
						  void OSAL_IOMEM *addr,
						  int size, u32 *data)
#else
static OSAL_INLINE void __internal_ram_wr_relaxed(__rte_unused void *p_hwfn,
						  void OSAL_IOMEM *addr,
						  int size, u32 *data)
#endif
{
	unsigned int i;

	for (i = 0; i < size / sizeof(*data); i++)
		DIRECT_REG_WR_RELAXED(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i],
				      data[i]);
}

#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn,
					void OSAL_IOMEM *addr,
					int size, u32 *data)
{
	__internal_ram_wr_relaxed(p_hwfn, addr, size, data);
}
#else
static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr,
					int size, u32 *data)
{
	__internal_ram_wr_relaxed(OSAL_NULL, addr, size, data);
}
#endif
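
/* Illustrative usage (a minimal sketch, not part of this API): copy a small
 * dword buffer into device internal RAM. example_ram_addr and example_buf are
 * hypothetical placeholders; size is given in bytes and is expected to be a
 * multiple of sizeof(u32).
 *
 *	u32 example_buf[4] = { 0x1, 0x2, 0x3, 0x4 };
 *
 * #ifdef ECORE_CONFIG_DIRECT_HWFN
 *	internal_ram_wr(p_hwfn, example_ram_addr, sizeof(example_buf),
 *			example_buf);
 * #else
 *	internal_ram_wr(example_ram_addr, sizeof(example_buf), example_buf);
 * #endif
 */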

#endif

struct ecore_hwfn;
struct ecore_ptt;

enum ecore_coalescing_fsm {
	ECORE_COAL_RX_STATE_MACHINE,
	ECORE_COAL_TX_STATE_MACHINE
};

/**
 * @brief ecore_int_cau_conf_pi - configure the CAU for a given
 *        status block
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_sb
 * @param pi_index
 * @param coalescing_fsm
 * @param timeset
 */
void ecore_int_cau_conf_pi(struct ecore_hwfn		*p_hwfn,
			   struct ecore_ptt		*p_ptt,
			   struct ecore_sb_info		*p_sb,
			   u32				pi_index,
			   enum ecore_coalescing_fsm	coalescing_fsm,
			   u8				timeset);
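
/* Illustrative usage (a minimal sketch, not part of this API): program the Rx
 * protocol index of a fastpath status block with a coalescing timeset. The
 * timeset value 0x7f is a hypothetical placeholder; RX_PI/TX_PI() above select
 * the protocol index within the status block.
 *
 *	ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb, RX_PI,
 *			      ECORE_COAL_RX_STATE_MACHINE, 0x7f);
 */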

/**
 *
 * @brief ecore_int_igu_enable_int - enable device interrupts
 *
 * @param p_hwfn
 * @param p_ptt
 * @param int_mode - interrupt mode to use
 */
void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      enum ecore_int_mode int_mode);

/**
 *
 * @brief ecore_int_igu_disable_int - disable device interrupts
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt);

/**
 *
 * @brief ecore_int_igu_read_sisr_reg - Reads the single isr multiple dpc
 *        register from igu.
 *
 * @param p_hwfn
 *
 * @return u64
 */
u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn);

#define ECORE_SP_SB_ID 0xffff

/**
 * @brief ecore_int_sb_init - Initializes the sb_info structure.
 *
 * Once the structure is initialized it can be passed to SB related functions.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param sb_info	points to an uninitialized (but
 *			allocated) sb_info structure
 * @param sb_virt_addr
 * @param sb_phy_addr
 * @param sb_id		the sb_id to be used (zero based in driver)
 *			should use ECORE_SP_SB_ID for SP Status block
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_sb_info *sb_info,
				       void *sb_virt_addr,
				       dma_addr_t sb_phy_addr, u16 sb_id);
/**
 * @brief ecore_int_sb_setup - Set up the SB.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param sb_info	initialized sb_info structure
 */
void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info);
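
/* Illustrative usage (a minimal sketch, not part of this API): initialize an
 * sb_info against freshly allocated DMA-coherent memory and then program it
 * into the device. example_sb_virt, example_sb_phys and the vector index 0 are
 * hypothetical placeholders supplied by the caller's OSAL DMA allocation.
 *
 *	struct ecore_sb_info sb_info;
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_int_sb_init(p_hwfn, p_ptt, &sb_info,
 *			       example_sb_virt, example_sb_phys, 0);
 *	if (rc == ECORE_SUCCESS)
 *		ecore_int_sb_setup(p_hwfn, p_ptt, &sb_info);
 */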

/**
 * @brief ecore_int_sb_release - releases the sb_info structure.
 *
 * Once the structure is released, its memory can be freed.
 *
 * @param p_hwfn
 * @param sb_info	points to an allocated sb_info structure
 * @param sb_id		the sb_id to be used (zero based in driver)
 *			should never be equal to ECORE_SP_SB_ID
 *			(SP Status block)
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
					  struct ecore_sb_info *sb_info,
					  u16 sb_id);

/**
 * @brief ecore_int_sp_dpc - To be called when an interrupt is received on the
 *        default status block.
 *
 * @param hwfn_cookie - the hwfn pointer, passed as an osal_int_ptr_t cookie
 *
 */
void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie);

/**
 * @brief ecore_int_get_num_sbs - get the number of status
 *        blocks configured for this function in the igu.
 *
 * @param p_hwfn
 * @param p_sb_cnt_info
 *
 * @return
 */
void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
			   struct ecore_sb_cnt_info *p_sb_cnt_info);
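
/* Illustrative usage (a minimal sketch, not part of this API): query how many
 * status blocks the IGU provides for this function before sizing the fastpath
 * queues. requested_queues is a hypothetical placeholder.
 *
 *	struct ecore_sb_cnt_info sb_cnt_info;
 *
 *	OSAL_MEMSET(&sb_cnt_info, 0, sizeof(sb_cnt_info));
 *	ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
 *	if (requested_queues > sb_cnt_info.cnt)
 *		requested_queues = sb_cnt_info.cnt;
 */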

/**
 * @brief ecore_int_disable_post_isr_release - performs the cleanup post ISR
 *        release. The API needs to be called after releasing all slowpath IRQs
 *        of the device.
 *
 * @param p_dev
 *
 */
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev);

/**
 * @brief ecore_int_attn_clr_enable - sets whether the general behavior is
 *        preventing attentions from being reasserted, or following the
 *        attributes of the specific attention.
 *
 * @param p_dev
 * @param clr_enable
 *
 */
void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable);

/**
 * @brief Read debug information regarding a given SB.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_sb - pointer to the status block for which we want to get info.
 * @param p_info - pointer to struct to fill with information regarding SB.
 *
 * @return ECORE_SUCCESS if pointer is filled; failure otherwise.
 */
enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  struct ecore_sb_info *p_sb,
					  struct ecore_sb_info_dbg *p_info);

/**
 * @brief - Move a free Status block between PF and child VF
 *
 * @param p_hwfn
 * @param p_ptt
 * @param sb_id - The PF fastpath vector to be moved [re-assigned if claiming
 *                from VF, given-up if moving to VF]
 * @param b_to_vf - PF->VF == true, VF->PF == false
 *
 * @return ECORE_SUCCESS if SB successfully moved.
 */
enum _ecore_status_t
ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			  u16 sb_id, bool b_to_vf);

/**
 * @brief - Doorbell Recovery handler.
 *          Run DB_REAL_DEAL doorbell recovery in case of PF overflow
 *          (and flush DORQ if needed), otherwise run DB_REC_ONCE.
 *
 * @param p_hwfn
 * @param p_ptt
 */
enum _ecore_status_t ecore_db_rec_handler(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt);
#endif