/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#ifndef __ECORE_INT_API_H__
#define __ECORE_INT_API_H__

#ifndef __EXTRACT__LINUX__
#define ECORE_SB_IDX		0x0002

#define RX_PI		0
#define TX_PI(tc)	(RX_PI + 1 + tc)

#ifndef ECORE_INT_MODE
#define ECORE_INT_MODE
enum ecore_int_mode {
	ECORE_INT_MODE_INTA,
	ECORE_INT_MODE_MSIX,
	ECORE_INT_MODE_MSI,
	ECORE_INT_MODE_POLL,
};
#endif

struct ecore_sb_info {
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	u32 sb_ack; /* Last given ack */
	u16 igu_sb_id;
	void OSAL_IOMEM *igu_addr;
	u8 flags;
#define ECORE_SB_INFO_INIT	0x1
#define ECORE_SB_INFO_SETUP	0x2

#ifdef ECORE_CONFIG_DIRECT_HWFN
	struct ecore_hwfn *p_hwfn;
#endif
	struct ecore_dev *p_dev;
};

struct ecore_sb_info_dbg {
	u32 igu_prod;
	u32 igu_cons;
	u16 pi[PIS_PER_SB_E4];
};

struct ecore_sb_cnt_info {
	/* Original, current, and free SBs for PF */
	int orig;
	int cnt;
	int free_cnt;

	/* Original, current, and free SBs for child VFs */
	int iov_orig;
	int iov_cnt;
	int free_cnt_iov;
};

static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
{
	u32 prod = 0;
	u16 rc = 0;

	/* barrier(); status block is written to by the chip */
	/* FIXME: need some sort of barrier. */
	prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &
	       STATUS_BLOCK_E4_PROD_INDEX_MASK;
	if (sb_info->sb_ack != prod) {
		sb_info->sb_ack = prod;
		rc |= ECORE_SB_IDX;
	}

	OSAL_MMIOWB(sb_info->p_dev);
	return rc;
}

/**
 *
 * @brief This function creates an update command for interrupts that is
 *        written to the IGU.
 *
 * @param sb_info - This is the structure allocated and
 *                  initialized per status block. Assumption is
 *                  that it was initialized using ecore_int_sb_init
 * @param int_cmd - Enable/Disable/Nop
 * @param upd_flg - whether the igu consumer should be updated.
 *
 * @return OSAL_INLINE void
 */
static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info,
				     enum igu_int_cmd int_cmd, u8 upd_flg)
{
	struct igu_prod_cons_update igu_ack;

	OSAL_MEMSET(&igu_ack, 0, sizeof(struct igu_prod_cons_update));
	igu_ack.sb_id_and_flags =
		((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		 (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		 (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		 (IGU_SEG_ACCESS_REG << IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

#ifdef ECORE_CONFIG_DIRECT_HWFN
	DIRECT_REG_WR(sb_info->p_hwfn, sb_info->igu_addr,
		      igu_ack.sb_id_and_flags);
#else
	DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, igu_ack.sb_id_and_flags);
#endif
	/* Both segments (interrupts & acks) are written to the same address;
	 * need to guarantee all commands will be received (in-order) by HW.
	 */
	OSAL_MMIOWB(sb_info->p_dev);
	OSAL_BARRIER(sb_info->p_dev);
}
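/* Illustrative usage sketch (not part of this API): a typical fastpath ISR
 * first latches the status block producer index via ecore_sb_update_sb_idx(),
 * processes the pending completions, and then re-arms the IGU with
 * ecore_sb_ack(). The handler and its completion-processing helper below are
 * hypothetical; IGU_INT_ENABLE is assumed to come from the HSI igu_int_cmd
 * enum.
 *
 *	static void example_fp_isr(struct ecore_sb_info *sb_info)
 *	{
 *		// Latch the new producer index written by the chip
 *		if (ecore_sb_update_sb_idx(sb_info) & ECORE_SB_IDX)
 *			example_process_completions(sb_info);	// hypothetical
 *
 *		// Ack the latched index and re-enable the interrupt line
 *		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
 *	}
 */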
#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn,
					  void OSAL_IOMEM *addr,
					  int size, u32 *data)
#else
static OSAL_INLINE void __internal_ram_wr(__rte_unused void *p_hwfn,
					  void OSAL_IOMEM *addr,
					  int size, u32 *data)
#endif
{
	unsigned int i;

	for (i = 0; i < size / sizeof(*data); i++)
		DIRECT_REG_WR(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i], data[i]);
}

#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void __internal_ram_wr_relaxed(struct ecore_hwfn *p_hwfn,
						  void OSAL_IOMEM *addr,
						  int size, u32 *data)
#else
static OSAL_INLINE void __internal_ram_wr_relaxed(__rte_unused void *p_hwfn,
						  void OSAL_IOMEM *addr,
						  int size, u32 *data)
#endif
{
	unsigned int i;

	for (i = 0; i < size / sizeof(*data); i++)
		DIRECT_REG_WR_RELAXED(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i],
				      data[i]);
}

#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn,
					void OSAL_IOMEM *addr,
					int size, u32 *data)
{
	__internal_ram_wr_relaxed(p_hwfn, addr, size, data);
}
#else
static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr,
					int size, u32 *data)
{
	__internal_ram_wr_relaxed(OSAL_NULL, addr, size, data);
}
#endif

#endif

struct ecore_hwfn;
struct ecore_ptt;

enum ecore_coalescing_fsm {
	ECORE_COAL_RX_STATE_MACHINE,
	ECORE_COAL_TX_STATE_MACHINE
};

/**
 * @brief ecore_int_cau_conf_pi - configure cau for a given
 *        status block
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_sb
 * @param pi_index
 * @param coalescing_fsm
 * @param timeset
 */
void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   struct ecore_sb_info *p_sb,
			   u32 pi_index,
			   enum ecore_coalescing_fsm coalescing_fsm,
			   u8 timeset);

/**
 *
 * @brief ecore_int_igu_enable_int - enable device interrupts
 *
 * @param p_hwfn
 * @param p_ptt
 * @param int_mode - interrupt mode to use
 */
void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      enum ecore_int_mode int_mode);

/**
 *
 * @brief ecore_int_igu_disable_int - disable device interrupts
 *
 * @param p_hwfn
 * @param p_ptt
 */
void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt);

/**
 *
 * @brief ecore_int_igu_read_sisr_reg - Reads the single ISR multiple DPC
 *        register from the IGU.
 *
 * @param p_hwfn
 *
 * @return u64
 */
u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn);

#define ECORE_SP_SB_ID 0xffff

/**
 * @brief ecore_int_sb_init - Initializes the sb_info structure.
 *
 * Once the structure is initialized it can be passed to sb-related functions.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param sb_info	points to an uninitialized (but allocated)
 *			sb_info structure
 * @param sb_virt_addr
 * @param sb_phy_addr
 * @param sb_id		the sb_id to be used (zero based in driver)
 *			should use ECORE_SP_SB_ID for SP Status block
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_sb_info *sb_info,
				       void *sb_virt_addr,
				       dma_addr_t sb_phy_addr, u16 sb_id);
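/* Illustrative initialization sketch (assumptions flagged inline): the caller
 * owns both the sb_info structure and the DMA-coherent status block memory.
 * OSAL_DMA_ALLOC_COHERENT is assumed to be the OSAL coherent-allocation
 * helper; sb_id is the zero-based fastpath vector chosen by the driver.
 *
 *	// sb_info points to an allocated but uninitialized struct ecore_sb_info
 *	dma_addr_t sb_phys;
 *	void *sb_virt;
 *	enum _ecore_status_t rc;
 *
 *	sb_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &sb_phys,
 *					  sizeof(struct status_block_e4));
 *	if (sb_virt == OSAL_NULL)
 *		return ECORE_NOMEM;
 *
 *	rc = ecore_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt, sb_phys, sb_id);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 */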
/**
 * @brief ecore_int_sb_setup - Setup the sb.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param sb_info	initialized sb_info structure
 */
void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info);

/**
 * @brief ecore_int_sb_release - releases the sb_info structure.
 *
 * Once the structure is released, its memory can be freed.
 *
 * @param p_hwfn
 * @param sb_info	points to an allocated sb_info structure
 * @param sb_id		the sb_id to be used (zero based in driver)
 *			should never be equal to ECORE_SP_SB_ID
 *			(SP Status block)
 *
 * @return enum _ecore_status_t
 */
enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
					  struct ecore_sb_info *sb_info,
					  u16 sb_id);

/**
 * @brief ecore_int_sp_dpc - To be called when an interrupt is received on the
 *        default status block.
 *
 * @param hwfn_cookie - opaque pointer to the hwfn
 *
 */
void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie);

/**
 * @brief ecore_int_get_num_sbs - get the number of status
 *        blocks configured for this function in the IGU.
 *
 * @param p_hwfn
 * @param p_sb_cnt_info
 *
 * @return
 */
void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
			   struct ecore_sb_cnt_info *p_sb_cnt_info);

/**
 * @brief ecore_int_disable_post_isr_release - performs the cleanup post ISR
 *        release. The API needs to be called after releasing all slowpath IRQs
 *        of the device.
 *
 * @param p_dev
 *
 */
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev);

/**
 * @brief ecore_int_attn_clr_enable - controls whether attentions are prevented
 *        from being reasserted, or whether the attributes of the specific
 *        attention are followed.
 *
 * @param p_dev
 * @param clr_enable
 *
 */
void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable);

/**
 * @brief Read debug information regarding a given SB.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param p_sb - pointer to the status block for which to get info.
 * @param p_info - pointer to struct to fill with information regarding SB.
 *
 * @return ECORE_SUCCESS if pointer is filled; failure otherwise.
 */
enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  struct ecore_sb_info *p_sb,
					  struct ecore_sb_info_dbg *p_info);
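/* Illustrative debug sketch: dump the IGU producer/consumer values and the
 * per-protocol-index entries of one status block. DP_NOTICE is assumed to be
 * the ecore debug-print macro taking (p_hwfn, is_assert, fmt, ...).
 *
 *	struct ecore_sb_info_dbg dbg;
 *	u16 pi;
 *
 *	if (ecore_int_get_sb_dbg(p_hwfn, p_ptt, p_sb, &dbg) != ECORE_SUCCESS)
 *		return;
 *
 *	DP_NOTICE(p_hwfn, false, "IGU: prod 0x%08x cons 0x%08x\n",
 *		  dbg.igu_prod, dbg.igu_cons);
 *	for (pi = 0; pi < PIS_PER_SB_E4; pi++)
 *		DP_NOTICE(p_hwfn, false, "PI[%u] = %u\n", pi, dbg.pi[pi]);
 */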
/**
 * @brief - Move a free Status block between PF and child VF
 *
 * @param p_hwfn
 * @param p_ptt
 * @param sb_id - The PF fastpath vector to be moved [re-assigned if claiming
 *                from VF, given-up if moving to VF]
 * @param b_to_vf - PF->VF == true, VF->PF == false
 *
 * @return ECORE_SUCCESS if SB successfully moved.
 */
enum _ecore_status_t
ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			  u16 sb_id, bool b_to_vf);

/**
 * @brief - Doorbell Recovery handler.
 *          Run DB_REC_REAL_DEAL doorbell recovery in case of PF overflow
 *          (and flush DORQ if needed), otherwise run DB_REC_ONCE.
 *
 * @param p_hwfn
 * @param p_ptt
 */
enum _ecore_status_t ecore_db_rec_handler(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt);
#endif