/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_string_fns.h>

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_spq.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_init_ops.h"
#include "ecore_rt_defs.h"
#include "ecore_int.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_hw_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_mcp.h"

struct ecore_pi_info {
	ecore_int_comp_cb_t comp_cb;
	void *cookie; /* Will be sent to the completion cb function */
};

struct ecore_sb_sp_info {
	struct ecore_sb_info sb_info;

	/* Per protocol index data */
	struct ecore_pi_info pi_info_arr[MAX_PIS_PER_SB];
	osal_size_t pi_info_arr_size;
};

enum ecore_attention_type {
	ECORE_ATTN_TYPE_ATTN,
	ECORE_ATTN_TYPE_PARITY,
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)

struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY		(1 << 0)

#define ATTENTION_LENGTH_MASK		(0x00000ff0)
#define ATTENTION_LENGTH_SHIFT		(4)
#define ATTENTION_LENGTH(flags)		(((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE		(1 << ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR			(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT		((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK		(0x000ff000)
#define ATTENTION_OFFSET_SHIFT		(12)

#define ATTENTION_BB_MASK		(0xf)
#define ATTENTION_BB_SHIFT		(20)
#define ATTENTION_BB(value)		((value) << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT		(1 << 24)

#define ATTENTION_CLEAR_ENABLE		(1 << 28)
	unsigned int flags;

	/* Callback to call if attention will be triggered */
	enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);

	enum block_id block_index;
};

struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS		(8)
#define NUM_ATTN_REGS		(9)
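/* The 'flags' word above packs several properties of an AEU source:
 * bit 0 marks parity, bits [11:4] hold how many AEU bits the entry spans,
 * bits [19:12] an optional starting offset, bits [23:20] an index into
 * aeu_descs_special[] (see ATTENTION_BB), bit 24 marks a BB-specific
 * meaning and bit 28 the auto-clear policy. Illustrative decode sketch,
 * not compiled into the driver:
 */
#if 0
static void example_decode_attn_flags(unsigned int flags)
{
	unsigned int len = ATTENTION_LENGTH(flags); /* # of AEU bits spanned */
	bool is_parity = !!(flags & ATTENTION_PARITY);

	/* e.g. ATTENTION_PAR_INT == (2 << 4) | 1: spans two bits,
	 * the first of which is a parity source.
	 */
	(void)len;
	(void)is_parity;
}
#endif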
static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);

	return ECORE_SUCCESS;
}

#define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK		(0x3c000)
#define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT	(14)
#define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK		(0x03fc0)
#define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT	(6)
#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK	(0x00020)
#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT	(5)
#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK	(0x0001e)
#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT	(1)
#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK	(0x1)
#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT	(0)
#define ECORE_PSWHST_ATTENTION_VF_DISABLED		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT	(0)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK	(0x1e)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT	(1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x20)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK	(0x3fc0)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT	(6)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK	(0x3c000)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT	(14)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK	(0x3fc0000)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			   PSWHST_REG_VF_DISABLED_ERROR_VALID);

	/* Disabled VF access */
	if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
		u32 addr, data;

		addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
		data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_VF_DISABLED_ERROR_DATA);
		DP_INFO(p_hwfn->p_dev,
			"PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
			" Write [0x%02x] Addr [0x%08x]\n",
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
			     >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
			     >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
			     ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
			     ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
			     ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
			addr);
	}

	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       PSWHST_REG_INCORRECT_ACCESS_VALID);
	if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
		u32 addr, data, length;

		addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
		data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_DATA);
		length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				  PSWHST_REG_INCORRECT_ACCESS_LENGTH);

		DP_INFO(p_hwfn->p_dev,
			"Incorrect access to %08x of length %08x - PF [%02x]"
			" VF [%04x] [valid %02x] client [%02x] write [%02x]"
			" Byte-Enable [%04x] [%08x]\n",
			addr, length,
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
			data);
	}

	/* TODO - We know 'some' of these are legal due to virtualization,
	 * but is it true for all of them?
	 */
	return ECORE_SUCCESS;
}
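/* Every "details" decode above repeats the same (value & MASK) >> SHIFT
 * idiom. A hypothetical helper showing the pattern (the driver keeps the
 * extraction inline instead; sketch only):
 */
#if 0
static u8 example_get_field(u32 value, u32 mask, u32 shift)
{
	return (u8)((value & mask) >> shift);
}

/* e.g. the disabled-VF PF id:
 * pf = example_get_field(data, ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK,
 *			  ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT);
 */
#endif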
/* Register GRC_REG_TIMEOUT_ATTN_ACCESS_VALID */
#define ECORE_GRC_ATTENTION_VALID_BIT_MASK	(0x1)
#define ECORE_GRC_ATTENTION_VALID_BIT_SHIFT	(0)

#define ECORE_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff << 0)
#define ECORE_GRC_ATTENTION_RDWR_BIT		(1 << 23)
#define ECORE_GRC_ATTENTION_MASTER_MASK		(0xf << 24)
#define ECORE_GRC_ATTENTION_MASTER_SHIFT	(24)
#define ECORE_GRC_ATTENTION_PF_MASK		(0xf)
#define ECORE_GRC_ATTENTION_VF_MASK		(0xff << 4)
#define ECORE_GRC_ATTENTION_VF_SHIFT		(4)
#define ECORE_GRC_ATTENTION_PRIV_MASK		(0x3 << 14)
#define ECORE_GRC_ATTENTION_PRIV_SHIFT		(14)
#define ECORE_GRC_ATTENTION_PRIV_VF		(0)
static const char *grc_timeout_attn_master_to_str(u8 master)
{
	switch (master) {
	case 1:
		return "PXP";
	case 2:
		return "MCP";
	case 3:
		return "MSDM";
	case 4:
		return "PSDM";
	case 5:
		return "YSDM";
	case 6:
		return "USDM";
	case 7:
		return "TSDM";
	case 8:
		return "XSDM";
	case 9:
		return "DBU";
	case 10:
		return "DMAE";
	default:
		return "Unknown";
	}
}

static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register. If it is not a timeout do
	 * nothing. It is too late at this stage to differentiate a spurious
	 * interrupt from a fatal GRC attention.
	 */
	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(GET_FIELD(tmp, ECORE_GRC_ATTENTION_VALID_BIT)))
		goto out;

	/* Read the GRC timeout information */
	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

	DP_NOTICE(p_hwfn->p_dev, false,
		  "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
		  tmp2, tmp,
		  (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
						       : "Read from",
		  (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
		  grc_timeout_attn_master_to_str(
			(tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
			ECORE_GRC_ATTENTION_MASTER_SHIFT),
		  (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
		  (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
		    ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
		   ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
		  (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
		  ECORE_GRC_ATTENTION_VF_SHIFT);

	/* Clean the validity bit */
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
out:
	return rc;
}
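/* The GRC timeout data captures the failed address in dword units; the
 * handler above shifts it left twice to print a byte address. Worked
 * example (illustrative only): a raw field of 0x1a000 corresponds to
 * byte address 0x1a000 << 2 == 0x68000.
 */
#if 0
static u32 example_grc_timeout_byte_addr(u32 data0)
{
	return (data0 & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2;
}
#endif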
"VF" : "(Irrelevant:)", 271 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >> 272 ECORE_GRC_ATTENTION_VF_SHIFT); 273 274 /* Clean the validity bit */ 275 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 276 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 277 out: 278 return rc; 279 } 280 281 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29) 282 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26) 283 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20) 284 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 285 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19) 286 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24) 287 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 288 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21) 289 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22) 290 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23) 291 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23) 292 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25) 293 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23) 294 295 enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, 296 struct ecore_ptt *p_ptt, 297 bool is_hw_init) 298 { 299 u32 tmp; 300 char str[512] = {0}; 301 302 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); 303 if (tmp & ECORE_PGLUE_ATTENTION_VALID) { 304 u32 addr_lo, addr_hi, details; 305 306 addr_lo = ecore_rd(p_hwfn, p_ptt, 307 PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 308 addr_hi = ecore_rd(p_hwfn, p_ptt, 309 PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 310 details = ecore_rd(p_hwfn, p_ptt, 311 PGLUE_B_REG_TX_ERR_WR_DETAILS); 312 OSAL_SNPRINTF(str, 512, 313 "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 314 addr_hi, addr_lo, details, 315 (u8)((details & 316 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> 317 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 318 (u8)((details & 319 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> 320 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 321 (u8)((details & 322 ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 323 tmp, 324 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 325 1 : 0), 326 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 327 1 : 0), 328 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 329 1 : 0)); 330 if (is_hw_init) 331 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "%s", str); 332 else 333 DP_NOTICE(p_hwfn, false, "%s", str); 334 } 335 336 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); 337 if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) { 338 u32 addr_lo, addr_hi, details; 339 340 addr_lo = ecore_rd(p_hwfn, p_ptt, 341 PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 342 addr_hi = ecore_rd(p_hwfn, p_ptt, 343 PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 344 details = ecore_rd(p_hwfn, p_ptt, 345 PGLUE_B_REG_TX_ERR_RD_DETAILS); 346 347 DP_NOTICE(p_hwfn, false, 348 "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 349 addr_hi, addr_lo, details, 350 (u8)((details & 351 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> 352 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 353 (u8)((details & 354 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> 355 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 356 (u8)((details & 357 ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 358 tmp, 359 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 360 1 : 0), 361 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 
static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
{
	return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false);
}

static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
{
	DP_NOTICE(p_hwfn, false, "FW assertion!\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);

	return ECORE_INVAL;
}

static enum _ecore_status_t
ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
{
	DP_INFO(p_hwfn, "General attention 35!\n");

	return ECORE_SUCCESS;
}

#define ECORE_DORQ_ATTENTION_REASON_MASK	(0xfffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT	(0x0)
#define ECORE_DORQ_ATTENTION_SIZE_MASK		(0x7f)
#define ECORE_DORQ_ATTENTION_SIZE_SHIFT		(16)

#define ECORE_DB_REC_COUNT	1000
#define ECORE_DB_REC_INTERVAL	100

static enum _ecore_status_t ecore_db_rec_flush_queue(struct ecore_hwfn *p_hwfn,
						     struct ecore_ptt *p_ptt)
{
	u32 count = ECORE_DB_REC_COUNT;
	u32 usage = 1;

	/* Wait for usage to zero or count to run out. This is necessary since
	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
	 * can "split" over the pci. Possibly, the doorbell drop can happen with
	 * half an EDPM in the queue and the other half dropped. Another EDPM
	 * doorbell to the same address (from the doorbell recovery mechanism or
	 * from the doorbelling entity) could have its first half dropped and
	 * its second half interpreted as a continuation of the first. To
	 * prevent such malformed doorbells from reaching the device, flush the
	 * queue before releasing the overflow sticky indication.
	 */
	while (count-- && usage) {
		usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
		OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
	}

	/* should have been depleted by now */
	if (usage) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
			  ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
		return ECORE_TIMEOUT;
	}

	return ECORE_SUCCESS;
}
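/* The flush loop above polls at most ECORE_DB_REC_COUNT times, sleeping
 * ECORE_DB_REC_INTERVAL usec per iteration, so the total budget is
 * 1000 * 100 usec = 100000 usec (100 msec) - the figure printed by the
 * timeout message. Sketch of the same budget, illustrative only:
 */
#if 0
static u32 example_db_rec_budget_usec(void)
{
	return ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT; /* 100000 */
}
#endif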
enum _ecore_status_t ecore_db_rec_handler(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	u32 overflow;
	enum _ecore_status_t rc;

	overflow = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
	DP_NOTICE(p_hwfn, false, "PF Overflow sticky 0x%x\n", overflow);
	if (!overflow) {
		ecore_db_recovery_execute(p_hwfn, DB_REC_ONCE);
		return ECORE_SUCCESS;
	}

	if (ecore_edpm_enabled(p_hwfn)) {
		rc = ecore_db_rec_flush_queue(p_hwfn, p_ptt);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	/* flush any pending (e)dpm as they may never arrive */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);

	/* release overflow sticky indication (stop silently dropping
	 * everything)
	 */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);

	/* repeat all last doorbells (doorbell drop recovery) */
	ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 int_sts, first_drop_reason, details, address, all_drops_reason;
	struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
	enum _ecore_status_t rc;

	int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
	DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
		  int_sts);

	/* int_sts may be zero since all PFs were interrupted for doorbell
	 * overflow but another one already handled it. Can abort here. If
	 * this PF also requires overflow recovery we will be interrupted again.
	 */
	if (!int_sts)
		return ECORE_SUCCESS;

	/* check if db_drop or overflow happened */
	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
		/* obtain data about db drop/overflow */
		first_drop_reason = ecore_rd(p_hwfn, p_ptt,
					     DORQ_REG_DB_DROP_REASON) &
				    ECORE_DORQ_ATTENTION_REASON_MASK;
		details = ecore_rd(p_hwfn, p_ptt,
				   DORQ_REG_DB_DROP_DETAILS);
		address = ecore_rd(p_hwfn, p_ptt,
				   DORQ_REG_DB_DROP_DETAILS_ADDRESS);
		all_drops_reason = ecore_rd(p_hwfn, p_ptt,
					    DORQ_REG_DB_DROP_DETAILS_REASON);

		/* log info */
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Doorbell drop occurred\n"
			  "Address\t\t0x%08x\t(second BAR address)\n"
			  "FID\t\t0x%04x\t\t(Opaque FID)\n"
			  "Size\t\t0x%04x\t\t(in bytes)\n"
			  "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
			  "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
			  address,
			  GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
			  GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
			  first_drop_reason, all_drops_reason);

		rc = ecore_db_rec_handler(p_hwfn, p_ptt);
		OSAL_DB_REC_OCCURRED(p_hwfn);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* clear the doorbell drop details and prepare for next drop */
		ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);

		/* mark interrupt as handled (note: even if drop was due to a
		 * different reason than overflow we mark as handled)
		 */
		ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
			 DORQ_REG_INT_STS_DB_DROP |
			 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);

		/* if there are no indications other than drop indications,
		 * success
		 */
		if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
				 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
				 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
			return ECORE_SUCCESS;
	}

	/* some other indication was present - non recoverable */
	DP_INFO(p_hwfn, "DORQ fatal attention\n");

	return ECORE_INVAL;
}
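/* DORQ_REG_DB_DROP_DETAILS packs the opaque FID into bits [15:0] and the
 * doorbell size, in units of 4 bytes, into bits [22:16]; the handler
 * above therefore multiplies the size field by 4 before logging it.
 * Illustrative decode sketch, not part of the driver:
 */
#if 0
static void example_decode_db_drop(u32 details, u16 *fid, u16 *size_bytes)
{
	*fid = (u16)GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE);
	*size_bytes = (u16)(GET_FIELD(details,
				      ECORE_DORQ_ATTENTION_SIZE) * 4);
}
#endif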
static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
		u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   TM_REG_INT_STS_1);

		if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
			    TM_REG_INT_STS_1_PEND_CONN_SCAN))
			return ECORE_INVAL;

		if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
			   TM_REG_INT_STS_1_PEND_CONN_SCAN))
			DP_INFO(p_hwfn,
				"TM attention on emulation - most likely"
				" results of clock-ratios\n");
		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
		val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
		       TM_REG_INT_MASK_1_PEND_TASK_SCAN;
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);

		return ECORE_SUCCESS;
	}
#endif

	return ECORE_INVAL;
}

/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MCP_UMP_TX,
	AEU_INVERT_REG_SPECIAL_MCP_SCPAD,
	AEU_INVERT_REG_SPECIAL_MAX,
};

static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	{"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
};
{"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 672 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), 673 OSAL_NULL, BLOCK_NWS}, 674 {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 675 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), 676 OSAL_NULL, BLOCK_NWS}, 677 {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 678 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), 679 OSAL_NULL, BLOCK_NWM}, 680 {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 681 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), 682 OSAL_NULL, BLOCK_NWM}, 683 {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID}, 684 {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 685 {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 686 {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 687 {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 688 {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 689 {"OPTE", ATTENTION_PAR, OSAL_NULL, BLOCK_OPTE}, 690 {"MCP", ATTENTION_PAR, OSAL_NULL, BLOCK_MCP}, 691 {"MS", ATTENTION_SINGLE, OSAL_NULL, BLOCK_MS}, 692 {"UMAC", ATTENTION_SINGLE, OSAL_NULL, BLOCK_UMAC}, 693 {"LED", ATTENTION_SINGLE, OSAL_NULL, BLOCK_LED}, 694 {"BMBN", ATTENTION_SINGLE, OSAL_NULL, BLOCK_BMBN}, 695 {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG}, 696 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB}, 697 {"BMB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB}, 698 {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB}, 699 {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB}, 700 {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS}, 701 } 702 }, 703 704 { 705 { /* After Invert 5 */ 706 {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC}, 707 {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1}, 708 {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2}, 709 {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB}, 710 {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF}, 711 {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM}, 712 {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM}, 713 {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM}, 714 {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM}, 715 {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM}, 716 {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM}, 717 {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM}, 718 {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM}, 719 {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM}, 720 {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM}, 721 {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM}, 722 } 723 }, 724 725 { 726 { /* After Invert 6 */ 727 {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM}, 728 {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM}, 729 {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM}, 730 {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM}, 731 {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM}, 732 {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM}, 733 {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM}, 734 {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM}, 735 {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM}, 736 {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD}, 737 {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD}, 738 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD}, 739 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD}, 740 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ}, 741 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG}, 742 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC}, 743 } 744 }, 745 746 { 747 { /* After Invert 7 */ 748 
{"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC}, 749 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU}, 750 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE}, 751 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU}, 752 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 753 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU}, 754 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU}, 755 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM}, 756 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC}, 757 {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF}, 758 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF}, 759 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS}, 760 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC}, 761 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS}, 762 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE}, 763 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 764 {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ}, 765 } 766 }, 767 768 { 769 { /* After Invert 8 */ 770 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2}, 771 {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR}, 772 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2}, 773 {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD}, 774 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2}, 775 {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST}, 776 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2}, 777 {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC}, 778 {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU}, 779 {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI}, 780 {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 781 {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 782 {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 783 {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 784 {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 785 {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 786 {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS}, 787 {"PCIE glue/PXP Exp. 
ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 788 {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 789 {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 790 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 791 MAX_BLOCK_ID}, 792 } 793 }, 794 795 { 796 { /* After Invert 9 */ 797 {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 798 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, 799 MAX_BLOCK_ID}, 800 {"AVS", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 801 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_UMP_TX), OSAL_NULL, 802 BLOCK_AVS_WRAP}, 803 {"AVS", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 804 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_SCPAD), OSAL_NULL, 805 BLOCK_AVS_WRAP}, 806 {"PCIe core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 807 {"PCIe link up", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 808 {"PCIe hot reset", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 809 {"Reserved %d", (9 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 810 MAX_BLOCK_ID}, 811 } 812 }, 813 814 }; 815 816 static struct aeu_invert_reg_bit * 817 ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn, 818 struct aeu_invert_reg_bit *p_bit) 819 { 820 if (!ECORE_IS_BB(p_hwfn->p_dev)) 821 return p_bit; 822 823 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) 824 return p_bit; 825 826 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> 827 ATTENTION_BB_SHIFT]; 828 } 829 830 static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn, 831 struct aeu_invert_reg_bit *p_bit) 832 { 833 return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags & 834 ATTENTION_PARITY); 835 } 836 837 #define ATTN_STATE_BITS (0xfff) 838 #define ATTN_BITS_MASKABLE (0x3ff) 839 struct ecore_sb_attn_info { 840 /* Virtual & Physical address of the SB */ 841 struct atten_status_block *sb_attn; 842 dma_addr_t sb_phys; 843 844 /* Last seen running index */ 845 u16 index; 846 847 /* A mask of the AEU bits resulting in a parity error */ 848 u32 parity_mask[NUM_ATTN_REGS]; 849 850 /* A pointer to the attention description structure */ 851 struct aeu_invert_reg *p_aeu_desc; 852 853 /* Previously asserted attentions, which are still unasserted */ 854 u16 known_attn; 855 856 /* Cleanup address for the link's general hw attention */ 857 u32 mfw_attn_addr; 858 }; 859 860 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn, 861 struct ecore_sb_attn_info *p_sb_desc) 862 { 863 u16 rc = 0, index; 864 865 OSAL_MMIOWB(p_hwfn->p_dev); 866 867 index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index); 868 if (p_sb_desc->index != index) { 869 p_sb_desc->index = index; 870 rc = ECORE_SB_ATT_IDX; 871 } 872 873 OSAL_MMIOWB(p_hwfn->p_dev); 874 875 return rc; 876 } 877 878 /** 879 * @brief ecore_int_assertion - handles asserted attention bits 880 * 881 * @param p_hwfn 882 * @param asserted_bits newly asserted bits 883 * @return enum _ecore_status_t 884 */ 885 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn, 886 u16 asserted_bits) 887 { 888 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 889 u32 igu_mask; 890 891 /* Mask the source of the attention in the IGU */ 892 igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 893 IGU_REG_ATTENTION_ENABLE); 894 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", 895 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); 896 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); 897 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); 898 899 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 900 "inner known ATTN 
#define ATTN_STATE_BITS		(0xfff)
#define ATTN_BITS_MASKABLE	(0x3ff)
struct ecore_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block *sb_attn;
	dma_addr_t sb_phys;

	/* Last seen running index */
	u16 index;

	/* A mask of the AEU bits resulting in a parity error */
	u32 parity_mask[NUM_ATTN_REGS];

	/* A pointer to the attention description structure */
	struct aeu_invert_reg *p_aeu_desc;

	/* Previously asserted attentions, which are still unasserted */
	u16 known_attn;

	/* Cleanup address for the link's general hw attention */
	u32 mfw_attn_addr;
};

static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
				 struct ecore_sb_attn_info *p_sb_desc)
{
	u16 rc = 0, index;

	OSAL_MMIOWB(p_hwfn->p_dev);

	index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
	if (p_sb_desc->index != index) {
		p_sb_desc->index = index;
		rc = ECORE_SB_ATT_IDX;
	}

	OSAL_MMIOWB(p_hwfn->p_dev);

	return rc;
}

/**
 * @brief ecore_int_assertion - handles asserted attention bits
 *
 * @param p_hwfn
 * @param asserted_bits newly asserted bits
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
						u16 asserted_bits)
{
	struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			    IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
			 sb_attn_sw->mfw_attn_addr, 0);
	}

	/* FIXME - this will change once we'll have GOOD gtt definitions */
	DIRECT_REG_WR(p_hwfn,
		      (u8 OSAL_IOMEM *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return ECORE_SUCCESS;
}

static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
				 enum block_id id, enum dbg_attn_type type,
				 bool b_clear)
{
	/* @DPDK */
	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "[block_id %d type %d]\n",
		   id, type);
}

/**
 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
 * cause of the attention
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the attention
 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
 *		       this bit to this group.
 * @param p_bit_name - AEU bit description, for logging purposes
 * @param bitmask - mask of the bit(s) within aeu_en_reg
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
			      struct aeu_invert_reg_bit *p_aeu,
			      u32 aeu_en_reg,
			      const char *p_bit_name,
			      u32 bitmask)
{
	enum _ecore_status_t rc = ECORE_INVAL;
	bool b_fatal = false;

	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
		p_bit_name, bitmask);

	/* Call callback before clearing the interrupt status */
	if (p_aeu->cb) {
		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
			p_bit_name);
		rc = p_aeu->cb(p_hwfn);
	}

	if (rc != ECORE_SUCCESS)
		b_fatal = true;

	/* Print HW block interrupt registers */
	if (p_aeu->block_index != MAX_BLOCK_ID) {
		ecore_int_attn_print(p_hwfn, p_aeu->block_index,
				     ATTN_TYPE_INTERRUPT, !b_fatal);
	}

	/* @DPDK */
	/* Reach assertion if attention is fatal */
	if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) {
#ifndef ASIC_ONLY
		DP_NOTICE(p_hwfn, !CHIP_REV_IS_EMUL(p_hwfn->p_dev),
			  "`%s': Fatal attention\n", p_bit_name);
#else
		DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
			  p_bit_name);
#endif

		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
	}

	/* Prevent this Attention from being asserted in the future */
	if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
#ifndef ASIC_ONLY
	    CHIP_REV_IS_EMUL(p_hwfn->p_dev) ||
#endif
	    p_hwfn->p_dev->attn_clr_en) {
		u32 val;
		u32 mask = ~bitmask;

		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
		DP_ERR(p_hwfn, "`%s' - Disabled future attentions\n",
		       p_bit_name);
	}

	return rc;
}
/**
 * @brief ecore_int_deassertion_parity - handle a single parity AEU source
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the parity
 * @param aeu_en_reg - address of the AEU enable register
 * @param bit_index
 */
static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
					 struct aeu_invert_reg_bit *p_aeu,
					 u32 aeu_en_reg, u8 bit_index)
{
	u32 block_id = p_aeu->block_index, mask, val;

	DP_NOTICE(p_hwfn->p_dev, false,
		  "%s parity attention is set [address 0x%08x, bit %d]\n",
		  p_aeu->bit_name, aeu_en_reg, bit_index);

	if (block_id != MAX_BLOCK_ID) {
		ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);

		/* In A0, there's a single parity bit for several blocks */
		if (block_id == BLOCK_BTB) {
			ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
					     ATTN_TYPE_PARITY, false);
			ecore_int_attn_print(p_hwfn, BLOCK_MCP,
					     ATTN_TYPE_PARITY, false);
		}
	}

	/* Prevent this parity error from being re-asserted */
	mask = ~(0x1 << bit_index);
	val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
		p_aeu->bit_name);
}

#define MISC_REG_AEU_AFTER_INVERT_IGU(n) \
	(MISC_REG_AEU_AFTER_INVERT_1_IGU + (n) * 0x4)

#define MISC_REG_AEU_ENABLE_IGU_OUT(n, group) \
	(MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (n) * 0x4 + \
	 (group) * 0x4 * NUM_ATTN_REGS)
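/* Worked example of the addressing macro above (illustrative): for
 * attention register n = 2 in group 1, MISC_REG_AEU_ENABLE_IGU_OUT(2, 1)
 * resolves to the base register plus 2 * 0x4 bytes for the register,
 * plus 1 * 0x4 * NUM_ATTN_REGS (9) bytes for the group bank, i.e.
 * base + 0x8 + 0x24.
 */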
/**
 * @brief - handles deassertion of previously asserted attentions.
 *
 * @param p_hwfn
 * @param deasserted_bits - newly deasserted bits
 * @return enum _ecore_status_t
 *
 */
static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
						  u16 deasserted_bits)
{
	struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
	u8 i, j, k, bit_idx;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Read the attention registers in the AEU */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
					  MISC_REG_AEU_AFTER_INVERT_IGU(i));
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
	}

	/* Handle parity attentions first */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
		u32 parities;

		aeu_en = MISC_REG_AEU_ENABLE_IGU_OUT(i, 0);
		en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;

		/* Skip register in which no parity bit is currently set */
		if (!parities)
			continue;

		for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

			if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
			    !!(parities & (1 << bit_idx)))
				ecore_int_deassertion_parity(p_hwfn, p_bit,
							     aeu_en, bit_idx);

			bit_idx += ATTENTION_LENGTH(p_bit->flags);
		}
	}

	/* Find non-parity cause for attention and act */
	for (k = 0; k < MAX_ATTN_GRPS; k++) {
		struct aeu_invert_reg_bit *p_aeu;

		/* Handle only groups whose attention is currently deasserted */
		if (!(deasserted_bits & (1 << k)))
			continue;

		for (i = 0; i < NUM_ATTN_REGS; i++) {
			u32 bits;

			aeu_en = MISC_REG_AEU_ENABLE_IGU_OUT(i, k);
			en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
			bits = aeu_inv_arr[i] & en;

			/* Skip if no bit from this group is currently set */
			if (!bits)
				continue;

			/* Find all set bits from current register which belong
			 * to current group, making them responsible for the
			 * previous assertion.
			 */
			for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
				unsigned long int bitmask;
				u8 bit, bit_len;

				/* Need to account bits with changed meaning */
				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];

				bit = bit_idx;
				bit_len = ATTENTION_LENGTH(p_aeu->flags);
				if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
					/* Skip Parity */
					bit++;
					bit_len--;
				}

				/* Find the bits relating to HW-block, then
				 * shift so they'll become LSB.
				 */
				bitmask = bits & (((1 << bit_len) - 1) << bit);
				bitmask >>= bit;

				if (bitmask) {
					u32 flags = p_aeu->flags;
					char bit_name[30];
					u8 num;

					num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
								      bit_len);

					/* Some bits represent more than a
					 * single interrupt. Correctly print
					 * their name.
					 */
					if (ATTENTION_LENGTH(flags) > 2 ||
					    ((flags & ATTENTION_PAR_INT) &&
					     ATTENTION_LENGTH(flags) > 1))
						OSAL_SNPRINTF(bit_name, 30,
							      p_aeu->bit_name,
							      num);
					else
						strlcpy(bit_name,
							p_aeu->bit_name,
							sizeof(bit_name));

					/* We now need to pass bitmask in its
					 * correct position.
					 */
					bitmask <<= bit;

					/* Handle source of the attention */
					ecore_int_deassertion_aeu_bit(p_hwfn,
								      p_aeu,
								      aeu_en,
								      bit_name,
								      bitmask);
				}

				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
			}
		}
	}

	/* Clear IGU indication for the deasserted bits */
	/* FIXME - this will change once we'll have GOOD gtt definitions */
	DIRECT_REG_WR(p_hwfn,
		      (u8 OSAL_IOMEM *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			    IGU_REG_ATTENTION_ENABLE);
	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

	/* Clear deassertion from inner state */
	sb_attn_sw->known_attn &= ~deasserted_bits;

	return rc;
}
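/* The deassertion scan above walks each 32-bit AEU register in
 * descriptor-sized strides: every entry consumes ATTENTION_LENGTH(flags)
 * bits, and for parity+interrupt entries the leading parity bit was
 * already handled by the parity pass. A stripped-down sketch of the same
 * walk (illustrative only, not compiled):
 */
#if 0
static void example_walk_aeu_bits(struct aeu_invert_reg *p_reg, u32 bits)
{
	u8 j, bit_idx;

	for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
		struct aeu_invert_reg_bit *p_bit = &p_reg->bits[j];
		u8 len = ATTENTION_LENGTH(p_bit->flags);

		if (bits & (((1 << len) - 1) << bit_idx)) {
			/* at least one source of this entry is set */
		}

		bit_idx += len;
	}
}
#endif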
static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u16 index = 0, asserted_bits, deasserted_bits;
	u32 attn_bits = 0, attn_acks = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
	 */
	do {
		index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
		attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
		attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
	} while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
	p_sb_attn->sb_index = index;

	/* Attention / Deassertion are meaningful (and in correct state)
	 * only when they differ and are consistent with the known state -
	 * deassertion when previous attention & current ack, and assertion
	 * when current attention with no previous attention
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
			~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
			  p_sb_attn_sw->known_attn;

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	else if (asserted_bits == 0x100)
		DP_INFO(p_hwfn, "MFW indication via attention\n");
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "MFW indication [deassertion]\n");

	if (asserted_bits) {
		rc = ecore_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits)
		rc = ecore_int_deassertion(p_hwfn, deasserted_bits);

	return rc;
}

static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
			      void OSAL_IOMEM *igu_addr, u32 ack_cons)
{
	struct igu_prod_cons_update igu_ack;

	OSAL_MEMSET(&igu_ack, 0, sizeof(struct igu_prod_cons_update));
	igu_ack.sb_id_and_flags =
		((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		 (IGU_SEG_ACCESS_ATTN <<
		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to same place address;
	 * Need to guarantee all commands will be received (in-order) by HW.
	 */
	OSAL_MMIOWB(p_hwfn->p_dev);
	OSAL_BARRIER(p_hwfn->p_dev);
}
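/* Worked example of the assertion/deassertion math in
 * ecore_int_attentions() above, with illustrative values: if
 * attn_bits = 0x101, attn_acks = 0x001 and known_attn = 0x001, then
 *   asserted_bits   = 0x101 & ~0x001 & ATTN_STATE_BITS & ~0x001 = 0x100
 *   deasserted_bits = ~0x101 & 0x001 & ATTN_STATE_BITS &  0x001 = 0x000
 * i.e. bit 8 is newly asserted while bit 0 stays asserted and known.
 */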
void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
	struct ecore_pi_info *pi_info = OSAL_NULL;
	struct ecore_sb_attn_info *sb_attn;
	struct ecore_sb_info *sb_info;
	u16 rc = 0;

	if (!p_hwfn)
		return;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	if (!sb_info) {
		DP_ERR(p_hwfn->p_dev,
		       "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for def status block. Required both for msix +
	 * inta in non-mask mode; in inta it does no harm.
	 */
	ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->p_dev,
		       "Interrupt Status block is NULL -"
		       " cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;

		rc = ecore_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->p_dev,
		       "Attentions Status block is NULL -"
		       " cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. if not just ack them */
	if (!(rc & ECORE_SB_EVENT_MASK)) {
		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If not valid,
	 * ack interrupts and fail.
	 */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & ECORE_SB_ATT_IDX)
		ecore_int_attentions(p_hwfn);

	if (rc & ECORE_SB_IDX) {
		osal_size_t pi;

		/* Since we only looked at the SB index, it's possible more
		 * than a single protocol-index on the SB incremented.
		 * Iterate over all configured protocol indices and check
		 * whether something happened for each.
		 */
		for (pi = 0; pi < p_hwfn->p_sp_sb->pi_info_arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb != OSAL_NULL)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
	}

	ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}

static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_attn) {
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
				       p_sb->sb_phys,
				       SB_ATTN_ALIGNED_SIZE(p_hwfn));
	}
	OSAL_FREE(p_hwfn->p_dev, p_sb);
}

static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
		 DMA_LO(p_hwfn->p_sb_attn->sb_phys));
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
		 DMA_HI(p_hwfn->p_sb_attn->sb_phys));
}

static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
	struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
	int i, j, k;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the pointer to the AEU descriptors */
	sb_info->p_aeu_desc = aeu_descs;

	/* Calculate Parity Masks */
	OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		/* j is array index, k is bit index */
		for (j = 0, k = 0; k < 32; j++) {
			struct aeu_invert_reg_bit *p_aeu;

			p_aeu = &aeu_descs[i].bits[j];
			if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
				sb_info->parity_mask[i] |= 1 << k;

			k += ATTENTION_LENGTH(p_aeu->flags);
		}
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Attn Mask [Reg %d]: 0x%08x\n",
			   i, sb_info->parity_mask[i]);
	}

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
				 MISC_REG_AEU_GENERAL_ATTN_0;

	ecore_int_sb_attn_setup(p_hwfn, p_ptt);
}
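/* Parity-mask sketch for ecore_int_sb_attn_init() above (illustrative):
 * an ATTENTION_PAR_INT descriptor spans two bits with the parity source
 * first, so a register whose first two entries are both ATTENTION_PAR_INT
 * yields parity_mask bits 0 and 2:
 *   j = 0: k = 0 -> parity -> mask |= 1 << 0, k += 2
 *   j = 1: k = 2 -> parity -> mask |= 1 << 2, k += 2
 */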
static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	struct ecore_sb_attn_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
	if (!p_sb) {
		DP_NOTICE(p_dev, false,
			  "Failed to allocate `struct ecore_sb_attn_info'\n");
		return ECORE_NOMEM;
	}

	/* SB ring */
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
					 SB_ATTN_ALIGNED_SIZE(p_hwfn));
	if (!p_virt) {
		DP_NOTICE(p_dev, false,
			  "Failed to allocate status block (attentions)\n");
		OSAL_FREE(p_dev, p_sb);
		return ECORE_NOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return ECORE_SUCCESS;
}

/* coalescing timeout = timeset << (timer_res + 1) */
#define ECORE_CAU_DEF_RX_USECS 24
#define ECORE_CAU_DEF_TX_USECS 48

void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
			     struct cau_sb_entry *p_sb_entry,
			     u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cau_state;
	u8 timer_res;

	OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	cau_state = CAU_HC_DISABLE_STATE;

	if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!p_dev->rx_coalesce_usecs)
			p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
		if (!p_dev->tx_coalesce_usecs)
			p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
	}

	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
	if (p_dev->rx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (p_dev->rx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	if (p_dev->tx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (p_dev->tx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);

	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
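/* Example of the timer_res selection above, using the file's own
 * constants (illustrative): the default ECORE_CAU_DEF_RX_USECS (24) fits
 * the 7-bit timeset (24 <= 0x7F), so timer_res = 0 and the timeset
 * written by ecore_int_cau_conf_sb() is 24 >> 0 = 24; a 200 usec setting
 * exceeds 0xFF, so timer_res = 2 and the timeset becomes 200 >> 2 = 50.
 */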
static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   u16 igu_sb_id, u32 pi_index,
				   enum ecore_coalescing_fsm coalescing_fsm,
				   u8 timeset)
{
	struct cau_pi_entry pi_entry;
	u32 sb_offset, pi_offset;

	if (IS_VF(p_hwfn->p_dev))
		return;	/* @@@TBD MichalK- VF CAU... */

	sb_offset = igu_sb_id * PIS_PER_SB;
	OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));

	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

	pi_offset = sb_offset + pi_index;
	if (p_hwfn->hw_init_done) {
		ecore_wr(p_hwfn, p_ptt,
			 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
			 *((u32 *)&(pi_entry)));
	} else {
		STORE_RT_REG(p_hwfn,
			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     *((u32 *)&(pi_entry)));
	}
}

void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   struct ecore_sb_info *p_sb, u32 pi_index,
			   enum ecore_coalescing_fsm coalescing_fsm,
			   u8 timeset)
{
	_ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
			       pi_index, coalescing_fsm, timeset);
}

void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   dma_addr_t sb_phys, u16 igu_sb_id,
			   u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
				vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&phys_addr,
				    CAU_REG_SB_ADDR_MEMORY +
				    igu_sb_id * sizeof(u64), 2,
				    OSAL_NULL /* default parameters */);
		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&sb_entry,
				    CAU_REG_SB_VAR_MEMORY +
				    igu_sb_id * sizeof(u64), 2,
				    OSAL_NULL /* default parameters */);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		/* eth will open queues for all tcs, so configure all of them
		 * properly, rather than just the active ones
		 */
		u8 num_tc = p_hwfn->hw_info.num_hw_tc;

		u8 timeset, timer_res;
		u8 i;

		/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
		if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
		_ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				       ECORE_COAL_RX_STATE_MACHINE,
				       timeset);

		if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
		for (i = 0; i < num_tc; i++) {
			_ecore_int_cau_conf_pi(p_hwfn, p_ptt,
					       igu_sb_id, TX_PI(i),
					       ECORE_COAL_TX_STATE_MACHINE,
					       timeset);
		}
	}
}

void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	OSAL_MEMSET(sb_info->sb_virt, 0, sb_info->sb_size);

	if (IS_PF(p_hwfn->p_dev))
		ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
				      sb_info->igu_sb_id, 0, 0);
}
struct ecore_igu_block *
ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
{
	struct ecore_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !(p_block->status & ECORE_IGU_STATUS_FREE))
			continue;

		if (!!(p_block->status & ECORE_IGU_STATUS_PF) == b_is_pf)
			return p_block;
	}

	return OSAL_NULL;
}

static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn,
				  u16 vector_id)
{
	struct ecore_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    p_block->vector_number != vector_id)
			continue;

		return igu_id;
	}

	return ECORE_SB_INVALID_IDX;
}

u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
{
	u16 igu_sb_id;

	/* Assuming continuous set of IGU SBs dedicated for given PF */
	if (sb_id == ECORE_SP_SB_ID)
		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	else if (IS_PF(p_hwfn->p_dev))
		igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
	else
		igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);

	if (igu_sb_id == ECORE_SB_INVALID_IDX)
		DP_NOTICE(p_hwfn, true,
			  "Slowpath SB vector %04x doesn't exist\n",
			  sb_id);
	else if (sb_id == ECORE_SP_SB_ID)
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);

	return igu_sb_id;
}

enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_sb_info *sb_info,
				       void *sb_virt_addr,
				       dma_addr_t sb_phy_addr, u16 sb_id)
{
	struct status_block *sb_virt;

	sb_info->sb_virt = sb_virt_addr;
	sb_virt = (struct status_block *)sb_info->sb_virt;

	sb_info->sb_size = sizeof(*sb_virt);
	sb_info->sb_pi_array = sb_virt->pi_array;
	sb_info->sb_prod_index = &sb_virt->prod_index;

	sb_info->sb_phys = sb_phy_addr;

	sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
		return ECORE_INVAL;

	/* Let the IGU info reference the client's SB info */
	if (sb_id != ECORE_SP_SB_ID) {
		if (IS_PF(p_hwfn->p_dev)) {
			struct ecore_igu_info *p_info;
			struct ecore_igu_block *p_block;

			p_info = p_hwfn->hw_info.p_igu_info;
			p_block = &p_info->entry[sb_info->igu_sb_id];

			p_block->sb_info = sb_info;
			p_block->status &= ~ECORE_IGU_STATUS_FREE;
			p_info->usage.free_cnt--;
		} else {
			ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
		}
	}
#ifdef ECORE_CONFIG_DIRECT_HWFN
	sb_info->p_hwfn = p_hwfn;
#endif
	sb_info->p_dev = p_hwfn->p_dev;

	/* The IGU address will hold the absolute address that needs to be
	 * written to for a specific status block.
	 */
	if (IS_PF(p_hwfn->p_dev))
		sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
				    GTT_BAR0_MAP_REG_IGU_CMD +
				    (sb_info->igu_sb_id << 3);
	else
		sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
				    PXP_VF_BAR0_START_IGU +
				    ((IGU_CMD_INT_ACK_BASE +
				      sb_info->igu_sb_id) << 3);

	sb_info->flags |= ECORE_SB_INFO_INIT;

	ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
					  struct ecore_sb_info *sb_info,
					  u16 sb_id)
{
	struct ecore_igu_info *p_info;
	struct ecore_igu_block *p_block;

	if (sb_info == OSAL_NULL)
		return ECORE_SUCCESS;

	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	OSAL_MEMSET(sb_info->sb_virt, 0, sb_info->sb_size);

	if (IS_VF(p_hwfn->p_dev)) {
		ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
		return ECORE_SUCCESS;
	}

	p_info = p_hwfn->hw_info.p_igu_info;
	p_block = &p_info->entry[sb_info->igu_sb_id];

	/* Vector 0 is reserved to Default SB */
	if (p_block->vector_number == 0) {
		DP_ERR(p_hwfn, "Do not free the SP SB using this function\n");
		return ECORE_INVAL;
	}

	/* Lose reference to client's SB info, and fix counters */
	p_block->sb_info = OSAL_NULL;
	p_block->status |= ECORE_IGU_STATUS_FREE;
	p_info->usage.free_cnt++;

	return ECORE_SUCCESS;
}

static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

	if (!p_sb)
		return;

	if (p_sb->sb_info.sb_virt) {
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_sb->sb_info.sb_virt,
				       p_sb->sb_info.sb_phys,
				       SB_ALIGNED_SIZE(p_hwfn));
	}

	OSAL_FREE(p_hwfn->p_dev, p_sb);
}

static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	struct ecore_sb_sp_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb));
	if (!p_sb) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_sb_info'\n");
		return ECORE_NOMEM;
	}

	/* SB ring */
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					 &p_phys, SB_ALIGNED_SIZE(p_hwfn));
	if (!p_virt) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n");
		OSAL_FREE(p_hwfn->p_dev, p_sb);
		return ECORE_NOMEM;
	}

	/* Status Block setup */
	p_hwfn->p_sp_sb = p_sb;
	ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
			  p_virt, p_phys, ECORE_SP_SB_ID);

	p_sb->pi_info_arr_size = PIS_PER_SB;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
					   ecore_int_comp_cb_t comp_cb,
					   void *cookie,
					   u8 *sb_idx, __le16 **p_fw_cons)
{
	struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
	enum _ecore_status_t rc = ECORE_NOMEM;
	u8 pi;

	/* Look for a free index */
	for (pi = 0; pi < p_sp_sb->pi_info_arr_size; pi++) {
		if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
			continue;

		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
		p_sp_sb->pi_info_arr[pi].cookie = cookie;
		*sb_idx = pi;
		*p_fw_cons = &p_sp_sb->sb_info.sb_pi_array[pi];
		rc = ECORE_SUCCESS;
		break;
	}

	return rc;
}
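
/* A registration sketch for the slowpath protocol-index callbacks handled by
 * ecore_int_register_cb()/ecore_int_unregister_cb(); "my_comp_cb" and
 * "my_ctx" are illustrative names for a client-supplied handler and its
 * cookie, which is later passed back to the handler:
 *
 *	u8 sb_idx;
 *	__le16 *p_fw_cons;
 *
 *	rc = ecore_int_register_cb(p_hwfn, my_comp_cb, my_ctx,
 *				   &sb_idx, &p_fw_cons);
 *	... firmware advances *p_fw_cons as completions arrive; on
 *	    teardown: ecore_int_unregister_cb(p_hwfn, sb_idx); ...
 */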

enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
{
	struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;

	if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
		return ECORE_NOMEM;

	p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
	p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
	return ECORE_SUCCESS;
}

u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}

void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      enum ecore_int_mode int_mode)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
		igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN;
	}
#endif

	p_hwfn->p_dev->int_mode = int_mode;
	switch (p_hwfn->p_dev->int_mode) {
	case ECORE_INT_MODE_INTA:
		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case ECORE_INT_MODE_MSI:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case ECORE_INT_MODE_MSIX:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		break;
	case ECORE_INT_MODE_POLL:
		break;
	}

	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}

static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn,
			"FPGA - Don't enable Attentions in IGU and MISC\n");
		return;
	}
#endif

	/* Configure AEU signal change to produce attentions */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

	/* Flush the writes to IGU */
	OSAL_MMIOWB(p_hwfn->p_dev);

	/* Unmask AEU signals toward IGU */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}

enum _ecore_status_t
ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     enum ecore_int_mode int_mode)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	ecore_int_igu_enable_attn(p_hwfn, p_ptt);

	if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, true,
				  "Slowpath IRQ request failed\n");
			return ECORE_NORESOURCES;
		}
		p_hwfn->b_int_requested = true;
	}

	/* Enable interrupt Generation */
	ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);

	p_hwfn->b_int_enabled = 1;

	return rc;
}

void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	if (IS_VF(p_hwfn->p_dev))
		return;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
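
/* The cleanup helper below drives the IGU through its command registers:
 * it writes the cleanup command data to IGU_REG_COMMAND_REG_32LSB_DATA,
 * triggers it via IGU_REG_COMMAND_REG_CTRL, and then polls the matching
 * bit in the per-type CLEANUP_STATUS register until the hardware reflects
 * the requested set/clear state or the polling budget is exhausted.
 */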

#define IGU_CLEANUP_SLEEP_LENGTH (1000)
static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     u32 igu_sb_id,
				     bool cleanup_set,
				     u16 opaque_fid)
{
	u32 data = 0, cmd_ctrl = 0, sb_bit, sb_bit_addr, pxp_addr;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH, val;
	u8 type = 0;

	OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
			   IGU_REG_CLEANUP_STATUS_0) != 0x200);

	/* Use the Control Command Register to perform cleanup. There is an
	 * option to do this using the IGU BAR, but then it can't be used
	 * for VFs.
	 */

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	OSAL_BARRIER(p_hwfn->p_dev);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* Flush the write to IGU */
	OSAL_MMIOWB(p_hwfn->p_dev);

	/* calculate where to read the status bit from */
	sb_bit = 1 << (igu_sb_id % 32);
	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);

	/* Now wait for the command to complete */
	while (--sleep_cnt) {
		val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;
		OSAL_MSLEEP(5);
	}

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn, true,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, igu_sb_id);
}

void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       u16 igu_sb_id, u16 opaque, bool b_set)
{
	struct ecore_igu_block *p_block;
	int pi, i;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "Cleaning SB [%04x]: func_id = %d is_pf = %d vector_num = 0x%0x\n",
		   igu_sb_id, p_block->function_id, p_block->is_pf,
		   p_block->vector_number);

	/* Set */
	if (b_set)
		ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);

	/* Clear */
	ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);

	/* Wait for the IGU SB to cleanup */
	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
		u32 val;

		val = ecore_rd(p_hwfn, p_ptt,
			       IGU_REG_WRITE_DONE_PENDING +
			       ((igu_sb_id / 32) * 4));
		if (val & (1 << (igu_sb_id % 32)))
			OSAL_UDELAY(10);
		else
			break;
	}
	if (i == IGU_CLEANUP_SLEEP_LENGTH)
		DP_NOTICE(p_hwfn, true,
			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
			  igu_sb_id);

	/* Clear the CAU for the SB */
	for (pi = 0; pi < PIS_PER_SB; pi++)
		ecore_wr(p_hwfn, p_ptt,
			 CAU_REG_PI_MEMORY +
			 (igu_sb_id * PIS_PER_SB + pi) * 4,
			 0);
}
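
/* The runtime-init walk below first tweaks IGU_REG_BLOCK_CONFIGURATION (a
 * step the code itself marks as temporary), then runs the single-SB cleanup
 * for every valid PF-owned entry except the default SB, which is cleaned
 * only when the caller passes b_slowpath.
 */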

void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				bool b_set, bool b_slowpath)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block;
	u16 igu_sb_id = 0;
	u32 val = 0;

	/* @@@TBD MichalK temporary... should be moved to init-tool... */
	val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
	/* end temporary */

	for (igu_sb_id = 0;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    (p_block->status & ECORE_IGU_STATUS_DSB))
			continue;

		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
						  p_hwfn->hw_info.opaque_fid,
						  b_set);
	}

	if (b_slowpath)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  p_info->igu_dsb_id,
						  p_hwfn->hw_info.opaque_fid,
						  b_set);
}
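
/* The CAM reset below re-partitions the IGU between the PF and its VFs:
 * the default SB keeps vector 0, the next usage.cnt entries become PF SBs
 * with ascending vector numbers, the following usage.iov_cnt entries are
 * handed to VFs, and any remainder is left unmapped. Each IGU mapping line
 * is rewritten only if its computed value differs from the current
 * hardware contents.
 */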

int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block;
	int pf_sbs, vf_sbs;
	u16 igu_sb_id;
	u32 val, rval;

	if (!RESC_NUM(p_hwfn, ECORE_SB)) {
		/* We're using an old MFW - have to prevent any switching
		 * of SBs between PF and VFs as the driver later wouldn't
		 * be able to tell which belongs to which.
		 */
		p_info->b_allow_pf_vf_change = false;
	} else {
		/* Use the numbers the MFW has provided -
		 * don't forget the MFW accounts for the default SB as well.
		 */
		p_info->b_allow_pf_vf_change = true;

		if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
			DP_INFO(p_hwfn,
				"MFW notifies of 0x%04x PF SBs; IGU indicates only 0x%04x\n",
				RESC_NUM(p_hwfn, ECORE_SB) - 1,
				p_info->usage.cnt);
			p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
		}

		/* TODO - how do we learn about VF SBs from MFW? */
		if (IS_PF_SRIOV(p_hwfn)) {
			u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

			if (vfs != p_info->usage.iov_cnt)
				DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
					   p_info->usage.iov_cnt, vfs);

			/* At this point we know how many SBs we have totally
			 * in IGU + number of PF SBs. So we can validate that
			 * we'd have sufficient for VF.
			 */
			if (vfs > p_info->usage.free_cnt +
				  p_info->usage.free_cnt_iov -
				  p_info->usage.cnt) {
				DP_NOTICE(p_hwfn, true,
					  "Not enough SBs for VFs - 0x%04x SBs, from which %04x are required for the PF and %04x for VFs\n",
					  p_info->usage.free_cnt +
					  p_info->usage.free_cnt_iov,
					  p_info->usage.cnt, vfs);
				return ECORE_INVAL;
			}
		}
	}

	/* Cap the number of VF SBs by the number of VFs */
	if (IS_PF_SRIOV(p_hwfn))
		p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs;

	/* Mark all SBs as free, now in the right PF/VFs division */
	p_info->usage.free_cnt = p_info->usage.cnt;
	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
	p_info->usage.orig = p_info->usage.cnt;
	p_info->usage.iov_orig = p_info->usage.iov_cnt;

	/* We now proceed to re-configure the IGU CAM to reflect the initial
	 * configuration. We can start with the Default SB.
	 */
	pf_sbs = p_info->usage.cnt;
	vf_sbs = p_info->usage.iov_cnt;

	for (igu_sb_id = p_info->igu_dsb_id;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];
		val = 0;

		if (!(p_block->status & ECORE_IGU_STATUS_VALID))
			continue;

		if (p_block->status & ECORE_IGU_STATUS_DSB) {
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = 0;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_DSB;
		} else if (pf_sbs) {
			pf_sbs--;
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = p_info->usage.cnt - pf_sbs;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_FREE;
		} else if (vf_sbs) {
			p_block->function_id =
				p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
				p_info->usage.iov_cnt - vf_sbs;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;
			vf_sbs--;
		} else {
			p_block->function_id = 0;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
		}

		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
			  p_block->function_id);
		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
			  p_block->vector_number);

		/* VF entries would be enabled when VF is initialized */
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

		rval = ecore_rd(p_hwfn, p_ptt,
				IGU_REG_MAPPING_MEMORY +
				sizeof(u32) * igu_sb_id);

		if (rval != val) {
			ecore_wr(p_hwfn, p_ptt,
				 IGU_REG_MAPPING_MEMORY +
				 sizeof(u32) * igu_sb_id,
				 val);

			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number,
				   rval, val);
		}
	}

	return 0;
}
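
/* The default-reset wrapper below rewinds the usage accounting to the
 * counts captured in orig/iov_orig and zeroes those originals before
 * invoking the CAM reset, which then re-derives them from the restored
 * state.
 */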

int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;

	/* Return all the usage indications to default prior to the reset;
	 * The reset expects the !orig to reflect the initial status of the
	 * SBs, and would re-calculate the originals based on those.
	 */
	p_cnt->cnt = p_cnt->orig;
	p_cnt->free_cnt = p_cnt->orig;
	p_cnt->iov_cnt = p_cnt->iov_orig;
	p_cnt->free_cnt_iov = p_cnt->iov_orig;
	p_cnt->orig = 0;
	p_cnt->iov_orig = 0;

	/* TODO - we probably need to re-configure the CAU as well... */
	return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
}

static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 igu_sb_id)
{
	u32 val = ecore_rd(p_hwfn, p_ptt,
			   IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
	struct ecore_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];

	/* Fill the block information */
	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);

	p_block->igu_sb_id = igu_sb_id;
}
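
/* The CAM scan below walks every IGU mapping line once: entries owned by
 * this PF are counted in usage.cnt, entries owned by this PF's VF range in
 * usage.iov_cnt, and the first entry found in either category is latched
 * as the default ("slowpath") SB and excluded from those counts.
 */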

enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_igu_info *p_igu_info;
	struct ecore_igu_block *p_block;
	u32 min_vf = 0, max_vf = 0;
	u16 igu_sb_id;

	p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
						 GFP_KERNEL,
						 sizeof(*p_igu_info));
	if (!p_hwfn->hw_info.p_igu_info)
		return ECORE_NOMEM;
	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Distinguish between existent and non-existent default SB */
	p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;

	/* Find the range of VF ids whose SB belong to this PF */
	if (p_hwfn->p_dev->p_iov_info) {
		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

		min_vf = p_iov->first_vf_in_pf;
		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
	}

	for (igu_sb_id = 0;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		/* Read current entry; Notice it might not belong to this PF */
		ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
		p_block = &p_igu_info->entry[igu_sb_id];

		if ((p_block->is_pf) &&
		    (p_block->function_id == p_hwfn->rel_pf_id)) {
			p_block->status = ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
				p_igu_info->usage.cnt++;
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
			   (p_block->function_id < max_vf)) {
			/* Available for VFs of this PF */
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
				p_igu_info->usage.iov_cnt++;
		}

		/* Mark the first entry belonging to the PF or its VFs
		 * as the default SB [we'll reset IGU prior to first usage].
		 */
		if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
		    (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
			p_igu_info->igu_dsb_id = igu_sb_id;
			p_block->status |= ECORE_IGU_STATUS_DSB;
		}

		/* While this isn't suitable for all clients, limit the
		 * number of prints by having each PF print only its own
		 * entries, with the exception of PF0 which prints everything.
		 */
		if ((p_block->status & ECORE_IGU_STATUS_VALID) ||
		    (p_hwfn->abs_pf_id == 0))
			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number);
	}

	if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) {
		DP_NOTICE(p_hwfn, true,
			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
			  p_igu_info->igu_dsb_id);
		return ECORE_INVAL;
	}

	/* All non default SB are considered free at this point */
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
		   p_igu_info->igu_dsb_id, p_igu_info->usage.cnt,
		   p_igu_info->usage.iov_cnt);

	return ECORE_SUCCESS;
}
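
/* Relocation moves one IGU status block between the PF and its VF range.
 * Moving PF -> VF requires naming the exact PF vector to give up; moving
 * VF -> PF requires that the target vector is not already mapped, after
 * which any free VF-owned line may be claimed. Usage counters and the
 * L2-queue feature accounting are adjusted before the IGU mapping line and
 * CAU are rewritten.
 */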

enum _ecore_status_t
ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			  u16 sb_id, bool b_to_vf)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block = OSAL_NULL;
	u16 igu_sb_id = 0, vf_num = 0;
	u32 val = 0;

	if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn))
		return ECORE_INVAL;

	if (sb_id == ECORE_SP_SB_ID)
		return ECORE_INVAL;

	if (!p_info->b_allow_pf_vf_change) {
		DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n");
		return ECORE_INVAL;
	}

	/* If we're moving a SB from PF to VF, the client had to specify
	 * which vector it wants to move.
	 */
	if (b_to_vf) {
		igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
		if (igu_sb_id == ECORE_SB_INVALID_IDX)
			return ECORE_INVAL;
	}

	/* If we're moving a SB from VF to PF, need to validate there isn't
	 * already a line configured for that vector.
	 */
	if (!b_to_vf) {
		if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) !=
		    ECORE_SB_INVALID_IDX)
			return ECORE_INVAL;
	}

	/* We need to validate that the SB can actually be relocated.
	 * This would also handle the previous case where we've explicitly
	 * stated which IGU SB needs to move.
	 */
	for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !(p_block->status & ECORE_IGU_STATUS_FREE) ||
		    (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) {
			if (b_to_vf)
				return ECORE_INVAL;
			else
				continue;
		}

		break;
	}

	if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
			   "Failed to find a free SB to move\n");
		return ECORE_INVAL;
	}

	/* At this point, p_block points to the SB we want to relocate */
	if (b_to_vf) {
		p_block->status &= ~ECORE_IGU_STATUS_PF;

		/* It doesn't matter which VF number we choose, since we're
		 * going to disable the line; but let's keep it in range.
		 */
		vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;

		p_block->function_id = (u8)vf_num;
		p_block->is_pf = 0;
		p_block->vector_number = 0;

		p_info->usage.cnt--;
		p_info->usage.free_cnt--;
		p_info->usage.iov_cnt++;
		p_info->usage.free_cnt_iov++;

		/* TODO - if SBs aren't really the limiting factor,
		 * then it might not be accurate [in the sense that
		 * we might not need to decrement the feature].
		 */
		p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--;
		p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++;
	} else {
		p_block->status |= ECORE_IGU_STATUS_PF;
		p_block->function_id = p_hwfn->rel_pf_id;
		p_block->is_pf = 1;
		p_block->vector_number = sb_id + 1;

		p_info->usage.cnt++;
		p_info->usage.free_cnt++;
		p_info->usage.iov_cnt--;
		p_info->usage.free_cnt_iov--;

		p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++;
		p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--;
	}

	/* Update the IGU and CAU with the new configuration */
	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
		  p_block->function_id);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
	SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
		  p_block->vector_number);

	ecore_wr(p_hwfn, p_ptt,
		 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id,
		 val);

	ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0,
			      igu_sb_id, vf_num,
			      p_block->is_pf ? 0 : 1);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
		   igu_sb_id, p_block->function_id,
		   p_block->is_pf, p_block->vector_number);

	return ECORE_SUCCESS;
}

/**
 * @brief Initialize igu runtime registers
 *
 * @param p_hwfn
 */
void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
}

#define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
			  IGU_CMD_INT_ACK_BASE)
#define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
			  IGU_CMD_INT_ACK_BASE)
u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
{
	u32 intr_status_hi = 0, intr_status_lo = 0;
	u64 intr_status = 0;

	intr_status_lo = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				LSB_IGU_CMD_ADDR * 8);
	intr_status_hi = REG_RD(p_hwfn,
				GTT_BAR0_MAP_REG_IGU_CMD +
				MSB_IGU_CMD_ADDR * 8);
	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;

	return intr_status;
}

static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
{
	OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
	p_hwfn->b_sp_dpc_enabled = true;
}

static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
{
	p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
	if (!p_hwfn->sp_dpc)
		return ECORE_NOMEM;

	return ECORE_SUCCESS;
}

static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
}
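
/* Interrupt-state lifecycle as implemented below: ecore_int_alloc() builds
 * the slowpath DPC, the slowpath SB and the attention SB, in that order;
 * ecore_int_setup() (re)initializes them against hardware; ecore_int_free()
 * releases them. On a partial allocation failure the caller is presumably
 * expected to unwind via ecore_int_free(), which checks for missing pieces.
 */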
mem\n"); 2619 return rc; 2620 } 2621 2622 rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt); 2623 if (rc != ECORE_SUCCESS) { 2624 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n"); 2625 return rc; 2626 } 2627 2628 rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt); 2629 if (rc != ECORE_SUCCESS) 2630 DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n"); 2631 2632 return rc; 2633 } 2634 2635 void ecore_int_free(struct ecore_hwfn *p_hwfn) 2636 { 2637 ecore_int_sp_sb_free(p_hwfn); 2638 ecore_int_sb_attn_free(p_hwfn); 2639 ecore_int_sp_dpc_free(p_hwfn); 2640 } 2641 2642 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 2643 { 2644 if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn) 2645 return; 2646 2647 ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); 2648 ecore_int_sb_attn_setup(p_hwfn, p_ptt); 2649 ecore_int_sp_dpc_setup(p_hwfn); 2650 } 2651 2652 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn, 2653 struct ecore_sb_cnt_info *p_sb_cnt_info) 2654 { 2655 struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info; 2656 2657 if (!p_igu_info || !p_sb_cnt_info) 2658 return; 2659 2660 OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage, 2661 sizeof(*p_sb_cnt_info)); 2662 } 2663 2664 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev) 2665 { 2666 int i; 2667 2668 for_each_hwfn(p_dev, i) 2669 p_dev->hwfns[i].b_int_requested = false; 2670 } 2671 2672 void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable) 2673 { 2674 p_dev->attn_clr_en = clr_enable; 2675 } 2676 2677 enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn, 2678 struct ecore_ptt *p_ptt, 2679 u8 timer_res, u16 sb_id, bool tx) 2680 { 2681 struct cau_sb_entry sb_entry; 2682 enum _ecore_status_t rc; 2683 2684 if (!p_hwfn->hw_init_done) { 2685 DP_ERR(p_hwfn, "hardware not initialized yet\n"); 2686 return ECORE_INVAL; 2687 } 2688 2689 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2690 sb_id * sizeof(u64), 2691 (u64)(osal_uintptr_t)&sb_entry, 2, 2692 OSAL_NULL /* default parameters */); 2693 if (rc != ECORE_SUCCESS) { 2694 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2695 return rc; 2696 } 2697 2698 if (tx) 2699 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 2700 else 2701 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 2702 2703 rc = ecore_dmae_host2grc(p_hwfn, p_ptt, 2704 (u64)(osal_uintptr_t)&sb_entry, 2705 CAU_REG_SB_VAR_MEMORY + sb_id * sizeof(u64), 2, 2706 OSAL_NULL /* default parameters */); 2707 if (rc != ECORE_SUCCESS) { 2708 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); 2709 return rc; 2710 } 2711 2712 return rc; 2713 } 2714 2715 enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn, 2716 struct ecore_ptt *p_ptt, 2717 struct ecore_sb_info *p_sb, 2718 struct ecore_sb_info_dbg *p_info) 2719 { 2720 u16 sbid = p_sb->igu_sb_id; 2721 u32 i; 2722 2723 if (IS_VF(p_hwfn->p_dev)) 2724 return ECORE_INVAL; 2725 2726 if (sbid >= NUM_OF_SBS(p_hwfn->p_dev)) 2727 return ECORE_INVAL; 2728 2729 p_info->igu_prod = ecore_rd(p_hwfn, p_ptt, 2730 IGU_REG_PRODUCER_MEMORY + sbid * 4); 2731 p_info->igu_cons = ecore_rd(p_hwfn, p_ptt, 2732 IGU_REG_CONSUMER_MEM + sbid * 4); 2733 2734 for (i = 0; i < PIS_PER_SB; i++) 2735 p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt, 2736 CAU_REG_PI_MEMORY + 2737 sbid * 4 * PIS_PER_SB + 2738 i * 4); 2739 2740 return ECORE_SUCCESS; 2741 } 2742 2743 void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn) 2744 { 2745 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2746 struct 

void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
	struct ecore_ptt *p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
							     RESERVED_PTT_DPC);
	int i;

	/* Do not reorder the following cleanup sequence */
	/* Ack all attentions */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ACK_BITS, 0xfff);

	/* Clear driver attention */
	ecore_wr(p_hwfn, p_dpc_ptt,
		 ((p_hwfn->rel_pf_id << 3) + MISC_REG_AEU_GENERAL_ATTN_0), 0);

	/* Clear per-PF IGU registers to restore them as if the IGU
	 * was reset for this PF
	 */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);

	/* Execute IGU cleanup */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_FUNCTIONAL_CLEANUP, 1);

	/* Clear Stats */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_OF_INTA_ASSERTED, 0);

	for (i = 0; i < IGU_REG_PBA_STS_PF_SIZE; i++)
		ecore_wr(p_hwfn, p_ptt, IGU_REG_PBA_STS_PF + i * 4, 0);
}