/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_spq.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_init_ops.h"
#include "ecore_rt_defs.h"
#include "ecore_int.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_hw_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_mcp.h"

struct ecore_pi_info {
	ecore_int_comp_cb_t comp_cb;
	void *cookie; /* Will be sent to the compl cb function */
};

struct ecore_sb_sp_info {
	struct ecore_sb_info sb_info;
	/* per protocol index data */
	struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4];
};

enum ecore_attention_type {
	ECORE_ATTN_TYPE_ATTN,
	ECORE_ATTN_TYPE_PARITY,
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)

struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY	(1 << 0)

#define ATTENTION_LENGTH_MASK	(0x00000ff0)
#define ATTENTION_LENGTH_SHIFT	(4)
#define ATTENTION_LENGTH(flags)	(((flags) & ATTENTION_LENGTH_MASK) >> \
				 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE	(1 << ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR		(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT	((2 << ATTENTION_LENGTH_SHIFT) | \
				 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK	(0x000ff000)
#define ATTENTION_OFFSET_SHIFT	(12)

#define ATTENTION_BB_MASK	(0x00700000)
#define ATTENTION_BB_SHIFT	(20)
#define ATTENTION_BB(value)	((value) << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT	(1 << 23)

#define ATTENTION_CLEAR_ENABLE	(1 << 28)
	unsigned int flags;

	/* Callback to call if attention will be triggered */
	enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);

	enum block_id block_index;
};

struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS	(8)
#define NUM_ATTN_REGS	(9)
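/* Editor's note (illustration only, not part of the original sources):
 * an AEU descriptor packs everything about a source into 'flags'.
 * ATTENTION_LENGTH() is the number of consecutive AEU bits the entry spans,
 * ATTENTION_PARITY marks the leading bit as a parity source (so
 * ATTENTION_PAR_INT means "a parity bit followed by an interrupt bit"), and
 * the OFFSET field records where a multi-bit group's numbering starts.
 * For example, the "SW timers #%d" entry in the tables below uses
 *	(8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT)
 * i.e. a group of eight consecutive single-attention bits whose numbering
 * starts at 1.
 */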
static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);

	return ECORE_SUCCESS;
}

#define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK		(0x3c000)
#define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT	(14)
#define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK		(0x03fc0)
#define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT	(6)
#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK	(0x00020)
#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT	(5)
#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK	(0x0001e)
#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT	(1)
#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK	(0x1)
#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT	(0)
#define ECORE_PSWHST_ATTENTION_VF_DISABLED		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT	(0)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK	(0x1e)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT	(1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x20)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK	(0x3fc0)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT	(6)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK	(0x3c000)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT	(14)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK	(0x3fc0000)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			   PSWHST_REG_VF_DISABLED_ERROR_VALID);

	/* Disabled VF access */
	if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
		u32 addr, data;

		addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
		data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_VF_DISABLED_ERROR_DATA);
		DP_INFO(p_hwfn->p_dev,
			"PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
			" Write [0x%02x] Addr [0x%08x]\n",
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK) >>
			     ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK) >>
			     ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
			     ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
			     ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
			     ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
			addr);
	}

	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       PSWHST_REG_INCORRECT_ACCESS_VALID);
	if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
		u32 addr, data, length;

		addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
		data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_DATA);
		length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				  PSWHST_REG_INCORRECT_ACCESS_LENGTH);

		DP_INFO(p_hwfn->p_dev,
			"Incorrect access to %08x of length %08x - PF [%02x]"
			" VF [%04x] [valid %02x] client [%02x] write [%02x]"
			" Byte-Enable [%04x] [%08x]\n",
			addr, length,
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
			data);
	}

	/* TODO - We know 'some' of these are legal due to virtualization,
	 * but is it true for all of them?
	 */
	return ECORE_SUCCESS;
}

#define ECORE_GRC_ATTENTION_VALID_BIT		(1 << 0)
#define ECORE_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff << 0)
#define ECORE_GRC_ATTENTION_RDWR_BIT		(1 << 23)
#define ECORE_GRC_ATTENTION_MASTER_MASK		(0xf << 24)
#define ECORE_GRC_ATTENTION_MASTER_SHIFT	(24)
#define ECORE_GRC_ATTENTION_PF_MASK		(0xf)
#define ECORE_GRC_ATTENTION_VF_MASK		(0xff << 4)
#define ECORE_GRC_ATTENTION_VF_SHIFT		(4)
#define ECORE_GRC_ATTENTION_PRIV_MASK		(0x3 << 14)
#define ECORE_GRC_ATTENTION_PRIV_SHIFT		(14)
#define ECORE_GRC_ATTENTION_PRIV_VF		(0)
static const char *grc_timeout_attn_master_to_str(u8 master)
{
	switch (master) {
	case 1:
		return "PXP";
	case 2:
		return "MCP";
	case 3:
		return "MSDM";
	case 4:
		return "PSDM";
	case 5:
		return "YSDM";
	case 6:
		return "USDM";
	case 7:
		return "TSDM";
	case 8:
		return "XSDM";
	case 9:
		return "DBU";
	case 10:
		return "DMAE";
	default:
		return "Unknown";
	}
}

static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register.
	 */
	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
		goto out;

	/* Read the GRC timeout information */
	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
"VF" : "(Irrelevant:)", 263 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >> 264 ECORE_GRC_ATTENTION_VF_SHIFT); 265 266 out: 267 /* Regardles of anything else, clean the validity bit */ 268 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 269 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 270 return ECORE_SUCCESS; 271 } 272 273 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29) 274 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26) 275 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20) 276 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 277 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19) 278 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24) 279 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 280 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21) 281 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22) 282 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23) 283 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23) 284 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25) 285 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23) 286 287 enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, 288 struct ecore_ptt *p_ptt) 289 { 290 u32 tmp; 291 292 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); 293 if (tmp & ECORE_PGLUE_ATTENTION_VALID) { 294 u32 addr_lo, addr_hi, details; 295 296 addr_lo = ecore_rd(p_hwfn, p_ptt, 297 PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 298 addr_hi = ecore_rd(p_hwfn, p_ptt, 299 PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 300 details = ecore_rd(p_hwfn, p_ptt, 301 PGLUE_B_REG_TX_ERR_WR_DETAILS); 302 303 DP_NOTICE(p_hwfn, false, 304 "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 305 addr_hi, addr_lo, details, 306 (u8)((details & 307 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> 308 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 309 (u8)((details & 310 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> 311 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 312 (u8)((details & 313 ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 314 tmp, 315 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 316 1 : 0), 317 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 318 1 : 0), 319 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 320 1 : 0)); 321 } 322 323 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); 324 if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) { 325 u32 addr_lo, addr_hi, details; 326 327 addr_lo = ecore_rd(p_hwfn, p_ptt, 328 PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 329 addr_hi = ecore_rd(p_hwfn, p_ptt, 330 PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 331 details = ecore_rd(p_hwfn, p_ptt, 332 PGLUE_B_REG_TX_ERR_RD_DETAILS); 333 334 DP_NOTICE(p_hwfn, false, 335 "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 336 addr_hi, addr_lo, details, 337 (u8)((details & 338 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> 339 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 340 (u8)((details & 341 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> 342 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 343 (u8)((details & 344 ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 345 tmp, 346 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 347 1 : 0), 348 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 349 1 : 0), 350 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 
	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
	if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
		DP_NOTICE(p_hwfn, false, "ICPL error - %08x\n", tmp);

	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
	if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
		u32 addr_hi, addr_lo;

		addr_lo = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);

		DP_NOTICE(p_hwfn, false,
			  "ZLR error - %08x [Address %08x:%08x]\n",
			  tmp, addr_hi, addr_lo);
	}

	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
	if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
		u32 addr_hi, addr_lo, details;

		addr_lo = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
		details = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_VF_ILT_ERR_DETAILS);

		DP_NOTICE(p_hwfn, false,
			  "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
			  details, tmp, addr_hi, addr_lo);
	}

	/* Clear the indications */
	ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
{
	return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
}

static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
{
	DP_NOTICE(p_hwfn, false, "FW assertion!\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);

	return ECORE_INVAL;
}

static enum _ecore_status_t
ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
{
	DP_INFO(p_hwfn, "General attention 35!\n");

	return ECORE_SUCCESS;
}

#define ECORE_DORQ_ATTENTION_REASON_MASK	(0xfffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT	(0x0)
#define ECORE_DORQ_ATTENTION_SIZE_MASK		(0x7f)
#define ECORE_DORQ_ATTENTION_SIZE_SHIFT		(16)
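/* Editor's note (illustration only): the OPAQUE/SIZE pairs above follow the
 * usual ecore GET_FIELD() convention - the macro is handed the common prefix
 * and appends _MASK/_SHIFT itself, masking after the shift. So the SIZE
 * field of the drop details is expected to be extracted roughly as
 *	(details >> ECORE_DORQ_ATTENTION_SIZE_SHIFT) &
 *		ECORE_DORQ_ATTENTION_SIZE_MASK
 * which is what GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) is used for
 * in ecore_dorq_attn_cb() below.
 */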
#define ECORE_DB_REC_COUNT	10
#define ECORE_DB_REC_INTERVAL	100

/* assumes sticky overflow indication was set for this PF */
static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt)
{
	u8 count = ECORE_DB_REC_COUNT;
	u32 usage = 1;

	/* Wait for usage to zero or count to run out. This is necessary since
	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
	 * can "split" over the PCI. Possibly, the doorbell drop can happen
	 * with half an EDPM in the queue and the other half dropped. Another
	 * EDPM doorbell to the same address (from the doorbell recovery
	 * mechanism or from the doorbelling entity) could have its first half
	 * dropped and its second half interpreted as a continuation of the
	 * first. To prevent such malformed doorbells from reaching the device,
	 * flush the queue before releasing the overflow sticky indication.
	 */
	while (count-- && usage) {
		usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
		OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
	}

	/* should have been depleted by now */
	if (usage) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
			  ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
		return ECORE_TIMEOUT;
	}

	/* flush any pending (e)dpm as they may never arrive */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);

	/* release overflow sticky indication (stop silently dropping
	 * everything)
	 */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);

	/* repeat all last doorbells (doorbell drop recovery) */
	ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 int_sts, first_drop_reason, details, address, overflow,
	    all_drops_reason;
	struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
	enum _ecore_status_t rc;

	int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
	DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
		  int_sts);

	/* int_sts may be zero since all PFs were interrupted for doorbell
	 * overflow but another one already handled it. Can abort here. If
	 * this PF also requires overflow recovery we will be interrupted again.
	 */
	if (!int_sts)
		return ECORE_SUCCESS;

	/* check if db_drop or overflow happened */
	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
		/* obtain data about db drop/overflow */
		first_drop_reason = ecore_rd(p_hwfn, p_ptt,
					     DORQ_REG_DB_DROP_REASON) &
				    ECORE_DORQ_ATTENTION_REASON_MASK;
		details = ecore_rd(p_hwfn, p_ptt,
				   DORQ_REG_DB_DROP_DETAILS);
		address = ecore_rd(p_hwfn, p_ptt,
				   DORQ_REG_DB_DROP_DETAILS_ADDRESS);
		overflow = ecore_rd(p_hwfn, p_ptt,
				    DORQ_REG_PF_OVFL_STICKY);
		all_drops_reason = ecore_rd(p_hwfn, p_ptt,
					    DORQ_REG_DB_DROP_DETAILS_REASON);

		/* log info */
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Doorbell drop occurred\n"
			  "Address\t\t0x%08x\t(second BAR address)\n"
			  "FID\t\t0x%04x\t\t(Opaque FID)\n"
			  "Size\t\t0x%04x\t\t(in bytes)\n"
			  "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
			  "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n"
			  "Overflow\t0x%x\t\t(a per PF indication)\n",
			  address,
			  GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
			  GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
			  first_drop_reason, all_drops_reason, overflow);

		/* if this PF caused overflow, initiate recovery */
		if (overflow) {
			rc = ecore_db_rec_attn(p_hwfn, p_ptt);
			if (rc != ECORE_SUCCESS)
				return rc;
		}

		/* clear the doorbell drop details and prepare for next drop */
		ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);

		/* mark interrupt as handled (note: even if drop was due to a
		 * different reason than overflow we mark as handled)
		 */
		ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
			 DORQ_REG_INT_STS_DB_DROP |
			 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);

		/* if there are no indications other than drop indications,
		 * success
		 */
		if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
				 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
				 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
			return ECORE_SUCCESS;
	}

	/* some other indication was present - non recoverable */
	DP_INFO(p_hwfn, "DORQ fatal attention\n");

	return ECORE_INVAL;
}
static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
		u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   TM_REG_INT_STS_1);

		if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
			    TM_REG_INT_STS_1_PEND_CONN_SCAN))
			return ECORE_INVAL;

		if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
			   TM_REG_INT_STS_1_PEND_CONN_SCAN))
			DP_INFO(p_hwfn,
				"TM attention on emulation - most likely"
				" results of clock-ratios\n");
		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
		val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
		       TM_REG_INT_MASK_1_PEND_TASK_SCAN;
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);

		return ECORE_SUCCESS;
	}
#endif

	return ECORE_INVAL;
}

/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};

static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
};

/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
	{
		{ /* After Invert 1 */
			{"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT),
			 OSAL_NULL, MAX_BLOCK_ID},
		}
	},

	{
		{ /* After Invert 2 */
			{"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb,
			 BLOCK_PGLUE_B},
			{"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"SW timers #%d",
			 (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT),
			 OSAL_NULL, MAX_BLOCK_ID},
			{"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT),
			 OSAL_NULL, BLOCK_PGLCS},
		}
	},

	{
		{ /* After Invert 3 */
			{"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT),
			 OSAL_NULL, MAX_BLOCK_ID},
		}
	},

	{
		{ /* After Invert 4 */
			{"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
			 ecore_fw_assertion, MAX_BLOCK_ID},
			{"General Attention %d",
			 (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT),
			 OSAL_NULL, MAX_BLOCK_ID},
			{"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
			 ecore_general_attention_35, MAX_BLOCK_ID},
			{"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
			 OSAL_NULL, BLOCK_NWS},
			{"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
			 OSAL_NULL, BLOCK_NWS},
			{"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
			 OSAL_NULL, BLOCK_NWM},
			{"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
			 OSAL_NULL, BLOCK_NWM},
			{"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
652 {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 653 {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 654 {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 655 {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 656 {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 657 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 658 MAX_BLOCK_ID}, 659 {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG}, 660 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB}, 661 {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB}, 662 {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB}, 663 {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS}, 664 } 665 }, 666 667 { 668 { /* After Invert 5 */ 669 {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC}, 670 {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1}, 671 {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2}, 672 {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB}, 673 {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF}, 674 {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM}, 675 {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM}, 676 {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM}, 677 {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM}, 678 {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM}, 679 {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM}, 680 {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM}, 681 {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM}, 682 {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM}, 683 {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM}, 684 {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM}, 685 } 686 }, 687 688 { 689 { /* After Invert 6 */ 690 {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM}, 691 {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM}, 692 {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM}, 693 {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM}, 694 {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM}, 695 {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM}, 696 {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM}, 697 {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM}, 698 {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM}, 699 {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD}, 700 {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD}, 701 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD}, 702 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD}, 703 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ}, 704 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG}, 705 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC}, 706 } 707 }, 708 709 { 710 { /* After Invert 7 */ 711 {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC}, 712 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU}, 713 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE}, 714 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU}, 715 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 716 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU}, 717 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU}, 718 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM}, 719 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC}, 720 {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF}, 721 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF}, 722 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS}, 723 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC}, 724 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS}, 725 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE}, 726 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 727 {"PSWRQ", 
		}
	},

	{
		{ /* After Invert 8 */
			{"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
			{"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
			{"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
			{"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
			{"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
			{"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
			{"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
			{"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
			{"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
			{"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
			{"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
			{"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
			{"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
			{"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
			{"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
			{"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
			{"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
			{"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
			{"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
			 MAX_BLOCK_ID},
		}
	},

	{
		{ /* After Invert 9 */
			{"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
			 MAX_BLOCK_ID},
			{"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
			{"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
			{"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
			 MAX_BLOCK_ID},
		}
	},

};

static struct aeu_invert_reg_bit *
ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
			struct aeu_invert_reg_bit *p_bit)
{
	if (!ECORE_IS_BB(p_hwfn->p_dev))
		return p_bit;

	if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
		return p_bit;

	return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
				  ATTENTION_BB_SHIFT];
}

static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
				     struct aeu_invert_reg_bit *p_bit)
{
	return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
		  ATTENTION_PARITY);
}

#define ATTN_STATE_BITS		(0xfff)
#define ATTN_BITS_MASKABLE	(0x3ff)
struct ecore_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block *sb_attn;
	dma_addr_t sb_phys;

	/* Last seen running index */
	u16 index;

	/* A mask of the AEU bits resulting in a parity error */
	u32 parity_mask[NUM_ATTN_REGS];

	/* A pointer to the attention description structure */
	struct aeu_invert_reg *p_aeu_desc;

	/* Previously asserted attentions, which are still unasserted */
	u16 known_attn;

	/* Cleanup address for the link's general hw attention */
	u32 mfw_attn_addr;
};

static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
				 struct ecore_sb_attn_info *p_sb_desc)
{
	u16 rc = 0, index;

	OSAL_MMIOWB(p_hwfn->p_dev);

	index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
	if (p_sb_desc->index != index) {
		p_sb_desc->index = index;
		rc = ECORE_SB_ATT_IDX;
	}

	OSAL_MMIOWB(p_hwfn->p_dev);

	return rc;
}
/**
 * @brief ecore_int_assertion - handles asserted attention bits
 *
 * @param p_hwfn
 * @param asserted_bits newly asserted bits
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
						u16 asserted_bits)
{
	struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			    IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
			 sb_attn_sw->mfw_attn_addr, 0);
	}

	/* FIXME - this will change once we'll have GOOD gtt definitions */
	DIRECT_REG_WR(p_hwfn,
		      (u8 OSAL_IOMEM *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return ECORE_SUCCESS;
}

static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
				 enum block_id id, enum dbg_attn_type type,
				 bool b_clear)
{
	/* @DPDK */
	DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type);
}
/**
 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
 * cause of the attention
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the attention
 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
 *		       this bit to this group.
 * @param bit_index - index of this bit in the aeu_en_reg
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
			      struct aeu_invert_reg_bit *p_aeu,
			      u32 aeu_en_reg,
			      const char *p_bit_name,
			      u32 bitmask)
{
	enum _ecore_status_t rc = ECORE_INVAL;
	bool b_fatal = false;

	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
		p_bit_name, bitmask);

	/* Call callback before clearing the interrupt status */
	if (p_aeu->cb) {
		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
			p_bit_name);
		rc = p_aeu->cb(p_hwfn);
	}

	if (rc != ECORE_SUCCESS)
		b_fatal = true;

	/* Print HW block interrupt registers */
	if (p_aeu->block_index != MAX_BLOCK_ID) {
		ecore_int_attn_print(p_hwfn, p_aeu->block_index,
				     ATTN_TYPE_INTERRUPT, !b_fatal);
	}

	/* @DPDK */
	/* Reach assertion if attention is fatal */
	if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) {
		DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
			  p_bit_name);

		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
	}

	/* Prevent this Attention from being asserted in the future */
	if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
	    p_hwfn->p_dev->attn_clr_en) {
		u32 val;
		u32 mask = ~bitmask;

		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
		DP_ERR(p_hwfn, "`%s' - Disabled future attentions\n",
		       p_bit_name);
	}

	return rc;
}

/**
 * @brief ecore_int_deassertion_parity - handle a single parity AEU source
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the parity
 * @param aeu_en_reg - address of the AEU enable register
 * @param bit_index
 */
static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
					 struct aeu_invert_reg_bit *p_aeu,
					 u32 aeu_en_reg, u8 bit_index)
{
	u32 block_id = p_aeu->block_index, mask, val;

	DP_NOTICE(p_hwfn->p_dev, false,
		  "%s parity attention is set [address 0x%08x, bit %d]\n",
		  p_aeu->bit_name, aeu_en_reg, bit_index);

	if (block_id != MAX_BLOCK_ID) {
		ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);

		/* In A0, there's a single parity bit for several blocks */
		if (block_id == BLOCK_BTB) {
			ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
					     ATTN_TYPE_PARITY, false);
			ecore_int_attn_print(p_hwfn, BLOCK_MCP,
					     ATTN_TYPE_PARITY, false);
		}
	}

	/* Prevent this parity error from being re-asserted */
	mask = ~(0x1 << bit_index);
	val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
		p_aeu->bit_name);
}
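/* Editor's note (illustration only): the deassertion handler below walks a
 * grid of AEU enable registers - NUM_ATTN_REGS (9) 32-bit registers per
 * attention group, MAX_ATTN_GRPS (8) groups in total. The enable register
 * for register 'i' of group 'k' is therefore addressed as
 *	MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32) +
 *		k * sizeof(u32) * NUM_ATTN_REGS
 * which is exactly how 'aeu_en' is computed in ecore_int_deassertion().
 */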
/**
 * @brief - handles deassertion of previously asserted attentions.
 *
 * @param p_hwfn
 * @param deasserted_bits - newly deasserted bits
 * @return enum _ecore_status_t
 *
 */
static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
						  u16 deasserted_bits)
{
	struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
	u8 i, j, k, bit_idx;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Read the attention registers in the AEU */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
					  MISC_REG_AEU_AFTER_INVERT_1_IGU +
					  i * 0x4);
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
	}

	/* Handle parity attentions first */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
		u32 parities;

		aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
		en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;

		/* Skip register in which no parity bit is currently set */
		if (!parities)
			continue;

		for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

			if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
			    !!(parities & (1 << bit_idx)))
				ecore_int_deassertion_parity(p_hwfn, p_bit,
							     aeu_en, bit_idx);

			bit_idx += ATTENTION_LENGTH(p_bit->flags);
		}
	}

	/* Find non-parity cause for attention and act */
	for (k = 0; k < MAX_ATTN_GRPS; k++) {
		struct aeu_invert_reg_bit *p_aeu;

		/* Handle only groups whose attention is currently deasserted */
		if (!(deasserted_bits & (1 << k)))
			continue;

		for (i = 0; i < NUM_ATTN_REGS; i++) {
			u32 bits;

			aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
				 i * sizeof(u32) +
				 k * sizeof(u32) * NUM_ATTN_REGS;
			en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
			bits = aeu_inv_arr[i] & en;

			/* Skip if no bit from this group is currently set */
			if (!bits)
				continue;

			/* Find all set bits from current register which belong
			 * to current group, making them responsible for the
			 * previous assertion.
			 */
			for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
				unsigned long int bitmask;
				u8 bit, bit_len;

				/* Need to account for bits with changed meaning */
				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];

				bit = bit_idx;
				bit_len = ATTENTION_LENGTH(p_aeu->flags);
				if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
					/* Skip Parity */
					bit++;
					bit_len--;
				}

				/* Find the bits relating to HW-block, then
				 * shift so they'll become LSB.
				 */
				bitmask = bits & (((1 << bit_len) - 1) << bit);
				bitmask >>= bit;

				if (bitmask) {
					u32 flags = p_aeu->flags;
					char bit_name[30];
					u8 num;

					num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
								      bit_len);

					/* Some bits represent more than a
					 * single interrupt. Correctly print
					 * their name.
					 */
					if (ATTENTION_LENGTH(flags) > 2 ||
					    ((flags & ATTENTION_PAR_INT) &&
					     ATTENTION_LENGTH(flags) > 1))
						OSAL_SNPRINTF(bit_name, 30,
							      p_aeu->bit_name,
							      num);
					else
						OSAL_STRNCPY(bit_name,
							     p_aeu->bit_name,
							     30);

					/* We now need to pass bitmask in its
					 * correct position.
					 */
					bitmask <<= bit;

					/* Handle source of the attention */
					ecore_int_deassertion_aeu_bit(p_hwfn,
								      p_aeu,
								      aeu_en,
								      bit_name,
								      bitmask);
				}

				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
			}
		}
	}

	/* Clear IGU indication for the deasserted bits */
	/* FIXME - this will change once we'll have GOOD gtt definitions */
	DIRECT_REG_WR(p_hwfn,
		      (u8 OSAL_IOMEM *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			    IGU_REG_ATTENTION_ENABLE);
	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

	/* Clear deassertion from inner state */
	sb_attn_sw->known_attn &= ~deasserted_bits;

	return rc;
}
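/* Editor's note (illustration only): ecore_int_attentions() below derives the
 * two bit sets it acts on from the attention SB snapshot and the driver's
 * 'known_attn' state:
 *	asserted   = attn_bits & ~attn_acks & ~known_attn
 *	deasserted = ~attn_bits & attn_acks & known_attn
 * (both limited to ATTN_STATE_BITS). E.g. a bit that is set in attn_bits but
 * not yet acked and not previously known is reported as newly asserted.
 */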
static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u16 index = 0, asserted_bits, deasserted_bits;
	u32 attn_bits = 0, attn_acks = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
	 */
	do {
		index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
		attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
		attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
	} while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
	p_sb_attn->sb_index = index;

	/* Attention / Deassertion are meaningful (and in correct state)
	 * only when they differ and consistent with known state - deassertion
	 * when previous attention & current ack, and assertion when current
	 * attention with no previous attention
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
			~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
			  p_sb_attn_sw->known_attn;

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	else if (asserted_bits == 0x100)
		DP_INFO(p_hwfn, "MFW indication via attention\n");
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "MFW indication [deassertion]\n");

	if (asserted_bits) {
		rc = ecore_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits)
		rc = ecore_int_deassertion(p_hwfn, deasserted_bits);

	return rc;
}

static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
			      void OSAL_IOMEM *igu_addr, u32 ack_cons)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	igu_ack.sb_id_and_flags =
	    ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
	     (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
	     (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
	     (IGU_SEG_ACCESS_ATTN <<
	      IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to the same address;
	 * need to guarantee all commands will be received (in-order) by HW.
	 */
	OSAL_MMIOWB(p_hwfn->p_dev);
	OSAL_BARRIER(p_hwfn->p_dev);
}

void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
	struct ecore_pi_info *pi_info = OSAL_NULL;
	struct ecore_sb_attn_info *sb_attn;
	struct ecore_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	if (!p_hwfn)
		return;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
	if (!sb_info) {
		DP_ERR(p_hwfn->p_dev,
		       "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for def status block. Required both for msix +
	 * inta in non-mask mode; in inta it does no harm.
	 */
	ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->p_dev,
		       "Interrupt Status block is NULL -"
		       " cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;

		rc = ecore_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->p_dev,
		       "Attentions Status block is NULL -"
		       " cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. if not just ack them */
	if (!(rc & ECORE_SB_EVENT_MASK)) {
		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If not ack interrupts and fail */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & ECORE_SB_ATT_IDX)
		ecore_int_attentions(p_hwfn);

	if (rc & ECORE_SB_IDX) {
		int pi;

		/* Since we only looked at the SB index, it's possible more
		 * than a single protocol-index on the SB incremented.
		 * Iterate over all configured protocol indices and check
		 * whether something happened for each.
		 */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb != OSAL_NULL)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
	}

	ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}

static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_attn) {
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
				       p_sb->sb_phys,
				       SB_ATTN_ALIGNED_SIZE(p_hwfn));
	}
	OSAL_FREE(p_hwfn->p_dev, p_sb);
}

static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
		 DMA_LO(p_hwfn->p_sb_attn->sb_phys));
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
		 DMA_HI(p_hwfn->p_sb_attn->sb_phys));
}

static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
	struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
	int i, j, k;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the pointer to the AEU descriptors */
	sb_info->p_aeu_desc = aeu_descs;

	/* Calculate Parity Masks */
	OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		/* j is array index, k is bit index */
		for (j = 0, k = 0; k < 32; j++) {
			struct aeu_invert_reg_bit *p_aeu;

			p_aeu = &aeu_descs[i].bits[j];
			if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
				sb_info->parity_mask[i] |= 1 << k;

			k += ATTENTION_LENGTH(p_aeu->flags);
		}
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Attn Mask [Reg %d]: 0x%08x\n",
			   i, sb_info->parity_mask[i]);
	}

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
				 MISC_REG_AEU_GENERAL_ATTN_0;

	ecore_int_sb_attn_setup(p_hwfn, p_ptt);
}

static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	struct ecore_sb_attn_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
	if (!p_sb) {
		DP_NOTICE(p_dev, false,
			  "Failed to allocate `struct ecore_sb_attn_info'\n");
		return ECORE_NOMEM;
	}

	/* SB ring */
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
					 SB_ATTN_ALIGNED_SIZE(p_hwfn));
	if (!p_virt) {
		DP_NOTICE(p_dev, false,
			  "Failed to allocate status block (attentions)\n");
		OSAL_FREE(p_dev, p_sb);
		return ECORE_NOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return ECORE_SUCCESS;
}

/* coalescing timeout = timeset << (timer_res + 1) */
#define ECORE_CAU_DEF_RX_USECS 24
#define ECORE_CAU_DEF_TX_USECS 48

void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
			     struct cau_sb_entry *p_sb_entry,
			     u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cau_state;
	u8 timer_res;

	OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	cau_state = CAU_HC_DISABLE_STATE;

	if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!p_dev->rx_coalesce_usecs)
			p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
		if (!p_dev->tx_coalesce_usecs)
			p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
	}

	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
	if (p_dev->rx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (p_dev->rx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	if (p_dev->tx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (p_dev->tx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);

	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
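/* Editor's note (worked example, not from the original sources): with the
 * defaults above, rx_coalesce_usecs = 24 fits in 7 bits, so timer_res stays 0
 * and the timeset later programmed in ecore_int_cau_conf_sb() is simply
 * 24 >> 0 = 24. A hypothetical value of 300 usecs would instead select
 * timer_res = 2 and a timeset of 300 >> 2 = 75, trading resolution for range.
 */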
static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   u16 igu_sb_id, u32 pi_index,
				   enum ecore_coalescing_fsm coalescing_fsm,
				   u8 timeset)
{
	struct cau_pi_entry pi_entry;
	u32 sb_offset, pi_offset;

	if (IS_VF(p_hwfn->p_dev))
		return; /* @@@TBD MichalK- VF CAU... */

	sb_offset = igu_sb_id * PIS_PER_SB_E4;
	OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));

	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

	pi_offset = sb_offset + pi_index;
	if (p_hwfn->hw_init_done) {
		ecore_wr(p_hwfn, p_ptt,
			 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
			 *((u32 *)&(pi_entry)));
	} else {
		STORE_RT_REG(p_hwfn,
			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     *((u32 *)&(pi_entry)));
	}
}

void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   struct ecore_sb_info *p_sb, u32 pi_index,
			   enum ecore_coalescing_fsm coalescing_fsm,
			   u8 timeset)
{
	_ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
			       pi_index, coalescing_fsm, timeset);
}

void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   dma_addr_t sb_phys, u16 igu_sb_id,
			   u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
				vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&phys_addr,
				    CAU_REG_SB_ADDR_MEMORY +
				    igu_sb_id * sizeof(u64), 2, 0);
		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&sb_entry,
				    CAU_REG_SB_VAR_MEMORY +
				    igu_sb_id * sizeof(u64), 2, 0);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		/* eth will open queues for all tcs, so configure all of them
		 * properly, rather than just the active ones
		 */
		u8 num_tc = p_hwfn->hw_info.num_hw_tc;
		u8 timeset, timer_res;
		u8 i;

		/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
		if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
		_ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				       ECORE_COAL_RX_STATE_MACHINE, timeset);

		if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
		for (i = 0; i < num_tc; i++) {
			_ecore_int_cau_conf_pi(p_hwfn, p_ptt,
					       igu_sb_id, TX_PI(i),
					       ECORE_COAL_TX_STATE_MACHINE,
					       timeset);
		}
	}
}

void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_PF(p_hwfn->p_dev))
		ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
				      sb_info->igu_sb_id, 0, 0);
}
struct ecore_igu_block *
ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
{
	struct ecore_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !(p_block->status & ECORE_IGU_STATUS_FREE))
			continue;

		if (!!(p_block->status & ECORE_IGU_STATUS_PF) == b_is_pf)
			return p_block;
	}

	return OSAL_NULL;
}

static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 vector_id)
{
	struct ecore_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    p_block->vector_number != vector_id)
			continue;

		return igu_id;
	}

	return ECORE_SB_INVALID_IDX;
}

u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
{
	u16 igu_sb_id;

	/* Assuming continuous set of IGU SBs dedicated for given PF */
	if (sb_id == ECORE_SP_SB_ID)
		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	else if (IS_PF(p_hwfn->p_dev))
		igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
	else
		igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);

	if (igu_sb_id == ECORE_SB_INVALID_IDX)
		DP_NOTICE(p_hwfn, true,
			  "Slowpath SB vector %04x doesn't exist\n", sb_id);
	else if (sb_id == ECORE_SP_SB_ID)
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);

	return igu_sb_id;
}
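/* Editor's note (illustration only): per the lookup above, the slowpath SB
 * (ECORE_SP_SB_ID) always resolves to the PF's default IGU SB (igu_dsb_id),
 * a PF fastpath SB 'n' is found by searching the IGU entries for vector
 * number n + 1, and a VF resolves its SB through ecore_vf_get_igu_sb_id()
 * over the VF->PF channel.
 */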
ecore_int_sb_setup(p_hwfn, p_ptt, sb_info); 1727 1728 return ECORE_SUCCESS; 1729 } 1730 1731 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn, 1732 struct ecore_sb_info *sb_info, 1733 u16 sb_id) 1734 { 1735 struct ecore_igu_info *p_info; 1736 struct ecore_igu_block *p_block; 1737 1738 if (sb_info == OSAL_NULL) 1739 return ECORE_SUCCESS; 1740 1741 /* zero status block and ack counter */ 1742 sb_info->sb_ack = 0; 1743 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1744 1745 if (IS_VF(p_hwfn->p_dev)) { 1746 ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL); 1747 return ECORE_SUCCESS; 1748 } 1749 1750 p_info = p_hwfn->hw_info.p_igu_info; 1751 p_block = &p_info->entry[sb_info->igu_sb_id]; 1752 1753 /* Vector 0 is reserved to Default SB */ 1754 if (p_block->vector_number == 0) { 1755 DP_ERR(p_hwfn, "Do Not free sp sb using this function"); 1756 return ECORE_INVAL; 1757 } 1758 1759 /* Lose reference to client's SB info, and fix counters */ 1760 p_block->sb_info = OSAL_NULL; 1761 p_block->status |= ECORE_IGU_STATUS_FREE; 1762 p_info->usage.free_cnt++; 1763 1764 return ECORE_SUCCESS; 1765 } 1766 1767 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn) 1768 { 1769 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb; 1770 1771 if (!p_sb) 1772 return; 1773 1774 if (p_sb->sb_info.sb_virt) { 1775 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 1776 p_sb->sb_info.sb_virt, 1777 p_sb->sb_info.sb_phys, 1778 SB_ALIGNED_SIZE(p_hwfn)); 1779 } 1780 1781 OSAL_FREE(p_hwfn->p_dev, p_sb); 1782 } 1783 1784 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn, 1785 struct ecore_ptt *p_ptt) 1786 { 1787 struct ecore_sb_sp_info *p_sb; 1788 dma_addr_t p_phys = 0; 1789 void *p_virt; 1790 1791 /* SB struct */ 1792 p_sb = 1793 OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, 1794 sizeof(*p_sb)); 1795 if (!p_sb) { 1796 DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n"); 1797 return ECORE_NOMEM; 1798 } 1799 1800 /* SB ring */ 1801 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 1802 &p_phys, SB_ALIGNED_SIZE(p_hwfn)); 1803 if (!p_virt) { 1804 DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n"); 1805 OSAL_FREE(p_hwfn->p_dev, p_sb); 1806 return ECORE_NOMEM; 1807 } 1808 1809 /* Status Block setup */ 1810 p_hwfn->p_sp_sb = p_sb; 1811 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, 1812 p_virt, p_phys, ECORE_SP_SB_ID); 1813 1814 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); 1815 1816 return ECORE_SUCCESS; 1817 } 1818 1819 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn, 1820 ecore_int_comp_cb_t comp_cb, 1821 void *cookie, 1822 u8 *sb_idx, __le16 **p_fw_cons) 1823 { 1824 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1825 enum _ecore_status_t rc = ECORE_NOMEM; 1826 u8 pi; 1827 1828 /* Look for a free index */ 1829 for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { 1830 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL) 1831 continue; 1832 1833 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; 1834 p_sp_sb->pi_info_arr[pi].cookie = cookie; 1835 *sb_idx = pi; 1836 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; 1837 rc = ECORE_SUCCESS; 1838 break; 1839 } 1840 1841 return rc; 1842 } 1843 1844 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi) 1845 { 1846 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1847 1848 if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL) 1849 return ECORE_NOMEM; 1850 1851 p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL; 1852 p_sp_sb->pi_info_arr[pi].cookie = 
OSAL_NULL; 1853 return ECORE_SUCCESS; 1854 } 1855 1856 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn) 1857 { 1858 return p_hwfn->p_sp_sb->sb_info.igu_sb_id; 1859 } 1860 1861 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn, 1862 struct ecore_ptt *p_ptt, 1863 enum ecore_int_mode int_mode) 1864 { 1865 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; 1866 1867 #ifndef ASIC_ONLY 1868 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1869 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n"); 1870 igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN; 1871 } 1872 #endif 1873 1874 p_hwfn->p_dev->int_mode = int_mode; 1875 switch (p_hwfn->p_dev->int_mode) { 1876 case ECORE_INT_MODE_INTA: 1877 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; 1878 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1879 break; 1880 1881 case ECORE_INT_MODE_MSI: 1882 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1883 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1884 break; 1885 1886 case ECORE_INT_MODE_MSIX: 1887 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1888 break; 1889 case ECORE_INT_MODE_POLL: 1890 break; 1891 } 1892 1893 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 1894 } 1895 1896 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn, 1897 struct ecore_ptt *p_ptt) 1898 { 1899 #ifndef ASIC_ONLY 1900 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1901 DP_INFO(p_hwfn, 1902 "FPGA - Don't enable Attentions in IGU and MISC\n"); 1903 return; 1904 } 1905 #endif 1906 1907 /* Configure AEU signal change to produce attentions */ 1908 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 1909 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); 1910 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); 1911 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); 1912 1913 /* Flush the writes to IGU */ 1914 OSAL_MMIOWB(p_hwfn->p_dev); 1915 1916 /* Unmask AEU signals toward IGU */ 1917 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 1918 } 1919 1920 enum _ecore_status_t 1921 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 1922 enum ecore_int_mode int_mode) 1923 { 1924 enum _ecore_status_t rc = ECORE_SUCCESS; 1925 1926 ecore_int_igu_enable_attn(p_hwfn, p_ptt); 1927 1928 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { 1929 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn); 1930 if (rc != ECORE_SUCCESS) { 1931 DP_NOTICE(p_hwfn, true, 1932 "Slowpath IRQ request failed\n"); 1933 return ECORE_NORESOURCES; 1934 } 1935 p_hwfn->b_int_requested = true; 1936 } 1937 1938 /* Enable interrupt Generation */ 1939 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 1940 1941 p_hwfn->b_int_enabled = 1; 1942 1943 return rc; 1944 } 1945 1946 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn, 1947 struct ecore_ptt *p_ptt) 1948 { 1949 p_hwfn->b_int_enabled = 0; 1950 1951 if (IS_VF(p_hwfn->p_dev)) 1952 return; 1953 1954 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); 1955 } 1956 1957 #define IGU_CLEANUP_SLEEP_LENGTH (1000) 1958 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn, 1959 struct ecore_ptt *p_ptt, 1960 u32 igu_sb_id, 1961 bool cleanup_set, 1962 u16 opaque_fid) 1963 { 1964 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; 1965 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; 1966 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; 1967 u8 type = 0; /* FIXME MichalS type??? */ 1968 1969 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 - 1970 IGU_REG_CLEANUP_STATUS_0) != 0x200); 1971 1972 /* USE Control Command Register to perform cleanup. 
There is an 1973 * option to do this using IGU bar, but then it can't be used for VFs. 1974 */ 1975 1976 /* Set the data field */ 1977 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); 1978 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type); 1979 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); 1980 1981 /* Set the control register */ 1982 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); 1983 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); 1984 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); 1985 1986 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); 1987 1988 OSAL_BARRIER(p_hwfn->p_dev); 1989 1990 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); 1991 1992 /* Flush the write to IGU */ 1993 OSAL_MMIOWB(p_hwfn->p_dev); 1994 1995 /* calculate where to read the status bit from */ 1996 sb_bit = 1 << (igu_sb_id % 32); 1997 sb_bit_addr = igu_sb_id / 32 * sizeof(u32); 1998 1999 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type); 2000 2001 /* Now wait for the command to complete */ 2002 while (--sleep_cnt) { 2003 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr); 2004 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) 2005 break; 2006 OSAL_MSLEEP(5); 2007 } 2008 2009 if (!sleep_cnt) 2010 DP_NOTICE(p_hwfn, true, 2011 "Timeout waiting for clear status 0x%08x [for sb %d]\n", 2012 val, igu_sb_id); 2013 } 2014 2015 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn, 2016 struct ecore_ptt *p_ptt, 2017 u16 igu_sb_id, u16 opaque, bool b_set) 2018 { 2019 struct ecore_igu_block *p_block; 2020 int pi, i; 2021 2022 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2023 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2024 "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", 2025 igu_sb_id, p_block->function_id, p_block->is_pf, 2026 p_block->vector_number); 2027 2028 /* Set */ 2029 if (b_set) 2030 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); 2031 2032 /* Clear */ 2033 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); 2034 2035 /* Wait for the IGU SB to cleanup */ 2036 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { 2037 u32 val; 2038 2039 val = ecore_rd(p_hwfn, p_ptt, 2040 IGU_REG_WRITE_DONE_PENDING + 2041 ((igu_sb_id / 32) * 4)); 2042 if (val & (1 << (igu_sb_id % 32))) 2043 OSAL_UDELAY(10); 2044 else 2045 break; 2046 } 2047 if (i == IGU_CLEANUP_SLEEP_LENGTH) 2048 DP_NOTICE(p_hwfn, true, 2049 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", 2050 igu_sb_id); 2051 2052 /* Clear the CAU for the SB */ 2053 for (pi = 0; pi < 12; pi++) 2054 ecore_wr(p_hwfn, p_ptt, 2055 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0); 2056 } 2057 2058 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn, 2059 struct ecore_ptt *p_ptt, 2060 bool b_set, bool b_slowpath) 2061 { 2062 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2063 struct ecore_igu_block *p_block; 2064 u16 igu_sb_id = 0; 2065 u32 val = 0; 2066 2067 /* @@@TBD MichalK temporary... should be moved to init-tool... 
*/
2068 val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
2069 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
2070 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
2071 ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
2072 /* end temporary */
2073
2074 for (igu_sb_id = 0;
2075 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2076 igu_sb_id++) {
2077 p_block = &p_info->entry[igu_sb_id];
2078
2079 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
2080 !p_block->is_pf ||
2081 (p_block->status & ECORE_IGU_STATUS_DSB))
2082 continue;
2083
2084 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
2085 p_hwfn->hw_info.opaque_fid,
2086 b_set);
2087 }
2088
2089 if (b_slowpath)
2090 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
2091 p_info->igu_dsb_id,
2092 p_hwfn->hw_info.opaque_fid,
2093 b_set);
2094 }
2095
2096 int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
2097 struct ecore_ptt *p_ptt)
2098 {
2099 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2100 struct ecore_igu_block *p_block;
2101 int pf_sbs, vf_sbs;
2102 u16 igu_sb_id;
2103 u32 val, rval;
2104
2105 if (!RESC_NUM(p_hwfn, ECORE_SB)) {
2106 /* We're using an old MFW - have to prevent any switching
2107 * of SBs between the PF and VFs, as later the driver wouldn't
2108 * be able to tell which belongs to which.
2109 */
2110 p_info->b_allow_pf_vf_change = false;
2111 } else {
2112 /* Use the numbers the MFW has provided -
2113 * don't forget the MFW accounts for the default SB as well.
2114 */
2115 p_info->b_allow_pf_vf_change = true;
2116
2117 if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
2118 DP_INFO(p_hwfn,
2119 "MFW notifies of 0x%04x PF SBs; IGU indicates only 0x%04x\n",
2120 RESC_NUM(p_hwfn, ECORE_SB) - 1,
2121 p_info->usage.cnt);
2122 p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
2123 }
2124
2125 /* TODO - how do we learn about VF SBs from the MFW? */
2126 if (IS_PF_SRIOV(p_hwfn)) {
2127 u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
2128
2129 if (vfs != p_info->usage.iov_cnt)
2130 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2131 "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
2132 p_info->usage.iov_cnt, vfs);
2133
2134 /* At this point we know the total number of SBs in the
2135 * IGU and the number of PF SBs, so we can validate that
2136 * enough are left for the VFs.
2137 */
2138 if (vfs > p_info->usage.free_cnt +
2139 p_info->usage.free_cnt_iov -
2140 p_info->usage.cnt) {
2141 DP_NOTICE(p_hwfn, true,
2142 "Not enough SBs for VFs - 0x%04x free SBs, of which %04x are needed by the PF, while %04x are required for VFs\n",
2143 p_info->usage.free_cnt +
2144 p_info->usage.free_cnt_iov,
2145 p_info->usage.cnt, vfs);
2146 return ECORE_INVAL;
2147 }
2148 }
2149 }
2150
2151 /* Cap the number of VF SBs by the number of VFs */
2152 if (IS_PF_SRIOV(p_hwfn))
2153 p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs;
2154
2155 /* Mark all SBs as free, now with the right PF/VF division */
2156 p_info->usage.free_cnt = p_info->usage.cnt;
2157 p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
2158 p_info->usage.orig = p_info->usage.cnt;
2159 p_info->usage.iov_orig = p_info->usage.iov_cnt;
2160
2161 /* We now proceed to re-configure the IGU CAM to reflect the initial
2162 * configuration. We can start with the default SB.
2163 */
2164 pf_sbs = p_info->usage.cnt;
2165 vf_sbs = p_info->usage.iov_cnt;
2166
2167 for (igu_sb_id = p_info->igu_dsb_id;
2168 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2169 igu_sb_id++) {
2170 p_block = &p_info->entry[igu_sb_id];
2171 val = 0;
2172
2173 if (!(p_block->status & ECORE_IGU_STATUS_VALID))
2174 continue;
2175
2176 if (p_block->status & ECORE_IGU_STATUS_DSB) {
2177 p_block->function_id = p_hwfn->rel_pf_id;
2178 p_block->is_pf = 1;
2179 p_block->vector_number = 0;
2180 p_block->status = ECORE_IGU_STATUS_VALID |
2181 ECORE_IGU_STATUS_PF |
2182 ECORE_IGU_STATUS_DSB;
2183 } else if (pf_sbs) {
2184 pf_sbs--;
2185 p_block->function_id = p_hwfn->rel_pf_id;
2186 p_block->is_pf = 1;
2187 p_block->vector_number = p_info->usage.cnt - pf_sbs;
2188 p_block->status = ECORE_IGU_STATUS_VALID |
2189 ECORE_IGU_STATUS_PF |
2190 ECORE_IGU_STATUS_FREE;
2191 } else if (vf_sbs) {
2192 p_block->function_id =
2193 p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
2194 p_info->usage.iov_cnt - vf_sbs;
2195 p_block->is_pf = 0;
2196 p_block->vector_number = 0;
2197 p_block->status = ECORE_IGU_STATUS_VALID |
2198 ECORE_IGU_STATUS_FREE;
2199 vf_sbs--;
2200 } else {
2201 p_block->function_id = 0;
2202 p_block->is_pf = 0;
2203 p_block->vector_number = 0;
2204 }
2205
2206 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
2207 p_block->function_id);
2208 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
2209 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
2210 p_block->vector_number);
2211
2212 /* VF entries will be enabled when the VF is initialized */
2213 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
2214
2215 rval = ecore_rd(p_hwfn, p_ptt,
2216 IGU_REG_MAPPING_MEMORY +
2217 sizeof(u32) * igu_sb_id);
2218
2219 if (rval != val) {
2220 ecore_wr(p_hwfn, p_ptt,
2221 IGU_REG_MAPPING_MEMORY +
2222 sizeof(u32) * igu_sb_id,
2223 val);
2224
2225 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2226 "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
2227 igu_sb_id, p_block->function_id,
2228 p_block->is_pf, p_block->vector_number,
2229 rval, val);
2230 }
2231 }
2232
2233 return 0;
2234 }
2235
2236 int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
2237 struct ecore_ptt *p_ptt)
2238 {
2239 struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;
2240
2241 /* Return all the usage indications to default prior to the reset;
2242 * the reset expects the non-orig fields to reflect the initial status
2243 * of the SBs, and will re-calculate the originals based on those.
2244 */
2245 p_cnt->cnt = p_cnt->orig;
2246 p_cnt->free_cnt = p_cnt->orig;
2247 p_cnt->iov_cnt = p_cnt->iov_orig;
2248 p_cnt->free_cnt_iov = p_cnt->iov_orig;
2249 p_cnt->orig = 0;
2250 p_cnt->iov_orig = 0;
2251
2252 /* TODO - we probably need to re-configure the CAU as well...
*/
2253 return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
2254 }
2255
2256 static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
2257 struct ecore_ptt *p_ptt,
2258 u16 igu_sb_id)
2259 {
2260 u32 val = ecore_rd(p_hwfn, p_ptt,
2261 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
2262 struct ecore_igu_block *p_block;
2263
2264 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
2265
2266 /* Fill the block information */
2267 p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
2268 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
2269 p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
2270
2271 p_block->igu_sb_id = igu_sb_id;
2272 }
2273
2274 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
2275 struct ecore_ptt *p_ptt)
2276 {
2277 struct ecore_igu_info *p_igu_info;
2278 struct ecore_igu_block *p_block;
2279 u32 min_vf = 0, max_vf = 0;
2280 u16 igu_sb_id;
2281
2282 p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
2283 GFP_KERNEL,
2284 sizeof(*p_igu_info));
2285 if (!p_hwfn->hw_info.p_igu_info)
2286 return ECORE_NOMEM;
2287 p_igu_info = p_hwfn->hw_info.p_igu_info;
2288
2289 /* Distinguish between an existent and a non-existent default SB */
2290 p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;
2291
2292 /* Find the range of VF ids whose SBs belong to this PF */
2293 if (p_hwfn->p_dev->p_iov_info) {
2294 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
2295
2296 min_vf = p_iov->first_vf_in_pf;
2297 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
2298 }
2299
2300 for (igu_sb_id = 0;
2301 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
2302 igu_sb_id++) {
2303 /* Read the current entry; notice it might not belong to this PF */
2304 ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
2305 p_block = &p_igu_info->entry[igu_sb_id];
2306
2307 if ((p_block->is_pf) &&
2308 (p_block->function_id == p_hwfn->rel_pf_id)) {
2309 p_block->status = ECORE_IGU_STATUS_PF |
2310 ECORE_IGU_STATUS_VALID |
2311 ECORE_IGU_STATUS_FREE;
2312
2313 if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
2314 p_igu_info->usage.cnt++;
2315 } else if (!(p_block->is_pf) &&
2316 (p_block->function_id >= min_vf) &&
2317 (p_block->function_id < max_vf)) {
2318 /* Available for VFs of this PF */
2319 p_block->status = ECORE_IGU_STATUS_VALID |
2320 ECORE_IGU_STATUS_FREE;
2321
2322 if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
2323 p_igu_info->usage.iov_cnt++;
2324 }
2325
2326 /* Mark the first entry belonging to the PF or its VFs
2327 * as the default SB [we'll reset the IGU prior to first usage].
2328 */
2329 if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
2330 (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
2331 p_igu_info->igu_dsb_id = igu_sb_id;
2332 p_block->status |= ECORE_IGU_STATUS_DSB;
2333 }
2334
2335 /* While this isn't suitable for all clients, limit the number
2336 * of prints by having each PF print only its own entries, with the
2337 * exception of PF0 which prints everything.
2338 */ 2339 if ((p_block->status & ECORE_IGU_STATUS_VALID) || 2340 (p_hwfn->abs_pf_id == 0)) 2341 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2342 "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2343 igu_sb_id, p_block->function_id, 2344 p_block->is_pf, p_block->vector_number); 2345 } 2346 2347 if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) { 2348 DP_NOTICE(p_hwfn, true, 2349 "IGU CAM returned invalid values igu_dsb_id=0x%x\n", 2350 p_igu_info->igu_dsb_id); 2351 return ECORE_INVAL; 2352 } 2353 2354 /* All non default SB are considered free at this point */ 2355 p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; 2356 p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; 2357 2358 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2359 "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n", 2360 p_igu_info->igu_dsb_id, p_igu_info->usage.cnt, 2361 p_igu_info->usage.iov_cnt); 2362 2363 return ECORE_SUCCESS; 2364 } 2365 2366 enum _ecore_status_t 2367 ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 2368 u16 sb_id, bool b_to_vf) 2369 { 2370 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2371 struct ecore_igu_block *p_block = OSAL_NULL; 2372 u16 igu_sb_id = 0, vf_num = 0; 2373 u32 val = 0; 2374 2375 if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn)) 2376 return ECORE_INVAL; 2377 2378 if (sb_id == ECORE_SP_SB_ID) 2379 return ECORE_INVAL; 2380 2381 if (!p_info->b_allow_pf_vf_change) { 2382 DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n"); 2383 return ECORE_INVAL; 2384 } 2385 2386 /* If we're moving a SB from PF to VF, the client had to specify 2387 * which vector it wants to move. 2388 */ 2389 if (b_to_vf) { 2390 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1); 2391 if (igu_sb_id == ECORE_SB_INVALID_IDX) 2392 return ECORE_INVAL; 2393 } 2394 2395 /* If we're moving a SB from VF to PF, need to validate there isn't 2396 * already a line configured for that vector. 2397 */ 2398 if (!b_to_vf) { 2399 if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) != 2400 ECORE_SB_INVALID_IDX) 2401 return ECORE_INVAL; 2402 } 2403 2404 /* We need to validate that the SB can actually be relocated. 2405 * This would also handle the previous case where we've explicitly 2406 * stated which IGU SB needs to move. 2407 */ 2408 for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2409 igu_sb_id++) { 2410 p_block = &p_info->entry[igu_sb_id]; 2411 2412 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 2413 !(p_block->status & ECORE_IGU_STATUS_FREE) || 2414 (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) { 2415 if (b_to_vf) 2416 return ECORE_INVAL; 2417 else 2418 continue; 2419 } 2420 2421 break; 2422 } 2423 2424 if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) { 2425 DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV), 2426 "Failed to find a free SB to move\n"); 2427 return ECORE_INVAL; 2428 } 2429 2430 /* At this point, p_block points to the SB we want to relocate */ 2431 if (b_to_vf) { 2432 p_block->status &= ~ECORE_IGU_STATUS_PF; 2433 2434 /* It doesn't matter which VF number we choose, since we're 2435 * going to disable the line; But let's keep it in range. 
*/
2437 vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
2438
2439 p_block->function_id = (u8)vf_num;
2440 p_block->is_pf = 0;
2441 p_block->vector_number = 0;
2442
2443 p_info->usage.cnt--;
2444 p_info->usage.free_cnt--;
2445 p_info->usage.iov_cnt++;
2446 p_info->usage.free_cnt_iov++;
2447
2448 /* TODO - if SBs aren't really the limiting factor,
2449 * then this might not be accurate [in the sense that
2450 * we might not need to decrement the feature].
2451 */
2452 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--;
2453 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++;
2454 } else {
2455 p_block->status |= ECORE_IGU_STATUS_PF;
2456 p_block->function_id = p_hwfn->rel_pf_id;
2457 p_block->is_pf = 1;
2458 p_block->vector_number = sb_id + 1;
2459
2460 p_info->usage.cnt++;
2461 p_info->usage.free_cnt++;
2462 p_info->usage.iov_cnt--;
2463 p_info->usage.free_cnt_iov--;
2464
2465 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++;
2466 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--;
2467 }
2468
2469 /* Update the IGU and CAU with the new configuration */
2470 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
2471 p_block->function_id);
2472 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
2473 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
2474 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
2475 p_block->vector_number);
2476
2477 ecore_wr(p_hwfn, p_ptt,
2478 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id,
2479 val);
2480
2481 ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0,
2482 igu_sb_id, vf_num,
2483 p_block->is_pf ? 0 : 1);
2484
2485 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
2486 "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
2487 igu_sb_id, p_block->function_id,
2488 p_block->is_pf, p_block->vector_number);
2489
2490 return ECORE_SUCCESS;
2491 }
2492
2493 /**
2494 * @brief Initialize IGU runtime registers
2495 *
2496 * @param p_hwfn
2497 */
2498 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
2499 {
2500 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
2501
2502 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
2503 }
2504
2505 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
2506 IGU_CMD_INT_ACK_BASE)
2507 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
2508 IGU_CMD_INT_ACK_BASE)
2509 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
2510 {
2511 u32 intr_status_hi = 0, intr_status_lo = 0;
2512 u64 intr_status = 0;
2513
2514 intr_status_lo = REG_RD(p_hwfn,
2515 GTT_BAR0_MAP_REG_IGU_CMD +
2516 LSB_IGU_CMD_ADDR * 8);
2517 intr_status_hi = REG_RD(p_hwfn,
2518 GTT_BAR0_MAP_REG_IGU_CMD +
2519 MSB_IGU_CMD_ADDR * 8);
2520 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
2521
2522 return intr_status;
2523 }
2524
2525 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
2526 {
2527 OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
2528 p_hwfn->b_sp_dpc_enabled = true;
2529 }
2530
2531 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
2532 {
2533 p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
2534 if (!p_hwfn->sp_dpc)
2535 return ECORE_NOMEM;
2536
2537 return ECORE_SUCCESS;
2538 }
2539
2540 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
2541 {
2542 OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
2543 }
2544
2545 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
2546 struct ecore_ptt *p_ptt)
2547 {
2548 enum _ecore_status_t rc = ECORE_SUCCESS;
2549
2550 rc = ecore_int_sp_dpc_alloc(p_hwfn);
2551 if (rc != ECORE_SUCCESS) {
2552 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc
mem\n"); 2553 return rc; 2554 } 2555 2556 rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt); 2557 if (rc != ECORE_SUCCESS) { 2558 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n"); 2559 return rc; 2560 } 2561 2562 rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt); 2563 if (rc != ECORE_SUCCESS) 2564 DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n"); 2565 2566 return rc; 2567 } 2568 2569 void ecore_int_free(struct ecore_hwfn *p_hwfn) 2570 { 2571 ecore_int_sp_sb_free(p_hwfn); 2572 ecore_int_sb_attn_free(p_hwfn); 2573 ecore_int_sp_dpc_free(p_hwfn); 2574 } 2575 2576 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 2577 { 2578 if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn) 2579 return; 2580 2581 ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); 2582 ecore_int_sb_attn_setup(p_hwfn, p_ptt); 2583 ecore_int_sp_dpc_setup(p_hwfn); 2584 } 2585 2586 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn, 2587 struct ecore_sb_cnt_info *p_sb_cnt_info) 2588 { 2589 struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info; 2590 2591 if (!p_igu_info || !p_sb_cnt_info) 2592 return; 2593 2594 OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage, 2595 sizeof(*p_sb_cnt_info)); 2596 } 2597 2598 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev) 2599 { 2600 int i; 2601 2602 for_each_hwfn(p_dev, i) 2603 p_dev->hwfns[i].b_int_requested = false; 2604 } 2605 2606 void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable) 2607 { 2608 p_dev->attn_clr_en = clr_enable; 2609 } 2610 2611 enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn, 2612 struct ecore_ptt *p_ptt, 2613 u8 timer_res, u16 sb_id, bool tx) 2614 { 2615 struct cau_sb_entry sb_entry; 2616 enum _ecore_status_t rc; 2617 2618 if (!p_hwfn->hw_init_done) { 2619 DP_ERR(p_hwfn, "hardware not initialized yet\n"); 2620 return ECORE_INVAL; 2621 } 2622 2623 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2624 sb_id * sizeof(u64), 2625 (u64)(osal_uintptr_t)&sb_entry, 2, 0); 2626 if (rc != ECORE_SUCCESS) { 2627 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2628 return rc; 2629 } 2630 2631 if (tx) 2632 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 2633 else 2634 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 2635 2636 rc = ecore_dmae_host2grc(p_hwfn, p_ptt, 2637 (u64)(osal_uintptr_t)&sb_entry, 2638 CAU_REG_SB_VAR_MEMORY + 2639 sb_id * sizeof(u64), 2, 0); 2640 if (rc != ECORE_SUCCESS) { 2641 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); 2642 return rc; 2643 } 2644 2645 return rc; 2646 } 2647 2648 enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn, 2649 struct ecore_ptt *p_ptt, 2650 struct ecore_sb_info *p_sb, 2651 struct ecore_sb_info_dbg *p_info) 2652 { 2653 u16 sbid = p_sb->igu_sb_id; 2654 int i; 2655 2656 if (IS_VF(p_hwfn->p_dev)) 2657 return ECORE_INVAL; 2658 2659 if (sbid > NUM_OF_SBS(p_hwfn->p_dev)) 2660 return ECORE_INVAL; 2661 2662 p_info->igu_prod = ecore_rd(p_hwfn, p_ptt, 2663 IGU_REG_PRODUCER_MEMORY + sbid * 4); 2664 p_info->igu_cons = ecore_rd(p_hwfn, p_ptt, 2665 IGU_REG_CONSUMER_MEM + sbid * 4); 2666 2667 for (i = 0; i < PIS_PER_SB_E4; i++) 2668 p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt, 2669 CAU_REG_PI_MEMORY + 2670 sbid * 4 * PIS_PER_SB_E4 + 2671 i * 4); 2672 2673 return ECORE_SUCCESS; 2674 } 2675