1 /* 2 * Copyright (c) 2016 QLogic Corporation. 3 * All rights reserved. 4 * www.qlogic.com 5 * 6 * See LICENSE.qede_pmd for copyright and licensing details. 7 */ 8 9 #include "bcm_osal.h" 10 #include "ecore.h" 11 #include "ecore_spq.h" 12 #include "reg_addr.h" 13 #include "ecore_gtt_reg_addr.h" 14 #include "ecore_init_ops.h" 15 #include "ecore_rt_defs.h" 16 #include "ecore_int.h" 17 #include "reg_addr.h" 18 #include "ecore_hw.h" 19 #include "ecore_sriov.h" 20 #include "ecore_vf.h" 21 #include "ecore_hw_defs.h" 22 #include "ecore_hsi_common.h" 23 #include "ecore_mcp.h" 24 25 struct ecore_pi_info { 26 ecore_int_comp_cb_t comp_cb; 27 void *cookie; /* Will be sent to the compl cb function */ 28 }; 29 30 struct ecore_sb_sp_info { 31 struct ecore_sb_info sb_info; 32 /* per protocol index data */ 33 struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4]; 34 }; 35 36 enum ecore_attention_type { 37 ECORE_ATTN_TYPE_ATTN, 38 ECORE_ATTN_TYPE_PARITY, 39 }; 40 41 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ 42 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) 43 44 struct aeu_invert_reg_bit { 45 char bit_name[30]; 46 47 #define ATTENTION_PARITY (1 << 0) 48 49 #define ATTENTION_LENGTH_MASK (0x00000ff0) 50 #define ATTENTION_LENGTH_SHIFT (4) 51 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ 52 ATTENTION_LENGTH_SHIFT) 53 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT) 54 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) 55 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ 56 ATTENTION_PARITY) 57 58 /* Multiple bits start with this offset */ 59 #define ATTENTION_OFFSET_MASK (0x000ff000) 60 #define ATTENTION_OFFSET_SHIFT (12) 61 62 #define ATTENTION_BB_MASK (0x00700000) 63 #define ATTENTION_BB_SHIFT (20) 64 #define ATTENTION_BB(value) ((value) << ATTENTION_BB_SHIFT) 65 #define ATTENTION_BB_DIFFERENT (1 << 23) 66 67 #define ATTENTION_CLEAR_ENABLE (1 << 28) 68 unsigned int flags; 69 70 /* Callback to call if attention will be triggered */ 71 enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn); 72 73 enum block_id block_index; 74 }; 75 76 struct aeu_invert_reg { 77 struct aeu_invert_reg_bit bits[32]; 78 }; 79 80 #define MAX_ATTN_GRPS (8) 81 #define NUM_ATTN_REGS (9) 82 83 static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn) 84 { 85 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); 86 87 DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp); 88 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff); 89 90 return ECORE_SUCCESS; 91 } 92 93 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000) 94 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14) 95 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0) 96 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6) 97 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020) 98 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5) 99 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e) 100 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1) 101 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1) 102 #define ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT (0) 103 #define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1) 104 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) 105 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) 106 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) 107 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e) 108 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) 109 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20) 110 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) 111 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0) 112 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) 113 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000) 114 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) 115 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000) 116 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) 117 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn) 118 { 119 u32 tmp = 120 ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 121 PSWHST_REG_VF_DISABLED_ERROR_VALID); 122 123 /* Disabled VF access */ 124 if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) { 125 u32 addr, data; 126 127 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 128 PSWHST_REG_VF_DISABLED_ERROR_ADDRESS); 129 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 130 PSWHST_REG_VF_DISABLED_ERROR_DATA); 131 DP_INFO(p_hwfn->p_dev, 132 "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]" 133 " Write [0x%02x] Addr [0x%08x]\n", 134 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK) 135 >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT), 136 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK) 137 >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT), 138 (u8)((data & 139 ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >> 140 ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT), 141 (u8)((data & 142 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >> 143 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT), 144 (u8)((data & 145 ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >> 146 ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT), 147 addr); 148 } 149 150 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 151 PSWHST_REG_INCORRECT_ACCESS_VALID); 152 if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) { 153 u32 addr, data, length; 154 155 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 156 PSWHST_REG_INCORRECT_ACCESS_ADDRESS); 157 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 158 PSWHST_REG_INCORRECT_ACCESS_DATA); 159 length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 160 PSWHST_REG_INCORRECT_ACCESS_LENGTH); 161 162 DP_INFO(p_hwfn->p_dev, 163 "Incorrect access to %08x of length %08x - PF [%02x]" 164 " VF [%04x] [valid %02x] client [%02x] write [%02x]" 165 " Byte-Enable [%04x] [%08x]\n", 166 addr, length, 167 (u8)((data & 168 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >> 169 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT), 170 (u8)((data & 171 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >> 172 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT), 173 (u8)((data & 174 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >> 175 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT), 176 (u8)((data & 177 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >> 178 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT), 179 (u8)((data & 180 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >> 181 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT), 182 (u8)((data & 183 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >> 184 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT), 185 data); 186 } 187 188 /* TODO - We know 'some' of these are legal due to virtualization, 189 * but is it true for all of them? 190 */ 191 return ECORE_SUCCESS; 192 } 193 194 #define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0) 195 #define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0) 196 #define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23) 197 #define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24) 198 #define ECORE_GRC_ATTENTION_MASTER_SHIFT (24) 199 #define ECORE_GRC_ATTENTION_PF_MASK (0xf) 200 #define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4) 201 #define ECORE_GRC_ATTENTION_VF_SHIFT (4) 202 #define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14) 203 #define ECORE_GRC_ATTENTION_PRIV_SHIFT (14) 204 #define ECORE_GRC_ATTENTION_PRIV_VF (0) 205 static const char *grc_timeout_attn_master_to_str(u8 master) 206 { 207 switch (master) { 208 case 1: 209 return "PXP"; 210 case 2: 211 return "MCP"; 212 case 3: 213 return "MSDM"; 214 case 4: 215 return "PSDM"; 216 case 5: 217 return "YSDM"; 218 case 6: 219 return "USDM"; 220 case 7: 221 return "TSDM"; 222 case 8: 223 return "XSDM"; 224 case 9: 225 return "DBU"; 226 case 10: 227 return "DMAE"; 228 default: 229 return "Unknown"; 230 } 231 } 232 233 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn) 234 { 235 u32 tmp, tmp2; 236 237 /* We've already cleared the timeout interrupt register, so we learn 238 * of interrupts via the validity register 239 */ 240 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 241 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); 242 if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) 243 goto out; 244 245 /* Read the GRC timeout information */ 246 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 247 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); 248 tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 249 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); 250 251 DP_NOTICE(p_hwfn->p_dev, false, 252 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", 253 tmp2, tmp, 254 (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" 255 : "Read from", 256 (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2, 257 grc_timeout_attn_master_to_str( 258 (tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >> 259 ECORE_GRC_ATTENTION_MASTER_SHIFT), 260 (tmp2 & ECORE_GRC_ATTENTION_PF_MASK), 261 (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >> 262 ECORE_GRC_ATTENTION_PRIV_SHIFT) == 263 ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)", 264 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >> 265 ECORE_GRC_ATTENTION_VF_SHIFT); 266 267 out: 268 /* Regardles of anything else, clean the validity bit */ 269 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 270 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 271 return ECORE_SUCCESS; 272 } 273 274 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29) 275 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26) 276 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20) 277 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 278 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19) 279 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24) 280 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 281 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21) 282 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22) 283 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23) 284 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23) 285 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25) 286 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23) 287 288 enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, 289 struct ecore_ptt *p_ptt) 290 { 291 u32 tmp; 292 293 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); 294 if (tmp & ECORE_PGLUE_ATTENTION_VALID) { 295 u32 addr_lo, addr_hi, details; 296 297 addr_lo = ecore_rd(p_hwfn, p_ptt, 298 PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 299 addr_hi = ecore_rd(p_hwfn, p_ptt, 300 PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 301 details = ecore_rd(p_hwfn, p_ptt, 302 PGLUE_B_REG_TX_ERR_WR_DETAILS); 303 304 DP_NOTICE(p_hwfn, false, 305 "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 306 addr_hi, addr_lo, details, 307 (u8)((details & 308 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> 309 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 310 (u8)((details & 311 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> 312 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 313 (u8)((details & 314 ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 315 tmp, 316 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 317 1 : 0), 318 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 319 1 : 0), 320 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 321 1 : 0)); 322 } 323 324 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); 325 if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) { 326 u32 addr_lo, addr_hi, details; 327 328 addr_lo = ecore_rd(p_hwfn, p_ptt, 329 PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 330 addr_hi = ecore_rd(p_hwfn, p_ptt, 331 PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 332 details = ecore_rd(p_hwfn, p_ptt, 333 PGLUE_B_REG_TX_ERR_RD_DETAILS); 334 335 DP_NOTICE(p_hwfn, false, 336 "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 337 addr_hi, addr_lo, details, 338 (u8)((details & 339 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> 340 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 341 (u8)((details & 342 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> 343 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 344 (u8)((details & 345 ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 346 tmp, 347 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 348 1 : 0), 349 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 350 1 : 0), 351 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 352 1 : 0)); 353 } 354 355 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); 356 if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID) 357 DP_NOTICE(p_hwfn, false, "ICPL erorr - %08x\n", tmp); 358 359 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); 360 if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) { 361 u32 addr_hi, addr_lo; 362 363 addr_lo = ecore_rd(p_hwfn, p_ptt, 364 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); 365 addr_hi = ecore_rd(p_hwfn, p_ptt, 366 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); 367 368 DP_NOTICE(p_hwfn, false, 369 "ICPL erorr - %08x [Address %08x:%08x]\n", 370 tmp, addr_hi, addr_lo); 371 } 372 373 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2); 374 if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) { 375 u32 addr_hi, addr_lo, details; 376 377 addr_lo = ecore_rd(p_hwfn, p_ptt, 378 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); 379 addr_hi = ecore_rd(p_hwfn, p_ptt, 380 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); 381 details = ecore_rd(p_hwfn, p_ptt, 382 PGLUE_B_REG_VF_ILT_ERR_DETAILS); 383 384 DP_NOTICE(p_hwfn, false, 385 "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", 386 details, tmp, addr_hi, addr_lo); 387 } 388 389 /* Clear the indications */ 390 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); 391 392 return ECORE_SUCCESS; 393 } 394 395 static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn) 396 { 397 return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); 398 } 399 400 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn) 401 { 402 DP_NOTICE(p_hwfn, false, "FW assertion!\n"); 403 404 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT); 405 406 return ECORE_INVAL; 407 } 408 409 static enum _ecore_status_t 410 ecore_general_attention_35(struct ecore_hwfn *p_hwfn) 411 { 412 DP_INFO(p_hwfn, "General attention 35!\n"); 413 414 return ECORE_SUCCESS; 415 } 416 417 #define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff) 418 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff) 419 #define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) 420 #define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f) 421 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16) 422 423 #define ECORE_DB_REC_COUNT 10 424 #define ECORE_DB_REC_INTERVAL 100 425 426 /* assumes sticky overflow indication was set for this PF */ 427 static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn, 428 struct ecore_ptt *p_ptt) 429 { 430 u8 count = ECORE_DB_REC_COUNT; 431 u32 usage = 1; 432 433 /* wait for usage to zero or count to run out. This is necessary since 434 * EDPM doorbell transactions can take multiple 64b cycles, and as such 435 * can "split" over the pci. Possibly, the doorbell drop can happen with 436 * half an EDPM in the queue and other half dropped. Another EDPM 437 * doorbell to the same address (from doorbell recovery mechanism or 438 * from the doorbelling entity) could have first half dropped and second 439 * half interperted as continuation of the first. To prevent such 440 * malformed doorbells from reaching the device, flush the queue before 441 * releaseing the overflow sticky indication. 442 */ 443 while (count-- && usage) { 444 usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT); 445 OSAL_UDELAY(ECORE_DB_REC_INTERVAL); 446 } 447 448 /* should have been depleted by now */ 449 if (usage) { 450 DP_NOTICE(p_hwfn->p_dev, false, 451 "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n", 452 ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage); 453 return ECORE_TIMEOUT; 454 } 455 456 /* flush any pedning (e)dpm as they may never arrive */ 457 ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); 458 459 /* release overflow sticky indication (stop silently dropping 460 * everything) 461 */ 462 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); 463 464 /* repeat all last doorbells (doorbell drop recovery) */ 465 ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL); 466 467 return ECORE_SUCCESS; 468 } 469 470 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn) 471 { 472 u32 int_sts, first_drop_reason, details, address, overflow, 473 all_drops_reason; 474 struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt; 475 enum _ecore_status_t rc; 476 477 int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); 478 DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n", 479 int_sts); 480 481 /* int_sts may be zero since all PFs were interrupted for doorbell 482 * overflow but another one already handled it. Can abort here. If 483 * This PF also requires overflow recovery we will be interrupted again 484 */ 485 if (!int_sts) 486 return ECORE_SUCCESS; 487 488 /* check if db_drop or overflow happened */ 489 if (int_sts & (DORQ_REG_INT_STS_DB_DROP | 490 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { 491 /* obtain data about db drop/overflow */ 492 first_drop_reason = ecore_rd(p_hwfn, p_ptt, 493 DORQ_REG_DB_DROP_REASON) & 494 ECORE_DORQ_ATTENTION_REASON_MASK; 495 details = ecore_rd(p_hwfn, p_ptt, 496 DORQ_REG_DB_DROP_DETAILS); 497 address = ecore_rd(p_hwfn, p_ptt, 498 DORQ_REG_DB_DROP_DETAILS_ADDRESS); 499 overflow = ecore_rd(p_hwfn, p_ptt, 500 DORQ_REG_PF_OVFL_STICKY); 501 all_drops_reason = ecore_rd(p_hwfn, p_ptt, 502 DORQ_REG_DB_DROP_DETAILS_REASON); 503 504 /* log info */ 505 DP_NOTICE(p_hwfn->p_dev, false, 506 "Doorbell drop occurred\n" 507 "Address\t\t0x%08x\t(second BAR address)\n" 508 "FID\t\t0x%04x\t\t(Opaque FID)\n" 509 "Size\t\t0x%04x\t\t(in bytes)\n" 510 "1st drop reason\t0x%08x\t(details on first drop since last handling)\n" 511 "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n" 512 "Overflow\t0x%x\t\t(a per PF indication)\n", 513 address, 514 GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE), 515 GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4, 516 first_drop_reason, all_drops_reason, overflow); 517 518 /* if this PF caused overflow, initiate recovery */ 519 if (overflow) { 520 rc = ecore_db_rec_attn(p_hwfn, p_ptt); 521 if (rc != ECORE_SUCCESS) 522 return rc; 523 } 524 525 /* clear the doorbell drop details and prepare for next drop */ 526 ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); 527 528 /* mark interrupt as handeld (note: even if drop was due to a 529 * different reason than overflow we mark as handled) 530 */ 531 ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR, 532 DORQ_REG_INT_STS_DB_DROP | 533 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR); 534 535 /* if there are no indications otherthan drop indications, 536 * success 537 */ 538 if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP | 539 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR | 540 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0) 541 return ECORE_SUCCESS; 542 } 543 544 /* some other indication was present - non recoverable */ 545 DP_INFO(p_hwfn, "DORQ fatal attention\n"); 546 547 return ECORE_INVAL; 548 } 549 550 static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn) 551 { 552 #ifndef ASIC_ONLY 553 if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) { 554 u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 555 TM_REG_INT_STS_1); 556 557 if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN | 558 TM_REG_INT_STS_1_PEND_CONN_SCAN)) 559 return ECORE_INVAL; 560 561 if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN | 562 TM_REG_INT_STS_1_PEND_CONN_SCAN)) 563 DP_INFO(p_hwfn, 564 "TM attention on emulation - most likely" 565 " results of clock-ratios\n"); 566 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1); 567 val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN | 568 TM_REG_INT_MASK_1_PEND_TASK_SCAN; 569 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val); 570 571 return ECORE_SUCCESS; 572 } 573 #endif 574 575 return ECORE_INVAL; 576 } 577 578 /* Instead of major changes to the data-structure, we have a some 'special' 579 * identifiers for sources that changed meaning between adapters. 580 */ 581 enum aeu_invert_reg_special_type { 582 AEU_INVERT_REG_SPECIAL_CNIG_0, 583 AEU_INVERT_REG_SPECIAL_CNIG_1, 584 AEU_INVERT_REG_SPECIAL_CNIG_2, 585 AEU_INVERT_REG_SPECIAL_CNIG_3, 586 AEU_INVERT_REG_SPECIAL_MAX, 587 }; 588 589 static struct aeu_invert_reg_bit 590 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = { 591 {"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 592 {"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 593 {"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 594 {"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 595 }; 596 597 /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ 598 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { 599 { 600 { /* After Invert 1 */ 601 {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 602 MAX_BLOCK_ID}, 603 } 604 }, 605 606 { 607 { /* After Invert 2 */ 608 {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 609 {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 610 {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb, 611 BLOCK_PGLUE_B}, 612 {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 613 {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 614 {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 615 {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 616 {"SW timers #%d", 617 (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), 618 OSAL_NULL, MAX_BLOCK_ID}, 619 {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 620 BLOCK_PGLCS}, 621 } 622 }, 623 624 { 625 { /* After Invert 3 */ 626 {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 627 MAX_BLOCK_ID}, 628 } 629 }, 630 631 { 632 { /* After Invert 4 */ 633 {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, 634 ecore_fw_assertion, MAX_BLOCK_ID}, 635 {"General Attention %d", 636 (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT), 637 OSAL_NULL, MAX_BLOCK_ID}, 638 {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, 639 ecore_general_attention_35, MAX_BLOCK_ID}, 640 {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 641 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), 642 OSAL_NULL, BLOCK_NWS}, 643 {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 644 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), 645 OSAL_NULL, BLOCK_NWS}, 646 {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 647 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), 648 OSAL_NULL, BLOCK_NWM}, 649 {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 650 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), 651 OSAL_NULL, BLOCK_NWM}, 652 {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID}, 653 {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 654 {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 655 {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 656 {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 657 {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 658 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 659 MAX_BLOCK_ID}, 660 {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG}, 661 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB}, 662 {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB}, 663 {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB}, 664 {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS}, 665 } 666 }, 667 668 { 669 { /* After Invert 5 */ 670 {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC}, 671 {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1}, 672 {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2}, 673 {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB}, 674 {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF}, 675 {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM}, 676 {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM}, 677 {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM}, 678 {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM}, 679 {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM}, 680 {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM}, 681 {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM}, 682 {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM}, 683 {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM}, 684 {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM}, 685 {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM}, 686 } 687 }, 688 689 { 690 { /* After Invert 6 */ 691 {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM}, 692 {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM}, 693 {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM}, 694 {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM}, 695 {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM}, 696 {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM}, 697 {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM}, 698 {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM}, 699 {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM}, 700 {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD}, 701 {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD}, 702 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD}, 703 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD}, 704 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ}, 705 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG}, 706 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC}, 707 } 708 }, 709 710 { 711 { /* After Invert 7 */ 712 {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC}, 713 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU}, 714 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE}, 715 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU}, 716 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 717 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU}, 718 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU}, 719 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM}, 720 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC}, 721 {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF}, 722 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF}, 723 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS}, 724 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC}, 725 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS}, 726 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE}, 727 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 728 {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ}, 729 } 730 }, 731 732 { 733 { /* After Invert 8 */ 734 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2}, 735 {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR}, 736 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2}, 737 {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD}, 738 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2}, 739 {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST}, 740 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2}, 741 {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC}, 742 {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU}, 743 {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI}, 744 {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 745 {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 746 {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 747 {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 748 {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 749 {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 750 {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS}, 751 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 752 {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 753 {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 754 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 755 MAX_BLOCK_ID}, 756 } 757 }, 758 759 { 760 { /* After Invert 9 */ 761 {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 762 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, 763 MAX_BLOCK_ID}, 764 {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 765 {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 766 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 767 MAX_BLOCK_ID}, 768 } 769 }, 770 771 }; 772 773 static struct aeu_invert_reg_bit * 774 ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn, 775 struct aeu_invert_reg_bit *p_bit) 776 { 777 if (!ECORE_IS_BB(p_hwfn->p_dev)) 778 return p_bit; 779 780 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) 781 return p_bit; 782 783 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> 784 ATTENTION_BB_SHIFT]; 785 } 786 787 static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn, 788 struct aeu_invert_reg_bit *p_bit) 789 { 790 return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags & 791 ATTENTION_PARITY); 792 } 793 794 #define ATTN_STATE_BITS (0xfff) 795 #define ATTN_BITS_MASKABLE (0x3ff) 796 struct ecore_sb_attn_info { 797 /* Virtual & Physical address of the SB */ 798 struct atten_status_block *sb_attn; 799 dma_addr_t sb_phys; 800 801 /* Last seen running index */ 802 u16 index; 803 804 /* A mask of the AEU bits resulting in a parity error */ 805 u32 parity_mask[NUM_ATTN_REGS]; 806 807 /* A pointer to the attention description structure */ 808 struct aeu_invert_reg *p_aeu_desc; 809 810 /* Previously asserted attentions, which are still unasserted */ 811 u16 known_attn; 812 813 /* Cleanup address for the link's general hw attention */ 814 u32 mfw_attn_addr; 815 }; 816 817 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn, 818 struct ecore_sb_attn_info *p_sb_desc) 819 { 820 u16 rc = 0, index; 821 822 OSAL_MMIOWB(p_hwfn->p_dev); 823 824 index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index); 825 if (p_sb_desc->index != index) { 826 p_sb_desc->index = index; 827 rc = ECORE_SB_ATT_IDX; 828 } 829 830 OSAL_MMIOWB(p_hwfn->p_dev); 831 832 return rc; 833 } 834 835 /** 836 * @brief ecore_int_assertion - handles asserted attention bits 837 * 838 * @param p_hwfn 839 * @param asserted_bits newly asserted bits 840 * @return enum _ecore_status_t 841 */ 842 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn, 843 u16 asserted_bits) 844 { 845 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 846 u32 igu_mask; 847 848 /* Mask the source of the attention in the IGU */ 849 igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 850 IGU_REG_ATTENTION_ENABLE); 851 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", 852 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); 853 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); 854 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); 855 856 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 857 "inner known ATTN state: 0x%04x --> 0x%04x\n", 858 sb_attn_sw->known_attn, 859 sb_attn_sw->known_attn | asserted_bits); 860 sb_attn_sw->known_attn |= asserted_bits; 861 862 /* Handle MCP events */ 863 if (asserted_bits & 0x100) { 864 ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); 865 /* Clean the MCP attention */ 866 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 867 sb_attn_sw->mfw_attn_addr, 0); 868 } 869 870 /* FIXME - this will change once we'll have GOOD gtt definitions */ 871 DIRECT_REG_WR(p_hwfn, 872 (u8 OSAL_IOMEM *) p_hwfn->regview + 873 GTT_BAR0_MAP_REG_IGU_CMD + 874 ((IGU_CMD_ATTN_BIT_SET_UPPER - 875 IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits); 876 877 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n", 878 asserted_bits); 879 880 return ECORE_SUCCESS; 881 } 882 883 static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn, 884 enum block_id id, enum dbg_attn_type type, 885 bool b_clear) 886 { 887 /* @DPDK */ 888 DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type); 889 } 890 891 /** 892 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single 893 * cause of the attention 894 * 895 * @param p_hwfn 896 * @param p_aeu - descriptor of an AEU bit which caused the attention 897 * @param aeu_en_reg - register offset of the AEU enable reg. which configured 898 * this bit to this group. 899 * @param bit_index - index of this bit in the aeu_en_reg 900 * 901 * @return enum _ecore_status_t 902 */ 903 static enum _ecore_status_t 904 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn, 905 struct aeu_invert_reg_bit *p_aeu, 906 u32 aeu_en_reg, 907 const char *p_bit_name, 908 u32 bitmask) 909 { 910 enum _ecore_status_t rc = ECORE_INVAL; 911 bool b_fatal = false; 912 913 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", 914 p_bit_name, bitmask); 915 916 /* Call callback before clearing the interrupt status */ 917 if (p_aeu->cb) { 918 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", 919 p_bit_name); 920 rc = p_aeu->cb(p_hwfn); 921 } 922 923 if (rc != ECORE_SUCCESS) 924 b_fatal = true; 925 926 /* Print HW block interrupt registers */ 927 if (p_aeu->block_index != MAX_BLOCK_ID) { 928 ecore_int_attn_print(p_hwfn, p_aeu->block_index, 929 ATTN_TYPE_INTERRUPT, !b_fatal); 930 } 931 932 /* @DPDK */ 933 /* Reach assertion if attention is fatal */ 934 if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) { 935 DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n", 936 p_bit_name); 937 938 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); 939 } 940 941 /* Prevent this Attention from being asserted in the future */ 942 if (p_aeu->flags & ATTENTION_CLEAR_ENABLE || 943 p_hwfn->p_dev->attn_clr_en) { 944 u32 val; 945 u32 mask = ~bitmask; 946 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 947 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask)); 948 DP_ERR(p_hwfn, "`%s' - Disabled future attentions\n", 949 p_bit_name); 950 } 951 952 return rc; 953 } 954 955 /** 956 * @brief ecore_int_deassertion_parity - handle a single parity AEU source 957 * 958 * @param p_hwfn 959 * @param p_aeu - descriptor of an AEU bit which caused the parity 960 * @param aeu_en_reg - address of the AEU enable register 961 * @param bit_index 962 */ 963 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn, 964 struct aeu_invert_reg_bit *p_aeu, 965 u32 aeu_en_reg, u8 bit_index) 966 { 967 u32 block_id = p_aeu->block_index, mask, val; 968 969 DP_NOTICE(p_hwfn->p_dev, false, 970 "%s parity attention is set [address 0x%08x, bit %d]\n", 971 p_aeu->bit_name, aeu_en_reg, bit_index); 972 973 if (block_id != MAX_BLOCK_ID) { 974 ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false); 975 976 /* In A0, there's a single parity bit for several blocks */ 977 if (block_id == BLOCK_BTB) { 978 ecore_int_attn_print(p_hwfn, BLOCK_OPTE, 979 ATTN_TYPE_PARITY, false); 980 ecore_int_attn_print(p_hwfn, BLOCK_MCP, 981 ATTN_TYPE_PARITY, false); 982 } 983 } 984 985 /* Prevent this parity error from being re-asserted */ 986 mask = ~(0x1 << bit_index); 987 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 988 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask); 989 DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n", 990 p_aeu->bit_name); 991 } 992 993 /** 994 * @brief - handles deassertion of previously asserted attentions. 995 * 996 * @param p_hwfn 997 * @param deasserted_bits - newly deasserted bits 998 * @return enum _ecore_status_t 999 * 1000 */ 1001 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn, 1002 u16 deasserted_bits) 1003 { 1004 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 1005 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en; 1006 u8 i, j, k, bit_idx; 1007 enum _ecore_status_t rc = ECORE_SUCCESS; 1008 1009 /* Read the attention registers in the AEU */ 1010 for (i = 0; i < NUM_ATTN_REGS; i++) { 1011 aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1012 MISC_REG_AEU_AFTER_INVERT_1_IGU + 1013 i * 0x4); 1014 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1015 "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]); 1016 } 1017 1018 /* Handle parity attentions first */ 1019 for (i = 0; i < NUM_ATTN_REGS; i++) { 1020 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; 1021 u32 parities; 1022 1023 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32); 1024 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1025 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; 1026 1027 /* Skip register in which no parity bit is currently set */ 1028 if (!parities) 1029 continue; 1030 1031 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 1032 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; 1033 1034 if (ecore_int_is_parity_flag(p_hwfn, p_bit) && 1035 !!(parities & (1 << bit_idx))) 1036 ecore_int_deassertion_parity(p_hwfn, p_bit, 1037 aeu_en, bit_idx); 1038 1039 bit_idx += ATTENTION_LENGTH(p_bit->flags); 1040 } 1041 } 1042 1043 /* Find non-parity cause for attention and act */ 1044 for (k = 0; k < MAX_ATTN_GRPS; k++) { 1045 struct aeu_invert_reg_bit *p_aeu; 1046 1047 /* Handle only groups whose attention is currently deasserted */ 1048 if (!(deasserted_bits & (1 << k))) 1049 continue; 1050 1051 for (i = 0; i < NUM_ATTN_REGS; i++) { 1052 u32 bits; 1053 1054 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + 1055 i * sizeof(u32) + 1056 k * sizeof(u32) * NUM_ATTN_REGS; 1057 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1058 bits = aeu_inv_arr[i] & en; 1059 1060 /* Skip if no bit from this group is currently set */ 1061 if (!bits) 1062 continue; 1063 1064 /* Find all set bits from current register which belong 1065 * to current group, making them responsible for the 1066 * previous assertion. 1067 */ 1068 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 1069 unsigned long int bitmask; 1070 u8 bit, bit_len; 1071 1072 /* Need to account bits with changed meaning */ 1073 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; 1074 1075 bit = bit_idx; 1076 bit_len = ATTENTION_LENGTH(p_aeu->flags); 1077 if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) { 1078 /* Skip Parity */ 1079 bit++; 1080 bit_len--; 1081 } 1082 1083 /* Find the bits relating to HW-block, then 1084 * shift so they'll become LSB. 1085 */ 1086 bitmask = bits & (((1 << bit_len) - 1) << bit); 1087 bitmask >>= bit; 1088 1089 if (bitmask) { 1090 u32 flags = p_aeu->flags; 1091 char bit_name[30]; 1092 u8 num; 1093 1094 num = (u8)OSAL_FIND_FIRST_BIT(&bitmask, 1095 bit_len); 1096 1097 /* Some bits represent more than a 1098 * a single interrupt. Correctly print 1099 * their name. 1100 */ 1101 if (ATTENTION_LENGTH(flags) > 2 || 1102 ((flags & ATTENTION_PAR_INT) && 1103 ATTENTION_LENGTH(flags) > 1)) 1104 OSAL_SNPRINTF(bit_name, 30, 1105 p_aeu->bit_name, 1106 num); 1107 else 1108 OSAL_STRNCPY(bit_name, 1109 p_aeu->bit_name, 1110 30); 1111 1112 /* We now need to pass bitmask in its 1113 * correct position. 1114 */ 1115 bitmask <<= bit; 1116 1117 /* Handle source of the attention */ 1118 ecore_int_deassertion_aeu_bit(p_hwfn, 1119 p_aeu, 1120 aeu_en, 1121 bit_name, 1122 bitmask); 1123 } 1124 1125 bit_idx += ATTENTION_LENGTH(p_aeu->flags); 1126 } 1127 } 1128 } 1129 1130 /* Clear IGU indication for the deasserted bits */ 1131 /* FIXME - this will change once we'll have GOOD gtt definitions */ 1132 DIRECT_REG_WR(p_hwfn, 1133 (u8 OSAL_IOMEM *) p_hwfn->regview + 1134 GTT_BAR0_MAP_REG_IGU_CMD + 1135 ((IGU_CMD_ATTN_BIT_CLR_UPPER - 1136 IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits)); 1137 1138 /* Unmask deasserted attentions in IGU */ 1139 aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1140 IGU_REG_ATTENTION_ENABLE); 1141 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); 1142 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); 1143 1144 /* Clear deassertion from inner state */ 1145 sb_attn_sw->known_attn &= ~deasserted_bits; 1146 1147 return rc; 1148 } 1149 1150 static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn) 1151 { 1152 struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; 1153 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; 1154 u16 index = 0, asserted_bits, deasserted_bits; 1155 u32 attn_bits = 0, attn_acks = 0; 1156 enum _ecore_status_t rc = ECORE_SUCCESS; 1157 1158 /* Read current attention bits/acks - safeguard against attentions 1159 * by guaranting work on a synchronized timeframe 1160 */ 1161 do { 1162 index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index); 1163 attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits); 1164 attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack); 1165 } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index)); 1166 p_sb_attn->sb_index = index; 1167 1168 /* Attention / Deassertion are meaningful (and in correct state) 1169 * only when they differ and consistent with known state - deassertion 1170 * when previous attention & current ack, and assertion when current 1171 * attention with no previous attention 1172 */ 1173 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & 1174 ~p_sb_attn_sw->known_attn; 1175 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & 1176 p_sb_attn_sw->known_attn; 1177 1178 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) 1179 DP_INFO(p_hwfn, 1180 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n", 1181 index, attn_bits, attn_acks, asserted_bits, 1182 deasserted_bits, p_sb_attn_sw->known_attn); 1183 else if (asserted_bits == 0x100) 1184 DP_INFO(p_hwfn, "MFW indication via attention\n"); 1185 else 1186 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1187 "MFW indication [deassertion]\n"); 1188 1189 if (asserted_bits) { 1190 rc = ecore_int_assertion(p_hwfn, asserted_bits); 1191 if (rc) 1192 return rc; 1193 } 1194 1195 if (deasserted_bits) 1196 rc = ecore_int_deassertion(p_hwfn, deasserted_bits); 1197 1198 return rc; 1199 } 1200 1201 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn, 1202 void OSAL_IOMEM *igu_addr, u32 ack_cons) 1203 { 1204 struct igu_prod_cons_update igu_ack = { 0 }; 1205 1206 igu_ack.sb_id_and_flags = 1207 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | 1208 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | 1209 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | 1210 (IGU_SEG_ACCESS_ATTN << 1211 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); 1212 1213 DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags); 1214 1215 /* Both segments (interrupts & acks) are written to same place address; 1216 * Need to guarantee all commands will be received (in-order) by HW. 1217 */ 1218 OSAL_MMIOWB(p_hwfn->p_dev); 1219 OSAL_BARRIER(p_hwfn->p_dev); 1220 } 1221 1222 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie) 1223 { 1224 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie; 1225 struct ecore_pi_info *pi_info = OSAL_NULL; 1226 struct ecore_sb_attn_info *sb_attn; 1227 struct ecore_sb_info *sb_info; 1228 int arr_size; 1229 u16 rc = 0; 1230 1231 if (!p_hwfn) 1232 return; 1233 1234 if (!p_hwfn->p_sp_sb) { 1235 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n"); 1236 return; 1237 } 1238 1239 sb_info = &p_hwfn->p_sp_sb->sb_info; 1240 arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); 1241 if (!sb_info) { 1242 DP_ERR(p_hwfn->p_dev, 1243 "Status block is NULL - cannot ack interrupts\n"); 1244 return; 1245 } 1246 1247 if (!p_hwfn->p_sb_attn) { 1248 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn"); 1249 return; 1250 } 1251 sb_attn = p_hwfn->p_sb_attn; 1252 1253 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n", 1254 p_hwfn, p_hwfn->my_id); 1255 1256 /* Disable ack for def status block. Required both for msix + 1257 * inta in non-mask mode, in inta does no harm. 1258 */ 1259 ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0); 1260 1261 /* Gather Interrupts/Attentions information */ 1262 if (!sb_info->sb_virt) { 1263 DP_ERR(p_hwfn->p_dev, 1264 "Interrupt Status block is NULL -" 1265 " cannot check for new interrupts!\n"); 1266 } else { 1267 u32 tmp_index = sb_info->sb_ack; 1268 rc = ecore_sb_update_sb_idx(sb_info); 1269 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, 1270 "Interrupt indices: 0x%08x --> 0x%08x\n", 1271 tmp_index, sb_info->sb_ack); 1272 } 1273 1274 if (!sb_attn || !sb_attn->sb_attn) { 1275 DP_ERR(p_hwfn->p_dev, 1276 "Attentions Status block is NULL -" 1277 " cannot check for new attentions!\n"); 1278 } else { 1279 u16 tmp_index = sb_attn->index; 1280 1281 rc |= ecore_attn_update_idx(p_hwfn, sb_attn); 1282 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, 1283 "Attention indices: 0x%08x --> 0x%08x\n", 1284 tmp_index, sb_attn->index); 1285 } 1286 1287 /* Check if we expect interrupts at this time. if not just ack them */ 1288 if (!(rc & ECORE_SB_EVENT_MASK)) { 1289 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1290 return; 1291 } 1292 1293 /* Check the validity of the DPC ptt. If not ack interrupts and fail */ 1294 1295 if (!p_hwfn->p_dpc_ptt) { 1296 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n"); 1297 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1298 return; 1299 } 1300 1301 if (rc & ECORE_SB_ATT_IDX) 1302 ecore_int_attentions(p_hwfn); 1303 1304 if (rc & ECORE_SB_IDX) { 1305 int pi; 1306 1307 /* Since we only looked at the SB index, it's possible more 1308 * than a single protocol-index on the SB incremented. 1309 * Iterate over all configured protocol indices and check 1310 * whether something happened for each. 1311 */ 1312 for (pi = 0; pi < arr_size; pi++) { 1313 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; 1314 if (pi_info->comp_cb != OSAL_NULL) 1315 pi_info->comp_cb(p_hwfn, pi_info->cookie); 1316 } 1317 } 1318 1319 if (sb_attn && (rc & ECORE_SB_ATT_IDX)) { 1320 /* This should be done before the interrupts are enabled, 1321 * since otherwise a new attention will be generated. 1322 */ 1323 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); 1324 } 1325 1326 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1327 } 1328 1329 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn) 1330 { 1331 struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn; 1332 1333 if (!p_sb) 1334 return; 1335 1336 if (p_sb->sb_attn) { 1337 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn, 1338 p_sb->sb_phys, 1339 SB_ATTN_ALIGNED_SIZE(p_hwfn)); 1340 } 1341 OSAL_FREE(p_hwfn->p_dev, p_sb); 1342 } 1343 1344 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn, 1345 struct ecore_ptt *p_ptt) 1346 { 1347 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1348 1349 OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); 1350 1351 sb_info->index = 0; 1352 sb_info->known_attn = 0; 1353 1354 /* Configure Attention Status Block in IGU */ 1355 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, 1356 DMA_LO(p_hwfn->p_sb_attn->sb_phys)); 1357 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, 1358 DMA_HI(p_hwfn->p_sb_attn->sb_phys)); 1359 } 1360 1361 static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn, 1362 struct ecore_ptt *p_ptt, 1363 void *sb_virt_addr, dma_addr_t sb_phy_addr) 1364 { 1365 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1366 int i, j, k; 1367 1368 sb_info->sb_attn = sb_virt_addr; 1369 sb_info->sb_phys = sb_phy_addr; 1370 1371 /* Set the pointer to the AEU descriptors */ 1372 sb_info->p_aeu_desc = aeu_descs; 1373 1374 /* Calculate Parity Masks */ 1375 OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); 1376 for (i = 0; i < NUM_ATTN_REGS; i++) { 1377 /* j is array index, k is bit index */ 1378 for (j = 0, k = 0; k < 32; j++) { 1379 struct aeu_invert_reg_bit *p_aeu; 1380 1381 p_aeu = &aeu_descs[i].bits[j]; 1382 if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) 1383 sb_info->parity_mask[i] |= 1 << k; 1384 1385 k += ATTENTION_LENGTH(p_aeu->flags); 1386 } 1387 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1388 "Attn Mask [Reg %d]: 0x%08x\n", 1389 i, sb_info->parity_mask[i]); 1390 } 1391 1392 /* Set the address of cleanup for the mcp attention */ 1393 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + 1394 MISC_REG_AEU_GENERAL_ATTN_0; 1395 1396 ecore_int_sb_attn_setup(p_hwfn, p_ptt); 1397 } 1398 1399 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn, 1400 struct ecore_ptt *p_ptt) 1401 { 1402 struct ecore_dev *p_dev = p_hwfn->p_dev; 1403 struct ecore_sb_attn_info *p_sb; 1404 dma_addr_t p_phys = 0; 1405 void *p_virt; 1406 1407 /* SB struct */ 1408 p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb)); 1409 if (!p_sb) { 1410 DP_NOTICE(p_dev, true, 1411 "Failed to allocate `struct ecore_sb_attn_info'\n"); 1412 return ECORE_NOMEM; 1413 } 1414 1415 /* SB ring */ 1416 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 1417 SB_ATTN_ALIGNED_SIZE(p_hwfn)); 1418 if (!p_virt) { 1419 DP_NOTICE(p_dev, true, 1420 "Failed to allocate status block (attentions)\n"); 1421 OSAL_FREE(p_dev, p_sb); 1422 return ECORE_NOMEM; 1423 } 1424 1425 /* Attention setup */ 1426 p_hwfn->p_sb_attn = p_sb; 1427 ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); 1428 1429 return ECORE_SUCCESS; 1430 } 1431 1432 /* coalescing timeout = timeset << (timer_res + 1) */ 1433 #define ECORE_CAU_DEF_RX_USECS 24 1434 #define ECORE_CAU_DEF_TX_USECS 48 1435 1436 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn, 1437 struct cau_sb_entry *p_sb_entry, 1438 u8 pf_id, u16 vf_number, u8 vf_valid) 1439 { 1440 struct ecore_dev *p_dev = p_hwfn->p_dev; 1441 u32 cau_state; 1442 u8 timer_res; 1443 1444 OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry)); 1445 1446 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); 1447 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); 1448 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); 1449 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); 1450 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); 1451 1452 cau_state = CAU_HC_DISABLE_STATE; 1453 1454 if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { 1455 cau_state = CAU_HC_ENABLE_STATE; 1456 if (!p_dev->rx_coalesce_usecs) 1457 p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS; 1458 if (!p_dev->tx_coalesce_usecs) 1459 p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS; 1460 } 1461 1462 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ 1463 if (p_dev->rx_coalesce_usecs <= 0x7F) 1464 timer_res = 0; 1465 else if (p_dev->rx_coalesce_usecs <= 0xFF) 1466 timer_res = 1; 1467 else 1468 timer_res = 2; 1469 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 1470 1471 if (p_dev->tx_coalesce_usecs <= 0x7F) 1472 timer_res = 0; 1473 else if (p_dev->tx_coalesce_usecs <= 0xFF) 1474 timer_res = 1; 1475 else 1476 timer_res = 2; 1477 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 1478 1479 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); 1480 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); 1481 } 1482 1483 static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, 1484 struct ecore_ptt *p_ptt, 1485 u16 igu_sb_id, u32 pi_index, 1486 enum ecore_coalescing_fsm coalescing_fsm, 1487 u8 timeset) 1488 { 1489 struct cau_pi_entry pi_entry; 1490 u32 sb_offset, pi_offset; 1491 1492 if (IS_VF(p_hwfn->p_dev)) 1493 return;/* @@@TBD MichalK- VF CAU... */ 1494 1495 sb_offset = igu_sb_id * PIS_PER_SB_E4; 1496 OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry)); 1497 1498 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); 1499 if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE) 1500 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); 1501 else 1502 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); 1503 1504 pi_offset = sb_offset + pi_index; 1505 if (p_hwfn->hw_init_done) { 1506 ecore_wr(p_hwfn, p_ptt, 1507 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), 1508 *((u32 *)&(pi_entry))); 1509 } else { 1510 STORE_RT_REG(p_hwfn, 1511 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, 1512 *((u32 *)&(pi_entry))); 1513 } 1514 } 1515 1516 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, 1517 struct ecore_ptt *p_ptt, 1518 struct ecore_sb_info *p_sb, u32 pi_index, 1519 enum ecore_coalescing_fsm coalescing_fsm, 1520 u8 timeset) 1521 { 1522 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id, 1523 pi_index, coalescing_fsm, timeset); 1524 } 1525 1526 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn, 1527 struct ecore_ptt *p_ptt, 1528 dma_addr_t sb_phys, u16 igu_sb_id, 1529 u16 vf_number, u8 vf_valid) 1530 { 1531 struct cau_sb_entry sb_entry; 1532 1533 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, 1534 vf_number, vf_valid); 1535 1536 if (p_hwfn->hw_init_done) { 1537 /* Wide-bus, initialize via DMAE */ 1538 u64 phys_addr = (u64)sb_phys; 1539 1540 ecore_dmae_host2grc(p_hwfn, p_ptt, 1541 (u64)(osal_uintptr_t)&phys_addr, 1542 CAU_REG_SB_ADDR_MEMORY + 1543 igu_sb_id * sizeof(u64), 2, 0); 1544 ecore_dmae_host2grc(p_hwfn, p_ptt, 1545 (u64)(osal_uintptr_t)&sb_entry, 1546 CAU_REG_SB_VAR_MEMORY + 1547 igu_sb_id * sizeof(u64), 2, 0); 1548 } else { 1549 /* Initialize Status Block Address */ 1550 STORE_RT_REG_AGG(p_hwfn, 1551 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + 1552 igu_sb_id * 2, sb_phys); 1553 1554 STORE_RT_REG_AGG(p_hwfn, 1555 CAU_REG_SB_VAR_MEMORY_RT_OFFSET + 1556 igu_sb_id * 2, sb_entry); 1557 } 1558 1559 /* Configure pi coalescing if set */ 1560 if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { 1561 /* eth will open queues for all tcs, so configure all of them 1562 * properly, rather than just the active ones 1563 */ 1564 u8 num_tc = p_hwfn->hw_info.num_hw_tc; 1565 1566 u8 timeset, timer_res; 1567 u8 i; 1568 1569 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ 1570 if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F) 1571 timer_res = 0; 1572 else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF) 1573 timer_res = 1; 1574 else 1575 timer_res = 2; 1576 timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res); 1577 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, 1578 ECORE_COAL_RX_STATE_MACHINE, 1579 timeset); 1580 1581 if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F) 1582 timer_res = 0; 1583 else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF) 1584 timer_res = 1; 1585 else 1586 timer_res = 2; 1587 timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res); 1588 for (i = 0; i < num_tc; i++) { 1589 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, 1590 igu_sb_id, TX_PI(i), 1591 ECORE_COAL_TX_STATE_MACHINE, 1592 timeset); 1593 } 1594 } 1595 } 1596 1597 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn, 1598 struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info) 1599 { 1600 /* zero status block and ack counter */ 1601 sb_info->sb_ack = 0; 1602 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1603 1604 if (IS_PF(p_hwfn->p_dev)) 1605 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, 1606 sb_info->igu_sb_id, 0, 0); 1607 } 1608 1609 struct ecore_igu_block * 1610 ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf) 1611 { 1612 struct ecore_igu_block *p_block; 1613 u16 igu_id; 1614 1615 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 1616 igu_id++) { 1617 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1618 1619 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 1620 !(p_block->status & ECORE_IGU_STATUS_FREE)) 1621 continue; 1622 1623 if (!!(p_block->status & ECORE_IGU_STATUS_PF) == 1624 b_is_pf) 1625 return p_block; 1626 } 1627 1628 return OSAL_NULL; 1629 } 1630 1631 static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn, 1632 u16 vector_id) 1633 { 1634 struct ecore_igu_block *p_block; 1635 u16 igu_id; 1636 1637 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 1638 igu_id++) { 1639 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1640 1641 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 1642 !p_block->is_pf || 1643 p_block->vector_number != vector_id) 1644 continue; 1645 1646 return igu_id; 1647 } 1648 1649 return ECORE_SB_INVALID_IDX; 1650 } 1651 1652 u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) 1653 { 1654 u16 igu_sb_id; 1655 1656 /* Assuming continuous set of IGU SBs dedicated for given PF */ 1657 if (sb_id == ECORE_SP_SB_ID) 1658 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 1659 else if (IS_PF(p_hwfn->p_dev)) 1660 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1); 1661 else 1662 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id); 1663 1664 if (igu_sb_id == ECORE_SB_INVALID_IDX) 1665 DP_NOTICE(p_hwfn, true, 1666 "Slowpath SB vector %04x doesn't exist\n", 1667 sb_id); 1668 else if (sb_id == ECORE_SP_SB_ID) 1669 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1670 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); 1671 else 1672 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1673 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); 1674 1675 return igu_sb_id; 1676 } 1677 1678 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn, 1679 struct ecore_ptt *p_ptt, 1680 struct ecore_sb_info *sb_info, 1681 void *sb_virt_addr, 1682 dma_addr_t sb_phy_addr, u16 sb_id) 1683 { 1684 sb_info->sb_virt = sb_virt_addr; 1685 sb_info->sb_phys = sb_phy_addr; 1686 1687 sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id); 1688 1689 if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX) 1690 return ECORE_INVAL; 1691 1692 /* Let the igu info reference the client's SB info */ 1693 if (sb_id != ECORE_SP_SB_ID) { 1694 if (IS_PF(p_hwfn->p_dev)) { 1695 struct ecore_igu_info *p_info; 1696 struct ecore_igu_block *p_block; 1697 1698 p_info = p_hwfn->hw_info.p_igu_info; 1699 p_block = &p_info->entry[sb_info->igu_sb_id]; 1700 1701 p_block->sb_info = sb_info; 1702 p_block->status &= ~ECORE_IGU_STATUS_FREE; 1703 p_info->usage.free_cnt--; 1704 } else { 1705 ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info); 1706 } 1707 } 1708 #ifdef ECORE_CONFIG_DIRECT_HWFN 1709 sb_info->p_hwfn = p_hwfn; 1710 #endif 1711 sb_info->p_dev = p_hwfn->p_dev; 1712 1713 /* The igu address will hold the absolute address that needs to be 1714 * written to for a specific status block 1715 */ 1716 if (IS_PF(p_hwfn->p_dev)) { 1717 sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview + 1718 GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3); 1719 1720 } else { 1721 sb_info->igu_addr = 1722 (u8 OSAL_IOMEM *)p_hwfn->regview + 1723 PXP_VF_BAR0_START_IGU + 1724 ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3); 1725 } 1726 1727 sb_info->flags |= ECORE_SB_INFO_INIT; 1728 1729 ecore_int_sb_setup(p_hwfn, p_ptt, sb_info); 1730 1731 return ECORE_SUCCESS; 1732 } 1733 1734 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn, 1735 struct ecore_sb_info *sb_info, 1736 u16 sb_id) 1737 { 1738 struct ecore_igu_info *p_info; 1739 struct ecore_igu_block *p_block; 1740 1741 if (sb_info == OSAL_NULL) 1742 return ECORE_SUCCESS; 1743 1744 /* zero status block and ack counter */ 1745 sb_info->sb_ack = 0; 1746 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1747 1748 if (IS_VF(p_hwfn->p_dev)) { 1749 ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL); 1750 return ECORE_SUCCESS; 1751 } 1752 1753 p_info = p_hwfn->hw_info.p_igu_info; 1754 p_block = &p_info->entry[sb_info->igu_sb_id]; 1755 1756 /* Vector 0 is reserved to Default SB */ 1757 if (p_block->vector_number == 0) { 1758 DP_ERR(p_hwfn, "Do Not free sp sb using this function"); 1759 return ECORE_INVAL; 1760 } 1761 1762 /* Lose reference to client's SB info, and fix counters */ 1763 p_block->sb_info = OSAL_NULL; 1764 p_block->status |= ECORE_IGU_STATUS_FREE; 1765 p_info->usage.free_cnt++; 1766 1767 return ECORE_SUCCESS; 1768 } 1769 1770 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn) 1771 { 1772 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb; 1773 1774 if (!p_sb) 1775 return; 1776 1777 if (p_sb->sb_info.sb_virt) { 1778 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 1779 p_sb->sb_info.sb_virt, 1780 p_sb->sb_info.sb_phys, 1781 SB_ALIGNED_SIZE(p_hwfn)); 1782 } 1783 1784 OSAL_FREE(p_hwfn->p_dev, p_sb); 1785 } 1786 1787 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn, 1788 struct ecore_ptt *p_ptt) 1789 { 1790 struct ecore_sb_sp_info *p_sb; 1791 dma_addr_t p_phys = 0; 1792 void *p_virt; 1793 1794 /* SB struct */ 1795 p_sb = 1796 OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, 1797 sizeof(*p_sb)); 1798 if (!p_sb) { 1799 DP_NOTICE(p_hwfn, true, 1800 "Failed to allocate `struct ecore_sb_info'\n"); 1801 return ECORE_NOMEM; 1802 } 1803 1804 /* SB ring */ 1805 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 1806 &p_phys, SB_ALIGNED_SIZE(p_hwfn)); 1807 if (!p_virt) { 1808 DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n"); 1809 OSAL_FREE(p_hwfn->p_dev, p_sb); 1810 return ECORE_NOMEM; 1811 } 1812 1813 /* Status Block setup */ 1814 p_hwfn->p_sp_sb = p_sb; 1815 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, 1816 p_virt, p_phys, ECORE_SP_SB_ID); 1817 1818 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); 1819 1820 return ECORE_SUCCESS; 1821 } 1822 1823 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn, 1824 ecore_int_comp_cb_t comp_cb, 1825 void *cookie, 1826 u8 *sb_idx, __le16 **p_fw_cons) 1827 { 1828 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1829 enum _ecore_status_t rc = ECORE_NOMEM; 1830 u8 pi; 1831 1832 /* Look for a free index */ 1833 for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { 1834 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL) 1835 continue; 1836 1837 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; 1838 p_sp_sb->pi_info_arr[pi].cookie = cookie; 1839 *sb_idx = pi; 1840 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; 1841 rc = ECORE_SUCCESS; 1842 break; 1843 } 1844 1845 return rc; 1846 } 1847 1848 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi) 1849 { 1850 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1851 1852 if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL) 1853 return ECORE_NOMEM; 1854 1855 p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL; 1856 p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL; 1857 return ECORE_SUCCESS; 1858 } 1859 1860 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn) 1861 { 1862 return p_hwfn->p_sp_sb->sb_info.igu_sb_id; 1863 } 1864 1865 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn, 1866 struct ecore_ptt *p_ptt, 1867 enum ecore_int_mode int_mode) 1868 { 1869 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; 1870 1871 #ifndef ASIC_ONLY 1872 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1873 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n"); 1874 igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN; 1875 } 1876 #endif 1877 1878 p_hwfn->p_dev->int_mode = int_mode; 1879 switch (p_hwfn->p_dev->int_mode) { 1880 case ECORE_INT_MODE_INTA: 1881 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; 1882 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1883 break; 1884 1885 case ECORE_INT_MODE_MSI: 1886 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1887 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1888 break; 1889 1890 case ECORE_INT_MODE_MSIX: 1891 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1892 break; 1893 case ECORE_INT_MODE_POLL: 1894 break; 1895 } 1896 1897 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 1898 } 1899 1900 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn, 1901 struct ecore_ptt *p_ptt) 1902 { 1903 #ifndef ASIC_ONLY 1904 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1905 DP_INFO(p_hwfn, 1906 "FPGA - Don't enable Attentions in IGU and MISC\n"); 1907 return; 1908 } 1909 #endif 1910 1911 /* Configure AEU signal change to produce attentions */ 1912 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 1913 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); 1914 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); 1915 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); 1916 1917 /* Flush the writes to IGU */ 1918 OSAL_MMIOWB(p_hwfn->p_dev); 1919 1920 /* Unmask AEU signals toward IGU */ 1921 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 1922 } 1923 1924 enum _ecore_status_t 1925 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 1926 enum ecore_int_mode int_mode) 1927 { 1928 enum _ecore_status_t rc = ECORE_SUCCESS; 1929 1930 ecore_int_igu_enable_attn(p_hwfn, p_ptt); 1931 1932 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { 1933 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn); 1934 if (rc != ECORE_SUCCESS) { 1935 DP_NOTICE(p_hwfn, true, 1936 "Slowpath IRQ request failed\n"); 1937 return ECORE_NORESOURCES; 1938 } 1939 p_hwfn->b_int_requested = true; 1940 } 1941 1942 /* Enable interrupt Generation */ 1943 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 1944 1945 p_hwfn->b_int_enabled = 1; 1946 1947 return rc; 1948 } 1949 1950 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn, 1951 struct ecore_ptt *p_ptt) 1952 { 1953 p_hwfn->b_int_enabled = 0; 1954 1955 if (IS_VF(p_hwfn->p_dev)) 1956 return; 1957 1958 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); 1959 } 1960 1961 #define IGU_CLEANUP_SLEEP_LENGTH (1000) 1962 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn, 1963 struct ecore_ptt *p_ptt, 1964 u32 igu_sb_id, 1965 bool cleanup_set, 1966 u16 opaque_fid) 1967 { 1968 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; 1969 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; 1970 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; 1971 u8 type = 0; /* FIXME MichalS type??? */ 1972 1973 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 - 1974 IGU_REG_CLEANUP_STATUS_0) != 0x200); 1975 1976 /* USE Control Command Register to perform cleanup. There is an 1977 * option to do this using IGU bar, but then it can't be used for VFs. 1978 */ 1979 1980 /* Set the data field */ 1981 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); 1982 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type); 1983 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); 1984 1985 /* Set the control register */ 1986 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); 1987 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); 1988 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); 1989 1990 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); 1991 1992 OSAL_BARRIER(p_hwfn->p_dev); 1993 1994 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); 1995 1996 /* Flush the write to IGU */ 1997 OSAL_MMIOWB(p_hwfn->p_dev); 1998 1999 /* calculate where to read the status bit from */ 2000 sb_bit = 1 << (igu_sb_id % 32); 2001 sb_bit_addr = igu_sb_id / 32 * sizeof(u32); 2002 2003 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type); 2004 2005 /* Now wait for the command to complete */ 2006 while (--sleep_cnt) { 2007 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr); 2008 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) 2009 break; 2010 OSAL_MSLEEP(5); 2011 } 2012 2013 if (!sleep_cnt) 2014 DP_NOTICE(p_hwfn, true, 2015 "Timeout waiting for clear status 0x%08x [for sb %d]\n", 2016 val, igu_sb_id); 2017 } 2018 2019 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn, 2020 struct ecore_ptt *p_ptt, 2021 u16 igu_sb_id, u16 opaque, bool b_set) 2022 { 2023 struct ecore_igu_block *p_block; 2024 int pi, i; 2025 2026 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2027 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2028 "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", 2029 igu_sb_id, p_block->function_id, p_block->is_pf, 2030 p_block->vector_number); 2031 2032 /* Set */ 2033 if (b_set) 2034 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); 2035 2036 /* Clear */ 2037 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); 2038 2039 /* Wait for the IGU SB to cleanup */ 2040 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { 2041 u32 val; 2042 2043 val = ecore_rd(p_hwfn, p_ptt, 2044 IGU_REG_WRITE_DONE_PENDING + 2045 ((igu_sb_id / 32) * 4)); 2046 if (val & (1 << (igu_sb_id % 32))) 2047 OSAL_UDELAY(10); 2048 else 2049 break; 2050 } 2051 if (i == IGU_CLEANUP_SLEEP_LENGTH) 2052 DP_NOTICE(p_hwfn, true, 2053 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", 2054 igu_sb_id); 2055 2056 /* Clear the CAU for the SB */ 2057 for (pi = 0; pi < 12; pi++) 2058 ecore_wr(p_hwfn, p_ptt, 2059 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0); 2060 } 2061 2062 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn, 2063 struct ecore_ptt *p_ptt, 2064 bool b_set, bool b_slowpath) 2065 { 2066 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2067 struct ecore_igu_block *p_block; 2068 u16 igu_sb_id = 0; 2069 u32 val = 0; 2070 2071 /* @@@TBD MichalK temporary... should be moved to init-tool... */ 2072 val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); 2073 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; 2074 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; 2075 ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); 2076 /* end temporary */ 2077 2078 for (igu_sb_id = 0; 2079 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2080 igu_sb_id++) { 2081 p_block = &p_info->entry[igu_sb_id]; 2082 2083 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 2084 !p_block->is_pf || 2085 (p_block->status & ECORE_IGU_STATUS_DSB)) 2086 continue; 2087 2088 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id, 2089 p_hwfn->hw_info.opaque_fid, 2090 b_set); 2091 } 2092 2093 if (b_slowpath) 2094 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 2095 p_info->igu_dsb_id, 2096 p_hwfn->hw_info.opaque_fid, 2097 b_set); 2098 } 2099 2100 int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn, 2101 struct ecore_ptt *p_ptt) 2102 { 2103 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2104 struct ecore_igu_block *p_block; 2105 int pf_sbs, vf_sbs; 2106 u16 igu_sb_id; 2107 u32 val, rval; 2108 2109 if (!RESC_NUM(p_hwfn, ECORE_SB)) { 2110 /* We're using an old MFW - have to prevent any switching 2111 * of SBs between PF and VFs as later driver wouldn't be 2112 * able to tell which belongs to which. 2113 */ 2114 p_info->b_allow_pf_vf_change = false; 2115 } else { 2116 /* Use the numbers the MFW have provided - 2117 * don't forget MFW accounts for the default SB as well. 2118 */ 2119 p_info->b_allow_pf_vf_change = true; 2120 2121 if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) { 2122 DP_INFO(p_hwfn, 2123 "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n", 2124 RESC_NUM(p_hwfn, ECORE_SB) - 1, 2125 p_info->usage.cnt); 2126 p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1; 2127 } 2128 2129 /* TODO - how do we learn about VF SBs from MFW? */ 2130 if (IS_PF_SRIOV(p_hwfn)) { 2131 u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs; 2132 2133 if (vfs != p_info->usage.iov_cnt) 2134 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2135 "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n", 2136 p_info->usage.iov_cnt, vfs); 2137 2138 /* At this point we know how many SBs we have totally 2139 * in IGU + number of PF SBs. So we can validate that 2140 * we'd have sufficient for VF. 2141 */ 2142 if (vfs > p_info->usage.free_cnt + 2143 p_info->usage.free_cnt_iov - 2144 p_info->usage.cnt) { 2145 DP_NOTICE(p_hwfn, true, 2146 "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n", 2147 p_info->usage.free_cnt + 2148 p_info->usage.free_cnt_iov, 2149 p_info->usage.cnt, vfs); 2150 return ECORE_INVAL; 2151 } 2152 } 2153 } 2154 2155 /* Cap the number of VFs SBs by the number of VFs */ 2156 if (IS_PF_SRIOV(p_hwfn)) 2157 p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs; 2158 2159 /* Mark all SBs as free, now in the right PF/VFs division */ 2160 p_info->usage.free_cnt = p_info->usage.cnt; 2161 p_info->usage.free_cnt_iov = p_info->usage.iov_cnt; 2162 p_info->usage.orig = p_info->usage.cnt; 2163 p_info->usage.iov_orig = p_info->usage.iov_cnt; 2164 2165 /* We now proceed to re-configure the IGU cam to reflect the initial 2166 * configuration. We can start with the Default SB. 2167 */ 2168 pf_sbs = p_info->usage.cnt; 2169 vf_sbs = p_info->usage.iov_cnt; 2170 2171 for (igu_sb_id = p_info->igu_dsb_id; 2172 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2173 igu_sb_id++) { 2174 p_block = &p_info->entry[igu_sb_id]; 2175 val = 0; 2176 2177 if (!(p_block->status & ECORE_IGU_STATUS_VALID)) 2178 continue; 2179 2180 if (p_block->status & ECORE_IGU_STATUS_DSB) { 2181 p_block->function_id = p_hwfn->rel_pf_id; 2182 p_block->is_pf = 1; 2183 p_block->vector_number = 0; 2184 p_block->status = ECORE_IGU_STATUS_VALID | 2185 ECORE_IGU_STATUS_PF | 2186 ECORE_IGU_STATUS_DSB; 2187 } else if (pf_sbs) { 2188 pf_sbs--; 2189 p_block->function_id = p_hwfn->rel_pf_id; 2190 p_block->is_pf = 1; 2191 p_block->vector_number = p_info->usage.cnt - pf_sbs; 2192 p_block->status = ECORE_IGU_STATUS_VALID | 2193 ECORE_IGU_STATUS_PF | 2194 ECORE_IGU_STATUS_FREE; 2195 } else if (vf_sbs) { 2196 p_block->function_id = 2197 p_hwfn->p_dev->p_iov_info->first_vf_in_pf + 2198 p_info->usage.iov_cnt - vf_sbs; 2199 p_block->is_pf = 0; 2200 p_block->vector_number = 0; 2201 p_block->status = ECORE_IGU_STATUS_VALID | 2202 ECORE_IGU_STATUS_FREE; 2203 vf_sbs--; 2204 } else { 2205 p_block->function_id = 0; 2206 p_block->is_pf = 0; 2207 p_block->vector_number = 0; 2208 } 2209 2210 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2211 p_block->function_id); 2212 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2213 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2214 p_block->vector_number); 2215 2216 /* VF entries would be enabled when VF is initializaed */ 2217 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2218 2219 rval = ecore_rd(p_hwfn, p_ptt, 2220 IGU_REG_MAPPING_MEMORY + 2221 sizeof(u32) * igu_sb_id); 2222 2223 if (rval != val) { 2224 ecore_wr(p_hwfn, p_ptt, 2225 IGU_REG_MAPPING_MEMORY + 2226 sizeof(u32) * igu_sb_id, 2227 val); 2228 2229 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2230 "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n", 2231 igu_sb_id, p_block->function_id, 2232 p_block->is_pf, p_block->vector_number, 2233 rval, val); 2234 } 2235 } 2236 2237 return 0; 2238 } 2239 2240 int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn, 2241 struct ecore_ptt *p_ptt) 2242 { 2243 struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage; 2244 2245 /* Return all the usage indications to default prior to the reset; 2246 * The reset expects the !orig to reflect the initial status of the 2247 * SBs, and would re-calculate the originals based on those. 2248 */ 2249 p_cnt->cnt = p_cnt->orig; 2250 p_cnt->free_cnt = p_cnt->orig; 2251 p_cnt->iov_cnt = p_cnt->iov_orig; 2252 p_cnt->free_cnt_iov = p_cnt->iov_orig; 2253 p_cnt->orig = 0; 2254 p_cnt->iov_orig = 0; 2255 2256 /* TODO - we probably need to re-configure the CAU as well... */ 2257 return ecore_int_igu_reset_cam(p_hwfn, p_ptt); 2258 } 2259 2260 static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn, 2261 struct ecore_ptt *p_ptt, 2262 u16 igu_sb_id) 2263 { 2264 u32 val = ecore_rd(p_hwfn, p_ptt, 2265 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); 2266 struct ecore_igu_block *p_block; 2267 2268 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2269 2270 /* Fill the block information */ 2271 p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER); 2272 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); 2273 p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER); 2274 2275 p_block->igu_sb_id = igu_sb_id; 2276 } 2277 2278 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn, 2279 struct ecore_ptt *p_ptt) 2280 { 2281 struct ecore_igu_info *p_igu_info; 2282 struct ecore_igu_block *p_block; 2283 u32 min_vf = 0, max_vf = 0; 2284 u16 igu_sb_id; 2285 2286 p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev, 2287 GFP_KERNEL, 2288 sizeof(*p_igu_info)); 2289 if (!p_hwfn->hw_info.p_igu_info) 2290 return ECORE_NOMEM; 2291 p_igu_info = p_hwfn->hw_info.p_igu_info; 2292 2293 /* Distinguish between existent and onn-existent default SB */ 2294 p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX; 2295 2296 /* Find the range of VF ids whose SB belong to this PF */ 2297 if (p_hwfn->p_dev->p_iov_info) { 2298 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info; 2299 2300 min_vf = p_iov->first_vf_in_pf; 2301 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; 2302 } 2303 2304 for (igu_sb_id = 0; 2305 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2306 igu_sb_id++) { 2307 /* Read current entry; Notice it might not belong to this PF */ 2308 ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id); 2309 p_block = &p_igu_info->entry[igu_sb_id]; 2310 2311 if ((p_block->is_pf) && 2312 (p_block->function_id == p_hwfn->rel_pf_id)) { 2313 p_block->status = ECORE_IGU_STATUS_PF | 2314 ECORE_IGU_STATUS_VALID | 2315 ECORE_IGU_STATUS_FREE; 2316 2317 if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX) 2318 p_igu_info->usage.cnt++; 2319 } else if (!(p_block->is_pf) && 2320 (p_block->function_id >= min_vf) && 2321 (p_block->function_id < max_vf)) { 2322 /* Available for VFs of this PF */ 2323 p_block->status = ECORE_IGU_STATUS_VALID | 2324 ECORE_IGU_STATUS_FREE; 2325 2326 if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX) 2327 p_igu_info->usage.iov_cnt++; 2328 } 2329 2330 /* Mark the First entry belonging to the PF or its VFs 2331 * as the default SB [we'll reset IGU prior to first usage]. 2332 */ 2333 if ((p_block->status & ECORE_IGU_STATUS_VALID) && 2334 (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) { 2335 p_igu_info->igu_dsb_id = igu_sb_id; 2336 p_block->status |= ECORE_IGU_STATUS_DSB; 2337 } 2338 2339 /* While this isn't suitable for all clients, limit number 2340 * of prints by having each PF print only its entries with the 2341 * exception of PF0 which would print everything. 2342 */ 2343 if ((p_block->status & ECORE_IGU_STATUS_VALID) || 2344 (p_hwfn->abs_pf_id == 0)) 2345 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2346 "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2347 igu_sb_id, p_block->function_id, 2348 p_block->is_pf, p_block->vector_number); 2349 } 2350 2351 if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) { 2352 DP_NOTICE(p_hwfn, true, 2353 "IGU CAM returned invalid values igu_dsb_id=0x%x\n", 2354 p_igu_info->igu_dsb_id); 2355 return ECORE_INVAL; 2356 } 2357 2358 /* All non default SB are considered free at this point */ 2359 p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; 2360 p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; 2361 2362 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2363 "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n", 2364 p_igu_info->igu_dsb_id, p_igu_info->usage.cnt, 2365 p_igu_info->usage.iov_cnt); 2366 2367 return ECORE_SUCCESS; 2368 } 2369 2370 enum _ecore_status_t 2371 ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 2372 u16 sb_id, bool b_to_vf) 2373 { 2374 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2375 struct ecore_igu_block *p_block = OSAL_NULL; 2376 u16 igu_sb_id = 0, vf_num = 0; 2377 u32 val = 0; 2378 2379 if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn)) 2380 return ECORE_INVAL; 2381 2382 if (sb_id == ECORE_SP_SB_ID) 2383 return ECORE_INVAL; 2384 2385 if (!p_info->b_allow_pf_vf_change) { 2386 DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n"); 2387 return ECORE_INVAL; 2388 } 2389 2390 /* If we're moving a SB from PF to VF, the client had to specify 2391 * which vector it wants to move. 2392 */ 2393 if (b_to_vf) { 2394 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1); 2395 if (igu_sb_id == ECORE_SB_INVALID_IDX) 2396 return ECORE_INVAL; 2397 } 2398 2399 /* If we're moving a SB from VF to PF, need to validate there isn't 2400 * already a line configured for that vector. 2401 */ 2402 if (!b_to_vf) { 2403 if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) != 2404 ECORE_SB_INVALID_IDX) 2405 return ECORE_INVAL; 2406 } 2407 2408 /* We need to validate that the SB can actually be relocated. 2409 * This would also handle the previous case where we've explicitly 2410 * stated which IGU SB needs to move. 2411 */ 2412 for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2413 igu_sb_id++) { 2414 p_block = &p_info->entry[igu_sb_id]; 2415 2416 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 2417 !(p_block->status & ECORE_IGU_STATUS_FREE) || 2418 (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) { 2419 if (b_to_vf) 2420 return ECORE_INVAL; 2421 else 2422 continue; 2423 } 2424 2425 break; 2426 } 2427 2428 if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) { 2429 DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV), 2430 "Failed to find a free SB to move\n"); 2431 return ECORE_INVAL; 2432 } 2433 2434 /* At this point, p_block points to the SB we want to relocate */ 2435 if (b_to_vf) { 2436 p_block->status &= ~ECORE_IGU_STATUS_PF; 2437 2438 /* It doesn't matter which VF number we choose, since we're 2439 * going to disable the line; But let's keep it in range. 2440 */ 2441 vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf; 2442 2443 p_block->function_id = (u8)vf_num; 2444 p_block->is_pf = 0; 2445 p_block->vector_number = 0; 2446 2447 p_info->usage.cnt--; 2448 p_info->usage.free_cnt--; 2449 p_info->usage.iov_cnt++; 2450 p_info->usage.free_cnt_iov++; 2451 2452 /* TODO - if SBs aren't really the limiting factor, 2453 * then it might not be accurate [in the since that 2454 * we might not need decrement the feature]. 2455 */ 2456 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--; 2457 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++; 2458 } else { 2459 p_block->status |= ECORE_IGU_STATUS_PF; 2460 p_block->function_id = p_hwfn->rel_pf_id; 2461 p_block->is_pf = 1; 2462 p_block->vector_number = sb_id + 1; 2463 2464 p_info->usage.cnt++; 2465 p_info->usage.free_cnt++; 2466 p_info->usage.iov_cnt--; 2467 p_info->usage.free_cnt_iov--; 2468 2469 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++; 2470 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--; 2471 } 2472 2473 /* Update the IGU and CAU with the new configuration */ 2474 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2475 p_block->function_id); 2476 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2477 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2478 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2479 p_block->vector_number); 2480 2481 ecore_wr(p_hwfn, p_ptt, 2482 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id, 2483 val); 2484 2485 ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0, 2486 igu_sb_id, vf_num, 2487 p_block->is_pf ? 0 : 1); 2488 2489 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2490 "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2491 igu_sb_id, p_block->function_id, 2492 p_block->is_pf, p_block->vector_number); 2493 2494 return ECORE_SUCCESS; 2495 } 2496 2497 /** 2498 * @brief Initialize igu runtime registers 2499 * 2500 * @param p_hwfn 2501 */ 2502 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn) 2503 { 2504 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; 2505 2506 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); 2507 } 2508 2509 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \ 2510 IGU_CMD_INT_ACK_BASE) 2511 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \ 2512 IGU_CMD_INT_ACK_BASE) 2513 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn) 2514 { 2515 u32 intr_status_hi = 0, intr_status_lo = 0; 2516 u64 intr_status = 0; 2517 2518 intr_status_lo = REG_RD(p_hwfn, 2519 GTT_BAR0_MAP_REG_IGU_CMD + 2520 LSB_IGU_CMD_ADDR * 8); 2521 intr_status_hi = REG_RD(p_hwfn, 2522 GTT_BAR0_MAP_REG_IGU_CMD + 2523 MSB_IGU_CMD_ADDR * 8); 2524 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; 2525 2526 return intr_status; 2527 } 2528 2529 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn) 2530 { 2531 OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn); 2532 p_hwfn->b_sp_dpc_enabled = true; 2533 } 2534 2535 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn) 2536 { 2537 p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn); 2538 if (!p_hwfn->sp_dpc) 2539 return ECORE_NOMEM; 2540 2541 return ECORE_SUCCESS; 2542 } 2543 2544 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn) 2545 { 2546 OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc); 2547 } 2548 2549 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn, 2550 struct ecore_ptt *p_ptt) 2551 { 2552 enum _ecore_status_t rc = ECORE_SUCCESS; 2553 2554 rc = ecore_int_sp_dpc_alloc(p_hwfn); 2555 if (rc != ECORE_SUCCESS) { 2556 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n"); 2557 return rc; 2558 } 2559 2560 rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt); 2561 if (rc != ECORE_SUCCESS) { 2562 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n"); 2563 return rc; 2564 } 2565 2566 rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt); 2567 if (rc != ECORE_SUCCESS) 2568 DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n"); 2569 2570 return rc; 2571 } 2572 2573 void ecore_int_free(struct ecore_hwfn *p_hwfn) 2574 { 2575 ecore_int_sp_sb_free(p_hwfn); 2576 ecore_int_sb_attn_free(p_hwfn); 2577 ecore_int_sp_dpc_free(p_hwfn); 2578 } 2579 2580 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 2581 { 2582 if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn) 2583 return; 2584 2585 ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); 2586 ecore_int_sb_attn_setup(p_hwfn, p_ptt); 2587 ecore_int_sp_dpc_setup(p_hwfn); 2588 } 2589 2590 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn, 2591 struct ecore_sb_cnt_info *p_sb_cnt_info) 2592 { 2593 struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info; 2594 2595 if (!p_igu_info || !p_sb_cnt_info) 2596 return; 2597 2598 OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage, 2599 sizeof(*p_sb_cnt_info)); 2600 } 2601 2602 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev) 2603 { 2604 int i; 2605 2606 for_each_hwfn(p_dev, i) 2607 p_dev->hwfns[i].b_int_requested = false; 2608 } 2609 2610 void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable) 2611 { 2612 p_dev->attn_clr_en = clr_enable; 2613 } 2614 2615 enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn, 2616 struct ecore_ptt *p_ptt, 2617 u8 timer_res, u16 sb_id, bool tx) 2618 { 2619 struct cau_sb_entry sb_entry; 2620 enum _ecore_status_t rc; 2621 2622 if (!p_hwfn->hw_init_done) { 2623 DP_ERR(p_hwfn, "hardware not initialized yet\n"); 2624 return ECORE_INVAL; 2625 } 2626 2627 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2628 sb_id * sizeof(u64), 2629 (u64)(osal_uintptr_t)&sb_entry, 2, 0); 2630 if (rc != ECORE_SUCCESS) { 2631 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2632 return rc; 2633 } 2634 2635 if (tx) 2636 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 2637 else 2638 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 2639 2640 rc = ecore_dmae_host2grc(p_hwfn, p_ptt, 2641 (u64)(osal_uintptr_t)&sb_entry, 2642 CAU_REG_SB_VAR_MEMORY + 2643 sb_id * sizeof(u64), 2, 0); 2644 if (rc != ECORE_SUCCESS) { 2645 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); 2646 return rc; 2647 } 2648 2649 return rc; 2650 } 2651 2652 enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn, 2653 struct ecore_ptt *p_ptt, 2654 struct ecore_sb_info *p_sb, 2655 struct ecore_sb_info_dbg *p_info) 2656 { 2657 u16 sbid = p_sb->igu_sb_id; 2658 int i; 2659 2660 if (IS_VF(p_hwfn->p_dev)) 2661 return ECORE_INVAL; 2662 2663 if (sbid > NUM_OF_SBS(p_hwfn->p_dev)) 2664 return ECORE_INVAL; 2665 2666 p_info->igu_prod = ecore_rd(p_hwfn, p_ptt, 2667 IGU_REG_PRODUCER_MEMORY + sbid * 4); 2668 p_info->igu_cons = ecore_rd(p_hwfn, p_ptt, 2669 IGU_REG_CONSUMER_MEM + sbid * 4); 2670 2671 for (i = 0; i < PIS_PER_SB_E4; i++) 2672 p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt, 2673 CAU_REG_PI_MEMORY + 2674 sbid * 4 * PIS_PER_SB_E4 + 2675 i * 4); 2676 2677 return ECORE_SUCCESS; 2678 } 2679