/*
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include <rte_string_fns.h>

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_spq.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_init_ops.h"
#include "ecore_rt_defs.h"
#include "ecore_int.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_hw_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_mcp.h"

struct ecore_pi_info {
	ecore_int_comp_cb_t comp_cb;
	void *cookie; /* Will be sent to the compl cb function */
};

struct ecore_sb_sp_info {
	struct ecore_sb_info sb_info;
	/* per protocol index data */
	struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4];
};

enum ecore_attention_type {
	ECORE_ATTN_TYPE_ATTN,
	ECORE_ATTN_TYPE_PARITY,
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)

struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY		(1 << 0)

#define ATTENTION_LENGTH_MASK		(0x00000ff0)
#define ATTENTION_LENGTH_SHIFT		(4)
#define ATTENTION_LENGTH(flags)		(((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE		(1 << ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR			(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT		((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK		(0x000ff000)
#define ATTENTION_OFFSET_SHIFT		(12)

#define ATTENTION_BB_MASK		(0x00700000)
#define ATTENTION_BB_SHIFT		(20)
#define ATTENTION_BB(value)		((value) << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT		(1 << 23)

#define ATTENTION_CLEAR_ENABLE		(1 << 28)
	unsigned int flags;

	/* Callback to call if attention will be triggered */
	enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);

	enum block_id block_index;
};

struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS		(8)
#define NUM_ATTN_REGS		(9)

static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);

	return ECORE_SUCCESS;
}

#define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK		(0x3c000)
#define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT	(14)
#define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK		(0x03fc0)
#define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT	(6)
#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK	(0x00020)
#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT	(5)
#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK	(0x0001e)
#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT	(1)
#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK	(0x1)
#define ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT	(0)
#define ECORE_PSWHST_ATTENTION_VF_DISABLED		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT	(0)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK	(0x1e)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT	(1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x20)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK	(0x3fc0)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT	(6)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK	(0x3c000)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT	(14)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK	(0x3fc0000)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
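/* Reading aid (descriptive comment only, no behavioural change): the
 * DISABLED_* and INCORRECT_ACCESS_* mask/shift pairs above each extract one
 * field from the PSWHST error data word read in ecore_pswhst_attn_cb() below,
 * e.g. (illustrative):
 *
 *	pf_id = (data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK) >>
 *		ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT;
 */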
static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp =
	    ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		     PSWHST_REG_VF_DISABLED_ERROR_VALID);

	/* Disabled VF access */
	if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
		u32 addr, data;

		addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
		data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_VF_DISABLED_ERROR_DATA);
		DP_INFO(p_hwfn->p_dev,
			"PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
			" Write [0x%02x] Addr [0x%08x]\n",
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
			     >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
			     >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
			     ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
			     ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
			     ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT),
			addr);
	}

	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       PSWHST_REG_INCORRECT_ACCESS_VALID);
	if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
		u32 addr, data, length;

		addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
		data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_DATA);
		length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				  PSWHST_REG_INCORRECT_ACCESS_LENGTH);

		DP_INFO(p_hwfn->p_dev,
			"Incorrect access to %08x of length %08x - PF [%02x]"
			" VF [%04x] [valid %02x] client [%02x] write [%02x]"
			" Byte-Enable [%04x] [%08x]\n",
			addr, length,
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
			(u8)((data &
			      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
			data);
	}

	/* TODO - We know 'some' of these are legal due to virtualization,
	 * but is it true for all of them?
	 */
	return ECORE_SUCCESS;
}
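/* Reading aid for the GRC timeout handler below: the offending access is
 * reported in GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0/1. The address field holds a
 * dword address, which is why it is shifted left by two when printed as a
 * byte address, and the master field is translated to a name by
 * grc_timeout_attn_master_to_str().
 */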
#define ECORE_GRC_ATTENTION_VALID_BIT		(1 << 0)
#define ECORE_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff << 0)
#define ECORE_GRC_ATTENTION_RDWR_BIT		(1 << 23)
#define ECORE_GRC_ATTENTION_MASTER_MASK		(0xf << 24)
#define ECORE_GRC_ATTENTION_MASTER_SHIFT	(24)
#define ECORE_GRC_ATTENTION_PF_MASK		(0xf)
#define ECORE_GRC_ATTENTION_VF_MASK		(0xff << 4)
#define ECORE_GRC_ATTENTION_VF_SHIFT		(4)
#define ECORE_GRC_ATTENTION_PRIV_MASK		(0x3 << 14)
#define ECORE_GRC_ATTENTION_PRIV_SHIFT		(14)
#define ECORE_GRC_ATTENTION_PRIV_VF		(0)
static const char *grc_timeout_attn_master_to_str(u8 master)
{
	switch (master) {
	case 1:
		return "PXP";
	case 2:
		return "MCP";
	case 3:
		return "MSDM";
	case 4:
		return "PSDM";
	case 5:
		return "YSDM";
	case 6:
		return "USDM";
	case 7:
		return "TSDM";
	case 8:
		return "XSDM";
	case 9:
		return "DBU";
	case 10:
		return "DMAE";
	default:
		return "Unknown";
	}
}

static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register
	 */
	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
		goto out;

	/* Read the GRC timeout information */
	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

	DP_NOTICE(p_hwfn->p_dev, false,
		  "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
		  tmp2, tmp,
		  (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
						       : "Read from",
		  (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
		  grc_timeout_attn_master_to_str(
			(tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
			ECORE_GRC_ATTENTION_MASTER_SHIFT),
		  (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
		  (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
		    ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
		   ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
"VF" : "(Irrelevant:)", 265 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >> 266 ECORE_GRC_ATTENTION_VF_SHIFT); 267 268 out: 269 /* Regardles of anything else, clean the validity bit */ 270 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 271 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 272 return ECORE_SUCCESS; 273 } 274 275 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29) 276 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26) 277 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20) 278 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 279 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19) 280 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24) 281 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 282 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21) 283 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22) 284 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23) 285 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23) 286 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25) 287 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23) 288 289 enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, 290 struct ecore_ptt *p_ptt, 291 bool is_hw_init) 292 { 293 u32 tmp; 294 char str[512] = {0}; 295 296 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); 297 if (tmp & ECORE_PGLUE_ATTENTION_VALID) { 298 u32 addr_lo, addr_hi, details; 299 300 addr_lo = ecore_rd(p_hwfn, p_ptt, 301 PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 302 addr_hi = ecore_rd(p_hwfn, p_ptt, 303 PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 304 details = ecore_rd(p_hwfn, p_ptt, 305 PGLUE_B_REG_TX_ERR_WR_DETAILS); 306 OSAL_SNPRINTF(str, 512, 307 "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 308 addr_hi, addr_lo, details, 309 (u8)((details & 310 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> 311 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 312 (u8)((details & 313 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> 314 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 315 (u8)((details & 316 ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 317 tmp, 318 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 319 1 : 0), 320 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 321 1 : 0), 322 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 323 1 : 0)); 324 if (is_hw_init) 325 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "%s", str); 326 else 327 DP_NOTICE(p_hwfn, false, "%s", str); 328 } 329 330 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); 331 if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) { 332 u32 addr_lo, addr_hi, details; 333 334 addr_lo = ecore_rd(p_hwfn, p_ptt, 335 PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 336 addr_hi = ecore_rd(p_hwfn, p_ptt, 337 PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 338 details = ecore_rd(p_hwfn, p_ptt, 339 PGLUE_B_REG_TX_ERR_RD_DETAILS); 340 341 DP_NOTICE(p_hwfn, false, 342 "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 343 addr_hi, addr_lo, details, 344 (u8)((details & 345 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> 346 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 347 (u8)((details & 348 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> 349 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 350 (u8)((details & 351 ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 352 tmp, 353 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 
			       1 : 0),
			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
			       1 : 0),
			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
			       1 : 0));
	}

	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
	if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
		DP_NOTICE(p_hwfn, false, "ICPL error - %08x\n", tmp);

	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
	if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
		u32 addr_hi, addr_lo;

		addr_lo = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);

		DP_NOTICE(p_hwfn, false,
			  "ZLR error - %08x [Address %08x:%08x]\n",
			  tmp, addr_hi, addr_lo);
	}

	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
	if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
		u32 addr_hi, addr_lo, details;

		addr_lo = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
		details = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_VF_ILT_ERR_DETAILS);

		DP_NOTICE(p_hwfn, false,
			  "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
			  details, tmp, addr_hi, addr_lo);
	}

	/* Clear the indications */
	ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
{
	return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false);
}

static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
{
	DP_NOTICE(p_hwfn, false, "FW assertion!\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);

	return ECORE_INVAL;
}

static enum _ecore_status_t
ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
{
	DP_INFO(p_hwfn, "General attention 35!\n");

	return ECORE_SUCCESS;
}

#define ECORE_DORQ_ATTENTION_REASON_MASK	(0xfffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT	(0x0)
#define ECORE_DORQ_ATTENTION_SIZE_MASK		(0x7f)
#define ECORE_DORQ_ATTENTION_SIZE_SHIFT		(16)

#define ECORE_DB_REC_COUNT			10
#define ECORE_DB_REC_INTERVAL			100

/* assumes sticky overflow indication was set for this PF */
static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt)
{
	u8 count = ECORE_DB_REC_COUNT;
	u32 usage = 1;

	/* wait for usage to zero or count to run out. This is necessary since
	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
	 * can "split" over the pci. The doorbell drop can thus happen with
	 * half an EDPM in the queue and the other half dropped. Another EDPM
	 * doorbell to the same address (from the doorbell recovery mechanism
	 * or from the doorbelling entity) could have its first half dropped
	 * and the second half interpreted as a continuation of the first. To
	 * prevent such malformed doorbells from reaching the device, flush the
	 * queue before releasing the overflow sticky indication.
	 */
	while (count-- && usage) {
		usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
		OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
	}

	/* should have been depleted by now */
	if (usage) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
			  ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
		return ECORE_TIMEOUT;
	}

	/* flush any pending (e)dpm as they may never arrive */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);

	/* release overflow sticky indication (stop silently dropping
	 * everything)
	 */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);

	/* repeat all last doorbells (doorbell drop recovery) */
	ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 int_sts, first_drop_reason, details, address, overflow,
		all_drops_reason;
	struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
	enum _ecore_status_t rc;

	int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
	DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
		  int_sts);

	/* int_sts may be zero since all PFs were interrupted for doorbell
	 * overflow but another one already handled it. Can abort here. If
	 * this PF also requires overflow recovery we will be interrupted again
	 */
	if (!int_sts)
		return ECORE_SUCCESS;

	/* check if db_drop or overflow happened */
	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
		/* obtain data about db drop/overflow */
		first_drop_reason = ecore_rd(p_hwfn, p_ptt,
					     DORQ_REG_DB_DROP_REASON) &
				    ECORE_DORQ_ATTENTION_REASON_MASK;
		details = ecore_rd(p_hwfn, p_ptt,
				   DORQ_REG_DB_DROP_DETAILS);
		address = ecore_rd(p_hwfn, p_ptt,
				   DORQ_REG_DB_DROP_DETAILS_ADDRESS);
		overflow = ecore_rd(p_hwfn, p_ptt,
				    DORQ_REG_PF_OVFL_STICKY);
		all_drops_reason = ecore_rd(p_hwfn, p_ptt,
					    DORQ_REG_DB_DROP_DETAILS_REASON);

		/* log info */
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Doorbell drop occurred\n"
			  "Address\t\t0x%08x\t(second BAR address)\n"
			  "FID\t\t0x%04x\t\t(Opaque FID)\n"
			  "Size\t\t0x%04x\t\t(in bytes)\n"
			  "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
			  "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n"
			  "Overflow\t0x%x\t\t(a per PF indication)\n",
			  address,
			  GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
			  GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
			  first_drop_reason, all_drops_reason, overflow);

		/* if this PF caused overflow, initiate recovery */
		if (overflow) {
			rc = ecore_db_rec_attn(p_hwfn, p_ptt);
			if (rc != ECORE_SUCCESS)
				return rc;
		}

		/* clear the doorbell drop details and prepare for next drop */
		ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);

		/* mark interrupt as handled (note: even if drop was due to a
		 * different reason than overflow we mark as handled)
		 */
		ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
			 DORQ_REG_INT_STS_DB_DROP |
			 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);

		/* if there are no indications other than drop indications,
		 * success
		 */
		if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
				 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
				 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
			return ECORE_SUCCESS;
	}

	/* some other indication was present - non recoverable */
	DP_INFO(p_hwfn, "DORQ fatal attention\n");

	return ECORE_INVAL;
}
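/* Reading aid for the two handlers above: on a DORQ attention the driver
 * checks whether a doorbell drop/overflow is indicated, dumps the drop
 * details, and - if this PF's sticky overflow bit is set - runs
 * ecore_db_rec_attn(): wait for DORQ_REG_PF_USAGE_CNT to drain, force-abort
 * any pending (E)DPM, clear DORQ_REG_PF_OVFL_STICKY and finally replay the
 * recorded doorbells via ecore_db_recovery_execute().
 */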
static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
		u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   TM_REG_INT_STS_1);

		if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
			    TM_REG_INT_STS_1_PEND_CONN_SCAN))
			return ECORE_INVAL;

		if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
			   TM_REG_INT_STS_1_PEND_CONN_SCAN))
			DP_INFO(p_hwfn,
				"TM attention on emulation - most likely"
				" results of clock-ratios\n");
		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
		val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
		       TM_REG_INT_MASK_1_PEND_TASK_SCAN;
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);

		return ECORE_SUCCESS;
	}
#endif

	return ECORE_INVAL;
}

/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};

static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
};

/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
	{
	 {			/* After Invert 1 */
	  {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
	   MAX_BLOCK_ID},
	  }
	 },

	{
	 {			/* After Invert 2 */
	  {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb,
	   BLOCK_PGLUE_B},
	  {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"SW timers #%d",
	   (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT),
	   OSAL_NULL, MAX_BLOCK_ID},
	  {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
	   BLOCK_PGLCS},
	  }
	 },

	{
	 {			/* After Invert 3 */
	  {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
	   MAX_BLOCK_ID},
	  }
	 },

	{
	 {			/* After Invert 4 */
	  {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
	   ecore_fw_assertion, MAX_BLOCK_ID},
	  {"General Attention %d",
	   (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT),
	   OSAL_NULL, MAX_BLOCK_ID},
	  {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
	   ecore_general_attention_35, MAX_BLOCK_ID},
	  {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
	   OSAL_NULL, BLOCK_NWS},
	  {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			    ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
	   OSAL_NULL, BLOCK_NWS},
	  {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
	   OSAL_NULL, BLOCK_NWM},
	  {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			    ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
	   OSAL_NULL, BLOCK_NWM},
	  {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
659 {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 660 {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 661 {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 662 {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 663 {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 664 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 665 MAX_BLOCK_ID}, 666 {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG}, 667 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB}, 668 {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB}, 669 {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB}, 670 {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS}, 671 } 672 }, 673 674 { 675 { /* After Invert 5 */ 676 {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC}, 677 {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1}, 678 {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2}, 679 {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB}, 680 {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF}, 681 {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM}, 682 {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM}, 683 {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM}, 684 {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM}, 685 {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM}, 686 {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM}, 687 {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM}, 688 {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM}, 689 {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM}, 690 {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM}, 691 {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM}, 692 } 693 }, 694 695 { 696 { /* After Invert 6 */ 697 {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM}, 698 {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM}, 699 {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM}, 700 {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM}, 701 {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM}, 702 {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM}, 703 {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM}, 704 {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM}, 705 {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM}, 706 {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD}, 707 {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD}, 708 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD}, 709 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD}, 710 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ}, 711 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG}, 712 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC}, 713 } 714 }, 715 716 { 717 { /* After Invert 7 */ 718 {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC}, 719 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU}, 720 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE}, 721 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU}, 722 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 723 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU}, 724 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU}, 725 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM}, 726 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC}, 727 {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF}, 728 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF}, 729 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS}, 730 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC}, 731 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS}, 732 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE}, 733 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 734 {"PSWRQ", 
	  }
	 },

	{
	 {			/* After Invert 8 */
	  {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
	  {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
	  {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
	  {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
	  {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
	  {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
	  {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
	  {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
	  {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
	  {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
	  {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
	  {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
	  {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
	  {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
	   MAX_BLOCK_ID},
	  }
	 },

	{
	 {			/* After Invert 9 */
	  {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
	   MAX_BLOCK_ID},
	  {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
	  {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
	   MAX_BLOCK_ID},
	  }
	 },

};

static struct aeu_invert_reg_bit *
ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
			struct aeu_invert_reg_bit *p_bit)
{
	if (!ECORE_IS_BB(p_hwfn->p_dev))
		return p_bit;

	if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
		return p_bit;

	return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
				  ATTENTION_BB_SHIFT];
}

static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
				     struct aeu_invert_reg_bit *p_bit)
{
	return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
		  ATTENTION_PARITY);
}

#define ATTN_STATE_BITS		(0xfff)
#define ATTN_BITS_MASKABLE	(0x3ff)
struct ecore_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block *sb_attn;
	dma_addr_t sb_phys;

	/* Last seen running index */
	u16 index;

	/* A mask of the AEU bits resulting in a parity error */
	u32 parity_mask[NUM_ATTN_REGS];

	/* A pointer to the attention description structure */
	struct aeu_invert_reg *p_aeu_desc;

	/* Previously asserted attentions, which are still unasserted */
	u16 known_attn;

	/* Cleanup address for the link's general hw attention */
	u32 mfw_attn_addr;
};

static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
				 struct ecore_sb_attn_info *p_sb_desc)
{
	u16 rc = 0, index;

	OSAL_MMIOWB(p_hwfn->p_dev);

	index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
	if (p_sb_desc->index != index) {
		p_sb_desc->index = index;
		rc = ECORE_SB_ATT_IDX;
	}

	OSAL_MMIOWB(p_hwfn->p_dev);

	return rc;
}
/**
 * @brief ecore_int_assertion - handles asserted attention bits
 *
 * @param p_hwfn
 * @param asserted_bits newly asserted bits
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
						u16 asserted_bits)
{
	struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			    IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
			 sb_attn_sw->mfw_attn_addr, 0);
	}

	/* FIXME - this will change once we'll have GOOD gtt definitions */
	DIRECT_REG_WR(p_hwfn,
		      (u8 OSAL_IOMEM *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return ECORE_SUCCESS;
}

static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
				 enum block_id id, enum dbg_attn_type type,
				 bool b_clear)
{
	/* @DPDK */
	DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type);
}

/**
 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
 * cause of the attention
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the attention
 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
 *		       this bit to this group.
 * @param bit_index - index of this bit in the aeu_en_reg
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
			      struct aeu_invert_reg_bit *p_aeu,
			      u32 aeu_en_reg,
			      const char *p_bit_name,
			      u32 bitmask)
{
	enum _ecore_status_t rc = ECORE_INVAL;
	bool b_fatal = false;

	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
		p_bit_name, bitmask);

	/* Call callback before clearing the interrupt status */
	if (p_aeu->cb) {
		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
			p_bit_name);
		rc = p_aeu->cb(p_hwfn);
	}

	if (rc != ECORE_SUCCESS)
		b_fatal = true;

	/* Print HW block interrupt registers */
	if (p_aeu->block_index != MAX_BLOCK_ID) {
		ecore_int_attn_print(p_hwfn, p_aeu->block_index,
				     ATTN_TYPE_INTERRUPT, !b_fatal);
	}

	/* @DPDK */
	/* Reach assertion if attention is fatal */
	if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) {
		DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
			  p_bit_name);

		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
	}

	/* Prevent this Attention from being asserted in the future */
	if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
	    p_hwfn->p_dev->attn_clr_en) {
		u32 val;
		u32 mask = ~bitmask;
		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
		DP_ERR(p_hwfn, "`%s' - Disabled future attentions\n",
		       p_bit_name);
	}

	return rc;
}

/**
 * @brief ecore_int_deassertion_parity - handle a single parity AEU source
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the parity
 * @param aeu_en_reg - address of the AEU enable register
 * @param bit_index
 */
static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
					 struct aeu_invert_reg_bit *p_aeu,
					 u32 aeu_en_reg, u8 bit_index)
{
	u32 block_id = p_aeu->block_index, mask, val;

	DP_NOTICE(p_hwfn->p_dev, false,
		  "%s parity attention is set [address 0x%08x, bit %d]\n",
		  p_aeu->bit_name, aeu_en_reg, bit_index);

	if (block_id != MAX_BLOCK_ID) {
		ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);

		/* In A0, there's a single parity bit for several blocks */
		if (block_id == BLOCK_BTB) {
			ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
					     ATTN_TYPE_PARITY, false);
			ecore_int_attn_print(p_hwfn, BLOCK_MCP,
					     ATTN_TYPE_PARITY, false);
		}
	}

	/* Prevent this parity error from being re-asserted */
	mask = ~(0x1 << bit_index);
	val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
		p_aeu->bit_name);
}

/**
 * @brief - handles deassertion of previously asserted attentions.
 *
 * @param p_hwfn
 * @param deasserted_bits - newly deasserted bits
 * @return enum _ecore_status_t
 *
 */
static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
						  u16 deasserted_bits)
{
	struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
	u8 i, j, k, bit_idx;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Read the attention registers in the AEU */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
					  MISC_REG_AEU_AFTER_INVERT_1_IGU +
					  i * 0x4);
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
	}

	/* Handle parity attentions first */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
		u32 parities;

		aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
		en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;

		/* Skip register in which no parity bit is currently set */
		if (!parities)
			continue;

		for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

			if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
			    !!(parities & (1 << bit_idx)))
				ecore_int_deassertion_parity(p_hwfn, p_bit,
							     aeu_en, bit_idx);

			bit_idx += ATTENTION_LENGTH(p_bit->flags);
		}
	}

	/* Find non-parity cause for attention and act */
	for (k = 0; k < MAX_ATTN_GRPS; k++) {
		struct aeu_invert_reg_bit *p_aeu;

		/* Handle only groups whose attention is currently deasserted */
		if (!(deasserted_bits & (1 << k)))
			continue;

		for (i = 0; i < NUM_ATTN_REGS; i++) {
			u32 bits;

			aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
				 i * sizeof(u32) +
				 k * sizeof(u32) * NUM_ATTN_REGS;
			en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
			bits = aeu_inv_arr[i] & en;

			/* Skip if no bit from this group is currently set */
			if (!bits)
				continue;

			/* Find all set bits from current register which belong
			 * to current group, making them responsible for the
			 * previous assertion.
			 */
			for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
				unsigned long int bitmask;
				u8 bit, bit_len;

				/* Need to account for bits with changed
				 * meaning.
				 */
				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];

				bit = bit_idx;
				bit_len = ATTENTION_LENGTH(p_aeu->flags);
				if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
					/* Skip Parity */
					bit++;
					bit_len--;
				}

				/* Find the bits relating to HW-block, then
				 * shift so they'll become LSB.
				 */
				bitmask = bits & (((1 << bit_len) - 1) << bit);
				bitmask >>= bit;

				if (bitmask) {
					u32 flags = p_aeu->flags;
					char bit_name[30];
					u8 num;

					num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
								      bit_len);

					/* Some bits represent more than a
					 * single interrupt. Correctly print
					 * their name.
					 */
					if (ATTENTION_LENGTH(flags) > 2 ||
					    ((flags & ATTENTION_PAR_INT) &&
					     ATTENTION_LENGTH(flags) > 1))
						OSAL_SNPRINTF(bit_name, 30,
							      p_aeu->bit_name,
							      num);
					else
						strlcpy(bit_name,
							p_aeu->bit_name,
							sizeof(bit_name));

					/* We now need to pass bitmask in its
					 * correct position.
					 */
					bitmask <<= bit;

					/* Handle source of the attention */
					ecore_int_deassertion_aeu_bit(p_hwfn,
								      p_aeu,
								      aeu_en,
								      bit_name,
								      bitmask);
				}

				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
			}
		}
	}

	/* Clear IGU indication for the deasserted bits */
	/* FIXME - this will change once we'll have GOOD gtt definitions */
	DIRECT_REG_WR(p_hwfn,
		      (u8 OSAL_IOMEM *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			    IGU_REG_ATTENTION_ENABLE);
	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

	/* Clear deassertion from inner state */
	sb_attn_sw->known_attn &= ~deasserted_bits;

	return rc;
}

static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u16 index = 0, asserted_bits, deasserted_bits;
	u32 attn_bits = 0, attn_acks = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
	 */
	do {
		index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
		attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
		attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
	} while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
	p_sb_attn->sb_index = index;

	/* Attention / Deassertion are meaningful (and in correct state)
	 * only when they differ and are consistent with known state -
	 * deassertion when previous attention & current ack, and assertion
	 * when current attention with no previous attention
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
			~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
			  p_sb_attn_sw->known_attn;

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	else if (asserted_bits == 0x100)
		DP_INFO(p_hwfn, "MFW indication via attention\n");
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "MFW indication [deassertion]\n");

	if (asserted_bits) {
		rc = ecore_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits)
		rc = ecore_int_deassertion(p_hwfn, deasserted_bits);

	return rc;
}

static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
			      void OSAL_IOMEM *igu_addr, u32 ack_cons)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	igu_ack.sb_id_and_flags =
	    ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
	     (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
	     (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
	     (IGU_SEG_ACCESS_ATTN <<
	      IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to the same address;
	 * Need to guarantee all commands will be received (in-order) by HW.
	 */
	OSAL_MMIOWB(p_hwfn->p_dev);
	OSAL_BARRIER(p_hwfn->p_dev);
}
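/* Reading aid: ecore_int_sp_dpc() below is the slowpath bottom-half. It
 * disables the status-block ack, samples the SB and attention running
 * indices, runs attention handling (ecore_int_attentions) and any registered
 * protocol-index callbacks, acks the attention segment and finally re-enables
 * interrupts on the default status block.
 */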
void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
	struct ecore_pi_info *pi_info = OSAL_NULL;
	struct ecore_sb_attn_info *sb_attn;
	struct ecore_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	if (!p_hwfn)
		return;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
	if (!sb_info) {
		DP_ERR(p_hwfn->p_dev,
		       "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for def status block. Required both for msix and
	 * inta in non-mask mode; in inta it does no harm.
	 */
	ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->p_dev,
		       "Interrupt Status block is NULL -"
		       " cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;
		rc = ecore_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->p_dev,
		       "Attentions Status block is NULL -"
		       " cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. if not just ack them */
	if (!(rc & ECORE_SB_EVENT_MASK)) {
		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If not valid, ack interrupts
	 * and fail.
	 */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & ECORE_SB_ATT_IDX)
		ecore_int_attentions(p_hwfn);

	if (rc & ECORE_SB_IDX) {
		int pi;

		/* Since we only looked at the SB index, it's possible more
		 * than a single protocol-index on the SB incremented.
		 * Iterate over all configured protocol indices and check
		 * whether something happened for each.
		 */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb != OSAL_NULL)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
	}

	ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}

static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_attn) {
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
				       p_sb->sb_phys,
				       SB_ATTN_ALIGNED_SIZE(p_hwfn));
	}
	OSAL_FREE(p_hwfn->p_dev, p_sb);
}

static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
		 DMA_LO(p_hwfn->p_sb_attn->sb_phys));
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
		 DMA_HI(p_hwfn->p_sb_attn->sb_phys));
}

static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
	struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
	int i, j, k;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the pointer to the AEU descriptors */
	sb_info->p_aeu_desc = aeu_descs;

	/* Calculate Parity Masks */
	OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		/* j is array index, k is bit index */
		for (j = 0, k = 0; k < 32; j++) {
			struct aeu_invert_reg_bit *p_aeu;

			p_aeu = &aeu_descs[i].bits[j];
			if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
				sb_info->parity_mask[i] |= 1 << k;

			k += ATTENTION_LENGTH(p_aeu->flags);
		}
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Attn Mask [Reg %d]: 0x%08x\n",
			   i, sb_info->parity_mask[i]);
	}

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
				 MISC_REG_AEU_GENERAL_ATTN_0;

	ecore_int_sb_attn_setup(p_hwfn, p_ptt);
}

static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	struct ecore_sb_attn_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
	if (!p_sb) {
		DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n");
		return ECORE_NOMEM;
	}

	/* SB ring */
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
					 SB_ATTN_ALIGNED_SIZE(p_hwfn));
	if (!p_virt) {
		DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n");
		OSAL_FREE(p_dev, p_sb);
		return ECORE_NOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return ECORE_SUCCESS;
}
/* coalescing timeout = timeset << (timer_res + 1) */
#define ECORE_CAU_DEF_RX_USECS 24
#define ECORE_CAU_DEF_TX_USECS 48

void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
			     struct cau_sb_entry *p_sb_entry,
			     u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cau_state;
	u8 timer_res;

	OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	cau_state = CAU_HC_DISABLE_STATE;

	if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!p_dev->rx_coalesce_usecs)
			p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
		if (!p_dev->tx_coalesce_usecs)
			p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
	}

	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
	if (p_dev->rx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (p_dev->rx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	if (p_dev->tx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (p_dev->tx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);

	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
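/* Worked example (illustrative only, using the formulas in the comments
 * above/below): with rx_coalesce_usecs = 200, the selection above yields
 * timer_res = 1, and ecore_int_cau_conf_sb() below derives
 * timeset = 200 >> 1 = 100, so the effective coalescing value is
 * timeset << (timer_res + 1) = 100 << 2 = 400 timer units. The time base of
 * a unit is hardware-defined and not restated here.
 */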
static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   u16 igu_sb_id, u32 pi_index,
				   enum ecore_coalescing_fsm coalescing_fsm,
				   u8 timeset)
{
	struct cau_pi_entry pi_entry;
	u32 sb_offset, pi_offset;

	if (IS_VF(p_hwfn->p_dev))
		return;		/* @@@TBD MichalK- VF CAU... */

	sb_offset = igu_sb_id * PIS_PER_SB_E4;
	OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));

	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

	pi_offset = sb_offset + pi_index;
	if (p_hwfn->hw_init_done) {
		ecore_wr(p_hwfn, p_ptt,
			 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
			 *((u32 *)&(pi_entry)));
	} else {
		STORE_RT_REG(p_hwfn,
			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     *((u32 *)&(pi_entry)));
	}
}

void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   struct ecore_sb_info *p_sb, u32 pi_index,
			   enum ecore_coalescing_fsm coalescing_fsm,
			   u8 timeset)
{
	_ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
			       pi_index, coalescing_fsm, timeset);
}

void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   dma_addr_t sb_phys, u16 igu_sb_id,
			   u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
				vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&phys_addr,
				    CAU_REG_SB_ADDR_MEMORY +
				    igu_sb_id * sizeof(u64), 2, 0);
		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&sb_entry,
				    CAU_REG_SB_VAR_MEMORY +
				    igu_sb_id * sizeof(u64), 2, 0);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		/* eth will open queues for all tcs, so configure all of them
		 * properly, rather than just the active ones
		 */
		u8 num_tc = p_hwfn->hw_info.num_hw_tc;

		u8 timeset, timer_res;
		u8 i;

		/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
		if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
		_ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				       ECORE_COAL_RX_STATE_MACHINE,
				       timeset);

		if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
		for (i = 0; i < num_tc; i++) {
			_ecore_int_cau_conf_pi(p_hwfn, p_ptt,
					       igu_sb_id, TX_PI(i),
					       ECORE_COAL_TX_STATE_MACHINE,
					       timeset);
		}
	}
}

void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_PF(p_hwfn->p_dev))
		ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
				      sb_info->igu_sb_id, 0, 0);
}
struct ecore_igu_block *
ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
{
	struct ecore_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !(p_block->status & ECORE_IGU_STATUS_FREE))
			continue;

		if (!!(p_block->status & ECORE_IGU_STATUS_PF) ==
		    b_is_pf)
			return p_block;
	}

	return OSAL_NULL;
}

static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn,
				  u16 vector_id)
{
	struct ecore_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    p_block->vector_number != vector_id)
			continue;

		return igu_id;
	}

	return ECORE_SB_INVALID_IDX;
}

u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
{
	u16 igu_sb_id;

	/* Assuming continuous set of IGU SBs dedicated for given PF */
	if (sb_id == ECORE_SP_SB_ID)
		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	else if (IS_PF(p_hwfn->p_dev))
		igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
	else
		igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);

	if (igu_sb_id == ECORE_SB_INVALID_IDX)
		DP_NOTICE(p_hwfn, true,
			  "Slowpath SB vector %04x doesn't exist\n",
			  sb_id);
	else if (sb_id == ECORE_SP_SB_ID)
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);

	return igu_sb_id;
}

enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_sb_info *sb_info,
				       void *sb_virt_addr,
				       dma_addr_t sb_phy_addr, u16 sb_id)
{
	sb_info->sb_virt = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
		return ECORE_INVAL;

	/* Let the igu info reference the client's SB info */
	if (sb_id != ECORE_SP_SB_ID) {
		if (IS_PF(p_hwfn->p_dev)) {
			struct ecore_igu_info *p_info;
			struct ecore_igu_block *p_block;

			p_info = p_hwfn->hw_info.p_igu_info;
			p_block = &p_info->entry[sb_info->igu_sb_id];

			p_block->sb_info = sb_info;
			p_block->status &= ~ECORE_IGU_STATUS_FREE;
			p_info->usage.free_cnt--;
		} else {
			ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
		}
	}
#ifdef ECORE_CONFIG_DIRECT_HWFN
	sb_info->p_hwfn = p_hwfn;
#endif
	sb_info->p_dev = p_hwfn->p_dev;

	/* The igu address will hold the absolute address that needs to be
	 * written to for a specific status block
	 */
	if (IS_PF(p_hwfn->p_dev)) {
		sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
				    GTT_BAR0_MAP_REG_IGU_CMD +
				    (sb_info->igu_sb_id << 3);

	} else {
		sb_info->igu_addr =
		    (u8 OSAL_IOMEM *)p_hwfn->regview +
		    PXP_VF_BAR0_START_IGU +
		    ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
	}

	sb_info->flags |= ECORE_SB_INFO_INIT;
ecore_int_sb_setup(p_hwfn, p_ptt, sb_info); 1734 1735 return ECORE_SUCCESS; 1736 } 1737 1738 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn, 1739 struct ecore_sb_info *sb_info, 1740 u16 sb_id) 1741 { 1742 struct ecore_igu_info *p_info; 1743 struct ecore_igu_block *p_block; 1744 1745 if (sb_info == OSAL_NULL) 1746 return ECORE_SUCCESS; 1747 1748 /* zero status block and ack counter */ 1749 sb_info->sb_ack = 0; 1750 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1751 1752 if (IS_VF(p_hwfn->p_dev)) { 1753 ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL); 1754 return ECORE_SUCCESS; 1755 } 1756 1757 p_info = p_hwfn->hw_info.p_igu_info; 1758 p_block = &p_info->entry[sb_info->igu_sb_id]; 1759 1760 /* Vector 0 is reserved to Default SB */ 1761 if (p_block->vector_number == 0) { 1762 DP_ERR(p_hwfn, "Do Not free sp sb using this function"); 1763 return ECORE_INVAL; 1764 } 1765 1766 /* Lose reference to client's SB info, and fix counters */ 1767 p_block->sb_info = OSAL_NULL; 1768 p_block->status |= ECORE_IGU_STATUS_FREE; 1769 p_info->usage.free_cnt++; 1770 1771 return ECORE_SUCCESS; 1772 } 1773 1774 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn) 1775 { 1776 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb; 1777 1778 if (!p_sb) 1779 return; 1780 1781 if (p_sb->sb_info.sb_virt) { 1782 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 1783 p_sb->sb_info.sb_virt, 1784 p_sb->sb_info.sb_phys, 1785 SB_ALIGNED_SIZE(p_hwfn)); 1786 } 1787 1788 OSAL_FREE(p_hwfn->p_dev, p_sb); 1789 } 1790 1791 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn, 1792 struct ecore_ptt *p_ptt) 1793 { 1794 struct ecore_sb_sp_info *p_sb; 1795 dma_addr_t p_phys = 0; 1796 void *p_virt; 1797 1798 /* SB struct */ 1799 p_sb = 1800 OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, 1801 sizeof(*p_sb)); 1802 if (!p_sb) { 1803 DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n"); 1804 return ECORE_NOMEM; 1805 } 1806 1807 /* SB ring */ 1808 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 1809 &p_phys, SB_ALIGNED_SIZE(p_hwfn)); 1810 if (!p_virt) { 1811 DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n"); 1812 OSAL_FREE(p_hwfn->p_dev, p_sb); 1813 return ECORE_NOMEM; 1814 } 1815 1816 /* Status Block setup */ 1817 p_hwfn->p_sp_sb = p_sb; 1818 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, 1819 p_virt, p_phys, ECORE_SP_SB_ID); 1820 1821 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); 1822 1823 return ECORE_SUCCESS; 1824 } 1825 1826 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn, 1827 ecore_int_comp_cb_t comp_cb, 1828 void *cookie, 1829 u8 *sb_idx, __le16 **p_fw_cons) 1830 { 1831 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1832 enum _ecore_status_t rc = ECORE_NOMEM; 1833 u8 pi; 1834 1835 /* Look for a free index */ 1836 for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { 1837 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL) 1838 continue; 1839 1840 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; 1841 p_sp_sb->pi_info_arr[pi].cookie = cookie; 1842 *sb_idx = pi; 1843 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; 1844 rc = ECORE_SUCCESS; 1845 break; 1846 } 1847 1848 return rc; 1849 } 1850 1851 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi) 1852 { 1853 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1854 1855 if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL) 1856 return ECORE_NOMEM; 1857 1858 p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL; 1859 p_sp_sb->pi_info_arr[pi].cookie = 
OSAL_NULL; 1860 return ECORE_SUCCESS; 1861 } 1862 1863 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn) 1864 { 1865 return p_hwfn->p_sp_sb->sb_info.igu_sb_id; 1866 } 1867 1868 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn, 1869 struct ecore_ptt *p_ptt, 1870 enum ecore_int_mode int_mode) 1871 { 1872 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; 1873 1874 #ifndef ASIC_ONLY 1875 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1876 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n"); 1877 igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN; 1878 } 1879 #endif 1880 1881 p_hwfn->p_dev->int_mode = int_mode; 1882 switch (p_hwfn->p_dev->int_mode) { 1883 case ECORE_INT_MODE_INTA: 1884 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; 1885 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1886 break; 1887 1888 case ECORE_INT_MODE_MSI: 1889 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1890 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1891 break; 1892 1893 case ECORE_INT_MODE_MSIX: 1894 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1895 break; 1896 case ECORE_INT_MODE_POLL: 1897 break; 1898 } 1899 1900 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 1901 } 1902 1903 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn, 1904 struct ecore_ptt *p_ptt) 1905 { 1906 #ifndef ASIC_ONLY 1907 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1908 DP_INFO(p_hwfn, 1909 "FPGA - Don't enable Attentions in IGU and MISC\n"); 1910 return; 1911 } 1912 #endif 1913 1914 /* Configure AEU signal change to produce attentions */ 1915 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 1916 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); 1917 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); 1918 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); 1919 1920 /* Flush the writes to IGU */ 1921 OSAL_MMIOWB(p_hwfn->p_dev); 1922 1923 /* Unmask AEU signals toward IGU */ 1924 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 1925 } 1926 1927 enum _ecore_status_t 1928 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 1929 enum ecore_int_mode int_mode) 1930 { 1931 enum _ecore_status_t rc = ECORE_SUCCESS; 1932 1933 ecore_int_igu_enable_attn(p_hwfn, p_ptt); 1934 1935 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { 1936 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn); 1937 if (rc != ECORE_SUCCESS) { 1938 DP_NOTICE(p_hwfn, true, 1939 "Slowpath IRQ request failed\n"); 1940 return ECORE_NORESOURCES; 1941 } 1942 p_hwfn->b_int_requested = true; 1943 } 1944 1945 /* Enable interrupt Generation */ 1946 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 1947 1948 p_hwfn->b_int_enabled = 1; 1949 1950 return rc; 1951 } 1952 1953 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn, 1954 struct ecore_ptt *p_ptt) 1955 { 1956 p_hwfn->b_int_enabled = 0; 1957 1958 if (IS_VF(p_hwfn->p_dev)) 1959 return; 1960 1961 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); 1962 } 1963 1964 #define IGU_CLEANUP_SLEEP_LENGTH (1000) 1965 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn, 1966 struct ecore_ptt *p_ptt, 1967 u32 igu_sb_id, 1968 bool cleanup_set, 1969 u16 opaque_fid) 1970 { 1971 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; 1972 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; 1973 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; 1974 u8 type = 0; /* FIXME MichalS type??? */ 1975 1976 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 - 1977 IGU_REG_CLEANUP_STATUS_0) != 0x200); 1978 1979 /* USE Control Command Register to perform cleanup. 
There is an 1980 * option to do this using IGU bar, but then it can't be used for VFs. 1981 */ 1982 1983 /* Set the data field */ 1984 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); 1985 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type); 1986 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); 1987 1988 /* Set the control register */ 1989 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); 1990 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); 1991 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); 1992 1993 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); 1994 1995 OSAL_BARRIER(p_hwfn->p_dev); 1996 1997 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); 1998 1999 /* Flush the write to IGU */ 2000 OSAL_MMIOWB(p_hwfn->p_dev); 2001 2002 /* calculate where to read the status bit from */ 2003 sb_bit = 1 << (igu_sb_id % 32); 2004 sb_bit_addr = igu_sb_id / 32 * sizeof(u32); 2005 2006 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type); 2007 2008 /* Now wait for the command to complete */ 2009 while (--sleep_cnt) { 2010 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr); 2011 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) 2012 break; 2013 OSAL_MSLEEP(5); 2014 } 2015 2016 if (!sleep_cnt) 2017 DP_NOTICE(p_hwfn, true, 2018 "Timeout waiting for clear status 0x%08x [for sb %d]\n", 2019 val, igu_sb_id); 2020 } 2021 2022 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn, 2023 struct ecore_ptt *p_ptt, 2024 u16 igu_sb_id, u16 opaque, bool b_set) 2025 { 2026 struct ecore_igu_block *p_block; 2027 int pi, i; 2028 2029 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2030 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2031 "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", 2032 igu_sb_id, p_block->function_id, p_block->is_pf, 2033 p_block->vector_number); 2034 2035 /* Set */ 2036 if (b_set) 2037 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); 2038 2039 /* Clear */ 2040 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); 2041 2042 /* Wait for the IGU SB to cleanup */ 2043 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { 2044 u32 val; 2045 2046 val = ecore_rd(p_hwfn, p_ptt, 2047 IGU_REG_WRITE_DONE_PENDING + 2048 ((igu_sb_id / 32) * 4)); 2049 if (val & (1 << (igu_sb_id % 32))) 2050 OSAL_UDELAY(10); 2051 else 2052 break; 2053 } 2054 if (i == IGU_CLEANUP_SLEEP_LENGTH) 2055 DP_NOTICE(p_hwfn, true, 2056 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", 2057 igu_sb_id); 2058 2059 /* Clear the CAU for the SB */ 2060 for (pi = 0; pi < 12; pi++) 2061 ecore_wr(p_hwfn, p_ptt, 2062 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0); 2063 } 2064 2065 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn, 2066 struct ecore_ptt *p_ptt, 2067 bool b_set, bool b_slowpath) 2068 { 2069 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2070 struct ecore_igu_block *p_block; 2071 u16 igu_sb_id = 0; 2072 u32 val = 0; 2073 2074 /* @@@TBD MichalK temporary... should be moved to init-tool... 
*/ 2075 val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); 2076 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; 2077 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; 2078 ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); 2079 /* end temporary */ 2080 2081 for (igu_sb_id = 0; 2082 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2083 igu_sb_id++) { 2084 p_block = &p_info->entry[igu_sb_id]; 2085 2086 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 2087 !p_block->is_pf || 2088 (p_block->status & ECORE_IGU_STATUS_DSB)) 2089 continue; 2090 2091 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id, 2092 p_hwfn->hw_info.opaque_fid, 2093 b_set); 2094 } 2095 2096 if (b_slowpath) 2097 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 2098 p_info->igu_dsb_id, 2099 p_hwfn->hw_info.opaque_fid, 2100 b_set); 2101 } 2102 2103 int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn, 2104 struct ecore_ptt *p_ptt) 2105 { 2106 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2107 struct ecore_igu_block *p_block; 2108 int pf_sbs, vf_sbs; 2109 u16 igu_sb_id; 2110 u32 val, rval; 2111 2112 if (!RESC_NUM(p_hwfn, ECORE_SB)) { 2113 /* We're using an old MFW - have to prevent any switching 2114 * of SBs between PF and VFs as later driver wouldn't be 2115 * able to tell which belongs to which. 2116 */ 2117 p_info->b_allow_pf_vf_change = false; 2118 } else { 2119 /* Use the numbers the MFW have provided - 2120 * don't forget MFW accounts for the default SB as well. 2121 */ 2122 p_info->b_allow_pf_vf_change = true; 2123 2124 if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) { 2125 DP_INFO(p_hwfn, 2126 "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n", 2127 RESC_NUM(p_hwfn, ECORE_SB) - 1, 2128 p_info->usage.cnt); 2129 p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1; 2130 } 2131 2132 /* TODO - how do we learn about VF SBs from MFW? */ 2133 if (IS_PF_SRIOV(p_hwfn)) { 2134 u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs; 2135 2136 if (vfs != p_info->usage.iov_cnt) 2137 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2138 "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n", 2139 p_info->usage.iov_cnt, vfs); 2140 2141 /* At this point we know how many SBs we have totally 2142 * in IGU + number of PF SBs. So we can validate that 2143 * we'd have sufficient for VF. 2144 */ 2145 if (vfs > p_info->usage.free_cnt + 2146 p_info->usage.free_cnt_iov - 2147 p_info->usage.cnt) { 2148 DP_NOTICE(p_hwfn, true, 2149 "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n", 2150 p_info->usage.free_cnt + 2151 p_info->usage.free_cnt_iov, 2152 p_info->usage.cnt, vfs); 2153 return ECORE_INVAL; 2154 } 2155 } 2156 } 2157 2158 /* Cap the number of VFs SBs by the number of VFs */ 2159 if (IS_PF_SRIOV(p_hwfn)) 2160 p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs; 2161 2162 /* Mark all SBs as free, now in the right PF/VFs division */ 2163 p_info->usage.free_cnt = p_info->usage.cnt; 2164 p_info->usage.free_cnt_iov = p_info->usage.iov_cnt; 2165 p_info->usage.orig = p_info->usage.cnt; 2166 p_info->usage.iov_orig = p_info->usage.iov_cnt; 2167 2168 /* We now proceed to re-configure the IGU cam to reflect the initial 2169 * configuration. We can start with the Default SB. 
2170 */ 2171 pf_sbs = p_info->usage.cnt; 2172 vf_sbs = p_info->usage.iov_cnt; 2173 2174 for (igu_sb_id = p_info->igu_dsb_id; 2175 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2176 igu_sb_id++) { 2177 p_block = &p_info->entry[igu_sb_id]; 2178 val = 0; 2179 2180 if (!(p_block->status & ECORE_IGU_STATUS_VALID)) 2181 continue; 2182 2183 if (p_block->status & ECORE_IGU_STATUS_DSB) { 2184 p_block->function_id = p_hwfn->rel_pf_id; 2185 p_block->is_pf = 1; 2186 p_block->vector_number = 0; 2187 p_block->status = ECORE_IGU_STATUS_VALID | 2188 ECORE_IGU_STATUS_PF | 2189 ECORE_IGU_STATUS_DSB; 2190 } else if (pf_sbs) { 2191 pf_sbs--; 2192 p_block->function_id = p_hwfn->rel_pf_id; 2193 p_block->is_pf = 1; 2194 p_block->vector_number = p_info->usage.cnt - pf_sbs; 2195 p_block->status = ECORE_IGU_STATUS_VALID | 2196 ECORE_IGU_STATUS_PF | 2197 ECORE_IGU_STATUS_FREE; 2198 } else if (vf_sbs) { 2199 p_block->function_id = 2200 p_hwfn->p_dev->p_iov_info->first_vf_in_pf + 2201 p_info->usage.iov_cnt - vf_sbs; 2202 p_block->is_pf = 0; 2203 p_block->vector_number = 0; 2204 p_block->status = ECORE_IGU_STATUS_VALID | 2205 ECORE_IGU_STATUS_FREE; 2206 vf_sbs--; 2207 } else { 2208 p_block->function_id = 0; 2209 p_block->is_pf = 0; 2210 p_block->vector_number = 0; 2211 } 2212 2213 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2214 p_block->function_id); 2215 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2216 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2217 p_block->vector_number); 2218 2219 /* VF entries would be enabled when VF is initialized */ 2220 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2221 2222 rval = ecore_rd(p_hwfn, p_ptt, 2223 IGU_REG_MAPPING_MEMORY + 2224 sizeof(u32) * igu_sb_id); 2225 2226 if (rval != val) { 2227 ecore_wr(p_hwfn, p_ptt, 2228 IGU_REG_MAPPING_MEMORY + 2229 sizeof(u32) * igu_sb_id, 2230 val); 2231 2232 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2233 "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n", 2234 igu_sb_id, p_block->function_id, 2235 p_block->is_pf, p_block->vector_number, 2236 rval, val); 2237 } 2238 } 2239 2240 return 0; 2241 } 2242 2243 int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn, 2244 struct ecore_ptt *p_ptt) 2245 { 2246 struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage; 2247 2248 /* Return all the usage indications to default prior to the reset; 2249 * The reset expects the !orig to reflect the initial status of the 2250 * SBs, and would re-calculate the originals based on those. 2251 */ 2252 p_cnt->cnt = p_cnt->orig; 2253 p_cnt->free_cnt = p_cnt->orig; 2254 p_cnt->iov_cnt = p_cnt->iov_orig; 2255 p_cnt->free_cnt_iov = p_cnt->iov_orig; 2256 p_cnt->orig = 0; 2257 p_cnt->iov_orig = 0; 2258 2259 /* TODO - we probably need to re-configure the CAU as well...
*/ 2260 return ecore_int_igu_reset_cam(p_hwfn, p_ptt); 2261 } 2262 2263 static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn, 2264 struct ecore_ptt *p_ptt, 2265 u16 igu_sb_id) 2266 { 2267 u32 val = ecore_rd(p_hwfn, p_ptt, 2268 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); 2269 struct ecore_igu_block *p_block; 2270 2271 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2272 2273 /* Fill the block information */ 2274 p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER); 2275 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); 2276 p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER); 2277 2278 p_block->igu_sb_id = igu_sb_id; 2279 } 2280 2281 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn, 2282 struct ecore_ptt *p_ptt) 2283 { 2284 struct ecore_igu_info *p_igu_info; 2285 struct ecore_igu_block *p_block; 2286 u32 min_vf = 0, max_vf = 0; 2287 u16 igu_sb_id; 2288 2289 p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev, 2290 GFP_KERNEL, 2291 sizeof(*p_igu_info)); 2292 if (!p_hwfn->hw_info.p_igu_info) 2293 return ECORE_NOMEM; 2294 p_igu_info = p_hwfn->hw_info.p_igu_info; 2295 2296 /* Distinguish between existent and non-existent default SB */ 2297 p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX; 2298 2299 /* Find the range of VF ids whose SBs belong to this PF */ 2300 if (p_hwfn->p_dev->p_iov_info) { 2301 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info; 2302 2303 min_vf = p_iov->first_vf_in_pf; 2304 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; 2305 } 2306 2307 for (igu_sb_id = 0; 2308 igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2309 igu_sb_id++) { 2310 /* Read current entry; Notice it might not belong to this PF */ 2311 ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id); 2312 p_block = &p_igu_info->entry[igu_sb_id]; 2313 2314 if ((p_block->is_pf) && 2315 (p_block->function_id == p_hwfn->rel_pf_id)) { 2316 p_block->status = ECORE_IGU_STATUS_PF | 2317 ECORE_IGU_STATUS_VALID | 2318 ECORE_IGU_STATUS_FREE; 2319 2320 if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX) 2321 p_igu_info->usage.cnt++; 2322 } else if (!(p_block->is_pf) && 2323 (p_block->function_id >= min_vf) && 2324 (p_block->function_id < max_vf)) { 2325 /* Available for VFs of this PF */ 2326 p_block->status = ECORE_IGU_STATUS_VALID | 2327 ECORE_IGU_STATUS_FREE; 2328 2329 if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX) 2330 p_igu_info->usage.iov_cnt++; 2331 } 2332 2333 /* Mark the First entry belonging to the PF or its VFs 2334 * as the default SB [we'll reset IGU prior to first usage]. 2335 */ 2336 if ((p_block->status & ECORE_IGU_STATUS_VALID) && 2337 (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) { 2338 p_igu_info->igu_dsb_id = igu_sb_id; 2339 p_block->status |= ECORE_IGU_STATUS_DSB; 2340 } 2341 2342 /* While this isn't suitable for all clients, limit number 2343 * of prints by having each PF print only its entries with the 2344 * exception of PF0 which would print everything.
2345 */ 2346 if ((p_block->status & ECORE_IGU_STATUS_VALID) || 2347 (p_hwfn->abs_pf_id == 0)) 2348 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2349 "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2350 igu_sb_id, p_block->function_id, 2351 p_block->is_pf, p_block->vector_number); 2352 } 2353 2354 if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) { 2355 DP_NOTICE(p_hwfn, true, 2356 "IGU CAM returned invalid values igu_dsb_id=0x%x\n", 2357 p_igu_info->igu_dsb_id); 2358 return ECORE_INVAL; 2359 } 2360 2361 /* All non default SB are considered free at this point */ 2362 p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; 2363 p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; 2364 2365 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2366 "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n", 2367 p_igu_info->igu_dsb_id, p_igu_info->usage.cnt, 2368 p_igu_info->usage.iov_cnt); 2369 2370 return ECORE_SUCCESS; 2371 } 2372 2373 enum _ecore_status_t 2374 ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 2375 u16 sb_id, bool b_to_vf) 2376 { 2377 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2378 struct ecore_igu_block *p_block = OSAL_NULL; 2379 u16 igu_sb_id = 0, vf_num = 0; 2380 u32 val = 0; 2381 2382 if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn)) 2383 return ECORE_INVAL; 2384 2385 if (sb_id == ECORE_SP_SB_ID) 2386 return ECORE_INVAL; 2387 2388 if (!p_info->b_allow_pf_vf_change) { 2389 DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n"); 2390 return ECORE_INVAL; 2391 } 2392 2393 /* If we're moving a SB from PF to VF, the client had to specify 2394 * which vector it wants to move. 2395 */ 2396 if (b_to_vf) { 2397 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1); 2398 if (igu_sb_id == ECORE_SB_INVALID_IDX) 2399 return ECORE_INVAL; 2400 } 2401 2402 /* If we're moving a SB from VF to PF, need to validate there isn't 2403 * already a line configured for that vector. 2404 */ 2405 if (!b_to_vf) { 2406 if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) != 2407 ECORE_SB_INVALID_IDX) 2408 return ECORE_INVAL; 2409 } 2410 2411 /* We need to validate that the SB can actually be relocated. 2412 * This would also handle the previous case where we've explicitly 2413 * stated which IGU SB needs to move. 2414 */ 2415 for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 2416 igu_sb_id++) { 2417 p_block = &p_info->entry[igu_sb_id]; 2418 2419 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 2420 !(p_block->status & ECORE_IGU_STATUS_FREE) || 2421 (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) { 2422 if (b_to_vf) 2423 return ECORE_INVAL; 2424 else 2425 continue; 2426 } 2427 2428 break; 2429 } 2430 2431 if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) { 2432 DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV), 2433 "Failed to find a free SB to move\n"); 2434 return ECORE_INVAL; 2435 } 2436 2437 /* At this point, p_block points to the SB we want to relocate */ 2438 if (b_to_vf) { 2439 p_block->status &= ~ECORE_IGU_STATUS_PF; 2440 2441 /* It doesn't matter which VF number we choose, since we're 2442 * going to disable the line; But let's keep it in range. 
*/ 2443 */ 2444 vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf; 2445 2446 p_block->function_id = (u8)vf_num; 2447 p_block->is_pf = 0; 2448 p_block->vector_number = 0; 2449 2450 p_info->usage.cnt--; 2451 p_info->usage.free_cnt--; 2452 p_info->usage.iov_cnt++; 2453 p_info->usage.free_cnt_iov++; 2454 2455 /* TODO - if SBs aren't really the limiting factor, 2456 * then it might not be accurate [in the sense that 2457 * we might not need to decrement the feature]. 2458 */ 2459 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--; 2460 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++; 2461 } else { 2462 p_block->status |= ECORE_IGU_STATUS_PF; 2463 p_block->function_id = p_hwfn->rel_pf_id; 2464 p_block->is_pf = 1; 2465 p_block->vector_number = sb_id + 1; 2466 2467 p_info->usage.cnt++; 2468 p_info->usage.free_cnt++; 2469 p_info->usage.iov_cnt--; 2470 p_info->usage.free_cnt_iov--; 2471 2472 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++; 2473 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--; 2474 } 2475 2476 /* Update the IGU and CAU with the new configuration */ 2477 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2478 p_block->function_id); 2479 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2480 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2481 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2482 p_block->vector_number); 2483 2484 ecore_wr(p_hwfn, p_ptt, 2485 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id, 2486 val); 2487 2488 ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0, 2489 igu_sb_id, vf_num, 2490 p_block->is_pf ? 0 : 1); 2491 2492 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2493 "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2494 igu_sb_id, p_block->function_id, 2495 p_block->is_pf, p_block->vector_number); 2496 2497 return ECORE_SUCCESS; 2498 } 2499 2500 /** 2501 * @brief Initialize igu runtime registers 2502 * 2503 * @param p_hwfn 2504 */ 2505 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn) 2506 { 2507 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; 2508 2509 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); 2510 } 2511 2512 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \ 2513 IGU_CMD_INT_ACK_BASE) 2514 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \ 2515 IGU_CMD_INT_ACK_BASE) 2516 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn) 2517 { 2518 u32 intr_status_hi = 0, intr_status_lo = 0; 2519 u64 intr_status = 0; 2520 2521 intr_status_lo = REG_RD(p_hwfn, 2522 GTT_BAR0_MAP_REG_IGU_CMD + 2523 LSB_IGU_CMD_ADDR * 8); 2524 intr_status_hi = REG_RD(p_hwfn, 2525 GTT_BAR0_MAP_REG_IGU_CMD + 2526 MSB_IGU_CMD_ADDR * 8); 2527 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; 2528 2529 return intr_status; 2530 } 2531 2532 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn) 2533 { 2534 OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn); 2535 p_hwfn->b_sp_dpc_enabled = true; 2536 } 2537 2538 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn) 2539 { 2540 p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn); 2541 if (!p_hwfn->sp_dpc) 2542 return ECORE_NOMEM; 2543 2544 return ECORE_SUCCESS; 2545 } 2546 2547 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn) 2548 { 2549 OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc); 2550 } 2551 2552 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn, 2553 struct ecore_ptt *p_ptt) 2554 { 2555 enum _ecore_status_t rc = ECORE_SUCCESS; 2556 2557 rc = ecore_int_sp_dpc_alloc(p_hwfn); 2558 if (rc != ECORE_SUCCESS) { 2559 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc
mem\n"); 2560 return rc; 2561 } 2562 2563 rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt); 2564 if (rc != ECORE_SUCCESS) { 2565 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n"); 2566 return rc; 2567 } 2568 2569 rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt); 2570 if (rc != ECORE_SUCCESS) 2571 DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n"); 2572 2573 return rc; 2574 } 2575 2576 void ecore_int_free(struct ecore_hwfn *p_hwfn) 2577 { 2578 ecore_int_sp_sb_free(p_hwfn); 2579 ecore_int_sb_attn_free(p_hwfn); 2580 ecore_int_sp_dpc_free(p_hwfn); 2581 } 2582 2583 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 2584 { 2585 if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn) 2586 return; 2587 2588 ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); 2589 ecore_int_sb_attn_setup(p_hwfn, p_ptt); 2590 ecore_int_sp_dpc_setup(p_hwfn); 2591 } 2592 2593 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn, 2594 struct ecore_sb_cnt_info *p_sb_cnt_info) 2595 { 2596 struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info; 2597 2598 if (!p_igu_info || !p_sb_cnt_info) 2599 return; 2600 2601 OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage, 2602 sizeof(*p_sb_cnt_info)); 2603 } 2604 2605 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev) 2606 { 2607 int i; 2608 2609 for_each_hwfn(p_dev, i) 2610 p_dev->hwfns[i].b_int_requested = false; 2611 } 2612 2613 void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable) 2614 { 2615 p_dev->attn_clr_en = clr_enable; 2616 } 2617 2618 enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn, 2619 struct ecore_ptt *p_ptt, 2620 u8 timer_res, u16 sb_id, bool tx) 2621 { 2622 struct cau_sb_entry sb_entry; 2623 enum _ecore_status_t rc; 2624 2625 if (!p_hwfn->hw_init_done) { 2626 DP_ERR(p_hwfn, "hardware not initialized yet\n"); 2627 return ECORE_INVAL; 2628 } 2629 2630 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2631 sb_id * sizeof(u64), 2632 (u64)(osal_uintptr_t)&sb_entry, 2, 0); 2633 if (rc != ECORE_SUCCESS) { 2634 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2635 return rc; 2636 } 2637 2638 if (tx) 2639 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 2640 else 2641 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 2642 2643 rc = ecore_dmae_host2grc(p_hwfn, p_ptt, 2644 (u64)(osal_uintptr_t)&sb_entry, 2645 CAU_REG_SB_VAR_MEMORY + 2646 sb_id * sizeof(u64), 2, 0); 2647 if (rc != ECORE_SUCCESS) { 2648 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); 2649 return rc; 2650 } 2651 2652 return rc; 2653 } 2654 2655 enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn, 2656 struct ecore_ptt *p_ptt, 2657 struct ecore_sb_info *p_sb, 2658 struct ecore_sb_info_dbg *p_info) 2659 { 2660 u16 sbid = p_sb->igu_sb_id; 2661 int i; 2662 2663 if (IS_VF(p_hwfn->p_dev)) 2664 return ECORE_INVAL; 2665 2666 if (sbid > NUM_OF_SBS(p_hwfn->p_dev)) 2667 return ECORE_INVAL; 2668 2669 p_info->igu_prod = ecore_rd(p_hwfn, p_ptt, 2670 IGU_REG_PRODUCER_MEMORY + sbid * 4); 2671 p_info->igu_cons = ecore_rd(p_hwfn, p_ptt, 2672 IGU_REG_CONSUMER_MEM + sbid * 4); 2673 2674 for (i = 0; i < PIS_PER_SB_E4; i++) 2675 p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt, 2676 CAU_REG_PI_MEMORY + 2677 sbid * 4 * PIS_PER_SB_E4 + 2678 i * 4); 2679 2680 return ECORE_SUCCESS; 2681 } 2682