/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_string_fns.h>

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_spq.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_init_ops.h"
#include "ecore_rt_defs.h"
#include "ecore_int.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_hw_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_mcp.h"

struct ecore_pi_info {
	ecore_int_comp_cb_t comp_cb;
	void *cookie;		/* Will be sent to the completion callback function */
};

struct ecore_sb_sp_info {
	struct ecore_sb_info sb_info;
	/* per protocol index data */
	struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4];
};

enum ecore_attention_type {
	ECORE_ATTN_TYPE_ATTN,
	ECORE_ATTN_TYPE_PARITY,
};

#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)

struct aeu_invert_reg_bit {
	char bit_name[30];

#define ATTENTION_PARITY		(1 << 0)

#define ATTENTION_LENGTH_MASK		(0x00000ff0)
#define ATTENTION_LENGTH_SHIFT		(4)
#define ATTENTION_LENGTH(flags)		(((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE		(1 << ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR			(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT		((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK		(0x000ff000)
#define ATTENTION_OFFSET_SHIFT		(12)

#define ATTENTION_BB_MASK		(0x00700000)
#define ATTENTION_BB_SHIFT		(20)
#define ATTENTION_BB(value)		((value) << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT		(1 << 23)

#define ATTENTION_CLEAR_ENABLE		(1 << 28)
	unsigned int flags;

	/* Callback to call if attention will be triggered */
	enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);

	enum block_id block_index;
};

struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};

#define MAX_ATTN_GRPS		(8)
#define NUM_ATTN_REGS		(9)

static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);

	return ECORE_SUCCESS;
}

#define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK		(0x3c000)
#define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT	(14)
#define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK		(0x03fc0)
#define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT	(6)
#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK	(0x00020)
#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT	(5)
#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK	(0x0001e)
#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT	(1)
#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK	(0x1)
#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT	(0)
#define ECORE_PSWHST_ATTENTION_VF_DISABLED		(0x1)
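
/* Worked example (added for clarity; not from the original sources): a
 * PSWHST_REG_VF_DISABLED_ERROR_DATA word of 0x0000d0a3 decodes with the
 * masks above as Write = 1 (bit 0), Client = 0x1 (bits 1-4), Valid = 1
 * (bit 5), VF = 0x42 (bits 6-13) and PF = 0x3 (bits 14-17).
 */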
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS			(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT	(0)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK	(0x1e)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT	(1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x20)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK	(0x3fc0)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT	(6)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK	(0x3c000)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT	(14)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK	(0x3fc0000)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			   PSWHST_REG_VF_DISABLED_ERROR_VALID);

	/* Disabled VF access */
	if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
		u32 addr, data;

		addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
		data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_VF_DISABLED_ERROR_DATA);
		DP_INFO(p_hwfn->p_dev,
			"PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
			" Write [0x%02x] Addr [0x%08x]\n",
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
			     >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
			     >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK)
			     >> ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK)
			     >> ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK)
			     >> ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
			addr);
	}

	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       PSWHST_REG_INCORRECT_ACCESS_VALID);
	if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
		u32 addr, data, length;

		addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
		data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				PSWHST_REG_INCORRECT_ACCESS_DATA);
		length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				  PSWHST_REG_INCORRECT_ACCESS_LENGTH);

		DP_INFO(p_hwfn->p_dev,
			"Incorrect access to %08x of length %08x - PF [%02x]"
			" VF [%04x] [valid %02x] client [%02x] write [%02x]"
			" Byte-Enable [%04x] [%08x]\n",
			addr, length,
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
			(u8)((data & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
			     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
			data);
	}

	/* TODO - We know 'some' of these are legal due to virtualization,
	 * but is it true for all of them?
	 */
	return ECORE_SUCCESS;
}
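
/* Note (added for clarity): the GRC timeout ADDRESS field defined below is a
 * dword address; ecore_grc_attn_cb() shifts it left by 2 when logging, so a
 * field value of, e.g., 0x1000 corresponds to byte address 0x4000 in GRC
 * space.
 */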

#define ECORE_GRC_ATTENTION_VALID_BIT		(1 << 0)
#define ECORE_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff << 0)
#define ECORE_GRC_ATTENTION_RDWR_BIT		(1 << 23)
#define ECORE_GRC_ATTENTION_MASTER_MASK		(0xf << 24)
#define ECORE_GRC_ATTENTION_MASTER_SHIFT	(24)
#define ECORE_GRC_ATTENTION_PF_MASK		(0xf)
#define ECORE_GRC_ATTENTION_VF_MASK		(0xff << 4)
#define ECORE_GRC_ATTENTION_VF_SHIFT		(4)
#define ECORE_GRC_ATTENTION_PRIV_MASK		(0x3 << 14)
#define ECORE_GRC_ATTENTION_PRIV_SHIFT		(14)
#define ECORE_GRC_ATTENTION_PRIV_VF		(0)
static const char *grc_timeout_attn_master_to_str(u8 master)
{
	switch (master) {
	case 1:
		return "PXP";
	case 2:
		return "MCP";
	case 3:
		return "MSDM";
	case 4:
		return "PSDM";
	case 5:
		return "YSDM";
	case 6:
		return "USDM";
	case 7:
		return "TSDM";
	case 8:
		return "XSDM";
	case 9:
		return "DBU";
	case 10:
		return "DMAE";
	default:
		return "Unknown";
	}
}

static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register.
	 */
	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
		goto out;

	/* Read the GRC timeout information */
	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

	DP_NOTICE(p_hwfn->p_dev, false,
		  "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
		  tmp2, tmp,
		  (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
						       : "Read from",
		  (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
		  grc_timeout_attn_master_to_str(
			(tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
			ECORE_GRC_ATTENTION_MASTER_SHIFT),
		  (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
		  (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
		    ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
		   ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
		  (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
		  ECORE_GRC_ATTENTION_VF_SHIFT);

out:
	/* Regardless of anything else, clear the validity bit */
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
	return ECORE_SUCCESS;
}

#define ECORE_PGLUE_ATTENTION_VALID		(1 << 29)
#define ECORE_PGLUE_ATTENTION_RD_VALID		(1 << 26)
#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK	(0xf << 20)
#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
#define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID	(1 << 19)
#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK	(0xff << 24)
#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
#define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR	(1 << 21)
#define ECORE_PGLUE_ATTENTION_DETAILS2_BME	(1 << 22)
#define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN	(1 << 23)
#define ECORE_PGLUE_ATTENTION_ICPL_VALID	(1 << 23)
#define ECORE_PGLUE_ATTENTION_ZLR_VALID		(1 << 25)
#define ECORE_PGLUE_ATTENTION_ILT_VALID		(1 << 23)

enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
						   struct ecore_ptt *p_ptt,
						   bool is_hw_init)
{
	u32 tmp;
	char str[512] = {0};

	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
	if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
		details = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_TX_ERR_WR_DETAILS);
		OSAL_SNPRINTF(str, 512,
			      "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			      addr_hi, addr_lo, details,
			      (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
				   ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
			      (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
				   ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
			      (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
			      tmp,
			      (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
			      (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
			      (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
		if (is_hw_init)
			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "%s", str);
		else
			DP_NOTICE(p_hwfn, false, "%s", str);
	}

	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
	if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
		u32 addr_lo, addr_hi, details;

		addr_lo = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
		details = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_TX_ERR_RD_DETAILS);

		DP_NOTICE(p_hwfn, false,
			  "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
			  addr_hi, addr_lo, details,
			  (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
			       ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
			  (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
			       ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
			  (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
			  tmp,
			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0),
			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0),
			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0));
	}

	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
	if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
		DP_NOTICE(p_hwfn, false, "ICPL error - %08x\n", tmp);

	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
	if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
		u32 addr_hi, addr_lo;

		addr_lo = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);

		DP_NOTICE(p_hwfn, false,
			  "ZLR error - %08x [Address %08x:%08x]\n",
			  tmp, addr_hi, addr_lo);
	}

	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
	if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
		u32 addr_hi, addr_lo, details;

		addr_lo = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
		addr_hi = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
		details = ecore_rd(p_hwfn, p_ptt,
				   PGLUE_B_REG_VF_ILT_ERR_DETAILS);

		DP_NOTICE(p_hwfn, false,
			  "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
			  details, tmp, addr_hi, addr_lo);
	}

	/* Clear the indications */
	ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));

	return ECORE_SUCCESS;
}
352 1 : 0), 353 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 354 1 : 0), 355 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 356 1 : 0)); 357 } 358 359 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); 360 if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID) 361 DP_NOTICE(p_hwfn, false, "ICPL erorr - %08x\n", tmp); 362 363 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); 364 if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) { 365 u32 addr_hi, addr_lo; 366 367 addr_lo = ecore_rd(p_hwfn, p_ptt, 368 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); 369 addr_hi = ecore_rd(p_hwfn, p_ptt, 370 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); 371 372 DP_NOTICE(p_hwfn, false, 373 "ICPL erorr - %08x [Address %08x:%08x]\n", 374 tmp, addr_hi, addr_lo); 375 } 376 377 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2); 378 if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) { 379 u32 addr_hi, addr_lo, details; 380 381 addr_lo = ecore_rd(p_hwfn, p_ptt, 382 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); 383 addr_hi = ecore_rd(p_hwfn, p_ptt, 384 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); 385 details = ecore_rd(p_hwfn, p_ptt, 386 PGLUE_B_REG_VF_ILT_ERR_DETAILS); 387 388 DP_NOTICE(p_hwfn, false, 389 "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", 390 details, tmp, addr_hi, addr_lo); 391 } 392 393 /* Clear the indications */ 394 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); 395 396 return ECORE_SUCCESS; 397 } 398 399 static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn) 400 { 401 return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false); 402 } 403 404 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn) 405 { 406 DP_NOTICE(p_hwfn, false, "FW assertion!\n"); 407 408 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT); 409 410 return ECORE_INVAL; 411 } 412 413 static enum _ecore_status_t 414 ecore_general_attention_35(struct ecore_hwfn *p_hwfn) 415 { 416 DP_INFO(p_hwfn, "General attention 35!\n"); 417 418 return ECORE_SUCCESS; 419 } 420 421 #define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff) 422 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff) 423 #define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) 424 #define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f) 425 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16) 426 427 #define ECORE_DB_REC_COUNT 10 428 #define ECORE_DB_REC_INTERVAL 100 429 430 /* assumes sticky overflow indication was set for this PF */ 431 static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn, 432 struct ecore_ptt *p_ptt) 433 { 434 u8 count = ECORE_DB_REC_COUNT; 435 u32 usage = 1; 436 437 /* wait for usage to zero or count to run out. This is necessary since 438 * EDPM doorbell transactions can take multiple 64b cycles, and as such 439 * can "split" over the pci. Possibly, the doorbell drop can happen with 440 * half an EDPM in the queue and other half dropped. Another EDPM 441 * doorbell to the same address (from doorbell recovery mechanism or 442 * from the doorbelling entity) could have first half dropped and second 443 * half interperted as continuation of the first. To prevent such 444 * malformed doorbells from reaching the device, flush the queue before 445 * releaseing the overflow sticky indication. 446 */ 447 while (count-- && usage) { 448 usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT); 449 OSAL_UDELAY(ECORE_DB_REC_INTERVAL); 450 } 451 452 /* should have been depleted by now */ 453 if (usage) { 454 DP_NOTICE(p_hwfn->p_dev, false, 455 "DB recovery: doorbell usage failed to zero after %d usec. 

#define ECORE_DORQ_ATTENTION_REASON_MASK	(0xfffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT	(0x0)
#define ECORE_DORQ_ATTENTION_SIZE_MASK		(0x7f)
#define ECORE_DORQ_ATTENTION_SIZE_SHIFT		(16)

#define ECORE_DB_REC_COUNT			10
#define ECORE_DB_REC_INTERVAL			100

/* assumes sticky overflow indication was set for this PF */
static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt)
{
	u8 count = ECORE_DB_REC_COUNT;
	u32 usage = 1;

	/* Wait for usage to zero or count to run out. This is necessary since
	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
	 * can "split" over the PCI. Possibly, a doorbell drop can happen with
	 * half an EDPM in the queue and the other half dropped. Another EDPM
	 * doorbell to the same address (from the doorbell recovery mechanism
	 * or from the doorbelling entity) could then have its first half
	 * dropped and its second half interpreted as a continuation of the
	 * first. To prevent such malformed doorbells from reaching the device,
	 * flush the queue before releasing the overflow sticky indication.
	 */
	while (count-- && usage) {
		usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
		OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
	}

	/* should have been depleted by now */
	if (usage) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
			  ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
		return ECORE_TIMEOUT;
	}

	/* flush any pending (e)dpm as they may never arrive */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);

	/* release overflow sticky indication (stop silently dropping
	 * everything)
	 */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);

	/* repeat all last doorbells (doorbell drop recovery) */
	ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);

	return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 int_sts, first_drop_reason, details, address, overflow,
	    all_drops_reason;
	struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
	enum _ecore_status_t rc;

	int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
	DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
		  int_sts);

	/* int_sts may be zero since all PFs were interrupted for doorbell
	 * overflow but another one already handled it. We can abort here; if
	 * this PF also requires overflow recovery, it will be interrupted
	 * again.
	 */
	if (!int_sts)
		return ECORE_SUCCESS;

	/* check if db_drop or overflow happened */
	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
		/* obtain data about db drop/overflow */
		first_drop_reason = ecore_rd(p_hwfn, p_ptt,
					     DORQ_REG_DB_DROP_REASON) &
				    ECORE_DORQ_ATTENTION_REASON_MASK;
		details = ecore_rd(p_hwfn, p_ptt,
				   DORQ_REG_DB_DROP_DETAILS);
		address = ecore_rd(p_hwfn, p_ptt,
				   DORQ_REG_DB_DROP_DETAILS_ADDRESS);
		overflow = ecore_rd(p_hwfn, p_ptt,
				    DORQ_REG_PF_OVFL_STICKY);
		all_drops_reason = ecore_rd(p_hwfn, p_ptt,
					    DORQ_REG_DB_DROP_DETAILS_REASON);

		/* log info */
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Doorbell drop occurred\n"
			  "Address\t\t0x%08x\t(second BAR address)\n"
			  "FID\t\t0x%04x\t\t(Opaque FID)\n"
			  "Size\t\t0x%04x\t\t(in bytes)\n"
			  "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
			  "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n"
			  "Overflow\t0x%x\t\t(a per PF indication)\n",
			  address,
			  GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
			  GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
			  first_drop_reason, all_drops_reason, overflow);

		/* if this PF caused overflow, initiate recovery */
		if (overflow) {
			rc = ecore_db_rec_attn(p_hwfn, p_ptt);
			if (rc != ECORE_SUCCESS)
				return rc;
		}

		/* clear the doorbell drop details and prepare for next drop */
		ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);

		/* mark interrupt as handled (note: even if drop was due to a
		 * different reason than overflow we mark as handled)
		 */
		ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
			 DORQ_REG_INT_STS_DB_DROP |
			 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);

		/* if there are no indications other than drop indications,
		 * success
		 */
		if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
				 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
				 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
			return ECORE_SUCCESS;
	}

	/* some other indication was present - non-recoverable */
	DP_INFO(p_hwfn, "DORQ fatal attention\n");

	return ECORE_INVAL;
}
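
/* Note (added for clarity; GET_FIELD() is assumed to follow the usual ecore
 * convention of shifting before masking): in the DORQ handler above,
 *
 *	GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE)
 *
 * expands to ((details >> 0) & 0xffff), and the SIZE field is
 * ((details >> 16) & 0x7f), multiplied by 4 to convert the units to bytes.
 */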

static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
		u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   TM_REG_INT_STS_1);

		if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
			    TM_REG_INT_STS_1_PEND_CONN_SCAN))
			return ECORE_INVAL;

		if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
			   TM_REG_INT_STS_1_PEND_CONN_SCAN))
			DP_INFO(p_hwfn,
				"TM attention on emulation - most likely"
				" results of clock-ratios\n");
		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
		val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
		       TM_REG_INT_MASK_1_PEND_TASK_SCAN;
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);

		return ECORE_SUCCESS;
	}
#endif

	return ECORE_INVAL;
}

/* Instead of major changes to the data-structure, we have some 'special'
 * identifiers for sources that changed meaning between adapters.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};

static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
};
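
/* Note (added for clarity): the 'flags' word of an aeu_invert_reg_bit entry
 * encodes how many consecutive AEU bits the entry spans and what they mean:
 * ATTENTION_SINGLE is a single interrupt bit, ATTENTION_PAR a single parity
 * bit, and ATTENTION_PAR_INT two bits - a parity bit followed by an
 * interrupt bit. E.g. (32 << ATTENTION_LENGTH_SHIFT) below marks an entry
 * covering an entire 32-bit register.
 */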

/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
	{
		{	/* After Invert 1 */
			{"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT),
			 OSAL_NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 2 */
			{"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL,
			 MAX_BLOCK_ID},
			{"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL,
			 MAX_BLOCK_ID},
			{"PGLUE B RBC", ATTENTION_PAR_INT,
			 ecore_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
			{"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL,
			 MAX_BLOCK_ID},
			{"Flash event", ATTENTION_SINGLE, OSAL_NULL,
			 MAX_BLOCK_ID},
			{"SMB event", ATTENTION_SINGLE, OSAL_NULL,
			 MAX_BLOCK_ID},
			{"Main Power", ATTENTION_SINGLE, OSAL_NULL,
			 MAX_BLOCK_ID},
			{"SW timers #%d",
			 (8 << ATTENTION_LENGTH_SHIFT) |
			 (1 << ATTENTION_OFFSET_SHIFT),
			 OSAL_NULL, MAX_BLOCK_ID},
			{"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT),
			 OSAL_NULL, BLOCK_PGLCS},
		}
	},

	{
		{	/* After Invert 3 */
			{"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT),
			 OSAL_NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 4 */
			{"General Attention 32",
			 ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
			 ecore_fw_assertion, MAX_BLOCK_ID},
			{"General Attention %d",
			 (2 << ATTENTION_LENGTH_SHIFT) |
			 (33 << ATTENTION_OFFSET_SHIFT),
			 OSAL_NULL, MAX_BLOCK_ID},
			{"General Attention 35",
			 ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
			 ecore_general_attention_35, MAX_BLOCK_ID},
			{"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
			 OSAL_NULL, BLOCK_NWS},
			{"NWS Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
			 OSAL_NULL, BLOCK_NWS},
			{"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
			 OSAL_NULL, BLOCK_NWM},
			{"NWM Interrupt",
			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
			 OSAL_NULL, BLOCK_NWM},
			{"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb,
			 MAX_BLOCK_ID},
			{"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL,
			 MAX_BLOCK_ID},
			{"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
			{"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL,
			 MAX_BLOCK_ID},
			{"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
			{"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL,
			 MAX_BLOCK_ID},
			{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
			 OSAL_NULL, MAX_BLOCK_ID},
			{"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
			{"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL,
			 BLOCK_BMB},
			{"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
			{"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
			{"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
		}
	},

	{
		{	/* After Invert 5 */
			{"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
			{"PB Client1", ATTENTION_PAR_INT, OSAL_NULL,
			 BLOCK_PBF_PB1},
			{"PB Client2", ATTENTION_PAR_INT, OSAL_NULL,
			 BLOCK_PBF_PB2},
			{"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
			{"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
			{"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
			{"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
			{"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
			{"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
			{"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
			{"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
			{"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
			{"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
			{"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
			{"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
			{"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
		}
	},

	{
		{	/* After Invert 6 */
			{"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
			{"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
			{"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
			{"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
			{"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
			{"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
			{"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
			{"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
			{"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
			{"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
			{"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
			{"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
			{"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
			{"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb,
			 BLOCK_DORQ},
			{"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
			{"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
		}
	},

	{
		{	/* After Invert 7 */
			{"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
			{"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
			{"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
			{"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
			{"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
			{"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
			{"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
			{"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
			{"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
			{"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
			{"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
			{"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
			{"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
			{"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
			{"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
			{"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL,
			 BLOCK_PGLCS},
			{"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
		}
	},

	{
		{	/* After Invert 8 */
			{"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL,
			 BLOCK_PSWRQ2},
			{"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
			{"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL,
			 BLOCK_PSWWR2},
			{"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
			{"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL,
			 BLOCK_PSWRD2},
			{"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb,
			 BLOCK_PSWHST},
			{"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL,
			 BLOCK_PSWHST2},
			{"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb,
			 BLOCK_GRC},
			{"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
			{"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
			{"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
			{"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
			{"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
			{"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
			{"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
			{"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
			{"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL,
			 BLOCK_PGLCS},
			{"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL,
			 BLOCK_PGLCS},
			{"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL,
			 MAX_BLOCK_ID},
			{"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL,
			 MAX_BLOCK_ID},
			{"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
			 OSAL_NULL, MAX_BLOCK_ID},
		}
	},

	{
		{	/* After Invert 9 */
			{"MCP Latched memory", ATTENTION_PAR, OSAL_NULL,
			 MAX_BLOCK_ID},
			{"MCP Latched scratchpad cache", ATTENTION_SINGLE,
			 OSAL_NULL, MAX_BLOCK_ID},
			{"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL,
			 MAX_BLOCK_ID},
			{"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL,
			 MAX_BLOCK_ID},
			{"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
			 OSAL_NULL, MAX_BLOCK_ID},
		}
	},

};
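
/* Note (added for clarity): on BB adapters, entries above carrying
 * ATTENTION_BB_DIFFERENT are remapped by ecore_int_aeu_translate() below;
 * e.g. the bit listed as "NWS Parity" is translated through
 * ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0) to the "CNIG port 0"
 * descriptor in aeu_descs_special[].
 */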

static struct aeu_invert_reg_bit *
ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
			struct aeu_invert_reg_bit *p_bit)
{
	if (!ECORE_IS_BB(p_hwfn->p_dev))
		return p_bit;

	if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
		return p_bit;

	return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
				  ATTENTION_BB_SHIFT];
}

static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
				     struct aeu_invert_reg_bit *p_bit)
{
	return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
		  ATTENTION_PARITY);
}

#define ATTN_STATE_BITS		(0xfff)
#define ATTN_BITS_MASKABLE	(0x3ff)
struct ecore_sb_attn_info {
	/* Virtual & Physical address of the SB */
	struct atten_status_block *sb_attn;
	dma_addr_t sb_phys;

	/* Last seen running index */
	u16 index;

	/* A mask of the AEU bits resulting in a parity error */
	u32 parity_mask[NUM_ATTN_REGS];

	/* A pointer to the attention description structure */
	struct aeu_invert_reg *p_aeu_desc;

	/* Previously asserted attentions, which have not yet been deasserted */
	u16 known_attn;

	/* Cleanup address for the link's general hw attention */
	u32 mfw_attn_addr;
};

static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
				 struct ecore_sb_attn_info *p_sb_desc)
{
	u16 rc = 0, index;

	OSAL_MMIOWB(p_hwfn->p_dev);

	index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
	if (p_sb_desc->index != index) {
		p_sb_desc->index = index;
		rc = ECORE_SB_ATT_IDX;
	}

	OSAL_MMIOWB(p_hwfn->p_dev);

	return rc;
}
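
/* Note (added for clarity): known_attn in the struct above is the driver's
 * memory of which attention lines are currently asserted:
 * ecore_int_assertion() below ORs newly asserted bits into it, and
 * ecore_int_deassertion() clears them once the HW acks, which is what lets
 * ecore_int_attentions() tell a real state change from a repeated report.
 */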

/**
 * @brief ecore_int_assertion - handles asserted attention bits
 *
 * @param p_hwfn
 * @param asserted_bits newly asserted bits
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
						u16 asserted_bits)
{
	struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 igu_mask;

	/* Mask the source of the attention in the IGU */
	igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			    IGU_REG_ATTENTION_ENABLE);
	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
		   sb_attn_sw->known_attn,
		   sb_attn_sw->known_attn | asserted_bits);
	sb_attn_sw->known_attn |= asserted_bits;

	/* Handle MCP events */
	if (asserted_bits & 0x100) {
		ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
		/* Clean the MCP attention */
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
			 sb_attn_sw->mfw_attn_addr, 0);
	}

	/* FIXME - this will change once we'll have GOOD gtt definitions */
	DIRECT_REG_WR(p_hwfn,
		      (u8 OSAL_IOMEM *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
		   asserted_bits);

	return ECORE_SUCCESS;
}

static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
				 enum block_id id, enum dbg_attn_type type,
				 bool b_clear)
{
	/* @DPDK */
	DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type);
}
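
/* Note (added for clarity): IGU command registers are exposed through the
 * GTT window at an 8-byte granularity, which is why command offsets such as
 * (IGU_CMD_ATTN_BIT_SET_UPPER - IGU_CMD_INT_ACK_BASE) above are shifted
 * left by 3 to form a byte offset from GTT_BAR0_MAP_REG_IGU_CMD. The same
 * "<< 3" convention reappears when composing sb_info->igu_addr further down.
 */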

/**
 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
 * cause of the attention
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the attention
 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
 *		       this bit to this group.
 * @param p_bit_name - name of the AEU bit, for logging purposes
 * @param bitmask - the bit(s) within aeu_en_reg which caused the attention
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
			      struct aeu_invert_reg_bit *p_aeu,
			      u32 aeu_en_reg,
			      const char *p_bit_name,
			      u32 bitmask)
{
	enum _ecore_status_t rc = ECORE_INVAL;
	bool b_fatal = false;

	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
		p_bit_name, bitmask);

	/* Call callback before clearing the interrupt status */
	if (p_aeu->cb) {
		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
			p_bit_name);
		rc = p_aeu->cb(p_hwfn);
	}

	if (rc != ECORE_SUCCESS)
		b_fatal = true;

	/* Print HW block interrupt registers */
	if (p_aeu->block_index != MAX_BLOCK_ID) {
		ecore_int_attn_print(p_hwfn, p_aeu->block_index,
				     ATTN_TYPE_INTERRUPT, !b_fatal);
	}

	/* @DPDK */
	/* Reach assertion if attention is fatal */
	if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) {
		DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
			  p_bit_name);

		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
	}

	/* Prevent this Attention from being asserted in the future */
	if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
	    p_hwfn->p_dev->attn_clr_en) {
		u32 val;
		u32 mask = ~bitmask;

		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
		DP_ERR(p_hwfn, "`%s' - Disabled future attentions\n",
		       p_bit_name);
	}

	return rc;
}

/**
 * @brief ecore_int_deassertion_parity - handle a single parity AEU source
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of an AEU bit which caused the parity
 * @param aeu_en_reg - address of the AEU enable register
 * @param bit_index
 */
static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
					 struct aeu_invert_reg_bit *p_aeu,
					 u32 aeu_en_reg, u8 bit_index)
{
	u32 block_id = p_aeu->block_index, mask, val;

	DP_NOTICE(p_hwfn->p_dev, false,
		  "%s parity attention is set [address 0x%08x, bit %d]\n",
		  p_aeu->bit_name, aeu_en_reg, bit_index);

	if (block_id != MAX_BLOCK_ID) {
		ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);

		/* In A0, there's a single parity bit for several blocks */
		if (block_id == BLOCK_BTB) {
			ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
					     ATTN_TYPE_PARITY, false);
			ecore_int_attn_print(p_hwfn, BLOCK_MCP,
					     ATTN_TYPE_PARITY, false);
		}
	}

	/* Prevent this parity error from being re-asserted */
	mask = ~(0x1 << bit_index);
	val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
		p_aeu->bit_name);
}
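
/* Worked example (added for clarity; not from the original sources) for the
 * bit-slicing done in ecore_int_deassertion() below: for an entry with
 * bit = 5 and bit_len = 2,
 *
 *	bitmask = bits & (((1 << 2) - 1) << 5);	(i.e. bits & 0x60)
 *	bitmask >>= 5;
 *
 * leaves the HW-block bits right-justified so OSAL_FIND_FIRST_BIT() can
 * report which of them fired.
 */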

/**
 * @brief - handles deassertion of previously asserted attentions.
 *
 * @param p_hwfn
 * @param deasserted_bits - newly deasserted bits
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
						  u16 deasserted_bits)
{
	struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
	u8 i, j, k, bit_idx;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Read the attention registers in the AEU */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
					  MISC_REG_AEU_AFTER_INVERT_1_IGU +
					  i * 0x4);
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
	}

	/* Handle parity attentions first */
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
		u32 parities;

		aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
		en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;

		/* Skip register in which no parity bit is currently set */
		if (!parities)
			continue;

		for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];

			if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
			    !!(parities & (1 << bit_idx)))
				ecore_int_deassertion_parity(p_hwfn, p_bit,
							     aeu_en, bit_idx);

			bit_idx += ATTENTION_LENGTH(p_bit->flags);
		}
	}

	/* Find non-parity cause for attention and act */
	for (k = 0; k < MAX_ATTN_GRPS; k++) {
		struct aeu_invert_reg_bit *p_aeu;

		/* Handle only groups whose attention is currently deasserted */
		if (!(deasserted_bits & (1 << k)))
			continue;

		for (i = 0; i < NUM_ATTN_REGS; i++) {
			u32 bits;

			aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
				 i * sizeof(u32) +
				 k * sizeof(u32) * NUM_ATTN_REGS;
			en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
			bits = aeu_inv_arr[i] & en;

			/* Skip if no bit from this group is currently set */
			if (!bits)
				continue;

			/* Find all set bits from current register which belong
			 * to current group, making them responsible for the
			 * previous assertion.
			 */
			for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
				unsigned long int bitmask;
				u8 bit, bit_len;

				/* Need to account bits with changed meaning */
				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];

				bit = bit_idx;
				bit_len = ATTENTION_LENGTH(p_aeu->flags);
				if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
					/* Skip Parity */
					bit++;
					bit_len--;
				}

				/* Find the bits relating to HW-block, then
				 * shift so they'll become LSB.
				 */
				bitmask = bits & (((1 << bit_len) - 1) << bit);
				bitmask >>= bit;

				if (bitmask) {
					u32 flags = p_aeu->flags;
					char bit_name[30];
					u8 num;

					num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
								      bit_len);

					/* Some bits represent more than a
					 * single interrupt. Correctly print
					 * their name.
					 */
					if (ATTENTION_LENGTH(flags) > 2 ||
					    ((flags & ATTENTION_PAR_INT) &&
					     ATTENTION_LENGTH(flags) > 1))
						OSAL_SNPRINTF(bit_name, 30,
							      p_aeu->bit_name,
							      num);
					else
						strlcpy(bit_name,
							p_aeu->bit_name,
							sizeof(bit_name));

					/* We now need to pass bitmask in its
					 * correct position.
					 */
					bitmask <<= bit;

					/* Handle source of the attention */
					ecore_int_deassertion_aeu_bit(p_hwfn,
								      p_aeu,
								      aeu_en,
								      bit_name,
								      bitmask);
				}

				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
			}
		}
	}

	/* Clear IGU indication for the deasserted bits */
	/* FIXME - this will change once we'll have GOOD gtt definitions */
	DIRECT_REG_WR(p_hwfn,
		      (u8 OSAL_IOMEM *)p_hwfn->regview +
		      GTT_BAR0_MAP_REG_IGU_CMD +
		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
			IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));

	/* Unmask deasserted attentions in IGU */
	aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			    IGU_REG_ATTENTION_ENABLE);
	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);

	/* Clear deassertion from inner state */
	sb_attn_sw->known_attn &= ~deasserted_bits;

	return rc;
}

static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
	u16 index = 0, asserted_bits, deasserted_bits;
	u32 attn_bits = 0, attn_acks = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Read current attention bits/acks - safeguard against racing
	 * attention updates by guaranteeing we work on a synchronized
	 * snapshot.
	 */
	do {
		index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
		attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
		attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
	} while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
	p_sb_attn->sb_index = index;

	/* Attention / Deassertion are meaningful (and in correct state)
	 * only when they differ and are consistent with the known state -
	 * deassertion when previous attention & current ack, and assertion
	 * when current attention with no previous attention.
	 */
	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
			~p_sb_attn_sw->known_attn;
	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
			  p_sb_attn_sw->known_attn;
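	/* Illustrative example (added; not from the original sources): with
	 * attn_bits = 0x101, attn_acks = 0x001 and known_attn = 0x001, the
	 * expressions above yield asserted_bits = 0x100 (a newly raised MCP
	 * bit) and deasserted_bits = 0, matching the "assertion when current
	 * attention with no previous attention" rule.
	 */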

	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
		DP_INFO(p_hwfn,
			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
			index, attn_bits, attn_acks, asserted_bits,
			deasserted_bits, p_sb_attn_sw->known_attn);
	else if (asserted_bits == 0x100)
		DP_INFO(p_hwfn, "MFW indication via attention\n");
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "MFW indication [deassertion]\n");

	if (asserted_bits) {
		rc = ecore_int_assertion(p_hwfn, asserted_bits);
		if (rc)
			return rc;
	}

	if (deasserted_bits)
		rc = ecore_int_deassertion(p_hwfn, deasserted_bits);

	return rc;
}

static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
			      void OSAL_IOMEM *igu_addr, u32 ack_cons)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	igu_ack.sb_id_and_flags =
	    ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
	     (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
	     (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
	     (IGU_SEG_ACCESS_ATTN <<
	      IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to the same address;
	 * need to guarantee all commands will be received (in-order) by HW.
	 */
	OSAL_MMIOWB(p_hwfn->p_dev);
	OSAL_BARRIER(p_hwfn->p_dev);
}

void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
	struct ecore_pi_info *pi_info = OSAL_NULL;
	struct ecore_sb_attn_info *sb_attn;
	struct ecore_sb_info *sb_info;
	int arr_size;
	u16 rc = 0;

	if (!p_hwfn)
		return;

	if (!p_hwfn->p_sp_sb) {
		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
		return;
	}

	sb_info = &p_hwfn->p_sp_sb->sb_info;
	arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
	if (!sb_info) {
		DP_ERR(p_hwfn->p_dev,
		       "Status block is NULL - cannot ack interrupts\n");
		return;
	}

	if (!p_hwfn->p_sb_attn) {
		DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
		return;
	}
	sb_attn = p_hwfn->p_sb_attn;

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
		   p_hwfn, p_hwfn->my_id);

	/* Disable ack for def status block. Required both for msix and
	 * inta in non-mask mode; in inta it does no harm.
	 */
	ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);

	/* Gather Interrupts/Attentions information */
	if (!sb_info->sb_virt) {
		DP_ERR(p_hwfn->p_dev,
		       "Interrupt Status block is NULL -"
		       " cannot check for new interrupts!\n");
	} else {
		u32 tmp_index = sb_info->sb_ack;

		rc = ecore_sb_update_sb_idx(sb_info);
		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
			   "Interrupt indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_info->sb_ack);
	}

	if (!sb_attn || !sb_attn->sb_attn) {
		DP_ERR(p_hwfn->p_dev,
		       "Attentions Status block is NULL -"
		       " cannot check for new attentions!\n");
	} else {
		u16 tmp_index = sb_attn->index;

		rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
		DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
			   "Attention indices: 0x%08x --> 0x%08x\n",
			   tmp_index, sb_attn->index);
	}

	/* Check if we expect interrupts at this time. If not, just ack them */
	if (!(rc & ECORE_SB_EVENT_MASK)) {
		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	/* Check the validity of the DPC ptt. If it is not valid, ack the
	 * interrupts and fail.
	 */
	if (!p_hwfn->p_dpc_ptt) {
		DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
		ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
		return;
	}

	if (rc & ECORE_SB_ATT_IDX)
		ecore_int_attentions(p_hwfn);

	if (rc & ECORE_SB_IDX) {
		int pi;

		/* Since we only looked at the SB index, it's possible more
		 * than a single protocol-index on the SB incremented.
		 * Iterate over all configured protocol indices and check
		 * whether something happened for each.
		 */
		for (pi = 0; pi < arr_size; pi++) {
			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
			if (pi_info->comp_cb != OSAL_NULL)
				pi_info->comp_cb(p_hwfn, pi_info->cookie);
		}
	}

	if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
		/* This should be done before the interrupts are enabled,
		 * since otherwise a new attention will be generated.
		 */
		ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
	}

	ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
}

static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;

	if (!p_sb)
		return;

	if (p_sb->sb_attn) {
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
				       p_sb->sb_phys,
				       SB_ATTN_ALIGNED_SIZE(p_hwfn));
	}
	OSAL_FREE(p_hwfn->p_dev, p_sb);
}

static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
		 DMA_LO(p_hwfn->p_sb_attn->sb_phys));
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
		 DMA_HI(p_hwfn->p_sb_attn->sb_phys));
}

static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
	struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
	int i, j, k;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the pointer to the AEU descriptors */
	sb_info->p_aeu_desc = aeu_descs;

	/* Calculate Parity Masks */
	OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		/* j is array index, k is bit index */
		for (j = 0, k = 0; k < 32; j++) {
			struct aeu_invert_reg_bit *p_aeu;

			p_aeu = &aeu_descs[i].bits[j];
			if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
				sb_info->parity_mask[i] |= 1 << k;

			k += ATTENTION_LENGTH(p_aeu->flags);
		}
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Attn Mask [Reg %d]: 0x%08x\n",
			   i, sb_info->parity_mask[i]);
	}

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
				 MISC_REG_AEU_GENERAL_ATTN_0;

	ecore_int_sb_attn_setup(p_hwfn, p_ptt);
}
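
/* Note (added for clarity): in the parity-mask loop above, an
 * ATTENTION_PAR_INT descriptor occupies two consecutive AEU bits whose
 * first bit is the parity indication; only that first bit is recorded in
 * parity_mask[], and k then advances by ATTENTION_LENGTH() == 2. A plain
 * ATTENTION_SINGLE entry contributes nothing and advances k by 1.
 */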

static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	struct ecore_sb_attn_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
	if (!p_sb) {
		DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n");
		return ECORE_NOMEM;
	}

	/* SB ring */
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
					 SB_ATTN_ALIGNED_SIZE(p_hwfn));
	if (!p_virt) {
		DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n");
		OSAL_FREE(p_dev, p_sb);
		return ECORE_NOMEM;
	}

	/* Attention setup */
	p_hwfn->p_sb_attn = p_sb;
	ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);

	return ECORE_SUCCESS;
}

/* coalescing timeout = timeset << (timer_res + 1) */
#define ECORE_CAU_DEF_RX_USECS 24
#define ECORE_CAU_DEF_TX_USECS 48

void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
			     struct cau_sb_entry *p_sb_entry,
			     u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cau_state;
	u8 timer_res;

	OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	cau_state = CAU_HC_DISABLE_STATE;

	if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		if (!p_dev->rx_coalesce_usecs)
			p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
		if (!p_dev->tx_coalesce_usecs)
			p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
	}

	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
	if (p_dev->rx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (p_dev->rx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	if (p_dev->tx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (p_dev->tx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);

	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
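
/* Worked example (added for clarity): with the defaults above, an RX
 * coalescing value of 24 usec fits in 7 bits (24 <= 0x7F), so timer_res = 0
 * and the timeset programmed below is 24 >> 0 = 24. A hypothetical value of
 * 200 usec exceeds 0xFF, giving timer_res = 2 and timeset = 200 >> 2 = 50.
 */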

static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   u16 igu_sb_id, u32 pi_index,
				   enum ecore_coalescing_fsm coalescing_fsm,
				   u8 timeset)
{
	struct cau_pi_entry pi_entry;
	u32 sb_offset, pi_offset;

	if (IS_VF(p_hwfn->p_dev))
		return;		/* @@@TBD MichalK- VF CAU... */

	sb_offset = igu_sb_id * PIS_PER_SB_E4;
	OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));

	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

	pi_offset = sb_offset + pi_index;
	if (p_hwfn->hw_init_done) {
		ecore_wr(p_hwfn, p_ptt,
			 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
			 *((u32 *)&(pi_entry)));
	} else {
		STORE_RT_REG(p_hwfn,
			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     *((u32 *)&(pi_entry)));
	}
}

void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   struct ecore_sb_info *p_sb, u32 pi_index,
			   enum ecore_coalescing_fsm coalescing_fsm,
			   u8 timeset)
{
	_ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
			       pi_index, coalescing_fsm, timeset);
}

void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   dma_addr_t sb_phys, u16 igu_sb_id,
			   u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
				vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&phys_addr,
				    CAU_REG_SB_ADDR_MEMORY +
				    igu_sb_id * sizeof(u64), 2, 0);
		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&sb_entry,
				    CAU_REG_SB_VAR_MEMORY +
				    igu_sb_id * sizeof(u64), 2, 0);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		/* eth will open queues for all tcs, so configure all of them
		 * properly, rather than just the active ones
		 */
		u8 num_tc = p_hwfn->hw_info.num_hw_tc;
		u8 timeset, timer_res;
		u8 i;

		/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
		if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
		_ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				       ECORE_COAL_RX_STATE_MACHINE,
				       timeset);

		if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
		for (i = 0; i < num_tc; i++) {
			_ecore_int_cau_conf_pi(p_hwfn, p_ptt,
					       igu_sb_id, TX_PI(i),
					       ECORE_COAL_TX_STATE_MACHINE,
					       timeset);
		}
	}
}
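
/* Note (added for clarity; the download step is an assumption based on the
 * STORE_RT_REG usage): before p_hwfn->hw_init_done is set, the CAU entries
 * above are staged into the runtime array and presumably downloaded to the
 * chip as part of the init-tool run; once HW init is done they are written
 * directly with ecore_wr() or, for the wide-bus CAU memories, via DMAE.
 */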

void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_PF(p_hwfn->p_dev))
		ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
				      sb_info->igu_sb_id, 0, 0);
}

struct ecore_igu_block *
ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
{
	struct ecore_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !(p_block->status & ECORE_IGU_STATUS_FREE))
			continue;

		if (!!(p_block->status & ECORE_IGU_STATUS_PF) == b_is_pf)
			return p_block;
	}

	return OSAL_NULL;
}

static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 vector_id)
{
	struct ecore_igu_block *p_block;
	u16 igu_id;

	for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_id++) {
		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    p_block->vector_number != vector_id)
			continue;

		return igu_id;
	}

	return ECORE_SB_INVALID_IDX;
}

u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
{
	u16 igu_sb_id;

	/* Assuming continuous set of IGU SBs dedicated for given PF */
	if (sb_id == ECORE_SP_SB_ID)
		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
	else if (IS_PF(p_hwfn->p_dev))
		igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
	else
		igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);

	if (igu_sb_id == ECORE_SB_INVALID_IDX)
		DP_NOTICE(p_hwfn, true,
			  "Slowpath SB vector %04x doesn't exist\n",
			  sb_id);
	else if (sb_id == ECORE_SP_SB_ID)
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);

	return igu_sb_id;
}

enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_sb_info *sb_info,
				       void *sb_virt_addr,
				       dma_addr_t sb_phy_addr, u16 sb_id)
{
	sb_info->sb_virt = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
		return ECORE_INVAL;

	/* Let the igu info reference the client's SB info */
	if (sb_id != ECORE_SP_SB_ID) {
		if (IS_PF(p_hwfn->p_dev)) {
			struct ecore_igu_info *p_info;
			struct ecore_igu_block *p_block;

			p_info = p_hwfn->hw_info.p_igu_info;
			p_block = &p_info->entry[sb_info->igu_sb_id];

			p_block->sb_info = sb_info;
			p_block->status &= ~ECORE_IGU_STATUS_FREE;
			p_info->usage.free_cnt--;
		} else {
			ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
		}
	}
#ifdef ECORE_CONFIG_DIRECT_HWFN
	sb_info->p_hwfn = p_hwfn;
#endif
	sb_info->p_dev = p_hwfn->p_dev;

	/* The igu address will hold the absolute address that needs to be
	 * written to for a specific status block
	 */
	if (IS_PF(p_hwfn->p_dev)) {
		sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
				    GTT_BAR0_MAP_REG_IGU_CMD +
				    (sb_info->igu_sb_id << 3);
	} else {
		sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
				    PXP_VF_BAR0_START_IGU +
				    ((IGU_CMD_INT_ACK_BASE +
				      sb_info->igu_sb_id) << 3);
	}

	sb_info->flags |= ECORE_SB_INFO_INIT;

	ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);

	return ECORE_SUCCESS;
}
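
/* Usage sketch for ecore_int_sb_init() above (illustrative only; the 'sb'
 * storage, 'sb_phys' variable and error handling are the caller's concern):
 *
 *	void *sb_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &sb_phys,
 *						SB_ALIGNED_SIZE(p_hwfn));
 *	rc = ecore_int_sb_init(p_hwfn, p_ptt, sb, sb_virt, sb_phys, sb_id);
 */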
enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
					  struct ecore_sb_info *sb_info,
					  u16 sb_id)
{
	struct ecore_igu_info *p_info;
	struct ecore_igu_block *p_block;

	if (sb_info == OSAL_NULL)
		return ECORE_SUCCESS;

	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_VF(p_hwfn->p_dev)) {
		ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
		return ECORE_SUCCESS;
	}

	p_info = p_hwfn->hw_info.p_igu_info;
	p_block = &p_info->entry[sb_info->igu_sb_id];

	/* Vector 0 is reserved to Default SB */
	if (p_block->vector_number == 0) {
		DP_ERR(p_hwfn, "Do not free the SP SB using this function\n");
		return ECORE_INVAL;
	}

	/* Lose reference to client's SB info, and fix counters */
	p_block->sb_info = OSAL_NULL;
	p_block->status |= ECORE_IGU_STATUS_FREE;
	p_info->usage.free_cnt++;

	return ECORE_SUCCESS;
}

static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

	if (!p_sb)
		return;

	if (p_sb->sb_info.sb_virt) {
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_sb->sb_info.sb_virt,
				       p_sb->sb_info.sb_phys,
				       SB_ALIGNED_SIZE(p_hwfn));
	}

	OSAL_FREE(p_hwfn->p_dev, p_sb);
}

static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	struct ecore_sb_sp_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb));
	if (!p_sb) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_sb_info'\n");
		return ECORE_NOMEM;
	}

	/* SB ring */
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					 &p_phys, SB_ALIGNED_SIZE(p_hwfn));
	if (!p_virt) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n");
		OSAL_FREE(p_hwfn->p_dev, p_sb);
		return ECORE_NOMEM;
	}

	/* Status Block setup */
	p_hwfn->p_sp_sb = p_sb;
	ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
			  p_virt, p_phys, ECORE_SP_SB_ID);

	OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
					   ecore_int_comp_cb_t comp_cb,
					   void *cookie,
					   u8 *sb_idx, __le16 **p_fw_cons)
{
	struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
	enum _ecore_status_t rc = ECORE_NOMEM;
	u8 pi;

	/* Look for a free index */
	for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
		if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
			continue;

		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
		p_sp_sb->pi_info_arr[pi].cookie = cookie;
		*sb_idx = pi;
		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
		rc = ECORE_SUCCESS;
		break;
	}

	return rc;
}
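/* Illustrative usage sketch (hypothetical client, not part of the driver,
 * kept under #if 0): a slowpath client registers a completion callback
 * against a free PI on the slowpath SB, keeps the returned index and
 * firmware consumer pointer, and releases the index on teardown via
 * ecore_int_unregister_cb() (defined just below). The callback shape here
 * assumes ecore_int_comp_cb_t receives the hwfn and the cookie; the
 * authoritative typedef lives in the ecore interrupt headers.
 */
#if 0
static void example_comp_cb(struct ecore_hwfn *p_hwfn, void *cookie)
{
	/* Handle the slowpath event signalled for this PI */
	(void)p_hwfn;
	(void)cookie;
}

static enum _ecore_status_t example_register(struct ecore_hwfn *p_hwfn)
{
	__le16 *p_fw_cons;
	u8 sb_idx;
	enum _ecore_status_t rc;

	rc = ecore_int_register_cb(p_hwfn, example_comp_cb,
				   OSAL_NULL /* cookie */,
				   &sb_idx, &p_fw_cons);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* ... later, on teardown ... */
	return ecore_int_unregister_cb(p_hwfn, sb_idx);
}
#endif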
enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
{
	struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;

	if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
		return ECORE_NOMEM;

	p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
	p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;

	return ECORE_SUCCESS;
}

u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
}

void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      enum ecore_int_mode int_mode)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
		igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN;
	}
#endif

	p_hwfn->p_dev->int_mode = int_mode;
	switch (p_hwfn->p_dev->int_mode) {
	case ECORE_INT_MODE_INTA:
		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case ECORE_INT_MODE_MSI:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case ECORE_INT_MODE_MSIX:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		break;
	case ECORE_INT_MODE_POLL:
		break;
	}

	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}
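/* For reference, an illustrative summary of the per-mode IGU PF
 * configuration written above (derived directly from the switch statement):
 *   INTA: FUNC_EN | ATTN_BIT_EN | INT_LINE_EN | SINGLE_ISR_EN
 *   MSI:  FUNC_EN | ATTN_BIT_EN | MSI_MSIX_EN | SINGLE_ISR_EN
 *   MSIX: FUNC_EN | ATTN_BIT_EN | MSI_MSIX_EN
 *   POLL: FUNC_EN | ATTN_BIT_EN
 * INTA and MSI are configured as single-ISR modes; MSI-X is not.
 */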
static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn,
			"FPGA - Don't enable Attentions in IGU and MISC\n");
		return;
	}
#endif

	/* Configure AEU signal change to produce attentions */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

	/* Flush the writes to IGU */
	OSAL_MMIOWB(p_hwfn->p_dev);

	/* Unmask AEU signals toward IGU */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}

enum _ecore_status_t
ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     enum ecore_int_mode int_mode)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	ecore_int_igu_enable_attn(p_hwfn, p_ptt);

	if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, true,
				  "Slowpath IRQ request failed\n");
			return ECORE_NORESOURCES;
		}
		p_hwfn->b_int_requested = true;
	}

	/* Enable interrupt Generation */
	ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);

	p_hwfn->b_int_enabled = 1;

	return rc;
}

void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	if (IS_VF(p_hwfn->p_dev))
		return;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}

#define IGU_CLEANUP_SLEEP_LENGTH (1000)
static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     u32 igu_sb_id,
				     bool cleanup_set,
				     u16 opaque_fid)
{
	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
	u8 type = 0;	/* FIXME MichalS type??? */

	OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
			   IGU_REG_CLEANUP_STATUS_0) != 0x200);

	/* Use the Control Command Register to perform the cleanup. There is
	 * an option to do this using the IGU bar, but then it can't be used
	 * for VFs.
	 */

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	OSAL_BARRIER(p_hwfn->p_dev);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* Flush the write to IGU */
	OSAL_MMIOWB(p_hwfn->p_dev);

	/* calculate where to read the status bit from */
	sb_bit = 1 << (igu_sb_id % 32);
	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);

	/* Now wait for the command to complete */
	while (--sleep_cnt) {
		val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;
		OSAL_MSLEEP(5);
	}

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn, true,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, igu_sb_id);
}

void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       u16 igu_sb_id, u16 opaque, bool b_set)
{
	struct ecore_igu_block *p_block;
	int pi, i;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "Cleaning SB [%04x]: func_id = %d is_pf = %d vector_num = 0x%0x\n",
		   igu_sb_id, p_block->function_id, p_block->is_pf,
		   p_block->vector_number);

	/* Set */
	if (b_set)
		ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);

	/* Clear */
	ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);

	/* Wait for the IGU SB to cleanup */
	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
		u32 val;

		val = ecore_rd(p_hwfn, p_ptt,
			       IGU_REG_WRITE_DONE_PENDING +
			       ((igu_sb_id / 32) * 4));
		if (val & (1 << (igu_sb_id % 32)))
			OSAL_UDELAY(10);
		else
			break;
	}
	if (i == IGU_CLEANUP_SLEEP_LENGTH)
		DP_NOTICE(p_hwfn, true,
			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
			  igu_sb_id);

	/* Clear the CAU for the SB */
	for (pi = 0; pi < 12; pi++)
		ecore_wr(p_hwfn, p_ptt,
			 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}
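/* Illustrative sketch (not part of the driver, kept under #if 0): which
 * CLEANUP_STATUS word and bit ecore_int_igu_cleanup_sb() above polls. For a
 * hypothetical igu_sb_id of 70 with type 0: bit = 70 % 32 = 6 and word
 * offset = (70 / 32) * 4 = 8, i.e. bit 6 of IGU_REG_CLEANUP_STATUS_0 + 8.
 */
#if 0
static u32 example_cleanup_status_addr(u32 igu_sb_id, u8 type)
{
	return IGU_REG_CLEANUP_STATUS_0 + (0x80 * type) +
	       (igu_sb_id / 32) * sizeof(u32);
}

static u32 example_cleanup_status_bit(u32 igu_sb_id)
{
	return 1 << (igu_sb_id % 32);
}
#endif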
void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				bool b_set, bool b_slowpath)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block;
	u16 igu_sb_id = 0;
	u32 val = 0;

	/* @@@TBD MichalK temporary... should be moved to init-tool... */
	val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
	/* end temporary */

	for (igu_sb_id = 0;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    (p_block->status & ECORE_IGU_STATUS_DSB))
			continue;

		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
						  p_hwfn->hw_info.opaque_fid,
						  b_set);
	}

	if (b_slowpath)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  p_info->igu_dsb_id,
						  p_hwfn->hw_info.opaque_fid,
						  b_set);
}

int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block;
	int pf_sbs, vf_sbs;
	u16 igu_sb_id;
	u32 val, rval;

	if (!RESC_NUM(p_hwfn, ECORE_SB)) {
		/* We're using an old MFW - have to prevent any switching
		 * of SBs between PF and VFs, as later the driver wouldn't be
		 * able to tell which belongs to which.
		 */
		p_info->b_allow_pf_vf_change = false;
	} else {
		/* Use the numbers the MFW has provided -
		 * don't forget the MFW accounts for the default SB as well.
		 */
		p_info->b_allow_pf_vf_change = true;

		if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
			DP_INFO(p_hwfn,
				"MFW notifies of 0x%04x PF SBs; IGU indicates only 0x%04x\n",
				RESC_NUM(p_hwfn, ECORE_SB) - 1,
				p_info->usage.cnt);
			p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
		}

		/* TODO - how do we learn about VF SBs from MFW? */
		if (IS_PF_SRIOV(p_hwfn)) {
			u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

			if (vfs != p_info->usage.iov_cnt)
				DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
					   p_info->usage.iov_cnt, vfs);

			/* At this point we know how many SBs we have in
			 * total in the IGU and how many belong to the PF,
			 * so we can validate there are enough left for
			 * the VFs.
			 */
			if (vfs > p_info->usage.free_cnt +
				  p_info->usage.free_cnt_iov -
				  p_info->usage.cnt) {
				DP_NOTICE(p_hwfn, true,
					  "Not enough SBs for VFs - 0x%04x SBs, of which %04x are used by the PF and %04x are required\n",
					  p_info->usage.free_cnt +
					  p_info->usage.free_cnt_iov,
					  p_info->usage.cnt, vfs);
				return ECORE_INVAL;
			}
		}
	}

	/* Cap the number of VF SBs by the number of VFs */
	if (IS_PF_SRIOV(p_hwfn))
		p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs;

	/* Mark all SBs as free, now in the right PF/VF division */
	p_info->usage.free_cnt = p_info->usage.cnt;
	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
	p_info->usage.orig = p_info->usage.cnt;
	p_info->usage.iov_orig = p_info->usage.iov_cnt;

	/* We now proceed to re-configure the IGU cam to reflect the initial
	 * configuration. We can start with the Default SB.
	 */
	pf_sbs = p_info->usage.cnt;
	vf_sbs = p_info->usage.iov_cnt;

	for (igu_sb_id = p_info->igu_dsb_id;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];
		val = 0;

		if (!(p_block->status & ECORE_IGU_STATUS_VALID))
			continue;

		if (p_block->status & ECORE_IGU_STATUS_DSB) {
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = 0;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_DSB;
		} else if (pf_sbs) {
			pf_sbs--;
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = p_info->usage.cnt - pf_sbs;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_FREE;
		} else if (vf_sbs) {
			p_block->function_id =
				p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
				p_info->usage.iov_cnt - vf_sbs;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;
			vf_sbs--;
		} else {
			p_block->function_id = 0;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
		}

		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
			  p_block->function_id);
		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
			  p_block->vector_number);

		/* VF entries would be enabled when the VF is initialized */
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

		rval = ecore_rd(p_hwfn, p_ptt,
				IGU_REG_MAPPING_MEMORY +
				sizeof(u32) * igu_sb_id);

		if (rval != val) {
			ecore_wr(p_hwfn, p_ptt,
				 IGU_REG_MAPPING_MEMORY +
				 sizeof(u32) * igu_sb_id,
				 val);

			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number,
				   rval, val);
		}
	}

	return 0;
}
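/* Worked example (illustrative): with usage.cnt == 4 PF SBs, the loop above
 * hands out vector_number = usage.cnt - pf_sbs after the decrement, i.e.
 * vectors 1, 2, 3, 4 in CAM order, while vector 0 stays reserved for the
 * default SB. VF lines always get vector 0 and are written disabled.
 */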
int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;

	/* Return all the usage indications to default prior to the reset;
	 * The reset expects the !orig to reflect the initial status of the
	 * SBs, and would re-calculate the originals based on those.
	 */
	p_cnt->cnt = p_cnt->orig;
	p_cnt->free_cnt = p_cnt->orig;
	p_cnt->iov_cnt = p_cnt->iov_orig;
	p_cnt->free_cnt_iov = p_cnt->iov_orig;
	p_cnt->orig = 0;
	p_cnt->iov_orig = 0;

	/* TODO - we probably need to re-configure the CAU as well... */
	return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
}

static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 igu_sb_id)
{
	u32 val = ecore_rd(p_hwfn, p_ptt,
			   IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
	struct ecore_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];

	/* Fill the block information */
	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);

	p_block->igu_sb_id = igu_sb_id;
}
enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_igu_info *p_igu_info;
	struct ecore_igu_block *p_block;
	u32 min_vf = 0, max_vf = 0;
	u16 igu_sb_id;

	p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
						 GFP_KERNEL,
						 sizeof(*p_igu_info));
	if (!p_hwfn->hw_info.p_igu_info)
		return ECORE_NOMEM;
	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Distinguish between existent and non-existent default SB */
	p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;

	/* Find the range of VF ids whose SB belong to this PF */
	if (p_hwfn->p_dev->p_iov_info) {
		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

		min_vf = p_iov->first_vf_in_pf;
		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
	}

	for (igu_sb_id = 0;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		/* Read current entry; Notice it might not belong to this PF */
		ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
		p_block = &p_igu_info->entry[igu_sb_id];

		if ((p_block->is_pf) &&
		    (p_block->function_id == p_hwfn->rel_pf_id)) {
			p_block->status = ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
				p_igu_info->usage.cnt++;
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
			   (p_block->function_id < max_vf)) {
			/* Available for VFs of this PF */
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
				p_igu_info->usage.iov_cnt++;
		}

		/* Mark the first entry belonging to the PF or its VFs
		 * as the default SB [we'll reset IGU prior to first usage].
		 */
		if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
		    (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
			p_igu_info->igu_dsb_id = igu_sb_id;
			p_block->status |= ECORE_IGU_STATUS_DSB;
		}

		/* While this isn't suitable for all clients, limit the
		 * number of prints by having each PF print only its entries,
		 * with the exception of PF0 which prints everything.
		 */
		if ((p_block->status & ECORE_IGU_STATUS_VALID) ||
		    (p_hwfn->abs_pf_id == 0))
			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number);
	}

	if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) {
		DP_NOTICE(p_hwfn, true,
			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
			  p_igu_info->igu_dsb_id);
		return ECORE_INVAL;
	}

	/* All non-default SBs are considered free at this point */
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
		   p_igu_info->igu_dsb_id, p_igu_info->usage.cnt,
		   p_igu_info->usage.iov_cnt);

	return ECORE_SUCCESS;
}
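/* Note the counting subtlety above: usage.cnt and usage.iov_cnt are only
 * incremented once igu_dsb_id has been latched, so the default SB itself is
 * deliberately excluded from the PF's and VFs' usable SB counts.
 */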
2441 */ 2442 vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf; 2443 2444 p_block->function_id = (u8)vf_num; 2445 p_block->is_pf = 0; 2446 p_block->vector_number = 0; 2447 2448 p_info->usage.cnt--; 2449 p_info->usage.free_cnt--; 2450 p_info->usage.iov_cnt++; 2451 p_info->usage.free_cnt_iov++; 2452 2453 /* TODO - if SBs aren't really the limiting factor, 2454 * then it might not be accurate [in the since that 2455 * we might not need decrement the feature]. 2456 */ 2457 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--; 2458 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++; 2459 } else { 2460 p_block->status |= ECORE_IGU_STATUS_PF; 2461 p_block->function_id = p_hwfn->rel_pf_id; 2462 p_block->is_pf = 1; 2463 p_block->vector_number = sb_id + 1; 2464 2465 p_info->usage.cnt++; 2466 p_info->usage.free_cnt++; 2467 p_info->usage.iov_cnt--; 2468 p_info->usage.free_cnt_iov--; 2469 2470 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++; 2471 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--; 2472 } 2473 2474 /* Update the IGU and CAU with the new configuration */ 2475 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2476 p_block->function_id); 2477 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2478 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2479 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2480 p_block->vector_number); 2481 2482 ecore_wr(p_hwfn, p_ptt, 2483 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id, 2484 val); 2485 2486 ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0, 2487 igu_sb_id, vf_num, 2488 p_block->is_pf ? 0 : 1); 2489 2490 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2491 "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2492 igu_sb_id, p_block->function_id, 2493 p_block->is_pf, p_block->vector_number); 2494 2495 return ECORE_SUCCESS; 2496 } 2497 2498 /** 2499 * @brief Initialize igu runtime registers 2500 * 2501 * @param p_hwfn 2502 */ 2503 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn) 2504 { 2505 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; 2506 2507 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); 2508 } 2509 2510 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \ 2511 IGU_CMD_INT_ACK_BASE) 2512 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \ 2513 IGU_CMD_INT_ACK_BASE) 2514 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn) 2515 { 2516 u32 intr_status_hi = 0, intr_status_lo = 0; 2517 u64 intr_status = 0; 2518 2519 intr_status_lo = REG_RD(p_hwfn, 2520 GTT_BAR0_MAP_REG_IGU_CMD + 2521 LSB_IGU_CMD_ADDR * 8); 2522 intr_status_hi = REG_RD(p_hwfn, 2523 GTT_BAR0_MAP_REG_IGU_CMD + 2524 MSB_IGU_CMD_ADDR * 8); 2525 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; 2526 2527 return intr_status; 2528 } 2529 2530 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn) 2531 { 2532 OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn); 2533 p_hwfn->b_sp_dpc_enabled = true; 2534 } 2535 2536 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn) 2537 { 2538 p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn); 2539 if (!p_hwfn->sp_dpc) 2540 return ECORE_NOMEM; 2541 2542 return ECORE_SUCCESS; 2543 } 2544 2545 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn) 2546 { 2547 OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc); 2548 } 2549 2550 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn, 2551 struct ecore_ptt *p_ptt) 2552 { 2553 enum _ecore_status_t rc = ECORE_SUCCESS; 2554 2555 rc = ecore_int_sp_dpc_alloc(p_hwfn); 2556 if (rc != ECORE_SUCCESS) { 2557 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc 
enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	rc = ecore_int_sp_dpc_alloc(p_hwfn);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
		return rc;
	}

	rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
		return rc;
	}

	rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");

	return rc;
}

void ecore_int_free(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_sb_free(p_hwfn);
	ecore_int_sb_attn_free(p_hwfn);
	ecore_int_sp_dpc_free(p_hwfn);
}

void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
		return;

	ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	ecore_int_sb_attn_setup(p_hwfn, p_ptt);
	ecore_int_sp_dpc_setup(p_hwfn);
}

void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
			   struct ecore_sb_cnt_info *p_sb_cnt_info)
{
	struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info;

	if (!p_igu_info || !p_sb_cnt_info)
		return;

	OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage,
		    sizeof(*p_sb_cnt_info));
}

void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
{
	int i;

	for_each_hwfn(p_dev, i)
		p_dev->hwfns[i].b_int_requested = false;
}

void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable)
{
	p_dev->attn_clr_en = clr_enable;
}

enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u8 timer_res, u16 sb_id, bool tx)
{
	struct cau_sb_entry sb_entry;
	enum _ecore_status_t rc;

	if (!p_hwfn->hw_init_done) {
		DP_ERR(p_hwfn, "hardware not initialized yet\n");
		return ECORE_INVAL;
	}

	rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
				 sb_id * sizeof(u64),
				 (u64)(osal_uintptr_t)&sb_entry, 2, 0);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	if (tx)
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
	else
		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
				 (u64)(osal_uintptr_t)&sb_entry,
				 CAU_REG_SB_VAR_MEMORY +
				 sb_id * sizeof(u64), 2, 0);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
		return rc;
	}

	return rc;
}

enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  struct ecore_sb_info *p_sb,
					  struct ecore_sb_info_dbg *p_info)
{
	u16 sbid = p_sb->igu_sb_id;
	int i;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_INVAL;

	if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
		return ECORE_INVAL;

	p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
				    IGU_REG_PRODUCER_MEMORY + sbid * 4);
	p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
				    IGU_REG_CONSUMER_MEM + sbid * 4);

	for (i = 0; i < PIS_PER_SB_E4; i++)
		p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
					      CAU_REG_PI_MEMORY +
					      sbid * 4 * PIS_PER_SB_E4 +
					      i * 4);

	return ECORE_SUCCESS;
}
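/* Illustrative usage sketch (hypothetical caller, not part of the driver,
 * kept under #if 0): adjusting the Rx timer resolution of SB 0 at runtime
 * via the read-modify-write DMAE flow implemented by
 * ecore_int_set_timer_res() above. tx == false selects TIMER_RES0, the Rx
 * resolution, per the SET_FIELD branch in that function.
 */
#if 0
static enum _ecore_status_t example_set_rx_timer_res(struct ecore_hwfn *p_hwfn,
						     struct ecore_ptt *p_ptt)
{
	/* timer_res = 2 matches the coarsest resolution that
	 * ecore_int_cau_conf_sb() picks for coalescing values above 0xFF.
	 */
	return ecore_int_set_timer_res(p_hwfn, p_ptt, 2 /* timer_res */,
				       0 /* sb_id */, false /* tx */);
}
#endif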