1 /* 2 * Copyright (c) 2016 QLogic Corporation. 3 * All rights reserved. 4 * www.qlogic.com 5 * 6 * See LICENSE.qede_pmd for copyright and licensing details. 7 */ 8 9 #include "bcm_osal.h" 10 #include "ecore.h" 11 #include "ecore_spq.h" 12 #include "reg_addr.h" 13 #include "ecore_gtt_reg_addr.h" 14 #include "ecore_init_ops.h" 15 #include "ecore_rt_defs.h" 16 #include "ecore_int.h" 17 #include "reg_addr.h" 18 #include "ecore_hw.h" 19 #include "ecore_sriov.h" 20 #include "ecore_vf.h" 21 #include "ecore_hw_defs.h" 22 #include "ecore_hsi_common.h" 23 #include "ecore_mcp.h" 24 25 struct ecore_pi_info { 26 ecore_int_comp_cb_t comp_cb; 27 void *cookie; /* Will be sent to the compl cb function */ 28 }; 29 30 struct ecore_sb_sp_info { 31 struct ecore_sb_info sb_info; 32 /* per protocol index data */ 33 struct ecore_pi_info pi_info_arr[PIS_PER_SB]; 34 }; 35 36 enum ecore_attention_type { 37 ECORE_ATTN_TYPE_ATTN, 38 ECORE_ATTN_TYPE_PARITY, 39 }; 40 41 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ 42 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) 43 44 struct aeu_invert_reg_bit { 45 char bit_name[30]; 46 47 #define ATTENTION_PARITY (1 << 0) 48 49 #define ATTENTION_LENGTH_MASK (0x00000ff0) 50 #define ATTENTION_LENGTH_SHIFT (4) 51 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ 52 ATTENTION_LENGTH_SHIFT) 53 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT) 54 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) 55 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ 56 ATTENTION_PARITY) 57 58 /* Multiple bits start with this offset */ 59 #define ATTENTION_OFFSET_MASK (0x000ff000) 60 #define ATTENTION_OFFSET_SHIFT (12) 61 62 #define ATTENTION_CLEAR_ENABLE (1 << 28) 63 unsigned int flags; 64 65 /* Callback to call if attention will be triggered */ 66 enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn); 67 68 enum block_id block_index; 69 }; 70 71 struct aeu_invert_reg { 72 struct aeu_invert_reg_bit bits[32]; 73 }; 74 75 #define MAX_ATTN_GRPS (8) 76 #define NUM_ATTN_REGS (9) 77 78 static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn) 79 { 80 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); 81 82 DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp); 83 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff); 84 85 return ECORE_SUCCESS; 86 } 87 88 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000) 89 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14) 90 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0) 91 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6) 92 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020) 93 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5) 94 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e) 95 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1) 96 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1) 97 #define ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT (0) 98 #define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1) 99 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) 100 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) 101 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) 102 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e) 103 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) 104 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20) 105 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) 106 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0) 107 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) 108 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000) 109 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) 110 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000) 111 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) 112 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn) 113 { 114 u32 tmp = 115 ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 116 PSWHST_REG_VF_DISABLED_ERROR_VALID); 117 118 /* Disabled VF access */ 119 if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) { 120 u32 addr, data; 121 122 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 123 PSWHST_REG_VF_DISABLED_ERROR_ADDRESS); 124 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 125 PSWHST_REG_VF_DISABLED_ERROR_DATA); 126 DP_INFO(p_hwfn->p_dev, 127 "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]" 128 " Write [0x%02x] Addr [0x%08x]\n", 129 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK) 130 >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT), 131 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK) 132 >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT), 133 (u8)((data & 134 ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >> 135 ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT), 136 (u8)((data & 137 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >> 138 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT), 139 (u8)((data & 140 ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >> 141 ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT), 142 addr); 143 } 144 145 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 146 PSWHST_REG_INCORRECT_ACCESS_VALID); 147 if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) { 148 u32 addr, data, length; 149 150 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 151 PSWHST_REG_INCORRECT_ACCESS_ADDRESS); 152 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 153 PSWHST_REG_INCORRECT_ACCESS_DATA); 154 length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 155 PSWHST_REG_INCORRECT_ACCESS_LENGTH); 156 157 DP_INFO(p_hwfn->p_dev, 158 "Incorrect access to %08x of length %08x - PF [%02x]" 159 " VF [%04x] [valid %02x] client [%02x] write [%02x]" 160 " Byte-Enable [%04x] [%08x]\n", 161 addr, length, 162 (u8)((data & 163 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >> 164 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT), 165 (u8)((data & 166 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >> 167 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT), 168 (u8)((data & 169 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >> 170 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT), 171 (u8)((data & 172 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >> 173 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT), 174 (u8)((data & 175 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >> 176 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT), 177 (u8)((data & 178 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >> 179 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT), 180 data); 181 } 182 183 /* TODO - We know 'some' of these are legal due to virtualization, 184 * but is it true for all of them? 185 */ 186 return ECORE_SUCCESS; 187 } 188 189 #define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0) 190 #define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0) 191 #define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23) 192 #define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24) 193 #define ECORE_GRC_ATTENTION_MASTER_SHIFT (24) 194 #define ECORE_GRC_ATTENTION_PF_MASK (0xf) 195 #define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4) 196 #define ECORE_GRC_ATTENTION_VF_SHIFT (4) 197 #define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14) 198 #define ECORE_GRC_ATTENTION_PRIV_SHIFT (14) 199 #define ECORE_GRC_ATTENTION_PRIV_VF (0) 200 static const char *grc_timeout_attn_master_to_str(u8 master) 201 { 202 switch (master) { 203 case 1: 204 return "PXP"; 205 case 2: 206 return "MCP"; 207 case 3: 208 return "MSDM"; 209 case 4: 210 return "PSDM"; 211 case 5: 212 return "YSDM"; 213 case 6: 214 return "USDM"; 215 case 7: 216 return "TSDM"; 217 case 8: 218 return "XSDM"; 219 case 9: 220 return "DBU"; 221 case 10: 222 return "DMAE"; 223 default: 224 return "Unknown"; 225 } 226 } 227 228 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn) 229 { 230 u32 tmp, tmp2; 231 232 /* We've already cleared the timeout interrupt register, so we learn 233 * of interrupts via the validity register 234 */ 235 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 236 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); 237 if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) 238 goto out; 239 240 /* Read the GRC timeout information */ 241 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 242 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); 243 tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 244 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); 245 246 DP_INFO(p_hwfn->p_dev, 247 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s]" 248 " [PF: %02x %s %02x]\n", 249 tmp2, tmp, 250 (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from", 251 (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2, 252 grc_timeout_attn_master_to_str((tmp & 253 ECORE_GRC_ATTENTION_MASTER_MASK) >> 254 ECORE_GRC_ATTENTION_MASTER_SHIFT), 255 (tmp2 & ECORE_GRC_ATTENTION_PF_MASK), 256 (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >> 257 ECORE_GRC_ATTENTION_PRIV_SHIFT) == 258 ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)", 259 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >> 260 ECORE_GRC_ATTENTION_VF_SHIFT); 261 262 out: 263 /* Regardles of anything else, clean the validity bit */ 264 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 265 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 266 return ECORE_SUCCESS; 267 } 268 269 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29) 270 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26) 271 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20) 272 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 273 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19) 274 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24) 275 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 276 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21) 277 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22) 278 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23) 279 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23) 280 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25) 281 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23) 282 static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn) 283 { 284 u32 tmp; 285 286 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 287 PGLUE_B_REG_TX_ERR_WR_DETAILS2); 288 if (tmp & ECORE_PGLUE_ATTENTION_VALID) { 289 u32 addr_lo, addr_hi, details; 290 291 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 292 PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 293 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 294 PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 295 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 296 PGLUE_B_REG_TX_ERR_WR_DETAILS); 297 298 DP_INFO(p_hwfn, 299 "Illegal write by chip to [%08x:%08x] blocked." 300 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]" 301 " Details2 %08x [Was_error %02x BME deassert %02x" 302 " FID_enable deassert %02x]\n", 303 addr_hi, addr_lo, details, 304 (u8)((details & 305 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> 306 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 307 (u8)((details & 308 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> 309 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 310 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) 311 ? 1 : 0), tmp, 312 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 313 : 0), 314 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 315 0), 316 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 317 : 0)); 318 } 319 320 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 321 PGLUE_B_REG_TX_ERR_RD_DETAILS2); 322 if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) { 323 u32 addr_lo, addr_hi, details; 324 325 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 326 PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 327 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 328 PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 329 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 330 PGLUE_B_REG_TX_ERR_RD_DETAILS); 331 332 DP_INFO(p_hwfn, 333 "Illegal read by chip from [%08x:%08x] blocked." 334 " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]" 335 " Details2 %08x [Was_error %02x BME deassert %02x" 336 " FID_enable deassert %02x]\n", 337 addr_hi, addr_lo, details, 338 (u8)((details & 339 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> 340 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 341 (u8)((details & 342 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> 343 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 344 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) 345 ? 1 : 0), tmp, 346 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 347 : 0), 348 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 349 0), 350 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 351 : 0)); 352 } 353 354 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 355 PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); 356 if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID) 357 DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp); 358 359 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 360 PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); 361 if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) { 362 u32 addr_hi, addr_lo; 363 364 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 365 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); 366 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 367 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); 368 369 DP_INFO(p_hwfn, "ICPL error - %08x [Address %08x:%08x]\n", 370 tmp, addr_hi, addr_lo); 371 } 372 373 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 374 PGLUE_B_REG_VF_ILT_ERR_DETAILS2); 375 if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) { 376 u32 addr_hi, addr_lo, details; 377 378 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 379 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); 380 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 381 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); 382 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 383 PGLUE_B_REG_VF_ILT_ERR_DETAILS); 384 385 DP_INFO(p_hwfn, 386 "ILT error - Details %08x Details2 %08x" 387 " [Address %08x:%08x]\n", 388 details, tmp, addr_hi, addr_lo); 389 } 390 391 /* Clear the indications */ 392 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 393 PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); 394 395 return ECORE_SUCCESS; 396 } 397 398 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn) 399 { 400 DP_NOTICE(p_hwfn, false, "FW assertion!\n"); 401 402 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT); 403 404 return ECORE_INVAL; 405 } 406 407 static enum _ecore_status_t 408 ecore_general_attention_35(struct ecore_hwfn *p_hwfn) 409 { 410 DP_INFO(p_hwfn, "General attention 35!\n"); 411 412 return ECORE_SUCCESS; 413 } 414 415 #define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff) 416 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff) 417 #define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f) 418 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16) 419 420 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn) 421 { 422 u32 reason; 423 424 reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) & 425 ECORE_DORQ_ATTENTION_REASON_MASK; 426 if (reason) { 427 u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 428 DORQ_REG_DB_DROP_DETAILS); 429 430 DP_INFO(p_hwfn->p_dev, 431 "DORQ db_drop: address 0x%08x Opaque FID 0x%04x" 432 " Size [bytes] 0x%08x Reason: 0x%08x\n", 433 ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 434 DORQ_REG_DB_DROP_DETAILS_ADDRESS), 435 (u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK), 436 ((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >> 437 ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason); 438 } 439 440 return ECORE_INVAL; 441 } 442 443 static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn) 444 { 445 #ifndef ASIC_ONLY 446 if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) { 447 u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 448 TM_REG_INT_STS_1); 449 450 if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN | 451 TM_REG_INT_STS_1_PEND_CONN_SCAN)) 452 return ECORE_INVAL; 453 454 if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN | 455 TM_REG_INT_STS_1_PEND_CONN_SCAN)) 456 DP_INFO(p_hwfn, 457 "TM attention on emulation - most likely" 458 " results of clock-ratios\n"); 459 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1); 460 val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN | 461 TM_REG_INT_MASK_1_PEND_TASK_SCAN; 462 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val); 463 464 return ECORE_SUCCESS; 465 } 466 #endif 467 468 return ECORE_INVAL; 469 } 470 471 /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ 472 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { 473 { 474 { /* After Invert 1 */ 475 {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 476 MAX_BLOCK_ID}, 477 } 478 }, 479 480 { 481 { /* After Invert 2 */ 482 {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 483 {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 484 {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb, 485 BLOCK_PGLUE_B}, 486 {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 487 {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 488 {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 489 {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 490 {"SW timers #%d", 491 (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), 492 OSAL_NULL, MAX_BLOCK_ID}, 493 {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 494 BLOCK_PGLCS}, 495 } 496 }, 497 498 { 499 { /* After Invert 3 */ 500 {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 501 MAX_BLOCK_ID}, 502 } 503 }, 504 505 { 506 { /* After Invert 4 */ 507 {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, 508 ecore_fw_assertion, MAX_BLOCK_ID}, 509 {"General Attention %d", 510 (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT), 511 OSAL_NULL, MAX_BLOCK_ID}, 512 {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, 513 ecore_general_attention_35, MAX_BLOCK_ID}, 514 {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 515 BLOCK_CNIG}, 516 {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID}, 517 {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 518 {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 519 {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 520 {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 521 {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 522 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 523 MAX_BLOCK_ID}, 524 {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG}, 525 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB}, 526 {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB}, 527 {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB}, 528 {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS}, 529 } 530 }, 531 532 { 533 { /* After Invert 5 */ 534 {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC}, 535 {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1}, 536 {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2}, 537 {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB}, 538 {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF}, 539 {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM}, 540 {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM}, 541 {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM}, 542 {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM}, 543 {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM}, 544 {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM}, 545 {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM}, 546 {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM}, 547 {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM}, 548 {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM}, 549 {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM}, 550 } 551 }, 552 553 { 554 { /* After Invert 6 */ 555 {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM}, 556 {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM}, 557 {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM}, 558 {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM}, 559 {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM}, 560 {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM}, 561 {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM}, 562 {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM}, 563 {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM}, 564 {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD}, 565 {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD}, 566 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD}, 567 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD}, 568 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ}, 569 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG}, 570 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC}, 571 } 572 }, 573 574 { 575 { /* After Invert 7 */ 576 {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC}, 577 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU}, 578 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE}, 579 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU}, 580 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 581 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU}, 582 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU}, 583 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM}, 584 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC}, 585 {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF}, 586 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF}, 587 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS}, 588 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC}, 589 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS}, 590 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE}, 591 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 592 {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ}, 593 } 594 }, 595 596 { 597 { /* After Invert 8 */ 598 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2}, 599 {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR}, 600 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2}, 601 {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD}, 602 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2}, 603 {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST}, 604 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2}, 605 {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC}, 606 {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU}, 607 {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI}, 608 {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 609 {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 610 {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 611 {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 612 {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 613 {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 614 {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS}, 615 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 616 {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 617 {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 618 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 619 MAX_BLOCK_ID}, 620 } 621 }, 622 623 { 624 { /* After Invert 9 */ 625 {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 626 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, 627 MAX_BLOCK_ID}, 628 {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 629 {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 630 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 631 MAX_BLOCK_ID}, 632 } 633 }, 634 635 }; 636 637 #define ATTN_STATE_BITS (0xfff) 638 #define ATTN_BITS_MASKABLE (0x3ff) 639 struct ecore_sb_attn_info { 640 /* Virtual & Physical address of the SB */ 641 struct atten_status_block *sb_attn; 642 dma_addr_t sb_phys; 643 644 /* Last seen running index */ 645 u16 index; 646 647 /* A mask of the AEU bits resulting in a parity error */ 648 u32 parity_mask[NUM_ATTN_REGS]; 649 650 /* A pointer to the attention description structure */ 651 struct aeu_invert_reg *p_aeu_desc; 652 653 /* Previously asserted attentions, which are still unasserted */ 654 u16 known_attn; 655 656 /* Cleanup address for the link's general hw attention */ 657 u32 mfw_attn_addr; 658 }; 659 660 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn, 661 struct ecore_sb_attn_info *p_sb_desc) 662 { 663 u16 rc = 0, index; 664 665 OSAL_MMIOWB(p_hwfn->p_dev); 666 667 index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index); 668 if (p_sb_desc->index != index) { 669 p_sb_desc->index = index; 670 rc = ECORE_SB_ATT_IDX; 671 } 672 673 OSAL_MMIOWB(p_hwfn->p_dev); 674 675 return rc; 676 } 677 678 /** 679 * @brief ecore_int_assertion - handles asserted attention bits 680 * 681 * @param p_hwfn 682 * @param asserted_bits newly asserted bits 683 * @return enum _ecore_status_t 684 */ 685 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn, 686 u16 asserted_bits) 687 { 688 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 689 u32 igu_mask; 690 691 /* Mask the source of the attention in the IGU */ 692 igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 693 IGU_REG_ATTENTION_ENABLE); 694 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", 695 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); 696 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); 697 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); 698 699 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 700 "inner known ATTN state: 0x%04x --> 0x%04x\n", 701 sb_attn_sw->known_attn, 702 sb_attn_sw->known_attn | asserted_bits); 703 sb_attn_sw->known_attn |= asserted_bits; 704 705 /* Handle MCP events */ 706 if (asserted_bits & 0x100) { 707 ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); 708 /* Clean the MCP attention */ 709 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 710 sb_attn_sw->mfw_attn_addr, 0); 711 } 712 713 /* FIXME - this will change once we'll have GOOD gtt definitions */ 714 DIRECT_REG_WR(p_hwfn, 715 (u8 OSAL_IOMEM *) p_hwfn->regview + 716 GTT_BAR0_MAP_REG_IGU_CMD + 717 ((IGU_CMD_ATTN_BIT_SET_UPPER - 718 IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits); 719 720 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n", 721 asserted_bits); 722 723 return ECORE_SUCCESS; 724 } 725 726 static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn, 727 enum block_id id, enum dbg_attn_type type, 728 bool b_clear) 729 { 730 /* @DPDK */ 731 DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type); 732 } 733 734 /** 735 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single 736 * cause of the attention 737 * 738 * @param p_hwfn 739 * @param p_aeu - descriptor of an AEU bit which caused the attention 740 * @param aeu_en_reg - register offset of the AEU enable reg. which configured 741 * this bit to this group. 742 * @param bit_index - index of this bit in the aeu_en_reg 743 * 744 * @return enum _ecore_status_t 745 */ 746 static enum _ecore_status_t 747 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn, 748 struct aeu_invert_reg_bit *p_aeu, 749 u32 aeu_en_reg, 750 const char *p_bit_name, 751 u32 bitmask) 752 { 753 enum _ecore_status_t rc = ECORE_INVAL; 754 bool b_fatal = false; 755 756 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", 757 p_bit_name, bitmask); 758 759 /* Call callback before clearing the interrupt status */ 760 if (p_aeu->cb) { 761 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", 762 p_bit_name); 763 rc = p_aeu->cb(p_hwfn); 764 } 765 766 if (rc != ECORE_SUCCESS) 767 b_fatal = true; 768 769 /* Print HW block interrupt registers */ 770 if (p_aeu->block_index != MAX_BLOCK_ID) { 771 ecore_int_attn_print(p_hwfn, p_aeu->block_index, 772 ATTN_TYPE_INTERRUPT, !b_fatal); 773 } 774 775 /* Reach assertion if attention is fatal */ 776 if (b_fatal) { 777 DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n", 778 p_bit_name); 779 780 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); 781 } 782 783 /* Prevent this Attention from being asserted in the future */ 784 if (p_aeu->flags & ATTENTION_CLEAR_ENABLE || 785 p_hwfn->p_dev->attn_clr_en) { 786 u32 val; 787 u32 mask = ~bitmask; 788 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 789 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask)); 790 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", 791 p_bit_name); 792 } 793 794 return rc; 795 } 796 797 /** 798 * @brief ecore_int_deassertion_parity - handle a single parity AEU source 799 * 800 * @param p_hwfn 801 * @param p_aeu - descriptor of an AEU bit which caused the 802 * parity 803 * @param bit_index 804 */ 805 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn, 806 struct aeu_invert_reg_bit *p_aeu, 807 u8 bit_index) 808 { 809 u32 block_id = p_aeu->block_index; 810 811 DP_INFO(p_hwfn->p_dev, "%s[%d] parity attention is set\n", 812 p_aeu->bit_name, bit_index); 813 814 if (block_id == MAX_BLOCK_ID) 815 return; 816 817 ecore_int_attn_print(p_hwfn, block_id, 818 ATTN_TYPE_PARITY, false); 819 820 /* In A0, there's a single parity bit for several blocks */ 821 if (block_id == BLOCK_BTB) { 822 ecore_int_attn_print(p_hwfn, BLOCK_OPTE, 823 ATTN_TYPE_PARITY, false); 824 ecore_int_attn_print(p_hwfn, BLOCK_MCP, 825 ATTN_TYPE_PARITY, false); 826 } 827 } 828 829 /** 830 * @brief - handles deassertion of previously asserted attentions. 831 * 832 * @param p_hwfn 833 * @param deasserted_bits - newly deasserted bits 834 * @return enum _ecore_status_t 835 * 836 */ 837 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn, 838 u16 deasserted_bits) 839 { 840 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 841 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask; 842 bool b_parity = false; 843 u8 i, j, k, bit_idx; 844 enum _ecore_status_t rc = ECORE_SUCCESS; 845 846 /* Read the attention registers in the AEU */ 847 for (i = 0; i < NUM_ATTN_REGS; i++) { 848 aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 849 MISC_REG_AEU_AFTER_INVERT_1_IGU + 850 i * 0x4); 851 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 852 "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]); 853 } 854 855 /* Handle parity attentions first */ 856 for (i = 0; i < NUM_ATTN_REGS; i++) { 857 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; 858 u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 859 MISC_REG_AEU_ENABLE1_IGU_OUT_0 + 860 i * sizeof(u32)); 861 862 u32 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; 863 864 /* Skip register in which no parity bit is currently set */ 865 if (!parities) 866 continue; 867 868 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 869 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; 870 871 if ((p_bit->flags & ATTENTION_PARITY) && 872 !!(parities & (1 << bit_idx))) { 873 ecore_int_deassertion_parity(p_hwfn, p_bit, 874 bit_idx); 875 b_parity = true; 876 } 877 878 bit_idx += ATTENTION_LENGTH(p_bit->flags); 879 } 880 } 881 882 /* Find non-parity cause for attention and act */ 883 for (k = 0; k < MAX_ATTN_GRPS; k++) { 884 struct aeu_invert_reg_bit *p_aeu; 885 886 /* Handle only groups whose attention is currently deasserted */ 887 if (!(deasserted_bits & (1 << k))) 888 continue; 889 890 for (i = 0; i < NUM_ATTN_REGS; i++) { 891 u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + 892 i * sizeof(u32) + k * sizeof(u32) * NUM_ATTN_REGS; 893 u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 894 u32 bits = aeu_inv_arr[i] & en; 895 896 /* Skip if no bit from this group is currently set */ 897 if (!bits) 898 continue; 899 900 /* Find all set bits from current register which belong 901 * to current group, making them responsible for the 902 * previous assertion. 903 */ 904 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 905 unsigned long int bitmask; 906 u8 bit, bit_len; 907 908 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; 909 910 /* No need to handle attention-only bits */ 911 if (p_aeu->flags == ATTENTION_PAR) 912 continue; 913 914 bit = bit_idx; 915 bit_len = ATTENTION_LENGTH(p_aeu->flags); 916 if (p_aeu->flags & ATTENTION_PAR_INT) { 917 /* Skip Parity */ 918 bit++; 919 bit_len--; 920 } 921 922 bitmask = bits & (((1 << bit_len) - 1) << bit); 923 if (bitmask) { 924 u32 flags = p_aeu->flags; 925 char bit_name[30]; 926 927 bit = (u8)OSAL_FIND_FIRST_BIT(&bitmask, 928 bit_len); 929 930 /* Some bits represent more than a 931 * a single interrupt. Correctly print 932 * their name. 933 */ 934 if (ATTENTION_LENGTH(flags) > 2 || 935 ((flags & ATTENTION_PAR_INT) && 936 ATTENTION_LENGTH(flags) > 1)) 937 OSAL_SNPRINTF(bit_name, 30, 938 p_aeu->bit_name, 939 bit); 940 else 941 OSAL_STRNCPY(bit_name, 942 p_aeu->bit_name, 943 30); 944 /* Handle source of the attention */ 945 ecore_int_deassertion_aeu_bit(p_hwfn, 946 p_aeu, 947 aeu_en, 948 bit_name, 949 bitmask); 950 } 951 952 bit_idx += ATTENTION_LENGTH(p_aeu->flags); 953 } 954 } 955 } 956 957 /* Clear IGU indication for the deasserted bits */ 958 /* FIXME - this will change once we'll have GOOD gtt definitions */ 959 DIRECT_REG_WR(p_hwfn, 960 (u8 OSAL_IOMEM *) p_hwfn->regview + 961 GTT_BAR0_MAP_REG_IGU_CMD + 962 ((IGU_CMD_ATTN_BIT_CLR_UPPER - 963 IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits)); 964 965 /* Unmask deasserted attentions in IGU */ 966 aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 967 IGU_REG_ATTENTION_ENABLE); 968 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); 969 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); 970 971 /* Clear deassertion from inner state */ 972 sb_attn_sw->known_attn &= ~deasserted_bits; 973 974 return rc; 975 } 976 977 static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn) 978 { 979 struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; 980 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; 981 u16 index = 0, asserted_bits, deasserted_bits; 982 u32 attn_bits = 0, attn_acks = 0; 983 enum _ecore_status_t rc = ECORE_SUCCESS; 984 985 /* Read current attention bits/acks - safeguard against attentions 986 * by guaranting work on a synchronized timeframe 987 */ 988 do { 989 index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index); 990 attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits); 991 attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack); 992 } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index)); 993 p_sb_attn->sb_index = index; 994 995 /* Attention / Deassertion are meaningful (and in correct state) 996 * only when they differ and consistent with known state - deassertion 997 * when previous attention & current ack, and assertion when current 998 * attention with no previous attention 999 */ 1000 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & 1001 ~p_sb_attn_sw->known_attn; 1002 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & 1003 p_sb_attn_sw->known_attn; 1004 1005 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) 1006 DP_INFO(p_hwfn, 1007 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n", 1008 index, attn_bits, attn_acks, asserted_bits, 1009 deasserted_bits, p_sb_attn_sw->known_attn); 1010 else if (asserted_bits == 0x100) 1011 DP_INFO(p_hwfn, "MFW indication via attention\n"); 1012 else 1013 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1014 "MFW indication [deassertion]\n"); 1015 1016 if (asserted_bits) { 1017 rc = ecore_int_assertion(p_hwfn, asserted_bits); 1018 if (rc) 1019 return rc; 1020 } 1021 1022 if (deasserted_bits) 1023 rc = ecore_int_deassertion(p_hwfn, deasserted_bits); 1024 1025 return rc; 1026 } 1027 1028 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn, 1029 void OSAL_IOMEM *igu_addr, u32 ack_cons) 1030 { 1031 struct igu_prod_cons_update igu_ack = { 0 }; 1032 1033 igu_ack.sb_id_and_flags = 1034 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | 1035 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | 1036 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | 1037 (IGU_SEG_ACCESS_ATTN << 1038 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); 1039 1040 DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags); 1041 1042 /* Both segments (interrupts & acks) are written to same place address; 1043 * Need to guarantee all commands will be received (in-order) by HW. 1044 */ 1045 OSAL_MMIOWB(p_hwfn->p_dev); 1046 OSAL_BARRIER(p_hwfn->p_dev); 1047 } 1048 1049 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie) 1050 { 1051 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie; 1052 struct ecore_pi_info *pi_info = OSAL_NULL; 1053 struct ecore_sb_attn_info *sb_attn; 1054 struct ecore_sb_info *sb_info; 1055 int arr_size; 1056 u16 rc = 0; 1057 1058 if (!p_hwfn) 1059 return; 1060 1061 if (!p_hwfn->p_sp_sb) { 1062 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n"); 1063 return; 1064 } 1065 1066 sb_info = &p_hwfn->p_sp_sb->sb_info; 1067 arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); 1068 if (!sb_info) { 1069 DP_ERR(p_hwfn->p_dev, 1070 "Status block is NULL - cannot ack interrupts\n"); 1071 return; 1072 } 1073 1074 if (!p_hwfn->p_sb_attn) { 1075 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn"); 1076 return; 1077 } 1078 sb_attn = p_hwfn->p_sb_attn; 1079 1080 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n", 1081 p_hwfn, p_hwfn->my_id); 1082 1083 /* Disable ack for def status block. Required both for msix + 1084 * inta in non-mask mode, in inta does no harm. 1085 */ 1086 ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0); 1087 1088 /* Gather Interrupts/Attentions information */ 1089 if (!sb_info->sb_virt) { 1090 DP_ERR(p_hwfn->p_dev, 1091 "Interrupt Status block is NULL -" 1092 " cannot check for new interrupts!\n"); 1093 } else { 1094 u32 tmp_index = sb_info->sb_ack; 1095 rc = ecore_sb_update_sb_idx(sb_info); 1096 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, 1097 "Interrupt indices: 0x%08x --> 0x%08x\n", 1098 tmp_index, sb_info->sb_ack); 1099 } 1100 1101 if (!sb_attn || !sb_attn->sb_attn) { 1102 DP_ERR(p_hwfn->p_dev, 1103 "Attentions Status block is NULL -" 1104 " cannot check for new attentions!\n"); 1105 } else { 1106 u16 tmp_index = sb_attn->index; 1107 1108 rc |= ecore_attn_update_idx(p_hwfn, sb_attn); 1109 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, 1110 "Attention indices: 0x%08x --> 0x%08x\n", 1111 tmp_index, sb_attn->index); 1112 } 1113 1114 /* Check if we expect interrupts at this time. if not just ack them */ 1115 if (!(rc & ECORE_SB_EVENT_MASK)) { 1116 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1117 return; 1118 } 1119 1120 /* Check the validity of the DPC ptt. If not ack interrupts and fail */ 1121 1122 if (!p_hwfn->p_dpc_ptt) { 1123 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n"); 1124 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1125 return; 1126 } 1127 1128 if (rc & ECORE_SB_ATT_IDX) 1129 ecore_int_attentions(p_hwfn); 1130 1131 if (rc & ECORE_SB_IDX) { 1132 int pi; 1133 1134 /* Since we only looked at the SB index, it's possible more 1135 * than a single protocol-index on the SB incremented. 1136 * Iterate over all configured protocol indices and check 1137 * whether something happened for each. 1138 */ 1139 for (pi = 0; pi < arr_size; pi++) { 1140 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; 1141 if (pi_info->comp_cb != OSAL_NULL) 1142 pi_info->comp_cb(p_hwfn, pi_info->cookie); 1143 } 1144 } 1145 1146 if (sb_attn && (rc & ECORE_SB_ATT_IDX)) { 1147 /* This should be done before the interrupts are enabled, 1148 * since otherwise a new attention will be generated. 1149 */ 1150 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); 1151 } 1152 1153 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1154 } 1155 1156 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn) 1157 { 1158 struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn; 1159 1160 if (!p_sb) 1161 return; 1162 1163 if (p_sb->sb_attn) { 1164 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn, 1165 p_sb->sb_phys, 1166 SB_ATTN_ALIGNED_SIZE(p_hwfn)); 1167 } 1168 OSAL_FREE(p_hwfn->p_dev, p_sb); 1169 } 1170 1171 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn, 1172 struct ecore_ptt *p_ptt) 1173 { 1174 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1175 1176 OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); 1177 1178 sb_info->index = 0; 1179 sb_info->known_attn = 0; 1180 1181 /* Configure Attention Status Block in IGU */ 1182 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, 1183 DMA_LO(p_hwfn->p_sb_attn->sb_phys)); 1184 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, 1185 DMA_HI(p_hwfn->p_sb_attn->sb_phys)); 1186 } 1187 1188 static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn, 1189 struct ecore_ptt *p_ptt, 1190 void *sb_virt_addr, dma_addr_t sb_phy_addr) 1191 { 1192 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1193 int i, j, k; 1194 1195 sb_info->sb_attn = sb_virt_addr; 1196 sb_info->sb_phys = sb_phy_addr; 1197 1198 /* Set the pointer to the AEU descriptors */ 1199 sb_info->p_aeu_desc = aeu_descs; 1200 1201 /* Calculate Parity Masks */ 1202 OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); 1203 for (i = 0; i < NUM_ATTN_REGS; i++) { 1204 /* j is array index, k is bit index */ 1205 for (j = 0, k = 0; k < 32; j++) { 1206 unsigned int flags = aeu_descs[i].bits[j].flags; 1207 1208 if (flags & ATTENTION_PARITY) 1209 sb_info->parity_mask[i] |= 1 << k; 1210 1211 k += ATTENTION_LENGTH(flags); 1212 } 1213 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1214 "Attn Mask [Reg %d]: 0x%08x\n", 1215 i, sb_info->parity_mask[i]); 1216 } 1217 1218 /* Set the address of cleanup for the mcp attention */ 1219 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + 1220 MISC_REG_AEU_GENERAL_ATTN_0; 1221 1222 ecore_int_sb_attn_setup(p_hwfn, p_ptt); 1223 } 1224 1225 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn, 1226 struct ecore_ptt *p_ptt) 1227 { 1228 struct ecore_dev *p_dev = p_hwfn->p_dev; 1229 struct ecore_sb_attn_info *p_sb; 1230 dma_addr_t p_phys = 0; 1231 void *p_virt; 1232 1233 /* SB struct */ 1234 p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb)); 1235 if (!p_sb) { 1236 DP_NOTICE(p_dev, true, 1237 "Failed to allocate `struct ecore_sb_attn_info'"); 1238 return ECORE_NOMEM; 1239 } 1240 1241 /* SB ring */ 1242 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 1243 SB_ATTN_ALIGNED_SIZE(p_hwfn)); 1244 if (!p_virt) { 1245 DP_NOTICE(p_dev, true, 1246 "Failed to allocate status block (attentions)"); 1247 OSAL_FREE(p_dev, p_sb); 1248 return ECORE_NOMEM; 1249 } 1250 1251 /* Attention setup */ 1252 p_hwfn->p_sb_attn = p_sb; 1253 ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); 1254 1255 return ECORE_SUCCESS; 1256 } 1257 1258 /* coalescing timeout = timeset << (timer_res + 1) */ 1259 #define ECORE_CAU_DEF_RX_USECS 24 1260 #define ECORE_CAU_DEF_TX_USECS 48 1261 1262 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn, 1263 struct cau_sb_entry *p_sb_entry, 1264 u8 pf_id, u16 vf_number, u8 vf_valid) 1265 { 1266 struct ecore_dev *p_dev = p_hwfn->p_dev; 1267 u32 cau_state; 1268 u8 timer_res; 1269 1270 OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry)); 1271 1272 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); 1273 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); 1274 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); 1275 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); 1276 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); 1277 1278 cau_state = CAU_HC_DISABLE_STATE; 1279 1280 if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { 1281 cau_state = CAU_HC_ENABLE_STATE; 1282 if (!p_dev->rx_coalesce_usecs) 1283 p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS; 1284 if (!p_dev->tx_coalesce_usecs) 1285 p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS; 1286 } 1287 1288 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ 1289 if (p_dev->rx_coalesce_usecs <= 0x7F) 1290 timer_res = 0; 1291 else if (p_dev->rx_coalesce_usecs <= 0xFF) 1292 timer_res = 1; 1293 else 1294 timer_res = 2; 1295 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 1296 1297 if (p_dev->tx_coalesce_usecs <= 0x7F) 1298 timer_res = 0; 1299 else if (p_dev->tx_coalesce_usecs <= 0xFF) 1300 timer_res = 1; 1301 else 1302 timer_res = 2; 1303 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 1304 1305 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); 1306 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); 1307 } 1308 1309 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn, 1310 struct ecore_ptt *p_ptt, 1311 dma_addr_t sb_phys, u16 igu_sb_id, 1312 u16 vf_number, u8 vf_valid) 1313 { 1314 struct cau_sb_entry sb_entry; 1315 1316 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, 1317 vf_number, vf_valid); 1318 1319 if (p_hwfn->hw_init_done) { 1320 /* Wide-bus, initialize via DMAE */ 1321 u64 phys_addr = (u64)sb_phys; 1322 1323 ecore_dmae_host2grc(p_hwfn, p_ptt, 1324 (u64)(osal_uintptr_t)&phys_addr, 1325 CAU_REG_SB_ADDR_MEMORY + 1326 igu_sb_id * sizeof(u64), 2, 0); 1327 ecore_dmae_host2grc(p_hwfn, p_ptt, 1328 (u64)(osal_uintptr_t)&sb_entry, 1329 CAU_REG_SB_VAR_MEMORY + 1330 igu_sb_id * sizeof(u64), 2, 0); 1331 } else { 1332 /* Initialize Status Block Address */ 1333 STORE_RT_REG_AGG(p_hwfn, 1334 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + 1335 igu_sb_id * 2, sb_phys); 1336 1337 STORE_RT_REG_AGG(p_hwfn, 1338 CAU_REG_SB_VAR_MEMORY_RT_OFFSET + 1339 igu_sb_id * 2, sb_entry); 1340 } 1341 1342 /* Configure pi coalescing if set */ 1343 if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { 1344 /* eth will open queues for all tcs, so configure all of them 1345 * properly, rather than just the active ones 1346 */ 1347 u8 num_tc = p_hwfn->hw_info.num_hw_tc; 1348 1349 u8 timeset, timer_res; 1350 u8 i; 1351 1352 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ 1353 if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F) 1354 timer_res = 0; 1355 else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF) 1356 timer_res = 1; 1357 else 1358 timer_res = 2; 1359 timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res); 1360 ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, 1361 ECORE_COAL_RX_STATE_MACHINE, timeset); 1362 1363 if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F) 1364 timer_res = 0; 1365 else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF) 1366 timer_res = 1; 1367 else 1368 timer_res = 2; 1369 timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res); 1370 for (i = 0; i < num_tc; i++) { 1371 ecore_int_cau_conf_pi(p_hwfn, p_ptt, 1372 igu_sb_id, TX_PI(i), 1373 ECORE_COAL_TX_STATE_MACHINE, 1374 timeset); 1375 } 1376 } 1377 } 1378 1379 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, 1380 struct ecore_ptt *p_ptt, 1381 u16 igu_sb_id, u32 pi_index, 1382 enum ecore_coalescing_fsm coalescing_fsm, u8 timeset) 1383 { 1384 struct cau_pi_entry pi_entry; 1385 u32 sb_offset, pi_offset; 1386 1387 if (IS_VF(p_hwfn->p_dev)) 1388 return; /* @@@TBD MichalK- VF CAU... */ 1389 1390 sb_offset = igu_sb_id * PIS_PER_SB; 1391 OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry)); 1392 1393 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); 1394 if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE) 1395 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); 1396 else 1397 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); 1398 1399 pi_offset = sb_offset + pi_index; 1400 if (p_hwfn->hw_init_done) { 1401 ecore_wr(p_hwfn, p_ptt, 1402 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), 1403 *((u32 *)&(pi_entry))); 1404 } else { 1405 STORE_RT_REG(p_hwfn, 1406 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, 1407 *((u32 *)&(pi_entry))); 1408 } 1409 } 1410 1411 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn, 1412 struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info) 1413 { 1414 /* zero status block and ack counter */ 1415 sb_info->sb_ack = 0; 1416 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1417 1418 if (IS_PF(p_hwfn->p_dev)) 1419 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, 1420 sb_info->igu_sb_id, 0, 0); 1421 } 1422 1423 /** 1424 * @brief ecore_get_igu_sb_id - given a sw sb_id return the 1425 * igu_sb_id 1426 * 1427 * @param p_hwfn 1428 * @param sb_id 1429 * 1430 * @return u16 1431 */ 1432 static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) 1433 { 1434 u16 igu_sb_id; 1435 1436 /* Assuming continuous set of IGU SBs dedicated for given PF */ 1437 if (sb_id == ECORE_SP_SB_ID) 1438 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 1439 else if (IS_PF(p_hwfn->p_dev)) 1440 igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb; 1441 else 1442 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id); 1443 1444 if (sb_id == ECORE_SP_SB_ID) 1445 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1446 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); 1447 else 1448 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1449 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); 1450 1451 return igu_sb_id; 1452 } 1453 1454 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn, 1455 struct ecore_ptt *p_ptt, 1456 struct ecore_sb_info *sb_info, 1457 void *sb_virt_addr, 1458 dma_addr_t sb_phy_addr, u16 sb_id) 1459 { 1460 sb_info->sb_virt = sb_virt_addr; 1461 sb_info->sb_phys = sb_phy_addr; 1462 1463 sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id); 1464 1465 if (sb_id != ECORE_SP_SB_ID) { 1466 p_hwfn->sbs_info[sb_id] = sb_info; 1467 p_hwfn->num_sbs++; 1468 } 1469 #ifdef ECORE_CONFIG_DIRECT_HWFN 1470 sb_info->p_hwfn = p_hwfn; 1471 #endif 1472 sb_info->p_dev = p_hwfn->p_dev; 1473 1474 /* The igu address will hold the absolute address that needs to be 1475 * written to for a specific status block 1476 */ 1477 if (IS_PF(p_hwfn->p_dev)) { 1478 sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview + 1479 GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3); 1480 1481 } else { 1482 sb_info->igu_addr = 1483 (u8 OSAL_IOMEM *)p_hwfn->regview + 1484 PXP_VF_BAR0_START_IGU + 1485 ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3); 1486 } 1487 1488 sb_info->flags |= ECORE_SB_INFO_INIT; 1489 1490 ecore_int_sb_setup(p_hwfn, p_ptt, sb_info); 1491 1492 return ECORE_SUCCESS; 1493 } 1494 1495 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn, 1496 struct ecore_sb_info *sb_info, 1497 u16 sb_id) 1498 { 1499 if (sb_id == ECORE_SP_SB_ID) { 1500 DP_ERR(p_hwfn, "Do Not free sp sb using this function"); 1501 return ECORE_INVAL; 1502 } 1503 1504 /* zero status block and ack counter */ 1505 sb_info->sb_ack = 0; 1506 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1507 1508 if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) { 1509 p_hwfn->sbs_info[sb_id] = OSAL_NULL; 1510 p_hwfn->num_sbs--; 1511 } 1512 1513 return ECORE_SUCCESS; 1514 } 1515 1516 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn) 1517 { 1518 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb; 1519 1520 if (!p_sb) 1521 return; 1522 1523 if (p_sb->sb_info.sb_virt) { 1524 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 1525 p_sb->sb_info.sb_virt, 1526 p_sb->sb_info.sb_phys, 1527 SB_ALIGNED_SIZE(p_hwfn)); 1528 } 1529 1530 OSAL_FREE(p_hwfn->p_dev, p_sb); 1531 } 1532 1533 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn, 1534 struct ecore_ptt *p_ptt) 1535 { 1536 struct ecore_sb_sp_info *p_sb; 1537 dma_addr_t p_phys = 0; 1538 void *p_virt; 1539 1540 /* SB struct */ 1541 p_sb = 1542 OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, 1543 sizeof(*p_sb)); 1544 if (!p_sb) { 1545 DP_NOTICE(p_hwfn, true, 1546 "Failed to allocate `struct ecore_sb_info'\n"); 1547 return ECORE_NOMEM; 1548 } 1549 1550 /* SB ring */ 1551 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 1552 &p_phys, SB_ALIGNED_SIZE(p_hwfn)); 1553 if (!p_virt) { 1554 DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n"); 1555 OSAL_FREE(p_hwfn->p_dev, p_sb); 1556 return ECORE_NOMEM; 1557 } 1558 1559 /* Status Block setup */ 1560 p_hwfn->p_sp_sb = p_sb; 1561 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, 1562 p_virt, p_phys, ECORE_SP_SB_ID); 1563 1564 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); 1565 1566 return ECORE_SUCCESS; 1567 } 1568 1569 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn, 1570 ecore_int_comp_cb_t comp_cb, 1571 void *cookie, 1572 u8 *sb_idx, __le16 **p_fw_cons) 1573 { 1574 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1575 enum _ecore_status_t rc = ECORE_NOMEM; 1576 u8 pi; 1577 1578 /* Look for a free index */ 1579 for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { 1580 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL) 1581 continue; 1582 1583 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; 1584 p_sp_sb->pi_info_arr[pi].cookie = cookie; 1585 *sb_idx = pi; 1586 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; 1587 rc = ECORE_SUCCESS; 1588 break; 1589 } 1590 1591 return rc; 1592 } 1593 1594 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi) 1595 { 1596 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; 1597 1598 if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL) 1599 return ECORE_NOMEM; 1600 1601 p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL; 1602 p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL; 1603 return ECORE_SUCCESS; 1604 } 1605 1606 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn) 1607 { 1608 return p_hwfn->p_sp_sb->sb_info.igu_sb_id; 1609 } 1610 1611 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn, 1612 struct ecore_ptt *p_ptt, 1613 enum ecore_int_mode int_mode) 1614 { 1615 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; 1616 1617 #ifndef ASIC_ONLY 1618 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1619 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n"); 1620 igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN; 1621 } 1622 #endif 1623 1624 p_hwfn->p_dev->int_mode = int_mode; 1625 switch (p_hwfn->p_dev->int_mode) { 1626 case ECORE_INT_MODE_INTA: 1627 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; 1628 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1629 break; 1630 1631 case ECORE_INT_MODE_MSI: 1632 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1633 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1634 break; 1635 1636 case ECORE_INT_MODE_MSIX: 1637 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1638 break; 1639 case ECORE_INT_MODE_POLL: 1640 break; 1641 } 1642 1643 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 1644 } 1645 1646 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn, 1647 struct ecore_ptt *p_ptt) 1648 { 1649 #ifndef ASIC_ONLY 1650 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1651 DP_INFO(p_hwfn, 1652 "FPGA - Don't enable Attentions in IGU and MISC\n"); 1653 return; 1654 } 1655 #endif 1656 1657 /* Configure AEU signal change to produce attentions */ 1658 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 1659 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); 1660 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); 1661 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); 1662 1663 /* Flush the writes to IGU */ 1664 OSAL_MMIOWB(p_hwfn->p_dev); 1665 1666 /* Unmask AEU signals toward IGU */ 1667 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 1668 } 1669 1670 enum _ecore_status_t 1671 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 1672 enum ecore_int_mode int_mode) 1673 { 1674 enum _ecore_status_t rc = ECORE_SUCCESS; 1675 u32 tmp; 1676 1677 /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop 1678 * attentions. Since we're waiting for BRCM answer regarding this 1679 * attention, in the meanwhile we simply mask it. 1680 */ 1681 tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0); 1682 tmp &= ~0x800; 1683 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp); 1684 1685 ecore_int_igu_enable_attn(p_hwfn, p_ptt); 1686 1687 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { 1688 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn); 1689 if (rc != ECORE_SUCCESS) { 1690 DP_NOTICE(p_hwfn, true, 1691 "Slowpath IRQ request failed\n"); 1692 return ECORE_NORESOURCES; 1693 } 1694 p_hwfn->b_int_requested = true; 1695 } 1696 1697 /* Enable interrupt Generation */ 1698 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 1699 1700 p_hwfn->b_int_enabled = 1; 1701 1702 return rc; 1703 } 1704 1705 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn, 1706 struct ecore_ptt *p_ptt) 1707 { 1708 p_hwfn->b_int_enabled = 0; 1709 1710 if (IS_VF(p_hwfn->p_dev)) 1711 return; 1712 1713 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); 1714 } 1715 1716 #define IGU_CLEANUP_SLEEP_LENGTH (1000) 1717 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn, 1718 struct ecore_ptt *p_ptt, 1719 u32 sb_id, bool cleanup_set, u16 opaque_fid) 1720 { 1721 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; 1722 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id; 1723 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; 1724 u8 type = 0; /* FIXME MichalS type??? */ 1725 1726 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 - 1727 IGU_REG_CLEANUP_STATUS_0) != 0x200); 1728 1729 /* USE Control Command Register to perform cleanup. There is an 1730 * option to do this using IGU bar, but then it can't be used for VFs. 1731 */ 1732 1733 /* Set the data field */ 1734 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); 1735 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type); 1736 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); 1737 1738 /* Set the control register */ 1739 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); 1740 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); 1741 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); 1742 1743 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); 1744 1745 OSAL_BARRIER(p_hwfn->p_dev); 1746 1747 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); 1748 1749 /* Flush the write to IGU */ 1750 OSAL_MMIOWB(p_hwfn->p_dev); 1751 1752 /* calculate where to read the status bit from */ 1753 sb_bit = 1 << (sb_id % 32); 1754 sb_bit_addr = sb_id / 32 * sizeof(u32); 1755 1756 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type); 1757 1758 /* Now wait for the command to complete */ 1759 while (--sleep_cnt) { 1760 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr); 1761 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) 1762 break; 1763 OSAL_MSLEEP(5); 1764 } 1765 1766 if (!sleep_cnt) 1767 DP_NOTICE(p_hwfn, true, 1768 "Timeout waiting for clear status 0x%08x [for sb %d]\n", 1769 val, sb_id); 1770 } 1771 1772 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn, 1773 struct ecore_ptt *p_ptt, 1774 u32 sb_id, u16 opaque, bool b_set) 1775 { 1776 int pi, i; 1777 1778 /* Set */ 1779 if (b_set) 1780 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque); 1781 1782 /* Clear */ 1783 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque); 1784 1785 /* Wait for the IGU SB to cleanup */ 1786 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { 1787 u32 val; 1788 1789 val = ecore_rd(p_hwfn, p_ptt, 1790 IGU_REG_WRITE_DONE_PENDING + 1791 ((sb_id / 32) * 4)); 1792 if (val & (1 << (sb_id % 32))) 1793 OSAL_UDELAY(10); 1794 else 1795 break; 1796 } 1797 if (i == IGU_CLEANUP_SLEEP_LENGTH) 1798 DP_NOTICE(p_hwfn, true, 1799 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", 1800 sb_id); 1801 1802 /* Clear the CAU for the SB */ 1803 for (pi = 0; pi < 12; pi++) 1804 ecore_wr(p_hwfn, p_ptt, 1805 CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0); 1806 } 1807 1808 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn, 1809 struct ecore_ptt *p_ptt, 1810 bool b_set, bool b_slowpath) 1811 { 1812 u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb; 1813 u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt; 1814 u32 sb_id = 0, val = 0; 1815 1816 /* @@@TBD MichalK temporary... should be moved to init-tool... */ 1817 val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); 1818 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; 1819 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; 1820 ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); 1821 /* end temporary */ 1822 1823 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1824 "IGU cleaning SBs [%d,...,%d]\n", 1825 igu_base_sb, igu_base_sb + igu_sb_cnt - 1); 1826 1827 for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++) 1828 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, 1829 p_hwfn->hw_info.opaque_fid, 1830 b_set); 1831 1832 if (!b_slowpath) 1833 return; 1834 1835 sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 1836 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1837 "IGU cleaning slowpath SB [%d]\n", sb_id); 1838 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, 1839 p_hwfn->hw_info.opaque_fid, b_set); 1840 } 1841 1842 static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn, 1843 struct ecore_ptt *p_ptt, u16 sb_id) 1844 { 1845 u32 val = ecore_rd(p_hwfn, p_ptt, 1846 IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id); 1847 struct ecore_igu_block *p_block; 1848 1849 p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; 1850 1851 /* stop scanning when hit first invalid PF entry */ 1852 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && 1853 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) 1854 goto out; 1855 1856 /* Fill the block information */ 1857 p_block->status = ECORE_IGU_STATUS_VALID; 1858 p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER); 1859 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); 1860 p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER); 1861 1862 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1863 "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d" 1864 " is_pf = %d vector_num = 0x%x\n", 1865 sb_id, val, p_block->function_id, p_block->is_pf, 1866 p_block->vector_number); 1867 1868 out: 1869 return val; 1870 } 1871 1872 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn, 1873 struct ecore_ptt *p_ptt) 1874 { 1875 struct ecore_igu_info *p_igu_info; 1876 struct ecore_igu_block *p_block; 1877 u32 min_vf = 0, max_vf = 0, val; 1878 u16 sb_id, last_iov_sb_id = 0; 1879 u16 prev_sb_id = 0xFF; 1880 1881 p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev, 1882 GFP_KERNEL, 1883 sizeof(*p_igu_info)); 1884 if (!p_hwfn->hw_info.p_igu_info) 1885 return ECORE_NOMEM; 1886 1887 OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info)); 1888 1889 p_igu_info = p_hwfn->hw_info.p_igu_info; 1890 1891 /* Initialize base sb / sb cnt for PFs and VFs */ 1892 p_igu_info->igu_base_sb = 0xffff; 1893 p_igu_info->igu_sb_cnt = 0; 1894 p_igu_info->igu_dsb_id = 0xffff; 1895 p_igu_info->igu_base_sb_iov = 0xffff; 1896 1897 if (p_hwfn->p_dev->p_iov_info) { 1898 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info; 1899 1900 min_vf = p_iov->first_vf_in_pf; 1901 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; 1902 } 1903 for (sb_id = 0; 1904 sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 1905 sb_id++) { 1906 p_block = &p_igu_info->igu_map.igu_blocks[sb_id]; 1907 val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id); 1908 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && 1909 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) 1910 break; 1911 1912 if (p_block->is_pf) { 1913 if (p_block->function_id == p_hwfn->rel_pf_id) { 1914 p_block->status |= ECORE_IGU_STATUS_PF; 1915 1916 if (p_block->vector_number == 0) { 1917 if (p_igu_info->igu_dsb_id == 0xffff) 1918 p_igu_info->igu_dsb_id = sb_id; 1919 } else { 1920 if (p_igu_info->igu_base_sb == 0xffff) { 1921 p_igu_info->igu_base_sb = sb_id; 1922 } else if (prev_sb_id != sb_id - 1) { 1923 DP_NOTICE(p_hwfn->p_dev, false, 1924 "consecutive igu" 1925 " vectors for HWFN" 1926 " %x broken", 1927 p_hwfn->rel_pf_id); 1928 break; 1929 } 1930 prev_sb_id = sb_id; 1931 /* we don't count the default */ 1932 (p_igu_info->igu_sb_cnt)++; 1933 } 1934 } 1935 } else { 1936 if ((p_block->function_id >= min_vf) && 1937 (p_block->function_id < max_vf)) { 1938 /* Available for VFs of this PF */ 1939 if (p_igu_info->igu_base_sb_iov == 0xffff) { 1940 p_igu_info->igu_base_sb_iov = sb_id; 1941 } else if (last_iov_sb_id != sb_id - 1) { 1942 if (!val) 1943 DP_VERBOSE(p_hwfn->p_dev, 1944 ECORE_MSG_INTR, 1945 "First uninited IGU" 1946 " CAM entry at" 1947 " index 0x%04x\n", 1948 sb_id); 1949 else 1950 DP_NOTICE(p_hwfn->p_dev, false, 1951 "Consecutive igu" 1952 " vectors for HWFN" 1953 " %x vfs is broken" 1954 " [jumps from %04x" 1955 " to %04x]\n", 1956 p_hwfn->rel_pf_id, 1957 last_iov_sb_id, 1958 sb_id); 1959 break; 1960 } 1961 p_block->status |= ECORE_IGU_STATUS_FREE; 1962 p_hwfn->hw_info.p_igu_info->free_blks++; 1963 last_iov_sb_id = sb_id; 1964 } 1965 } 1966 } 1967 p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks; 1968 1969 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1970 "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] " 1971 "igu_dsb_id=0x%x\n", 1972 p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov, 1973 p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov, 1974 p_igu_info->igu_dsb_id); 1975 1976 if (p_igu_info->igu_base_sb == 0xffff || 1977 p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) { 1978 DP_NOTICE(p_hwfn, true, 1979 "IGU CAM returned invalid values igu_base_sb=0x%x " 1980 "igu_sb_cnt=%d igu_dsb_id=0x%x\n", 1981 p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt, 1982 p_igu_info->igu_dsb_id); 1983 return ECORE_INVAL; 1984 } 1985 1986 return ECORE_SUCCESS; 1987 } 1988 1989 /** 1990 * @brief Initialize igu runtime registers 1991 * 1992 * @param p_hwfn 1993 */ 1994 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn) 1995 { 1996 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; 1997 1998 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); 1999 } 2000 2001 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \ 2002 IGU_CMD_INT_ACK_BASE) 2003 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \ 2004 IGU_CMD_INT_ACK_BASE) 2005 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn) 2006 { 2007 u32 intr_status_hi = 0, intr_status_lo = 0; 2008 u64 intr_status = 0; 2009 2010 intr_status_lo = REG_RD(p_hwfn, 2011 GTT_BAR0_MAP_REG_IGU_CMD + 2012 LSB_IGU_CMD_ADDR * 8); 2013 intr_status_hi = REG_RD(p_hwfn, 2014 GTT_BAR0_MAP_REG_IGU_CMD + 2015 MSB_IGU_CMD_ADDR * 8); 2016 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; 2017 2018 return intr_status; 2019 } 2020 2021 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn) 2022 { 2023 OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn); 2024 p_hwfn->b_sp_dpc_enabled = true; 2025 } 2026 2027 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn) 2028 { 2029 p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn); 2030 if (!p_hwfn->sp_dpc) 2031 return ECORE_NOMEM; 2032 2033 return ECORE_SUCCESS; 2034 } 2035 2036 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn) 2037 { 2038 OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc); 2039 } 2040 2041 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn, 2042 struct ecore_ptt *p_ptt) 2043 { 2044 enum _ecore_status_t rc = ECORE_SUCCESS; 2045 2046 rc = ecore_int_sp_dpc_alloc(p_hwfn); 2047 if (rc != ECORE_SUCCESS) { 2048 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n"); 2049 return rc; 2050 } 2051 2052 rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt); 2053 if (rc != ECORE_SUCCESS) { 2054 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n"); 2055 return rc; 2056 } 2057 2058 rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt); 2059 if (rc != ECORE_SUCCESS) 2060 DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n"); 2061 2062 return rc; 2063 } 2064 2065 void ecore_int_free(struct ecore_hwfn *p_hwfn) 2066 { 2067 ecore_int_sp_sb_free(p_hwfn); 2068 ecore_int_sb_attn_free(p_hwfn); 2069 ecore_int_sp_dpc_free(p_hwfn); 2070 } 2071 2072 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 2073 { 2074 if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn) 2075 return; 2076 2077 ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); 2078 ecore_int_sb_attn_setup(p_hwfn, p_ptt); 2079 ecore_int_sp_dpc_setup(p_hwfn); 2080 } 2081 2082 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn, 2083 struct ecore_sb_cnt_info *p_sb_cnt_info) 2084 { 2085 struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info; 2086 2087 if (!info || !p_sb_cnt_info) 2088 return; 2089 2090 p_sb_cnt_info->sb_cnt = info->igu_sb_cnt; 2091 p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov; 2092 p_sb_cnt_info->sb_free_blk = info->free_blks; 2093 } 2094 2095 u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) 2096 { 2097 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2098 2099 /* Determine origin of SB id */ 2100 if ((sb_id >= p_info->igu_base_sb) && 2101 (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) { 2102 return sb_id - p_info->igu_base_sb; 2103 } else if ((sb_id >= p_info->igu_base_sb_iov) && 2104 (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) { 2105 return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt; 2106 } else { 2107 DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n", 2108 sb_id); 2109 return 0; 2110 } 2111 } 2112 2113 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev) 2114 { 2115 int i; 2116 2117 for_each_hwfn(p_dev, i) 2118 p_dev->hwfns[i].b_int_requested = false; 2119 } 2120 2121 void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable) 2122 { 2123 p_dev->attn_clr_en = clr_enable; 2124 } 2125 2126 enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn, 2127 struct ecore_ptt *p_ptt, 2128 u8 timer_res, u16 sb_id, bool tx) 2129 { 2130 enum _ecore_status_t rc; 2131 struct cau_sb_entry sb_entry; 2132 2133 if (!p_hwfn->hw_init_done) { 2134 DP_ERR(p_hwfn, "hardware not initialized yet\n"); 2135 return ECORE_INVAL; 2136 } 2137 2138 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2139 sb_id * sizeof(u64), 2140 (u64)(osal_uintptr_t)&sb_entry, 2, 0); 2141 if (rc != ECORE_SUCCESS) { 2142 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2143 return rc; 2144 } 2145 2146 if (tx) 2147 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 2148 else 2149 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 2150 2151 rc = ecore_dmae_host2grc(p_hwfn, p_ptt, 2152 (u64)(osal_uintptr_t)&sb_entry, 2153 CAU_REG_SB_VAR_MEMORY + 2154 sb_id * sizeof(u64), 2, 0); 2155 if (rc != ECORE_SUCCESS) { 2156 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); 2157 return rc; 2158 } 2159 2160 return rc; 2161 } 2162