1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2016 - 2018 Cavium Inc. 3 * All rights reserved. 4 * www.cavium.com 5 */ 6 7 #include <rte_string_fns.h> 8 9 #include "bcm_osal.h" 10 #include "ecore.h" 11 #include "ecore_spq.h" 12 #include "ecore_gtt_reg_addr.h" 13 #include "ecore_init_ops.h" 14 #include "ecore_rt_defs.h" 15 #include "ecore_int.h" 16 #include "reg_addr.h" 17 #include "ecore_hw.h" 18 #include "ecore_sriov.h" 19 #include "ecore_vf.h" 20 #include "ecore_hw_defs.h" 21 #include "ecore_hsi_common.h" 22 #include "ecore_mcp.h" 23 24 struct ecore_pi_info { 25 ecore_int_comp_cb_t comp_cb; 26 void *cookie; /* Will be sent to the compl cb function */ 27 }; 28 29 struct ecore_sb_sp_info { 30 struct ecore_sb_info sb_info; 31 /* per protocol index data */ 32 struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4]; 33 }; 34 35 enum ecore_attention_type { 36 ECORE_ATTN_TYPE_ATTN, 37 ECORE_ATTN_TYPE_PARITY, 38 }; 39 40 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ 41 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) 42 43 struct aeu_invert_reg_bit { 44 char bit_name[30]; 45 46 #define ATTENTION_PARITY (1 << 0) 47 48 #define ATTENTION_LENGTH_MASK (0x00000ff0) 49 #define ATTENTION_LENGTH_SHIFT (4) 50 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ 51 ATTENTION_LENGTH_SHIFT) 52 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT) 53 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) 54 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ 55 ATTENTION_PARITY) 56 57 /* Multiple bits start with this offset */ 58 #define ATTENTION_OFFSET_MASK (0x000ff000) 59 #define ATTENTION_OFFSET_SHIFT (12) 60 61 #define ATTENTION_BB_MASK (0x00700000) 62 #define ATTENTION_BB_SHIFT (20) 63 #define ATTENTION_BB(value) ((value) << ATTENTION_BB_SHIFT) 64 #define ATTENTION_BB_DIFFERENT (1 << 23) 65 66 #define ATTENTION_CLEAR_ENABLE (1 << 28) 67 unsigned int flags; 68 69 /* Callback to call if attention will be triggered */ 70 enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn); 71 72 enum block_id block_index; 73 }; 74 75 struct aeu_invert_reg { 76 struct aeu_invert_reg_bit bits[32]; 77 }; 78 79 #define MAX_ATTN_GRPS (8) 80 #define NUM_ATTN_REGS (9) 81 82 static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn) 83 { 84 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); 85 86 DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp); 87 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff); 88 89 return ECORE_SUCCESS; 90 } 91 92 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000) 93 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14) 94 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0) 95 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6) 96 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020) 97 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5) 98 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e) 99 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1) 100 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1) 101 #define ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT (0) 102 #define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1) 103 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) 104 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) 105 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) 106 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e) 107 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) 108 #define 
ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20) 109 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) 110 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0) 111 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) 112 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000) 113 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) 114 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000) 115 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) 116 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn) 117 { 118 u32 tmp = 119 ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 120 PSWHST_REG_VF_DISABLED_ERROR_VALID); 121 122 /* Disabled VF access */ 123 if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) { 124 u32 addr, data; 125 126 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 127 PSWHST_REG_VF_DISABLED_ERROR_ADDRESS); 128 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 129 PSWHST_REG_VF_DISABLED_ERROR_DATA); 130 DP_INFO(p_hwfn->p_dev, 131 "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]" 132 " Write [0x%02x] Addr [0x%08x]\n", 133 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK) 134 >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT), 135 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK) 136 >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT), 137 (u8)((data & 138 ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >> 139 ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT), 140 (u8)((data & 141 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >> 142 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT), 143 (u8)((data & 144 ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >> 145 ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT), 146 addr); 147 } 148 149 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 150 PSWHST_REG_INCORRECT_ACCESS_VALID); 151 if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) { 152 u32 addr, data, length; 153 154 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 155 PSWHST_REG_INCORRECT_ACCESS_ADDRESS); 156 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 157 PSWHST_REG_INCORRECT_ACCESS_DATA); 158 length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 159 PSWHST_REG_INCORRECT_ACCESS_LENGTH); 160 161 DP_INFO(p_hwfn->p_dev, 162 "Incorrect access to %08x of length %08x - PF [%02x]" 163 " VF [%04x] [valid %02x] client [%02x] write [%02x]" 164 " Byte-Enable [%04x] [%08x]\n", 165 addr, length, 166 (u8)((data & 167 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >> 168 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT), 169 (u8)((data & 170 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >> 171 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT), 172 (u8)((data & 173 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >> 174 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT), 175 (u8)((data & 176 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >> 177 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT), 178 (u8)((data & 179 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >> 180 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT), 181 (u8)((data & 182 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >> 183 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT), 184 data); 185 } 186 187 /* TODO - We know 'some' of these are legal due to virtualization, 188 * but is it true for all of them? 
189 */ 190 return ECORE_SUCCESS; 191 } 192 193 #define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0) 194 #define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0) 195 #define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23) 196 #define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24) 197 #define ECORE_GRC_ATTENTION_MASTER_SHIFT (24) 198 #define ECORE_GRC_ATTENTION_PF_MASK (0xf) 199 #define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4) 200 #define ECORE_GRC_ATTENTION_VF_SHIFT (4) 201 #define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14) 202 #define ECORE_GRC_ATTENTION_PRIV_SHIFT (14) 203 #define ECORE_GRC_ATTENTION_PRIV_VF (0) 204 static const char *grc_timeout_attn_master_to_str(u8 master) 205 { 206 switch (master) { 207 case 1: 208 return "PXP"; 209 case 2: 210 return "MCP"; 211 case 3: 212 return "MSDM"; 213 case 4: 214 return "PSDM"; 215 case 5: 216 return "YSDM"; 217 case 6: 218 return "USDM"; 219 case 7: 220 return "TSDM"; 221 case 8: 222 return "XSDM"; 223 case 9: 224 return "DBU"; 225 case 10: 226 return "DMAE"; 227 default: 228 return "Unknown"; 229 } 230 } 231 232 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn) 233 { 234 enum _ecore_status_t rc = ECORE_SUCCESS; 235 u32 tmp, tmp2; 236 237 /* We've already cleared the timeout interrupt register, so we learn 238 * of interrupts via the validity register. 239 * Any attention which is not for a timeout event is treated as fatal. 240 */ 241 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 242 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); 243 if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) { 244 rc = ECORE_INVAL; 245 goto out; 246 } 247 248 /* Read the GRC timeout information */ 249 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 250 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); 251 tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 252 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); 253 254 DP_NOTICE(p_hwfn->p_dev, false, 255 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", 256 tmp2, tmp, 257 (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" 258 : "Read from", 259 (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2, 260 grc_timeout_attn_master_to_str( 261 (tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >> 262 ECORE_GRC_ATTENTION_MASTER_SHIFT), 263 (tmp2 & ECORE_GRC_ATTENTION_PF_MASK), 264 (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >> 265 ECORE_GRC_ATTENTION_PRIV_SHIFT) == 266 ECORE_GRC_ATTENTION_PRIV_VF) ? 
"VF" : "(Irrelevant:)", 267 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >> 268 ECORE_GRC_ATTENTION_VF_SHIFT); 269 270 /* Clean the validity bit */ 271 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 272 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); 273 out: 274 return rc; 275 } 276 277 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29) 278 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26) 279 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20) 280 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) 281 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19) 282 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24) 283 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) 284 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21) 285 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22) 286 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23) 287 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23) 288 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25) 289 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23) 290 291 enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, 292 struct ecore_ptt *p_ptt, 293 bool is_hw_init) 294 { 295 u32 tmp; 296 char str[512] = {0}; 297 298 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); 299 if (tmp & ECORE_PGLUE_ATTENTION_VALID) { 300 u32 addr_lo, addr_hi, details; 301 302 addr_lo = ecore_rd(p_hwfn, p_ptt, 303 PGLUE_B_REG_TX_ERR_WR_ADD_31_0); 304 addr_hi = ecore_rd(p_hwfn, p_ptt, 305 PGLUE_B_REG_TX_ERR_WR_ADD_63_32); 306 details = ecore_rd(p_hwfn, p_ptt, 307 PGLUE_B_REG_TX_ERR_WR_DETAILS); 308 OSAL_SNPRINTF(str, 512, 309 "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 310 addr_hi, addr_lo, details, 311 (u8)((details & 312 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> 313 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 314 (u8)((details & 315 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> 316 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 317 (u8)((details & 318 ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 319 tmp, 320 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 321 1 : 0), 322 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 323 1 : 0), 324 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 325 1 : 0)); 326 if (is_hw_init) 327 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "%s", str); 328 else 329 DP_NOTICE(p_hwfn, false, "%s", str); 330 } 331 332 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); 333 if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) { 334 u32 addr_lo, addr_hi, details; 335 336 addr_lo = ecore_rd(p_hwfn, p_ptt, 337 PGLUE_B_REG_TX_ERR_RD_ADD_31_0); 338 addr_hi = ecore_rd(p_hwfn, p_ptt, 339 PGLUE_B_REG_TX_ERR_RD_ADD_63_32); 340 details = ecore_rd(p_hwfn, p_ptt, 341 PGLUE_B_REG_TX_ERR_RD_DETAILS); 342 343 DP_NOTICE(p_hwfn, false, 344 "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", 345 addr_hi, addr_lo, details, 346 (u8)((details & 347 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> 348 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), 349 (u8)((details & 350 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> 351 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), 352 (u8)((details & 353 ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0), 354 tmp, 355 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 356 1 : 0), 357 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 
358 1 : 0), 359 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 360 1 : 0)); 361 } 362 363 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); 364 if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID) 365 DP_NOTICE(p_hwfn, false, "ICPL erorr - %08x\n", tmp); 366 367 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); 368 if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) { 369 u32 addr_hi, addr_lo; 370 371 addr_lo = ecore_rd(p_hwfn, p_ptt, 372 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); 373 addr_hi = ecore_rd(p_hwfn, p_ptt, 374 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); 375 376 DP_NOTICE(p_hwfn, false, 377 "ICPL erorr - %08x [Address %08x:%08x]\n", 378 tmp, addr_hi, addr_lo); 379 } 380 381 tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2); 382 if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) { 383 u32 addr_hi, addr_lo, details; 384 385 addr_lo = ecore_rd(p_hwfn, p_ptt, 386 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); 387 addr_hi = ecore_rd(p_hwfn, p_ptt, 388 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); 389 details = ecore_rd(p_hwfn, p_ptt, 390 PGLUE_B_REG_VF_ILT_ERR_DETAILS); 391 392 DP_NOTICE(p_hwfn, false, 393 "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", 394 details, tmp, addr_hi, addr_lo); 395 } 396 397 /* Clear the indications */ 398 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); 399 400 return ECORE_SUCCESS; 401 } 402 403 static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn) 404 { 405 return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false); 406 } 407 408 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn) 409 { 410 DP_NOTICE(p_hwfn, false, "FW assertion!\n"); 411 412 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT); 413 414 return ECORE_INVAL; 415 } 416 417 static enum _ecore_status_t 418 ecore_general_attention_35(struct ecore_hwfn *p_hwfn) 419 { 420 DP_INFO(p_hwfn, "General attention 35!\n"); 421 422 return ECORE_SUCCESS; 423 } 424 425 #define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff) 426 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff) 427 #define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) 428 #define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f) 429 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16) 430 431 #define ECORE_DB_REC_COUNT 1000 432 #define ECORE_DB_REC_INTERVAL 100 433 434 static enum _ecore_status_t ecore_db_rec_flush_queue(struct ecore_hwfn *p_hwfn, 435 struct ecore_ptt *p_ptt) 436 { 437 u32 count = ECORE_DB_REC_COUNT; 438 u32 usage = 1; 439 440 /* wait for usage to zero or count to run out. This is necessary since 441 * EDPM doorbell transactions can take multiple 64b cycles, and as such 442 * can "split" over the pci. Possibly, the doorbell drop can happen with 443 * half an EDPM in the queue and other half dropped. Another EDPM 444 * doorbell to the same address (from doorbell recovery mechanism or 445 * from the doorbelling entity) could have first half dropped and second 446 * half interperted as continuation of the first. To prevent such 447 * malformed doorbells from reaching the device, flush the queue before 448 * releaseing the overflow sticky indication. 449 */ 450 while (count-- && usage) { 451 usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT); 452 OSAL_UDELAY(ECORE_DB_REC_INTERVAL); 453 } 454 455 /* should have been depleted by now */ 456 if (usage) { 457 DP_NOTICE(p_hwfn->p_dev, false, 458 "DB recovery: doorbell usage failed to zero after %d usec. 
usage was %x\n", 459 ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage); 460 return ECORE_TIMEOUT; 461 } 462 463 return ECORE_SUCCESS; 464 } 465 466 enum _ecore_status_t ecore_db_rec_handler(struct ecore_hwfn *p_hwfn, 467 struct ecore_ptt *p_ptt) 468 { 469 u32 overflow; 470 enum _ecore_status_t rc; 471 472 overflow = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); 473 DP_NOTICE(p_hwfn, false, "PF Overflow sticky 0x%x\n", overflow); 474 if (!overflow) { 475 ecore_db_recovery_execute(p_hwfn, DB_REC_ONCE); 476 return ECORE_SUCCESS; 477 } 478 479 if (ecore_edpm_enabled(p_hwfn)) { 480 rc = ecore_db_rec_flush_queue(p_hwfn, p_ptt); 481 if (rc != ECORE_SUCCESS) 482 return rc; 483 } 484 485 /* flush any pedning (e)dpm as they may never arrive */ 486 ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); 487 488 /* release overflow sticky indication (stop silently dropping 489 * everything) 490 */ 491 ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); 492 493 /* repeat all last doorbells (doorbell drop recovery) */ 494 ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL); 495 496 return ECORE_SUCCESS; 497 } 498 499 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn) 500 { 501 u32 int_sts, first_drop_reason, details, address, all_drops_reason; 502 struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt; 503 enum _ecore_status_t rc; 504 505 int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); 506 DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n", 507 int_sts); 508 509 /* int_sts may be zero since all PFs were interrupted for doorbell 510 * overflow but another one already handled it. Can abort here. If 511 * This PF also requires overflow recovery we will be interrupted again 512 */ 513 if (!int_sts) 514 return ECORE_SUCCESS; 515 516 /* check if db_drop or overflow happened */ 517 if (int_sts & (DORQ_REG_INT_STS_DB_DROP | 518 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { 519 /* obtain data about db drop/overflow */ 520 first_drop_reason = ecore_rd(p_hwfn, p_ptt, 521 DORQ_REG_DB_DROP_REASON) & 522 ECORE_DORQ_ATTENTION_REASON_MASK; 523 details = ecore_rd(p_hwfn, p_ptt, 524 DORQ_REG_DB_DROP_DETAILS); 525 address = ecore_rd(p_hwfn, p_ptt, 526 DORQ_REG_DB_DROP_DETAILS_ADDRESS); 527 all_drops_reason = ecore_rd(p_hwfn, p_ptt, 528 DORQ_REG_DB_DROP_DETAILS_REASON); 529 530 /* log info */ 531 DP_NOTICE(p_hwfn->p_dev, false, 532 "Doorbell drop occurred\n" 533 "Address\t\t0x%08x\t(second BAR address)\n" 534 "FID\t\t0x%04x\t\t(Opaque FID)\n" 535 "Size\t\t0x%04x\t\t(in bytes)\n" 536 "1st drop reason\t0x%08x\t(details on first drop since last handling)\n" 537 "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n", 538 address, 539 GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE), 540 GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4, 541 first_drop_reason, all_drops_reason); 542 543 rc = ecore_db_rec_handler(p_hwfn, p_ptt); 544 OSAL_DB_REC_OCCURRED(p_hwfn); 545 if (rc != ECORE_SUCCESS) 546 return rc; 547 548 /* clear the doorbell drop details and prepare for next drop */ 549 ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); 550 551 /* mark interrupt as handeld (note: even if drop was due to a 552 * different reason than overflow we mark as handled) 553 */ 554 ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR, 555 DORQ_REG_INT_STS_DB_DROP | 556 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR); 557 558 /* if there are no indications otherthan drop indications, 559 * success 560 */ 561 if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP | 562 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR | 563 
DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0) 564 return ECORE_SUCCESS; 565 } 566 567 /* some other indication was present - non recoverable */ 568 DP_INFO(p_hwfn, "DORQ fatal attention\n"); 569 570 return ECORE_INVAL; 571 } 572 573 static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn) 574 { 575 #ifndef ASIC_ONLY 576 if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) { 577 u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 578 TM_REG_INT_STS_1); 579 580 if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN | 581 TM_REG_INT_STS_1_PEND_CONN_SCAN)) 582 return ECORE_INVAL; 583 584 if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN | 585 TM_REG_INT_STS_1_PEND_CONN_SCAN)) 586 DP_INFO(p_hwfn, 587 "TM attention on emulation - most likely" 588 " results of clock-ratios\n"); 589 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1); 590 val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN | 591 TM_REG_INT_MASK_1_PEND_TASK_SCAN; 592 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val); 593 594 return ECORE_SUCCESS; 595 } 596 #endif 597 598 return ECORE_INVAL; 599 } 600 601 /* Instead of major changes to the data-structure, we have a some 'special' 602 * identifiers for sources that changed meaning between adapters. 603 */ 604 enum aeu_invert_reg_special_type { 605 AEU_INVERT_REG_SPECIAL_CNIG_0, 606 AEU_INVERT_REG_SPECIAL_CNIG_1, 607 AEU_INVERT_REG_SPECIAL_CNIG_2, 608 AEU_INVERT_REG_SPECIAL_CNIG_3, 609 AEU_INVERT_REG_SPECIAL_MAX, 610 }; 611 612 static struct aeu_invert_reg_bit 613 aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = { 614 {"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 615 {"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 616 {"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 617 {"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG}, 618 }; 619 620 /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ 621 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { 622 { 623 { /* After Invert 1 */ 624 {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 625 MAX_BLOCK_ID}, 626 } 627 }, 628 629 { 630 { /* After Invert 2 */ 631 {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 632 {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 633 {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb, 634 BLOCK_PGLUE_B}, 635 {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 636 {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 637 {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 638 {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 639 {"SW timers #%d", 640 (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), 641 OSAL_NULL, MAX_BLOCK_ID}, 642 {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 643 BLOCK_PGLCS}, 644 } 645 }, 646 647 { 648 { /* After Invert 3 */ 649 {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 650 MAX_BLOCK_ID}, 651 } 652 }, 653 654 { 655 { /* After Invert 4 */ 656 {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, 657 ecore_fw_assertion, MAX_BLOCK_ID}, 658 {"General Attention %d", 659 (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT), 660 OSAL_NULL, MAX_BLOCK_ID}, 661 {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, 662 ecore_general_attention_35, MAX_BLOCK_ID}, 663 {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 664 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), 665 OSAL_NULL, BLOCK_NWS}, 666 {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 667 
ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), 668 OSAL_NULL, BLOCK_NWS}, 669 {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT | 670 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), 671 OSAL_NULL, BLOCK_NWM}, 672 {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | 673 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), 674 OSAL_NULL, BLOCK_NWM}, 675 {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID}, 676 {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 677 {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 678 {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 679 {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 680 {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 681 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 682 MAX_BLOCK_ID}, 683 {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG}, 684 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB}, 685 {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB}, 686 {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB}, 687 {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS}, 688 } 689 }, 690 691 { 692 { /* After Invert 5 */ 693 {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC}, 694 {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1}, 695 {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2}, 696 {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB}, 697 {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF}, 698 {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM}, 699 {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM}, 700 {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM}, 701 {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM}, 702 {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM}, 703 {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM}, 704 {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM}, 705 {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM}, 706 {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM}, 707 {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM}, 708 {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM}, 709 } 710 }, 711 712 { 713 { /* After Invert 6 */ 714 {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM}, 715 {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM}, 716 {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM}, 717 {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM}, 718 {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM}, 719 {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM}, 720 {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM}, 721 {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM}, 722 {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM}, 723 {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD}, 724 {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD}, 725 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD}, 726 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD}, 727 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ}, 728 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG}, 729 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC}, 730 } 731 }, 732 733 { 734 { /* After Invert 7 */ 735 {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC}, 736 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU}, 737 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE}, 738 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU}, 739 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, 740 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU}, 741 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU}, 742 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM}, 743 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC}, 744 
{"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF}, 745 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF}, 746 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS}, 747 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC}, 748 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS}, 749 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE}, 750 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 751 {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ}, 752 } 753 }, 754 755 { 756 { /* After Invert 8 */ 757 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2}, 758 {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR}, 759 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2}, 760 {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD}, 761 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2}, 762 {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST}, 763 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2}, 764 {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC}, 765 {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU}, 766 {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI}, 767 {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 768 {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 769 {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 770 {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 771 {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 772 {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 773 {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS}, 774 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, 775 {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 776 {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, 777 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 778 MAX_BLOCK_ID}, 779 } 780 }, 781 782 { 783 { /* After Invert 9 */ 784 {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 785 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, 786 MAX_BLOCK_ID}, 787 {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 788 {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, 789 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, 790 MAX_BLOCK_ID}, 791 } 792 }, 793 794 }; 795 796 static struct aeu_invert_reg_bit * 797 ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn, 798 struct aeu_invert_reg_bit *p_bit) 799 { 800 if (!ECORE_IS_BB(p_hwfn->p_dev)) 801 return p_bit; 802 803 if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) 804 return p_bit; 805 806 return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> 807 ATTENTION_BB_SHIFT]; 808 } 809 810 static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn, 811 struct aeu_invert_reg_bit *p_bit) 812 { 813 return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags & 814 ATTENTION_PARITY); 815 } 816 817 #define ATTN_STATE_BITS (0xfff) 818 #define ATTN_BITS_MASKABLE (0x3ff) 819 struct ecore_sb_attn_info { 820 /* Virtual & Physical address of the SB */ 821 struct atten_status_block *sb_attn; 822 dma_addr_t sb_phys; 823 824 /* Last seen running index */ 825 u16 index; 826 827 /* A mask of the AEU bits resulting in a parity error */ 828 u32 parity_mask[NUM_ATTN_REGS]; 829 830 /* A pointer to the attention description structure */ 831 struct aeu_invert_reg *p_aeu_desc; 832 833 /* Previously asserted attentions, which are still unasserted */ 834 u16 known_attn; 835 836 /* Cleanup address for the link's general hw attention */ 837 u32 mfw_attn_addr; 838 }; 839 840 
static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn, 841 struct ecore_sb_attn_info *p_sb_desc) 842 { 843 u16 rc = 0, index; 844 845 OSAL_MMIOWB(p_hwfn->p_dev); 846 847 index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index); 848 if (p_sb_desc->index != index) { 849 p_sb_desc->index = index; 850 rc = ECORE_SB_ATT_IDX; 851 } 852 853 OSAL_MMIOWB(p_hwfn->p_dev); 854 855 return rc; 856 } 857 858 /** 859 * @brief ecore_int_assertion - handles asserted attention bits 860 * 861 * @param p_hwfn 862 * @param asserted_bits newly asserted bits 863 * @return enum _ecore_status_t 864 */ 865 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn, 866 u16 asserted_bits) 867 { 868 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 869 u32 igu_mask; 870 871 /* Mask the source of the attention in the IGU */ 872 igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 873 IGU_REG_ATTENTION_ENABLE); 874 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", 875 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); 876 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); 877 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); 878 879 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 880 "inner known ATTN state: 0x%04x --> 0x%04x\n", 881 sb_attn_sw->known_attn, 882 sb_attn_sw->known_attn | asserted_bits); 883 sb_attn_sw->known_attn |= asserted_bits; 884 885 /* Handle MCP events */ 886 if (asserted_bits & 0x100) { 887 ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); 888 /* Clean the MCP attention */ 889 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, 890 sb_attn_sw->mfw_attn_addr, 0); 891 } 892 893 /* FIXME - this will change once we'll have GOOD gtt definitions */ 894 DIRECT_REG_WR(p_hwfn, 895 (u8 OSAL_IOMEM *) p_hwfn->regview + 896 GTT_BAR0_MAP_REG_IGU_CMD + 897 ((IGU_CMD_ATTN_BIT_SET_UPPER - 898 IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits); 899 900 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n", 901 asserted_bits); 902 903 return ECORE_SUCCESS; 904 } 905 906 static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn, 907 enum block_id id, enum dbg_attn_type type, 908 bool b_clear) 909 { 910 /* @DPDK */ 911 DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type); 912 } 913 914 /** 915 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single 916 * cause of the attention 917 * 918 * @param p_hwfn 919 * @param p_aeu - descriptor of an AEU bit which caused the attention 920 * @param aeu_en_reg - register offset of the AEU enable reg. which configured 921 * this bit to this group. 
922 * @param bit_index - index of this bit in the aeu_en_reg 923 * 924 * @return enum _ecore_status_t 925 */ 926 static enum _ecore_status_t 927 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn, 928 struct aeu_invert_reg_bit *p_aeu, 929 u32 aeu_en_reg, 930 const char *p_bit_name, 931 u32 bitmask) 932 { 933 enum _ecore_status_t rc = ECORE_INVAL; 934 bool b_fatal = false; 935 936 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", 937 p_bit_name, bitmask); 938 939 /* Call callback before clearing the interrupt status */ 940 if (p_aeu->cb) { 941 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", 942 p_bit_name); 943 rc = p_aeu->cb(p_hwfn); 944 } 945 946 if (rc != ECORE_SUCCESS) 947 b_fatal = true; 948 949 /* Print HW block interrupt registers */ 950 if (p_aeu->block_index != MAX_BLOCK_ID) { 951 ecore_int_attn_print(p_hwfn, p_aeu->block_index, 952 ATTN_TYPE_INTERRUPT, !b_fatal); 953 } 954 955 /* @DPDK */ 956 /* Reach assertion if attention is fatal */ 957 if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) { 958 DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n", 959 p_bit_name); 960 961 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); 962 } 963 964 /* Prevent this Attention from being asserted in the future */ 965 if (p_aeu->flags & ATTENTION_CLEAR_ENABLE || 966 p_hwfn->p_dev->attn_clr_en) { 967 u32 val; 968 u32 mask = ~bitmask; 969 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 970 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask)); 971 DP_ERR(p_hwfn, "`%s' - Disabled future attentions\n", 972 p_bit_name); 973 } 974 975 return rc; 976 } 977 978 /** 979 * @brief ecore_int_deassertion_parity - handle a single parity AEU source 980 * 981 * @param p_hwfn 982 * @param p_aeu - descriptor of an AEU bit which caused the parity 983 * @param aeu_en_reg - address of the AEU enable register 984 * @param bit_index 985 */ 986 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn, 987 struct aeu_invert_reg_bit *p_aeu, 988 u32 aeu_en_reg, u8 bit_index) 989 { 990 u32 block_id = p_aeu->block_index, mask, val; 991 992 DP_NOTICE(p_hwfn->p_dev, false, 993 "%s parity attention is set [address 0x%08x, bit %d]\n", 994 p_aeu->bit_name, aeu_en_reg, bit_index); 995 996 if (block_id != MAX_BLOCK_ID) { 997 ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false); 998 999 /* In A0, there's a single parity bit for several blocks */ 1000 if (block_id == BLOCK_BTB) { 1001 ecore_int_attn_print(p_hwfn, BLOCK_OPTE, 1002 ATTN_TYPE_PARITY, false); 1003 ecore_int_attn_print(p_hwfn, BLOCK_MCP, 1004 ATTN_TYPE_PARITY, false); 1005 } 1006 } 1007 1008 /* Prevent this parity error from being re-asserted */ 1009 mask = ~(0x1 << bit_index); 1010 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); 1011 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask); 1012 DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n", 1013 p_aeu->bit_name); 1014 } 1015 1016 /** 1017 * @brief - handles deassertion of previously asserted attentions. 
1018 * 1019 * @param p_hwfn 1020 * @param deasserted_bits - newly deasserted bits 1021 * @return enum _ecore_status_t 1022 * 1023 */ 1024 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn, 1025 u16 deasserted_bits) 1026 { 1027 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; 1028 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en; 1029 u8 i, j, k, bit_idx; 1030 enum _ecore_status_t rc = ECORE_SUCCESS; 1031 1032 /* Read the attention registers in the AEU */ 1033 for (i = 0; i < NUM_ATTN_REGS; i++) { 1034 aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1035 MISC_REG_AEU_AFTER_INVERT_1_IGU + 1036 i * 0x4); 1037 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1038 "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]); 1039 } 1040 1041 /* Handle parity attentions first */ 1042 for (i = 0; i < NUM_ATTN_REGS; i++) { 1043 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; 1044 u32 parities; 1045 1046 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32); 1047 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1048 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; 1049 1050 /* Skip register in which no parity bit is currently set */ 1051 if (!parities) 1052 continue; 1053 1054 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 1055 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; 1056 1057 if (ecore_int_is_parity_flag(p_hwfn, p_bit) && 1058 !!(parities & (1 << bit_idx))) 1059 ecore_int_deassertion_parity(p_hwfn, p_bit, 1060 aeu_en, bit_idx); 1061 1062 bit_idx += ATTENTION_LENGTH(p_bit->flags); 1063 } 1064 } 1065 1066 /* Find non-parity cause for attention and act */ 1067 for (k = 0; k < MAX_ATTN_GRPS; k++) { 1068 struct aeu_invert_reg_bit *p_aeu; 1069 1070 /* Handle only groups whose attention is currently deasserted */ 1071 if (!(deasserted_bits & (1 << k))) 1072 continue; 1073 1074 for (i = 0; i < NUM_ATTN_REGS; i++) { 1075 u32 bits; 1076 1077 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + 1078 i * sizeof(u32) + 1079 k * sizeof(u32) * NUM_ATTN_REGS; 1080 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); 1081 bits = aeu_inv_arr[i] & en; 1082 1083 /* Skip if no bit from this group is currently set */ 1084 if (!bits) 1085 continue; 1086 1087 /* Find all set bits from current register which belong 1088 * to current group, making them responsible for the 1089 * previous assertion. 1090 */ 1091 for (j = 0, bit_idx = 0; bit_idx < 32; j++) { 1092 unsigned long int bitmask; 1093 u8 bit, bit_len; 1094 1095 /* Need to account bits with changed meaning */ 1096 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; 1097 1098 bit = bit_idx; 1099 bit_len = ATTENTION_LENGTH(p_aeu->flags); 1100 if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) { 1101 /* Skip Parity */ 1102 bit++; 1103 bit_len--; 1104 } 1105 1106 /* Find the bits relating to HW-block, then 1107 * shift so they'll become LSB. 1108 */ 1109 bitmask = bits & (((1 << bit_len) - 1) << bit); 1110 bitmask >>= bit; 1111 1112 if (bitmask) { 1113 u32 flags = p_aeu->flags; 1114 char bit_name[30]; 1115 u8 num; 1116 1117 num = (u8)OSAL_FIND_FIRST_BIT(&bitmask, 1118 bit_len); 1119 1120 /* Some bits represent more than a 1121 * a single interrupt. Correctly print 1122 * their name. 1123 */ 1124 if (ATTENTION_LENGTH(flags) > 2 || 1125 ((flags & ATTENTION_PAR_INT) && 1126 ATTENTION_LENGTH(flags) > 1)) 1127 OSAL_SNPRINTF(bit_name, 30, 1128 p_aeu->bit_name, 1129 num); 1130 else 1131 strlcpy(bit_name, 1132 p_aeu->bit_name, 1133 sizeof(bit_name)); 1134 1135 /* We now need to pass bitmask in its 1136 * correct position. 
1137 */ 1138 bitmask <<= bit; 1139 1140 /* Handle source of the attention */ 1141 ecore_int_deassertion_aeu_bit(p_hwfn, 1142 p_aeu, 1143 aeu_en, 1144 bit_name, 1145 bitmask); 1146 } 1147 1148 bit_idx += ATTENTION_LENGTH(p_aeu->flags); 1149 } 1150 } 1151 } 1152 1153 /* Clear IGU indication for the deasserted bits */ 1154 /* FIXME - this will change once we'll have GOOD gtt definitions */ 1155 DIRECT_REG_WR(p_hwfn, 1156 (u8 OSAL_IOMEM *) p_hwfn->regview + 1157 GTT_BAR0_MAP_REG_IGU_CMD + 1158 ((IGU_CMD_ATTN_BIT_CLR_UPPER - 1159 IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits)); 1160 1161 /* Unmask deasserted attentions in IGU */ 1162 aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, 1163 IGU_REG_ATTENTION_ENABLE); 1164 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); 1165 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); 1166 1167 /* Clear deassertion from inner state */ 1168 sb_attn_sw->known_attn &= ~deasserted_bits; 1169 1170 return rc; 1171 } 1172 1173 static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn) 1174 { 1175 struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; 1176 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; 1177 u16 index = 0, asserted_bits, deasserted_bits; 1178 u32 attn_bits = 0, attn_acks = 0; 1179 enum _ecore_status_t rc = ECORE_SUCCESS; 1180 1181 /* Read current attention bits/acks - safeguard against attentions 1182 * by guaranting work on a synchronized timeframe 1183 */ 1184 do { 1185 index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index); 1186 attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits); 1187 attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack); 1188 } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index)); 1189 p_sb_attn->sb_index = index; 1190 1191 /* Attention / Deassertion are meaningful (and in correct state) 1192 * only when they differ and consistent with known state - deassertion 1193 * when previous attention & current ack, and assertion when current 1194 * attention with no previous attention 1195 */ 1196 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & 1197 ~p_sb_attn_sw->known_attn; 1198 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & 1199 p_sb_attn_sw->known_attn; 1200 1201 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) 1202 DP_INFO(p_hwfn, 1203 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. 
known: 0x%04x]\n", 1204 index, attn_bits, attn_acks, asserted_bits, 1205 deasserted_bits, p_sb_attn_sw->known_attn); 1206 else if (asserted_bits == 0x100) 1207 DP_INFO(p_hwfn, "MFW indication via attention\n"); 1208 else 1209 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1210 "MFW indication [deassertion]\n"); 1211 1212 if (asserted_bits) { 1213 rc = ecore_int_assertion(p_hwfn, asserted_bits); 1214 if (rc) 1215 return rc; 1216 } 1217 1218 if (deasserted_bits) 1219 rc = ecore_int_deassertion(p_hwfn, deasserted_bits); 1220 1221 return rc; 1222 } 1223 1224 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn, 1225 void OSAL_IOMEM *igu_addr, u32 ack_cons) 1226 { 1227 struct igu_prod_cons_update igu_ack; 1228 1229 OSAL_MEMSET(&igu_ack, 0, sizeof(struct igu_prod_cons_update)); 1230 igu_ack.sb_id_and_flags = 1231 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | 1232 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | 1233 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | 1234 (IGU_SEG_ACCESS_ATTN << 1235 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); 1236 1237 DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags); 1238 1239 /* Both segments (interrupts & acks) are written to same place address; 1240 * Need to guarantee all commands will be received (in-order) by HW. 1241 */ 1242 OSAL_MMIOWB(p_hwfn->p_dev); 1243 OSAL_BARRIER(p_hwfn->p_dev); 1244 } 1245 1246 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie) 1247 { 1248 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie; 1249 struct ecore_pi_info *pi_info = OSAL_NULL; 1250 struct ecore_sb_attn_info *sb_attn; 1251 struct ecore_sb_info *sb_info; 1252 int arr_size; 1253 u16 rc = 0; 1254 1255 if (!p_hwfn) 1256 return; 1257 1258 if (!p_hwfn->p_sp_sb) { 1259 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n"); 1260 return; 1261 } 1262 1263 sb_info = &p_hwfn->p_sp_sb->sb_info; 1264 arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); 1265 if (!sb_info) { 1266 DP_ERR(p_hwfn->p_dev, 1267 "Status block is NULL - cannot ack interrupts\n"); 1268 return; 1269 } 1270 1271 if (!p_hwfn->p_sb_attn) { 1272 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn"); 1273 return; 1274 } 1275 sb_attn = p_hwfn->p_sb_attn; 1276 1277 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n", 1278 p_hwfn, p_hwfn->my_id); 1279 1280 /* Disable ack for def status block. Required both for msix + 1281 * inta in non-mask mode, in inta does no harm. 1282 */ 1283 ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0); 1284 1285 /* Gather Interrupts/Attentions information */ 1286 if (!sb_info->sb_virt) { 1287 DP_ERR(p_hwfn->p_dev, 1288 "Interrupt Status block is NULL -" 1289 " cannot check for new interrupts!\n"); 1290 } else { 1291 u32 tmp_index = sb_info->sb_ack; 1292 rc = ecore_sb_update_sb_idx(sb_info); 1293 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, 1294 "Interrupt indices: 0x%08x --> 0x%08x\n", 1295 tmp_index, sb_info->sb_ack); 1296 } 1297 1298 if (!sb_attn || !sb_attn->sb_attn) { 1299 DP_ERR(p_hwfn->p_dev, 1300 "Attentions Status block is NULL -" 1301 " cannot check for new attentions!\n"); 1302 } else { 1303 u16 tmp_index = sb_attn->index; 1304 1305 rc |= ecore_attn_update_idx(p_hwfn, sb_attn); 1306 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, 1307 "Attention indices: 0x%08x --> 0x%08x\n", 1308 tmp_index, sb_attn->index); 1309 } 1310 1311 /* Check if we expect interrupts at this time. if not just ack them */ 1312 if (!(rc & ECORE_SB_EVENT_MASK)) { 1313 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1314 return; 1315 } 1316 1317 /* Check the validity of the DPC ptt. 
If not ack interrupts and fail */ 1318 1319 if (!p_hwfn->p_dpc_ptt) { 1320 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n"); 1321 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1322 return; 1323 } 1324 1325 if (rc & ECORE_SB_ATT_IDX) 1326 ecore_int_attentions(p_hwfn); 1327 1328 if (rc & ECORE_SB_IDX) { 1329 int pi; 1330 1331 /* Since we only looked at the SB index, it's possible more 1332 * than a single protocol-index on the SB incremented. 1333 * Iterate over all configured protocol indices and check 1334 * whether something happened for each. 1335 */ 1336 for (pi = 0; pi < arr_size; pi++) { 1337 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; 1338 if (pi_info->comp_cb != OSAL_NULL) 1339 pi_info->comp_cb(p_hwfn, pi_info->cookie); 1340 } 1341 } 1342 1343 if (sb_attn && (rc & ECORE_SB_ATT_IDX)) { 1344 /* This should be done before the interrupts are enabled, 1345 * since otherwise a new attention will be generated. 1346 */ 1347 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); 1348 } 1349 1350 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); 1351 } 1352 1353 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn) 1354 { 1355 struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn; 1356 1357 if (!p_sb) 1358 return; 1359 1360 if (p_sb->sb_attn) { 1361 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn, 1362 p_sb->sb_phys, 1363 SB_ATTN_ALIGNED_SIZE(p_hwfn)); 1364 } 1365 OSAL_FREE(p_hwfn->p_dev, p_sb); 1366 } 1367 1368 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn, 1369 struct ecore_ptt *p_ptt) 1370 { 1371 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1372 1373 OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); 1374 1375 sb_info->index = 0; 1376 sb_info->known_attn = 0; 1377 1378 /* Configure Attention Status Block in IGU */ 1379 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, 1380 DMA_LO(p_hwfn->p_sb_attn->sb_phys)); 1381 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, 1382 DMA_HI(p_hwfn->p_sb_attn->sb_phys)); 1383 } 1384 1385 static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn, 1386 struct ecore_ptt *p_ptt, 1387 void *sb_virt_addr, dma_addr_t sb_phy_addr) 1388 { 1389 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; 1390 int i, j, k; 1391 1392 sb_info->sb_attn = sb_virt_addr; 1393 sb_info->sb_phys = sb_phy_addr; 1394 1395 /* Set the pointer to the AEU descriptors */ 1396 sb_info->p_aeu_desc = aeu_descs; 1397 1398 /* Calculate Parity Masks */ 1399 OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); 1400 for (i = 0; i < NUM_ATTN_REGS; i++) { 1401 /* j is array index, k is bit index */ 1402 for (j = 0, k = 0; k < 32; j++) { 1403 struct aeu_invert_reg_bit *p_aeu; 1404 1405 p_aeu = &aeu_descs[i].bits[j]; 1406 if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) 1407 sb_info->parity_mask[i] |= 1 << k; 1408 1409 k += ATTENTION_LENGTH(p_aeu->flags); 1410 } 1411 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1412 "Attn Mask [Reg %d]: 0x%08x\n", 1413 i, sb_info->parity_mask[i]); 1414 } 1415 1416 /* Set the address of cleanup for the mcp attention */ 1417 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + 1418 MISC_REG_AEU_GENERAL_ATTN_0; 1419 1420 ecore_int_sb_attn_setup(p_hwfn, p_ptt); 1421 } 1422 1423 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn, 1424 struct ecore_ptt *p_ptt) 1425 { 1426 struct ecore_dev *p_dev = p_hwfn->p_dev; 1427 struct ecore_sb_attn_info *p_sb; 1428 dma_addr_t p_phys = 0; 1429 void *p_virt; 1430 1431 /* SB struct */ 1432 p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb)); 1433 if (!p_sb) 
{ 1434 DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n"); 1435 return ECORE_NOMEM; 1436 } 1437 1438 /* SB ring */ 1439 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, 1440 SB_ATTN_ALIGNED_SIZE(p_hwfn)); 1441 if (!p_virt) { 1442 DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n"); 1443 OSAL_FREE(p_dev, p_sb); 1444 return ECORE_NOMEM; 1445 } 1446 1447 /* Attention setup */ 1448 p_hwfn->p_sb_attn = p_sb; 1449 ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); 1450 1451 return ECORE_SUCCESS; 1452 } 1453 1454 /* coalescing timeout = timeset << (timer_res + 1) */ 1455 #define ECORE_CAU_DEF_RX_USECS 24 1456 #define ECORE_CAU_DEF_TX_USECS 48 1457 1458 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn, 1459 struct cau_sb_entry *p_sb_entry, 1460 u8 pf_id, u16 vf_number, u8 vf_valid) 1461 { 1462 struct ecore_dev *p_dev = p_hwfn->p_dev; 1463 u32 cau_state; 1464 u8 timer_res; 1465 1466 OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry)); 1467 1468 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); 1469 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); 1470 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); 1471 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); 1472 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); 1473 1474 cau_state = CAU_HC_DISABLE_STATE; 1475 1476 if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { 1477 cau_state = CAU_HC_ENABLE_STATE; 1478 if (!p_dev->rx_coalesce_usecs) 1479 p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS; 1480 if (!p_dev->tx_coalesce_usecs) 1481 p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS; 1482 } 1483 1484 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ 1485 if (p_dev->rx_coalesce_usecs <= 0x7F) 1486 timer_res = 0; 1487 else if (p_dev->rx_coalesce_usecs <= 0xFF) 1488 timer_res = 1; 1489 else 1490 timer_res = 2; 1491 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 1492 1493 if (p_dev->tx_coalesce_usecs <= 0x7F) 1494 timer_res = 0; 1495 else if (p_dev->tx_coalesce_usecs <= 0xFF) 1496 timer_res = 1; 1497 else 1498 timer_res = 2; 1499 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 1500 1501 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); 1502 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); 1503 } 1504 1505 static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, 1506 struct ecore_ptt *p_ptt, 1507 u16 igu_sb_id, u32 pi_index, 1508 enum ecore_coalescing_fsm coalescing_fsm, 1509 u8 timeset) 1510 { 1511 struct cau_pi_entry pi_entry; 1512 u32 sb_offset, pi_offset; 1513 1514 if (IS_VF(p_hwfn->p_dev)) 1515 return;/* @@@TBD MichalK- VF CAU... 
*/ 1516 1517 sb_offset = igu_sb_id * PIS_PER_SB_E4; 1518 OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry)); 1519 1520 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); 1521 if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE) 1522 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); 1523 else 1524 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); 1525 1526 pi_offset = sb_offset + pi_index; 1527 if (p_hwfn->hw_init_done) { 1528 ecore_wr(p_hwfn, p_ptt, 1529 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), 1530 *((u32 *)&(pi_entry))); 1531 } else { 1532 STORE_RT_REG(p_hwfn, 1533 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, 1534 *((u32 *)&(pi_entry))); 1535 } 1536 } 1537 1538 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, 1539 struct ecore_ptt *p_ptt, 1540 struct ecore_sb_info *p_sb, u32 pi_index, 1541 enum ecore_coalescing_fsm coalescing_fsm, 1542 u8 timeset) 1543 { 1544 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id, 1545 pi_index, coalescing_fsm, timeset); 1546 } 1547 1548 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn, 1549 struct ecore_ptt *p_ptt, 1550 dma_addr_t sb_phys, u16 igu_sb_id, 1551 u16 vf_number, u8 vf_valid) 1552 { 1553 struct cau_sb_entry sb_entry; 1554 1555 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, 1556 vf_number, vf_valid); 1557 1558 if (p_hwfn->hw_init_done) { 1559 /* Wide-bus, initialize via DMAE */ 1560 u64 phys_addr = (u64)sb_phys; 1561 1562 ecore_dmae_host2grc(p_hwfn, p_ptt, 1563 (u64)(osal_uintptr_t)&phys_addr, 1564 CAU_REG_SB_ADDR_MEMORY + 1565 igu_sb_id * sizeof(u64), 2, 1566 OSAL_NULL /* default parameters */); 1567 ecore_dmae_host2grc(p_hwfn, p_ptt, 1568 (u64)(osal_uintptr_t)&sb_entry, 1569 CAU_REG_SB_VAR_MEMORY + 1570 igu_sb_id * sizeof(u64), 2, 1571 OSAL_NULL /* default parameters */); 1572 } else { 1573 /* Initialize Status Block Address */ 1574 STORE_RT_REG_AGG(p_hwfn, 1575 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + 1576 igu_sb_id * 2, sb_phys); 1577 1578 STORE_RT_REG_AGG(p_hwfn, 1579 CAU_REG_SB_VAR_MEMORY_RT_OFFSET + 1580 igu_sb_id * 2, sb_entry); 1581 } 1582 1583 /* Configure pi coalescing if set */ 1584 if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { 1585 /* eth will open queues for all tcs, so configure all of them 1586 * properly, rather than just the active ones 1587 */ 1588 u8 num_tc = p_hwfn->hw_info.num_hw_tc; 1589 1590 u8 timeset, timer_res; 1591 u8 i; 1592 1593 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ 1594 if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F) 1595 timer_res = 0; 1596 else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF) 1597 timer_res = 1; 1598 else 1599 timer_res = 2; 1600 timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res); 1601 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, 1602 ECORE_COAL_RX_STATE_MACHINE, 1603 timeset); 1604 1605 if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F) 1606 timer_res = 0; 1607 else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF) 1608 timer_res = 1; 1609 else 1610 timer_res = 2; 1611 timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res); 1612 for (i = 0; i < num_tc; i++) { 1613 _ecore_int_cau_conf_pi(p_hwfn, p_ptt, 1614 igu_sb_id, TX_PI(i), 1615 ECORE_COAL_TX_STATE_MACHINE, 1616 timeset); 1617 } 1618 } 1619 } 1620 1621 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn, 1622 struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info) 1623 { 1624 /* zero status block and ack counter */ 1625 sb_info->sb_ack = 0; 1626 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); 1627 1628 if (IS_PF(p_hwfn->p_dev)) 1629 
ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, 1630 sb_info->igu_sb_id, 0, 0); 1631 } 1632 1633 struct ecore_igu_block * 1634 ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf) 1635 { 1636 struct ecore_igu_block *p_block; 1637 u16 igu_id; 1638 1639 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 1640 igu_id++) { 1641 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1642 1643 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 1644 !(p_block->status & ECORE_IGU_STATUS_FREE)) 1645 continue; 1646 1647 if (!!(p_block->status & ECORE_IGU_STATUS_PF) == 1648 b_is_pf) 1649 return p_block; 1650 } 1651 1652 return OSAL_NULL; 1653 } 1654 1655 static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn, 1656 u16 vector_id) 1657 { 1658 struct ecore_igu_block *p_block; 1659 u16 igu_id; 1660 1661 for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); 1662 igu_id++) { 1663 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; 1664 1665 if (!(p_block->status & ECORE_IGU_STATUS_VALID) || 1666 !p_block->is_pf || 1667 p_block->vector_number != vector_id) 1668 continue; 1669 1670 return igu_id; 1671 } 1672 1673 return ECORE_SB_INVALID_IDX; 1674 } 1675 1676 u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) 1677 { 1678 u16 igu_sb_id; 1679 1680 /* Assuming continuous set of IGU SBs dedicated for given PF */ 1681 if (sb_id == ECORE_SP_SB_ID) 1682 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 1683 else if (IS_PF(p_hwfn->p_dev)) 1684 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1); 1685 else 1686 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id); 1687 1688 if (igu_sb_id == ECORE_SB_INVALID_IDX) 1689 DP_NOTICE(p_hwfn, true, 1690 "Slowpath SB vector %04x doesn't exist\n", 1691 sb_id); 1692 else if (sb_id == ECORE_SP_SB_ID) 1693 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1694 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); 1695 else 1696 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 1697 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); 1698 1699 return igu_sb_id; 1700 } 1701 1702 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn, 1703 struct ecore_ptt *p_ptt, 1704 struct ecore_sb_info *sb_info, 1705 void *sb_virt_addr, 1706 dma_addr_t sb_phy_addr, u16 sb_id) 1707 { 1708 sb_info->sb_virt = sb_virt_addr; 1709 sb_info->sb_phys = sb_phy_addr; 1710 1711 sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id); 1712 1713 if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX) 1714 return ECORE_INVAL; 1715 1716 /* Let the igu info reference the client's SB info */ 1717 if (sb_id != ECORE_SP_SB_ID) { 1718 if (IS_PF(p_hwfn->p_dev)) { 1719 struct ecore_igu_info *p_info; 1720 struct ecore_igu_block *p_block; 1721 1722 p_info = p_hwfn->hw_info.p_igu_info; 1723 p_block = &p_info->entry[sb_info->igu_sb_id]; 1724 1725 p_block->sb_info = sb_info; 1726 p_block->status &= ~ECORE_IGU_STATUS_FREE; 1727 p_info->usage.free_cnt--; 1728 } else { 1729 ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info); 1730 } 1731 } 1732 #ifdef ECORE_CONFIG_DIRECT_HWFN 1733 sb_info->p_hwfn = p_hwfn; 1734 #endif 1735 sb_info->p_dev = p_hwfn->p_dev; 1736 1737 /* The igu address will hold the absolute address that needs to be 1738 * written to for a specific status block 1739 */ 1740 if (IS_PF(p_hwfn->p_dev)) { 1741 sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview + 1742 GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3); 1743 1744 } else { 1745 sb_info->igu_addr = 1746 (u8 OSAL_IOMEM *)p_hwfn->regview + 1747 PXP_VF_BAR0_START_IGU + 1748 ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3); 
	}

	sb_info->flags |= ECORE_SB_INFO_INIT;

	ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
					  struct ecore_sb_info *sb_info,
					  u16 sb_id)
{
	struct ecore_igu_info *p_info;
	struct ecore_igu_block *p_block;

	if (sb_info == OSAL_NULL)
		return ECORE_SUCCESS;

	/* Zero the status block and the ack counter */
	sb_info->sb_ack = 0;
	OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_VF(p_hwfn->p_dev)) {
		ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
		return ECORE_SUCCESS;
	}

	p_info = p_hwfn->hw_info.p_igu_info;
	p_block = &p_info->entry[sb_info->igu_sb_id];

	/* Vector 0 is reserved for the default SB */
	if (p_block->vector_number == 0) {
		DP_ERR(p_hwfn, "Do not free the slowpath SB using this function");
		return ECORE_INVAL;
	}

	/* Lose the reference to the client's SB info, and fix the counters */
	p_block->sb_info = OSAL_NULL;
	p_block->status |= ECORE_IGU_STATUS_FREE;
	p_info->usage.free_cnt++;

	return ECORE_SUCCESS;
}

static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;

	if (!p_sb)
		return;

	if (p_sb->sb_info.sb_virt) {
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_sb->sb_info.sb_virt,
				       p_sb->sb_info.sb_phys,
				       SB_ALIGNED_SIZE(p_hwfn));
	}

	OSAL_FREE(p_hwfn->p_dev, p_sb);
}

static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	struct ecore_sb_sp_info *p_sb;
	dma_addr_t p_phys = 0;
	void *p_virt;

	/* SB struct */
	p_sb = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb));
	if (!p_sb) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_sb_sp_info'\n");
		return ECORE_NOMEM;
	}

	/* SB ring */
	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					 &p_phys, SB_ALIGNED_SIZE(p_hwfn));
	if (!p_virt) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n");
		OSAL_FREE(p_hwfn->p_dev, p_sb);
		return ECORE_NOMEM;
	}

	/* Status Block setup */
	p_hwfn->p_sp_sb = p_sb;
	ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
			  p_virt, p_phys, ECORE_SP_SB_ID);

	OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
					   ecore_int_comp_cb_t comp_cb,
					   void *cookie,
					   u8 *sb_idx, __le16 **p_fw_cons)
{
	struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
	enum _ecore_status_t rc = ECORE_NOMEM;
	u8 pi;

	/* Look for a free index */
	for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
		if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
			continue;

		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
		p_sp_sb->pi_info_arr[pi].cookie = cookie;
		*sb_idx = pi;
		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
		rc = ECORE_SUCCESS;
		break;
	}

	return rc;
}

enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
{
	struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;

	if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
		return ECORE_NOMEM;

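	/* Release this PI slot so a later ecore_int_register_cb() call can
	 * hand it out again.
	 */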
p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL; 1879 p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL; 1880 return ECORE_SUCCESS; 1881 } 1882 1883 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn) 1884 { 1885 return p_hwfn->p_sp_sb->sb_info.igu_sb_id; 1886 } 1887 1888 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn, 1889 struct ecore_ptt *p_ptt, 1890 enum ecore_int_mode int_mode) 1891 { 1892 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; 1893 1894 #ifndef ASIC_ONLY 1895 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1896 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n"); 1897 igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN; 1898 } 1899 #endif 1900 1901 p_hwfn->p_dev->int_mode = int_mode; 1902 switch (p_hwfn->p_dev->int_mode) { 1903 case ECORE_INT_MODE_INTA: 1904 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; 1905 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1906 break; 1907 1908 case ECORE_INT_MODE_MSI: 1909 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1910 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; 1911 break; 1912 1913 case ECORE_INT_MODE_MSIX: 1914 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; 1915 break; 1916 case ECORE_INT_MODE_POLL: 1917 break; 1918 } 1919 1920 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); 1921 } 1922 1923 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn, 1924 struct ecore_ptt *p_ptt) 1925 { 1926 #ifndef ASIC_ONLY 1927 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { 1928 DP_INFO(p_hwfn, 1929 "FPGA - Don't enable Attentions in IGU and MISC\n"); 1930 return; 1931 } 1932 #endif 1933 1934 /* Configure AEU signal change to produce attentions */ 1935 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); 1936 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); 1937 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); 1938 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); 1939 1940 /* Flush the writes to IGU */ 1941 OSAL_MMIOWB(p_hwfn->p_dev); 1942 1943 /* Unmask AEU signals toward IGU */ 1944 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); 1945 } 1946 1947 enum _ecore_status_t 1948 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, 1949 enum ecore_int_mode int_mode) 1950 { 1951 enum _ecore_status_t rc = ECORE_SUCCESS; 1952 1953 ecore_int_igu_enable_attn(p_hwfn, p_ptt); 1954 1955 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { 1956 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn); 1957 if (rc != ECORE_SUCCESS) { 1958 DP_NOTICE(p_hwfn, true, 1959 "Slowpath IRQ request failed\n"); 1960 return ECORE_NORESOURCES; 1961 } 1962 p_hwfn->b_int_requested = true; 1963 } 1964 1965 /* Enable interrupt Generation */ 1966 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode); 1967 1968 p_hwfn->b_int_enabled = 1; 1969 1970 return rc; 1971 } 1972 1973 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn, 1974 struct ecore_ptt *p_ptt) 1975 { 1976 p_hwfn->b_int_enabled = 0; 1977 1978 if (IS_VF(p_hwfn->p_dev)) 1979 return; 1980 1981 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); 1982 } 1983 1984 #define IGU_CLEANUP_SLEEP_LENGTH (1000) 1985 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn, 1986 struct ecore_ptt *p_ptt, 1987 u32 igu_sb_id, 1988 bool cleanup_set, 1989 u16 opaque_fid) 1990 { 1991 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; 1992 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; 1993 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; 1994 u8 type = 0; /* FIXME MichalS type??? 
*/ 1995 1996 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 - 1997 IGU_REG_CLEANUP_STATUS_0) != 0x200); 1998 1999 /* USE Control Command Register to perform cleanup. There is an 2000 * option to do this using IGU bar, but then it can't be used for VFs. 2001 */ 2002 2003 /* Set the data field */ 2004 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); 2005 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type); 2006 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); 2007 2008 /* Set the control register */ 2009 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); 2010 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); 2011 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); 2012 2013 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); 2014 2015 OSAL_BARRIER(p_hwfn->p_dev); 2016 2017 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); 2018 2019 /* Flush the write to IGU */ 2020 OSAL_MMIOWB(p_hwfn->p_dev); 2021 2022 /* calculate where to read the status bit from */ 2023 sb_bit = 1 << (igu_sb_id % 32); 2024 sb_bit_addr = igu_sb_id / 32 * sizeof(u32); 2025 2026 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type); 2027 2028 /* Now wait for the command to complete */ 2029 while (--sleep_cnt) { 2030 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr); 2031 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) 2032 break; 2033 OSAL_MSLEEP(5); 2034 } 2035 2036 if (!sleep_cnt) 2037 DP_NOTICE(p_hwfn, true, 2038 "Timeout waiting for clear status 0x%08x [for sb %d]\n", 2039 val, igu_sb_id); 2040 } 2041 2042 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn, 2043 struct ecore_ptt *p_ptt, 2044 u16 igu_sb_id, u16 opaque, bool b_set) 2045 { 2046 struct ecore_igu_block *p_block; 2047 int pi, i; 2048 2049 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; 2050 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2051 "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", 2052 igu_sb_id, p_block->function_id, p_block->is_pf, 2053 p_block->vector_number); 2054 2055 /* Set */ 2056 if (b_set) 2057 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); 2058 2059 /* Clear */ 2060 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); 2061 2062 /* Wait for the IGU SB to cleanup */ 2063 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { 2064 u32 val; 2065 2066 val = ecore_rd(p_hwfn, p_ptt, 2067 IGU_REG_WRITE_DONE_PENDING + 2068 ((igu_sb_id / 32) * 4)); 2069 if (val & (1 << (igu_sb_id % 32))) 2070 OSAL_UDELAY(10); 2071 else 2072 break; 2073 } 2074 if (i == IGU_CLEANUP_SLEEP_LENGTH) 2075 DP_NOTICE(p_hwfn, true, 2076 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", 2077 igu_sb_id); 2078 2079 /* Clear the CAU for the SB */ 2080 for (pi = 0; pi < 12; pi++) 2081 ecore_wr(p_hwfn, p_ptt, 2082 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0); 2083 } 2084 2085 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn, 2086 struct ecore_ptt *p_ptt, 2087 bool b_set, bool b_slowpath) 2088 { 2089 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; 2090 struct ecore_igu_block *p_block; 2091 u16 igu_sb_id = 0; 2092 u32 val = 0; 2093 2094 /* @@@TBD MichalK temporary... should be moved to init-tool... 
	 */
	val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
	/* end temporary */

	for (igu_sb_id = 0;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    (p_block->status & ECORE_IGU_STATUS_DSB))
			continue;

		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
						  p_hwfn->hw_info.opaque_fid,
						  b_set);
	}

	if (b_slowpath)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  p_info->igu_dsb_id,
						  p_hwfn->hw_info.opaque_fid,
						  b_set);
}

int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block;
	int pf_sbs, vf_sbs;
	u16 igu_sb_id;
	u32 val, rval;

	if (!RESC_NUM(p_hwfn, ECORE_SB)) {
		/* We're using an old MFW - have to prevent any switching
		 * of SBs between PF and VFs, as the driver later wouldn't be
		 * able to tell which belongs to which.
		 */
		p_info->b_allow_pf_vf_change = false;
	} else {
		/* Use the numbers the MFW has provided -
		 * don't forget the MFW accounts for the default SB as well.
		 */
		p_info->b_allow_pf_vf_change = true;

		if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
			DP_INFO(p_hwfn,
				"MFW reports 0x%04x PF SBs; IGU CAM shows only 0x%04x\n",
				RESC_NUM(p_hwfn, ECORE_SB) - 1,
				p_info->usage.cnt);
			p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
		}

		/* TODO - how do we learn about VF SBs from MFW? */
		if (IS_PF_SRIOV(p_hwfn)) {
			u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

			if (vfs != p_info->usage.iov_cnt)
				DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
					   p_info->usage.iov_cnt, vfs);

			/* At this point we know the total number of SBs in
			 * the IGU and the number of PF SBs, so we can validate
			 * that enough remain for the VFs.
			 */
			if (vfs > p_info->usage.free_cnt +
				  p_info->usage.free_cnt_iov -
				  p_info->usage.cnt) {
				DP_NOTICE(p_hwfn, true,
					  "Not enough SBs for VFs - 0x%04x free SBs, of which the PF needs %04x while %04x are required for VFs\n",
					  p_info->usage.free_cnt +
					  p_info->usage.free_cnt_iov,
					  p_info->usage.cnt, vfs);
				return ECORE_INVAL;
			}
		}
	}

	/* Cap the number of VF SBs by the number of VFs */
	if (IS_PF_SRIOV(p_hwfn))
		p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs;

	/* Mark all SBs as free, now in the right PF/VF division */
	p_info->usage.free_cnt = p_info->usage.cnt;
	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
	p_info->usage.orig = p_info->usage.cnt;
	p_info->usage.iov_orig = p_info->usage.iov_cnt;

	/* We now proceed to re-configure the IGU CAM to reflect the initial
	 * configuration. We can start with the Default SB.
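	 * The default SB keeps vector 0, the PF SBs are then assigned vectors
	 * 1..usage.cnt, the following lines are handed to this PF's VFs, and
	 * any remaining lines are left invalid.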
	 */
	pf_sbs = p_info->usage.cnt;
	vf_sbs = p_info->usage.iov_cnt;

	for (igu_sb_id = p_info->igu_dsb_id;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];
		val = 0;

		if (!(p_block->status & ECORE_IGU_STATUS_VALID))
			continue;

		if (p_block->status & ECORE_IGU_STATUS_DSB) {
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = 0;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_DSB;
		} else if (pf_sbs) {
			pf_sbs--;
			p_block->function_id = p_hwfn->rel_pf_id;
			p_block->is_pf = 1;
			p_block->vector_number = p_info->usage.cnt - pf_sbs;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_FREE;
		} else if (vf_sbs) {
			p_block->function_id =
				p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
				p_info->usage.iov_cnt - vf_sbs;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;
			vf_sbs--;
		} else {
			p_block->function_id = 0;
			p_block->is_pf = 0;
			p_block->vector_number = 0;
		}

		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
			  p_block->function_id);
		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
			  p_block->vector_number);

		/* VF entries would be enabled when the VF is initialized */
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

		rval = ecore_rd(p_hwfn, p_ptt,
				IGU_REG_MAPPING_MEMORY +
				sizeof(u32) * igu_sb_id);

		if (rval != val) {
			ecore_wr(p_hwfn, p_ptt,
				 IGU_REG_MAPPING_MEMORY +
				 sizeof(u32) * igu_sb_id,
				 val);

			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number,
				   rval, val);
		}
	}

	return 0;
}

int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;

	/* Return all the usage indications to default prior to the reset;
	 * The reset expects the non-orig counters to reflect the initial
	 * status of the SBs, and will re-calculate the originals from them.
	 */
	p_cnt->cnt = p_cnt->orig;
	p_cnt->free_cnt = p_cnt->orig;
	p_cnt->iov_cnt = p_cnt->iov_orig;
	p_cnt->free_cnt_iov = p_cnt->iov_orig;
	p_cnt->orig = 0;
	p_cnt->iov_orig = 0;

	/* TODO - we probably need to re-configure the CAU as well... */
	return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
}

static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u16 igu_sb_id)
{
	u32 val = ecore_rd(p_hwfn, p_ptt,
			   IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
	struct ecore_igu_block *p_block;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];

	/* Fill the block information */
	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);

	p_block->igu_sb_id = igu_sb_id;
}

enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt)
{
	struct ecore_igu_info *p_igu_info;
	struct ecore_igu_block *p_block;
	u32 min_vf = 0, max_vf = 0;
	u16 igu_sb_id;

	p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
						 GFP_KERNEL,
						 sizeof(*p_igu_info));
	if (!p_hwfn->hw_info.p_igu_info)
		return ECORE_NOMEM;
	p_igu_info = p_hwfn->hw_info.p_igu_info;

	/* Distinguish between an existent and a non-existent default SB */
	p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;

	/* Find the range of VF ids whose SBs belong to this PF */
	if (p_hwfn->p_dev->p_iov_info) {
		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

		min_vf = p_iov->first_vf_in_pf;
		max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
	}

	for (igu_sb_id = 0;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		/* Read the current entry; Notice it might not belong to this PF */
		ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
		p_block = &p_igu_info->entry[igu_sb_id];

		if ((p_block->is_pf) &&
		    (p_block->function_id == p_hwfn->rel_pf_id)) {
			p_block->status = ECORE_IGU_STATUS_PF |
					  ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
				p_igu_info->usage.cnt++;
		} else if (!(p_block->is_pf) &&
			   (p_block->function_id >= min_vf) &&
			   (p_block->function_id < max_vf)) {
			/* Available for VFs of this PF */
			p_block->status = ECORE_IGU_STATUS_VALID |
					  ECORE_IGU_STATUS_FREE;

			if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
				p_igu_info->usage.iov_cnt++;
		}

		/* Mark the first entry belonging to the PF or its VFs
		 * as the default SB [we'll reset IGU prior to first usage].
		 */
		if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
		    (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
			p_igu_info->igu_dsb_id = igu_sb_id;
			p_block->status |= ECORE_IGU_STATUS_DSB;
		}

		/* While this isn't suitable for all clients, limit the number
		 * of prints by having each PF print only its own entries, with
		 * the exception of PF0 which prints everything.
		 */
		if ((p_block->status & ECORE_IGU_STATUS_VALID) ||
		    (p_hwfn->abs_pf_id == 0))
			DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
				   igu_sb_id, p_block->function_id,
				   p_block->is_pf, p_block->vector_number);
	}

	if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) {
		DP_NOTICE(p_hwfn, true,
			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
			  p_igu_info->igu_dsb_id);
		return ECORE_INVAL;
	}

	/* All non-default SBs are considered free at this point */
	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
		   p_igu_info->igu_dsb_id, p_igu_info->usage.cnt,
		   p_igu_info->usage.iov_cnt);

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			  u16 sb_id, bool b_to_vf)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block = OSAL_NULL;
	u16 igu_sb_id = 0, vf_num = 0;
	u32 val = 0;

	if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn))
		return ECORE_INVAL;

	if (sb_id == ECORE_SP_SB_ID)
		return ECORE_INVAL;

	if (!p_info->b_allow_pf_vf_change) {
		DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n");
		return ECORE_INVAL;
	}

	/* If we're moving a SB from PF to VF, the client had to specify
	 * which vector it wants to move.
	 */
	if (b_to_vf) {
		igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
		if (igu_sb_id == ECORE_SB_INVALID_IDX)
			return ECORE_INVAL;
	}

	/* If we're moving a SB from VF to PF, need to validate there isn't
	 * already a line configured for that vector.
	 */
	if (!b_to_vf) {
		if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) !=
		    ECORE_SB_INVALID_IDX)
			return ECORE_INVAL;
	}

	/* We need to validate that the SB can actually be relocated.
	 * This would also handle the previous case where we've explicitly
	 * stated which IGU SB needs to move.
	 */
	for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !(p_block->status & ECORE_IGU_STATUS_FREE) ||
		    (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) {
			if (b_to_vf)
				return ECORE_INVAL;
			else
				continue;
		}

		break;
	}

	if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
			   "Failed to find a free SB to move\n");
		return ECORE_INVAL;
	}

	/* At this point, p_block points to the SB we want to relocate */
	if (b_to_vf) {
		p_block->status &= ~ECORE_IGU_STATUS_PF;

		/* It doesn't matter which VF number we choose, since we're
		 * going to disable the line; but let's keep it in range.
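		 * Since IGU_MAPPING_LINE_VALID is written as 0 for a non-PF
		 * line below, the VF id chosen here is not used by hardware
		 * until the VF is actually started and the line is re-enabled.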
2463 */ 2464 vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf; 2465 2466 p_block->function_id = (u8)vf_num; 2467 p_block->is_pf = 0; 2468 p_block->vector_number = 0; 2469 2470 p_info->usage.cnt--; 2471 p_info->usage.free_cnt--; 2472 p_info->usage.iov_cnt++; 2473 p_info->usage.free_cnt_iov++; 2474 2475 /* TODO - if SBs aren't really the limiting factor, 2476 * then it might not be accurate [in the since that 2477 * we might not need decrement the feature]. 2478 */ 2479 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--; 2480 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++; 2481 } else { 2482 p_block->status |= ECORE_IGU_STATUS_PF; 2483 p_block->function_id = p_hwfn->rel_pf_id; 2484 p_block->is_pf = 1; 2485 p_block->vector_number = sb_id + 1; 2486 2487 p_info->usage.cnt++; 2488 p_info->usage.free_cnt++; 2489 p_info->usage.iov_cnt--; 2490 p_info->usage.free_cnt_iov--; 2491 2492 p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++; 2493 p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--; 2494 } 2495 2496 /* Update the IGU and CAU with the new configuration */ 2497 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, 2498 p_block->function_id); 2499 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); 2500 SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); 2501 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, 2502 p_block->vector_number); 2503 2504 ecore_wr(p_hwfn, p_ptt, 2505 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id, 2506 val); 2507 2508 ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0, 2509 igu_sb_id, vf_num, 2510 p_block->is_pf ? 0 : 1); 2511 2512 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, 2513 "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", 2514 igu_sb_id, p_block->function_id, 2515 p_block->is_pf, p_block->vector_number); 2516 2517 return ECORE_SUCCESS; 2518 } 2519 2520 /** 2521 * @brief Initialize igu runtime registers 2522 * 2523 * @param p_hwfn 2524 */ 2525 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn) 2526 { 2527 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; 2528 2529 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); 2530 } 2531 2532 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \ 2533 IGU_CMD_INT_ACK_BASE) 2534 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \ 2535 IGU_CMD_INT_ACK_BASE) 2536 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn) 2537 { 2538 u32 intr_status_hi = 0, intr_status_lo = 0; 2539 u64 intr_status = 0; 2540 2541 intr_status_lo = REG_RD(p_hwfn, 2542 GTT_BAR0_MAP_REG_IGU_CMD + 2543 LSB_IGU_CMD_ADDR * 8); 2544 intr_status_hi = REG_RD(p_hwfn, 2545 GTT_BAR0_MAP_REG_IGU_CMD + 2546 MSB_IGU_CMD_ADDR * 8); 2547 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; 2548 2549 return intr_status; 2550 } 2551 2552 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn) 2553 { 2554 OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn); 2555 p_hwfn->b_sp_dpc_enabled = true; 2556 } 2557 2558 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn) 2559 { 2560 p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn); 2561 if (!p_hwfn->sp_dpc) 2562 return ECORE_NOMEM; 2563 2564 return ECORE_SUCCESS; 2565 } 2566 2567 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn) 2568 { 2569 OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc); 2570 } 2571 2572 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn, 2573 struct ecore_ptt *p_ptt) 2574 { 2575 enum _ecore_status_t rc = ECORE_SUCCESS; 2576 2577 rc = ecore_int_sp_dpc_alloc(p_hwfn); 2578 if (rc != ECORE_SUCCESS) { 2579 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc 
mem\n"); 2580 return rc; 2581 } 2582 2583 rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt); 2584 if (rc != ECORE_SUCCESS) { 2585 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n"); 2586 return rc; 2587 } 2588 2589 rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt); 2590 if (rc != ECORE_SUCCESS) 2591 DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n"); 2592 2593 return rc; 2594 } 2595 2596 void ecore_int_free(struct ecore_hwfn *p_hwfn) 2597 { 2598 ecore_int_sp_sb_free(p_hwfn); 2599 ecore_int_sb_attn_free(p_hwfn); 2600 ecore_int_sp_dpc_free(p_hwfn); 2601 } 2602 2603 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 2604 { 2605 if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn) 2606 return; 2607 2608 ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); 2609 ecore_int_sb_attn_setup(p_hwfn, p_ptt); 2610 ecore_int_sp_dpc_setup(p_hwfn); 2611 } 2612 2613 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn, 2614 struct ecore_sb_cnt_info *p_sb_cnt_info) 2615 { 2616 struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info; 2617 2618 if (!p_igu_info || !p_sb_cnt_info) 2619 return; 2620 2621 OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage, 2622 sizeof(*p_sb_cnt_info)); 2623 } 2624 2625 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev) 2626 { 2627 int i; 2628 2629 for_each_hwfn(p_dev, i) 2630 p_dev->hwfns[i].b_int_requested = false; 2631 } 2632 2633 void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable) 2634 { 2635 p_dev->attn_clr_en = clr_enable; 2636 } 2637 2638 enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn, 2639 struct ecore_ptt *p_ptt, 2640 u8 timer_res, u16 sb_id, bool tx) 2641 { 2642 struct cau_sb_entry sb_entry; 2643 enum _ecore_status_t rc; 2644 2645 if (!p_hwfn->hw_init_done) { 2646 DP_ERR(p_hwfn, "hardware not initialized yet\n"); 2647 return ECORE_INVAL; 2648 } 2649 2650 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2651 sb_id * sizeof(u64), 2652 (u64)(osal_uintptr_t)&sb_entry, 2, 2653 OSAL_NULL /* default parameters */); 2654 if (rc != ECORE_SUCCESS) { 2655 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2656 return rc; 2657 } 2658 2659 if (tx) 2660 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); 2661 else 2662 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); 2663 2664 rc = ecore_dmae_host2grc(p_hwfn, p_ptt, 2665 (u64)(osal_uintptr_t)&sb_entry, 2666 CAU_REG_SB_VAR_MEMORY + sb_id * sizeof(u64), 2, 2667 OSAL_NULL /* default parameters */); 2668 if (rc != ECORE_SUCCESS) { 2669 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); 2670 return rc; 2671 } 2672 2673 return rc; 2674 } 2675 2676 enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn, 2677 struct ecore_ptt *p_ptt, 2678 struct ecore_sb_info *p_sb, 2679 struct ecore_sb_info_dbg *p_info) 2680 { 2681 u16 sbid = p_sb->igu_sb_id; 2682 int i; 2683 2684 if (IS_VF(p_hwfn->p_dev)) 2685 return ECORE_INVAL; 2686 2687 if (sbid > NUM_OF_SBS(p_hwfn->p_dev)) 2688 return ECORE_INVAL; 2689 2690 p_info->igu_prod = ecore_rd(p_hwfn, p_ptt, 2691 IGU_REG_PRODUCER_MEMORY + sbid * 4); 2692 p_info->igu_cons = ecore_rd(p_hwfn, p_ptt, 2693 IGU_REG_CONSUMER_MEM + sbid * 4); 2694 2695 for (i = 0; i < PIS_PER_SB_E4; i++) 2696 p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt, 2697 CAU_REG_PI_MEMORY + 2698 sbid * 4 * PIS_PER_SB_E4 + 2699 i * 4); 2700 2701 return ECORE_SUCCESS; 2702 } 2703 2704 void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn) 2705 { 2706 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; 2707 struct 
ecore_ptt *p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn, 2708 RESERVED_PTT_DPC); 2709 int i; 2710 2711 /* Do not reorder the following cleanup sequence */ 2712 /* Ack all attentions */ 2713 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ACK_BITS, 0xfff); 2714 2715 /* Clear driver attention */ 2716 ecore_wr(p_hwfn, p_dpc_ptt, 2717 ((p_hwfn->rel_pf_id << 3) + MISC_REG_AEU_GENERAL_ATTN_0), 0); 2718 2719 /* Clear per-PF IGU registers to restore them as if the IGU 2720 * was reset for this PF 2721 */ 2722 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); 2723 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); 2724 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); 2725 2726 /* Execute IGU clean up*/ 2727 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_FUNCTIONAL_CLEANUP, 1); 2728 2729 /* Clear Stats */ 2730 ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_OF_INTA_ASSERTED, 0); 2731 2732 for (i = 0; i < IGU_REG_PBA_STS_PF_SIZE; i++) 2733 ecore_wr(p_hwfn, p_ptt, IGU_REG_PBA_STS_PF + i * 4, 0); 2734 } 2735
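
/* Illustrative usage sketch (kept as a comment, not compiled): one way a
 * slowpath client could tie the APIs above together. The names my_comp_cb,
 * my_attach and my_isr_ctx are hypothetical; only the ecore_int_* calls and
 * their signatures come from this file, and error handling is elided.
 *
 *	static void my_comp_cb(struct ecore_hwfn *p_hwfn, void *cookie)
 *	{
 *		struct my_isr_ctx *ctx = cookie;
 *
 *		... consume the event signalled through the slowpath SB ...
 *	}
 *
 *	static enum _ecore_status_t my_attach(struct ecore_hwfn *p_hwfn,
 *					      struct ecore_ptt *p_ptt,
 *					      struct my_isr_ctx *ctx)
 *	{
 *		enum _ecore_status_t rc;
 *
 *		rc = ecore_int_register_cb(p_hwfn, my_comp_cb, ctx,
 *					   &ctx->sb_idx, &ctx->p_fw_cons);
 *		if (rc != ECORE_SUCCESS)
 *			return rc;
 *
 *		return ecore_int_igu_enable(p_hwfn, p_ptt,
 *					    ECORE_INT_MODE_MSIX);
 *	}
 *
 * On teardown the client would call ecore_int_unregister_cb(p_hwfn,
 * ctx->sb_idx) and ecore_int_igu_disable_int(p_hwfn, p_ptt).
 */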