/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_fw_funcs.h"

#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"

#define ECORE_INIT_MAX_POLL_COUNT	100
#define ECORE_INIT_POLL_PERIOD_US	500

void ecore_init_iro_array(struct ecore_dev *p_dev)
{
	p_dev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET;
}

/* Runtime configuration helpers */
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data.b_valid[i] = false;
}

void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
	if (rt_offset >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n",
		       val, rt_offset, RUNTIME_ARRAY_SIZE);
		return;
	}

	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 *p_val, osal_size_t size)
{
	osal_size_t i;

	if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n",
		       rt_offset, (u32)(rt_offset + size - 1),
		       RUNTIME_ARRAY_SIZE);
		return;
	}

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}

static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 addr,
					  u16 rt_offset,
					  u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			continue;
		}

		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_init_val + i),
					 addr + (i << 2), segment,
					 OSAL_NULL /* default parameters */);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}

	return rc;
}
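
/* Illustrative sketch (not part of the driver): how runtime (RT) values
 * are typically staged and later flushed to the chip.  EXAMPLE_RT_OFFSET
 * is a hypothetical placeholder; real offsets come from ecore_rt_defs.h.
 *
 *	// Stage one value; it is only recorded and marked valid in rt_data.
 *	ecore_init_store_rt_reg(p_hwfn, EXAMPLE_RT_OFFSET, 0x1);
 *
 *	// Later, an INIT_SRC_RUNTIME write op invokes ecore_init_rt(),
 *	// which pushes only the valid entries: direct ecore_wr() writes
 *	// normally, or one DMAE transfer per contiguous valid segment
 *	// when the target is a wide-bus register.
 */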

enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rt_data *rt_data = &p_hwfn->rt_data;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(bool) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->b_valid)
		return ECORE_NOMEM;

	rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					sizeof(u32) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->init_val) {
		OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
		return ECORE_NOMEM;
	}

	return ECORE_SUCCESS;
}

void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
}

static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  u32 addr,
						  u32 dmae_data_offset,
						  u32 size, const u32 *p_buf,
						  bool b_must_dmae,
						  bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
	if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
	    !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
		const u32 *data = p_buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_buf +
							  dmae_data_offset),
					 addr, size,
					 OSAL_NULL /* default parameters */);
	}

	return rc;
}

static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u32 addr, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];
	struct dmae_params params;

	OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	OSAL_MEMSET(&params, 0, sizeof(params));
	SET_FIELD(params.flags, DMAE_PARAMS_RW_REPL_SRC, 0x1);
	return ecore_dmae_host2grc(p_hwfn, p_ptt,
				   (osal_uintptr_t)&zero_buffer[0],
				   addr, fill_count, &params);
}

static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		ecore_wr(p_hwfn, p_ptt, addr, fill);
}
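
/* Illustrative note: ecore_init_array_dmae() above falls back to direct
 * register writes when DMAE is unavailable, or when a non-wide-bus
 * section is shorter than 16 dwords and the DMAE setup cost would
 * outweigh the copy.  A minimal sketch of that fallback path, assuming
 * a hypothetical 4-dword buffer:
 *
 *	const u32 buf[4] = { 0x0, 0x1, 0x2, 0x3 };
 *	u32 i;
 *
 *	for (i = 0; i < 4; i++)
 *		ecore_wr(p_hwfn, p_ptt, addr + (i << 2), buf[i]);
 */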

static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 struct init_write_op *cmd,
						 bool b_must_dmae,
						 bool b_can_dmae)
{
	u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
	u32 data = OSAL_LE32_TO_CPU(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
	u32 offset, output_len, input_len, max_size;
#endif
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 size;

	array_data = p_dev->fw_data->arr_data;

	hdr = (union init_array_hdr *)
		(uintptr_t)(array_data + dmae_array_offset);
	data = OSAL_LE32_TO_CPU(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);

		output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
					     (u8 *)(uintptr_t)&array_data[offset],
					     max_size,
					     (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						   output_len,
						   p_hwfn->unzip_buf,
						   b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n");
			rc = ECORE_INVAL;
		}
#else
		DP_NOTICE(p_hwfn, true,
			  "Using zipped firmware without config enabled\n");
		rc = ECORE_INVAL;
#endif
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data,
				 INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
						   dmae_array_offset +
						   1, size, array_data,
						   b_must_dmae,
						   b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
					   dmae_array_offset + 1,
					   size, array_data,
					   b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_write_op *p_cmd,
					      bool b_can_dmae)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn, true,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return ECORE_INVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
		ecore_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
		else
			ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					  b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		rc = ecore_init_rt(p_hwfn, p_ptt, addr,
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
				   b_must_dmae);
		break;
	}

	return rc;
}

static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
{
	return (val == expected_val);
}

static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}
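
/* Illustrative truth samples for the three poll comparisons (assumed
 * values, not taken from firmware):
 *
 *	comp_eq(0x1234, 0x1234)  -> true   exact match
 *	comp_and(0xff, 0x0f)     -> true   all expected bits set in val
 *	comp_and(0x03, 0x0f)     -> false  some expected bits missing
 *	comp_or(0x00, 0x00)      -> false  true iff val or expected_val
 *	                                   has any bit set
 */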

/* init_ops read/poll commands */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt, struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = OSAL_LE32_TO_CPU(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay *= 100;
#endif

	val = ecore_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = OSAL_LE32_TO_CPU(cmd->expected_val);
	for (i = 0;
	     i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) {
		OSAL_UDELAY(delay);
		val = ecore_rd(p_hwfn, p_ptt, addr);
	}

	if (i == ECORE_INIT_MAX_POLL_COUNT)
		DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
		       OSAL_LE32_TO_CPU(cmd->op_data));
}

/* init_ops callbacks entry point */
static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_callback_op *p_cmd)
{
	enum _ecore_status_t rc;

	switch (p_cmd->callback_id) {
	case DMAE_READY_CB:
		rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n",
			  p_cmd->callback_id);
		return ECORE_INVAL;
	}

	return rc;
}

static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
				    u16 *p_offset, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u8 arg1, arg2, tree_val;
	const u8 *modes_tree;

	modes_tree = p_dev->fw_data->modes_tree_buf;
	tree_val = modes_tree[(*p_offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & (1 << tree_val)) ? 1 : 0;
	}
}
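
/* Illustrative note: the modes tree is a prefix-notation expression that
 * ecore_init_cmd_mode_match() evaluates recursively.  For a hypothetical
 * buffer
 *
 *	{ INIT_MODE_OP_AND, MAX_INIT_MODE_OPS + 0, MAX_INIT_MODE_OPS + 2 }
 *
 * the result is 1 only when both mode bits 0 and 2 are set in 'modes';
 * any byte >= MAX_INIT_MODE_OPS is a leaf encoding a mode-bit index.
 */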

static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
			       struct init_if_mode_op *p_cmd, int modes)
{
	u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);

	if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
				u32 phase, u32 phase_id)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
	u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    int phase, int phase_id, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	bool b_dmae = (phase != PHASE_ENGINE);
	u32 cmd_num, num_init_ops;
	union init_op *init;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	num_init_ops = p_dev->fw_data->init_ops_size;
	init = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
	p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
					MAX_ZIPPED_SIZE * 4);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
		return ECORE_NOMEM;
	}
#endif

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init[cmd_num];
		u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					       b_dmae);
			break;

		case INIT_OP_READ:
			ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;

		case INIT_OP_IF_MODE:
			cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
						       modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
							phase_id);
			break;
		case INIT_OP_DELAY:
			/* ecore_init_run is always invoked from
			 * sleep-able context
			 */
			OSAL_UDELAY(cmd->delay.delay);
			break;

		case INIT_OP_CALLBACK:
			rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			if (phase == PHASE_ENGINE &&
			    cmd->callback.callback_id == DMAE_READY_CB)
				b_dmae = true;
			break;
		}

		if (rc)
			break;
	}
#ifdef CONFIG_ECORE_ZIPPED_FW
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
#endif
	return rc;
}
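
/* Illustrative sketch (hypothetical call site): running the engine phase
 * for any phase ID.  Real callers derive 'modes' from the device
 * configuration before invoking ecore_init_run().
 *
 *	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID,
 *			    modes);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 */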

enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
#ifdef CONFIG_ECORE_BINARY_FW
					const u8 *fw_data)
#else
					const u8 OSAL_UNUSED * fw_data)
#endif
{
	struct ecore_fw_data *fw = p_dev->fw_data;

#ifdef CONFIG_ECORE_BINARY_FW
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!fw_data) {
		DP_NOTICE(p_dev, true, "Invalid fw data\n");
		return ECORE_INVAL;
	}

	buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)fw_data;

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);
	offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
	fw->fw_overlays = (u32 *)(fw_data + offset);
	len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
	fw->fw_overlays_len = len;
#else
	fw->init_ops = (union init_op *)init_ops;
	fw->arr_data = (u32 *)init_val;
	fw->modes_tree_buf = (u8 *)modes_tree_buf;
	fw->init_ops_size = init_ops_size;
	fw->fw_overlays = fw_overlays;
	fw->fw_overlays_len = sizeof(fw_overlays);
#endif

	return ECORE_SUCCESS;
}
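
/* Illustrative end-to-end sketch of how these entry points compose; the
 * call sites and exact ordering below are assumptions, not code taken
 * from the driver's device-init path:
 *
 *	rc = ecore_init_fw_data(p_dev, fw_data);  // parse fw buffer
 *	ecore_init_iro_array(p_dev);              // internal RAM offsets
 *	rc = ecore_init_alloc(p_hwfn);            // allocate rt_data
 *	ecore_init_clear_rt_data(p_hwfn);
 *	// ... stage values via ecore_init_store_rt_reg()/_agg() ...
 *	rc = ecore_init_run(p_hwfn, p_ptt, phase, phase_id, modes);
 *	ecore_init_free(p_hwfn);
 */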