/*
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_fw_funcs.h"

#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"

#define ECORE_INIT_MAX_POLL_COUNT	100
#define ECORE_INIT_POLL_PERIOD_US	500

void ecore_init_iro_array(struct ecore_dev *p_dev)
{
	p_dev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data.b_valid[i] = false;
}

void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
	if (rt_offset >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n",
		       val, rt_offset, RUNTIME_ARRAY_SIZE);
		return;
	}

	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 *p_val, osal_size_t size)
{
	osal_size_t i;

	if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
		DP_ERR(p_hwfn,
		       "Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n",
		       rt_offset, (u32)(rt_offset + size - 1),
		       RUNTIME_ARRAY_SIZE);
		return;
	}

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}

static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 addr,
					  u16 rt_offset,
					  u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			continue;
		}

		/* Start of a new segment */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_init_val + i),
					 addr + (i << 2), segment, 0);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Jump over the entire segment, including invalid entry */
		i += segment;
	}

	return rc;
}

enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rt_data *rt_data = &p_hwfn->rt_data;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(bool) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->b_valid)
		return ECORE_NOMEM;

	rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					sizeof(u32) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->init_val) {
		OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
		return ECORE_NOMEM;
	}

	return ECORE_SUCCESS;
}

void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
}

static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  u32 addr,
						  u32 dmae_data_offset,
						  u32 size, const u32 *p_buf,
						  bool b_must_dmae,
						  bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
	if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
	    !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
		const u32 *data = p_buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_buf +
							  dmae_data_offset),
					 addr, size, 0);
	}

	return rc;
}

static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u32 addr, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];

	OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	return ecore_dmae_host2grc(p_hwfn, p_ptt,
				   (osal_uintptr_t)&zero_buffer[0],
				   addr, fill_count,
				   ECORE_DMAE_FLAG_RW_REPL_SRC);
}

static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		ecore_wr(p_hwfn, p_ptt, addr, fill);
}

static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 struct init_write_op *cmd,
						 bool b_must_dmae,
						 bool b_can_dmae)
{
	u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
	u32 data = OSAL_LE32_TO_CPU(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
	u32 offset, output_len, input_len, max_size;
#endif
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 size;

	array_data = p_dev->fw_data->arr_data;

	hdr = (union init_array_hdr *)
	      (uintptr_t)(array_data + dmae_array_offset);
	data = OSAL_LE32_TO_CPU(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);

		output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
					     (u8 *)(uintptr_t)&array_data[offset],
					     max_size,
					     (u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						   output_len,
						   p_hwfn->unzip_buf,
						   b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n");
			rc = ECORE_INVAL;
		}
#else
		DP_NOTICE(p_hwfn, true,
			  "Using zipped firmware without config enabled\n");
		rc = ECORE_INVAL;
#endif
		break;
	case INIT_ARR_PATTERN:
	{
		u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
		u32 i;

		size = GET_FIELD(data,
				 INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

		for (i = 0; i < repeats; i++, addr += size << 2) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
						   dmae_array_offset + 1,
						   size, array_data,
						   b_must_dmae, b_can_dmae);
			if (rc)
				break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
					   dmae_array_offset + 1,
					   size, array_data,
					   b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_write_op *p_cmd,
					      bool b_can_dmae)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn, true,
			  "Need to write to %08x for Wide-bus but DMAE isn't"
			  " allowed\n",
			  addr);
		return ECORE_INVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
		ecore_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
		else
			ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					  b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		rc = ecore_init_rt(p_hwfn, p_ptt, addr,
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
				   b_must_dmae);
		break;
	}

	return rc;
}

static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
{
	return (val == expected_val);
}

static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt, struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = OSAL_LE32_TO_CPU(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay *= 100;
#endif

	val = ecore_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = OSAL_LE32_TO_CPU(cmd->expected_val);
	for (i = 0;
	     i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) {
		OSAL_UDELAY(delay);
		val = ecore_rd(p_hwfn, p_ptt, addr);
	}

	if (i == ECORE_INIT_MAX_POLL_COUNT)
		DP_ERR(p_hwfn,
		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
		       OSAL_LE32_TO_CPU(cmd->op_data));
}

/* init_ops callbacks entry point */
static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_callback_op *p_cmd)
{
	enum _ecore_status_t rc;

	switch (p_cmd->callback_id) {
	case DMAE_READY_CB:
		rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
		break;
	default:
		DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n",
			  p_cmd->callback_id);
		return ECORE_INVAL;
	}

	return rc;
}

static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
				    u16 *p_offset, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = p_dev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*p_offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & (1 << tree_val)) ? 1 : 0;
	}
}

static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
			       struct init_if_mode_op *p_cmd, int modes)
{
	u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);

	if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
				u32 phase, u32 phase_id)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
	u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    int phase, int phase_id, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	num_init_ops = p_dev->fw_data->init_ops_size;
	init_ops = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
	p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
					MAX_ZIPPED_SIZE * 4);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
		return ECORE_NOMEM;
	}
#endif

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					       b_dmae);
			break;

		case INIT_OP_READ:
			ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;

		case INIT_OP_IF_MODE:
			cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
						       modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
							phase_id);
			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* ecore_init_run is always invoked from
			 * sleep-able context
			 */
			OSAL_UDELAY(cmd->delay.delay);
			break;

		case INIT_OP_CALLBACK:
			rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}
#ifdef CONFIG_ECORE_ZIPPED_FW
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
#endif
	return rc;
}

void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt)
{
	u32 gtt_base;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		/* This is done by MFW on ASIC; regardless, this should only
		 * be done once per chip [i.e., common]. Implementation is
		 * not too bright, but it should work on the simple FPGA/EMUL
		 * scenarios.
		 */
		static bool initialized;
		int poll_cnt = 500;
		u32 val;

		/* initialize PTT/GTT (poll for completion) */
		if (!initialized) {
			ecore_wr(p_hwfn, p_ptt,
				 PGLUE_B_REG_START_INIT_PTT_GTT, 1);
			initialized = true;
		}

		do {
			/* ptt might be overridden by HW until this is done */
			OSAL_UDELAY(10);
			ecore_ptt_invalidate(p_hwfn);
			val = ecore_rd(p_hwfn, p_ptt,
				       PGLUE_B_REG_INIT_DONE_PTT_GTT);
		} while ((val != 1) && --poll_cnt);

		if (!poll_cnt)
			DP_ERR(p_hwfn,
			       "PGLUE_B_REG_INIT_DONE didn't complete\n");
	}
#endif

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}

enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
#ifdef CONFIG_ECORE_BINARY_FW
					const u8 *fw_data)
#else
					const u8 OSAL_UNUSED * fw_data)
#endif
{
	struct ecore_fw_data *fw = p_dev->fw_data;

#ifdef CONFIG_ECORE_BINARY_FW
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!fw_data) {
		DP_NOTICE(p_dev, true, "Invalid fw data\n");
		return ECORE_INVAL;
	}

	buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)fw_data;

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
	fw->init_ops = (union init_op *)init_ops;
	fw->arr_data = (u32 *)init_val;
	fw->modes_tree_buf = (u8 *)modes_tree_buf;
	fw->init_ops_size = init_ops_size;
#endif

	return ECORE_SUCCESS;
}