/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_fw_funcs.h"

#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"

#define ECORE_INIT_MAX_POLL_COUNT	100
#define ECORE_INIT_POLL_PERIOD_US	500

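/* Publish the IRO (internal RAM offsets) array from ecore_iro_values.h so
 * that IRO-based macros can resolve storm RAM addresses at runtime.
 */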
void ecore_init_iro_array(struct ecore_dev *p_dev)
{
	p_dev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
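/* The runtime (RT) array is a host-side staging area for per-function init
 * values: init_val[] holds the data and b_valid[] marks which offsets were
 * actually set, so that ecore_init_rt() later writes only initialized
 * entries to the chip. A minimal usage sketch (the RT offset name below is
 * hypothetical):
 *
 *	ecore_init_clear_rt_data(p_hwfn);
 *	ecore_init_store_rt_reg(p_hwfn, SOME_RT_OFFSET, val);
 */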
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
		p_hwfn->rt_data.b_valid[i] = false;
}

void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
	p_hwfn->rt_data.init_val[rt_offset] = val;
	p_hwfn->rt_data.b_valid[rt_offset] = true;
}

void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
			     u32 rt_offset, u32 *p_val, osal_size_t size)
{
	osal_size_t i;

	for (i = 0; i < size / sizeof(u32); i++) {
		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
	}
}

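/* Flush a slice of the RT shadow array to the chip. When wide-bus access is
 * required, contiguous runs of valid entries are coalesced into single DMAE
 * transactions; e.g. a valid-map of [1 1 0 1] yields a 2-dword DMAE, a
 * skipped dword, then a 1-dword DMAE.
 */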
static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 addr,
					  u16 rt_offset,
					  u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u16 i, segment;

	/* Since not all RT entries are initialized, go over the RT array and
	 * DMA each contiguous segment of initialized values.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* If wide-bus access isn't required here, simply write the
		 * data instead of using DMAE.
		 */
		if (!b_must_dmae) {
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			continue;
		}

		/* Find the end of the current segment of valid entries */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_init_val + i),
					 addr + (i << 2), segment, 0);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Jump over the entire segment, including the invalid entry
		 * that terminated it (the loop's i++ takes the extra step).
		 */
		i += segment;
	}

	return rc;
}

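/* Allocate the RT shadow arrays (b_valid[] and init_val[]); VFs skip the
 * allocation altogether since the init tool runs only on PFs.
 */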
enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rt_data *rt_data = &p_hwfn->rt_data;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
				       sizeof(bool) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->b_valid)
		return ECORE_NOMEM;

	rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
					sizeof(u32) * RUNTIME_ARRAY_SIZE);
	if (!rt_data->init_val) {
		OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
		return ECORE_NOMEM;
	}

	return ECORE_SUCCESS;
}

void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
}

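/* Write a dword buffer into GRC space, either by per-dword register writes
 * or by DMAE. The 16-dword threshold below is presumably the point where
 * DMAE setup cost is amortized; shorter sections (unless wide-bus) and
 * callers that can't DMAE fall back to ecore_wr().
 */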
static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  u32 addr,
						  u32 dmae_data_offset,
						  u32 size, const u32 *p_buf,
						  bool b_must_dmae,
						  bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform DMAE only for sections that are long enough, or when
	 * wide-bus access mandates it.
	 */
#ifndef ASIC_ONLY
	if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
	    !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
		const u32 *data = p_buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_buf +
							  dmae_data_offset),
					 addr, size, 0);
	}

	return rc;
}

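/* Zero-fill a GRC region via DMAE. The static zero buffer is only
 * DMAE_MAX_RW_SIZE dwords long; ECORE_DMAE_FLAG_RW_REPL_SRC appears to let
 * the engine reuse (replicate) the same host source for the whole length,
 * so any fill_count is covered. Note the 'fill' argument is unused here -
 * only a zero pattern is supported by the DMAE path.
 */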
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u32 addr, u32 fill,
						 u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];

	OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	return ecore_dmae_host2grc(p_hwfn, p_ptt,
				   (osal_uintptr_t)&zero_buffer[0],
				   addr, fill_count,
				   ECORE_DMAE_FLAG_RW_REPL_SRC);
}

static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    u32 addr, u32 fill, u32 fill_count)
{
	u32 i;

	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
		ecore_wr(p_hwfn, p_ptt, addr, fill);
}

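/* Execute an INIT_SRC_ARRAY write command. The FW data blob encodes arrays
 * in three forms: INIT_ARR_ZIPPED (decompress into unzip_buf, then write),
 * INIT_ARR_PATTERN (one block repeated N times at increasing addresses) and
 * INIT_ARR_STANDARD (a plain buffer).
 */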
static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 struct init_write_op *cmd,
						 bool b_must_dmae,
						 bool b_can_dmae)
{
#ifdef CONFIG_ECORE_ZIPPED_FW
	u32 offset, output_len, input_len, max_size;
#endif
	u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	union init_array_hdr *hdr;
	const u32 *array_data;
	u32 size, addr, data;

	array_data = p_dev->fw_data->arr_data;
	data = OSAL_LE32_TO_CPU(cmd->data);
	addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;

	hdr = (union init_array_hdr *)
		(uintptr_t)(array_data + dmae_array_offset);
	data = OSAL_LE32_TO_CPU(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);

		output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
				(u8 *)(uintptr_t)&array_data[offset],
				max_size,
				(u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						   output_len,
						   p_hwfn->unzip_buf,
						   b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n");
			rc = ECORE_INVAL;
		}
#else
		DP_NOTICE(p_hwfn, true,
			  "Using zipped firmware without config enabled\n");
		rc = ECORE_INVAL;
#endif
		break;
	case INIT_ARR_PATTERN:
		{
			u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
			u32 i;

			size = GET_FIELD(data,
					 INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

			for (i = 0; i < repeats; i++, addr += size << 2) {
				rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
							   dmae_array_offset +
							   1, size, array_data,
							   b_must_dmae,
							   b_can_dmae);
				if (rc)
					break;
			}
			break;
		}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
					   dmae_array_offset + 1,
					   size, array_data,
					   b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}

/* init_ops write command */
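/* Dispatch a single write op by its INIT_WRITE_OP_SOURCE field: an inline
 * immediate, a zero-fill, an array from the FW blob, or the RT shadow array
 * staged earlier via ecore_init_store_rt_reg()/_agg().
 */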
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      struct init_write_op *p_cmd,
					      bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool b_must_dmae;
	u32 addr, data;

	data = OSAL_LE32_TO_CPU(p_cmd->data);
	b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
	addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;

	/* Sanitize */
	if (b_must_dmae && !b_can_dmae) {
		DP_NOTICE(p_hwfn, true,
			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
			  addr);
		return ECORE_INVAL;
	}

	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
	case INIT_SRC_INLINE:
		data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
		ecore_wr(p_hwfn, p_ptt, addr, data);
		break;
	case INIT_SRC_ZEROS:
		data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
		if (b_must_dmae || (b_can_dmae && (data >= 64)))
			rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
		else
			ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
		break;
	case INIT_SRC_ARRAY:
		rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
					  b_must_dmae, b_can_dmae);
		break;
	case INIT_SRC_RUNTIME:
		rc = ecore_init_rt(p_hwfn, p_ptt, addr,
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
				   b_must_dmae);
		break;
	}

	return rc;
}

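/* Comparison callbacks for the read/poll op, matching INIT_POLL_EQ/OR/AND:
 * equality, any bit set in the OR of both values, and all expected bits set.
 */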
static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
{
	return (val == expected_val);
}

static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
{
	return (val & expected_val) == expected_val;
}

static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
{
	return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt, struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = OSAL_LE32_TO_CPU(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay *= 100;
#endif

	val = ecore_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = OSAL_LE32_TO_CPU(cmd->expected_val);
	for (i = 0;
	     i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) {
		OSAL_UDELAY(delay);
		val = ecore_rd(p_hwfn, p_ptt, addr);
	}

	if (i == ECORE_INIT_MAX_POLL_COUNT)
		DP_ERR(p_hwfn,
		       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
		       OSAL_LE32_TO_CPU(cmd->op_data));
}

/* init_ops callbacks entry point */
static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      struct init_callback_op *p_cmd)
{
	DP_NOTICE(p_hwfn, true,
		  "Currently init values have no need of callbacks\n");
}

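/* Recursively evaluate the FW modes-tree expression at *p_offset against the
 * 'modes' bitmask. The tree is stored in prefix form: NOT takes one
 * sub-expression, OR/AND take two, and any other byte is a leaf encoding
 * (mode bit + MAX_INIT_MODE_OPS). E.g. the byte sequence
 * { INIT_MODE_OP_AND, leaf_a, leaf_b } matches only when both mode bits are
 * set in 'modes'.
 */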
static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
				    u16 *p_offset, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	const u8 *modes_tree_buf;
	u8 arg1, arg2, tree_val;

	modes_tree_buf = p_dev->fw_data->modes_tree_buf;
	tree_val = modes_tree_buf[(*p_offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		/* A leaf: test the mode bit it names */
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & (1 << tree_val)) ? 1 : 0;
	}
}

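/* For an IF_MODE op, return the number of ops to skip when the mode
 * condition fails, or 0 to fall through to the next op.
 */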
static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
			       struct init_if_mode_op *p_cmd, int modes)
{
	u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);

	if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
		return 0;
	else
		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
				 INIT_IF_MODE_OP_CMD_OFFSET);
}

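/* Same idea for an IF_PHASE op: skip the given number of ops unless the
 * current phase matches and the phase-id matches (or is ANY_PHASE_ID).
 */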
static u32 ecore_init_cmd_phase(struct ecore_hwfn *p_hwfn,
				struct init_if_phase_op *p_cmd,
				u32 phase, u32 phase_id)
{
	u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);

	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
				 INIT_IF_PHASE_OP_CMD_OFFSET);
	else
		return 0;
}

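/* Init-tool entry point: walk the init_ops array and execute every op that
 * applies to the given phase/phase_id/modes. A typical invocation (the phase
 * arguments here are illustrative) looks like:
 *
 *	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, modes);
 */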
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    int phase, int phase_id, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;

	num_init_ops = p_dev->fw_data->init_ops_size;
	init_ops = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
	p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
					MAX_ZIPPED_SIZE * 4);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
		return ECORE_NOMEM;
	}
#endif

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					       b_dmae);
			break;

		case INIT_OP_READ:
			ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;

		case INIT_OP_IF_MODE:
			cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
						       modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += ecore_init_cmd_phase(p_hwfn, &cmd->if_phase,
							phase, phase_id);
			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* ecore_init_run is always invoked from
			 * sleep-able context
			 */
			OSAL_UDELAY(cmd->delay.delay);
			break;

		case INIT_OP_CALLBACK:
			ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}
#ifdef CONFIG_ECORE_ZIPPED_FW
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
#endif
	return rc;
}

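/* Program the GTT global windows: every non-zero entry of pxp_global_win[]
 * (from ecore_gtt_values.h) is written into the PXP admin window region.
 */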
void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
{
	u32 gtt_base;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		/* This is done by MFW on ASIC; regardless, this should only
		 * be done once per chip [i.e., common]. The implementation is
		 * not too bright, but it should work in the simple FPGA/EMUL
		 * scenarios.
		 */
		static bool initialized;
		int poll_cnt = 500;
		u32 val;

		/* initialize PTT/GTT (poll for completion) */
		if (!initialized) {
			ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
				 PGLUE_B_REG_START_INIT_PTT_GTT, 1);
			initialized = true;
		}

		do {
			/* ptt might be overridden by HW until this is done */
			OSAL_UDELAY(10);
			ecore_ptt_invalidate(p_hwfn);
			val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
				       PGLUE_B_REG_INIT_DONE_PTT_GTT);
		} while ((val != 1) && --poll_cnt);

		if (!poll_cnt)
			DP_ERR(p_hwfn,
			       "PGLUE_B_REG_INIT_DONE didn't complete\n");
	}
#endif

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}

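/* Bind the FW tables to p_dev->fw_data. With CONFIG_ECORE_BINARY_FW the
 * blob starts with one metadata dword followed by a bin_buffer_hdr table
 * whose entries give the offset (and, for init cmds, length) of each
 * section; otherwise the statically linked tables are used as-is.
 */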
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
					const u8 *data)
{
	struct ecore_fw_data *fw = p_dev->fw_data;

#ifdef CONFIG_ECORE_BINARY_FW
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!data) {
		DP_NOTICE(p_dev, true, "Invalid fw data\n");
		return ECORE_INVAL;
	}

	/* First Dword contains metadata and should be skipped */
	buf_hdr = (struct bin_buffer_hdr *)((uintptr_t)(data + sizeof(u32)));

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset));

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)((uintptr_t)(data + offset));

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)((uintptr_t)(data + offset));

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)((uintptr_t)(data + offset));
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
	fw->init_ops = (union init_op *)init_ops;
	fw->arr_data = (u32 *)init_val;
	fw->modes_tree_buf = (u8 *)modes_tree_buf;
	fw->init_ops_size = init_ops_size;
#endif

	return ECORE_SUCCESS;
}
601