xref: /dpdk/drivers/net/qede/base/ecore_init_ops.c (revision 14ad4f01845331a0ae98c681efa3086eeed3343a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6 
7 /* include the precompiled configuration values - only once */
8 #include "bcm_osal.h"
9 #include "ecore_hsi_common.h"
10 #include "ecore.h"
11 #include "ecore_hw.h"
12 #include "ecore_status.h"
13 #include "ecore_rt_defs.h"
14 #include "ecore_init_fw_funcs.h"
15 
16 #include "ecore_iro_values.h"
17 #include "ecore_sriov.h"
18 #include "ecore_gtt_values.h"
19 #include "reg_addr.h"
20 #include "ecore_init_ops.h"
21 
22 #define ECORE_INIT_MAX_POLL_COUNT	100
23 #define ECORE_INIT_POLL_PERIOD_US	500
24 
/* Point the device at the firmware-generated IRO (internal RAM offsets)
 * array (see ecore_iro_values.h); consumed by the storm-RAM accessors.
 */
void ecore_init_iro_array(struct ecore_dev *p_dev)
{
	p_dev->iro_arr = iro_arr;
}
29 
30 /* Runtime configuration helpers */
31 void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
32 {
33 	int i;
34 
35 	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
36 		p_hwfn->rt_data.b_valid[i] = false;
37 }
38 
39 void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
40 {
41 	if (rt_offset >= RUNTIME_ARRAY_SIZE) {
42 		DP_ERR(p_hwfn,
43 		       "Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n",
44 		       val, rt_offset, RUNTIME_ARRAY_SIZE);
45 		return;
46 	}
47 
48 	p_hwfn->rt_data.init_val[rt_offset] = val;
49 	p_hwfn->rt_data.b_valid[rt_offset] = true;
50 }
51 
52 void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
53 			     u32 rt_offset, u32 *p_val, osal_size_t size)
54 {
55 	osal_size_t i;
56 
57 	if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
58 		DP_ERR(p_hwfn,
59 		       "Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n",
60 		       rt_offset, (u32)(rt_offset + size - 1),
61 		       RUNTIME_ARRAY_SIZE);
62 		return;
63 	}
64 
65 	for (i = 0; i < size / sizeof(u32); i++) {
66 		p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
67 		p_hwfn->rt_data.b_valid[rt_offset + i] = true;
68 	}
69 }
70 
/* Flush the valid runtime-array entries in [rt_offset, rt_offset + size)
 * to the chip at GRC address @addr. When @b_must_dmae is set (wide-bus
 * target), contiguous runs of valid entries are pushed as single DMAE
 * transactions; otherwise each valid entry is written directly.
 */
static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 addr,
					  u16 rt_offset,
					  u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			continue;
		}

		/* Start of a new segment - count the consecutive valid
		 * entries beginning at i.
		 */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_init_val + i),
					 addr + (i << 2), segment,
					 OSAL_NULL /* default parameters */);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Jump over the entire segment, including invalid entry;
		 * the loop's own i++ then lands just past it.
		 */
		i += segment;
	}

	return rc;
}
115 
116 enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
117 {
118 	struct ecore_rt_data *rt_data = &p_hwfn->rt_data;
119 
120 	if (IS_VF(p_hwfn->p_dev))
121 		return ECORE_SUCCESS;
122 
123 	rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
124 				       sizeof(bool) * RUNTIME_ARRAY_SIZE);
125 	if (!rt_data->b_valid)
126 		return ECORE_NOMEM;
127 
128 	rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
129 					sizeof(u32) * RUNTIME_ARRAY_SIZE);
130 	if (!rt_data->init_val) {
131 		OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
132 		return ECORE_NOMEM;
133 	}
134 
135 	return ECORE_SUCCESS;
136 }
137 
/* Release the runtime-array buffers allocated by ecore_init_alloc(). */
void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
}
143 
/* Write @size dwords from @p_buf (starting at dword index
 * @dmae_data_offset) to GRC address @addr. Short sections - and anything
 * on slow emulation/FPGA chips - are written register-by-register; longer
 * sections, or wide-bus targets, go through DMAE when @b_can_dmae allows.
 */
static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  u32 addr,
						  u32 dmae_data_offset,
						  u32 size, const u32 *p_buf,
						  bool b_must_dmae,
						  bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
	if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
	    !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
		const u32 *data = p_buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_buf +
							  dmae_data_offset),
					 addr, size,
					 OSAL_NULL /* default parameters */);
	}

	return rc;
}
176 
177 static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
178 						 struct ecore_ptt *p_ptt,
179 						 u32 addr, u32 fill_count)
180 {
181 	static u32 zero_buffer[DMAE_MAX_RW_SIZE];
182 	struct ecore_dmae_params params;
183 
184 	OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
185 
186 	OSAL_MEMSET(&params, 0, sizeof(params));
187 	params.flags = ECORE_DMAE_FLAG_RW_REPL_SRC;
188 	return ecore_dmae_host2grc(p_hwfn, p_ptt,
189 				   (osal_uintptr_t)&zero_buffer[0],
190 				   addr, fill_count, &params);
191 }
192 
193 static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
194 			    struct ecore_ptt *p_ptt,
195 			    u32 addr, u32 fill, u32 fill_count)
196 {
197 	u32 i;
198 
199 	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
200 		ecore_wr(p_hwfn, p_ptt, addr, fill);
201 }
202 
/* Handle an INIT_SRC_ARRAY write command: the array header (first dword at
 * the array offset inside the firmware's arr_data buffer) selects between
 * a zipped blob, a repeated pattern, or a standard dword array.
 */
static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 struct init_write_op *cmd,
						 bool b_must_dmae,
						 bool b_can_dmae)
{
	u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
	u32 data = OSAL_LE32_TO_CPU(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
	u32 offset, output_len, input_len, max_size;
#endif
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 size;

	array_data = p_dev->fw_data->arr_data;

	hdr = (union init_array_hdr *)
		(uintptr_t)(array_data + dmae_array_offset);
	data = OSAL_LE32_TO_CPU(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
		/* Decompress into the pre-allocated unzip buffer (set up by
		 * ecore_init_run), then write the plain data.
		 */
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);

		output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
				(u8 *)(uintptr_t)&array_data[offset],
				max_size,
				(u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						   output_len,
						   p_hwfn->unzip_buf,
						   b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n");
			rc = ECORE_INVAL;
		}
#else
		DP_NOTICE(p_hwfn, true,
			  "Using zipped firmware without config enabled\n");
		rc = ECORE_INVAL;
#endif
		break;
	case INIT_ARR_PATTERN:
		{
			u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
			u32 i;

			size = GET_FIELD(data,
					 INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

			/* Write the same pattern @repeats times, advancing
			 * the destination by the pattern size each time.
			 */
			for (i = 0; i < repeats; i++, addr += size << 2) {
				rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
							   dmae_array_offset +
							   1, size, array_data,
							   b_must_dmae,
							   b_can_dmae);
				if (rc)
					break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
					   dmae_array_offset + 1,
					   size, array_data,
					   b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}
284 
285 /* init_ops write command */
286 static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
287 					      struct ecore_ptt *p_ptt,
288 					      struct init_write_op *p_cmd,
289 					      bool b_can_dmae)
290 {
291 	u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
292 	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
293 	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
294 	enum _ecore_status_t rc = ECORE_SUCCESS;
295 
296 	/* Sanitize */
297 	if (b_must_dmae && !b_can_dmae) {
298 		DP_NOTICE(p_hwfn, true,
299 			  "Need to write to %08x for Wide-bus but DMAE isn't"
300 			  " allowed\n",
301 			  addr);
302 		return ECORE_INVAL;
303 	}
304 
305 	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
306 	case INIT_SRC_INLINE:
307 		data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
308 		ecore_wr(p_hwfn, p_ptt, addr, data);
309 		break;
310 	case INIT_SRC_ZEROS:
311 		data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
312 		if (b_must_dmae || (b_can_dmae && (data >= 64)))
313 			rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
314 		else
315 			ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
316 		break;
317 	case INIT_SRC_ARRAY:
318 		rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
319 					  b_must_dmae, b_can_dmae);
320 		break;
321 	case INIT_SRC_RUNTIME:
322 		rc = ecore_init_rt(p_hwfn, p_ptt, addr,
323 				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
324 				   OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
325 				   b_must_dmae);
326 		break;
327 	}
328 
329 	return rc;
330 }
331 
332 static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
333 {
334 	return (val == expected_val);
335 }
336 
337 static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
338 {
339 	return (val & expected_val) == expected_val;
340 }
341 
342 static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
343 {
344 	return (val | expected_val) > 0;
345 }
346 
/* init_ops read/poll commands: read a register and, unless the op asks for
 * no polling, re-read up to ECORE_INIT_MAX_POLL_COUNT times until the
 * value satisfies the op's comparison against its expected value. A
 * timeout is logged but not propagated - init continues.
 */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt, struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = OSAL_LE32_TO_CPU(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

#ifndef ASIC_ONLY
	/* Emulation runs far slower than silicon - stretch the poll period */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay *= 100;
#endif

	val = ecore_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	/* Select the comparison matching the op's poll type */
	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = OSAL_LE32_TO_CPU(cmd->expected_val);
	for (i = 0;
	     i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) {
		OSAL_UDELAY(delay);
		val = ecore_rd(p_hwfn, p_ptt, addr);
	}

	if (i == ECORE_INIT_MAX_POLL_COUNT)
		DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
		       OSAL_LE32_TO_CPU(cmd->op_data));
}
398 
399 /* init_ops callbacks entry point */
400 static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
401 					      struct ecore_ptt *p_ptt,
402 					      struct init_callback_op *p_cmd)
403 {
404 	enum _ecore_status_t rc;
405 
406 	switch (p_cmd->callback_id) {
407 	case DMAE_READY_CB:
408 		rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
409 		break;
410 	default:
411 		DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n",
412 			  p_cmd->callback_id);
413 		return ECORE_INVAL;
414 	}
415 
416 	return rc;
417 }
418 
/* Recursively evaluate the modes-tree expression at *p_offset against the
 * @modes bitmask; returns 1 on match, 0 otherwise. *p_offset is advanced
 * past the consumed sub-expression. NOTE: for OR/AND, BOTH operands must
 * be evaluated (no short-circuit) so the offset ends up past the whole
 * expression - hence the bitwise '|' and '&' below.
 */
static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
				    u16 *p_offset, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u8 arg1, arg2, tree_val;
	const u8 *modes_tree;

	modes_tree = p_dev->fw_data->modes_tree_buf;
	tree_val = modes_tree[(*p_offset)++];
	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
	case INIT_MODE_OP_OR:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 | arg2;
	case INIT_MODE_OP_AND:
		arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
		return arg1 & arg2;
	default:
		/* Leaf node: values beyond the operator range encode a mode
		 * bit index, biased by MAX_INIT_MODE_OPS.
		 */
		tree_val -= MAX_INIT_MODE_OPS;
		return (modes & (1 << tree_val)) ? 1 : 0;
	}
}
444 
445 static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
446 			       struct init_if_mode_op *p_cmd, int modes)
447 {
448 	u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);
449 
450 	if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
451 		return 0;
452 	else
453 		return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
454 				 INIT_IF_MODE_OP_CMD_OFFSET);
455 }
456 
457 static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
458 				u32 phase, u32 phase_id)
459 {
460 	u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
461 	u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data);
462 
463 	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
464 	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
465 	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
466 		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
467 	else
468 		return 0;
469 }
470 
/* Execute the firmware's init-ops program for @phase/@phase_id under the
 * @modes bitmask: walk the init_ops command list and dispatch writes,
 * reads/polls, conditional skips (IF_MODE/IF_PHASE), delays and
 * callbacks. Stops on the first command that returns an error.
 */
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    int phase, int phase_id, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cmd_num, num_init_ops;
	union init_op *init;
	bool b_dmae = false;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	num_init_ops = p_dev->fw_data->init_ops_size;
	init = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
	/* Scratch buffer used by ecore_init_cmd_array to decompress
	 * zipped array sections.
	 */
	p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
					MAX_ZIPPED_SIZE * 4);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
		return ECORE_NOMEM;
	}
#endif

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init[cmd_num];
		u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					       b_dmae);
			break;

		case INIT_OP_READ:
			ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;

		case INIT_OP_IF_MODE:
			/* Advances cmd_num past commands guarded by an
			 * unmatched modes expression (0 on a match).
			 */
			cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
						       modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
							phase_id);
			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* ecore_init_run is always invoked from
			 * sleep-able context
			 */
			OSAL_UDELAY(cmd->delay.delay);
			break;

		case INIT_OP_CALLBACK:
			rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}
#ifdef CONFIG_ECORE_ZIPPED_FW
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
#endif
	return rc;
}
536 
/* Program the GTT global windows. On emulation/FPGA the PTT/GTT init that
 * MFW normally performs on ASIC is triggered here first (once per chip)
 * and polled for completion.
 */
void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt)
{
	u32 gtt_base;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		/* This is done by MFW on ASIC; regardless, this should only
		 * be done once per chip [i.e., common]. Implementation is
		 * not too bright, but it should work on the simple FPGA/EMUL
		 * scenarios.
		 */
		static bool initialized;
		int poll_cnt = 500;
		u32 val;

		/* initialize PTT/GTT (poll for completion) */
		if (!initialized) {
			ecore_wr(p_hwfn, p_ptt,
				 PGLUE_B_REG_START_INIT_PTT_GTT, 1);
			initialized = true;
		}

		do {
			/* ptt might be overridden by HW until this is done */
			OSAL_UDELAY(10);
			ecore_ptt_invalidate(p_hwfn);
			val = ecore_rd(p_hwfn, p_ptt,
				       PGLUE_B_REG_INIT_DONE_PTT_GTT);
		} while ((val != 1) && --poll_cnt);

		if (!poll_cnt)
			DP_ERR(p_hwfn,
			       "PGLUE_B_REG_INIT_DONE didn't complete\n");
	}
#endif

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}
583 
/* Wire the firmware tables into p_dev->fw_data. With a binary firmware
 * blob (CONFIG_ECORE_BINARY_FW), parse the bin_buffer_hdr index in
 * @fw_data to locate the version info, init commands, value arrays and
 * modes tree; otherwise point at the statically linked tables.
 */
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
#ifdef CONFIG_ECORE_BINARY_FW
					const u8 *fw_data)
#else
					const u8 OSAL_UNUSED * fw_data)
#endif
{
	struct ecore_fw_data *fw = p_dev->fw_data;

#ifdef CONFIG_ECORE_BINARY_FW
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!fw_data) {
		DP_NOTICE(p_dev, true, "Invalid fw data\n");
		return ECORE_INVAL;
	}

	/* The blob starts with an array of headers indexed by BIN_BUF_* */
	buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)fw_data;

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));
	/* Command count is derived from the section's byte length */
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
	fw->init_ops = (union init_op *)init_ops;
	fw->arr_data = (u32 *)init_val;
	fw->modes_tree_buf = (u8 *)modes_tree_buf;
	fw->init_ops_size = init_ops_size;
#endif

	return ECORE_SUCCESS;
}
626