xref: /dpdk/drivers/net/qede/base/ecore_hw.c (revision d80e42cce4c7017ed8c99dabb8ae444a492acc1c)
1 /*
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8 
9 #include "bcm_osal.h"
10 #include "ecore_hsi_common.h"
11 #include "ecore_status.h"
12 #include "ecore.h"
13 #include "ecore_hw.h"
14 #include "reg_addr.h"
15 #include "ecore_utils.h"
16 #include "ecore_iov_api.h"
17 
18 #ifndef ASIC_ONLY
19 #define ECORE_EMUL_FACTOR 2000
20 #define ECORE_FPGA_FACTOR 200
21 #endif
22 
23 #define ECORE_BAR_ACQUIRE_TIMEOUT 1000
24 
25 /* Invalid values */
26 #define ECORE_BAR_INVALID_OFFSET	(OSAL_CPU_TO_LE32(-1))
27 
/* A single PF translation table (PTT) entry - one external-BAR window
 * whose GRC mapping can be reprogrammed at runtime.
 */
struct ecore_ptt {
	osal_list_entry_t list_entry;	/* links free entries in the pool */
	unsigned int idx;		/* window index within the BAR */
	struct pxp_ptt_entry pxp;	/* shadow of the HW PTT entry */
	u8 hwfn_id;			/* owning hwfn, for misuse detection */
};
34 
/* Per-hwfn pool of all PTT windows; reserved entries never enter free_list */
struct ecore_ptt_pool {
	osal_list_t free_list;		/* currently unallocated PTT entries */
	osal_spinlock_t lock; /* ptt synchronized access */
	struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};
40 
41 void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
42 {
43 	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
44 	p_hwfn->p_ptt_pool = OSAL_NULL;
45 }
46 
/* Allocate and initialize the per-hwfn PTT pool.
 * All windows start invalidated; entries above the reserved range are
 * placed on the free list for general acquisition.
 * Returns ECORE_SUCCESS, or ECORE_NOMEM on allocation failure.
 */
enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
						   GFP_KERNEL,
						   sizeof(*p_pool));
	int i;

	if (!p_pool)
		return ECORE_NOMEM;

	OSAL_LIST_INIT(&p_pool->free_list);
	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_pool->ptts[i].idx = i;
		/* Mark the window unconfigured until its first use */
		p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
		p_pool->ptts[i].pxp.pretend.control = 0;
		p_pool->ptts[i].hwfn_id = p_hwfn->my_id;

		/* There are special PTT entries that are taken only by design.
		 * The rest are added to the list for general usage.
		 */
		if (i >= RESERVED_PTT_MAX)
			OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
					    &p_pool->free_list);
	}

	p_hwfn->p_ptt_pool = p_pool;
#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) {
		__ecore_ptt_pool_free(p_hwfn);
		return ECORE_NOMEM;
	}
#endif
	OSAL_SPIN_LOCK_INIT(&p_pool->lock);
	return ECORE_SUCCESS;
}
82 
83 void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
84 {
85 	struct ecore_ptt *p_ptt;
86 	int i;
87 
88 	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
89 		p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
90 		p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
91 	}
92 }
93 
/* Tear down the PTT pool, releasing lock resources when lock allocation
 * is compiled in.
 */
void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
#ifdef CONFIG_ECORE_LOCK_ALLOC
	/* Pool may be NULL if allocation failed earlier */
	if (p_hwfn->p_ptt_pool)
		OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
#endif
	__ecore_ptt_pool_free(p_hwfn);
}
102 
/* Acquire a free PTT window from the pool.
 * Polls up to ECORE_BAR_ACQUIRE_TIMEOUT times, sleeping 1ms between
 * attempts; returns OSAL_NULL if no window frees up in time.
 */
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt;
	unsigned int i;

	/* Take the free PTT from the list */
	for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
		OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
		if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) {
			p_ptt = OSAL_LIST_FIRST_ENTRY(
						&p_hwfn->p_ptt_pool->free_list,
						struct ecore_ptt, list_entry);
			OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
					       &p_hwfn->p_ptt_pool->free_list);

			OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);

			DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
				   "allocated ptt %d\n", p_ptt->idx);

			return p_ptt;
		}

		/* Pool exhausted - drop the lock and let a holder release */
		OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
		OSAL_MSLEEP(1);
	}

	DP_NOTICE(p_hwfn, true,
		  "PTT acquire timeout - failed to allocate PTT\n");
	return OSAL_NULL;
}
134 
/* Return a previously acquired PTT window to the free pool. */
void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	/* This PTT should not be set to pretend if it is being released */
	/* TODO - add some pretend sanity checks, to make sure pretend
	 * isn't set on this ptt
	 */

	OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
	OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
}
146 
147 static u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
148 {
149 	/* The HW is using DWORDS and we need to translate it to Bytes */
150 	return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
151 }
152 
153 static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
154 {
155 	return PXP_PF_WINDOW_ADMIN_PER_PF_START +
156 	    p_ptt->idx * sizeof(struct pxp_ptt_entry);
157 }
158 
159 u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
160 {
161 	return PXP_EXTERNAL_BAR_PF_WINDOW_START +
162 	    p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
163 }
164 
/* Program the PTT window to map the given GRC byte address.
 * No-op when the window already maps that address; otherwise updates the
 * shadow copy and writes the new dword offset to the admin window.
 */
void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt, u32 new_hw_addr)
{
	u32 prev_hw_addr;

	prev_hw_addr = ecore_ptt_get_hw_addr(p_ptt);

	if (new_hw_addr == prev_hw_addr)
		return;

	/* Update PTT entry in admin window */
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "Updating PTT entry %d to offset 0x%x\n",
		   p_ptt->idx, new_hw_addr);

	/* The HW is using DWORDS and the address is in Bytes */
	p_ptt->pxp.offset = OSAL_CPU_TO_LE32(new_hw_addr >> 2);

	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, offset),
	       OSAL_LE32_TO_CPU(p_ptt->pxp.offset));
}
188 
/* Return the BAR address through which `hw_addr` can be accessed,
 * sliding the PTT window first when hw_addr falls outside its range.
 */
static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt, u32 hw_addr)
{
	u32 win_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
	u32 offset;

	offset = hw_addr - win_hw_addr;

	/* Cross-hwfn usage indicates a bug in the caller */
	if (p_ptt->hwfn_id != p_hwfn->my_id)
		DP_NOTICE(p_hwfn, true,
			  "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
			  p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);

	/* Verify the address is within the window */
	if (hw_addr < win_hw_addr ||
	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
		ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
		offset = 0;
	}

	return ecore_ptt_get_bar_addr(p_ptt) + offset;
}
211 
212 struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
213 					 enum reserved_ptts ptt_idx)
214 {
215 	if (ptt_idx >= RESERVED_PTT_MAX) {
216 		DP_NOTICE(p_hwfn, true,
217 			  "Requested PTT %d is out of range\n", ptt_idx);
218 		return OSAL_NULL;
219 	}
220 
221 	return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
222 }
223 
/* Check whether the GRC trace 'reg_fifo' is empty, i.e. no register
 * access errors are pending. Only performed when chk_reg_fifo debugging
 * is enabled; otherwise reports empty.
 */
static bool ecore_is_reg_fifo_empty(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	bool is_empty = true;
	u32 bar_addr;

	if (!p_hwfn->p_dev->chk_reg_fifo)
		goto out;

	/* ecore_rd() cannot be used here since it calls this function */
	bar_addr = ecore_set_ptt(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA);
	is_empty = REG_RD(p_hwfn, bar_addr) == 0;

#ifndef ASIC_ONLY
	/* Slow (emulation/FPGA) platforms need time for the access to land */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		OSAL_UDELAY(100);
#endif

out:
	return is_empty;
}
245 
/* Write a 32-bit value to a GRC byte address through the PTT window,
 * repositioning the window if needed. When reg_fifo checking is enabled,
 * warns if this write introduced a new reg_fifo error.
 */
void ecore_wr(struct ecore_hwfn *p_hwfn,
	      struct ecore_ptt *p_ptt, u32 hw_addr, u32 val)
{
	bool prev_fifo_err;
	u32 bar_addr;

	/* Snapshot fifo state so a pre-existing error isn't blamed on us */
	prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);

	bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
	REG_WR(p_hwfn, bar_addr, val);
	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

#ifndef ASIC_ONLY
	/* Slow (emulation/FPGA) platforms need time for the access to land */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		OSAL_UDELAY(100);
#endif

	OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
		  "reg_fifo err was caused by a call to ecore_wr(0x%x, 0x%x)\n",
		  hw_addr, val);
}
269 
/* Read a 32-bit value from a GRC byte address through the PTT window,
 * repositioning the window if needed. When reg_fifo checking is enabled,
 * warns if this read introduced a new reg_fifo error.
 */
u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
{
	bool prev_fifo_err;
	u32 bar_addr, val;

	/* Snapshot fifo state so a pre-existing error isn't blamed on us */
	prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);

	bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
	val = REG_RD(p_hwfn, bar_addr);

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

#ifndef ASIC_ONLY
	/* Slow (emulation/FPGA) platforms need time for the access to land */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		OSAL_UDELAY(100);
#endif

	OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
		  "reg_fifo error was caused by a call to ecore_rd(0x%x)\n",
		  hw_addr);

	return val;
}
295 
/* Copy between host memory and a GRC/BAR region in window-sized chunks.
 * For PFs the PTT window is slid along the HW address; VFs access their
 * BAR directly. Transfers are done in whole dwords - NOTE(review): any
 * remainder of a chunk modulo 4 bytes is skipped (dw_count truncates),
 * so callers are presumably expected to pass dword-multiple sizes -
 * confirm against call sites.
 */
static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt,
			    void *addr,
			    u32 hw_addr, osal_size_t n, bool to_device)
{
	u32 dw_count, *host_addr, hw_offset;
	osal_size_t quota, done = 0;
	u32 OSAL_IOMEM *reg_addr;

	while (done < n) {
		/* Limit each pass to what one PTT window can map */
		quota = OSAL_MIN_T(osal_size_t, n - done,
				   PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

		if (IS_PF(p_hwfn->p_dev)) {
			ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
			hw_offset = ecore_ptt_get_bar_addr(p_ptt);
		} else {
			hw_offset = hw_addr + done;
		}

		dw_count = quota / 4;
		host_addr = (u32 *)((u8 *)addr + done);
		reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);

		if (to_device)
			while (dw_count--)
				DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
		else
			while (dw_count--)
				*host_addr++ = DIRECT_REG_RD(p_hwfn,
							     reg_addr++);

		done += quota;
	}
}
331 
332 void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
333 		       struct ecore_ptt *p_ptt,
334 		       void *dest, u32 hw_addr, osal_size_t n)
335 {
336 	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
337 		   "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
338 		   hw_addr, dest, hw_addr, (unsigned long)n);
339 
340 	ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
341 }
342 
343 void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
344 		     struct ecore_ptt *p_ptt,
345 		     u32 hw_addr, void *src, osal_size_t n)
346 {
347 	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
348 		   "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
349 		   hw_addr, hw_addr, src, (unsigned long)n);
350 
351 	ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
352 }
353 
/* Configure the PTT to pretend to be the given concrete FID, so that
 * accesses through this window are attributed to that function.
 */
void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
		       struct ecore_ptt *p_ptt, u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

/* Every pretend undos prev pretends, including previous port pretend */

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	/* Without a valid VF bit, pretend to the bare PFID only */
	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);

	/* control + fid are pushed to HW as a single dword.
	 * NOTE(review): the u32 cast assumes the pretend struct is exactly
	 * 4 bytes, as laid out by the HSI - confirm against the HSI header.
	 */
	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend),
			*(u32 *)&p_ptt->pxp.pretend);
}
379 
/* Configure the PTT to pretend accesses originate from `port_id`. */
void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u8 port_id)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

	/* Push the whole pretend entry to HW as a single dword */
	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend),
			*(u32 *)&p_ptt->pxp.pretend);
}
395 
/* Cancel any previous port pretend on this PTT. */
void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

	/* Push the whole pretend entry to HW as a single dword */
	REG_WR(p_hwfn,
	       ecore_ptt_config_addr(p_ptt) +
	       OFFSETOF(struct pxp_ptt_entry, pretend),
			*(u32 *)&p_ptt->pxp.pretend);
}
411 
412 u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
413 {
414 	u32 concrete_fid = 0;
415 
416 	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
417 	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
418 	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
419 
420 	return concrete_fid;
421 }
422 
423 /* Not in use @DPDK
424  * Ecore HW lock
425  * =============
426  * Although the implementation is ready, today we don't have any flow that
427  * utliizes said locks - and we want to keep it this way.
428  * If this changes, this needs to be revisted.
429  */
430 
431 /* Ecore DMAE
432  * =============
433  */
434 static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
435 			      const u8 is_src_type_grc,
436 			      const u8 is_dst_type_grc,
437 			      struct ecore_dmae_params *p_params)
438 {
439 	u16 opcode_b = 0;
440 	u32 opcode = 0;
441 
442 	/* Whether the source is the PCIe or the GRC.
443 	 * 0- The source is the PCIe
444 	 * 1- The source is the GRC.
445 	 */
446 	opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
447 		   : DMAE_CMD_SRC_MASK_PCIE) << DMAE_CMD_SRC_SHIFT;
448 	opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
449 	    DMAE_CMD_SRC_PF_ID_SHIFT;
450 
451 	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
452 	opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
453 		   : DMAE_CMD_DST_MASK_PCIE) << DMAE_CMD_DST_SHIFT;
454 	opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
455 	    DMAE_CMD_DST_PF_ID_SHIFT;
456 
457 	/* DMAE_E4_TODO need to check which value to specifiy here. */
458 	/* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT; */
459 
460 	/* Whether to write a completion word to the completion destination:
461 	 * 0-Do not write a completion word
462 	 * 1-Write the completion word
463 	 */
464 	opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
465 	opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
466 
467 	if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
468 		opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;
469 
470 	/* swapping mode 3 - big endian there should be a define ifdefed in
471 	 * the HSI somewhere. Since it is currently
472 	 */
473 	opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;
474 
475 	opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;
476 
477 	/* reset source address in next go */
478 	opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
479 
480 	/* reset dest address in next go */
481 	opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT;
482 
483 	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
484 	if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
485 		opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
486 		opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
487 	} else {
488 		opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
489 			     DMAE_CMD_SRC_VF_ID_SHIFT);
490 	}
491 	if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
492 		opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
493 		opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
494 	} else {
495 		opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
496 	}
497 
498 	p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
499 	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
500 }
501 
502 static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
503 {
504 	OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4);
505 
506 	/* All the DMAE 'go' registers form an array in internal memory */
507 	return DMAE_REG_GO_C0 + (idx << 2);
508 }
509 
/* Validate the prepared DMAE command, copy it into the engine's command
 * memory for this hwfn's channel, and ring the channel's 'go' register.
 * Returns ECORE_INVAL if either address pair is entirely zero.
 */
static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;

	/* verify address is not OSAL_NULL */
	if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
	     ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
		DP_NOTICE(p_hwfn, true,
			  "source or destination address 0 idx_cmd=%d\n"
			  "opcode = [0x%08x,0x%04x] len=0x%x"
			  " src=0x%x:%x dst=0x%x:%x\n",
			  idx_cmd,
			  OSAL_LE32_TO_CPU(p_command->opcode),
			  OSAL_LE16_TO_CPU(p_command->opcode_b),
			  OSAL_LE16_TO_CPU(p_command->length_dw),
			  OSAL_LE32_TO_CPU(p_command->src_addr_hi),
			  OSAL_LE32_TO_CPU(p_command->src_addr_lo),
			  OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
			  OSAL_LE32_TO_CPU(p_command->dst_addr_lo));

		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x]"
		   "len=0x%x src=0x%x:%x dst=0x%x:%x\n",
		   idx_cmd,
		   OSAL_LE32_TO_CPU(p_command->opcode),
		   OSAL_LE16_TO_CPU(p_command->opcode_b),
		   OSAL_LE16_TO_CPU(p_command->length_dw),
		   OSAL_LE32_TO_CPU(p_command->src_addr_hi),
		   OSAL_LE32_TO_CPU(p_command->src_addr_lo),
		   OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
		   OSAL_LE32_TO_CPU(p_command->dst_addr_lo));

	/* Copy the command to DMAE - need to do it before every call
	 * for source/dest address no reset.
	 * The number of commands have been increased to 16 (previous was 14)
	 * The first 9 DWs are the command registers, the 10 DW is the
	 * GO register, and
	 * the rest are result registers (which are read only by the client).
	 */
	for (i = 0; i < DMAE_CMD_SIZE; i++) {
		/* Dwords past the filled portion are written as zero */
		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
		    *(((u32 *)p_command) + i) : 0;

		ecore_wr(p_hwfn, p_ptt,
			 DMAE_REG_CMD_MEM +
			 (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
			 (i * sizeof(u32)), data);
	}

	/* Kick off the transaction */
	ecore_wr(p_hwfn, p_ptt,
		 ecore_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);

	return ecore_status;
}
570 
571 enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
572 {
573 	dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
574 	struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
575 	u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
576 	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
577 
578 	*p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
579 	if (*p_comp == OSAL_NULL) {
580 		DP_NOTICE(p_hwfn, false,
581 			  "Failed to allocate `p_completion_word'\n");
582 		goto err;
583 	}
584 
585 	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
586 	*p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
587 					 sizeof(struct dmae_cmd));
588 	if (*p_cmd == OSAL_NULL) {
589 		DP_NOTICE(p_hwfn, false,
590 			  "Failed to allocate `struct dmae_cmd'\n");
591 		goto err;
592 	}
593 
594 	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
595 	*p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
596 					  sizeof(u32) * DMAE_MAX_RW_SIZE);
597 	if (*p_buff == OSAL_NULL) {
598 		DP_NOTICE(p_hwfn, false,
599 			  "Failed to allocate `intermediate_buffer'\n");
600 		goto err;
601 	}
602 
603 		p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
604 		p_hwfn->dmae_info.b_mem_ready = true;
605 
606 	return ECORE_SUCCESS;
607 err:
608 	ecore_dmae_info_free(p_hwfn);
609 	return ECORE_NOMEM;
610 }
611 
/* Free the DMAE DMA-coherent buffers. Marks the engine not-ready under
 * the DMAE lock first so in-flight users see a consistent state; safe
 * to call with partially allocated buffers (each is checked for NULL).
 */
void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
{
	dma_addr_t p_phys;

	OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
	p_hwfn->dmae_info.b_mem_ready = false;
	OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);

	if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_completion_word,
				       p_phys, sizeof(u32));
		p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
	}

	if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_dmae_cmd,
				       p_phys, sizeof(struct dmae_cmd));
		p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
	}

	if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
		p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_hwfn->dmae_info.p_intermediate_buffer,
				       p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
		p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
	}
}
644 
645 static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
646 {
647 	u32 wait_cnt_limit = 10000, wait_cnt = 0;
648 	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
649 
650 #ifndef ASIC_ONLY
651 	u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
652 		      ECORE_EMUL_FACTOR :
653 		      (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
654 		       ECORE_FPGA_FACTOR : 1));
655 
656 	wait_cnt_limit *= factor;
657 #endif
658 
659 	/* DMAE_E4_TODO : TODO check if we have to call any other function
660 	 * other than BARRIER to sync the completion_word since we are not
661 	 * using the volatile keyword for this
662 	 */
663 	OSAL_BARRIER(p_hwfn->p_dev);
664 	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
665 		OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
666 		if (++wait_cnt > wait_cnt_limit) {
667 			DP_NOTICE(p_hwfn->p_dev, ECORE_MSG_HW,
668 				  "Timed-out waiting for operation to"
669 				  " complete. Completion word is 0x%08x"
670 				  " expected 0x%08x.\n",
671 				  *p_hwfn->dmae_info.p_completion_word,
672 				  DMAE_COMPLETION_VAL);
673 			ecore_status = ECORE_TIMEOUT;
674 			break;
675 		}
676 		/* to sync the completion_word since we are not
677 		 * using the volatile keyword for p_completion_word
678 		 */
679 		OSAL_BARRIER(p_hwfn->p_dev);
680 	}
681 
682 	if (ecore_status == ECORE_SUCCESS)
683 		*p_hwfn->dmae_info.p_completion_word = 0;
684 
685 	return ecore_status;
686 }
687 
688 static enum _ecore_status_t
689 ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
690 				 struct ecore_ptt *p_ptt,
691 				 u64 src_addr,
692 				 u64 dst_addr,
693 				 u8 src_type, u8 dst_type, u32 length_dw)
694 {
695 	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
696 	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
697 	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
698 
699 	switch (src_type) {
700 	case ECORE_DMAE_ADDRESS_GRC:
701 	case ECORE_DMAE_ADDRESS_HOST_PHYS:
702 		cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(src_addr));
703 		cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(src_addr));
704 		break;
705 		/* for virt source addresses we use the intermediate buffer. */
706 	case ECORE_DMAE_ADDRESS_HOST_VIRT:
707 		cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
708 		cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
709 		OSAL_MEMCPY(&p_hwfn->dmae_info.p_intermediate_buffer[0],
710 			    (void *)(osal_uintptr_t)src_addr,
711 			    length_dw * sizeof(u32));
712 		break;
713 	default:
714 		return ECORE_INVAL;
715 	}
716 
717 	switch (dst_type) {
718 	case ECORE_DMAE_ADDRESS_GRC:
719 	case ECORE_DMAE_ADDRESS_HOST_PHYS:
720 		cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(dst_addr));
721 		cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(dst_addr));
722 		break;
723 		/* for virt destination address we use the intermediate buff. */
724 	case ECORE_DMAE_ADDRESS_HOST_VIRT:
725 		cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
726 		cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
727 		break;
728 	default:
729 		return ECORE_INVAL;
730 	}
731 
732 	cmd->length_dw = OSAL_CPU_TO_LE16((u16)length_dw);
733 
734 	if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
735 	    src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
736 		OSAL_DMA_SYNC(p_hwfn->p_dev,
737 			      (void *)HILO_U64(cmd->src_addr_hi,
738 					       cmd->src_addr_lo),
739 			      length_dw * sizeof(u32), false);
740 
741 	ecore_dmae_post_command(p_hwfn, p_ptt);
742 
743 	ecore_status = ecore_dmae_operation_wait(p_hwfn);
744 
745 	/* TODO - is it true ? */
746 	if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
747 	    src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
748 		OSAL_DMA_SYNC(p_hwfn->p_dev,
749 			      (void *)HILO_U64(cmd->src_addr_hi,
750 					       cmd->src_addr_lo),
751 			      length_dw * sizeof(u32), true);
752 
753 	if (ecore_status != ECORE_SUCCESS) {
754 		DP_NOTICE(p_hwfn, ECORE_MSG_HW,
755 			  "Wait Failed. source_addr 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x, intermediate buffer 0x%lx.\n",
756 			  (unsigned long)src_addr, (unsigned long)dst_addr,
757 			  length_dw,
758 			  (unsigned long)p_hwfn->dmae_info.intermediate_buffer_phys_addr);
759 		return ecore_status;
760 	}
761 
762 	if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
763 		OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
764 			    &p_hwfn->dmae_info.p_intermediate_buffer[0],
765 			    length_dw * sizeof(u32));
766 
767 	return ECORE_SUCCESS;
768 }
769 
/* Split a DMAE transfer into DMAE_MAX_RW_SIZE-dword sub-operations and
 * execute them sequentially. Must be called with dmae_info.lock held.
 * Returns ECORE_NOMEM if the DMAE buffers were never allocated,
 * ECORE_SUCCESS when recovery is in progress (transfer skipped by
 * design), otherwise the status of the first failing sub-operation.
 */
static enum _ecore_status_t
ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   u64 src_addr,
			   u64 dst_addr,
			   u8 src_type,
			   u8 dst_type,
			   u32 size_in_dwords,
			   struct ecore_dmae_params *p_params)
{
	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	u64 src_addr_split = 0, dst_addr_split = 0;
	u16 length_limit = DMAE_MAX_RW_SIZE;
	enum _ecore_status_t ecore_status = ECORE_SUCCESS;
	u32 offset = 0;

	if (!p_hwfn->dmae_info.b_mem_ready) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "No buffers allocated. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
			   (unsigned long)src_addr, src_type,
			   (unsigned long)dst_addr, dst_type,
			   size_in_dwords);
		return ECORE_NOMEM;
	}

	if (p_hwfn->p_dev->recov_in_prog) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
			   "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
			   (unsigned long)src_addr, src_type,
			   (unsigned long)dst_addr, dst_type,
			   size_in_dwords);
		/* Return success to let the flow to be completed successfully
		 * w/o any error handling.
		 */
		return ECORE_SUCCESS;
	}

	if (!cmd) {
		DP_NOTICE(p_hwfn, true,
			  "ecore_dmae_execute_sub_operation failed. Invalid state. source_addr 0x%lx, destination addr 0x%lx, size_in_dwords 0x%x\n",
			  (unsigned long)src_addr,
			  (unsigned long)dst_addr,
			  length_cur);
		return ECORE_INVAL;
	}

	ecore_dmae_opcode(p_hwfn,
			  (src_type == ECORE_DMAE_ADDRESS_GRC),
			  (dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);

	cmd->comp_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
	cmd->comp_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
	cmd->comp_val = OSAL_CPU_TO_LE32(DMAE_COMPLETION_VAL);

	/* Check if the grc_addr is valid like < MAX_GRC_OFFSET */
	cnt_split = size_in_dwords / length_limit;
	length_mod = size_in_dwords % length_limit;

	src_addr_split = src_addr;
	dst_addr_split = dst_addr;

	for (i = 0; i <= cnt_split; i++) {
		offset = length_limit * i;

		/* GRC offsets advance in dwords; host addresses in bytes */
		if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
			if (src_type == ECORE_DMAE_ADDRESS_GRC)
				src_addr_split = src_addr + offset;
			else
				src_addr_split = src_addr + (offset * 4);
		}

		if (dst_type == ECORE_DMAE_ADDRESS_GRC)
			dst_addr_split = dst_addr + offset;
		else
			dst_addr_split = dst_addr + (offset * 4);

		length_cur = (cnt_split == i) ? length_mod : length_limit;

		/* might be zero on last iteration */
		if (!length_cur)
			continue;

		ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
								p_ptt,
								src_addr_split,
								dst_addr_split,
								src_type,
								dst_type,
								length_cur);
		if (ecore_status != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, false,
				  "ecore_dmae_execute_sub_operation Failed"
				  " with error 0x%x. source_addr 0x%lx,"
				  " dest addr 0x%lx, size_in_dwords 0x%x\n",
				  ecore_status, (unsigned long)src_addr,
				  (unsigned long)dst_addr, length_cur);

			ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
			break;
		}
	}

	return ecore_status;
}
876 
877 enum _ecore_status_t
878 ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
879 		    struct ecore_ptt *p_ptt,
880 		    u64 source_addr,
881 		    u32 grc_addr, u32 size_in_dwords, u32 flags)
882 {
883 	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
884 	struct ecore_dmae_params params;
885 	enum _ecore_status_t rc;
886 
887 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
888 	params.flags = flags;
889 
890 	OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
891 
892 	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
893 					grc_addr_in_dw,
894 					ECORE_DMAE_ADDRESS_HOST_VIRT,
895 					ECORE_DMAE_ADDRESS_GRC,
896 					size_in_dwords, &params);
897 
898 	OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
899 
900 	return rc;
901 }
902 
903 enum _ecore_status_t
904 ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
905 		    struct ecore_ptt *p_ptt,
906 		    u32 grc_addr,
907 		    dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
908 {
909 	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
910 	struct ecore_dmae_params params;
911 	enum _ecore_status_t rc;
912 
913 	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
914 	params.flags = flags;
915 
916 	OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
917 
918 	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
919 					dest_addr, ECORE_DMAE_ADDRESS_GRC,
920 					ECORE_DMAE_ADDRESS_HOST_VIRT,
921 					size_in_dwords, &params);
922 
923 	OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
924 
925 	return rc;
926 }
927 
928 enum _ecore_status_t
929 ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
930 		     struct ecore_ptt *p_ptt,
931 		     dma_addr_t source_addr,
932 		     dma_addr_t dest_addr,
933 		     u32 size_in_dwords, struct ecore_dmae_params *p_params)
934 {
935 	enum _ecore_status_t rc;
936 
937 	OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
938 
939 	rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
940 					dest_addr,
941 					ECORE_DMAE_ADDRESS_HOST_PHYS,
942 					ECORE_DMAE_ADDRESS_HOST_PHYS,
943 					size_in_dwords, p_params);
944 
945 	OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
946 
947 	return rc;
948 }
949 
950 void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
951 			 enum ecore_hw_err_type err_type)
952 {
953 	/* Fan failure cannot be masked by handling of another HW error */
954 	if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
955 		DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
956 			   "Recovery is in progress."
957 			   "Avoid notifying about HW error %d.\n",
958 			   err_type);
959 		return;
960 	}
961 
962 	OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
963 }
964 
/* DMAE sanity test: fill the bottom half of a DMA-coherent buffer with a
 * known pattern, DMA it host-to-host into the top half, and verify the
 * copy. `phase` only tags the log messages. Returns ECORE_SUCCESS,
 * ECORE_NOMEM on allocation failure, the host2host status on transfer
 * failure, or ECORE_UNKNOWN_ERROR on a data mismatch.
 */
enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       const char *phase)
{
	u32 size = OSAL_PAGE_SIZE / 2, val;
	struct ecore_dmae_params params;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	dma_addr_t p_phys;
	void *p_virt;
	u32 *p_tmp;

	p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, 2 * size);
	if (!p_virt) {
		DP_NOTICE(p_hwfn, false,
			  "DMAE sanity [%s]: failed to allocate memory\n",
			  phase);
		return ECORE_NOMEM;
	}

	/* Fill the bottom half of the allocated memory with a known pattern */
	for (p_tmp = (u32 *)p_virt;
	     p_tmp < (u32 *)((u8 *)p_virt + size);
	     p_tmp++) {
		/* Save the address itself as the value */
		val = (u32)(osal_uintptr_t)p_tmp;
		*p_tmp = val;
	}

	/* Zero the top half of the allocated memory */
	OSAL_MEM_ZERO((u8 *)p_virt + size, size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "DMAE sanity [%s]: src_addr={phys 0x%lx, virt %p}, dst_addr={phys 0x%lx, virt %p}, size 0x%x\n",
		   phase, (unsigned long)p_phys, p_virt,
		   (unsigned long)(p_phys + size),
		   (u8 *)p_virt + size, size);

	OSAL_MEMSET(&params, 0, sizeof(params));
	rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
				  size / 4 /* size_in_dwords */, &params);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false,
			  "DMAE sanity [%s]: ecore_dmae_host2host() failed. rc = %d.\n",
			  phase, rc);
		goto out;
	}

	/* Verify that the top half of the allocated memory has the pattern */
	for (p_tmp = (u32 *)((u8 *)p_virt + size);
	     p_tmp < (u32 *)((u8 *)p_virt + (2 * size));
	     p_tmp++) {
		/* The corresponding address in the bottom half */
		val = (u32)(osal_uintptr_t)p_tmp - size;

		if (*p_tmp != val) {
			DP_NOTICE(p_hwfn, false,
				  "DMAE sanity [%s]: addr={phys 0x%lx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
				  phase,
				  (unsigned long)p_phys +
				   ((u8 *)p_tmp - (u8 *)p_virt),
				  p_tmp, *p_tmp, val);
			rc = ECORE_UNKNOWN_ERROR;
			goto out;
		}
	}

out:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size);
	return rc;
}
1035