/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore_status.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "reg_addr.h"
#include "ecore_utils.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_values.h"
#include "ecore_dev_api.h"

#ifndef ASIC_ONLY
#define ECORE_EMUL_FACTOR 2000
#define ECORE_FPGA_FACTOR 200
#endif

#define ECORE_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define ECORE_BAR_INVALID_OFFSET (OSAL_CPU_TO_LE32(-1))

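/* Each PTT entry controls one sliding window in the external PF BAR that is
 * mapped onto an internal (GRC) address range; ecore_set_ptt() below moves
 * the window whenever an access falls outside of it. The expansion of "PTT"
 * as "PF Translation Table" is an assumption - it is not spelled out here.
 */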
struct ecore_ptt {
        osal_list_entry_t list_entry;
        unsigned int idx;
        struct pxp_ptt_entry pxp;
        u8 hwfn_id;
};

struct ecore_ptt_pool {
        osal_list_t free_list;
        osal_spinlock_t lock; /* ptt synchronized access */
        struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
        p_hwfn->p_ptt_pool = OSAL_NULL;
}

enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
                                                   GFP_KERNEL,
                                                   sizeof(*p_pool));
        int i;

        if (!p_pool)
                return ECORE_NOMEM;

        OSAL_LIST_INIT(&p_pool->free_list);
        for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
                p_pool->ptts[i].idx = i;
                p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
                p_pool->ptts[i].pxp.pretend.control = 0;
                p_pool->ptts[i].hwfn_id = p_hwfn->my_id;

                /* There are special PTT entries that are taken only by design.
                 * The rest are added to the list for general usage.
                 */
                if (i >= RESERVED_PTT_MAX)
                        OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
                                            &p_pool->free_list);
        }

        p_hwfn->p_ptt_pool = p_pool;
#ifdef CONFIG_ECORE_LOCK_ALLOC
        if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) {
                __ecore_ptt_pool_free(p_hwfn);
                return ECORE_NOMEM;
        }
#endif
        OSAL_SPIN_LOCK_INIT(&p_pool->lock);
        return ECORE_SUCCESS;
}

void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
{
        u32 gtt_base;
        u32 i;

        /* Set the global windows */
        gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

        for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
                if (pxp_global_win[i])
                        REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
                               pxp_global_win[i]);
}

void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
{
        struct ecore_ptt *p_ptt;
        int i;

        for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
                p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
                p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
        }
}

void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
#ifdef CONFIG_ECORE_LOCK_ALLOC
        if (p_hwfn->p_ptt_pool)
                OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
#endif
        __ecore_ptt_pool_free(p_hwfn);
}

struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
{
        struct ecore_ptt *p_ptt;
        unsigned int i;

        /* Take the free PTT from the list */
        for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
                OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
                if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) {
                        p_ptt = OSAL_LIST_FIRST_ENTRY(
                                        &p_hwfn->p_ptt_pool->free_list,
                                        struct ecore_ptt, list_entry);
                        OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
                                               &p_hwfn->p_ptt_pool->free_list);

                        OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);

                        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                                   "allocated ptt %d\n", p_ptt->idx);

                        return p_ptt;
                }

                OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
                OSAL_MSLEEP(1);
        }

        DP_NOTICE(p_hwfn, true,
                  "PTT acquire timeout - failed to allocate PTT\n");
        return OSAL_NULL;
}

void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        /* This PTT should not be set to pretend if it is being released */
        /* TODO - add some pretend sanity checks, to make sure pretend
         * isn't set on this ptt
         */

        OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
        OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
        OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
}

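/* Usage sketch for the acquire/release pair above - ecore_ptt_acquire()
 * returns OSAL_NULL on timeout, so the result must be checked:
 *
 *        struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
 *        u32 val;
 *
 *        if (p_ptt != OSAL_NULL) {
 *                val = ecore_rd(p_hwfn, p_ptt, hw_addr);
 *                ecore_ptt_release(p_hwfn, p_ptt);
 *        }
 */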
static u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
{
        /* The HW is using dwords; translate the offset to bytes */
        return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
}

static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
{
        return PXP_PF_WINDOW_ADMIN_PER_PF_START +
               p_ptt->idx * sizeof(struct pxp_ptt_entry);
}

u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
{
        return PXP_EXTERNAL_BAR_PF_WINDOW_START +
               p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
                       struct ecore_ptt *p_ptt, u32 new_hw_addr)
{
        u32 prev_hw_addr;

        prev_hw_addr = ecore_ptt_get_hw_addr(p_ptt);

        if (new_hw_addr == prev_hw_addr)
                return;

        /* Update the PTT entry in the admin window */
        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "Updating PTT entry %d to offset 0x%x\n",
                   p_ptt->idx, new_hw_addr);

        /* The HW is using dwords and the address is in bytes */
        p_ptt->pxp.offset = OSAL_CPU_TO_LE32(new_hw_addr >> 2);

        REG_WR(p_hwfn,
               ecore_ptt_config_addr(p_ptt) +
               OFFSETOF(struct pxp_ptt_entry, offset),
               OSAL_LE32_TO_CPU(p_ptt->pxp.offset));
}

static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
                         struct ecore_ptt *p_ptt, u32 hw_addr)
{
        u32 win_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
        u32 offset;

        offset = hw_addr - win_hw_addr;

        if (p_ptt->hwfn_id != p_hwfn->my_id)
                DP_NOTICE(p_hwfn, true,
                          "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
                          p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);

        /* Verify that the address is within the window */
        if (hw_addr < win_hw_addr ||
            offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
                ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
                offset = 0;
        }

        return ecore_ptt_get_bar_addr(p_ptt) + offset;
}

struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
                                         enum reserved_ptts ptt_idx)
{
        if (ptt_idx >= RESERVED_PTT_MAX) {
                DP_NOTICE(p_hwfn, true,
                          "Requested PTT %d is out of range\n", ptt_idx);
                return OSAL_NULL;
        }

        return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

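/* A reserved PTT is fetched rather than acquired; e.g. (sketch, assuming
 * the RESERVED_PTT_MAIN enumerator from enum reserved_ptts):
 *
 *        struct ecore_ptt *p_main_ptt =
 *                ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
 */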
static bool ecore_is_reg_fifo_empty(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt)
{
        bool is_empty = true;
        u32 bar_addr;

        if (!p_hwfn->p_dev->chk_reg_fifo)
                goto out;

        /* ecore_rd() cannot be used here since it calls this function */
        bar_addr = ecore_set_ptt(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA);
        is_empty = REG_RD(p_hwfn, bar_addr) == 0;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
                OSAL_UDELAY(100);
#endif

out:
        return is_empty;
}

void ecore_wr(struct ecore_hwfn *p_hwfn,
              struct ecore_ptt *p_ptt, u32 hw_addr, u32 val)
{
        bool prev_fifo_err;
        u32 bar_addr;

        prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);

        bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
        REG_WR(p_hwfn, bar_addr, val);
        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
                   bar_addr, hw_addr, val);

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
                OSAL_UDELAY(100);
#endif

        OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
                  "reg_fifo err was caused by a call to ecore_wr(0x%x, 0x%x)\n",
                  hw_addr, val);
}

u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
{
        bool prev_fifo_err;
        u32 bar_addr, val;

        prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);

        bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
        val = REG_RD(p_hwfn, bar_addr);

        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
                   bar_addr, hw_addr, val);

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
                OSAL_UDELAY(100);
#endif

        OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
                  "reg_fifo error was caused by a call to ecore_rd(0x%x)\n",
                  hw_addr);

        return val;
}

static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
                            struct ecore_ptt *p_ptt,
                            void *addr,
                            u32 hw_addr, osal_size_t n, bool to_device)
{
        u32 dw_count, *host_addr, hw_offset;
        osal_size_t quota, done = 0;
        u32 OSAL_IOMEM *reg_addr;

        while (done < n) {
                quota = OSAL_MIN_T(osal_size_t, n - done,
                                   PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

                if (IS_PF(p_hwfn->p_dev)) {
                        ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
                        hw_offset = ecore_ptt_get_bar_addr(p_ptt);
                } else {
                        hw_offset = hw_addr + done;
                }

                dw_count = quota / 4;
                host_addr = (u32 *)((u8 *)addr + done);
                reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);

                if (to_device)
                        while (dw_count--)
                                DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
                else
                        while (dw_count--)
                                *host_addr++ = DIRECT_REG_RD(p_hwfn,
                                                             reg_addr++);

                done += quota;
        }
}

void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
                       struct ecore_ptt *p_ptt,
                       void *dest, u32 hw_addr, osal_size_t n)
{
        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "hw_addr 0x%x, dest %p, size %lu\n",
                   hw_addr, dest, (unsigned long)n);

        ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}

void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
                     struct ecore_ptt *p_ptt,
                     u32 hw_addr, void *src, osal_size_t n)
{
        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "hw_addr 0x%x, src %p, size %lu\n",
                   hw_addr, src, (unsigned long)n);

        ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}

void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
                       struct ecore_ptt *p_ptt, u16 fid)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

        /* Every pretend undoes previous pretends, including
         * a previous port pretend.
         */
        SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
                fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

        p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
        p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);

        REG_WR(p_hwfn,
               ecore_ptt_config_addr(p_ptt) +
               OFFSETOF(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
                        struct ecore_ptt *p_ptt, u8 port_id)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
        p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

        REG_WR(p_hwfn,
               ecore_ptt_config_addr(p_ptt) +
               OFFSETOF(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

        REG_WR(p_hwfn,
               ecore_ptt_config_addr(p_ptt) +
               OFFSETOF(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

void ecore_port_fid_pretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                            u8 port_id, u16 fid)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

        if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
                fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

        p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
        p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);

        REG_WR(p_hwfn,
               ecore_ptt_config_addr(p_ptt) +
               OFFSETOF(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
{
        u32 concrete_fid = 0;

        SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
        SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
        SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

        return concrete_fid;
}

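/* Usage sketch (mirroring ecore_ppfid_wr()/ecore_ppfid_rd() below): pretend
 * to a VF's concrete FID, perform the access, then restore the PF's own FID:
 *
 *        ecore_fid_pretend(p_hwfn, p_ptt,
 *                          (u16)ecore_vfid_to_concrete(p_hwfn, vfid));
 *        ecore_wr(p_hwfn, p_ptt, hw_addr, val);
 *        ecore_fid_pretend(p_hwfn, p_ptt,
 *                          p_hwfn->rel_pf_id <<
 *                          PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
 */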
/* Not in use @DPDK
 * Ecore HW lock
 * =============
 * Although the implementation is ready, today we don't have any flow that
 * utilizes said locks - and we want to keep it this way.
 * If this changes, this needs to be revisited.
 */

/* DMAE */

#define ECORE_DMAE_FLAGS_IS_SET(params, flag) \
        ((params) != OSAL_NULL && \
         GET_FIELD((params)->flags, DMAE_PARAMS_##flag))

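/* A minimal sketch of passing optional DMAE parameters - the flags are the
 * DMAE_PARAMS_* fields tested by the macro above (e.g. COMPLETION_DST):
 *
 *        struct dmae_params params;
 *
 *        OSAL_MEM_ZERO(&params, sizeof(params));
 *        SET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 1);
 *        rc = ecore_dmae_host2grc(p_hwfn, p_ptt, src_addr, grc_addr,
 *                                 size_in_dwords, &params);
 */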
static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
                              const u8 is_src_type_grc,
                              const u8 is_dst_type_grc,
                              struct dmae_params *p_params)
{
        u8 src_pf_id, dst_pf_id, port_id;
        u16 opcode_b = 0;
        u32 opcode = 0;

        /* Whether the source is the PCIe or the GRC.
         * 0- The source is the PCIe
         * 1- The source is the GRC.
         */
        opcode |= (is_src_type_grc ? dmae_cmd_src_grc : dmae_cmd_src_pcie) <<
                  DMAE_CMD_SRC_SHIFT;
        src_pf_id = ECORE_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ?
                    p_params->src_pf_id : p_hwfn->rel_pf_id;
        opcode |= (src_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
                  DMAE_CMD_SRC_PF_ID_SHIFT;

        /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
        opcode |= (is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie) <<
                  DMAE_CMD_DST_SHIFT;
        dst_pf_id = ECORE_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ?
                    p_params->dst_pf_id : p_hwfn->rel_pf_id;
        opcode |= (dst_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
                  DMAE_CMD_DST_PF_ID_SHIFT;

        /* DMAE_E4_TODO need to check which value to specify here. */
        /* opcode |= (!b_complete_to_host) << DMAE_CMD_C_DST_SHIFT; */

        /* Whether to write a completion word to the completion destination:
         * 0-Do not write a completion word
         * 1-Write the completion word
         */
        opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
        opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;

        if (ECORE_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
                opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;

        /* Swapping mode 3 - big endian. There should be a define for this
         * ifdef'ed in the HSI somewhere; since there currently isn't one,
         * it is set explicitly here.
         */
        opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;

        port_id = (ECORE_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ?
                  p_params->port_id : p_hwfn->port_id;
        opcode |= port_id << DMAE_CMD_PORT_ID_SHIFT;

        /* reset source address in next go */
        opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;

        /* reset dest address in next go */
        opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT;

        /* SRC/DST VFID: all 1's - pf, otherwise VF id */
        if (ECORE_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) {
                opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
                opcode_b |= (p_params->src_vf_id << DMAE_CMD_SRC_VF_ID_SHIFT);
        } else {
                opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
                             DMAE_CMD_SRC_VF_ID_SHIFT);
        }
        if (ECORE_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) {
                opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
                opcode_b |= p_params->dst_vf_id << DMAE_CMD_DST_VF_ID_SHIFT;
        } else {
                opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
        }

        p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
        p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
}

static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
{
        OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4);

        /* All the DMAE 'go' registers form an array in internal memory */
        return DMAE_REG_GO_C0 + (idx << 2);
}

static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
                                                    struct ecore_ptt *p_ptt)
{
        struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
        u8 idx_cmd = p_hwfn->dmae_info.channel, i;
        enum _ecore_status_t ecore_status = ECORE_SUCCESS;

        /* verify that the source and destination addresses are non-zero */
        if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
             ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
                DP_NOTICE(p_hwfn, true,
                          "source or destination address 0 idx_cmd=%d\n"
                          "opcode = [0x%08x,0x%04x] len=0x%x"
                          " src=0x%x:%x dst=0x%x:%x\n",
                          idx_cmd,
                          OSAL_LE32_TO_CPU(p_command->opcode),
                          OSAL_LE16_TO_CPU(p_command->opcode_b),
                          OSAL_LE16_TO_CPU(p_command->length_dw),
                          OSAL_LE32_TO_CPU(p_command->src_addr_hi),
                          OSAL_LE32_TO_CPU(p_command->src_addr_lo),
                          OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
                          OSAL_LE32_TO_CPU(p_command->dst_addr_lo));

                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x]"
                   " len=0x%x src=0x%x:%x dst=0x%x:%x\n",
                   idx_cmd,
                   OSAL_LE32_TO_CPU(p_command->opcode),
                   OSAL_LE16_TO_CPU(p_command->opcode_b),
                   OSAL_LE16_TO_CPU(p_command->length_dw),
                   OSAL_LE32_TO_CPU(p_command->src_addr_hi),
                   OSAL_LE32_TO_CPU(p_command->src_addr_lo),
                   OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
                   OSAL_LE32_TO_CPU(p_command->dst_addr_lo));

        /* Copy the command to DMAE - need to do it before every call
         * for source/dest address no reset.
         * The number of commands has been increased to 16 (previously 14).
         * The first 9 DWs are the command registers, the 10th DW is the
         * GO register, and the rest are result registers (which are read
         * only by the client).
         */
        for (i = 0; i < DMAE_CMD_SIZE; i++) {
                u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
                           *(((u32 *)p_command) + i) : 0;

                ecore_wr(p_hwfn, p_ptt,
                         DMAE_REG_CMD_MEM +
                         (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
                         (i * sizeof(u32)), data);
        }

        ecore_wr(p_hwfn, p_ptt,
                 ecore_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);

        return ecore_status;
}

enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
{
        dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
        struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
        u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
        u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

        *p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
        if (*p_comp == OSAL_NULL) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to allocate `p_completion_word'\n");
                goto err;
        }

        p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
        *p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
                                         sizeof(struct dmae_cmd));
        if (*p_cmd == OSAL_NULL) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to allocate `struct dmae_cmd'\n");
                goto err;
        }

        p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
        *p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
                                          sizeof(u32) * DMAE_MAX_RW_SIZE);
        if (*p_buff == OSAL_NULL) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to allocate `intermediate_buffer'\n");
                goto err;
        }

        p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
        p_hwfn->dmae_info.b_mem_ready = true;

        return ECORE_SUCCESS;
err:
        ecore_dmae_info_free(p_hwfn);
        return ECORE_NOMEM;
}

void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
{
        dma_addr_t p_phys;

        OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
        p_hwfn->dmae_info.b_mem_ready = false;
        OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);

        if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
                p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_hwfn->dmae_info.p_completion_word,
                                       p_phys, sizeof(u32));
                p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
        }

        if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
                p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_hwfn->dmae_info.p_dmae_cmd,
                                       p_phys, sizeof(struct dmae_cmd));
                p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
        }

        if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
                p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_hwfn->dmae_info.p_intermediate_buffer,
                                       p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
                p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
        }
}

static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
{
        u32 wait_cnt_limit = 10000, wait_cnt = 0;
        enum _ecore_status_t ecore_status = ECORE_SUCCESS;

#ifndef ASIC_ONLY
        u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
                      ECORE_EMUL_FACTOR :
                      (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
                       ECORE_FPGA_FACTOR : 1));

        wait_cnt_limit *= factor;
#endif

        /* DMAE_E4_TODO : TODO check if we have to call any function
         * besides BARRIER to sync the completion_word, since we are
         * not using the volatile keyword for it.
         */
        OSAL_BARRIER(p_hwfn->p_dev);
        while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
                OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
                if (++wait_cnt > wait_cnt_limit) {
                        DP_NOTICE(p_hwfn->p_dev, false,
                                  "Timed-out waiting for operation to"
                                  " complete. Completion word is 0x%08x"
                                  " expected 0x%08x.\n",
                                  *p_hwfn->dmae_info.p_completion_word,
                                  DMAE_COMPLETION_VAL);
                        ecore_status = ECORE_TIMEOUT;
                        break;
                }
                /* to sync the completion_word since we are not
                 * using the volatile keyword for p_completion_word
                 */
                OSAL_BARRIER(p_hwfn->p_dev);
        }

        if (ecore_status == ECORE_SUCCESS)
                *p_hwfn->dmae_info.p_completion_word = 0;

        return ecore_status;
}

enum ecore_dmae_address_type {
        ECORE_DMAE_ADDRESS_HOST_VIRT,
        ECORE_DMAE_ADDRESS_HOST_PHYS,
        ECORE_DMAE_ADDRESS_GRC
};

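/* Note: host virtual addresses are bounced through the pre-allocated
 * intermediate buffer (see the HOST_VIRT cases below), while host physical
 * and GRC addresses are programmed into the DMAE command directly.
 */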
static enum _ecore_status_t
ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
                                 struct ecore_ptt *p_ptt,
                                 u64 src_addr,
                                 u64 dst_addr,
                                 u8 src_type, u8 dst_type, u32 length_dw)
{
        dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
        struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
        enum _ecore_status_t ecore_status = ECORE_SUCCESS;

        switch (src_type) {
        case ECORE_DMAE_ADDRESS_GRC:
        case ECORE_DMAE_ADDRESS_HOST_PHYS:
                cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(src_addr));
                cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(src_addr));
                break;
        /* for virt source addresses we use the intermediate buffer. */
        case ECORE_DMAE_ADDRESS_HOST_VIRT:
                cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
                cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
                OSAL_MEMCPY(&p_hwfn->dmae_info.p_intermediate_buffer[0],
                            (void *)(osal_uintptr_t)src_addr,
                            length_dw * sizeof(u32));
                break;
        default:
                return ECORE_INVAL;
        }

        switch (dst_type) {
        case ECORE_DMAE_ADDRESS_GRC:
        case ECORE_DMAE_ADDRESS_HOST_PHYS:
                cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(dst_addr));
                cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(dst_addr));
                break;
        /* for virt destination address we use the intermediate buff. */
        case ECORE_DMAE_ADDRESS_HOST_VIRT:
                cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
                cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
                break;
        default:
                return ECORE_INVAL;
        }

        cmd->length_dw = OSAL_CPU_TO_LE16((u16)length_dw);

        if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
            src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
                OSAL_DMA_SYNC(p_hwfn->p_dev,
                              (void *)HILO_U64(cmd->src_addr_hi,
                                               cmd->src_addr_lo),
                              length_dw * sizeof(u32), false);

        ecore_dmae_post_command(p_hwfn, p_ptt);

        ecore_status = ecore_dmae_operation_wait(p_hwfn);

        /* TODO - is it true ? */
        if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
            src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
                OSAL_DMA_SYNC(p_hwfn->p_dev,
                              (void *)HILO_U64(cmd->src_addr_hi,
                                               cmd->src_addr_lo),
                              length_dw * sizeof(u32), true);

        if (ecore_status != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false,
                          "Wait Failed. source_addr 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x, intermediate buffer 0x%lx.\n",
                          (unsigned long)src_addr, (unsigned long)dst_addr,
                          length_dw,
                          (unsigned long)p_hwfn->dmae_info.intermediate_buffer_phys_addr);
                return ecore_status;
        }

        if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
                OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
                            &p_hwfn->dmae_info.p_intermediate_buffer[0],
                            length_dw * sizeof(u32));

        return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
                           struct ecore_ptt *p_ptt,
                           u64 src_addr,
                           u64 dst_addr,
                           u8 src_type,
                           u8 dst_type,
                           u32 size_in_dwords,
                           struct dmae_params *p_params)
{
        dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
        u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
        struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
        u64 src_addr_split = 0, dst_addr_split = 0;
        u16 length_limit = DMAE_MAX_RW_SIZE;
        enum _ecore_status_t ecore_status = ECORE_SUCCESS;
        u32 offset = 0;

        if (!p_hwfn->dmae_info.b_mem_ready) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                           "No buffers allocated. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
                           (unsigned long)src_addr, src_type,
                           (unsigned long)dst_addr, dst_type,
                           size_in_dwords);
                return ECORE_NOMEM;
        }

        if (p_hwfn->p_dev->recov_in_prog) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                           "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
                           (unsigned long)src_addr, src_type,
                           (unsigned long)dst_addr, dst_type,
                           size_in_dwords);
                /* Return success to let the flow complete successfully
                 * w/o any error handling.
                 */
                return ECORE_SUCCESS;
        }

        if (!cmd) {
                DP_NOTICE(p_hwfn, true,
                          "ecore_dmae_execute_sub_operation failed. Invalid state. source_addr 0x%lx, destination addr 0x%lx, size_in_dwords 0x%x\n",
                          (unsigned long)src_addr,
                          (unsigned long)dst_addr,
                          length_cur);
                return ECORE_INVAL;
        }

        ecore_dmae_opcode(p_hwfn,
                          (src_type == ECORE_DMAE_ADDRESS_GRC),
                          (dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);

        cmd->comp_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
        cmd->comp_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
        cmd->comp_val = OSAL_CPU_TO_LE32(DMAE_COMPLETION_VAL);

        /* Check whether the grc_addr is valid, i.e. < MAX_GRC_OFFSET */
        cnt_split = size_in_dwords / length_limit;
        length_mod = size_in_dwords % length_limit;

        src_addr_split = src_addr;
        dst_addr_split = dst_addr;

        for (i = 0; i <= cnt_split; i++) {
                offset = length_limit * i;

                if (!ECORE_DMAE_FLAGS_IS_SET(p_params, RW_REPL_SRC)) {
                        if (src_type == ECORE_DMAE_ADDRESS_GRC)
                                src_addr_split = src_addr + offset;
                        else
                                src_addr_split = src_addr + (offset * 4);
                }

                if (dst_type == ECORE_DMAE_ADDRESS_GRC)
                        dst_addr_split = dst_addr + offset;
                else
                        dst_addr_split = dst_addr + (offset * 4);

                length_cur = (cnt_split == i) ? length_mod : length_limit;

                /* might be zero on last iteration */
                if (!length_cur)
                        continue;

                ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
                                                                p_ptt,
                                                                src_addr_split,
                                                                dst_addr_split,
                                                                src_type,
                                                                dst_type,
                                                                length_cur);
                if (ecore_status != ECORE_SUCCESS) {
                        DP_NOTICE(p_hwfn, false,
                                  "ecore_dmae_execute_sub_operation Failed"
                                  " with error 0x%x. source_addr 0x%lx,"
                                  " dest addr 0x%lx, size_in_dwords 0x%x\n",
                                  ecore_status, (unsigned long)src_addr,
                                  (unsigned long)dst_addr, length_cur);

                        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
                        break;
                }
        }

        return ecore_status;
}

enum _ecore_status_t ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt,
                                         u64 source_addr,
                                         u32 grc_addr,
                                         u32 size_in_dwords,
                                         struct dmae_params *p_params)
{
        u32 grc_addr_in_dw = grc_addr / sizeof(u32);
        enum _ecore_status_t rc;

        OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);

        rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
                                        grc_addr_in_dw,
                                        ECORE_DMAE_ADDRESS_HOST_VIRT,
                                        ECORE_DMAE_ADDRESS_GRC,
                                        size_in_dwords, p_params);

        OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);

        return rc;
}

enum _ecore_status_t ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt,
                                         u32 grc_addr,
                                         dma_addr_t dest_addr,
                                         u32 size_in_dwords,
                                         struct dmae_params *p_params)
{
        u32 grc_addr_in_dw = grc_addr / sizeof(u32);
        enum _ecore_status_t rc;

        OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);

        rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
                                        dest_addr, ECORE_DMAE_ADDRESS_GRC,
                                        ECORE_DMAE_ADDRESS_HOST_VIRT,
                                        size_in_dwords, p_params);

        OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);

        return rc;
}

enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
                     struct ecore_ptt *p_ptt,
                     dma_addr_t source_addr,
                     dma_addr_t dest_addr,
                     u32 size_in_dwords,
                     struct dmae_params *p_params)
{
        enum _ecore_status_t rc;

        OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);

        rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
                                        dest_addr,
                                        ECORE_DMAE_ADDRESS_HOST_PHYS,
                                        ECORE_DMAE_ADDRESS_HOST_PHYS,
                                        size_in_dwords, p_params);

        OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);

        return rc;
}

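/* Usage sketch for the wrappers above - they serialize on dmae_info.lock,
 * so callers only supply addresses and a dword count. E.g., copying a host
 * buffer (virtual address) into GRC:
 *
 *        rc = ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)p_buf,
 *                                 grc_addr, size_in_dwords, OSAL_NULL);
 */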
void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
                         enum ecore_hw_err_type err_type)
{
        /* Fan failure cannot be masked by handling of another HW error */
        if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
                           "Recovery is in progress. "
                           "Avoid notifying about HW error %d.\n",
                           err_type);
                return;
        }

        OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
}

enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       const char *phase)
{
        u32 size = OSAL_PAGE_SIZE / 2, val;
        enum _ecore_status_t rc = ECORE_SUCCESS;
        dma_addr_t p_phys;
        void *p_virt;
        u32 *p_tmp;

        p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, 2 * size);
        if (!p_virt) {
                DP_NOTICE(p_hwfn, false,
                          "DMAE sanity [%s]: failed to allocate memory\n",
                          phase);
                return ECORE_NOMEM;
        }

        /* Fill the bottom half of the allocated memory with a known pattern */
        for (p_tmp = (u32 *)p_virt;
             p_tmp < (u32 *)((u8 *)p_virt + size);
             p_tmp++) {
                /* Save the address itself as the value */
                val = (u32)(osal_uintptr_t)p_tmp;
                *p_tmp = val;
        }

        /* Zero the top half of the allocated memory */
        OSAL_MEM_ZERO((u8 *)p_virt + size, size);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "DMAE sanity [%s]: src_addr={phys 0x%lx, virt %p}, dst_addr={phys 0x%lx, virt %p}, size 0x%x\n",
                   phase, (unsigned long)p_phys, p_virt,
                   (unsigned long)(p_phys + size),
                   (u8 *)p_virt + size, size);

        rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
                                  size / 4 /* size_in_dwords */,
                                  OSAL_NULL /* default parameters */);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false,
                          "DMAE sanity [%s]: ecore_dmae_host2host() failed. rc = %d.\n",
                          phase, rc);
                goto out;
        }

        /* Verify that the top half of the allocated memory has the pattern */
        for (p_tmp = (u32 *)((u8 *)p_virt + size);
             p_tmp < (u32 *)((u8 *)p_virt + (2 * size));
             p_tmp++) {
                /* The corresponding address in the bottom half */
                val = (u32)(osal_uintptr_t)p_tmp - size;

                if (*p_tmp != val) {
                        DP_NOTICE(p_hwfn, false,
                                  "DMAE sanity [%s]: addr={phys 0x%lx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
                                  phase,
                                  (unsigned long)p_phys +
                                  ((u8 *)p_tmp - (u8 *)p_virt),
                                  p_tmp, *p_tmp, val);
                        rc = ECORE_UNKNOWN_ERROR;
                        goto out;
                }
        }

out:
        OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size);
        return rc;
}

void ecore_ppfid_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                    u8 abs_ppfid, u32 hw_addr, u32 val)
{
        u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);

        ecore_fid_pretend(p_hwfn, p_ptt,
                          pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
        ecore_wr(p_hwfn, p_ptt, hw_addr, val);
        ecore_fid_pretend(p_hwfn, p_ptt,
                          p_hwfn->rel_pf_id <<
                          PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
}

u32 ecore_ppfid_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                   u8 abs_ppfid, u32 hw_addr)
{
        u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
        u32 val;

        ecore_fid_pretend(p_hwfn, p_ptt,
                          pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
        val = ecore_rd(p_hwfn, p_ptt, hw_addr);
        ecore_fid_pretend(p_hwfn, p_ptt,
                          p_hwfn->rel_pf_id <<
                          PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);

        return val;
}