/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2019-2020 NXP
 */
/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the
 * driver. They are only included via qbman_private.h, which is itself a
 * platform-independent file and is included by all the other driver source
 * files.
 *
 * qbman_sys_decl.h is included prior to all other declarations and logic, and
 * it exists to provide compatibility with any Linux interfaces our
 * single-source driver code depends on (e.g. kmalloc). I.e. this file
 * provides Linux compatibility.
 *
 * This qbman_sys.h header, on the other hand, is included *after* any common
 * and platform-neutral declarations and logic in qbman_private.h, and exists
 * to implement any platform-specific logic of the qbman driver itself. I.e.
 * its purpose is *not* to provide Linux compatibility.
 */

#ifndef _QBMAN_SYS_H_
#define _QBMAN_SYS_H_

#include "qbman_sys_decl.h"

#define CENA_WRITE_ENABLE 0
#define CINH_WRITE_ENABLE 1

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
#define QBMAN_CINH_SWP_EQCR_CI 0x840
#define QBMAN_CINH_SWP_EQAR 0x8c0
#define QBMAN_CINH_SWP_CR_RT 0x900
#define QBMAN_CINH_SWP_VDQCR_RT 0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
#define QBMAN_CINH_SWP_DQPI 0xa00
#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
#define QBMAN_CINH_SWP_DCAP 0xac0
#define QBMAN_CINH_SWP_SDQCR 0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
#define QBMAN_CINH_SWP_RCR_PI 0xc00
#define QBMAN_CINH_SWP_RAR 0xcc0
#define QBMAN_CINH_SWP_ISR 0xe00
#define QBMAN_CINH_SWP_IER 0xe40
#define QBMAN_CINH_SWP_ISDR 0xe80
#define QBMAN_CINH_SWP_IIR 0xec0
#define QBMAN_CINH_SWP_ITPR 0xf40

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_CR 0x600
#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR 0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840

/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((uint32_t)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM 0x1600
#define QBMAN_CENA_SWP_RR_MEM 0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
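
/* Illustrative note: each EQCR/DQRR/RCR ring entry is one 64-byte cacheline,
 * which is why the (n) macros above step in units of 64 bytes. The values
 * follow directly from the macro definitions, e.g.:
 *
 *     QBMAN_CENA_SWP_DQRR(0) == 0x200
 *     QBMAN_CENA_SWP_DQRR(3) == 0x2c0   (0x200 + (3 << 6))
 */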

/* Debugging assists */
static inline void __hexdump(unsigned long start, unsigned long end,
                             unsigned long p, size_t sz, const unsigned char *c)
{
        while (start < end) {
                unsigned int pos = 0;
                char buf[64];
                int nl = 0;

                pos += sprintf(buf + pos, "%08lx: ", start);
                do {
                        if ((start < p) || (start >= (p + sz)))
                                pos += sprintf(buf + pos, "..");
                        else
                                pos += sprintf(buf + pos, "%02x", *(c++));
                        if (!(++start & 15)) {
                                buf[pos++] = '\n';
                                nl = 1;
                        } else {
                                nl = 0;
                                if (!(start & 1))
                                        buf[pos++] = ' ';
                                if (!(start & 3))
                                        buf[pos++] = ' ';
                        }
                } while (start & 15);
                if (!nl)
                        buf[pos++] = '\n';
                buf[pos] = '\0';
                pr_info("%s", buf);
        }
}

static inline void hexdump(const void *ptr, size_t sz)
{
        unsigned long p = (unsigned long)ptr;
        unsigned long start = p & ~15;
        unsigned long end = (p + sz + 15) & ~15;
        const unsigned char *c = ptr;

        __hexdump(start, end, p, sz, c);
}
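
/* Example output (illustrative, not from a real capture): hexdump(p, 6) on a
 * 16-byte-aligned pointer p whose first bytes are 00 01 02 03 04 05 emits one
 * line of the form
 *
 *     00001000: 0001 0203  0405 ....  .... ....  .... ....
 *
 * i.e. bytes grouped in pairs, double-spaced every four bytes, with bytes
 * outside the requested [p, p+sz) range rendered as "..".
 */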

/* Currently, the CENA support code expects each 32-bit word to be written in
 * host order, and these are converted to hardware (little-endian) order on
 * command submission. However, 64-bit quantities must be written (and read)
 * as two 32-bit words with the least-significant word first, irrespective of
 * host endianness.
 */
static inline void u64_to_le32_copy(void *d, const uint64_t *s,
                                    unsigned int cnt)
{
        uint32_t *dd = d;
        const uint32_t *ss = (const uint32_t *)s;

        while (cnt--) {
                /* TBD: the toolchain was choking on the use of 64-bit types up
                 * until recently so this works entirely with 32-bit variables.
                 * When 64-bit types become usable again, investigate better
                 * ways of doing this.
                 */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
                *(dd++) = ss[1];
                *(dd++) = ss[0];
                ss += 2;
#else
                *(dd++) = *(ss++);
                *(dd++) = *(ss++);
#endif
        }
}

static inline void u64_from_le32_copy(uint64_t *d, const void *s,
                                      unsigned int cnt)
{
        const uint32_t *ss = s;
        uint32_t *dd = (uint32_t *)d;

        while (cnt--) {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
                dd[1] = *(ss++);
                dd[0] = *(ss++);
                dd += 2;
#else
                *(dd++) = *(ss++);
                *(dd++) = *(ss++);
#endif
        }
}
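
/* Minimal usage sketch (illustrative; the variable names are hypothetical):
 * marshal a 64-bit field into a shadow command and read it back. The round
 * trip preserves the value on both little- and big-endian hosts, since the
 * two helpers apply the same word-order convention in opposite directions.
 *
 *     uint64_t fd_addr = 0x1122334455667788ULL, check;
 *     uint32_t shadow[2];
 *
 *     u64_to_le32_copy(shadow, &fd_addr, 1);   // cnt counts 64-bit words
 *     u64_from_le32_copy(&check, shadow, 1);   // check == fd_addr
 */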

/******************/
/* Portal access */
/******************/
struct qbman_swp_sys {
        /* On GPP, the sys support for qbman_swp is here. The CENA region is
         * not an mmap() of the real portal registers, but an allocated
         * place-holder, because the actual writes/reads to/from the portal are
         * marshalled from these allocated areas using QBMan's "MC access
         * registers". CINH accesses are atomic so there's no need for a
         * place-holder.
         */
        uint8_t *cena;
        uint8_t *addr_cena;
        uint8_t *addr_cinh;
        uint32_t idx;
        enum qbman_eqcr_mode eqcr_mode;
};

/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal
 * C is (ACCESS_CMD,12,1) - is inhibited? (0==CENA, 1==CINH)
 * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index
 * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal)
 * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE)
 * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete)
 */
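
/* Illustrative only (this file does not itself build ACCESS_CMD words): with
 * the (register,lsb,width) layout above, a command word would compose as
 *
 *     cmd = (p_offset & 0xfff)          |  // P_OFFSET, bits 0..11
 *           ((uint32_t)c << 12)         |  // C: 0==CENA, 1==CINH
 *           ((swp_idx & 0x3ff) << 16)   |  // SWP_IDX, bits 16..25
 *           ((uint32_t)p << 28)         |  // P: 0==special, 1==any portal
 *           ((uint32_t)t << 29)         |  // T: 0==READ, 1==WRITE
 *           (1u << 31);                    // E: issue, then poll for 0
 */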

static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset,
                                    uint32_t val)
{
        __raw_writel(val, s->addr_cinh + offset);
#ifdef QBMAN_CINH_TRACE
        pr_info("qbman_cinh_write(%p:%d:0x%03x) 0x%08x\n",
                s->addr_cinh, s->idx, offset, val);
#endif
}

static inline void *qbman_cinh_write_start_wo_shadow(struct qbman_swp_sys *s,
                                                     uint32_t offset)
{
#ifdef QBMAN_CINH_TRACE
        pr_info("qbman_cinh_write_start_wo_shadow(%p:%d:0x%03x)\n",
                s->addr_cinh, s->idx, offset);
#endif
        QBMAN_BUG_ON(offset & 63);
        return (s->addr_cinh + offset);
}

static inline void qbman_cinh_write_complete(struct qbman_swp_sys *s,
                                             uint32_t offset, void *cmd)
{
        const uint32_t *shadow = cmd;
        int loop;
#ifdef QBMAN_CINH_TRACE
        pr_info("qbman_cinh_write_complete(%p:%d:0x%03x) %p\n",
                s->addr_cinh, s->idx, offset, shadow);
        hexdump(cmd, 64);
#endif
        /* Commit word 0 last, after a barrier, so the command only becomes
         * valid to hardware once it is fully written.
         */
        for (loop = 15; loop >= 1; loop--)
                __raw_writel(shadow[loop], s->addr_cinh +
                             offset + loop * 4);
        lwsync();
        __raw_writel(shadow[0], s->addr_cinh + offset);
}

static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset)
{
        uint32_t reg = __raw_readl(s->addr_cinh + offset);
#ifdef QBMAN_CINH_TRACE
        pr_info("qbman_cinh_read(%p:%d:0x%03x) 0x%08x\n",
                s->addr_cinh, s->idx, offset, reg);
#endif
        return reg;
}

static inline void *qbman_cinh_read_shadow(struct qbman_swp_sys *s,
                                           uint32_t offset)
{
        uint32_t *shadow = (uint32_t *)(s->cena + offset);
        unsigned int loop;
#ifdef QBMAN_CINH_TRACE
        pr_info(" %s (%p:%d:0x%03x) %p\n", __func__,
                s->addr_cinh, s->idx, offset, shadow);
#endif

        for (loop = 0; loop < 16; loop++)
                shadow[loop] = __raw_readl(s->addr_cinh + offset
                                           + loop * 4);
#ifdef QBMAN_CINH_TRACE
        hexdump(shadow, 64);
#endif
        return shadow;
}

static inline void *qbman_cinh_read_wo_shadow(struct qbman_swp_sys *s,
                                              uint32_t offset)
{
#ifdef QBMAN_CINH_TRACE
        pr_info("qbman_cinh_read_wo_shadow(%p:%d:0x%03x)\n",
                s->addr_cinh, s->idx, offset);
#endif
        return s->addr_cinh + offset;
}

static inline void *qbman_cena_write_start(struct qbman_swp_sys *s,
                                           uint32_t offset)
{
        void *shadow = s->cena + offset;

#ifdef QBMAN_CENA_TRACE
        pr_info("qbman_cena_write_start(%p:%d:0x%03x) %p\n",
                s->addr_cena, s->idx, offset, shadow);
#endif
        QBMAN_BUG_ON(offset & 63);
        /* Zero the shadow cacheline so the caller builds the command on a
         * clean 64-byte slate.
         */
        dcbz(shadow);
        return shadow;
}

static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s,
                                                     uint32_t offset)
{
#ifdef QBMAN_CENA_TRACE
        pr_info("qbman_cena_write_start_wo_shadow(%p:%d:0x%03x)\n",
                s->addr_cena, s->idx, offset);
#endif
        QBMAN_BUG_ON(offset & 63);
#ifdef RTE_ARCH_64
        return (s->addr_cena + offset);
#else
        return (s->addr_cinh + offset);
#endif
}

static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,
                                             uint32_t offset, void *cmd)
{
        const uint32_t *shadow = cmd;
        int loop;
#ifdef QBMAN_CENA_TRACE
        pr_info("qbman_cena_write_complete(%p:%d:0x%03x) %p\n",
                s->addr_cena, s->idx, offset, shadow);
        hexdump(cmd, 64);
#endif
        /* Write words 15..1 first and word 0 last (after a barrier), so
         * hardware never consumes a half-written command.
         */
#ifdef RTE_ARCH_64
        for (loop = 15; loop >= 1; loop--)
                __raw_writel(shadow[loop], s->addr_cena +
                             offset + loop * 4);
        lwsync();
        __raw_writel(shadow[0], s->addr_cena + offset);
#else
        for (loop = 15; loop >= 1; loop--)
                __raw_writel(shadow[loop], s->addr_cinh +
                             offset + loop * 4);
        lwsync();
        __raw_writel(shadow[0], s->addr_cinh + offset);
#endif
        dcbf(s->addr_cena + offset);
}

static inline void qbman_cena_write_complete_wo_shadow(struct qbman_swp_sys *s,
                                                       uint32_t offset)
{
#ifdef QBMAN_CENA_TRACE
        pr_info("qbman_cena_write_complete_wo_shadow(%p:%d:0x%03x)\n",
                s->addr_cena, s->idx, offset);
#endif
        dcbf(s->addr_cena + offset);
}

static inline uint32_t qbman_cena_read_reg(struct qbman_swp_sys *s,
                                           uint32_t offset)
{
        return __raw_readl(s->addr_cena + offset);
}

static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset)
{
        uint32_t *shadow = (uint32_t *)(s->cena + offset);
        unsigned int loop;
#ifdef QBMAN_CENA_TRACE
        pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n",
                s->addr_cena, s->idx, offset, shadow);
#endif

#ifdef RTE_ARCH_64
        for (loop = 0; loop < 16; loop++)
                shadow[loop] = __raw_readl(s->addr_cena + offset
                                           + loop * 4);
#else
        for (loop = 0; loop < 16; loop++)
                shadow[loop] = __raw_readl(s->addr_cinh + offset
                                           + loop * 4);
#endif
#ifdef QBMAN_CENA_TRACE
        hexdump(shadow, 64);
#endif
        return shadow;
}

static inline void *qbman_cena_read_wo_shadow(struct qbman_swp_sys *s,
                                              uint32_t offset)
{
#ifdef QBMAN_CENA_TRACE
        pr_info("qbman_cena_read_wo_shadow(%p:%d:0x%03x)\n",
                s->addr_cena, s->idx, offset);
#endif
        return s->addr_cena + offset;
}

static inline void qbman_cena_invalidate(struct qbman_swp_sys *s,
                                         uint32_t offset)
{
        dccivac(s->addr_cena + offset);
}

static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s,
                                                  uint32_t offset)
{
        dccivac(s->addr_cena + offset);
        prefetch_for_load(s->addr_cena + offset);
}

static inline void qbman_cena_prefetch(struct qbman_swp_sys *s,
                                       uint32_t offset)
{
        prefetch_for_load(s->addr_cena + offset);
}

/******************/
/* Portal support */
/******************/

/* The SWP_CFG portal register is special, in that it is used by the
 * platform-specific code rather than the platform-independent code in
 * qbman_portal.c. So use of it is declared locally here.
 */
#define QBMAN_CINH_SWP_CFG 0xd00

#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT 16
#define SWP_CFG_CPBS_SHIFT 15
#define SWP_CFG_WN_SHIFT 14
#define SWP_CFG_RPM_SHIFT 12
#define SWP_CFG_DCM_SHIFT 10
#define SWP_CFG_EPM_SHIFT 8
#define SWP_CFG_VPM_SHIFT 7
#define SWP_CFG_CPM_SHIFT 6
#define SWP_CFG_SD_SHIFT 5
#define SWP_CFG_SP_SHIFT 4
#define SWP_CFG_SE_SHIFT 3
#define SWP_CFG_DP_SHIFT 2
#define SWP_CFG_DE_SHIFT 1
#define SWP_CFG_EP_SHIFT 0

static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
                                         uint8_t est, uint8_t rpm, uint8_t dcm,
                                         uint8_t epm, int sd, int sp, int se,
                                         int dp, int de, int ep)
{
        uint32_t reg;

        reg = (max_fill << SWP_CFG_DQRR_MF_SHIFT |
               est << SWP_CFG_EST_SHIFT |
               wn << SWP_CFG_WN_SHIFT |
               rpm << SWP_CFG_RPM_SHIFT |
               dcm << SWP_CFG_DCM_SHIFT |
               epm << SWP_CFG_EPM_SHIFT |
               sd << SWP_CFG_SD_SHIFT |
               sp << SWP_CFG_SP_SHIFT |
               se << SWP_CFG_SE_SHIFT |
               dp << SWP_CFG_DP_SHIFT |
               de << SWP_CFG_DE_SHIFT |
               ep << SWP_CFG_EP_SHIFT);

        return reg;
}
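
/* For example, qbman_swp_sys_init() below configures a standard (non
 * vb-array) portal with
 *
 *     reg = qbman_set_swp_cfg(dqrr_size, wn, est, 3, 2, 2,
 *                             1, 1, 1, 1, 1, 1);
 *
 * i.e. RPM=3, DCM=2, EPM=2, and all of SD/SP/SE/DP/DE/EP enabled.
 */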

#define QMAN_RT_MODE 0x00000100

#define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001
#define QMAN_REV_5000 0x05000000
#define QMAN_REV_MASK 0xffff0000

#define SVR_LS1080A 0x87030000
#define SVR_LS2080A 0x87010000
#define SVR_LS2088A 0x87090000
#define SVR_LX2160A 0x87360000

/* Variable to store DPAA2 platform type */
extern uint32_t dpaa2_svr_family;
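
/* The init/update routines below gate memory-backed-portal behaviour on the
 * QBMan major/minor revision, e.g.:
 *
 *     if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000 &&
 *         d->cena_access_mode == qman_cena_fastest_access)
 *             ... use memory-backed (CPBS) mode ...
 *
 * while dpaa2_svr_family selects SoC-specific quirks (e.g. EST is disabled
 * on LS1080A).
 */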

static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
                                     const struct qbman_swp_desc *d,
                                     uint8_t dqrr_size)
{
        uint32_t reg;
        int i;
        int cena_region_size = 4*1024;
        uint8_t est = 1;
#ifdef RTE_ARCH_64
        uint8_t wn = CENA_WRITE_ENABLE;
#else
        uint8_t wn = CINH_WRITE_ENABLE;
#endif

        if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
            && (d->cena_access_mode == qman_cena_fastest_access))
                cena_region_size = 64*1024;
        s->addr_cena = d->cena_bar;
        s->addr_cinh = d->cinh_bar;
        s->idx = (uint32_t)d->idx;
        s->cena = malloc(cena_region_size);

        if (!s->cena) {
                pr_err("Could not allocate page for cena shadow\n");
                return -1;
        }
        s->eqcr_mode = d->eqcr_mode;
        QBMAN_BUG_ON(d->idx < 0);
#ifdef QBMAN_CHECKING
        /* We should never be asked to initialise for a portal that isn't in
         * the power-on state. (I.e. don't forget to reset portals when they
         * are decommissioned!)
         */
        reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
        QBMAN_BUG_ON(reg);
#endif
        if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
            && (d->cena_access_mode == qman_cena_fastest_access))
                memset(s->addr_cena, 0, cena_region_size);
        else {
                /* Invalidate the portal memory.
                 * This ensures no stale cache lines.
                 */
                for (i = 0; i < cena_region_size; i += 64)
                        dccivac(s->addr_cena + i);
        }

        if (dpaa2_svr_family == SVR_LS1080A)
                est = 0;

        if (s->eqcr_mode == qman_eqcr_vb_array) {
                reg = qbman_set_swp_cfg(dqrr_size, wn,
                                        0, 3, 2, 3, 1, 1, 1, 1, 1, 1);
        } else {
                if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000 &&
                    (d->cena_access_mode == qman_cena_fastest_access))
                        reg = qbman_set_swp_cfg(dqrr_size, wn,
                                                1, 3, 2, 0, 1, 1, 1, 1, 1, 1);
                else
                        reg = qbman_set_swp_cfg(dqrr_size, wn,
                                                est, 3, 2, 2, 1, 1, 1, 1, 1, 1);
        }

        if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
            && (d->cena_access_mode == qman_cena_fastest_access))
                reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
                       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
                       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */

        qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
        reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
        if (!reg) {
                pr_err("The portal %d is not enabled!\n", s->idx);
                free(s->cena);
                return -1;
        }

        if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
            && (d->cena_access_mode == qman_cena_fastest_access)) {
                qbman_cinh_write(s, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
                qbman_cinh_write(s, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
        }

        return 0;
}

static inline int qbman_swp_sys_update(struct qbman_swp_sys *s,
                                       const struct qbman_swp_desc *d,
                                       uint8_t dqrr_size,
                                       int stash_off)
{
        uint32_t reg;
        int i;
        int cena_region_size = 4*1024;
        uint8_t est = 1;
#ifdef RTE_ARCH_64
        uint8_t wn = CENA_WRITE_ENABLE;
#else
        uint8_t wn = CINH_WRITE_ENABLE;
#endif

        if (stash_off)
                wn = CINH_WRITE_ENABLE;

        QBMAN_BUG_ON(d->idx < 0);
#ifdef QBMAN_CHECKING
        /* We should never be asked to initialise for a portal that isn't in
         * the power-on state. (I.e. don't forget to reset portals when they
         * are decommissioned!)
         */
        reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
        QBMAN_BUG_ON(reg);
#endif
        if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
            && (d->cena_access_mode == qman_cena_fastest_access))
                memset(s->addr_cena, 0, cena_region_size);
        else {
                /* Invalidate the portal memory.
                 * This ensures no stale cache lines.
                 */
                for (i = 0; i < cena_region_size; i += 64)
                        dccivac(s->addr_cena + i);
        }

        if (dpaa2_svr_family == SVR_LS1080A)
                est = 0;

        if (s->eqcr_mode == qman_eqcr_vb_array) {
                reg = qbman_set_swp_cfg(dqrr_size, wn,
                                        0, 3, 2, 3, 1, 1, 1, 1, 1, 1);
        } else {
                if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000 &&
                    (d->cena_access_mode == qman_cena_fastest_access))
                        reg = qbman_set_swp_cfg(dqrr_size, wn,
                                                1, 3, 2, 0, 1, 1, 1, 1, 1, 1);
                else
                        reg = qbman_set_swp_cfg(dqrr_size, wn,
                                                est, 3, 2, 2, 1, 1, 1, 1, 1, 1);
        }

        if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
            && (d->cena_access_mode == qman_cena_fastest_access))
                reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
                       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
                       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */

        qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
        reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
        if (!reg) {
                pr_err("The portal %d is not enabled!\n", s->idx);
                return -1;
        }

        if ((d->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000
            && (d->cena_access_mode == qman_cena_fastest_access)) {
                qbman_cinh_write(s, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
                qbman_cinh_write(s, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
        }

        return 0;
}

static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)
{
        free(s->cena);
}
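
/* Typical lifecycle (sketch; portal descriptor setup is owned by the caller):
 *
 *     struct qbman_swp_sys s;
 *
 *     if (qbman_swp_sys_init(&s, d, dqrr_size))
 *             return -1;                 // portal not enabled / no memory
 *     ... issue commands via the qbman_cena_*() / qbman_cinh_*() helpers ...
 *     qbman_swp_sys_finish(&s);          // releases the CENA shadow
 */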

#endif /* _QBMAN_SYS_H_ */