/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef _ROC_IO_H_
#define _ROC_IO_H_

#include "roc_platform.h" /* for __plt_always_inline macro */

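/* Compute this core's base LMT line ID and advance lmt_addr to that line
 * (32 lines of 128B per core). Used unless roc_platform.h already defines it.
 */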
#ifndef ROC_LMT_BASE_ID_GET
#define ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id)                                  \
	do {                                                                   \
		/* 32 Lines per core */                                        \
		lmt_id = plt_lcore_id() << ROC_LMT_LINES_PER_CORE_LOG2;       \
		/* Each line is of 128B */                                     \
		(lmt_addr) += ((uint64_t)lmt_id << ROC_LMT_LINE_SIZE_LOG2);   \
	} while (0)
#endif

/* Define it if not defined in roc_platform.h */
#ifndef ROC_LMT_CPT_BASE_ID_GET
#define ROC_LMT_CPT_BASE_ID_GET(lmt_addr, lmt_id)                              \
	do {                                                                   \
		/* 16 Lines per core */                                        \
		lmt_id = ROC_LMT_CPT_BASE_ID_OFF;                              \
		lmt_id += (plt_lcore_id() << ROC_LMT_CPT_LINES_PER_CORE_LOG2);\
		/* Each line is of 128B */                                     \
		(lmt_addr) += ((uint64_t)lmt_id << ROC_LMT_LINE_SIZE_LOG2);   \
	} while (0)
#endif

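/* Load the 16B at addr into val0/val1 with a single LDP instruction. */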
#define roc_load_pair(val0, val1, addr)                                        \
	__extension__ ({                                                       \
		asm volatile("ldp %x[x0], %x[x1], [%x[p1]]"                    \
			     : [x0] "=r"(val0), [x1] "=r"(val1)                \
			     : [p1] "r"(addr));                                \
	})

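/* Store val0/val1 to addr with a single 16B STP instruction. */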
#define roc_store_pair(val0, val1, addr)                                       \
	__extension__ ({                                                       \
		asm volatile(                                                  \
			"stp %x[x0], %x[x1], [%x[p1], #0]!" ::[x0] "r"(val0), \
			[x1] "r"(val1), [p1] "r"(addr));                       \
	})

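/* Prefetch the cache line at ptr for store, to be kept in L1 (PRFM PSTL1KEEP). */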
#define roc_prefetch_store_keep(ptr)                                           \
	__extension__ ({ asm volatile("prfm pstl1keep, [%x0]\n" : : "r"(ptr)); })

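/* Issue a 128-bit CASP at ptr with {swap0, swap1} as both the compare and the
 * swap value; the data loaded back is discarded. The clang variant pins the
 * operands to x0/x1, the GCC variant uses a single __uint128_t register pair.
 */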
#if defined(__clang__)
static __plt_always_inline void
roc_atomic128_cas_noreturn(uint64_t swap0, uint64_t swap1, int64_t *ptr)
{
	register uint64_t x0 __asm("x0") = swap0;
	register uint64_t x1 __asm("x1") = swap1;

	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "casp %[x0], %[x1], %[x0], %[x1], [%[ptr]]\n"
		     : [x0] "+r"(x0), [x1] "+r"(x1)
		     : [ptr] "r"(ptr)
		     : "memory");
}
#else
static __plt_always_inline void
roc_atomic128_cas_noreturn(uint64_t swap0, uint64_t swap1, uint64_t ptr)
{
	__uint128_t wdata = swap0 | ((__uint128_t)swap1 << 64);

	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "casp %[wdata], %H[wdata], %[wdata], %H[wdata], [%[ptr]]\n"
		     : [wdata] "+r"(wdata)
		     : [ptr] "r"(ptr)
		     : "memory");
}
#endif

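/* 64-bit atomic compare-and-swap (CAS, no ordering); returns the value
 * observed at ptr.
 */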
static __plt_always_inline uint64_t
roc_atomic64_cas(uint64_t compare, uint64_t swap, int64_t *ptr)
{
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "cas %[compare], %[swap], [%[ptr]]\n"
		     : [compare] "+r"(compare)
		     : [swap] "r"(swap), [ptr] "r"(ptr)
		     : "memory");

	return compare;
}

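/* As roc_atomic64_cas() but with release semantics (CASL). */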
static __plt_always_inline uint64_t
roc_atomic64_casl(uint64_t compare, uint64_t swap, int64_t *ptr)
{
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "casl %[compare], %[swap], [%[ptr]]\n"
		     : [compare] "+r"(compare)
		     : [swap] "r"(swap), [ptr] "r"(ptr)
		     : "memory");

	return compare;
}

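/* Atomic fetch-and-add with no ordering (LDADD); returns the previous value. */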
static __plt_always_inline uint64_t
roc_atomic64_add_nosync(int64_t incr, int64_t *ptr)
{
	uint64_t result;

	/* Atomic add with no ordering */
	asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldadd %x[i], %x[r], [%[b]]"
		     : [r] "=r"(result), "+m"(*ptr)
		     : [i] "r"(incr), [b] "r"(ptr)
		     : "memory");
	return result;
}

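/* Atomic fetch-and-add with acquire semantics (LDADDA); returns the previous
 * value.
 */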
static __plt_always_inline uint64_t
roc_atomic64_add_sync(int64_t incr, int64_t *ptr)
{
	uint64_t result;

	/* Atomic add with ordering */
	asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldadda %x[i], %x[r], [%[b]]"
		     : [r] "=r"(result), "+m"(*ptr)
		     : [i] "r"(incr), [b] "r"(ptr)
		     : "memory");
	return result;
}

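/* Submit the prepared LMT line(s) to io_address with LDEOR (no ordering) and
 * return the value loaded back from the device.
 */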
static __plt_always_inline uint64_t
roc_lmt_submit_ldeor(plt_iova_t io_address)
{
	uint64_t result;

	asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldeor xzr, %x[rf], [%[rs]]"
		     : [rf] "=r"(result)
		     : [rs] "r"(io_address)
		     : "memory");
	return result;
}

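/* As roc_lmt_submit_ldeor() but with release semantics (LDEORL). */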
static __plt_always_inline uint64_t
roc_lmt_submit_ldeorl(plt_iova_t io_address)
{
	uint64_t result;

	asm volatile(PLT_CPU_FEATURE_PREAMBLE "ldeorl xzr, %x[rf], [%[rs]]"
		     : [rf] "=r"(result)
		     : [rs] "r"(io_address)
		     : "memory");
	return result;
}

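/* Submit to io_address with STEOR (store form, no ordering); 'data' is written
 * and no result is returned.
 */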
static __plt_always_inline void
roc_lmt_submit_steor(uint64_t data, plt_iova_t io_address)
{
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "steor %x[d], [%[rs]]" ::[d] "r"(data),
		     [rs] "r"(io_address)
		     : "memory");
}

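/* As roc_lmt_submit_steor() but with release semantics (STEORL). */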
static __plt_always_inline void
roc_lmt_submit_steorl(uint64_t data, plt_iova_t io_address)
{
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "steorl %x[d], [%[rs]]" ::[d] "r"(data),
		     [rs] "r"(io_address)
		     : "memory");
}

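/* Copy an LMT command to the LMT line: two fixed 16B words plus 'lmtext'
 * extension word(s), using volatile accesses.
 */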
static __plt_always_inline void
roc_lmt_mov(void *out, const void *in, const uint32_t lmtext)
{
	volatile const __uint128_t *src128 = (const __uint128_t *)in;
	volatile __uint128_t *dst128 = (__uint128_t *)out;
	uint32_t i;

	dst128[0] = src128[0];
	dst128[1] = src128[1];
	/* lmtext receives following value:
	 * 1: NIX_SUBDC_EXT needed i.e. tx vlan case
	 */
	for (i = 0; i < lmtext; i++)
		dst128[2 + i] = src128[2 + i];
}

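/* Copy a full 64B LMT line (four 16B words) using volatile accesses. */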
static __plt_always_inline void
roc_lmt_mov64(void *out, const void *in)
{
	volatile const __uint128_t *src128 = (const __uint128_t *)in;
	volatile __uint128_t *dst128 = (__uint128_t *)out;

	dst128[0] = src128[0];
	dst128[1] = src128[1];
	dst128[2] = src128[2];
	dst128[3] = src128[3];
}

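/* Non volatile version of roc_lmt_mov() with at most one extension word. */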
static __plt_always_inline void
roc_lmt_mov_nv(void *out, const void *in, const uint32_t lmtext)
{
	const __uint128_t *src128 = (const __uint128_t *)in;
	__uint128_t *dst128 = (__uint128_t *)out;

	dst128[0] = src128[0];
	dst128[1] = src128[1];
	/* lmtext receives following value:
	 * 1: NIX_SUBDC_EXT needed i.e. tx vlan case
	 */
	if (lmtext)
		dst128[2] = src128[2];
}

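/* Copy 'segdw' 16B words of an LMT command using volatile accesses. */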
static __plt_always_inline void
roc_lmt_mov_seg(void *out, const void *in, const uint16_t segdw)
{
	volatile const __uint128_t *src128 = (const __uint128_t *)in;
	volatile __uint128_t *dst128 = (__uint128_t *)out;
	uint8_t i;

	for (i = 0; i < segdw; i++)
		dst128[i] = src128[i];
}

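/* Copy a single 16B word using volatile accesses. */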
static __plt_always_inline void
roc_lmt_mov_one(void *out, const void *in)
{
	volatile const __uint128_t *src128 = (const __uint128_t *)in;
	volatile __uint128_t *dst128 = (__uint128_t *)out;

	*dst128 = *src128;
}

/* Non volatile version of roc_lmt_mov_seg() */
static __plt_always_inline void
roc_lmt_mov_seg_nv(void *out, const void *in, const uint16_t segdw)
{
	const __uint128_t *src128 = (const __uint128_t *)in;
	__uint128_t *dst128 = (__uint128_t *)out;
	uint8_t i;

	for (i = 0; i < segdw; i++)
		dst128[i] = src128[i];
}

static __plt_always_inline void
roc_atf_ret(void)
{
	/* This allows a wfi in EL0 to raise an asynchronous exception to EL3,
	 * which can then perform any necessary actions.
	 */
	__asm("wfi");
}

#endif /* _ROC_IO_H_ */