xref: /dpdk/drivers/common/cnxk/roc_idev.c (revision 318ee1b0468299e92411ea8616073c477743b34e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include "roc_api.h"
6 #include "roc_priv.h"
7 
8 struct idev_cfg *
9 idev_get_cfg(void)
10 {
11 	static const char name[] = "roc_cn10k_intra_device_conf";
12 	const struct plt_memzone *mz;
13 	struct idev_cfg *idev;
14 
15 	mz = plt_memzone_lookup(name);
16 	if (mz != NULL)
17 		return mz->addr;
18 
19 	/* Request for the first time */
20 	mz = plt_memzone_reserve_cache_align(name, sizeof(struct idev_cfg));
21 	if (mz != NULL) {
22 		idev = mz->addr;
23 		idev_set_defaults(idev);
24 		return idev;
25 	}
26 	return NULL;
27 }
28 
29 void
30 idev_set_defaults(struct idev_cfg *idev)
31 {
32 	idev->sso_pf_func = 0;
33 	idev->npa = NULL;
34 	idev->npa_pf_func = 0;
35 	idev->max_pools = 128;
36 	idev->lmt_pf_func = 0;
37 	idev->lmt_base_addr = 0;
38 	idev->num_lmtlines = 0;
39 	idev->bphy = NULL;
40 	idev->cpt = NULL;
41 	TAILQ_INIT(&idev->rvu_lf_list);
42 	TAILQ_INIT(&idev->mcs_list);
43 	idev->nix_inl_dev = NULL;
44 	TAILQ_INIT(&idev->roc_nix_list);
45 	plt_spinlock_init(&idev->nix_inl_dev_lock);
46 	plt_spinlock_init(&idev->npa_dev_lock);
47 	__atomic_store_n(&idev->npa_refcnt, 0, __ATOMIC_RELEASE);
48 }
49 
50 uint16_t
51 idev_sso_pffunc_get(void)
52 {
53 	struct idev_cfg *idev;
54 	uint16_t sso_pf_func;
55 
56 	idev = idev_get_cfg();
57 	sso_pf_func = 0;
58 	if (idev != NULL)
59 		sso_pf_func = __atomic_load_n(&idev->sso_pf_func,
60 					      __ATOMIC_ACQUIRE);
61 
62 	return sso_pf_func;
63 }
64 
65 void
66 idev_sso_pffunc_set(uint16_t sso_pf_func)
67 {
68 	struct idev_cfg *idev;
69 
70 	idev = idev_get_cfg();
71 	if (idev != NULL)
72 		__atomic_store_n(&idev->sso_pf_func, sso_pf_func,
73 				 __ATOMIC_RELEASE);
74 }
75 
76 uint16_t
77 idev_npa_pffunc_get(void)
78 {
79 	struct idev_cfg *idev;
80 	uint16_t npa_pf_func;
81 
82 	idev = idev_get_cfg();
83 	npa_pf_func = 0;
84 	if (idev != NULL)
85 		npa_pf_func = idev->npa_pf_func;
86 
87 	return npa_pf_func;
88 }
89 
90 struct npa_lf *
91 idev_npa_obj_get(void)
92 {
93 	struct idev_cfg *idev;
94 
95 	idev = idev_get_cfg();
96 	if (idev && __atomic_load_n(&idev->npa_refcnt, __ATOMIC_ACQUIRE))
97 		return idev->npa;
98 
99 	return NULL;
100 }
101 
102 uint32_t
103 roc_idev_npa_maxpools_get(void)
104 {
105 	struct idev_cfg *idev;
106 	uint32_t max_pools;
107 
108 	idev = idev_get_cfg();
109 	max_pools = 0;
110 	if (idev != NULL)
111 		max_pools = idev->max_pools;
112 
113 	return max_pools;
114 }
115 
116 void
117 roc_idev_npa_maxpools_set(uint32_t max_pools)
118 {
119 	struct idev_cfg *idev;
120 
121 	idev = idev_get_cfg();
122 	if (idev != NULL)
123 		__atomic_store_n(&idev->max_pools, max_pools, __ATOMIC_RELEASE);
124 }
125 
126 uint16_t
127 idev_npa_lf_active(struct dev *dev)
128 {
129 	struct idev_cfg *idev;
130 
131 	/* Check if npalf is actively used on this dev */
132 	idev = idev_get_cfg();
133 	if (!idev || !idev->npa || idev->npa->mbox != dev->mbox)
134 		return 0;
135 
136 	return __atomic_load_n(&idev->npa_refcnt, __ATOMIC_ACQUIRE);
137 }
138 
139 uint16_t
140 idev_lmt_pffunc_get(void)
141 {
142 	struct idev_cfg *idev;
143 	uint16_t lmt_pf_func;
144 
145 	idev = idev_get_cfg();
146 	lmt_pf_func = 0;
147 	if (idev != NULL)
148 		lmt_pf_func = idev->lmt_pf_func;
149 
150 	return lmt_pf_func;
151 }
152 
153 uint64_t
154 roc_idev_lmt_base_addr_get(void)
155 {
156 	uint64_t lmt_base_addr;
157 	struct idev_cfg *idev;
158 
159 	idev = idev_get_cfg();
160 	lmt_base_addr = 0;
161 	if (idev != NULL)
162 		lmt_base_addr = idev->lmt_base_addr;
163 
164 	return lmt_base_addr;
165 }
166 
167 uint16_t
168 roc_idev_num_lmtlines_get(void)
169 {
170 	struct idev_cfg *idev;
171 	uint16_t num_lmtlines;
172 
173 	idev = idev_get_cfg();
174 	num_lmtlines = 0;
175 	if (idev != NULL)
176 		num_lmtlines = idev->num_lmtlines;
177 
178 	return num_lmtlines;
179 }
180 
181 struct roc_cpt *
182 roc_idev_cpt_get(void)
183 {
184 	struct idev_cfg *idev = idev_get_cfg();
185 
186 	if (idev != NULL)
187 		return idev->cpt;
188 
189 	return NULL;
190 }
191 
192 struct roc_rvu_lf *
193 roc_idev_rvu_lf_get(uint8_t rvu_lf_idx)
194 {
195 	struct idev_cfg *idev = idev_get_cfg();
196 	struct roc_rvu_lf *rvu_lf = NULL;
197 
198 	if (idev != NULL) {
199 		TAILQ_FOREACH(rvu_lf, &idev->rvu_lf_list, next) {
200 			if (rvu_lf->idx == rvu_lf_idx)
201 				return rvu_lf;
202 		}
203 	}
204 
205 	return NULL;
206 }
207 
208 void
209 roc_idev_rvu_lf_set(struct roc_rvu_lf *rvu)
210 {
211 	struct idev_cfg *idev = idev_get_cfg();
212 	struct roc_rvu_lf *rvu_lf_iter = NULL;
213 
214 	if (idev != NULL) {
215 		TAILQ_FOREACH(rvu_lf_iter, &idev->rvu_lf_list, next) {
216 			if (rvu_lf_iter->idx == rvu->idx)
217 				return;
218 		}
219 		TAILQ_INSERT_TAIL(&idev->rvu_lf_list, rvu, next);
220 	}
221 }
222 
223 void
224 roc_idev_rvu_lf_free(struct roc_rvu_lf *rvu)
225 {
226 	struct idev_cfg *idev = idev_get_cfg();
227 	struct roc_rvu_lf *rvu_lf_iter = NULL;
228 
229 	if (idev != NULL) {
230 		TAILQ_FOREACH(rvu_lf_iter, &idev->rvu_lf_list, next) {
231 			if (rvu_lf_iter->idx == rvu->idx)
232 				TAILQ_REMOVE(&idev->rvu_lf_list, rvu, next);
233 		}
234 	}
235 }
236 
237 struct roc_mcs *
238 roc_idev_mcs_get(uint8_t mcs_idx)
239 {
240 	struct idev_cfg *idev = idev_get_cfg();
241 	struct roc_mcs *mcs = NULL;
242 
243 	if (idev != NULL) {
244 		TAILQ_FOREACH(mcs, &idev->mcs_list, next) {
245 			if (mcs->idx == mcs_idx)
246 				return mcs;
247 		}
248 	}
249 
250 	return NULL;
251 }
252 
253 void
254 roc_idev_mcs_set(struct roc_mcs *mcs)
255 {
256 	struct idev_cfg *idev = idev_get_cfg();
257 	struct roc_mcs *mcs_iter = NULL;
258 
259 	if (idev != NULL) {
260 		TAILQ_FOREACH(mcs_iter, &idev->mcs_list, next) {
261 			if (mcs_iter->idx == mcs->idx)
262 				return;
263 		}
264 		TAILQ_INSERT_TAIL(&idev->mcs_list, mcs, next);
265 	}
266 }
267 
268 void
269 roc_idev_mcs_free(struct roc_mcs *mcs)
270 {
271 	struct idev_cfg *idev = idev_get_cfg();
272 	struct roc_mcs *mcs_iter = NULL;
273 
274 	if (idev != NULL) {
275 		TAILQ_FOREACH(mcs_iter, &idev->mcs_list, next) {
276 			if (mcs_iter->idx == mcs->idx)
277 				TAILQ_REMOVE(&idev->mcs_list, mcs, next);
278 		}
279 	}
280 }
281 
282 uint64_t *
283 roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix)
284 {
285 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
286 	struct idev_cfg *idev = idev_get_cfg();
287 	struct nix_inl_dev *inl_dev;
288 
289 	if (!idev || !idev->nix_inl_dev)
290 		return NULL;
291 
292 	inl_dev = idev->nix_inl_dev;
293 
294 	return (uint64_t *)&inl_dev->sa_soft_exp_ring[nix->outb_se_ring_base];
295 }
296 
297 struct roc_nix_list *
298 roc_idev_nix_list_get(void)
299 {
300 	struct idev_cfg *idev;
301 
302 	idev = idev_get_cfg();
303 	if (idev != NULL)
304 		return &idev->roc_nix_list;
305 	return NULL;
306 }
307 
308 void
309 roc_idev_cpt_set(struct roc_cpt *cpt)
310 {
311 	struct idev_cfg *idev = idev_get_cfg();
312 
313 	if (idev != NULL)
314 		__atomic_store_n(&idev->cpt, cpt, __ATOMIC_RELEASE);
315 }
316 
/* Return the roc_nix owning the idev NPA LF, or NULL when no NPA LF
 * is currently referenced.
 */
struct roc_nix *
roc_idev_npa_nix_get(void)
{
	struct npa_lf *npa_lf = idev_npa_obj_get();
	struct dev *dev;

	if (!npa_lf)
		return NULL;

	/* NOTE(review): assumes the returned npa_lf is the `npa` member
	 * embedded inside its owning struct dev, so container_of() walks
	 * back to that dev — confirm against the struct dev definition.
	 */
	dev = container_of(npa_lf, struct dev, npa);
	return dev->roc_nix;
}
329 
330 struct roc_sso *
331 idev_sso_get(void)
332 {
333 	struct idev_cfg *idev = idev_get_cfg();
334 
335 	if (idev != NULL)
336 		return __atomic_load_n(&idev->sso, __ATOMIC_ACQUIRE);
337 
338 	return NULL;
339 }
340 
341 void
342 idev_sso_set(struct roc_sso *sso)
343 {
344 	struct idev_cfg *idev = idev_get_cfg();
345 
346 	if (idev != NULL)
347 		__atomic_store_n(&idev->sso, sso, __ATOMIC_RELEASE);
348 }
349 
350 void
351 idev_dma_cs_offset_set(uint8_t offset)
352 {
353 	struct idev_cfg *idev = idev_get_cfg();
354 
355 	if (idev != NULL)
356 		idev->dma_cs_offset = offset;
357 }
358 
359 uint8_t
360 idev_dma_cs_offset_get(void)
361 {
362 	struct idev_cfg *idev = idev_get_cfg();
363 
364 	if (idev != NULL)
365 		return idev->dma_cs_offset;
366 
367 	return 0;
368 }
369 
370 uint64_t
371 roc_idev_nix_inl_meta_aura_get(void)
372 {
373 	struct idev_cfg *idev = idev_get_cfg();
374 
375 	if (idev != NULL)
376 		return idev->inl_cfg.meta_aura;
377 	return 0;
378 }
379 
380 uint8_t
381 roc_idev_nix_rx_inject_get(uint16_t port)
382 {
383 	struct idev_cfg *idev;
384 
385 	idev = idev_get_cfg();
386 	if (idev != NULL && port < PLT_MAX_ETHPORTS)
387 		return idev->inl_rx_inj_cfg.rx_inject_en[port];
388 
389 	return 0;
390 }
391 
392 void
393 roc_idev_nix_rx_inject_set(uint16_t port, uint8_t enable)
394 {
395 	struct idev_cfg *idev;
396 
397 	idev = idev_get_cfg();
398 	if (idev != NULL && port < PLT_MAX_ETHPORTS)
399 		__atomic_store_n(&idev->inl_rx_inj_cfg.rx_inject_en[port], enable,
400 				 __ATOMIC_RELEASE);
401 }
402 
403 uint16_t *
404 roc_idev_nix_rx_chan_base_get(void)
405 {
406 	struct idev_cfg *idev = idev_get_cfg();
407 
408 	if (idev != NULL)
409 		return (uint16_t *)&idev->inl_rx_inj_cfg.chan;
410 
411 	return NULL;
412 }
413 
414 void
415 roc_idev_nix_rx_chan_set(uint16_t port, uint16_t chan)
416 {
417 	struct idev_cfg *idev;
418 
419 	idev = idev_get_cfg();
420 	if (idev != NULL && port < PLT_MAX_ETHPORTS)
421 		__atomic_store_n(&idev->inl_rx_inj_cfg.chan[port], chan, __ATOMIC_RELEASE);
422 }
423 
424 uint16_t
425 roc_idev_nix_inl_dev_pffunc_get(void)
426 {
427 	return nix_inl_dev_pffunc_get();
428 }
429