/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2024 Corigine, Inc.
 * All rights reserved.
 */

#include "nfp_sync.h"

#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_spinlock.h>

#include "nfp_logs.h"

#define NFP_SYNC_ELEMENT_MAX    8
#define NFP_SYNC_PCI_MAX        32

struct nfp_sync_element {
	/** Reference count of the users of this element */
	uint16_t count;
	/** Element ID, use ASCII - SYN<> */
	uint32_t magic;
	/** Pointer to the buffer shared by the users of this element */
	void *handle;
};

struct nfp_sync_common {
	/** Name of the PCI device owning this pool, empty when unused */
	char pci_name[PCI_PRI_STR_SIZE + 1];
	/** Number of free element slots left in this pool */
	uint16_t avail;
	struct nfp_sync_element element[NFP_SYNC_ELEMENT_MAX];
};

struct nfp_sync {
	rte_spinlock_t spinlock;

	/** Reference count of nfp_sync_alloc() callers */
	uint16_t alloc_count;

	/** Pool of process wide handles */
	struct nfp_sync_common process;

	/** Per PCI device handle pools */
	struct nfp_sync_common pci[NFP_SYNC_PCI_MAX];

	/** Memzone backing this structure */
	const struct rte_memzone *mz;
};

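/*
 * Get the sync handle of this driver. The backing memzone is looked up
 * first, so repeated callers share one 'struct nfp_sync'; it is reserved
 * and initialized only on the first call. Each call takes one reference,
 * released again by nfp_sync_free().
 */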
struct nfp_sync *
nfp_sync_alloc(void)
{
	uint16_t i;
	struct nfp_sync *sync;
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup("nfp_sync");
	if (mz != NULL) {
		sync = mz->addr;
		sync->alloc_count++;

		return sync;
	}

	mz = rte_memzone_reserve("nfp_sync", sizeof(*sync), SOCKET_ID_ANY,
			RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL)
		return NULL;

	sync = mz->addr;

	memset(sync, 0, sizeof(*sync));

	rte_spinlock_init(&sync->spinlock);
	sync->alloc_count = 1;
	sync->mz = mz;

	for (i = 0; i < NFP_SYNC_PCI_MAX; i++)
		sync->pci[i].avail = NFP_SYNC_ELEMENT_MAX;

	sync->process.avail = NFP_SYNC_ELEMENT_MAX;

	return sync;
}

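/*
 * Put one reference of the sync handle. The memzone is only freed when the
 * last user is gone; any process or PCI handle still allocated at that point
 * is reported as residue.
 */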
void
nfp_sync_free(struct nfp_sync *sync)
{
	uint16_t i;

	rte_spinlock_lock(&sync->spinlock);

	sync->alloc_count--;
	if (sync->alloc_count != 0) {
		rte_spinlock_unlock(&sync->spinlock);
		return;
	}

	if (sync->process.avail != NFP_SYNC_ELEMENT_MAX)
		PMD_DRV_LOG(ERR, "Sync process handle residue");

	for (i = 0; i < NFP_SYNC_PCI_MAX; i++) {
		if (sync->pci[i].avail != NFP_SYNC_ELEMENT_MAX)
			PMD_DRV_LOG(ERR, "Sync %s pci handle residue",
					sync->pci[i].pci_name);
	}

	rte_spinlock_unlock(&sync->spinlock);

	rte_memzone_free(sync->mz);
}

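/* Allocate the zeroed buffer of one element and take the first reference on it. */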
static void *
nfp_sync_element_alloc(struct nfp_sync_element *element,
		uint32_t magic,
		uint32_t size)
{
	void *handle;

	handle = rte_zmalloc(NULL, size, 0);
	if (handle == NULL)
		return NULL;

	element->handle = handle;
	element->count = 1;
	element->magic = magic;

	return handle;
}

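/* Put one reference of an element, freeing the buffer when the last user is gone. */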
static void
nfp_sync_element_free(struct nfp_sync_element *element,
		void *handle)
{
	element->count--;
	if (element->count != 0)
		return;

	rte_free(handle);
	element->handle = NULL;
	element->magic = 0;
}

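/*
 * Get a handle from one pool. An element already carrying the requested
 * magic is shared and its reference count bumped; otherwise a new element
 * is allocated in the first free slot, if any is left.
 */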
static void *
nfp_sync_common_handle_alloc(struct nfp_sync_common *common,
		uint32_t magic,
		uint32_t size)
{
	uint16_t i;
	void *handle = NULL;
	uint16_t avail_slot = NFP_SYNC_ELEMENT_MAX;

	for (i = 0; i < NFP_SYNC_ELEMENT_MAX; i++) {
		if (common->element[i].magic != magic)
			continue;

		common->element[i].count++;

		return common->element[i].handle;
	}

	if (common->avail == 0)
		return NULL;

	for (i = 0; i < NFP_SYNC_ELEMENT_MAX; i++) {
		if (common->element[i].magic == 0) {
			avail_slot = i;
			break;
		}
	}

	handle = nfp_sync_element_alloc(&common->element[avail_slot], magic, size);
	if (handle == NULL)
		return NULL;

	common->avail--;

	return handle;
}

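/* Put one reference of a handle in this pool, reclaiming its slot once unused. */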
static void
nfp_sync_common_handle_free(struct nfp_sync_common *common,
		void *handle)
{
	uint16_t i;

	if (common->avail == NFP_SYNC_ELEMENT_MAX)
		return;

	for (i = 0; i < NFP_SYNC_ELEMENT_MAX; i++) {
		if (common->element[i].handle == handle)
			break;
	}

	if (i == NFP_SYNC_ELEMENT_MAX)
		return;

	nfp_sync_element_free(&common->element[i], handle);

	if (common->element[i].count == 0)
		common->avail++;
}

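/* Get a handle from the process wide pool, under the spinlock. */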
static void *
nfp_sync_process_inner_handle_alloc(struct nfp_sync *sync,
		uint32_t magic,
		uint32_t size)
{
	void *handle = NULL;

	rte_spinlock_lock(&sync->spinlock);

	handle = nfp_sync_common_handle_alloc(&sync->process, magic, size);
	if (handle == NULL)
		PMD_DRV_LOG(ERR, "Process handle alloc failed");

	rte_spinlock_unlock(&sync->spinlock);

	return handle;
}

static void
nfp_sync_process_inner_handle_free(struct nfp_sync *sync,
		void *handle)
{
	rte_spinlock_lock(&sync->spinlock);

	nfp_sync_common_handle_free(&sync->process, handle);

	rte_spinlock_unlock(&sync->spinlock);
}

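/* Get the reference count of a process wide handle, 0 when it is not found. */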
static uint16_t
nfp_sync_process_handle_count_get(struct nfp_sync *sync,
		void *handle)
{
	uint16_t i;
	uint16_t count = 0;

	rte_spinlock_lock(&sync->spinlock);

	for (i = 0; i < NFP_SYNC_ELEMENT_MAX; i++) {
		if (sync->process.element[i].handle == handle) {
			count = sync->process.element[i].count;
			break;
		}
	}

	rte_spinlock_unlock(&sync->spinlock);

	return count;
}

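/*
 * Get a handle from the pool of the given PCI device. The device is matched
 * by name; a free pool slot is claimed when the name is seen for the first
 * time, and NULL is returned if all the PCI pool slots are already taken.
 */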
static void *
nfp_sync_pci_inner_handle_alloc(struct nfp_sync *sync,
		const char *pci_name,
		uint32_t magic,
		uint32_t size)
{
	uint16_t i;
	void *handle = NULL;
	uint16_t pci_avail_id = NFP_SYNC_PCI_MAX;

	rte_spinlock_lock(&sync->spinlock);

	for (i = 0; i < NFP_SYNC_PCI_MAX; i++) {
		if (strcmp(pci_name, sync->pci[i].pci_name) == 0) {
			pci_avail_id = i;
			goto common_alloc;
		}
	}

	for (i = 0; i < NFP_SYNC_PCI_MAX; i++) {
		if (strlen(sync->pci[i].pci_name) == 0) {
			pci_avail_id = i;
			strcpy(sync->pci[pci_avail_id].pci_name, pci_name);
			goto common_alloc;
		}
	}

	rte_spinlock_unlock(&sync->spinlock);

	return NULL;

common_alloc:
	handle = nfp_sync_common_handle_alloc(&sync->pci[pci_avail_id],
			magic, size);
	if (handle == NULL)
		PMD_DRV_LOG(ERR, "PCI handle alloc failed");

	rte_spinlock_unlock(&sync->spinlock);

	return handle;
}

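/*
 * Put one reference of a PCI device handle. The pool slot of the device is
 * recycled (its name cleared) once all of its elements are free again.
 */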
static void
nfp_sync_pci_inner_handle_free(struct nfp_sync *sync,
		const char *pci_name,
		void *handle)
{
	uint16_t i;
	char *name_tmp;

	rte_spinlock_lock(&sync->spinlock);

	for (i = 0; i < NFP_SYNC_PCI_MAX; i++) {
		name_tmp = sync->pci[i].pci_name;
		if (strlen(name_tmp) != 0 && strcmp(pci_name, name_tmp) == 0) {
			nfp_sync_common_handle_free(&sync->pci[i], handle);
			if (sync->pci[i].avail == NFP_SYNC_ELEMENT_MAX)
				name_tmp[0] = 0;
			break;
		}
	}

	rte_spinlock_unlock(&sync->spinlock);
}

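/* Get the reference count of a handle of the given PCI device, 0 when not found. */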
static uint16_t
nfp_sync_pci_handle_count_get(struct nfp_sync *sync,
		const char *pci_name,
		void *handle)
{
	uint16_t i;
	uint16_t count = 0;
	struct nfp_sync_common *pci_common;

	rte_spinlock_lock(&sync->spinlock);

	for (i = 0; i < NFP_SYNC_PCI_MAX; i++) {
		if (strcmp(sync->pci[i].pci_name, pci_name) == 0)
			break;
	}

	if (i == NFP_SYNC_PCI_MAX) {
		rte_spinlock_unlock(&sync->spinlock);
		return 0;
	}

	pci_common = &sync->pci[i];

	for (i = 0; i < NFP_SYNC_ELEMENT_MAX; i++) {
		if (pci_common->element[i].handle == handle) {
			count = pci_common->element[i].count;
			break;
		}
	}

	rte_spinlock_unlock(&sync->spinlock);

	return count;
}

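/*
 * Allocate or share a zeroed buffer identified by 'magic'. With a NULL
 * 'pci_dev' the handle comes from the process wide pool, otherwise from the
 * pool of that PCI device.
 */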
void *
nfp_sync_handle_alloc(struct nfp_sync *sync,
		struct rte_pci_device *pci_dev,
		uint32_t magic,
		uint32_t size)
{
	if (pci_dev == NULL)
		return nfp_sync_process_inner_handle_alloc(sync, magic, size);

	return nfp_sync_pci_inner_handle_alloc(sync, pci_dev->device.name,
			magic, size);
}

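/* Counterpart of nfp_sync_handle_alloc(), putting back one reference of the handle. */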
void
nfp_sync_handle_free(struct nfp_sync *sync,
		struct rte_pci_device *pci_dev,
		void *handle)
{
	if (pci_dev == NULL) {
		nfp_sync_process_inner_handle_free(sync, handle);
		return;
	}

	nfp_sync_pci_inner_handle_free(sync, pci_dev->device.name, handle);
}

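/* Get the number of users currently sharing the given handle. */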
uint16_t
nfp_sync_handle_count_get(struct nfp_sync *sync,
		struct rte_pci_device *pci_dev,
		void *handle)
{
	if (pci_dev == NULL)
		return nfp_sync_process_handle_count_get(sync, handle);

	return nfp_sync_pci_handle_count_get(sync, pci_dev->device.name, handle);
}
383