/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2024 Corigine, Inc.
 * All rights reserved.
 */

#include "nfp_sync.h"

#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "nfp_logs.h"

#define NFP_SYNC_ELEMENT_MAX    8
#define NFP_SYNC_PCI_MAX        32

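/** One reference-counted shared object: its allocated handle plus the magic that identifies it. */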
struct nfp_sync_element {
	uint16_t count;
	/** Element ID, use ASCII - SYN<> */
	uint32_t magic;
	void *handle;
};

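/** A group of elements sharing one scope: the whole process, or one PCI device named by @pci_name. */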
struct nfp_sync_common {
	char pci_name[PCI_PRI_STR_SIZE + 1];
	uint16_t avail;
	struct nfp_sync_element element[NFP_SYNC_ELEMENT_MAX];
};

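/** Top-level synchronization state, stored in the 'nfp_sync' memzone and reference counted through @alloc_count. */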
struct nfp_sync {
	rte_spinlock_t spinlock;

	uint16_t alloc_count;

	struct nfp_sync_common process;

	struct nfp_sync_common pci[NFP_SYNC_PCI_MAX];

	const struct rte_memzone *mz;
};

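/**
 * Look up the shared 'nfp_sync' memzone, or reserve and initialize it on
 * first use, and return the contained nfp_sync object with its reference
 * count incremented.
 */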
struct nfp_sync *
nfp_sync_alloc(void)
{
	uint16_t i;
	struct nfp_sync *sync;
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup("nfp_sync");
	if (mz != NULL) {
		sync = mz->addr;
		sync->alloc_count++;

		return sync;
	}

	mz = rte_memzone_reserve("nfp_sync", sizeof(*sync), SOCKET_ID_ANY,
			RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL)
		return NULL;

	sync = mz->addr;

	memset(sync, 0, sizeof(*sync));

	rte_spinlock_init(&sync->spinlock);
	sync->alloc_count = 1;
	sync->mz = mz;

	for (i = 0; i < NFP_SYNC_PCI_MAX; i++)
		sync->pci[i].avail = NFP_SYNC_ELEMENT_MAX;

	sync->process.avail = NFP_SYNC_ELEMENT_MAX;

	return sync;
}

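/**
 * Drop one reference on the nfp_sync object and release the memzone when the
 * last reference is gone, warning about any handles still allocated.
 */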
void
nfp_sync_free(struct nfp_sync *sync)
{
	uint16_t i;

	rte_spinlock_lock(&sync->spinlock);

	sync->alloc_count--;
	if (sync->alloc_count != 0) {
		rte_spinlock_unlock(&sync->spinlock);
		return;
	}

	if (sync->process.avail != NFP_SYNC_ELEMENT_MAX)
		PMD_DRV_LOG(ERR, "Sync process handle residue.");

	for (i = 0; i < NFP_SYNC_PCI_MAX; i++) {
		if (sync->pci[i].avail != NFP_SYNC_ELEMENT_MAX)
			PMD_DRV_LOG(ERR, "Sync %s pci handle residue.",
					sync->pci[i].pci_name);
	}

	rte_spinlock_unlock(&sync->spinlock);

	rte_memzone_free(sync->mz);
}

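/** Allocate zeroed memory for one element and take the first reference on it. */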
static void *
nfp_sync_element_alloc(struct nfp_sync_element *element,
		uint32_t magic,
		uint32_t size)
{
	void *handle;

	handle = rte_zmalloc(NULL, size, 0);
	if (handle == NULL)
		return NULL;

	element->handle = handle;
	element->count = 1;
	element->magic = magic;

	return handle;
}

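/** Drop one reference on an element and free its memory on the last one. */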
static void
nfp_sync_element_free(struct nfp_sync_element *element,
		void *handle)
{
	element->count--;
	if (element->count != 0)
		return;

	rte_free(handle);
	element->handle = NULL;
	element->magic = 0;
}

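/**
 * Return the handle matching @magic within one scope, creating it in the
 * first free element slot when it does not exist yet. The caller is expected
 * to hold the sync spinlock.
 */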
static void *
nfp_sync_common_handle_alloc(struct nfp_sync_common *common,
		uint32_t magic,
		uint32_t size)
{
	uint16_t i;
	void *handle = NULL;
	uint16_t avail_slot = NFP_SYNC_ELEMENT_MAX;

	/* An element with the same magic already exists, just take a reference. */
	for (i = 0; i < NFP_SYNC_ELEMENT_MAX; i++) {
		if (common->element[i].magic != magic)
			continue;

		common->element[i].count++;

		return common->element[i].handle;
	}

	if (common->avail == 0)
		return NULL;

	/* 'avail' is non-zero, so a free slot with magic == 0 must exist. */
	for (i = 0; i < NFP_SYNC_ELEMENT_MAX; i++) {
		if (common->element[i].magic == 0) {
			avail_slot = i;
			break;
		}
	}

	handle = nfp_sync_element_alloc(&common->element[avail_slot], magic, size);
	if (handle == NULL)
		return NULL;

	common->avail--;

	return handle;
}

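/**
 * Release one reference on @handle within one scope and mark its element slot
 * as available again once the last reference is gone. The caller is expected
 * to hold the sync spinlock.
 */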
static void
nfp_sync_common_handle_free(struct nfp_sync_common *common,
		void *handle)
{
	uint16_t i;

	if (common->avail == NFP_SYNC_ELEMENT_MAX)
		return;

	for (i = 0; i < NFP_SYNC_ELEMENT_MAX; i++) {
		if (common->element[i].handle == handle)
			break;
	}

	if (i == NFP_SYNC_ELEMENT_MAX)
		return;

	nfp_sync_element_free(&common->element[i], handle);

	if (common->element[i].count == 0)
		common->avail++;
}

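/** Allocate or take a reference on a handle shared by the whole process. */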
static void *
nfp_sync_process_inner_handle_alloc(struct nfp_sync *sync,
		uint32_t magic,
		uint32_t size)
{
	void *handle = NULL;

	rte_spinlock_lock(&sync->spinlock);

	handle = nfp_sync_common_handle_alloc(&sync->process, magic, size);
	if (handle == NULL)
		PMD_DRV_LOG(ERR, "Process handle alloc failed.");

	rte_spinlock_unlock(&sync->spinlock);

	return handle;
}

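/** Release one reference on a process scoped handle. */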
static void
nfp_sync_process_inner_handle_free(struct nfp_sync *sync,
		void *handle)
{
	rte_spinlock_lock(&sync->spinlock);

	nfp_sync_common_handle_free(&sync->process, handle);

	rte_spinlock_unlock(&sync->spinlock);
}

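/** Get the current reference count of a process scoped handle, 0 when it is not found. */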
static uint16_t
nfp_sync_process_handle_count_get(struct nfp_sync *sync,
		void *handle)
{
	uint16_t i;
	uint16_t count = 0;

	rte_spinlock_lock(&sync->spinlock);

	for (i = 0; i < NFP_SYNC_ELEMENT_MAX; i++) {
		if (sync->process.element[i].handle == handle) {
			count = sync->process.element[i].count;
			break;
		}
	}

	rte_spinlock_unlock(&sync->spinlock);

	return count;
}

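/**
 * Allocate or take a reference on a handle scoped to one PCI device, binding
 * the device name to a free PCI slot on first use. Return NULL when no slot
 * or element is available.
 */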
static void *
nfp_sync_pci_inner_handle_alloc(struct nfp_sync *sync,
		const char *pci_name,
		uint32_t magic,
		uint32_t size)
{
	uint16_t i;
	void *handle = NULL;
	uint16_t pci_avail_id = NFP_SYNC_PCI_MAX;

	rte_spinlock_lock(&sync->spinlock);

	/* Reuse the slot already bound to this PCI device. */
	for (i = 0; i < NFP_SYNC_PCI_MAX; i++) {
		if (strcmp(pci_name, sync->pci[i].pci_name) == 0) {
			pci_avail_id = i;
			goto common_alloc;
		}
	}

	/* Otherwise bind the device name to the first unused slot. */
	for (i = 0; i < NFP_SYNC_PCI_MAX; i++) {
		if (strlen(sync->pci[i].pci_name) == 0) {
			pci_avail_id = i;
			rte_strlcpy(sync->pci[pci_avail_id].pci_name, pci_name, PCI_PRI_STR_SIZE);
			goto common_alloc;
		}
	}

	rte_spinlock_unlock(&sync->spinlock);

	return NULL;

common_alloc:
	handle = nfp_sync_common_handle_alloc(&sync->pci[pci_avail_id],
			magic, size);
	if (handle == NULL)
		PMD_DRV_LOG(ERR, "PCI handle alloc failed.");

	rte_spinlock_unlock(&sync->spinlock);

	return handle;
}

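/**
 * Release one reference on a PCI scoped handle and unbind the device name
 * from its slot once all of its handles have been freed.
 */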
static void
nfp_sync_pci_inner_handle_free(struct nfp_sync *sync,
		const char *pci_name,
		void *handle)
{
	uint16_t i;
	char *name_tmp;

	rte_spinlock_lock(&sync->spinlock);

	for (i = 0; i < NFP_SYNC_PCI_MAX; i++) {
		name_tmp = sync->pci[i].pci_name;
		if (strlen(name_tmp) != 0 && strcmp(pci_name, name_tmp) == 0) {
			nfp_sync_common_handle_free(&sync->pci[i], handle);
			if (sync->pci[i].avail == NFP_SYNC_ELEMENT_MAX)
				name_tmp[0] = 0;
			break;
		}
	}

	rte_spinlock_unlock(&sync->spinlock);
}

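/** Get the current reference count of a PCI scoped handle, 0 when it is not found. */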
static uint16_t
nfp_sync_pci_handle_count_get(struct nfp_sync *sync,
		const char *pci_name,
		void *handle)
{
	uint16_t i;
	uint16_t count = 0;
	struct nfp_sync_common *pci_common;

	rte_spinlock_lock(&sync->spinlock);

	for (i = 0; i < NFP_SYNC_PCI_MAX; i++) {
		if (strcmp(sync->pci[i].pci_name, pci_name) == 0)
			break;
	}

	if (i == NFP_SYNC_PCI_MAX) {
		rte_spinlock_unlock(&sync->spinlock);
		return 0;
	}

	pci_common = &sync->pci[i];

	for (i = 0; i < NFP_SYNC_ELEMENT_MAX; i++) {
		if (pci_common->element[i].handle == handle) {
			count = pci_common->element[i].count;
			break;
		}
	}

	rte_spinlock_unlock(&sync->spinlock);

	return count;
}

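/**
 * Allocate or take a reference on a shared handle identified by @magic.
 * A NULL @pci_dev selects the process wide scope, otherwise the handle is
 * scoped to the given PCI device.
 *
 * Illustrative usage sketch; the magic value and the payload type are made
 * up by the caller and are not defined in this file:
 *
 *    struct foo *data;
 *
 *    data = nfp_sync_handle_alloc(sync, pci_dev, magic, sizeof(*data));
 *    if (data == NULL)
 *        return -ENOMEM;
 *    ...
 *    nfp_sync_handle_free(sync, pci_dev, data);
 */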
void *
nfp_sync_handle_alloc(struct nfp_sync *sync,
		struct rte_pci_device *pci_dev,
		uint32_t magic,
		uint32_t size)
{
	if (pci_dev == NULL)
		return nfp_sync_process_inner_handle_alloc(sync, magic, size);

	return nfp_sync_pci_inner_handle_alloc(sync, pci_dev->device.name,
			magic, size);
}

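/** Release one reference on a handle obtained from nfp_sync_handle_alloc(). */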
void
nfp_sync_handle_free(struct nfp_sync *sync,
		struct rte_pci_device *pci_dev,
		void *handle)
{
	if (pci_dev == NULL) {
		nfp_sync_process_inner_handle_free(sync, handle);
		return;
	}

	nfp_sync_pci_inner_handle_free(sync, pci_dev->device.name, handle);
}

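/** Get the current reference count of a handle, 0 when it is not found. */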
uint16_t
nfp_sync_handle_count_get(struct nfp_sync *sync,
		struct rte_pci_device *pci_dev,
		void *handle)
{
	if (pci_dev == NULL)
		return nfp_sync_process_handle_count_get(sync, handle);

	return nfp_sync_pci_handle_count_get(sync, pci_dev->device.name, handle);
}