xref: /dpdk/lib/graph/node.c (revision 6f3dbd306de03410cffb40a0f0b47a2cdcfcf362)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2020 Marvell International Ltd.
3  */
4 
5 #include <stdbool.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 
10 #include <rte_common.h>
11 #include <rte_debug.h>
12 #include <rte_errno.h>
13 #include <rte_string_fns.h>
14 
15 #include "graph_private.h"
16 
17 static struct node_head node_list = STAILQ_HEAD_INITIALIZER(node_list);
18 static rte_node_t node_id;
19 
20 #define NODE_ID_CHECK(id) ID_CHECK(id, node_id)
21 
22 /* Private functions */
struct node_head *
node_list_head_get(void)
{
	/* Library-internal accessor for the global registered-node list. */
	return &node_list;
}
28 
29 struct node *
30 node_from_name(const char *name)
31 {
32 	struct node *node;
33 
34 	STAILQ_FOREACH(node, &node_list, next)
35 		if (strncmp(node->name, name, RTE_NODE_NAMESIZE) == 0)
36 			return node;
37 
38 	return NULL;
39 }
40 
41 static bool
42 node_has_duplicate_entry(const char *name)
43 {
44 	struct node *node;
45 
46 	/* Is duplicate name registered */
47 	STAILQ_FOREACH(node, &node_list, next) {
48 		if (strncmp(node->name, name, RTE_NODE_NAMESIZE) == 0) {
49 			rte_errno = EEXIST;
50 			return 1;
51 		}
52 	}
53 	return 0;
54 }
55 
56 /* Public functions */
/*
 * Register a new node described by @reg and return its id, or
 * RTE_NODE_ID_INVALID on failure (rte_errno set to EINVAL/EEXIST/ENOMEM).
 * The registration copies everything out of @reg; the caller keeps
 * ownership of @reg itself.
 */
rte_node_t
__rte_node_register(const struct rte_node_register *reg)
{
	struct node *node;
	rte_edge_t i;
	size_t sz;

	/* Limit Node specific metadata to one cacheline on 64B CL machine */
	RTE_BUILD_BUG_ON((offsetof(struct rte_node, nodes) -
			  offsetof(struct rte_node, ctx)) !=
			 RTE_CACHE_LINE_MIN_SIZE);

	/* Serialize against concurrent register/clone/edge updates. */
	graph_spinlock_lock();

	/* Check sanity: a node must at least have a process callback. */
	if (reg == NULL || reg->process == NULL) {
		rte_errno = EINVAL;
		goto fail;
	}

	/* Check for duplicate name */
	if (node_has_duplicate_entry(reg->name))
		goto fail;

	/* struct node has a trailing flexible array of edge names. */
	sz = sizeof(struct node) + (reg->nb_edges * RTE_NODE_NAMESIZE);
	node = calloc(1, sz);
	if (node == NULL) {
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Deep-copy the optional xstats descriptor table. */
	if (reg->xstats) {
		sz = sizeof(*reg->xstats) + (reg->xstats->nb_xstats * RTE_NODE_XSTAT_DESC_SIZE);
		node->xstats = calloc(1, sz);
		if (node->xstats == NULL) {
			rte_errno = ENOMEM;
			goto free;
		}

		node->xstats->nb_xstats = reg->xstats->nb_xstats;
		for (i = 0; i < reg->xstats->nb_xstats; i++)
			if (rte_strscpy(node->xstats->xstat_desc[i], reg->xstats->xstat_desc[i],
					RTE_NODE_XSTAT_DESC_SIZE) < 0)
				goto free_xstat;
	}

	/* Initialize the node */
	if (rte_strscpy(node->name, reg->name, RTE_NODE_NAMESIZE) < 0)
		goto free_xstat;
	node->flags = reg->flags;
	node->process = reg->process;
	node->init = reg->init;
	node->fini = reg->fini;
	node->nb_edges = reg->nb_edges;
	node->parent_id = reg->parent_id;
	for (i = 0; i < reg->nb_edges; i++) {
		if (rte_strscpy(node->next_nodes[i], reg->next_nodes[i],
				RTE_NODE_NAMESIZE) < 0)
			goto free_xstat;
	}

	/* Not bound to any lcore yet; ids are assigned sequentially. */
	node->lcore_id = RTE_MAX_LCORE;
	node->id = node_id++;

	/* Add the node at tail */
	STAILQ_INSERT_TAIL(&node_list, node, next);
	graph_spinlock_unlock();

	return node->id;
free_xstat:
	free(node->xstats);
free:
	free(node);
fail:
	graph_spinlock_unlock();
	return RTE_NODE_ID_INVALID;
}
134 
135 static rte_node_t
136 node_clone(struct node *node, const char *name)
137 {
138 	rte_node_t rc = RTE_NODE_ID_INVALID;
139 	struct rte_node_register *reg;
140 	rte_edge_t i;
141 
142 	/* Don't allow to clone a node from a cloned node */
143 	if (node->parent_id != RTE_NODE_ID_INVALID) {
144 		rte_errno = EEXIST;
145 		goto fail;
146 	}
147 
148 	reg = calloc(1, sizeof(*reg) + (sizeof(char *) * node->nb_edges));
149 	if (reg == NULL) {
150 		rte_errno = ENOMEM;
151 		goto fail;
152 	}
153 
154 	if (node->xstats) {
155 		reg->xstats = calloc(1, sizeof(*node->xstats) +
156 				     (node->xstats->nb_xstats * RTE_NODE_XSTAT_DESC_SIZE));
157 		if (reg->xstats == NULL) {
158 			rte_errno = ENOMEM;
159 			goto fail;
160 		}
161 
162 		for (i = 0; i < node->xstats->nb_xstats; i++)
163 			if (rte_strscpy(reg->xstats->xstat_desc[i], node->xstats->xstat_desc[i],
164 					RTE_NODE_XSTAT_DESC_SIZE) < 0)
165 				goto free_xstat;
166 	}
167 
168 	/* Clone the source node */
169 	reg->flags = node->flags;
170 	reg->process = node->process;
171 	reg->init = node->init;
172 	reg->fini = node->fini;
173 	reg->nb_edges = node->nb_edges;
174 	reg->parent_id = node->id;
175 
176 	for (i = 0; i < node->nb_edges; i++)
177 		reg->next_nodes[i] = node->next_nodes[i];
178 
179 	/* Naming ceremony of the new node. name is node->name + "-" + name */
180 	if (clone_name(reg->name, node->name, name))
181 		goto free;
182 
183 	rc = __rte_node_register(reg);
184 free_xstat:
185 	free(reg->xstats);
186 free:
187 	free(reg);
188 fail:
189 	return rc;
190 }
191 
192 rte_node_t
193 rte_node_clone(rte_node_t id, const char *name)
194 {
195 	struct node *node;
196 
197 	NODE_ID_CHECK(id);
198 	STAILQ_FOREACH(node, &node_list, next)
199 		if (node->id == id)
200 			return node_clone(node, name);
201 
202 fail:
203 	return RTE_NODE_ID_INVALID;
204 }
205 
206 rte_node_t
207 rte_node_from_name(const char *name)
208 {
209 	struct node *node;
210 
211 	STAILQ_FOREACH(node, &node_list, next)
212 		if (strncmp(node->name, name, RTE_NODE_NAMESIZE) == 0)
213 			return node->id;
214 
215 	return RTE_NODE_ID_INVALID;
216 }
217 
218 char *
219 rte_node_id_to_name(rte_node_t id)
220 {
221 	struct node *node;
222 
223 	NODE_ID_CHECK(id);
224 	STAILQ_FOREACH(node, &node_list, next)
225 		if (node->id == id)
226 			return node->name;
227 
228 fail:
229 	return NULL;
230 }
231 
232 rte_edge_t
233 rte_node_edge_count(rte_node_t id)
234 {
235 	struct node *node;
236 
237 	NODE_ID_CHECK(id);
238 	STAILQ_FOREACH(node, &node_list, next)
239 		if (node->id == id)
240 			return node->nb_edges;
241 fail:
242 	return RTE_EDGE_ID_INVALID;
243 }
244 
245 static rte_edge_t
246 edge_update(struct node *node, struct node *prev, rte_edge_t from,
247 	    const char **next_nodes, rte_edge_t nb_edges)
248 {
249 	rte_edge_t i, max_edges, count = 0;
250 	struct node *new_node;
251 	bool need_realloc;
252 	size_t sz;
253 
254 	if (from == RTE_EDGE_ID_INVALID)
255 		from = node->nb_edges;
256 
257 	/* Don't create hole in next_nodes[] list */
258 	if (from > node->nb_edges) {
259 		rte_errno = ENOMEM;
260 		goto fail;
261 	}
262 
263 	/* Remove me from list */
264 	STAILQ_REMOVE(&node_list, node, node, next);
265 
266 	/* Allocate the storage space for new node if required */
267 	max_edges = from + nb_edges;
268 	need_realloc = max_edges > node->nb_edges;
269 	if (need_realloc) {
270 		sz = sizeof(struct node) + (max_edges * RTE_NODE_NAMESIZE);
271 		new_node = realloc(node, sz);
272 		if (new_node == NULL) {
273 			rte_errno = ENOMEM;
274 			goto restore;
275 		} else {
276 			node = new_node;
277 		}
278 	}
279 
280 	/* Update the new nodes name */
281 	for (i = from; i < max_edges; i++, count++) {
282 		if (rte_strscpy(node->next_nodes[i], next_nodes[count],
283 				RTE_NODE_NAMESIZE) < 0)
284 			goto restore;
285 	}
286 restore:
287 	/* Update the linked list to point new node address in prev node */
288 	if (prev)
289 		STAILQ_INSERT_AFTER(&node_list, prev, node, next);
290 	else
291 		STAILQ_INSERT_HEAD(&node_list, node, next);
292 
293 	if (need_realloc)
294 		node->nb_edges = max_edges;
295 
296 fail:
297 	return count;
298 }
299 
300 rte_edge_t
301 rte_node_edge_shrink(rte_node_t id, rte_edge_t size)
302 {
303 	rte_edge_t rc = RTE_EDGE_ID_INVALID;
304 	struct node *node;
305 
306 	NODE_ID_CHECK(id);
307 	graph_spinlock_lock();
308 
309 	STAILQ_FOREACH(node, &node_list, next) {
310 		if (node->id == id) {
311 			if (node->nb_edges < size) {
312 				rte_errno = E2BIG;
313 			} else {
314 				node->nb_edges = size;
315 				rc = size;
316 			}
317 			break;
318 		}
319 	}
320 
321 	graph_spinlock_unlock();
322 fail:
323 	return rc;
324 }
325 
326 rte_edge_t
327 rte_node_edge_update(rte_node_t id, rte_edge_t from, const char **next_nodes,
328 		     uint16_t nb_edges)
329 {
330 	rte_edge_t rc = RTE_EDGE_ID_INVALID;
331 	struct node *n, *prev;
332 
333 	NODE_ID_CHECK(id);
334 	graph_spinlock_lock();
335 
336 	prev = NULL;
337 	STAILQ_FOREACH(n, &node_list, next) {
338 		if (n->id == id) {
339 			rc = edge_update(n, prev, from, next_nodes, nb_edges);
340 			break;
341 		}
342 		prev = n;
343 	}
344 
345 	graph_spinlock_unlock();
346 fail:
347 	return rc;
348 }
349 
350 static rte_node_t
351 node_copy_edges(struct node *node, char *next_nodes[])
352 {
353 	rte_edge_t i;
354 
355 	for (i = 0; i < node->nb_edges; i++)
356 		next_nodes[i] = node->next_nodes[i];
357 
358 	return i;
359 }
360 
361 rte_node_t
362 rte_node_edge_get(rte_node_t id, char *next_nodes[])
363 {
364 	rte_node_t rc = RTE_NODE_ID_INVALID;
365 	struct node *node;
366 
367 	NODE_ID_CHECK(id);
368 	graph_spinlock_lock();
369 
370 	STAILQ_FOREACH(node, &node_list, next) {
371 		if (node->id == id) {
372 			if (next_nodes == NULL)
373 				rc = sizeof(char *) * node->nb_edges;
374 			else
375 				rc = node_copy_edges(node, next_nodes);
376 			break;
377 		}
378 	}
379 
380 	graph_spinlock_unlock();
381 fail:
382 	return rc;
383 }
384 
385 static void
386 node_scan_dump(FILE *f, rte_node_t id, bool all)
387 {
388 	struct node *node;
389 
390 	RTE_ASSERT(f != NULL);
391 	NODE_ID_CHECK(id);
392 
393 	STAILQ_FOREACH(node, &node_list, next) {
394 		if (all == true) {
395 			node_dump(f, node);
396 		} else if (node->id == id) {
397 			node_dump(f, node);
398 			return;
399 		}
400 	}
401 fail:
402 	return;
403 }
404 
/* Dump the state of a single node identified by @id to stream @f. */
void
rte_node_dump(FILE *f, rte_node_t id)
{
	node_scan_dump(f, id, false);
}
410 
/* Dump every registered node to stream @f (id 0 is ignored in all mode). */
void
rte_node_list_dump(FILE *f)
{
	node_scan_dump(f, 0, true);
}
416 
/* Number of node ids allocated so far (ids are assigned sequentially). */
rte_node_t
rte_node_max_count(void)
{
	return node_id;
}
422