xref: /dpdk/lib/graph/node.c (revision 455a771fd6f1a9cb6edc8711ff278ad31709cf7c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2020 Marvell International Ltd.
3  */
4 
5 #include <stdbool.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 
10 #include <rte_common.h>
11 #include <rte_debug.h>
12 #include <rte_errno.h>
13 #include <rte_string_fns.h>
14 
15 #include "graph_private.h"
16 
17 static struct node_head node_list = STAILQ_HEAD_INITIALIZER(node_list);
18 static rte_node_t node_id;
19 
20 #define NODE_ID_CHECK(id) ID_CHECK(id, node_id)
21 
22 /* Private functions */
/* Return the head of the global node registry list (library-private). */
struct node_head *
node_list_head_get(void)
{
	return &node_list;
}
28 
29 struct node *
30 node_from_name(const char *name)
31 {
32 	struct node *node;
33 
34 	STAILQ_FOREACH(node, &node_list, next)
35 		if (strncmp(node->name, name, RTE_NODE_NAMESIZE) == 0)
36 			return node;
37 
38 	return NULL;
39 }
40 
41 static bool
42 node_has_duplicate_entry(const char *name)
43 {
44 	struct node *node;
45 
46 	/* Is duplicate name registered */
47 	STAILQ_FOREACH(node, &node_list, next) {
48 		if (strncmp(node->name, name, RTE_NODE_NAMESIZE) == 0) {
49 			rte_errno = EEXIST;
50 			return 1;
51 		}
52 	}
53 	return 0;
54 }
55 
56 /* Public functions */
/*
 * Register a node described by @reg and return its new node id, or
 * RTE_NODE_ID_INVALID on failure (rte_errno set to EINVAL/EEXIST/ENOMEM).
 * The registry is protected by the graph spinlock for the whole operation.
 */
rte_node_t
__rte_node_register(const struct rte_node_register *reg)
{
	struct node *node;
	rte_edge_t i;
	size_t sz;

	/* Limit Node specific metadata to one cacheline on 64B CL machine */
	RTE_BUILD_BUG_ON((offsetof(struct rte_node, nodes) -
			  offsetof(struct rte_node, ctx)) !=
			 RTE_CACHE_LINE_MIN_SIZE);

	graph_spinlock_lock();

	/* Check sanity */
	if (reg == NULL || reg->process == NULL) {
		rte_errno = EINVAL;
		goto fail;
	}

	/* Check for duplicate name */
	if (node_has_duplicate_entry(reg->name))
		goto fail;

	/* struct node ends in a flexible next_nodes[] array of
	 * RTE_NODE_NAMESIZE-byte name slots, one per edge.
	 */
	sz = sizeof(struct node) + (reg->nb_edges * RTE_NODE_NAMESIZE);
	node = calloc(1, sz);
	if (node == NULL) {
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Initialize the node */
	if (rte_strscpy(node->name, reg->name, RTE_NODE_NAMESIZE) < 0)
		goto free;
	node->flags = reg->flags;
	node->process = reg->process;
	node->init = reg->init;
	node->fini = reg->fini;
	node->nb_edges = reg->nb_edges;
	node->parent_id = reg->parent_id;
	/* Copy edge names; rte_strscpy returns < 0 on truncation/error. */
	for (i = 0; i < reg->nb_edges; i++) {
		if (rte_strscpy(node->next_nodes[i], reg->next_nodes[i],
				RTE_NODE_NAMESIZE) < 0)
			goto free;
	}

	/* Not bound to any lcore until explicitly assigned. */
	node->lcore_id = RTE_MAX_LCORE;
	node->id = node_id++;

	/* Add the node at tail */
	STAILQ_INSERT_TAIL(&node_list, node, next);
	graph_spinlock_unlock();

	return node->id;
free:
	free(node);
fail:
	graph_spinlock_unlock();
	return RTE_NODE_ID_INVALID;
}
117 
/*
 * Clone @node under a derived name ("<parent>-<name>") by building a
 * temporary rte_node_register and registering it. Returns the new node id
 * or RTE_NODE_ID_INVALID (rte_errno set). Cloning a clone is rejected.
 */
static rte_node_t
node_clone(struct node *node, const char *name)
{
	rte_node_t rc = RTE_NODE_ID_INVALID;
	struct rte_node_register *reg;
	rte_edge_t i;

	/* Don't allow to clone a node from a cloned node */
	if (node->parent_id != RTE_NODE_ID_INVALID) {
		rte_errno = EEXIST;
		goto fail;
	}

	/* reg ends in a flexible array of next-node name pointers. */
	reg = calloc(1, sizeof(*reg) + (sizeof(char *) * node->nb_edges));
	if (reg == NULL) {
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Clone the source node */
	reg->flags = node->flags;
	reg->process = node->process;
	reg->init = node->init;
	reg->fini = node->fini;
	reg->nb_edges = node->nb_edges;
	reg->parent_id = node->id;

	/* Alias the parent's edge-name storage; __rte_node_register copies
	 * the strings into the new node, so no duplication is needed here.
	 */
	for (i = 0; i < node->nb_edges; i++)
		reg->next_nodes[i] = node->next_nodes[i];

	/* Naming ceremony of the new node. name is node->name + "-" + name */
	if (clone_name(reg->name, node->name, name))
		goto free;

	rc = __rte_node_register(reg);
free:
	free(reg);
fail:
	return rc;
}
158 
159 rte_node_t
160 rte_node_clone(rte_node_t id, const char *name)
161 {
162 	struct node *node;
163 
164 	NODE_ID_CHECK(id);
165 	STAILQ_FOREACH(node, &node_list, next)
166 		if (node->id == id)
167 			return node_clone(node, name);
168 
169 fail:
170 	return RTE_NODE_ID_INVALID;
171 }
172 
173 rte_node_t
174 rte_node_from_name(const char *name)
175 {
176 	struct node *node;
177 
178 	STAILQ_FOREACH(node, &node_list, next)
179 		if (strncmp(node->name, name, RTE_NODE_NAMESIZE) == 0)
180 			return node->id;
181 
182 	return RTE_NODE_ID_INVALID;
183 }
184 
185 char *
186 rte_node_id_to_name(rte_node_t id)
187 {
188 	struct node *node;
189 
190 	NODE_ID_CHECK(id);
191 	STAILQ_FOREACH(node, &node_list, next)
192 		if (node->id == id)
193 			return node->name;
194 
195 fail:
196 	return NULL;
197 }
198 
199 rte_edge_t
200 rte_node_edge_count(rte_node_t id)
201 {
202 	struct node *node;
203 
204 	NODE_ID_CHECK(id);
205 	STAILQ_FOREACH(node, &node_list, next)
206 		if (node->id == id)
207 			return node->nb_edges;
208 fail:
209 	return RTE_EDGE_ID_INVALID;
210 }
211 
/*
 * Overwrite/append @nb_edges edge names of @node starting at index @from
 * (RTE_EDGE_ID_INVALID means "append at the end"). Because the edge-name
 * storage is a flexible array inside struct node, growing it requires
 * realloc(), which may move the node; the node is therefore unlinked from
 * the registry first and re-linked after @prev (or at the head) once the
 * final address is known. Returns the count of edge names copied.
 * Caller must hold the graph spinlock.
 */
static rte_edge_t
edge_update(struct node *node, struct node *prev, rte_edge_t from,
	    const char **next_nodes, rte_edge_t nb_edges)
{
	rte_edge_t i, max_edges, count = 0;
	struct node *new_node;
	bool need_realloc;
	size_t sz;

	if (from == RTE_EDGE_ID_INVALID)
		from = node->nb_edges;

	/* Don't create hole in next_nodes[] list */
	if (from > node->nb_edges) {
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Remove me from list */
	STAILQ_REMOVE(&node_list, node, node, next);

	/* Allocate the storage space for new node if required */
	max_edges = from + nb_edges;
	need_realloc = max_edges > node->nb_edges;
	if (need_realloc) {
		sz = sizeof(struct node) + (max_edges * RTE_NODE_NAMESIZE);
		new_node = realloc(node, sz);
		if (new_node == NULL) {
			rte_errno = ENOMEM;
			/* original node is still valid; re-link it below */
			goto restore;
		} else {
			node = new_node;
		}
	}

	/* Update the new nodes name */
	for (i = from; i < max_edges; i++, count++) {
		if (rte_strscpy(node->next_nodes[i], next_nodes[count],
				RTE_NODE_NAMESIZE) < 0)
			goto restore;
	}
restore:
	/* Update the linked list to point new node address in prev node */
	if (prev)
		STAILQ_INSERT_AFTER(&node_list, prev, node, next);
	else
		STAILQ_INSERT_HEAD(&node_list, node, next);

	/* NOTE(review): when rte_strscpy fails mid-loop we still bump
	 * nb_edges to max_edges, leaving the tail name slots with
	 * realloc-uninitialized contents; callers can detect the partial
	 * copy via the returned count — confirm this is intended upstream.
	 */
	if (need_realloc)
		node->nb_edges = max_edges;

fail:
	return count;
}
266 
267 rte_edge_t
268 rte_node_edge_shrink(rte_node_t id, rte_edge_t size)
269 {
270 	rte_edge_t rc = RTE_EDGE_ID_INVALID;
271 	struct node *node;
272 
273 	NODE_ID_CHECK(id);
274 	graph_spinlock_lock();
275 
276 	STAILQ_FOREACH(node, &node_list, next) {
277 		if (node->id == id) {
278 			if (node->nb_edges < size) {
279 				rte_errno = E2BIG;
280 			} else {
281 				node->nb_edges = size;
282 				rc = size;
283 			}
284 			break;
285 		}
286 	}
287 
288 	graph_spinlock_unlock();
289 fail:
290 	return rc;
291 }
292 
293 rte_edge_t
294 rte_node_edge_update(rte_node_t id, rte_edge_t from, const char **next_nodes,
295 		     uint16_t nb_edges)
296 {
297 	rte_edge_t rc = RTE_EDGE_ID_INVALID;
298 	struct node *n, *prev;
299 
300 	NODE_ID_CHECK(id);
301 	graph_spinlock_lock();
302 
303 	prev = NULL;
304 	STAILQ_FOREACH(n, &node_list, next) {
305 		if (n->id == id) {
306 			rc = edge_update(n, prev, from, next_nodes, nb_edges);
307 			break;
308 		}
309 		prev = n;
310 	}
311 
312 	graph_spinlock_unlock();
313 fail:
314 	return rc;
315 }
316 
317 static rte_node_t
318 node_copy_edges(struct node *node, char *next_nodes[])
319 {
320 	rte_edge_t i;
321 
322 	for (i = 0; i < node->nb_edges; i++)
323 		next_nodes[i] = node->next_nodes[i];
324 
325 	return i;
326 }
327 
328 rte_node_t
329 rte_node_edge_get(rte_node_t id, char *next_nodes[])
330 {
331 	rte_node_t rc = RTE_NODE_ID_INVALID;
332 	struct node *node;
333 
334 	NODE_ID_CHECK(id);
335 	graph_spinlock_lock();
336 
337 	STAILQ_FOREACH(node, &node_list, next) {
338 		if (node->id == id) {
339 			if (next_nodes == NULL)
340 				rc = sizeof(char *) * node->nb_edges;
341 			else
342 				rc = node_copy_edges(node, next_nodes);
343 			break;
344 		}
345 	}
346 
347 	graph_spinlock_unlock();
348 fail:
349 	return rc;
350 }
351 
352 static void
353 node_scan_dump(FILE *f, rte_node_t id, bool all)
354 {
355 	struct node *node;
356 
357 	RTE_ASSERT(f != NULL);
358 	NODE_ID_CHECK(id);
359 
360 	STAILQ_FOREACH(node, &node_list, next) {
361 		if (all == true) {
362 			node_dump(f, node);
363 		} else if (node->id == id) {
364 			node_dump(f, node);
365 			return;
366 		}
367 	}
368 fail:
369 	return;
370 }
371 
/* Dump the single node identified by @id to stream @f. */
void
rte_node_dump(FILE *f, rte_node_t id)
{
	node_scan_dump(f, id, false);
}
377 
/* Dump all registered nodes to stream @f (the id argument is ignored
 * when scanning all nodes).
 */
void
rte_node_list_dump(FILE *f)
{
	node_scan_dump(f, 0, true);
}
383 
/* Return the number of node ids handed out so far; ids are allocated
 * sequentially from 0, so this is one past the highest valid id.
 */
rte_node_t
rte_node_max_count(void)
{
	return node_id;
}
389