xref: /dpdk/lib/graph/node.c (revision a2258ea1beaac8f162fd38911a2d6691366dcd66)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_string_fns.h>

#include "graph_private.h"

static struct node_head node_list = STAILQ_HEAD_INITIALIZER(node_list);
static rte_node_t node_id;

#define NODE_ID_CHECK(id) ID_CHECK(id, node_id)

/* Private functions */
struct node_head *
node_list_head_get(void)
{
	return &node_list;
}

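/* Look up a node by name in the global node list; returns NULL if not found. */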
struct node *
node_from_name(const char *name)
{
	struct node *node;

	STAILQ_FOREACH(node, &node_list, next)
		if (strncmp(node->name, name, RTE_NODE_NAMESIZE) == 0)
			return node;

	return NULL;
}

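/* Return true and set rte_errno to EEXIST if a node with this name is already registered. */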
static bool
node_has_duplicate_entry(const char *name)
{
	struct node *node;

	/* Is a duplicate name already registered? */
	STAILQ_FOREACH(node, &node_list, next) {
		if (strncmp(node->name, name, RTE_NODE_NAMESIZE) == 0) {
			rte_errno = EEXIST;
			return true;
		}
	}
	return false;
}

/* Public functions */
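/*
 * Register a node from its registration data: validate the input, reject
 * duplicate names, copy the registration into a freshly allocated
 * struct node and append it to the global node list under the graph lock.
 */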
rte_node_t
__rte_node_register(const struct rte_node_register *reg)
{
	struct node *node;
	rte_edge_t i;
	size_t sz;

	/* Limit node-specific metadata to one cache line on a 64B cache line machine */
	RTE_BUILD_BUG_ON((offsetof(struct rte_node, nodes) -
			  offsetof(struct rte_node, ctx)) !=
			 RTE_CACHE_LINE_MIN_SIZE);

	graph_spinlock_lock();

	/* Check sanity */
	if (reg == NULL || reg->process == NULL) {
		rte_errno = EINVAL;
		goto fail;
	}

	/* Check for duplicate name */
	if (node_has_duplicate_entry(reg->name))
		goto fail;

	sz = sizeof(struct node) + (reg->nb_edges * RTE_NODE_NAMESIZE);
	node = calloc(1, sz);
	if (node == NULL) {
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Initialize the node */
	if (rte_strscpy(node->name, reg->name, RTE_NODE_NAMESIZE) < 0) {
		rte_errno = E2BIG;
		goto free;
	}
	node->flags = reg->flags;
	node->process = reg->process;
	node->init = reg->init;
	node->fini = reg->fini;
	node->nb_edges = reg->nb_edges;
	node->parent_id = reg->parent_id;
	for (i = 0; i < reg->nb_edges; i++) {
		if (rte_strscpy(node->next_nodes[i], reg->next_nodes[i],
				RTE_NODE_NAMESIZE) < 0) {
			rte_errno = E2BIG;
			goto free;
		}
	}

	node->id = node_id++;

	/* Add the node at the tail */
	STAILQ_INSERT_TAIL(&node_list, node, next);
	graph_spinlock_unlock();

	return node->id;
free:
	free(node);
fail:
	graph_spinlock_unlock();
	return RTE_NODE_ID_INVALID;
}

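/* Build the clone's name as "<parent node name>-<name>" into reg->name. */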
static int
clone_name(struct rte_node_register *reg, struct node *node, const char *name)
{
	ssize_t sz, rc;

#define SZ RTE_NODE_NAMESIZE
	rc = rte_strscpy(reg->name, node->name, SZ);
	if (rc < 0)
		goto fail;
	sz = rc;
	rc = rte_strscpy(reg->name + sz, "-", RTE_MAX((int16_t)(SZ - sz), 0));
	if (rc < 0)
		goto fail;
	sz += rc;
	sz = rte_strscpy(reg->name + sz, name, RTE_MAX((int16_t)(SZ - sz), 0));
	if (sz < 0)
		goto fail;

	return 0;
fail:
	rte_errno = E2BIG;
	return -rte_errno;
}

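/*
 * Clone a node: build a temporary registration that mirrors the parent
 * (process/init/fini callbacks, flags and next-node names), give it the
 * combined name and register it as a new node. Cloning a clone is refused.
 */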
static rte_node_t
node_clone(struct node *node, const char *name)
{
	rte_node_t rc = RTE_NODE_ID_INVALID;
	struct rte_node_register *reg;
	rte_edge_t i;

	/* Don't allow cloning a node from an already cloned node */
	if (node->parent_id != RTE_NODE_ID_INVALID) {
		rte_errno = EEXIST;
		goto fail;
	}

	/* Check for duplicate name */
	if (node_has_duplicate_entry(name))
		goto fail;

	reg = calloc(1, sizeof(*reg) + (sizeof(char *) * node->nb_edges));
	if (reg == NULL) {
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Clone the source node */
	reg->flags = node->flags;
	reg->process = node->process;
	reg->init = node->init;
	reg->fini = node->fini;
	reg->nb_edges = node->nb_edges;
	reg->parent_id = node->id;

	for (i = 0; i < node->nb_edges; i++)
		reg->next_nodes[i] = node->next_nodes[i];

	/* Naming ceremony of the new node: name is node->name + "-" + name */
	if (clone_name(reg, node, name))
		goto free;

	rc = __rte_node_register(reg);
free:
	free(reg);
fail:
	return rc;
}

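/* Public wrapper: find the node by id and clone it under the given name suffix. */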
rte_node_t
rte_node_clone(rte_node_t id, const char *name)
{
	struct node *node;

	NODE_ID_CHECK(id);
	STAILQ_FOREACH(node, &node_list, next)
		if (node->id == id)
			return node_clone(node, name);

fail:
	return RTE_NODE_ID_INVALID;
}

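/* Resolve a node name to its id; returns RTE_NODE_ID_INVALID if no such node exists. */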
rte_node_t
rte_node_from_name(const char *name)
{
	struct node *node;

	STAILQ_FOREACH(node, &node_list, next)
		if (strncmp(node->name, name, RTE_NODE_NAMESIZE) == 0)
			return node->id;

	return RTE_NODE_ID_INVALID;
}

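/* Resolve a node id to its name; returns NULL for an unknown id. */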
char *
rte_node_id_to_name(rte_node_t id)
{
	struct node *node;

	NODE_ID_CHECK(id);
	STAILQ_FOREACH(node, &node_list, next)
		if (node->id == id)
			return node->name;

fail:
	return NULL;
}

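/* Return the number of edges (next nodes) attached to a node. */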
rte_edge_t
rte_node_edge_count(rte_node_t id)
{
	struct node *node;

	NODE_ID_CHECK(id);
	STAILQ_FOREACH(node, &node_list, next)
		if (node->id == id)
			return node->nb_edges;
fail:
	return RTE_EDGE_ID_INVALID;
}

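/*
 * Grow or overwrite the next_nodes[] array of a node starting at index
 * 'from'; if 'from' is RTE_EDGE_ID_INVALID, new edges are appended after the
 * existing ones. The node may be reallocated to make room, so it is
 * temporarily removed from the node list and re-inserted at the same
 * position (after 'prev'). Returns the number of edge names copied.
 */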
static rte_edge_t
edge_update(struct node *node, struct node *prev, rte_edge_t from,
	    const char **next_nodes, rte_edge_t nb_edges)
{
	rte_edge_t i, max_edges, count = 0;
	struct node *new_node;
	bool need_realloc;
	size_t sz;

	if (from == RTE_EDGE_ID_INVALID)
		from = node->nb_edges;

	/* Don't create a hole in the next_nodes[] list */
	if (from > node->nb_edges) {
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Remove the node from the list */
	STAILQ_REMOVE(&node_list, node, node, next);

	/* Allocate the storage space for the new node if required */
	max_edges = from + nb_edges;
	need_realloc = max_edges > node->nb_edges;
	if (need_realloc) {
		sz = sizeof(struct node) + (max_edges * RTE_NODE_NAMESIZE);
		new_node = realloc(node, sz);
		if (new_node == NULL) {
			rte_errno = ENOMEM;
			goto restore;
		} else {
			node = new_node;
		}
	}

	/* Update the names of the new edges */
	for (i = from; i < max_edges; i++, count++) {
		if (rte_strscpy(node->next_nodes[i], next_nodes[count],
				RTE_NODE_NAMESIZE) < 0) {
			rte_errno = E2BIG;
			goto restore;
		}
	}
restore:
	/* Re-link the node so the list points at its (possibly moved) address */
	if (prev)
		STAILQ_INSERT_AFTER(&node_list, prev, node, next);
	else
		STAILQ_INSERT_HEAD(&node_list, node, next);

	if (need_realloc)
		node->nb_edges = max_edges;

fail:
	return count;
}

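/* Reduce the edge count of a node to 'size'; fails with E2BIG if 'size' exceeds the current count. */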
rte_edge_t
rte_node_edge_shrink(rte_node_t id, rte_edge_t size)
{
	rte_edge_t rc = RTE_EDGE_ID_INVALID;
	struct node *node;

	NODE_ID_CHECK(id);
	graph_spinlock_lock();

	STAILQ_FOREACH(node, &node_list, next) {
		if (node->id == id) {
			if (node->nb_edges < size) {
				rte_errno = E2BIG;
				goto fail;
			}
			node->nb_edges = size;
			rc = size;
			break;
		}
	}

fail:
	graph_spinlock_unlock();
	return rc;
}

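/* Update or extend the next-node names of a node starting at edge 'from', under the graph lock. */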
rte_edge_t
rte_node_edge_update(rte_node_t id, rte_edge_t from, const char **next_nodes,
		     uint16_t nb_edges)
{
	rte_edge_t rc = RTE_EDGE_ID_INVALID;
	struct node *n, *prev;

	NODE_ID_CHECK(id);
	graph_spinlock_lock();

	prev = NULL;
	STAILQ_FOREACH(n, &node_list, next) {
		if (n->id == id) {
			rc = edge_update(n, prev, from, next_nodes, nb_edges);
			break;
		}
		prev = n;
	}

	graph_spinlock_unlock();
fail:
	return rc;
}

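/* Copy pointers to the node's next-node name strings into the caller-provided array. */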
static rte_node_t
node_copy_edges(struct node *node, char *next_nodes[])
{
	rte_edge_t i;

	for (i = 0; i < node->nb_edges; i++)
		next_nodes[i] = node->next_nodes[i];

	return i;
}

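/*
 * If next_nodes is NULL, return the buffer size (in bytes) needed to hold
 * the edge name pointers; otherwise fill the array and return the edge count.
 */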
rte_node_t
rte_node_edge_get(rte_node_t id, char *next_nodes[])
{
	rte_node_t rc = RTE_NODE_ID_INVALID;
	struct node *node;

	NODE_ID_CHECK(id);
	graph_spinlock_lock();

	STAILQ_FOREACH(node, &node_list, next) {
		if (node->id == id) {
			if (next_nodes == NULL)
				rc = sizeof(char *) * node->nb_edges;
			else
				rc = node_copy_edges(node, next_nodes);
			break;
		}
	}

	graph_spinlock_unlock();
fail:
	return rc;
}

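/* Dump either every node (all == true) or only the node matching 'id' to the given stream. */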
static void
node_scan_dump(FILE *f, rte_node_t id, bool all)
{
	struct node *node;

	RTE_ASSERT(f != NULL);
	NODE_ID_CHECK(id);

	STAILQ_FOREACH(node, &node_list, next) {
		if (all == true) {
			node_dump(f, node);
		} else if (node->id == id) {
			node_dump(f, node);
			return;
		}
	}
fail:
	return;
}

void
rte_node_dump(FILE *f, rte_node_t id)
{
	node_scan_dump(f, id, false);
}

void
rte_node_list_dump(FILE *f)
{
	node_scan_dump(f, 0, true);
}

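/* Return the total number of node ids created so far (ids are allocated sequentially from zero). */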
rte_node_t
rte_node_max_count(void)
{
	return node_id;
}