/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_string_fns.h>

#include "graph_private.h"

static struct node_head node_list = STAILQ_HEAD_INITIALIZER(node_list);
static rte_node_t node_id;

#define NODE_ID_CHECK(id) ID_CHECK(id, node_id)

/* Private functions */
struct node_head *
node_list_head_get(void)
{
	return &node_list;
}

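/* Look up a node by name in the global node list; returns NULL if not found. */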
struct node *
node_from_name(const char *name)
{
	struct node *node;

	STAILQ_FOREACH(node, &node_list, next)
		if (strncmp(node->name, name, RTE_NODE_NAMESIZE) == 0)
			return node;

	return NULL;
}

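/* Return true and set rte_errno to EEXIST if the name is already registered. */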
static bool
node_has_duplicate_entry(const char *name)
{
	struct node *node;

	/* Check if a node with this name is already registered */
	STAILQ_FOREACH(node, &node_list, next) {
		if (strncmp(node->name, name, RTE_NODE_NAMESIZE) == 0) {
			rte_errno = EEXIST;
			return true;
		}
	}
	return false;
}

/* Public functions */
rte_node_t
__rte_node_register(const struct rte_node_register *reg)
{
	struct node *node;
	rte_edge_t i;
	size_t sz;

	/* Limit node-specific metadata to one cache line on machines with a 64 B cache line */
	RTE_BUILD_BUG_ON((offsetof(struct rte_node, nodes) -
			  offsetof(struct rte_node, ctx)) !=
			 RTE_CACHE_LINE_MIN_SIZE);

	graph_spinlock_lock();

	/* Check sanity */
	if (reg == NULL || reg->process == NULL) {
		rte_errno = EINVAL;
		goto fail;
	}

	/* Check for duplicate name */
	if (node_has_duplicate_entry(reg->name))
		goto fail;

	sz = sizeof(struct node) + (reg->nb_edges * RTE_NODE_NAMESIZE);
	node = calloc(1, sz);
	if (node == NULL) {
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Initialize the node */
	if (rte_strscpy(node->name, reg->name, RTE_NODE_NAMESIZE) < 0)
		goto free;
	node->flags = reg->flags;
	node->process = reg->process;
	node->init = reg->init;
	node->fini = reg->fini;
	node->nb_edges = reg->nb_edges;
	node->parent_id = reg->parent_id;
	for (i = 0; i < reg->nb_edges; i++) {
		if (rte_strscpy(node->next_nodes[i], reg->next_nodes[i],
				RTE_NODE_NAMESIZE) < 0)
			goto free;
	}

	node->id = node_id++;

	/* Add the node at tail */
	STAILQ_INSERT_TAIL(&node_list, node, next);
	graph_spinlock_unlock();

	return node->id;
free:
	free(node);
fail:
	graph_spinlock_unlock();
	return RTE_NODE_ID_INVALID;
}

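/* Compose the clone's name as "<parent name>-<name>" into reg->name. */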
static int
clone_name(struct rte_node_register *reg, struct node *node, const char *name)
{
	ssize_t sz, rc;

#define SZ RTE_NODE_NAMESIZE
	rc = rte_strscpy(reg->name, node->name, SZ);
	if (rc < 0)
		goto fail;
	sz = rc;
	rc = rte_strscpy(reg->name + sz, "-", RTE_MAX((int16_t)(SZ - sz), 0));
	if (rc < 0)
		goto fail;
	sz += rc;
	sz = rte_strscpy(reg->name + sz, name, RTE_MAX((int16_t)(SZ - sz), 0));
	if (sz < 0)
		goto fail;

	return 0;
fail:
	rte_errno = E2BIG;
	return -rte_errno;
}

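/* Register a new node that inherits the parent node's callbacks and edges. */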
static rte_node_t
node_clone(struct node *node, const char *name)
{
	rte_node_t rc = RTE_NODE_ID_INVALID;
	struct rte_node_register *reg;
	rte_edge_t i;

	/* Don't allow cloning from an already cloned node */
	if (node->parent_id != RTE_NODE_ID_INVALID) {
		rte_errno = EEXIST;
		goto fail;
	}

	reg = calloc(1, sizeof(*reg) + (sizeof(char *) * node->nb_edges));
	if (reg == NULL) {
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Clone the source node */
	reg->flags = node->flags;
	reg->process = node->process;
	reg->init = node->init;
	reg->fini = node->fini;
	reg->nb_edges = node->nb_edges;
	reg->parent_id = node->id;

	for (i = 0; i < node->nb_edges; i++)
		reg->next_nodes[i] = node->next_nodes[i];

	/* Naming ceremony of the new node. name is node->name + "-" + name */
	if (clone_name(reg, node, name))
		goto free;

	rc = __rte_node_register(reg);
free:
	free(reg);
fail:
	return rc;
}

rte_node_t
rte_node_clone(rte_node_t id, const char *name)
{
	struct node *node;

	NODE_ID_CHECK(id);
	STAILQ_FOREACH(node, &node_list, next)
		if (node->id == id)
			return node_clone(node, name);

fail:
	return RTE_NODE_ID_INVALID;
}

rte_node_t
rte_node_from_name(const char *name)
{
	struct node *node;

	STAILQ_FOREACH(node, &node_list, next)
		if (strncmp(node->name, name, RTE_NODE_NAMESIZE) == 0)
			return node->id;

	return RTE_NODE_ID_INVALID;
}

char *
rte_node_id_to_name(rte_node_t id)
{
	struct node *node;

	NODE_ID_CHECK(id);
	STAILQ_FOREACH(node, &node_list, next)
		if (node->id == id)
			return node->name;

fail:
	return NULL;
}

rte_edge_t
rte_node_edge_count(rte_node_t id)
{
	struct node *node;

	NODE_ID_CHECK(id);
	STAILQ_FOREACH(node, &node_list, next)
		if (node->id == id)
			return node->nb_edges;
fail:
	return RTE_EDGE_ID_INVALID;
}

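/* Write edge names starting at index 'from', growing the node's next_nodes[]
 * storage if needed; returns the number of edges written.
 */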
static rte_edge_t
edge_update(struct node *node, struct node *prev, rte_edge_t from,
	    const char **next_nodes, rte_edge_t nb_edges)
{
	rte_edge_t i, max_edges, count = 0;
	struct node *new_node;
	bool need_realloc;
	size_t sz;

	if (from == RTE_EDGE_ID_INVALID)
		from = node->nb_edges;

	/* Don't create a hole in the next_nodes[] list */
	if (from > node->nb_edges) {
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Remove the node from the list; realloc may change its address */
	STAILQ_REMOVE(&node_list, node, node, next);

	/* Allocate additional storage for the new edges if required */
	max_edges = from + nb_edges;
	need_realloc = max_edges > node->nb_edges;
	if (need_realloc) {
		sz = sizeof(struct node) + (max_edges * RTE_NODE_NAMESIZE);
		new_node = realloc(node, sz);
		if (new_node == NULL) {
			rte_errno = ENOMEM;
			goto restore;
		}
		node = new_node;
	}

	/* Copy in the new edge names */
	for (i = from; i < max_edges; i++, count++) {
		if (rte_strscpy(node->next_nodes[i], next_nodes[count],
				RTE_NODE_NAMESIZE) < 0)
			goto restore;
	}
restore:
	/* Re-insert the node, which may have a new address, back into the list */
	if (prev)
		STAILQ_INSERT_AFTER(&node_list, prev, node, next);
	else
		STAILQ_INSERT_HEAD(&node_list, node, next);

	if (need_realloc)
		node->nb_edges = max_edges;

fail:
	return count;
}

rte_edge_t
rte_node_edge_shrink(rte_node_t id, rte_edge_t size)
{
	rte_edge_t rc = RTE_EDGE_ID_INVALID;
	struct node *node;

	NODE_ID_CHECK(id);
	graph_spinlock_lock();

	STAILQ_FOREACH(node, &node_list, next) {
		if (node->id == id) {
			if (node->nb_edges < size) {
				rte_errno = E2BIG;
			} else {
				node->nb_edges = size;
				rc = size;
			}
			break;
		}
	}

	graph_spinlock_unlock();
fail:
	return rc;
}

rte_edge_t
rte_node_edge_update(rte_node_t id, rte_edge_t from, const char **next_nodes,
		     uint16_t nb_edges)
{
	rte_edge_t rc = RTE_EDGE_ID_INVALID;
	struct node *n, *prev;

	NODE_ID_CHECK(id);
	graph_spinlock_lock();

	prev = NULL;
	STAILQ_FOREACH(n, &node_list, next) {
		if (n->id == id) {
			rc = edge_update(n, prev, from, next_nodes, nb_edges);
			break;
		}
		prev = n;
	}

	graph_spinlock_unlock();
fail:
	return rc;
}

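/* Fill next_nodes[] with pointers to the node's edge names; returns the edge count. */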
static rte_node_t
node_copy_edges(struct node *node, char *next_nodes[])
{
	rte_edge_t i;

	for (i = 0; i < node->nb_edges; i++)
		next_nodes[i] = node->next_nodes[i];

	return i;
}

rte_node_t
rte_node_edge_get(rte_node_t id, char *next_nodes[])
{
	rte_node_t rc = RTE_NODE_ID_INVALID;
	struct node *node;

	NODE_ID_CHECK(id);
	graph_spinlock_lock();

	STAILQ_FOREACH(node, &node_list, next) {
		if (node->id == id) {
			if (next_nodes == NULL)
				rc = sizeof(char *) * node->nb_edges;
			else
				rc = node_copy_edges(node, next_nodes);
			break;
		}
	}

	graph_spinlock_unlock();
fail:
	return rc;
}

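/* Dump the node matching id, or every registered node when all is set. */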
static void
node_scan_dump(FILE *f, rte_node_t id, bool all)
{
	struct node *node;

	RTE_ASSERT(f != NULL);
	NODE_ID_CHECK(id);

	STAILQ_FOREACH(node, &node_list, next) {
		if (all) {
			node_dump(f, node);
		} else if (node->id == id) {
			node_dump(f, node);
			return;
		}
	}
fail:
	return;
}

void
rte_node_dump(FILE *f, rte_node_t id)
{
	node_scan_dump(f, id, false);
}

void
rte_node_list_dump(FILE *f)
{
	node_scan_dump(f, 0, true);
}

rte_node_t
rte_node_max_count(void)
{
	return node_id;
}