xref: /dpdk/lib/stack/rte_stack_std.h (revision 99a2dd955fba6e4cc23b77d590a033650ced9c45)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef _RTE_STACK_STD_H_
#define _RTE_STACK_STD_H_

#include <rte_branch_prediction.h>

/**
 * @internal Push several objects on the stack (MT-safe).
 *
 * @param s
 *   A pointer to the stack structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to push on the stack from the obj_table.
 * @return
 *   Actual number of objects pushed (either 0 or *n*).
 */
static __rte_always_inline unsigned int
__rte_stack_std_push(struct rte_stack *s, void * const *obj_table,
		     unsigned int n)
{
	struct rte_stack_std *stack = &s->stack_std;
	unsigned int index;
	void **cache_objs;

	rte_spinlock_lock(&stack->lock);
	cache_objs = &stack->objs[stack->len];

	/* Is there sufficient space in the stack? */
	if ((stack->len + n) > s->capacity) {
		rte_spinlock_unlock(&stack->lock);
		return 0;
	}

	/* Add the new objects to the top of the stack */
	for (index = 0; index < n; ++index, obj_table++)
		cache_objs[index] = *obj_table;

	stack->len += n;

	rte_spinlock_unlock(&stack->lock);
	return n;
}
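
/*
 * Usage sketch: callers normally reach this function through the public
 * rte_stack_push() wrapper in rte_stack.h, which selects this standard
 * (spinlock-based) implementation when the stack was created without the
 * RTE_STACK_F_LF flag. The push is all-or-nothing: either all n objects are
 * stored or none are. A minimal example, with illustrative names (o0..o3
 * stand for the caller's objects):
 *
 *	struct rte_stack *s = rte_stack_create("example", 1024,
 *					       rte_socket_id(), 0);
 *	void *objs[4] = {o0, o1, o2, o3};
 *
 *	if (rte_stack_push(s, objs, 4) == 0)
 *		printf("push rejected: not enough free capacity\n");
 */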

/**
 * @internal Pop several objects from the stack (MT-safe).
 *
 * @param s
 *   A pointer to the stack structure.
 * @param obj_table
 *   A pointer to a table of void * pointers (objects).
 * @param n
 *   The number of objects to pull from the stack.
 * @return
 *   Actual number of objects popped (either 0 or *n*).
 */
static __rte_always_inline unsigned int
__rte_stack_std_pop(struct rte_stack *s, void **obj_table, unsigned int n)
{
	struct rte_stack_std *stack = &s->stack_std;
	unsigned int index, len;
	void **cache_objs;

	rte_spinlock_lock(&stack->lock);

	/* Are there enough objects in the stack? */
	if (unlikely(n > stack->len)) {
		rte_spinlock_unlock(&stack->lock);
		return 0;
	}

	cache_objs = stack->objs;

	/* Copy out from the top of the stack downwards */
	for (index = 0, len = stack->len - 1; index < n;
			++index, len--, obj_table++)
		*obj_table = cache_objs[len];

	stack->len -= n;
	rte_spinlock_unlock(&stack->lock);

	return n;
}
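
/*
 * Usage sketch: as with push, applications typically use the public
 * rte_stack_pop() wrapper rather than calling this function directly.
 * Objects come back in LIFO order, and the pop is likewise all-or-nothing:
 * if fewer than n objects are currently stored, nothing is popped and 0 is
 * returned. Continuing the illustrative push example above:
 *
 *	void *out[4];
 *	unsigned int num = rte_stack_pop(s, out, 4);
 *
 * On success (num == 4), out[0] holds the most recently pushed object (o3)
 * and out[3] the oldest (o0).
 */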

/**
 * @internal Return the number of used entries in a stack.
 *
 * @param s
 *   A pointer to the stack structure.
 * @return
 *   The number of used entries in the stack.
 */
static __rte_always_inline unsigned int
__rte_stack_std_count(struct rte_stack *s)
{
	return (unsigned int)s->stack_std.len;
}
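
/*
 * Note: the length is read without taking the stack's lock, so the result is
 * only a snapshot and may be stale by the time the caller acts on it. For
 * standard stacks, the public rte_stack_count() and rte_stack_free_count()
 * wrappers build on this helper; for example:
 *
 *	unsigned int used = rte_stack_count(s);
 *	unsigned int avail = rte_stack_free_count(s);
 *
 * where used + avail equals the capacity requested at creation time.
 */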

/**
 * @internal Initialize a standard stack.
 *
 * @param s
 *   A pointer to the stack structure.
 */
void
rte_stack_std_init(struct rte_stack *s);

/**
 * @internal Return the memory required for a standard stack.
 *
 * @param count
 *   The maximum number of objects the stack can hold.
 * @return
 *   The bytes to allocate for a standard stack.
 */
ssize_t
rte_stack_std_get_memsize(unsigned int count);
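
/*
 * These two helpers back the standard flavor of the generic creation path:
 * the library sizes the allocation with the get_memsize helper and then
 * initializes the zeroed memory. A simplified, hypothetical sketch of that
 * driver side (the real rte_stack_create() in rte_stack.c also handles
 * naming, NUMA placement and the lock-free flavor):
 *
 *	ssize_t sz = rte_stack_std_get_memsize(count);
 *	struct rte_stack *s = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *
 *	if (s != NULL) {
 *		s->capacity = count;
 *		rte_stack_std_init(s);
 *	}
 */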

#endif /* _RTE_STACK_STD_H_ */