/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <rte_mempool.h>
#include <rte_malloc.h>

struct rte_mempool_stack {
	rte_spinlock_t sl;

	uint32_t size;
	uint32_t len;
	void *objs[];
};

static int
stack_alloc(struct rte_mempool *mp)
{
	struct rte_mempool_stack *s;
	unsigned n = mp->size;
	int size = sizeof(*s) + (n + 16) * sizeof(void *);

	/* Allocate our local memory structure */
	s = rte_zmalloc_socket("mempool-stack",
			size,
			RTE_CACHE_LINE_SIZE,
			mp->socket_id);
	if (s == NULL) {
		RTE_LOG(ERR, MEMPOOL, "Cannot allocate stack!\n");
		return -ENOMEM;
	}

	rte_spinlock_init(&s->sl);

	s->size = n;
	mp->pool_data = s;

	return 0;
}

static int
stack_enqueue(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	struct rte_mempool_stack *s = mp->pool_data;
	void **cache_objs;
	unsigned index;

	rte_spinlock_lock(&s->sl);
	cache_objs = &s->objs[s->len];

	/* Is there sufficient space in the stack? */
	if ((s->len + n) > s->size) {
		rte_spinlock_unlock(&s->sl);
		return -ENOBUFS;
	}

	/* Add elements back into the cache */
	for (index = 0; index < n; ++index, obj_table++)
		cache_objs[index] = *obj_table;

	s->len += n;

	rte_spinlock_unlock(&s->sl);
	return 0;
}

static int
stack_dequeue(struct rte_mempool *mp, void **obj_table,
		unsigned n)
{
	struct rte_mempool_stack *s = mp->pool_data;
	void **cache_objs;
	unsigned index, len;

	rte_spinlock_lock(&s->sl);

	if (unlikely(n > s->len)) {
		rte_spinlock_unlock(&s->sl);
		return -ENOENT;
	}

	cache_objs = s->objs;

	for (index = 0, len = s->len - 1; index < n;
			++index, len--, obj_table++)
		*obj_table = cache_objs[len];

	s->len -= n;
	rte_spinlock_unlock(&s->sl);
	return 0;
}

static unsigned
stack_get_count(const struct rte_mempool *mp)
{
	struct rte_mempool_stack *s = mp->pool_data;

	return s->len;
}

static void
stack_free(struct rte_mempool *mp)
{
	rte_free((void *)(mp->pool_data));
}

static struct rte_mempool_ops ops_stack = {
	.name = "stack",
	.alloc = stack_alloc,
	.free = stack_free,
	.enqueue = stack_enqueue,
	.dequeue = stack_dequeue,
	.get_count = stack_get_count
};

MEMPOOL_REGISTER_OPS(ops_stack);
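/*
 * Illustrative usage sketch (not part of the driver itself): an application
 * can select this handler by name before the pool is populated, since
 * MEMPOOL_REGISTER_OPS() above makes it available as "stack".  The pool
 * name, object count, object size and error handling below are placeholder
 * choices for the example, not values required by this code.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("example_pool", 8192, 2048,
 *			0, 0, rte_socket_id(), 0);
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create mempool\n");
 *
 *	if (rte_mempool_set_ops_byname(mp, "stack", NULL) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot set mempool ops\n");
 *
 *	if (rte_mempool_populate_default(mp) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot populate mempool\n");
 */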