/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdio.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>

#include "octeontx_fpavf.h"

/*
 * rte_mempool ops backed by the OCTEON TX Free Pool Allocator virtual
 * function (FPA VF).  Object allocation and free are single MMIO
 * register accesses against the pool's BAR region, so enqueue/dequeue
 * bypass any software ring entirely and the hardware tracks the free
 * count.  The opaque pool handle produced at alloc time is stored in
 * mp->pool_id and decoded by the other ops.
 */

/*
 * Create the hardware pool for this mempool.
 *
 * The FPA block size covers header + element + trailer so hardware
 * buffers line up with rte_mempool's per-object layout.
 *
 * NOTE(review): creation failure appears to be detected indirectly —
 * octeontx_fpa_bufpool_block_size() returning a negative value on a
 * bad handle — rather than by checking the handle itself; confirm
 * against octeontx_fpavf.h that create() encodes errors this way.
 *
 * Returns 0 on success, or the negative value propagated from
 * octeontx_fpa_bufpool_block_size().
 */
static int
octeontx_fpavf_alloc(struct rte_mempool *mp)
{
	uintptr_t pool;
	uint32_t memseg_count = mp->size;
	uint32_t object_size;
	int rc = 0;

	object_size = mp->elt_size + mp->header_size + mp->trailer_size;

	pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
					OCTEONTX_FPAVF_BUF_OFFSET,
					mp->socket_id);
	rc = octeontx_fpa_bufpool_block_size(pool);
	if (rc < 0)
		goto _end;

	/* A size mismatch is logged but deliberately not treated as
	 * fatal: the pool was created and is usable. */
	if ((uint32_t)rc != object_size)
		fpavf_log_err("buffer size mismatch: %d instead of %u\n",
				rc, object_size);

	fpavf_log_info("Pool created %p with .. ", (void *)pool);
	fpavf_log_info("obj_sz %d, cnt %d\n", object_size, memseg_count);

	/* assign pool handle to mempool */
	mp->pool_id = (uint64_t)pool;

	return 0;

_end:
	return rc;
}

/* Tear down the hardware pool identified by mp->pool_id. */
static void
octeontx_fpavf_free(struct rte_mempool *mp)
{
	uintptr_t pool;
	pool = (uintptr_t)mp->pool_id;

	octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
}

/*
 * Pop one buffer from the hardware aura by reading the ALLOC register
 * at the given BAR base.  Callers treat a NULL return as "pool empty"
 * (see octeontx_fpavf_dequeue()).
 */
static __rte_always_inline void *
octeontx_fpa_bufpool_alloc(uintptr_t handle)
{
	return (void *)(uintptr_t)fpavf_read64((void *)(handle +
						FPA_VF_VHAURA_OP_ALLOC(0)));
}

/*
 * Push one buffer back to the hardware aura by writing its address to
 * the FREE register.  DWB=0 / FABS=1 are encoded into the register
 * offset; their hardware meaning is defined in octeontx_fpavf.h /
 * the FPA hardware manual — not inferable from here.
 */
static __rte_always_inline void
octeontx_fpa_bufpool_free(uintptr_t handle, void *buf)
{
	uint64_t free_addr = FPA_VF_FREE_ADDRS_S(FPA_VF_VHAURA_OP_FREE(0),
						 0 /* DWB */, 1 /* FABS */);

	fpavf_write64((uintptr_t)buf, (void *)(uintptr_t)(handle + free_addr));
}

/*
 * Mempool enqueue op: return n objects to the hardware pool.
 *
 * The low FPA_GPOOL_MASK bits of the handle carry the gpool id; they
 * are masked off to recover the BAR base address.  Hardware frees
 * cannot fail here, so this always returns 0.
 */
static int
octeontx_fpavf_enqueue(struct rte_mempool *mp, void * const *obj_table,
			unsigned int n)
{
	uintptr_t pool;
	unsigned int index;

	pool = (uintptr_t)mp->pool_id;
	/* Get pool bar address from handle */
	pool &= ~(uint64_t)FPA_GPOOL_MASK;
	for (index = 0; index < n; index++, obj_table++)
		octeontx_fpa_bufpool_free(pool, *obj_table);

	return 0;
}

/*
 * Mempool dequeue op: fetch exactly n objects from the hardware pool.
 *
 * All-or-nothing semantics: if the hardware runs dry mid-request,
 * every object obtained so far is pushed back and -ENOMEM is
 * returned, leaving the pool state unchanged from the caller's view.
 */
static int
octeontx_fpavf_dequeue(struct rte_mempool *mp, void **obj_table,
			unsigned int n)
{
	unsigned int index;
	uintptr_t pool;
	void *obj;

	pool = (uintptr_t)mp->pool_id;
	/* Get pool bar address from handle */
	pool &= ~(uint64_t)FPA_GPOOL_MASK;
	for (index = 0; index < n; index++, obj_table++) {
		obj = octeontx_fpa_bufpool_alloc(pool);
		if (obj == NULL) {
			/*
			 * Failed to allocate the requested number of objects
			 * from the pool. Current pool implementation requires
			 * completing the entire request or returning error
			 * otherwise.
			 * Free already allocated buffers to the pool.
			 */
			for (; index > 0; index--) {
				obj_table--;
				octeontx_fpa_bufpool_free(pool, *obj_table);
			}
			return -ENOMEM;
		}
		*obj_table = obj;
	}

	return 0;
}

/*
 * Mempool get_count op: the free-object count lives in hardware, so
 * simply query it through the pool handle.
 */
static unsigned int
octeontx_fpavf_get_count(const struct rte_mempool *mp)
{
	uintptr_t pool;

	pool = (uintptr_t)mp->pool_id;

	return octeontx_fpa_bufpool_free_count(pool);
}

/*
 * Mempool calc_mem_size op.
 *
 * Reserves space for obj_num + 1 objects so populate() can shift the
 * start address to a total_elt_sz boundary (see octeontx_fpavf_populate,
 * which always skips up to one full object) and still fit obj_num
 * objects.  min_chunk_size is raised to the whole area because the FPA
 * hardware range set in populate() requires one physically contiguous
 * region.
 */
static ssize_t
octeontx_fpavf_calc_mem_size(const struct rte_mempool *mp,
			     uint32_t obj_num, uint32_t pg_shift,
			     size_t *min_chunk_size, size_t *align)
{
	ssize_t mem_size;

	/*
	 * Simply need space for one more object to be able to
	 * fulfil alignment requirements.
	 */
	mem_size = rte_mempool_op_calc_mem_size_default(mp, obj_num + 1,
							pg_shift,
							min_chunk_size, align);
	if (mem_size >= 0) {
		/*
		 * Memory area which contains objects must be physically
		 * contiguous.
		 */
		*min_chunk_size = mem_size;
	}

	return mem_size;
}

/*
 * Mempool populate op: register the backing memory with the FPA VF,
 * then fall through to the default object placement.
 *
 * The start address is advanced to the next multiple of total_elt_sz
 * before handing the range to hardware.  Note the formula skips a full
 * total_elt_sz even when vaddr is already aligned (off is never 0);
 * calc_mem_size() budgets the extra object for exactly this reason, so
 * do not "optimize" this without changing both.
 *
 * Returns 0 on success, -EINVAL for an invalid IOVA or a range smaller
 * than the alignment skip, or a negative error from
 * octeontx_fpavf_pool_set_range() / the default populate.
 */
static int
octeontx_fpavf_populate(struct rte_mempool *mp, unsigned int max_objs,
			void *vaddr, rte_iova_t iova, size_t len,
			rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	size_t total_elt_sz;
	size_t off;
	uint8_t gpool;
	uintptr_t pool_bar;
	int ret;

	if (iova == RTE_BAD_IOVA)
		return -EINVAL;

	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

	/* align object start address to a multiple of total_elt_sz */
	off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);

	if (len < off)
		return -EINVAL;

	vaddr = (char *)vaddr + off;
	iova += off;
	len -= off;

	gpool = octeontx_fpa_bufpool_gpool(mp->pool_id);
	/* Strip the gpool id bits to recover the BAR base (same handle
	 * encoding as in enqueue/dequeue). */
	pool_bar = mp->pool_id & ~(uint64_t)FPA_GPOOL_MASK;

	ret = octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
	if (ret < 0)
		return ret;

	return rte_mempool_op_populate_default(mp, max_objs, vaddr, iova, len,
					       obj_cb, obj_cb_arg);
}

/* Ops table registered with the rte_mempool framework under the name
 * "octeontx_fpavf"; selected via rte_mempool_set_ops_byname(). */
static struct rte_mempool_ops octeontx_fpavf_ops = {
	.name = "octeontx_fpavf",
	.alloc = octeontx_fpavf_alloc,
	.free = octeontx_fpavf_free,
	.enqueue = octeontx_fpavf_enqueue,
	.dequeue = octeontx_fpavf_dequeue,
	.get_count = octeontx_fpavf_get_count,
	.calc_mem_size = octeontx_fpavf_calc_mem_size,
	.populate = octeontx_fpavf_populate,
};

MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);