/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdio.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>

#include "octeontx_fpavf.h"

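/*
 * Mempool ops backed by the OCTEON TX FPA (Free Pool Allocator)
 * hardware, driven through its virtual function (fpavf). Buffer
 * allocation and free are single load/store accesses to the pool's
 * VHAURA operation addresses, handled by the inline helpers below.
 */
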
/*
 * Create the hardware FPA buffer pool sized for the whole mempool and
 * record the returned handle in mp->pool_id.
 */
static int
octeontx_fpavf_alloc(struct rte_mempool *mp)
{
	uintptr_t pool;
	uint32_t memseg_count = mp->size;
	uint32_t object_size;
	int rc = 0;

	object_size = mp->elt_size + mp->header_size + mp->trailer_size;

	pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
						OCTEONTX_FPAVF_BUF_OFFSET,
						mp->socket_id);
	/* Reading back the block size also validates the handle: a
	 * negative value indicates that pool creation failed.
	 */
	rc = octeontx_fpa_bufpool_block_size(pool);
	if (rc < 0)
		goto _end;

	if ((uint32_t)rc != object_size)
		fpavf_log_err("buffer size mismatch: %d instead of %u",
				rc, object_size);

	fpavf_log_info("Pool created %p with obj_sz %u, cnt %u",
			(void *)pool, object_size, memseg_count);

	/* assign pool handle to mempool */
	mp->pool_id = (uint64_t)pool;

	return 0;

_end:
	return rc;
}

static void
octeontx_fpavf_free(struct rte_mempool *mp)
{
	uintptr_t pool;

	pool = (uintptr_t)mp->pool_id;

	octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
}

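/*
 * Note on the pool handle: the value stored in mp->pool_id encodes
 * both the FPA VF BAR address and the gpool index. The low
 * FPA_GPOOL_MASK bits carry the gpool, so the fast-path helpers mask
 * them off to recover the BAR address:
 *
 *	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
 *	gpool = octeontx_fpa_bufpool_gpool(handle);
 */
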
/*
 * Allocate one buffer with a single load from the aura's ALLOC
 * operation address; a zero (NULL) result is treated as pool-empty
 * by the dequeue path below.
 */
static __rte_always_inline void *
octeontx_fpa_bufpool_alloc(uintptr_t handle)
{
	return (void *)(uintptr_t)fpavf_read64((void *)(handle +
						FPA_VF_VHAURA_OP_ALLOC(0)));
}

/*
 * Free one buffer with a single store of its address to the aura's
 * FREE operation address.
 */
static __rte_always_inline void
octeontx_fpa_bufpool_free(uintptr_t handle, void *buf)
{
	uint64_t free_addr = FPA_VF_FREE_ADDRS_S(FPA_VF_VHAURA_OP_FREE(0),
						 0 /* DWB */, 1 /* FABS */);

	fpavf_write64((uintptr_t)buf, (void *)(uintptr_t)(handle + free_addr));
}

static int
octeontx_fpavf_enqueue(struct rte_mempool *mp, void * const *obj_table,
			unsigned int n)
{
	uintptr_t pool;
	unsigned int index;

	pool = (uintptr_t)mp->pool_id;
	/* Get pool BAR address from handle */
	pool &= ~(uint64_t)FPA_GPOOL_MASK;
	for (index = 0; index < n; index++, obj_table++)
		octeontx_fpa_bufpool_free(pool, *obj_table);

	/* Stores to the FREE address have no failure path here. */
	return 0;
}

static int
octeontx_fpavf_dequeue(struct rte_mempool *mp, void **obj_table,
			unsigned int n)
{
	unsigned int index;
	uintptr_t pool;
	void *obj;

	pool = (uintptr_t)mp->pool_id;
	/* Get pool BAR address from handle */
	pool &= ~(uint64_t)FPA_GPOOL_MASK;
	for (index = 0; index < n; index++, obj_table++) {
		obj = octeontx_fpa_bufpool_alloc(pool);
		if (obj == NULL) {
			/*
			 * Failed to allocate the requested number of
			 * objects. The dequeue contract is all-or-nothing:
			 * either the entire request is satisfied or an
			 * error is returned, so return the buffers
			 * allocated so far back to the pool.
			 */
			for (; index > 0; index--) {
				obj_table--;
				octeontx_fpa_bufpool_free(pool, *obj_table);
			}
			return -ENOMEM;
		}
		*obj_table = obj;
	}

	return 0;
}

static unsigned int
octeontx_fpavf_get_count(const struct rte_mempool *mp)
{
	uintptr_t pool;

	pool = (uintptr_t)mp->pool_id;

	return octeontx_fpa_bufpool_free_count(pool);
}

static ssize_t
octeontx_fpavf_calc_mem_size(const struct rte_mempool *mp,
			     uint32_t obj_num, uint32_t pg_shift,
			     size_t *min_chunk_size, size_t *align)
{
	ssize_t mem_size;
	size_t total_elt_sz;

	/* Reserve space for one extra object per chunk so that
	 * populate() can shift the first object to a multiple of
	 * total_elt_sz without losing capacity.
	 */
	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
	mem_size = rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift,
						total_elt_sz, min_chunk_size,
						align);
	if (mem_size >= 0) {
		/*
		 * The memory area holding the objects must be physically
		 * contiguous, so require it as a single chunk.
		 */
		*min_chunk_size = mem_size;
	}

	return mem_size;
}

static int
octeontx_fpavf_populate(struct rte_mempool *mp, unsigned int max_objs,
			void *vaddr, rte_iova_t iova, size_t len,
			rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	size_t total_elt_sz;
	size_t off;
	uint8_t gpool;
	uintptr_t pool_bar;
	int ret;

	if (iova == RTE_BAD_IOVA)
		return -EINVAL;

	total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;

	/* align object start address to a multiple of total_elt_sz */
	off = total_elt_sz - ((((uintptr_t)vaddr - 1) % total_elt_sz) + 1);
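	/*
	 * Worked example with hypothetical numbers: for vaddr = 0x1000
	 * and total_elt_sz = 0x600, ((0x1000 - 1) % 0x600) + 1 = 0x400,
	 * so off = 0x600 - 0x400 = 0x200 and the first object starts at
	 * 0x1200 = 3 * 0x600. If vaddr is already a multiple of
	 * total_elt_sz, off comes out as 0.
	 */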

	if (len < off)
		return -EINVAL;

	vaddr = (char *)vaddr + off;
	iova += off;
	len -= off;

	gpool = octeontx_fpa_bufpool_gpool(mp->pool_id);
	pool_bar = mp->pool_id & ~(uint64_t)FPA_GPOOL_MASK;

	/* Set the hardware pool's valid buffer address range */
	ret = octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
	if (ret < 0)
		return ret;

	return rte_mempool_op_populate_helper(mp,
					RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ,
					max_objs, vaddr, iova, len,
					obj_cb, obj_cb_arg);
}

static struct rte_mempool_ops octeontx_fpavf_ops = {
	.name = "octeontx_fpavf",
	.alloc = octeontx_fpavf_alloc,
	.free = octeontx_fpavf_free,
	.enqueue = octeontx_fpavf_enqueue,
	.dequeue = octeontx_fpavf_dequeue,
	.get_count = octeontx_fpavf_get_count,
	.calc_mem_size = octeontx_fpavf_calc_mem_size,
	.populate = octeontx_fpavf_populate,
};

RTE_MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);
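
/*
 * Usage sketch (illustrative, not part of this driver): an application
 * selects these ops by name before populating the mempool. The pool
 * name and sizes below are assumptions for the example.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("ex_pool", 8192, 2048, 256, 0,
 *				      rte_socket_id(), 0);
 *	if (mp == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create mempool\n");
 *	if (rte_mempool_set_ops_byname(mp, "octeontx_fpavf", NULL) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot set mempool ops\n");
 *	if (rte_mempool_populate_default(mp) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot populate mempool\n");
 */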