/*
 *   BSD LICENSE
 *
 *   Copyright (C) 2017 Cavium Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_debug.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>

#include "octeontx_fpavf.h"

/*
 * Per-pool descriptor.
 * Links a mempool to the memzone that provides the memory
 * backing the pool's elements.
 */
struct octeontx_pool_info {
	const struct rte_mempool *mp;
	uintptr_t mz_addr;

	SLIST_ENTRY(octeontx_pool_info) link;
};

SLIST_HEAD(octeontx_pool_list, octeontx_pool_info);

/* List of the allocated pools */
static struct octeontx_pool_list octeontx_pool_head =
				SLIST_HEAD_INITIALIZER(octeontx_pool_head);
/* Spinlock to protect pool list */
static rte_spinlock_t pool_list_lock = RTE_SPINLOCK_INITIALIZER;

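/*
 * Mempool "alloc" handler: create a hardware FPA pool backed by the
 * memzone that was recorded for this mempool in the
 * register_memory_area handler, then store the resulting pool handle
 * in mp->pool_id.
 */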
static int
octeontx_fpavf_alloc(struct rte_mempool *mp)
{
	uintptr_t pool;
	struct octeontx_pool_info *pool_info;
	uint32_t memseg_count = mp->size;
	uint32_t object_size;
	uintptr_t va_start;
	int rc = 0;

	rte_spinlock_lock(&pool_list_lock);
	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
		if (pool_info->mp == mp)
			break;
	}
	if (pool_info == NULL) {
		rte_spinlock_unlock(&pool_list_lock);
		return -ENXIO;
	}

	/* Virtual address of the hugepage-mapped memzone */
	va_start = pool_info->mz_addr;
	rte_spinlock_unlock(&pool_list_lock);

	/* Total per-object footprint, including mempool header and trailer */
	object_size = mp->elt_size + mp->header_size + mp->trailer_size;

	pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
						OCTEONTX_FPAVF_BUF_OFFSET,
						(char **)&va_start,
						mp->socket_id);
	/* block_size also validates the pool handle, so a failed create
	 * surfaces here as a negative return value.
	 */
	rc = octeontx_fpa_bufpool_block_size(pool);
	if (rc < 0)
		goto _end;

	if ((uint32_t)rc != object_size)
		fpavf_log_err("buffer size mismatch: %d instead of %u\n",
				rc, object_size);

	fpavf_log_info("Pool created %p with obj_sz %u, cnt %u\n",
			(void *)pool, object_size, memseg_count);

	/* Assign the hardware pool handle to the mempool */
	mp->pool_id = (uint64_t)pool;

	return 0;

_end:
	return rc;
}

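/*
 * Mempool "free" handler: remove the tracking descriptor for this
 * mempool from the pool list and destroy the underlying hardware pool.
 */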
static void
octeontx_fpavf_free(struct rte_mempool *mp)
{
	struct octeontx_pool_info *pool_info;
	uintptr_t pool;

	pool = (uintptr_t)mp->pool_id;

	rte_spinlock_lock(&pool_list_lock);
	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
		if (pool_info->mp == mp)
			break;
	}

	if (pool_info == NULL) {
		rte_spinlock_unlock(&pool_list_lock);
		rte_panic("%s: trying to free pool with no valid metadata\n",
		    __func__);
	}

	SLIST_REMOVE(&octeontx_pool_head, pool_info, octeontx_pool_info, link);
	rte_spinlock_unlock(&pool_list_lock);

	rte_free(pool_info);
	octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
}

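/*
 * Pop one buffer from the hardware pool: a 64-bit load from the VF's
 * VHAURA_OP_ALLOC register returns the address of a free buffer,
 * or 0 when the pool is empty.
 */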
static __rte_always_inline void *
octeontx_fpa_bufpool_alloc(uintptr_t handle)
{
	return (void *)(uintptr_t)fpavf_read64((void *)(handle +
						FPA_VF_VHAURA_OP_ALLOC(0)));
}

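/*
 * Push a buffer back to the hardware pool: a 64-bit store of the buffer
 * address to the VF's VHAURA_OP_FREE register, with the DWB
 * (don't-write-back) count at 0 and the FABS (free-absolute) bit set.
 */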
static __rte_always_inline void
octeontx_fpa_bufpool_free(uintptr_t handle, void *buf)
{
	uint64_t free_addr = FPA_VF_FREE_ADDRS_S(FPA_VF_VHAURA_OP_FREE(0),
						 0 /* DWB */, 1 /* FABS */);

	fpavf_write64((uintptr_t)buf, (void *)(uintptr_t)(handle + free_addr));
}

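/*
 * Mempool "enqueue" handler: return a batch of objects to the hardware
 * pool. Frees to the FPA cannot fail, so this always reports success.
 */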
static int
octeontx_fpavf_enqueue(struct rte_mempool *mp, void * const *obj_table,
			unsigned int n)
{
	uintptr_t pool;
	unsigned int index;

	pool = (uintptr_t)mp->pool_id;
	/* Get pool bar address from handle */
	pool &= ~(uint64_t)FPA_GPOOL_MASK;
	for (index = 0; index < n; index++, obj_table++)
		octeontx_fpa_bufpool_free(pool, *obj_table);

	return 0;
}

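/*
 * Mempool "dequeue" handler: allocate n objects from the hardware pool.
 * The request is all-or-nothing; on a partial allocation every buffer
 * taken so far is rolled back and -ENOMEM is returned.
 */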
static int
octeontx_fpavf_dequeue(struct rte_mempool *mp, void **obj_table,
			unsigned int n)
{
	unsigned int index;
	uintptr_t pool;
	void *obj;

	pool = (uintptr_t)mp->pool_id;
	/* Get pool bar address from handle */
	pool &= ~(uint64_t)FPA_GPOOL_MASK;
	for (index = 0; index < n; index++, obj_table++) {
		obj = octeontx_fpa_bufpool_alloc(pool);
		if (obj == NULL) {
			/*
			 * The pool ran out of buffers before the whole
			 * request could be satisfied. The mempool API
			 * requires that either the entire request is
			 * completed or an error is returned, so free the
			 * buffers allocated so far back to the pool.
			 */
			for (; index > 0; index--) {
				obj_table--;
				octeontx_fpa_bufpool_free(pool, *obj_table);
			}
			return -ENOMEM;
		}
		*obj_table = obj;
	}

	return 0;
}

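/*
 * Mempool "get_count" handler: report how many objects are currently
 * free (available for allocation) in the hardware pool.
 */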
static unsigned int
octeontx_fpavf_get_count(const struct rte_mempool *mp)
{
	uintptr_t pool;

	pool = (uintptr_t)mp->pool_id;

	return octeontx_fpa_bufpool_free_count(pool);
}

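/*
 * Mempool "get_capabilities" handler: the FPA requires physically
 * contiguous backing memory and block-size-aligned objects, so both
 * capability flags are advertised to the mempool layer.
 */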
static int
octeontx_fpavf_get_capabilities(const struct rte_mempool *mp,
				unsigned int *flags)
{
	RTE_SET_USED(mp);
	*flags |= (MEMPOOL_F_CAPA_PHYS_CONTIG |
			MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS);
	return 0;
}

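/*
 * Mempool "register_memory_area" handler: record the virtual address of
 * the memzone backing this mempool, so that the "alloc" handler can
 * later hand the hardware the start of the element area.
 */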
static int
octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
				    char *vaddr, phys_addr_t paddr, size_t len)
{
	struct octeontx_pool_info *pool_info;

	RTE_SET_USED(paddr);
	RTE_SET_USED(len);

	pool_info = rte_malloc("octeontx_pool_info", sizeof(*pool_info), 0);
	if (pool_info == NULL)
		return -ENOMEM;

	pool_info->mp = mp;
	pool_info->mz_addr = (uintptr_t)vaddr;
	rte_spinlock_lock(&pool_list_lock);
	SLIST_INSERT_HEAD(&octeontx_pool_head, pool_info, link);
	rte_spinlock_unlock(&pool_list_lock);
	return 0;
}

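/*
 * Ops table exported to the mempool library under the name
 * "octeontx_fpavf". An application would select it per mempool roughly
 * as follows (a sketch, with placeholder arguments):
 *
 *	mp = rte_mempool_create_empty("pool", n, elt_size, 0, 0, socket, 0);
 *	rte_mempool_set_ops_byname(mp, "octeontx_fpavf", NULL);
 *	rte_mempool_populate_default(mp);
 */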
static struct rte_mempool_ops octeontx_fpavf_ops = {
	.name = "octeontx_fpavf",
	.alloc = octeontx_fpavf_alloc,
	.free = octeontx_fpavf_free,
	.enqueue = octeontx_fpavf_enqueue,
	.dequeue = octeontx_fpavf_dequeue,
	.get_count = octeontx_fpavf_get_count,
	.get_capabilities = octeontx_fpavf_get_capabilities,
	.register_memory_area = octeontx_fpavf_register_memory_area,
};

MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);