xref: /dpdk/kernel/freebsd/contigmem/contigmem.c (revision cbe57f351b4e6541eb7b50a5c0d6f7e77b45c9db)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/eventhandler.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>

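/*
 * Each physically contiguous allocation is tracked by a contigmem_buffer:
 * its kernel virtual address, the number of mappings currently referencing
 * it, and a mutex protecting that reference count.  A contigmem_vm_handle
 * records which buffer a particular device mapping refers to.
 */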
struct contigmem_buffer {
	void           *addr;
	int             refcnt;
	struct mtx      mtx;
};

struct contigmem_vm_handle {
	int             buffer_index;
};

static int              contigmem_load(void);
static int              contigmem_unload(void);
static int              contigmem_physaddr(SYSCTL_HANDLER_ARGS);

static d_mmap_single_t  contigmem_mmap_single;
static d_open_t         contigmem_open;
static d_close_t        contigmem_close;

static int              contigmem_num_buffers = RTE_CONTIGMEM_DEFAULT_NUM_BUFS;
static int64_t          contigmem_buffer_size = RTE_CONTIGMEM_DEFAULT_BUF_SIZE;
static bool             contigmem_coredump_enable;

static eventhandler_tag contigmem_eh_tag;
static struct contigmem_buffer contigmem_buffers[RTE_CONTIGMEM_MAX_NUM_BUFS];
static struct cdev     *contigmem_cdev = NULL;
static int              contigmem_refcnt;

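/*
 * Loader tunables: the buffer count and size are read from the kernel
 * environment (typically set in /boot/loader.conf) before anything is
 * allocated, and are exported read-only through the sysctls below.
 */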
TUNABLE_INT("hw.contigmem.num_buffers", &contigmem_num_buffers);
TUNABLE_QUAD("hw.contigmem.buffer_size", &contigmem_buffer_size);
TUNABLE_BOOL("hw.contigmem.coredump_enable", &contigmem_coredump_enable);

static SYSCTL_NODE(_hw, OID_AUTO, contigmem, CTLFLAG_RD, 0, "contigmem");

SYSCTL_INT(_hw_contigmem, OID_AUTO, num_buffers, CTLFLAG_RD,
	&contigmem_num_buffers, 0, "Number of contigmem buffers allocated");
SYSCTL_QUAD(_hw_contigmem, OID_AUTO, buffer_size, CTLFLAG_RD,
	&contigmem_buffer_size, 0, "Size of each contiguous buffer");
SYSCTL_INT(_hw_contigmem, OID_AUTO, num_references, CTLFLAG_RD,
	&contigmem_refcnt, 0, "Number of references to contigmem");
SYSCTL_BOOL(_hw_contigmem, OID_AUTO, coredump_enable, CTLFLAG_RD,
	&contigmem_coredump_enable, 0, "Include mapped buffers in core dump");

static SYSCTL_NODE(_hw_contigmem, OID_AUTO, physaddr, CTLFLAG_RD, 0,
	"physaddr");

MALLOC_DEFINE(M_CONTIGMEM, "contigmem", "contigmem(4) allocations");

static int
contigmem_modevent(module_t mod, int type, void *arg)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = contigmem_load();
		break;
	case MOD_UNLOAD:
		error = contigmem_unload();
		break;
	default:
		break;
	}

	return error;
}

moduledata_t contigmem_mod = {
	"contigmem",
	(modeventhand_t)contigmem_modevent,
	0
};

DECLARE_MODULE(contigmem, contigmem_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
MODULE_VERSION(contigmem, 1);

static struct cdevsw contigmem_ops = {
	.d_name         = "contigmem",
	.d_version      = D_VERSION,
	.d_flags        = D_TRACKCLOSE,
	.d_mmap_single  = contigmem_mmap_single,
	.d_open         = contigmem_open,
	.d_close        = contigmem_close,
};

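/*
 * Module load: validate the tunables, allocate the requested number of
 * physically contiguous, naturally aligned buffers with contigmalloc(),
 * publish each buffer's physical address under hw.contigmem.physaddr.<n>,
 * and create the /dev/contigmem device node.  On failure, every buffer
 * allocated so far is released.
 */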
static int
contigmem_load(void)
{
	char index_string[8], description[32];
	int  i, error = 0;
	void *addr;

	if (contigmem_num_buffers > RTE_CONTIGMEM_MAX_NUM_BUFS) {
		printf("%d buffers requested, but the maximum allowed is %d\n",
				contigmem_num_buffers, RTE_CONTIGMEM_MAX_NUM_BUFS);
		error = EINVAL;
		goto error;
	}

	if (contigmem_buffer_size < PAGE_SIZE ||
			(contigmem_buffer_size & (contigmem_buffer_size - 1)) != 0) {
		printf("buffer size 0x%jx must be at least PAGE_SIZE and "
				"a power of two\n",
				(uintmax_t)contigmem_buffer_size);
		error = EINVAL;
		goto error;
	}

	for (i = 0; i < contigmem_num_buffers; i++) {
		/*
		 * Align each buffer to its own size so that it occupies a
		 * single, naturally aligned physical range.
		 */
		addr = contigmalloc(contigmem_buffer_size, M_CONTIGMEM, M_ZERO,
			0, BUS_SPACE_MAXADDR, contigmem_buffer_size, 0);
		if (addr == NULL) {
			printf("contigmalloc failed for buffer %d\n", i);
			error = ENOMEM;
			goto error;
		}

		printf("%2d: virt=%p phys=%p\n", i, addr,
			(void *)pmap_kextract((vm_offset_t)addr));

		mtx_init(&contigmem_buffers[i].mtx, "contigmem", NULL, MTX_DEF);
		contigmem_buffers[i].addr = addr;
		contigmem_buffers[i].refcnt = 0;

		snprintf(index_string, sizeof(index_string), "%d", i);
		snprintf(description, sizeof(description),
				"phys addr for buffer %d", i);
		SYSCTL_ADD_PROC(NULL,
				&SYSCTL_NODE_CHILDREN(_hw_contigmem, physaddr), OID_AUTO,
				index_string, CTLTYPE_U64 | CTLFLAG_RD,
				(void *)(uintptr_t)i, 0, contigmem_physaddr, "LU",
				description);
	}

	contigmem_cdev = make_dev_credf(0, &contigmem_ops, 0, NULL, UID_ROOT,
			GID_WHEEL, 0600, "contigmem");

	return 0;

error:
	/* Release everything allocated before the failure. */
	for (i = 0; i < contigmem_num_buffers; i++) {
		if (contigmem_buffers[i].addr != NULL) {
			contigfree(contigmem_buffers[i].addr,
				contigmem_buffer_size, M_CONTIGMEM);
			contigmem_buffers[i].addr = NULL;
		}
		if (mtx_initialized(&contigmem_buffers[i].mtx))
			mtx_destroy(&contigmem_buffers[i].mtx);
	}

	return error;
}

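/*
 * Module unload: refuse to unload while any consumer still holds a
 * reference, then destroy the device node and free every buffer.
 */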
static int
contigmem_unload(void)
{
	int i;

	if (contigmem_refcnt > 0)
		return EBUSY;

	if (contigmem_cdev != NULL)
		destroy_dev(contigmem_cdev);

	if (contigmem_eh_tag != NULL)
		EVENTHANDLER_DEREGISTER(process_exit, contigmem_eh_tag);

	for (i = 0; i < RTE_CONTIGMEM_MAX_NUM_BUFS; i++) {
		if (contigmem_buffers[i].addr != NULL)
			contigfree(contigmem_buffers[i].addr,
				contigmem_buffer_size, M_CONTIGMEM);
		if (mtx_initialized(&contigmem_buffers[i].mtx))
			mtx_destroy(&contigmem_buffers[i].mtx);
	}

	return 0;
}

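/*
 * Sysctl handler for hw.contigmem.physaddr.<n>: report the physical
 * address of buffer <n>, where <n> was stashed in arg1 at registration.
 */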
static int
contigmem_physaddr(SYSCTL_HANDLER_ARGS)
{
	uint64_t	physaddr;
	int		index = (int)(uintptr_t)arg1;

	physaddr = (uint64_t)vtophys(contigmem_buffers[index].addr);
	return sysctl_handle_64(oidp, &physaddr, 0, req);
}

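/* Take a module reference for each open of /dev/contigmem. */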
static int
contigmem_open(struct cdev *cdev, int fflags, int devtype,
		struct thread *td)
{

	atomic_add_int(&contigmem_refcnt, 1);

	return 0;
}

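/*
 * Drop the reference taken in contigmem_open().  D_TRACKCLOSE in the
 * cdevsw asks devfs to call this on every close, keeping the count
 * balanced with opens.
 */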
static int
contigmem_close(struct cdev *cdev, int fflags, int devtype,
		struct thread *td)
{

	atomic_subtract_int(&contigmem_refcnt, 1);

	return 0;
}

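/*
 * Pager constructor: runs when a mapping of a buffer is created.  Takes a
 * module reference and bumps the buffer's reference count; the buffer is
 * zeroed when its first mapping appears so stale data is never exposed to
 * a new consumer.
 */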
static int
contigmem_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
		vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct contigmem_vm_handle *vmh = handle;
	struct contigmem_buffer *buf;

	buf = &contigmem_buffers[vmh->buffer_index];

	atomic_add_int(&contigmem_refcnt, 1);

	mtx_lock(&buf->mtx);
	if (buf->refcnt == 0)
		memset(buf->addr, 0, contigmem_buffer_size);
	buf->refcnt++;
	mtx_unlock(&buf->mtx);

	return 0;
}

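/*
 * Pager destructor: runs when the last mapping of the VM object goes away.
 * Drops the buffer and module references and frees the handle allocated in
 * contigmem_mmap_single().
 */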
static void
contigmem_cdev_pager_dtor(void *handle)
{
	struct contigmem_vm_handle *vmh = handle;
	struct contigmem_buffer *buf;

	buf = &contigmem_buffers[vmh->buffer_index];

	mtx_lock(&buf->mtx);
	buf->refcnt--;
	mtx_unlock(&buf->mtx);

	free(vmh, M_CONTIGMEM);

	atomic_subtract_int(&contigmem_refcnt, 1);
}

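/*
 * Page fault handler for mappings of a buffer.  The object offset already is
 * the physical address of the faulting page (set up in
 * contigmem_mmap_single()), so the handler only wraps that address in a
 * fictitious page, preserving any memory attribute set on the real page,
 * and hands it back to the VM system.
 */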
static int
contigmem_cdev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
		vm_page_t *mres)
{
	vm_paddr_t paddr;
	vm_page_t m_paddr, page;
	vm_memattr_t memattr, memattr1;

	memattr = object->memattr;

	VM_OBJECT_WUNLOCK(object);

	/* The offset is the physical address; see contigmem_mmap_single(). */
	paddr = offset;

	/*
	 * Honour any memory attribute already set on the underlying page;
	 * otherwise fall back to the object's attribute.
	 */
	m_paddr = vm_phys_paddr_to_vm_page(paddr);
	if (m_paddr != NULL) {
		memattr1 = pmap_page_get_memattr(m_paddr);
		if (memattr1 != memattr)
			memattr = memattr1;
	}

	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed-in result page is a fake page, update it with
		 * the new physical address.
		 */
		page = *mres;
		VM_OBJECT_WLOCK(object);
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed-in page with our own fake page and free
		 * the original page.
		 */
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
#if __FreeBSD__ >= 13
		vm_page_replace(page, object, (*mres)->pindex, *mres);
#else
		vm_page_t mret = vm_page_replace(page, object, (*mres)->pindex);
		KASSERT(mret == *mres,
		    ("invalid page replacement, old=%p, ret=%p", *mres, mret));
		vm_page_lock(mret);
		vm_page_free(mret);
		vm_page_unlock(mret);
#endif
		*mres = page;
	}

	page->valid = VM_PAGE_BITS_ALL;

	return VM_PAGER_OK;
}

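/* Pager operations installed on objects returned by contigmem_mmap_single(). */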
static struct cdev_pager_ops contigmem_cdev_pager_ops = {
	.cdev_pg_ctor = contigmem_cdev_pager_ctor,
	.cdev_pg_dtor = contigmem_cdev_pager_dtor,
	.cdev_pg_fault = contigmem_cdev_pager_fault,
};

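/*
 * Handle mmap(2) on /dev/contigmem.  The caller encodes the buffer index in
 * the requested file offset (index * PAGE_SIZE); the offset is rewritten to
 * the buffer's physical address and a device-pager-backed VM object is
 * returned for the mapping.
 */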
static int
contigmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
		struct vm_object **obj, int nprot)
{
	struct contigmem_vm_handle *vmh;
	uint64_t buffer_index;

	/*
	 * The buffer index is encoded in the offset.  Divide the offset by
	 * PAGE_SIZE to get the index of the buffer requested by the user
	 * application.
	 */
	buffer_index = *offset / PAGE_SIZE;
	if (buffer_index >= contigmem_num_buffers)
		return EINVAL;

	if (size > contigmem_buffer_size)
		return EINVAL;

	vmh = malloc(sizeof(*vmh), M_CONTIGMEM, M_NOWAIT | M_ZERO);
	if (vmh == NULL)
		return ENOMEM;
	vmh->buffer_index = buffer_index;

	*offset = (vm_ooffset_t)vtophys(contigmem_buffers[buffer_index].addr);
	*obj = cdev_pager_allocate(vmh, OBJT_DEVICE, &contigmem_cdev_pager_ops,
			size, nprot, *offset, curthread->td_ucred);

	/*
	 * Mappings backed by OBJ_FICTITIOUS objects are excluded from core
	 * dumps; clear the flag when buffer contents should be dumped.
	 */
	if (contigmem_coredump_enable)
		(*obj)->flags &= ~OBJ_FICTITIOUS;

	return 0;
}