/*	$NetBSD: xen_bus_dma.c,v 1.34 2024/05/14 19:00:44 andvar Exp $	*/
/*	NetBSD bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.34 2024/05/14 19:00:44 andvar Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/bus.h>

#include <machine/bus_private.h>
#include <machine/pmap_private.h>

#include <uvm/uvm.h>

#include "opt_xen.h"

/* No special needs */
struct x86_bus_dma_tag xenbus_bus_dma_tag = {
	._tag_needs_free	= 0,
	._bounce_thresh		= 0,
	._bounce_alloc_lo	= 0,
	._bounce_alloc_hi	= 0,
	._may_bounce		= NULL,
};
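/*
 * With every bounce field zeroed, the generic x86 bus_dma code applies
 * no extra address restrictions through this tag; it is the tag that
 * Xen frontend drivers typically attach their devices with.
 */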

#ifdef XENPV

extern paddr_t avail_end;

/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
{
	int order = -1;
	size = (size - 1) >> (PAGE_SHIFT - 1);
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
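
/*
 * For illustration, with PAGE_SHIFT == 12 (4KB pages):
 *	get_order(4096) == 0	(one page)
 *	get_order(8192) == 1	(two pages)
 *	get_order(8193) == 2	(rounds up to four pages)
 * i.e. the smallest order such that (1 << order) pages cover size.
 */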

static int
_xen_alloc_contig(bus_size_t size, bus_size_t alignment,
    struct pglist *mlistp, int flags, bus_addr_t low, bus_addr_t high)
{
	int order, i;
	unsigned long npagesreq, npages, mfn;
	bus_addr_t pa;
	struct vm_page *pg, *pgnext;
	int s, error;
	struct xen_memory_reservation res;

	/*
	 * When asked for a contiguous memory region, the hypervisor
	 * returns a range that is only guaranteed to be aligned to its
	 * size.  The only way to enforce a stricter alignment is thus to
	 * request a region of size max(alignment, size).
	 */
	order = uimax(get_order(size), get_order(alignment));
	npages = (1 << order);
	npagesreq = (size >> PAGE_SHIFT);
	KASSERT(npages >= npagesreq);
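
	/*
	 * Example: a size of 3 pages with a 4-page alignment gives
	 * order 2, so npages = 4 while npagesreq = 3; the extra page is
	 * handed back to UVM once the contiguous extent is in place.
	 */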

	/* get npages from UVM, and give them back to the hypervisor */
	error = uvm_pglistalloc(((psize_t)npages) << PAGE_SHIFT,
	    0, avail_end, 0, 0, mlistp, npages, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	for (pg = mlistp->tqh_first; pg != NULL; pg = pg->pageq.queue.tqe_next) {
		pa = VM_PAGE_TO_PHYS(pg);
		mfn = xpmap_ptom(pa) >> PAGE_SHIFT;
		xpmap_ptom_unmap(pa);
		set_xen_guest_handle(res.extent_start, &mfn);
		res.nr_extents = 1;
		res.extent_order = 0;
		res.mem_flags = 0;
		res.domid = DOMID_SELF;
		error = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &res);
		if (error != 1) {
#ifdef DEBUG
			printf("xen_alloc_contig: XENMEM_decrease_reservation "
			    "failed: err %d (pa %#" PRIxPADDR " mfn %#lx)\n",
			    error, pa, mfn);
#endif
			xpmap_ptom_map(pa, ptoa(mfn));

			error = ENOMEM;
			goto failed;
		}
	}
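	/*
	 * At this point every machine frame backing mlistp has been
	 * returned to Xen; the pseudo-physical pages are still allocated
	 * in UVM but have no machine memory behind them until the
	 * exchange below succeeds.
	 */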
	/* Get the new contiguous memory extent */
	set_xen_guest_handle(res.extent_start, &mfn);
	res.nr_extents = 1;
	res.extent_order = order;
	res.mem_flags = XENMEMF_address_bits(get_order(high) + PAGE_SHIFT);
	res.domid = DOMID_SELF;
	error = HYPERVISOR_memory_op(XENMEM_increase_reservation, &res);
	if (error != 1) {
#ifdef DEBUG
		printf("xen_alloc_contig: XENMEM_increase_reservation "
		    "failed: %d (order %d mem_flags %d)\n",
		    error, order, res.mem_flags);
#endif
		error = ENOMEM;
		pg = NULL;
		goto failed;
	}
	s = splvm(); /* XXXSMP */
	/* Map the new extent in place of the old pages */
	for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
		pgnext = pg->pageq.queue.tqe_next;
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_ptom_map(pa, ptoa(mfn+i));
		xpq_queue_machphys_update(((paddr_t)(mfn+i)) << PAGE_SHIFT, pa);
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	splx(s);
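	/*
	 * xpq_queue_machphys_update() only queues the M2P updates; the
	 * TLB flush above also pushes the queued batch out to the
	 * hypervisor before the remapped pages are used.
	 */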
	/* now that ptom/mtop are valid, give the extra pages back to UVM */
	for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
		pgnext = pg->pageq.queue.tqe_next;
		if (i >= npagesreq) {
			TAILQ_REMOVE(mlistp, pg, pageq.queue);
			uvm_pagefree(pg);
		}
	}
	return 0;

failed:
	/*
	 * Attempt to recover from a failed decrease or increase reservation:
	 * if decrease_reservation failed, we have not given all pages
	 * back to Xen; return the remaining ones to UVM, and reclaim the
	 * missing pages from Xen.
	 * if increase_reservation failed, we expect pg to be NULL and we
	 * just reclaim the missing pages from Xen one by one.
	 */
	/* give back remaining pages to UVM */
	for (; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.queue.tqe_next;
		TAILQ_REMOVE(mlistp, pg, pageq.queue);
		uvm_pagefree(pg);
	}
	/* replace the pages that we already gave to Xen */
	s = splvm(); /* XXXSMP */
	for (pg = mlistp->tqh_first; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.queue.tqe_next;
		set_xen_guest_handle(res.extent_start, &mfn);
		res.nr_extents = 1;
		res.extent_order = 0;
		res.mem_flags = XENMEMF_address_bits(32);
		res.domid = DOMID_SELF;
		if (HYPERVISOR_memory_op(XENMEM_increase_reservation, &res)
		    < 0) {
			printf("xen_alloc_contig: recovery "
			    "XENMEM_increase_reservation failed!\n");
			break;
		}
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_ptom_map(pa, ptoa(mfn));
		xpq_queue_machphys_update(((paddr_t)mfn) << PAGE_SHIFT, pa);
		/* slow but we don't care */
		xpq_queue_tlb_flush();
		TAILQ_REMOVE(mlistp, pg, pageq.queue);
		uvm_pagefree(pg);
	}
	splx(s);
	return error;
}


/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 * We need our own version to deal with physical vs machine addresses.
 */
int
_xen_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags, bus_addr_t low, bus_addr_t high)
{
	bus_addr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;
	int doingrealloc = 0;
	bus_size_t uboundary;

	/* Always round the size. */
	size = round_page(size);

	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);
	KASSERT(boundary >= PAGE_SIZE || boundary == 0);
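	/*
	 * The (x & (x - 1)) == 0 tests accept exactly the powers of two
	 * (and zero), e.g. 0x1000 & 0x0fff == 0 but 0x3000 & 0x2fff != 0.
	 */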

	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	/*
	 * Allocate pages from the VM system.
	 * We accept boundaries < size, splitting into multiple segments
	 * if needed.  uvm_pglistalloc does not, so compute an appropriate
	 * boundary: the next power of 2 >= size (see the example below).
	 */
	if (boundary == 0)
		uboundary = 0;
	else {
		uboundary = boundary;
		while (uboundary < size)
			uboundary = uboundary << 1;
	}
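	/*
	 * Example: size = 0x5000 with boundary = 0x2000 yields uboundary =
	 * 0x8000, which uvm_pglistalloc() accepts; segments that cross the
	 * caller's 0x2000 boundary are still split in the loop below.
	 */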
	error = uvm_pglistalloc(size, 0, avail_end, alignment, uboundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);
again:

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	curaddr = lastaddr = segs[curseg].ds_addr = _BUS_VM_PAGE_TO_BUS(m);
	if (curaddr < low || curaddr >= high)
		goto badaddr;
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.queue.tqe_next;
	if ((segs[curseg].ds_addr & (alignment - 1)) != 0)
		goto dorealloc;

	for (; m != NULL; m = m->pageq.queue.tqe_next) {
		curaddr = _BUS_VM_PAGE_TO_BUS(m);
		if (curaddr < low || curaddr >= high)
			goto badaddr;
		if (curaddr == (lastaddr + PAGE_SIZE) &&
		    (lastaddr & boundary) == (curaddr & boundary)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			curseg++;
			if (curseg >= nsegs ||
			    (curaddr & (alignment - 1)) != 0) {
				if (doingrealloc)
					return EFBIG;
				else
					goto dorealloc;
			}
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}
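	/*
	 * Adjacent pages were merged into one segment only when their
	 * machine addresses are consecutive and no multiple of `boundary'
	 * lies between them: for consecutive pages the single-bit test
	 * above changes exactly when such a boundary is crossed.
	 */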

	*rsegs = curseg + 1;
	return (0);

badaddr:
	if (doingrealloc == 0)
		goto dorealloc;
	if (curaddr < low) {
		/* no way to enforce this */
		printf("_xen_bus_dmamem_alloc_range: no way to "
		    "enforce address range (0x%" PRIx64 " - 0x%" PRIx64 ")\n",
		    (uint64_t)low, (uint64_t)high);
		uvm_pglistfree(&mlist);
		return EINVAL;
	}
	printf("xen_bus_dmamem_alloc_range: "
	    "curaddr=0x%lx > high=0x%lx\n",
	    (u_long)curaddr, (u_long)high);
	panic("xen_bus_dmamem_alloc_range 1");
dorealloc:
	if (doingrealloc == 1)
		panic("_xen_bus_dmamem_alloc_range: "
		   "xen_alloc_contig returned "
		   "too many segments");
	doingrealloc = 1;
	/*
	 * Too many segments, or the memory doesn't fit the
	 * constraints. Free this memory and
	 * get a contiguous segment from the hypervisor.
	 */
	uvm_pglistfree(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		segs[curseg].ds_addr = 0;
		segs[curseg].ds_len = 0;
	}
	error = _xen_alloc_contig(size, alignment,
	    &mlist, flags, low, high);
	if (error)
		return error;
	goto again;
}
#endif /* XENPV */
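
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a XENPV frontend driver allocating one DMA-safe page goes through the
 * standard bus_dma interface with the tag defined above, which is
 * expected to end up in _xen_bus_dmamem_alloc_range():
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *
 *	if (bus_dmamem_alloc(&xenbus_bus_dma_tag, PAGE_SIZE, PAGE_SIZE,
 *	    0, &seg, 1, &rseg, BUS_DMA_NOWAIT) != 0)
 *		return ENOMEM;
 */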