/*	$NetBSD: xen_bus_dma.c,v 1.27 2018/06/24 20:28:57 jdolecek Exp $	*/
/*	NetBSD bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.27 2018/06/24 20:28:57 jdolecek Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/proc.h>

#include <sys/bus.h>
#include <machine/bus_private.h>

#include <uvm/uvm.h>

extern paddr_t avail_end;

/* Pure 2^n version of get_order */
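/* Returns the smallest n such that (1UL << n) pages cover 'size' bytes. */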
static inline int get_order(unsigned long size)
{
	int order = -1;
	size = (size - 1) >> (PAGE_SHIFT - 1);
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

static int
_xen_alloc_contig(bus_size_t size, bus_size_t alignment,
    struct pglist *mlistp, int flags, bus_addr_t low, bus_addr_t high)
{
	int order, i;
	unsigned long npagesreq, npages, mfn;
	bus_addr_t pa;
	struct vm_page *pg, *pgnext;
	int s, error;
	struct xen_memory_reservation res;

	/*
	 * When requesting a contiguous memory region, the hypervisor
	 * returns a memory range aligned to its size.
	 * The only way to enforce alignment is to request a memory region
	 * of size max(alignment, size).
	 */
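	/* e.g. size = 2 pages, alignment = 4 pages -> order 2, 4 pages requested */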
	order = max(get_order(size), get_order(alignment));
	npages = (1 << order);
	npagesreq = (size >> PAGE_SHIFT);
	KASSERT(npages >= npagesreq);

	/* get npages from UVM, and give them back to the hypervisor */
	error = uvm_pglistalloc(((psize_t)npages) << PAGE_SHIFT,
	    0, avail_end, 0, 0, mlistp, npages, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

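	/*
	 * For each UVM page, look up its current machine frame (mfn),
	 * drop the P2M entry and hand the frame back to the hypervisor
	 * with a one-page XENMEM_decrease_reservation.
	 */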
	for (pg = mlistp->tqh_first; pg != NULL; pg = pg->pageq.queue.tqe_next) {
		pa = VM_PAGE_TO_PHYS(pg);
		mfn = xpmap_ptom(pa) >> PAGE_SHIFT;
		xpmap_ptom_unmap(pa);
		set_xen_guest_handle(res.extent_start, &mfn);
		res.nr_extents = 1;
		res.extent_order = 0;
		res.address_bits = 0;
		res.domid = DOMID_SELF;
		error = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &res);
		if (error != 1) {
#ifdef DEBUG
			printf("xen_alloc_contig: XENMEM_decrease_reservation "
			    "failed: err %d (pa %#" PRIxPADDR " mfn %#lx)\n",
			    error, pa, mfn);
#endif
			xpmap_ptom_map(pa, ptoa(mfn));

			error = ENOMEM;
			goto failed;
		}
	}
	/* Get the new contiguous memory extent */
	set_xen_guest_handle(res.extent_start, &mfn);
	res.nr_extents = 1;
	res.extent_order = order;
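	/* address_bits constrains the extent to machine addresses below 'high' */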
	res.address_bits = get_order(high) + PAGE_SHIFT;
	res.domid = DOMID_SELF;
	error = HYPERVISOR_memory_op(XENMEM_increase_reservation, &res);
	if (error != 1) {
#ifdef DEBUG
		printf("xen_alloc_contig: XENMEM_increase_reservation "
		    "failed: %d (order %d address_bits %d)\n",
		    error, order, res.address_bits);
#endif
		error = ENOMEM;
		pg = NULL;
		goto failed;
	}
	s = splvm(); /* XXXSMP */
	/* Map the new extent in place of the old pages */
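	/* (update both the P2M entry and, via the hypervisor queue, the M2P table) */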
	for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
		pgnext = pg->pageq.queue.tqe_next;
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_ptom_map(pa, ptoa(mfn+i));
		xpq_queue_machphys_update(((paddr_t)(mfn+i)) << PAGE_SHIFT, pa);
		/* while here, give extra pages back to UVM */
		if (i >= npagesreq) {
			TAILQ_REMOVE(mlistp, pg, pageq.queue);
			uvm_pagefree(pg);
		}
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	splx(s);
	return 0;

failed:
	/*
	 * Attempt to recover from a failed decrease or increase reservation:
	 * if decrease_reservation failed, we haven't given all pages
	 * back to Xen yet; give those back to UVM, and get the missing pages
	 * back from Xen.
	 * if increase_reservation failed, we expect pg to be NULL and we just
	 * get the missing pages back from Xen one by one.
	 */
	/* give back remaining pages to UVM */
	for (; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.queue.tqe_next;
		TAILQ_REMOVE(mlistp, pg, pageq.queue);
		uvm_pagefree(pg);
	}
	/* replace the pages that we already gave to Xen */
	s = splvm(); /* XXXSMP */
	for (pg = mlistp->tqh_first; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.queue.tqe_next;
		set_xen_guest_handle(res.extent_start, &mfn);
		res.nr_extents = 1;
		res.extent_order = 0;
		res.address_bits = 32;
		res.domid = DOMID_SELF;
		if (HYPERVISOR_memory_op(XENMEM_increase_reservation, &res)
		    < 0) {
			printf("xen_alloc_contig: recovery "
			    "XENMEM_increase_reservation failed!\n");
			break;
		}
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_ptom_map(pa, ptoa(mfn));
		xpq_queue_machphys_update(((paddr_t)mfn) << PAGE_SHIFT, pa);
		TAILQ_REMOVE(mlistp, pg, pageq.queue);
		uvm_pagefree(pg);
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	splx(s);
	return error;
}


/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 * We need our own version to deal with physical vs machine addresses.
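 * (On Xen PV, pages that are contiguous in pseudo-physical space are not
 * necessarily contiguous in machine space, so DMA segments have to be
 * checked and built from machine addresses.)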
 */
int
_xen_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags, bus_addr_t low, bus_addr_t high)
{
	bus_addr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;
	int doingrealloc = 0;
	bus_size_t uboundary;

	/* Always round the size. */
	size = round_page(size);

	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);
	KASSERT(boundary >= PAGE_SIZE || boundary == 0);

	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	/*
	 * Allocate pages from the VM system.
	 * We accept boundaries < size, splitting into multiple segments
	 * if needed. uvm_pglistalloc does not, so compute an appropriate
	 * boundary: the next power of 2 >= size.
	 */
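	/* e.g. boundary = 1 page, size = 3 pages -> uboundary = 4 pages */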
	if (boundary == 0)
		uboundary = 0;
	else {
		uboundary = boundary;
		while (uboundary < size)
			uboundary = uboundary << 1;
	}
	error = uvm_pglistalloc(size, 0, avail_end, alignment, uboundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);
again:

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	curaddr = lastaddr = segs[curseg].ds_addr = _BUS_VM_PAGE_TO_BUS(m);
	if (curaddr < low || curaddr >= high)
		goto badaddr;
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.queue.tqe_next;
	if ((segs[curseg].ds_addr & (alignment - 1)) != 0)
		goto dorealloc;

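	/*
	 * Walk the remaining pages, merging machine-contiguous pages into
	 * the current segment and starting a new one when a page is not
	 * adjacent to the previous page or would cross 'boundary'.
	 */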
	for (; m != NULL; m = m->pageq.queue.tqe_next) {
		curaddr = _BUS_VM_PAGE_TO_BUS(m);
		if (curaddr < low || curaddr >= high)
			goto badaddr;
		if (curaddr == (lastaddr + PAGE_SIZE) &&
		    (lastaddr & boundary) == (curaddr & boundary)) {
			segs[curseg].ds_len += PAGE_SIZE;
		} else {
			curseg++;
			if (curseg >= nsegs ||
			    (curaddr & (alignment - 1)) != 0) {
				if (doingrealloc)
					return EFBIG;
				else
					goto dorealloc;
			}
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;
	return (0);

badaddr:
	if (doingrealloc == 0)
		goto dorealloc;
	if (curaddr < low) {
		/* no way to enforce this */
		printf("_xen_bus_dmamem_alloc_range: no way to "
		    "enforce address range (0x%" PRIx64 " - 0x%" PRIx64 ")\n",
		    (uint64_t)low, (uint64_t)high);
		uvm_pglistfree(&mlist);
		return EINVAL;
	}
	printf("xen_bus_dmamem_alloc_range: "
	    "curaddr=0x%lx > high=0x%lx\n",
	    (u_long)curaddr, (u_long)high);
	panic("xen_bus_dmamem_alloc_range 1");
dorealloc:
	if (doingrealloc == 1)
		panic("_xen_bus_dmamem_alloc_range: "
		   "xen_alloc_contig returned "
		   "too many segments");
	doingrealloc = 1;
	/*
	 * Too many segments, or the memory doesn't fit the
	 * constraints. Free this memory and
	 * get a contiguous segment from the hypervisor.
	 */
	uvm_pglistfree(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		segs[curseg].ds_addr = 0;
		segs[curseg].ds_len = 0;
	}
	error = _xen_alloc_contig(size, alignment,
	    &mlist, flags, low, high);
	if (error)
		return error;
	goto again;
}