/*	$NetBSD: xen_bus_dma.c,v 1.19 2010/03/02 00:13:50 jym Exp $	*/
/*	NetBSD bus_dma.c,v 1.21 2005/04/16 07:53:35 yamt Exp */

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xen_bus_dma.c,v 1.19 2010/03/02 00:13:50 jym Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/proc.h>

#include <machine/bus.h>
#include <machine/bus_private.h>

#include <uvm/uvm_extern.h>

extern paddr_t avail_end;

/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
{
	int order = -1;
	size = (size - 1) >> (PAGE_SHIFT - 1);
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
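
/*
 * Illustrative note (assuming the usual PAGE_SHIFT of 12): get_order(4096)
 * is 0, get_order(8192) is 1, get_order(12288) is 2 and get_order(65536)
 * is 4, i.e. the smallest n such that 2^n pages cover "size".
 */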

static int
_xen_alloc_contig(bus_size_t size, bus_size_t alignment, bus_size_t boundary,
    struct pglist *mlistp, int flags, bus_addr_t low, bus_addr_t high)
{
	int order, i;
	unsigned long npagesreq, npages, mfn;
	bus_addr_t pa;
	struct vm_page *pg, *pgnext;
	int s, error;
	struct xen_memory_reservation res;

	/*
	 * When requesting a contiguous memory region, the hypervisor will
	 * return a memory range aligned on size. This will automagically
	 * handle "boundary", but the only way to enforce alignment
	 * is to request a memory region of size max(alignment, size).
	 */
	order = max(get_order(size), get_order(alignment));
	npages = (1 << order);
	npagesreq = (size >> PAGE_SHIFT);
	KASSERT(npages >= npagesreq);

	/* get npages from UVM, and give them back to the hypervisor */
	error = uvm_pglistalloc(((psize_t)npages) << PAGE_SHIFT,
	    0, avail_end, 0, 0, mlistp, npages, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	for (pg = mlistp->tqh_first; pg != NULL; pg = pg->pageq.queue.tqe_next) {
		pa = VM_PAGE_TO_PHYS(pg);
		mfn = xpmap_ptom(pa) >> PAGE_SHIFT;
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = INVALID_P2M_ENTRY;
		xenguest_handle(res.extent_start) = &mfn;
		res.nr_extents = 1;
		res.extent_order = 0;
		res.domid = DOMID_SELF;
		error = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &res);
		if (error != 1) {
#ifdef DEBUG
			printf("xen_alloc_contig: XENMEM_decrease_reservation "
			    "failed: err %d (pa %#" PRIxPADDR " mfn %#lx)\n",
			    error, pa, mfn);
#endif
			xpmap_phys_to_machine_mapping[
			    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;

			error = ENOMEM;
			goto failed;
		}
	}
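	/*
	 * Illustrative note: at this point every page on mlistp has been
	 * handed back to Xen one frame at a time; the single
	 * XENMEM_increase_reservation call below asks for one
	 * machine-contiguous extent of 2^order frames whose addresses fit
	 * in "address_bits" bits (for example 32 bits when "high" is 4GB
	 * and PAGE_SHIFT is 12).
	 */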
	/* Get the new contiguous memory extent */
	xenguest_handle(res.extent_start) = &mfn;
	res.nr_extents = 1;
	res.extent_order = order;
	res.address_bits = get_order(high) + PAGE_SHIFT;
	res.domid = DOMID_SELF;
	error = HYPERVISOR_memory_op(XENMEM_increase_reservation, &res);
	if (error != 1) {
#ifdef DEBUG
		printf("xen_alloc_contig: XENMEM_increase_reservation "
		    "failed: %d (order %d address_bits %d)\n",
		    error, order, res.address_bits);
#endif
		error = ENOMEM;
		pg = NULL;
		goto failed;
	}
	s = splvm();
	/* Map the new extent in place of the old pages */
	for (pg = mlistp->tqh_first, i = 0; pg != NULL; pg = pgnext, i++) {
		pgnext = pg->pageq.queue.tqe_next;
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn+i;
		xpq_queue_machphys_update(((paddr_t)(mfn+i)) << PAGE_SHIFT, pa);
		/* while here, give extra pages back to UVM */
		if (i >= npagesreq) {
			TAILQ_REMOVE(mlistp, pg, pageq.queue);
			uvm_pagefree(pg);
		}
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	splx(s);
	return 0;

failed:
	/*
	 * Attempt to recover from a failed decrease or increase reservation:
	 * if decrease_reservation failed, we haven't given all the pages
	 * back to Xen; give them back to UVM, and get the missing pages
	 * back from Xen.
	 * if increase_reservation failed, we expect pg to be NULL and we just
	 * get back the missing pages from Xen one by one.
	 */
	/* give back remaining pages to UVM */
	for (; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.queue.tqe_next;
		TAILQ_REMOVE(mlistp, pg, pageq.queue);
		uvm_pagefree(pg);
	}
	/* replace the pages that we already gave to Xen */
	s = splvm();
	for (pg = mlistp->tqh_first; pg != NULL; pg = pgnext) {
		pgnext = pg->pageq.queue.tqe_next;
		xenguest_handle(res.extent_start) = &mfn;
		res.nr_extents = 1;
		res.extent_order = 0;
		res.address_bits = 32;
		res.domid = DOMID_SELF;
		if (HYPERVISOR_memory_op(XENMEM_increase_reservation, &res)
		    < 0) {
			printf("xen_alloc_contig: recovery "
			    "XENMEM_increase_reservation failed!\n");
			break;
		}
		pa = VM_PAGE_TO_PHYS(pg);
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = mfn;
		xpq_queue_machphys_update(((paddr_t)mfn) << PAGE_SHIFT, pa);
		TAILQ_REMOVE(mlistp, pg, pageq.queue);
		uvm_pagefree(pg);
	}
	/* Flush updates through and flush the TLB */
	xpq_queue_tlb_flush();
	splx(s);
	return error;
}


/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 * We need our own version to deal with physical vs machine addresses.
 */
int
_xen_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size,
    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags, bus_addr_t low, bus_addr_t high)
{
	bus_addr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;
	int doingrealloc = 0;

	/* Always round the size. */
	size = round_page(size);

	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (boundary != 0 && boundary < size)
		return (EINVAL);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, 0, avail_end, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);
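
	/*
	 * Note: under Xen, pages that are contiguous in pseudo-physical
	 * address space are not necessarily contiguous in machine address
	 * space, so the bus addresses computed below may break the segment
	 * count, alignment or boundary constraints even though
	 * uvm_pglistalloc() honoured them; in that case the pages are freed
	 * and a machine-contiguous extent is requested from the hypervisor
	 * through _xen_alloc_contig() (the "dorealloc" path below).
	 */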
again:

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	curaddr = lastaddr = segs[curseg].ds_addr = _BUS_VM_PAGE_TO_BUS(m);
	if (curaddr < low || curaddr >= high)
		goto badaddr;
	segs[curseg].ds_len = PAGE_SIZE;
	m = m->pageq.queue.tqe_next;
	if ((segs[curseg].ds_addr & (alignment - 1)) != 0)
		goto dorealloc;

	for (; m != NULL; m = m->pageq.queue.tqe_next) {
		curaddr = _BUS_VM_PAGE_TO_BUS(m);
		if (curaddr < low || curaddr >= high)
			goto badaddr;
		if (curaddr == (lastaddr + PAGE_SIZE)) {
			segs[curseg].ds_len += PAGE_SIZE;
			if ((lastaddr & boundary) != (curaddr & boundary))
				goto dorealloc;
		} else {
			curseg++;
			if (curseg >= nsegs || (curaddr & (alignment - 1)) != 0)
				goto dorealloc;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;
	return (0);

badaddr:
	if (doingrealloc == 0)
		goto dorealloc;
	if (curaddr < low) {
		/* no way to enforce this */
		printf("_xen_bus_dmamem_alloc_range: no way to "
		    "enforce address range (0x%" PRIx64 " - 0x%" PRIx64 ")\n",
		    (uint64_t)low, (uint64_t)high);
		uvm_pglistfree(&mlist);
		return EINVAL;
	}
	printf("xen_bus_dmamem_alloc_range: "
	    "curaddr=0x%lx > high=0x%lx\n",
	    (u_long)curaddr, (u_long)high);
	panic("xen_bus_dmamem_alloc_range 1");
dorealloc:
	if (doingrealloc == 1)
		panic("_xen_bus_dmamem_alloc_range: "
		    "xen_alloc_contig returned "
		    "too many segments");
	doingrealloc = 1;
	/*
	 * Too many segments, or the memory doesn't fit the constraints.
	 * Free this memory and get a contiguous segment from the
	 * hypervisor.
	 */
	uvm_pglistfree(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		segs[curseg].ds_addr = 0;
		segs[curseg].ds_len = 0;
	}
	error = _xen_alloc_contig(size, alignment,
	    boundary, &mlist, flags, low, high);
	if (error)
		return error;
	goto again;
}