/*
 * Copyright (c) 2004 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_xio.c,v 1.4 2004/04/03 08:20:10 dillon Exp $
 */
/*
 * Kernel XIO interface. An initialized XIO is basically a collection of
 * appropriately held vm_page_t's. XIO buffers are vmspace agnostic and
 * can represent userspace or kernelspace buffers, and can be passed to
 * foreign threads outside of the originating vmspace. XIO buffers are
 * not mapped into KVM and thus can be manipulated and passed around with
 * very low overheads.
 *
 * The intent is for XIO to be used in the I/O path, VFS, CAPS, and other
 * places that need to pass (possibly userspace) data between threads.
 *
 * TODO: check for busy page when modifying, check writeable.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/xio.h>
#include <sys/sfbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>

/*
 * Initialize an XIO given a userspace buffer. 0 is returned on success,
 * an error code on failure. The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes.
 */
int
xio_init_ubuf(xio_t xio, void *ubase, size_t ubytes, int flags)
{
    vm_offset_t addr;
    vm_paddr_t paddr;
    vm_page_t m;
    int i;
    int n;
    int vmprot;

    addr = trunc_page((vm_offset_t)ubase);
    xio->xio_flags = flags;
    xio->xio_bytes = 0;
    xio->xio_error = 0;
    if (ubytes == 0) {
	xio->xio_offset = 0;
	xio->xio_npages = 0;
    } else {
	vmprot = (flags & XIOF_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
	xio->xio_offset = (vm_offset_t)ubase & PAGE_MASK;
	xio->xio_pages = xio->xio_internal_pages;
	if ((n = PAGE_SIZE - xio->xio_offset) > ubytes)
	    n = ubytes;
	for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	    if (vm_fault_quick((caddr_t)addr, vmprot) < 0)
		break;
	    if ((paddr = pmap_kextract(addr)) == 0)
		break;
	    m = PHYS_TO_VM_PAGE(paddr);
	    vm_page_hold(m);
	    xio->xio_pages[i] = m;
	    ubytes -= n;
	    xio->xio_bytes += n;
	    if ((n = ubytes) > PAGE_SIZE)
		n = PAGE_SIZE;
	    addr += PAGE_SIZE;
	}
	xio->xio_npages = i;

	/*
	 * If a failure occurred clean out what we loaded and return EFAULT.
	 * Return 0 on success.
	 */
	if (i < XIO_INTERNAL_PAGES && n) {
	    xio_release(xio);
	    xio->xio_error = EFAULT;
	}
    }
    return(xio->xio_error);
}
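/*
 * Illustrative sketch, not part of the original file: pinning a userspace
 * buffer with xio_init_ubuf() and dropping the page holds afterwards. The
 * name xio_example_pin_ubuf is hypothetical; flags of 0 yield VM_PROT_READ
 * faults per the logic above, suitable when the kernel will only read the
 * buffer. For example, with a 4096-byte PAGE_SIZE, ubase 0x1234 and ubytes
 * 8192 pin three pages (3532 + 4096 + 564 bytes) with xio_offset 0x234.
 * Compiled out by default.
 */
#ifdef XIO_EXAMPLE
static int
xio_example_pin_ubuf(void *ubase, size_t ubytes)
{
    struct xio xio;
    int error;

    error = xio_init_ubuf(&xio, ubase, ubytes, 0);
    if (error == 0) {
	/*
	 * The pages backing [ubase, ubase + xio.xio_bytes) are now held
	 * and could be handed to a foreign thread without a KVM mapping.
	 */
	xio_release(&xio);
    }
    return(error);
}
#endif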
/*
 * Initialize an XIO given a kernelspace buffer. 0 is returned on success,
 * an error code on failure. The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes.
 */
int
xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
{
    vm_offset_t addr;
    vm_paddr_t paddr;
    vm_page_t m;
    int i;
    int n;

    addr = trunc_page((vm_offset_t)kbase);
    xio->xio_flags = 0;
    xio->xio_offset = (vm_offset_t)kbase & PAGE_MASK;
    xio->xio_bytes = 0;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_error = 0;
    if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
	n = kbytes;
    for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	if ((paddr = pmap_kextract(addr)) == 0)
	    break;
	m = PHYS_TO_VM_PAGE(paddr);
	vm_page_hold(m);
	xio->xio_pages[i] = m;
	kbytes -= n;
	xio->xio_bytes += n;
	if ((n = kbytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	addr += PAGE_SIZE;
    }
    xio->xio_npages = i;

    /*
     * If a failure occurred clean out what we loaded and return EFAULT.
     * Return 0 on success.
     */
    if (i < XIO_INTERNAL_PAGES && n) {
	xio_release(xio);
	xio->xio_error = EFAULT;
    }
    return(xio->xio_error);
}

/*
 * Cleanup an XIO, releasing the hold on each of its pages. The XIO is
 * left in an error state (ENOBUFS) to catch accidental reuse after
 * release.
 */
void
xio_release(xio_t xio)
{
    int i;
    vm_page_t m;

    for (i = 0; i < xio->xio_npages; ++i) {
	m = xio->xio_pages[i];
	vm_page_unhold(m);
    }
    xio->xio_offset = 0;
    xio->xio_npages = 0;
    xio->xio_bytes = 0;
    xio->xio_error = ENOBUFS;
}

/*
 * Copy data between an XIO and a UIO. If the UIO represents userspace it
 * must be relative to the current context. Both the UIO and the XIO are
 * modified, but the XIO's pages are not released when exhausted.
 *
 * UIO_READ	xio -> uio
 * UIO_WRITE	uio -> xio
 */
int
xio_uio_copy(xio_t xio, struct uio *uio, int *sizep)
{
    int error;
    int bytes;

    if ((bytes = xio->xio_bytes) > uio->uio_resid)
	bytes = uio->uio_resid;
    error = uiomove_fromphys(xio->xio_pages, xio->xio_offset, bytes, uio);
    if (error == 0) {
	xio->xio_bytes -= bytes;
	xio->xio_offset += bytes;
	*sizep = bytes;
    } else {
	*sizep = 0;
    }
    return(error);
}
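/*
 * Illustrative sketch, not part of the original file: draining an XIO into
 * a caller-supplied uio with xio_uio_copy(), as a device read routine
 * might. The name xio_example_drain is hypothetical. Compiled out by
 * default.
 */
#ifdef XIO_EXAMPLE
static int
xio_example_drain(xio_t xio, struct uio *uio)
{
    int copied;
    int error;

    error = xio_uio_copy(xio, uio, &copied);
    if (error == 0 && xio->xio_bytes == 0)
	xio_release(xio);	/* exhausted; drop the page holds ourselves */
    return(error);
}
#endif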
/*
 * Copy the specified number of bytes from the xio to a userland
 * buffer. Return an error code or 0 on success.
 *
 * The XIO is modified, but the XIO's pages are not released when exhausted.
 */
int
xio_copy_xtou(xio_t xio, void *uptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct sf_buf *sf;

    if (bytes > xio->xio_bytes)
	return(EFAULT);

    offset = xio->xio_offset & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = xio->xio_offset >> PAGE_SHIFT; i < xio->xio_npages; ++i) {
	m = xio->xio_pages[i];
	sf = sf_buf_alloc(m, SFBA_QUICK);
	error = copyout((char *)sf_buf_kva(sf) + offset, uptr, n);
	sf_buf_free(sf);
	if (error)
	    break;
	bytes -= n;
	xio->xio_bytes -= n;
	xio->xio_offset += n;
	uptr = (char *)uptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}

/*
 * Copy the specified number of bytes from the xio to a kernel
 * buffer. Return an error code or 0 on success.
 *
 * The XIO is modified, but the XIO's pages are not released when exhausted.
 */
int
xio_copy_xtok(xio_t xio, void *kptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct sf_buf *sf;

    if (bytes > xio->xio_bytes)
	return(EFAULT);

    offset = xio->xio_offset & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = xio->xio_offset >> PAGE_SHIFT; i < xio->xio_npages; ++i) {
	m = xio->xio_pages[i];
	sf = sf_buf_alloc(m, SFBA_QUICK);
	bcopy((char *)sf_buf_kva(sf) + offset, kptr, n);
	sf_buf_free(sf);
	bytes -= n;
	xio->xio_bytes -= n;
	xio->xio_offset += n;
	kptr = (char *)kptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}
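/*
 * Illustrative sketch, not part of the original file: staging a kernel
 * buffer in an XIO with xio_init_kbuf() and copying it out to a userland
 * pointer with xio_copy_xtou(). The name xio_example_ktou is hypothetical.
 * Compiled out by default.
 */
#ifdef XIO_EXAMPLE
static int
xio_example_ktou(void *kbase, void *uptr, int bytes)
{
    struct xio xio;
    int error;

    error = xio_init_kbuf(&xio, kbase, bytes);
    if (error == 0) {
	error = xio_copy_xtou(&xio, uptr, bytes);
	xio_release(&xio);	/* drop the page holds in either case */
    }
    return(error);
}
#endif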