/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_xio.c,v 1.15 2007/08/13 17:20:04 dillon Exp $
 */
/*
 * Kernel XIO interface.  An initialized XIO is basically a collection of
 * appropriately held vm_page_t's.  XIO buffers are vmspace agnostic and
 * can represent userspace or kernelspace buffers, and can be passed to
 * foreign threads outside of the originating vmspace.  XIO buffers are
 * not mapped into KVM and thus can be manipulated and passed around with
 * very low overheads.
 *
 * The intent is for XIO to be used in the I/O path, VFS, CAPS, and other
 * places that need to pass (possibly userspace) data between threads.
 *
 * TODO: check for busy page when modifying, check writeable.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/xio.h>
#include <sys/sfbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>

/*
 * Just do basic initialization of an empty XIO.
 */
void
xio_init(xio_t xio)
{
    xio->xio_flags = 0;
    xio->xio_bytes = 0;
    xio->xio_error = 0;
    xio->xio_offset = 0;
    xio->xio_npages = 0;
    xio->xio_pages = xio->xio_internal_pages;
}
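/*
 * Usage sketch (illustrative only; 'ubuf' is a hypothetical userspace
 * pointer and XIOF_READ is assumed from sys/xio.h): wrap a user buffer
 * in the originating context, copy it into a kernel buffer, then drop
 * the page holds.  Because the XIO is vmspace agnostic, the copy step
 * could just as well run in a foreign thread.
 *
 *	struct xio xio;
 *	char kbuf[256];
 *	int error;
 *
 *	error = xio_init_ubuf(&xio, ubuf, sizeof(kbuf), XIOF_READ);
 *	if (error == 0) {
 *		error = xio_copy_xtok(&xio, 0, kbuf, xio.xio_bytes);
 *		xio_release(&xio);
 *	}
 */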
/*
 * Initialize an XIO given a userspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_ubuf(xio_t xio, void *ubase, size_t ubytes, int flags)
{
    vm_offset_t addr;
    vm_page_t m;
    vm_page_t m0;
    int error;
    int i;
    int n;
    int vmprot;

    addr = trunc_page((vm_offset_t)ubase);
    xio->xio_flags = flags;
    xio->xio_bytes = 0;
    xio->xio_error = 0;
    if (ubytes == 0) {
	xio->xio_offset = 0;
	xio->xio_npages = 0;
    } else {
	vmprot = (flags & XIOF_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
	xio->xio_offset = (vm_offset_t)ubase & PAGE_MASK;
	xio->xio_pages = xio->xio_internal_pages;
	if ((n = PAGE_SIZE - xio->xio_offset) > ubytes)
	    n = ubytes;
	m0 = NULL;
	for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	    m = vm_fault_page_quick(addr, vmprot, &error);
	    if (m == NULL)
		break;

	    /*
	     * Check linearity, used by syslink to memory map DMA buffers.
	     * Do this before accounting for the page so a rejected page
	     * is unheld rather than leaked, and so the cleanup below
	     * still sees bytes outstanding and reports the failure.
	     */
	    if (flags & XIOF_VMLINEAR) {
		if (i == 0) {
		    m0 = m;
		} else
		if (m->object != m0->object || m->pindex != m0->pindex + i) {
		    vm_page_unhold(m);
		    error = EINVAL;
		    break;
		}
	    }
	    xio->xio_pages[i] = m;
	    ubytes -= n;
	    xio->xio_bytes += n;
	    if ((n = ubytes) > PAGE_SIZE)
		n = PAGE_SIZE;
	    addr += PAGE_SIZE;
	}
	xio->xio_npages = i;

	/*
	 * If a failure occurred clean out what we loaded and return EFAULT.
	 * Return 0 on success.
	 */
	if (i < XIO_INTERNAL_PAGES && n) {
	    xio_release(xio);
	    xio->xio_error = EFAULT;
	}
    }
    return(xio->xio_error);
}

/*
 * Initialize an XIO given a kernelspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
{
    vm_offset_t addr;
    vm_paddr_t paddr;
    vm_page_t m;
    int i;
    int n;

    addr = trunc_page((vm_offset_t)kbase);
    xio->xio_flags = 0;
    xio->xio_offset = (vm_offset_t)kbase & PAGE_MASK;
    xio->xio_bytes = 0;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_error = 0;
    if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
	n = kbytes;
    for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	if ((paddr = pmap_kextract(addr)) == 0)
	    break;
	crit_enter();
	m = PHYS_TO_VM_PAGE(paddr);
	vm_page_hold(m);
	crit_exit();
	xio->xio_pages[i] = m;
	kbytes -= n;
	xio->xio_bytes += n;
	if ((n = kbytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	addr += PAGE_SIZE;
    }
    xio->xio_npages = i;

    /*
     * If a failure occurred clean out what we loaded and return EFAULT.
     * Return 0 on success.
     */
    if (i < XIO_INTERNAL_PAGES && n) {
	xio_release(xio);
	xio->xio_error = EFAULT;
    }
    return(xio->xio_error);
}
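/*
 * Usage sketch for the kernelspace path (illustrative only; 'kbuf' and
 * 'klen' are hypothetical): wrap an existing, mapped kernel buffer so a
 * foreign thread can copy it out without mapping anything itself.
 * pmap_kextract() returns 0 for unmapped addresses, so the buffer must
 * be resident for the whole range.
 *
 *	struct xio xio;
 *	int error;
 *
 *	error = xio_init_kbuf(&xio, kbuf, klen);
 *	if (error == 0) {
 *		(hand the XIO to the consumer, which calls
 *		 xio_copy_xtou() or xio_uio_copy(), and finally
 *		 xio_release())
 *	}
 */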
/*
 * Initialize an XIO given an array of vm_page pointers.  The caller's
 * pages are held for the life of the XIO and released by xio_release().
 */
int
xio_init_pages(xio_t xio, struct vm_page **mbase, int npages, int xflags)
{
    int i;

    KKASSERT(npages <= XIO_INTERNAL_PAGES);

    xio->xio_flags = xflags;
    xio->xio_offset = 0;
    xio->xio_bytes = 0;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_npages = npages;
    xio->xio_error = 0;
    crit_enter();
    for (i = 0; i < npages; ++i) {
	vm_page_hold(mbase[i]);
	xio->xio_pages[i] = mbase[i];
    }
    crit_exit();
    return(0);
}

/*
 * Clean up an XIO so it can be destroyed.  The pages associated with the
 * XIO are released.
 */
void
xio_release(xio_t xio)
{
    int i;
    vm_page_t m;

    crit_enter();
    for (i = 0; i < xio->xio_npages; ++i) {
	m = xio->xio_pages[i];
	vm_page_unhold(m);
    }
    crit_exit();
    xio->xio_offset = 0;
    xio->xio_npages = 0;
    xio->xio_bytes = 0;
    xio->xio_error = ENOBUFS;	/* leave an error so a released XIO is
				 * not mistaken for a valid one */
}

/*
 * Copy data between an XIO and a UIO.  If the UIO represents userspace it
 * must be relative to the current context.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.  The UIO is updated to reflect the copy.
 *
 * UIO_READ		xio -> uio
 * UIO_WRITE		uio -> xio
 */
int
xio_uio_copy(xio_t xio, int uoffset, struct uio *uio, int *sizep)
{
    int error;
    int bytes;

    bytes = xio->xio_bytes - uoffset;
    if (bytes > uio->uio_resid)
	bytes = uio->uio_resid;
    KKASSERT(bytes >= 0);
    error = uiomove_fromphys(xio->xio_pages, xio->xio_offset + uoffset,
			     bytes, uio);
    if (error == 0)
	*sizep = bytes;
    else
	*sizep = 0;
    return(error);
}

/*
 * Copy the specified number of bytes from the xio to a userland
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtou(xio_t xio, int uoffset, void *uptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct sf_buf *sf;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
	error = copyout((char *)sf_buf_kva(sf) + offset, uptr, n);
	sf_buf_free(sf);
	if (error)
	    break;
	bytes -= n;
	uptr = (char *)uptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}
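/*
 * Note on uoffset (sketch, hypothetical sizes): uoffset 0 always names
 * the first valid byte of the XIO even when the wrapped buffer was not
 * page aligned; the copy routines add xio_offset back in to derive the
 * real page index and intra-page offset.  Copying out in two pieces:
 *
 *	n = xio->xio_bytes / 2;
 *	error = xio_copy_xtou(xio, 0, uptr, n);
 *	if (error == 0) {
 *		error = xio_copy_xtou(xio, n, (char *)uptr + n,
 *				      xio->xio_bytes - n);
 *	}
 */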
/*
 * Copy the specified number of bytes from the xio to a kernel
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtok(xio_t xio, int uoffset, void *kptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct sf_buf *sf;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
	bcopy((char *)sf_buf_kva(sf) + offset, kptr, n);
	sf_buf_free(sf);
	bytes -= n;
	kptr = (char *)kptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}

/*
 * Copy the specified number of bytes from userland to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_utox(xio_t xio, int uoffset, const void *uptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct sf_buf *sf;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
	error = copyin(uptr, (char *)sf_buf_kva(sf) + offset, n);
	sf_buf_free(sf);
	if (error)
	    break;
	bytes -= n;
	uptr = (const char *)uptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}

/*
 * Copy the specified number of bytes from the kernel to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_ktox(xio_t xio, int uoffset, const void *kptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct sf_buf *sf;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
	bcopy(kptr, (char *)sf_buf_kva(sf) + offset, n);
	sf_buf_free(sf);
	bytes -= n;
	kptr = (const char *)kptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}
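/*
 * Write-side usage sketch (illustrative only; 'ubuf', 'kdata', and 'len'
 * are hypothetical): fill the pages behind a userspace buffer from a
 * kernel source.  The XIO must be initialized with XIOF_WRITE so the
 * user pages are faulted in with VM_PROT_WRITE.
 *
 *	struct xio xio;
 *	int error;
 *
 *	error = xio_init_ubuf(&xio, ubuf, len, XIOF_WRITE);
 *	if (error == 0) {
 *		error = xio_copy_ktox(&xio, 0, kdata, xio.xio_bytes);
 *		xio_release(&xio);
 *	}
 */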