/*	$NetBSD: vm_vfs.c,v 1.12 2009/10/07 10:23:50 pooka Exp $	*/

/*
 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_vfs.c,v 1.12 2009/10/07 10:23:50 pooka Exp $");

#include <sys/param.h>

#include <sys/buf.h>
#include <sys/kmem.h>		/* kmem_alloc/kmem_zalloc/kmem_free */
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/*
 * Release resources held during async I/O.  This is almost the
 * same as uvm_aio_aiodone() from uvm_pager.c and only lacks the
 * call to uvm_aio_aiodone_pages(): pages are unbusied directly here.
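 *
 * Each page backing the buffer is looked up from its pager-window
 * address with uvm_pageratop(), has PG_PAGEOUT cleared, and is
 * unbusied once the pager mapping has been torn down; the iobuf
 * itself is then released with putiobuf().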
 */
void
uvm_aio_aiodone(struct buf *bp)
{
	int i, npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page **pgs;
	vaddr_t va;

	pgs = kmem_alloc(npages * sizeof(*pgs), KM_SLEEP);
	for (i = 0; i < npages; i++) {
		va = (vaddr_t)bp->b_data + (i << PAGE_SHIFT);
		pgs[i] = uvm_pageratop(va);
		pgs[i]->flags &= ~PG_PAGEOUT;
	}

	uvm_pagermapout((vaddr_t)bp->b_data, npages);
	uvm_page_unbusy(pgs, npages);

	if (BUF_ISWRITE(bp) && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}

	putiobuf(bp);

	kmem_free(pgs, npages * sizeof(*pgs));
}

void
uvm_aio_biodone(struct buf *bp)
{

	uvm_aio_aiodone(bp);
}

/*
 * UBC
 */

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page **pgs;
	int maxpages = MIN(32, round_page(len) >> PAGE_SHIFT);
	int rv, npages, i;

	pgs = kmem_zalloc(maxpages * sizeof(*pgs), KM_SLEEP);
	while (len) {
		npages = MIN(maxpages, round_page(len) >> PAGE_SHIFT);
		memset(pgs, 0, npages * sizeof(struct vm_page *));
		mutex_enter(&uobj->vmobjlock);
		rv = uobj->pgops->pgo_get(uobj, off, pgs, &npages, 0, 0, 0, 0);
		KASSERT(npages > 0);

		for (i = 0; i < npages; i++) {
			uint8_t *start;
			size_t chunkoff, chunklen;

			chunkoff = off & PAGE_MASK;
			chunklen = MIN(PAGE_SIZE - chunkoff, len);
			/* page memory hangs off pg->uanon in the rump VM */
			start = (uint8_t *)pgs[i]->uanon + chunkoff;

			memset(start, 0, chunklen);
			pgs[i]->flags &= ~PG_CLEAN;

			off += chunklen;
			len -= chunklen;
		}
		uvm_page_unbusy(pgs, npages);
	}
	kmem_free(pgs, maxpages * sizeof(*pgs));

	return;
}

/*
 * Estimate how many pages a transfer of len bytes starting at off
 * touches; the caller loops until the whole request is done.
 */
#define len2npages(off, len)						\
    (((((len) + PAGE_MASK) & ~(PAGE_MASK)) >> PAGE_SHIFT)		\
	+ (((off & PAGE_MASK) + (len & PAGE_MASK)) > PAGE_SIZE))

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo,
	int advice, int flags)
{
	struct vm_page **pgs;
	int npages = len2npages(uio->uio_offset, todo);
	size_t pgalloc;
	int i, rv, pagerflags;

	pgalloc = npages * sizeof(*pgs);
	pgs = kmem_zalloc(pgalloc, KM_SLEEP);

	pagerflags = PGO_SYNCIO | PGO_NOBLOCKALLOC | PGO_NOTIMESTAMP;
	if (flags & UBC_WRITE)
		pagerflags |= PGO_PASTEOF;
	if (flags & UBC_FAULTBUSY)
		pagerflags |= PGO_OVERWRITE;

	do {
		mutex_enter(&uobj->vmobjlock);
		rv = uobj->pgops->pgo_get(uobj, uio->uio_offset & ~PAGE_MASK,
		    pgs, &npages, 0, VM_PROT_READ | VM_PROT_WRITE, 0,
		    pagerflags);
		if (rv)
			goto out;

		for (i = 0; i < npages; i++) {
			size_t xfersize;
			off_t pageoff;

			pageoff = uio->uio_offset & PAGE_MASK;
			xfersize = MIN(MIN(todo, PAGE_SIZE),
			    PAGE_SIZE - pageoff);
			uiomove((uint8_t *)pgs[i]->uanon + pageoff,
			    xfersize, uio);
			if (uio->uio_rw == UIO_WRITE)
				pgs[i]->flags &= ~PG_CLEAN;
			todo -= xfersize;
		}
		uvm_page_unbusy(pgs, npages);
	} while (todo);

 out:
	kmem_free(pgs, pgalloc);
	return rv;
}