/*
 * (MPSAFE)
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and David Xu <davidxu@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_umtx.c,v 1.9 2008/05/09 07:24:45 dillon Exp $
 */

/*
 * This module implements userland mutex helper functions.  umtx_sleep()
 * handles blocking and umtx_wakeup() handles wakeups.  The sleep/wakeup
 * functions operate on user addresses.
 */
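/*
 * Illustrative userland usage (a sketch only, not part of this module):
 * a futex-style mutex can be built on these two syscalls.  The lock word
 * encoding below is an assumption for illustration, with 0 = unlocked and
 * 2 = locked and possibly contested (a fuller implementation would also
 * distinguish 1 = locked, uncontested):
 *
 *	lock:
 *		while (atomic_swap_int(&mtx, 2) != 0)
 *			umtx_sleep(&mtx, 2, 0);	 sleep while *ptr is still 2
 *	unlock:
 *		if (atomic_swap_int(&mtx, 0) == 2)
 *			umtx_wakeup(&mtx, 1);	 wake one waiter
 *
 * This sketch is deliberately conservative: it may issue wakeups with no
 * waiters present, and umtx_sleep() returning early (EBUSY because the
 * value changed, or EINTR) simply causes the loop to re-test the lock
 * word, so no return value needs special handling here.
 */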
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>
#include <sys/sysent.h>
#include <sys/syscall.h>
#include <sys/module.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <vm/vm_page2.h>

static void umtx_sleep_page_action_cow(vm_page_t m, vm_page_action_t action);

/*
 * If the contents of the userland-supplied pointer match the specified
 * value, enter an interruptible sleep for up to <timeout> microseconds.
 * If the contents do not match, return immediately.
 *
 * Returns 0 if we slept and were woken up, -1 and EWOULDBLOCK if we slept
 * and timed out, and EBUSY if the contents of the pointer already do not
 * match the specified value.  A timeout of 0 indicates an unlimited sleep.
 * EINTR is returned if the call was interrupted by a signal (even if
 * the signal specifies that the system call should restart).
 *
 * This function interlocks against calls to umtx_wakeup().  It does NOT
 * interlock against changes in *ptr, but it does not have to.  The standard
 * use of *ptr is to differentiate between an uncontested and a contested
 * mutex and to call umtx_wakeup() when releasing a contested mutex.
 * Therefore we can safely race against changes in *ptr as long as we are
 * properly interlocked against the umtx_wakeup() call.
 *
 * The VM page associated with the mutex is held in an attempt to keep
 * the mutex's physical address consistent, allowing umtx_sleep() and
 * umtx_wakeup() to use the physical address as their rendezvous.  BUT
 * situations can arise where the physical address may change, particularly
 * if a threaded program fork()s and the mutex's memory becomes
 * copy-on-write.  We register an event on the VM page to catch COWs.
 *
 * umtx_sleep { const int *ptr, int value, int timeout }
 */
int
sys_umtx_sleep(struct umtx_sleep_args *uap)
{
	struct lwbuf lwb_cache;
	struct lwbuf *lwb;
	struct vm_page_action action;
	vm_page_t m;
	void *waddr;
	int offset;
	int timeout;
	int error = EBUSY;

	if (uap->timeout < 0)
		return (EINVAL);
	if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
		return (EFAULT);

	/*
	 * When faulting in the page, force any COW pages to be resolved.
	 * Otherwise the physical page we sleep on may not match the page
	 * being woken up.
	 */
	lwkt_gettoken(&vm_token);
	m = vm_fault_page_quick((vm_offset_t)uap->ptr,
				VM_PROT_READ|VM_PROT_WRITE, &error);
	if (m == NULL) {
		error = EFAULT;
		goto done;
	}
	lwb = lwbuf_alloc(m, &lwb_cache);
	offset = (vm_offset_t)uap->ptr & PAGE_MASK;

	/*
	 * The critical section is required to interlock the tsleep against
	 * a wakeup from another cpu.  The lfence forces synchronization.
	 */
	if (*(int *)(lwbuf_kva(lwb) + offset) == uap->value) {
		if ((timeout = uap->timeout) != 0) {
			/*
			 * Convert microseconds to ticks, rounding any
			 * fractional tick up so a short non-zero timeout
			 * never collapses to 0 (an unlimited sleep).
			 */
			timeout = (timeout / 1000000) * hz +
				  ((timeout % 1000000) * hz + 999999) / 1000000;
		}
		waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);
		crit_enter();
		tsleep_interlock(waddr, PCATCH | PDOMAIN_UMTX);
		if (*(int *)(lwbuf_kva(lwb) + offset) == uap->value) {
			vm_page_init_action(m, &action,
					    umtx_sleep_page_action_cow, waddr);
			vm_page_register_action(&action, VMEVENT_COW);
			if (*(int *)(lwbuf_kva(lwb) + offset) == uap->value) {
				error = tsleep(waddr, PCATCH | PINTERLOCKED |
						      PDOMAIN_UMTX,
					       "umtxsl", timeout);
			} else {
				error = EBUSY;
			}
			vm_page_unregister_action(&action);
		} else {
			error = EBUSY;
		}
		crit_exit();
		/* Always break out in case of signal, even if restartable */
		if (error == ERESTART)
			error = EINTR;
	} else {
		error = EBUSY;
	}

	lwbuf_free(lwb);
	/*vm_page_dirty(m); we don't actually dirty the page */
	vm_page_unhold(m);
done:
	lwkt_reltoken(&vm_token);
	return(error);
}

/*
 * If this page is being copied it may no longer represent the page
 * underlying our virtual address.  Wake up any umtx_sleep()'s
 * that were waiting on its physical address to force them to retry.
 */
static void
umtx_sleep_page_action_cow(vm_page_t m, vm_page_action_t action)
{
	lwkt_gettoken(&vm_token);
	wakeup_domain(action->data, PDOMAIN_UMTX);
	lwkt_reltoken(&vm_token);
}
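/*
 * Illustrative timeline of the COW hazard the callback above handles
 * (a hypothetical sequence, for clarity only):
 *
 *	1. thread A sleeps in umtx_sleep() keyed on the mutex's physical
 *	   address P1
 *	2. the process fork()s; the page containing the mutex becomes
 *	   copy-on-write
 *	3. a subsequent write to the mutex faults in a new page at physical
 *	   address P2
 *	4. an umtx_wakeup() issued against the new mapping rendezvouses on
 *	   P2 and would never find A parked on P1
 *
 * The VMEVENT_COW callback fires when the page is copied (step 3) and
 * wakes the sleepers still parked on the old physical address, forcing
 * them to retry against the mutex's new location.
 */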
/*
 * umtx_wakeup { const int *ptr, int count }
 *
 * Wake up the specified number of processes held in umtx_sleep() on the
 * specified user address.  A count of 0 wakes up all waiting processes.
 *
 * XXX assumes that the physical address space does not exceed the virtual
 * address space.
 */
int
sys_umtx_wakeup(struct umtx_wakeup_args *uap)
{
	vm_page_t m;
	int offset;
	int error;
	void *waddr;

	cpu_mfence();
	if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
		return (EFAULT);
	lwkt_gettoken(&vm_token);
	m = vm_fault_page_quick((vm_offset_t)uap->ptr, VM_PROT_READ, &error);
	if (m == NULL) {
		error = EFAULT;
		goto done;
	}
	offset = (vm_offset_t)uap->ptr & PAGE_MASK;
	waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);

	if (uap->count == 1) {
		wakeup_domain_one(waddr, PDOMAIN_UMTX);
	} else {
		/* XXX wakes them all up for now */
		wakeup_domain(waddr, PDOMAIN_UMTX);
	}
	vm_page_unhold(m);
	error = 0;
done:
	lwkt_reltoken(&vm_token);
	return(error);
}