/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and David Xu <davidxu@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_umtx.c,v 1.9 2008/05/09 07:24:45 dillon Exp $
 */

/*
 * This module implements userland mutex helper functions.  umtx_sleep()
 * handles blocking and umtx_wakeup() handles wakeups.  The sleep/wakeup
 * functions operate on user addresses.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>
#include <sys/sysent.h>
#include <sys/syscall.h>
#include <sys/sfbuf.h>
#include <sys/module.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <vm/vm_page2.h>
#include <sys/mplock2.h>

static void umtx_sleep_page_action_cow(vm_page_t m, vm_page_action_t action);

/*
 * If the contents of the userland-supplied pointer match the specified
 * value, enter an interruptible sleep for up to <timeout> microseconds.
 * If the contents do not match, return immediately.
 *
 * Returns 0 if we slept and were woken up, EWOULDBLOCK if we slept and
 * timed out, and EBUSY if the contents of the pointer already do not
 * match the specified value.  A timeout of 0 indicates an unlimited sleep.
 * EINTR is returned if the call was interrupted by a signal (even if
 * the signal specifies that the system call should restart).
 *
 * This function interlocks against calls to umtx_wakeup().  It does NOT
 * interlock against changes in *ptr, but it does not have to.  The standard
 * use of *ptr is to differentiate between an uncontested and a contested
 * mutex and to call umtx_wakeup() when releasing a contested mutex.
 * Therefore we can safely race against changes in *ptr as long as we are
 * properly interlocked against the umtx_wakeup() call.
 *
 * The VM page associated with the mutex is held in an attempt to keep
 * the mutex's physical address consistent, allowing umtx_sleep() and
 * umtx_wakeup() to use the physical address as their rendezvous.  BUT
 * situations can arise where the physical address may change, particularly
 * if a threaded program fork()s and the mutex's memory becomes
 * copy-on-write.  We register an event on the VM page to catch COWs.
 *
 * umtx_sleep { const int *ptr, int value, int timeout }
 *
 * MPALMOSTSAFE
 */
int
sys_umtx_sleep(struct umtx_sleep_args *uap)
{
        int error = EBUSY;
        struct sf_buf *sf;
        struct vm_page_action action;
        vm_page_t m;
        void *waddr;
        int offset;
        int timeout;

        if (uap->timeout < 0)
                return (EINVAL);
        if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
                return (EFAULT);

        /*
         * When faulting in the page, force any COW pages to be resolved.
         * Otherwise the physical page we sleep on may not match the page
         * being woken up.
         */
        get_mplock();
        m = vm_fault_page_quick((vm_offset_t)uap->ptr,
                                VM_PROT_READ|VM_PROT_WRITE, &error);
        if (m == NULL) {
                error = EFAULT;
                goto done;
        }
        sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
        offset = (vm_offset_t)uap->ptr & PAGE_MASK;

        /*
         * The critical section is required to interlock the tsleep against
         * a wakeup from another cpu.  The lfence forces synchronization.
         */
        if (*(int *)(sf_buf_kva(sf) + offset) == uap->value) {
                if ((timeout = uap->timeout) != 0) {
                        /*
                         * Convert microseconds to ticks, rounding the
                         * fractional-second portion up so a non-zero
                         * timeout never collapses to zero ticks.
                         */
                        timeout = (timeout / 1000000) * hz +
                                  ((timeout % 1000000) * hz + 999999) / 1000000;
                }
                waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);
                crit_enter();
                tsleep_interlock(waddr, PCATCH | PDOMAIN_UMTX);
                if (*(int *)(sf_buf_kva(sf) + offset) == uap->value) {
                        vm_page_init_action(&action,
                                            umtx_sleep_page_action_cow, waddr);
                        vm_page_register_action(m, &action, VMEVENT_COW);
                        error = tsleep(waddr,
                                       PCATCH | PINTERLOCKED | PDOMAIN_UMTX,
                                       "umtxsl", timeout);
                        vm_page_unregister_action(m, &action);
                } else {
                        error = EBUSY;
                }
                crit_exit();
                /* Always break out in case of signal, even if restartable */
                if (error == ERESTART)
                        error = EINTR;
        } else {
                error = EBUSY;
        }

        sf_buf_free(sf);
        /* vm_page_dirty(m); we don't actually dirty the page */
        vm_page_unhold(m);
done:
        rel_mplock();
        return(error);
}

/*
 * If this page is being copied it may no longer represent the page
 * underlying our virtual address.  Wake up any umtx_sleep()'s
 * that were waiting on its physical address to force them to retry.
 */
static void
umtx_sleep_page_action_cow(vm_page_t m, vm_page_action_t action)
{
        wakeup_domain(action->data, PDOMAIN_UMTX);
}
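
/*
 * Illustrative userland sketch (not built as part of this file): the
 * "standard use" of *ptr described above sys_umtx_sleep().  The 0/1/2
 * state encoding, the lock() helper, and the use of GCC's
 * __sync_bool_compare_and_swap() builtin are assumptions for illustration
 * only; umtx_sleep() is the libc wrapper for the system call implemented
 * above.
 *
 *      #include <unistd.h>
 *
 *      static volatile int mtx;   // 0=unlocked, 1=locked, 2=contested
 *
 *      static void
 *      lock(void)
 *      {
 *              // fast path: acquire an uncontested mutex, 0 -> 1
 *              if (__sync_bool_compare_and_swap(&mtx, 0, 1))
 *                      return;
 *              for (;;) {
 *                      // advertise contention so unlock() issues a wakeup
 *                      __sync_bool_compare_and_swap(&mtx, 1, 2);
 *                      // acquire in the contested state, 0 -> 2
 *                      if (__sync_bool_compare_and_swap(&mtx, 0, 2))
 *                              return;
 *                      // Sleeps only while *ptr still reads 2.  A release
 *                      // that races us flips *ptr to 0 first, so the
 *                      // syscall returns EBUSY immediately: exactly the
 *                      // interlock described in the comment above.
 *                      umtx_sleep(&mtx, 2, 0);
 *              }
 *      }
 */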
/*
 * umtx_wakeup { const int *ptr, int count }
 *
 * Wake up the specified number of processes held in umtx_sleep() on the
 * specified user address.  A count of 0 wakes up all waiting processes.
 *
 * XXX assumes that the physical address space does not exceed the virtual
 * address space.
 *
 * MPALMOSTSAFE
 */
int
sys_umtx_wakeup(struct umtx_wakeup_args *uap)
{
        vm_page_t m;
        int offset;
        int error;
        void *waddr;

        cpu_mfence();
        if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
                return (EFAULT);
        get_mplock();
        m = vm_fault_page_quick((vm_offset_t)uap->ptr, VM_PROT_READ, &error);
        if (m == NULL) {
                error = EFAULT;
                goto done;
        }
        offset = (vm_offset_t)uap->ptr & PAGE_MASK;
        waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);

        if (uap->count == 1) {
                wakeup_domain_one(waddr, PDOMAIN_UMTX);
        } else {
                /* XXX wakes them all up for now */
                wakeup_domain(waddr, PDOMAIN_UMTX);
        }
        vm_page_unhold(m);
        error = 0;
done:
        rel_mplock();
        return(error);
}
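
/*
 * Continuing the illustrative userland sketch following sys_umtx_sleep():
 * the release side, calling umtx_wakeup() only when releasing a contested
 * mutex, as the comment above sys_umtx_sleep() describes.  unlock() is an
 * assumed helper for illustration, not an API defined by this file.
 *
 *      static void
 *      unlock(void)
 *      {
 *              // uncontested release, 1 -> 0: nobody can be sleeping
 *              if (__sync_bool_compare_and_swap(&mtx, 1, 0))
 *                      return;
 *              // contested release, 2 -> 0, then wake one sleeper; the
 *              // winner re-acquires in state 2, so any remaining waiters
 *              // still see a contested mutex and are not stranded
 *              __sync_bool_compare_and_swap(&mtx, 2, 0);
 *              umtx_wakeup(&mtx, 1);
 *      }
 */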