/*
 * (MPSAFE)
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and David Xu <davidxu@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements userland mutex helper functions.  umtx_sleep()
 * handles blocking and umtx_wakeup() handles wakeups.  The sleep/wakeup
 * functions operate on user addresses.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>
#include <sys/sysent.h>
#include <sys/syscall.h>
#include <sys/module.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <vm/vm_page2.h>

#include <machine/vmm.h>

static void umtx_sleep_page_action_cow(vm_page_t m, vm_page_action_t action);

/*
 * If the contents of the userland-supplied pointer match the specified
 * value, enter an interruptible sleep for up to <timeout> microseconds.
 * If the contents do not match then return immediately.
 *
 * Returns 0 if we slept and were woken up, -1 and EWOULDBLOCK if we slept
 * and timed out, and EBUSY if the contents of the pointer already do
 * not match the specified value.  A timeout of 0 indicates an unlimited sleep.
 * EINTR is returned if the call was interrupted by a signal (even if
 * the signal specifies that the system call should restart).
 *
 * This function interlocks against calls to umtx_wakeup().  It does NOT
 * interlock against changes in *ptr.  However, it does not have to.  The
 * standard use of *ptr is to differentiate between an uncontested and a
 * contested mutex and to call umtx_wakeup() when releasing a contested
 * mutex.  Therefore we can safely race against changes in *ptr as long
 * as we are properly interlocked against the umtx_wakeup() call.
 *
 * The VM page associated with the mutex is held in an attempt to keep
 * the mutex's physical address consistent, allowing umtx_sleep() and
 * umtx_wakeup() to use the physical address as their rendezvous.  BUT
 * situations can arise where the physical address may change, particularly
 * if a threaded program fork()'s and the mutex's memory becomes
 * copy-on-write.  We register an event on the VM page to catch COWs.
 *
 * umtx_sleep { const int *ptr, int value, int timeout }
 */
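/*
 * Illustrative userland sketch (not part of this file): a simple
 * contested-mutex acquire loop built on umtx_sleep().  EBUSY, EWOULDBLOCK
 * and EINTR are all treated as "re-check the word and retry".  The 0/1/2
 * state encoding, the lock_contested() name and the use of
 * atomic_cmpset_int() from <machine/atomic.h> are assumptions made for
 * the example only.
 *
 *	#include <machine/atomic.h>
 *	#include <unistd.h>
 *
 *	// 0 = unlocked, 1 = locked, 2 = locked and possibly contested
 *	static void
 *	lock_contested(volatile u_int *mtx)
 *	{
 *		// Acquire in the contested state; simple, but always
 *		// forces the unlocker to call umtx_wakeup().
 *		while (atomic_cmpset_int(mtx, 0, 2) == 0) {
 *			// Mark the mutex contested, then sleep only
 *			// while the word still reads 2 (no timeout).
 *			atomic_cmpset_int(mtx, 1, 2);
 *			umtx_sleep((volatile const int *)mtx, 2, 0);
 *		}
 *	}
 */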
int
sys_umtx_sleep(struct umtx_sleep_args *uap)
{
    struct lwbuf lwb_cache;
    struct lwbuf *lwb;
    struct vm_page_action action;
    vm_page_t m;
    void *waddr;
    int offset;
    int timeout;
    int error = EBUSY;

    if (uap->timeout < 0)
	return (EINVAL);

    if (curthread->td_vmm) {
	register_t gpa;
	vmm_vm_get_gpa(curproc, &gpa, (register_t) uap->ptr);
	uap->ptr = (const int *)gpa;
    }

    if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
	return (EFAULT);

    /*
     * When faulting in the page, force any COW pages to be resolved.
     * Otherwise the physical page we sleep on may not match the page
     * being woken up.
     */
    m = vm_fault_page_quick((vm_offset_t)uap->ptr,
			    VM_PROT_READ|VM_PROT_WRITE, &error);
    if (m == NULL) {
	error = EFAULT;
	goto done;
    }
    lwb = lwbuf_alloc(m, &lwb_cache);
    offset = (vm_offset_t)uap->ptr & PAGE_MASK;

    /*
     * The critical section is required to interlock the tsleep against
     * a wakeup from another cpu.  The lfence forces synchronization.
     */
    if (*(int *)(lwbuf_kva(lwb) + offset) == uap->value) {
	if ((timeout = uap->timeout) != 0) {
	    timeout = (timeout / 1000000) * hz +
		      ((timeout % 1000000) * hz + 999999) / 1000000;
	}
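	/*
	 * Worked example of the conversion above (illustrative): with
	 * hz = 100, a timeout of 1500000us becomes (1 * 100) +
	 * ((500000 * 100 + 999999) / 1000000) = 100 + 50 = 150 ticks.
	 * The +999999 rounds any partial tick up so we never sleep
	 * shorter than requested.
	 */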
	waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);

	/*
	 * Wake us up if the memory location COWs while we are sleeping.
	 */
	crit_enter();
	vm_page_init_action(m, &action, umtx_sleep_page_action_cow, waddr);
	vm_page_register_action(&action, VMEVENT_COW);

	/*
	 * We must interlock just before sleeping.  If we interlock before
	 * registration, the lock operations done by the registration can
	 * interfere with it.
	 */
	tsleep_interlock(waddr, PCATCH | PDOMAIN_UMTX);
	if (*(int *)(lwbuf_kva(lwb) + offset) == uap->value &&
	    action.event == VMEVENT_COW) {
		error = tsleep(waddr, PCATCH | PINTERLOCKED | PDOMAIN_UMTX,
			       "umtxsl", timeout);
	} else {
		error = EBUSY;
	}
	vm_page_unregister_action(&action);
	crit_exit();
	/* Always break out in case of signal, even if restartable */
	if (error == ERESTART)
		error = EINTR;
    } else {
	error = EBUSY;
    }

    lwbuf_free(lwb);
    /*vm_page_dirty(m); we don't actually dirty the page */
    vm_page_unhold(m);
done:
    return(error);
}

/*
 * If this page is being copied it may no longer represent the page
 * underlying our virtual address.  Wake up any umtx_sleep()'s
 * that were waiting on its physical address to force them to retry.
 */
static void
umtx_sleep_page_action_cow(vm_page_t m, vm_page_action_t action)
{
    wakeup_domain(action->data, PDOMAIN_UMTX);
}

/*
 * umtx_wakeup { const int *ptr, int count }
 *
 * Wake up the specified number of processes blocked in umtx_sleep() on
 * the specified user address.  A count of 0 wakes up all waiting
 * processes.
 *
 * XXX assumes that the physical address space does not exceed the virtual
 * address space.
 */
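/*
 * Illustrative userland sketch (not part of this file): the release side
 * of the hypothetical lock_contested() example above.  It only enters the
 * kernel when the word was in the contested (2) state.  atomic_swap_int()
 * from <machine/atomic.h> is an assumption made for the example only.
 *
 *	#include <machine/atomic.h>
 *	#include <unistd.h>
 *
 *	static void
 *	unlock_contested(volatile u_int *mtx)
 *	{
 *		// Release the mutex; the old value tells us whether
 *		// anyone may be blocked in umtx_sleep().
 *		if (atomic_swap_int(mtx, 0) == 2)
 *			umtx_wakeup((volatile const int *)mtx, 1);
 *	}
 */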
int
sys_umtx_wakeup(struct umtx_wakeup_args *uap)
{
    vm_page_t m;
    int offset;
    int error;
    void *waddr;

    if (curthread->td_vmm) {
	register_t gpa;
	vmm_vm_get_gpa(curproc, &gpa, (register_t) uap->ptr);
	uap->ptr = (const int *)gpa;
    }

    cpu_mfence();
    if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
	return (EFAULT);
    m = vm_fault_page_quick((vm_offset_t)uap->ptr, VM_PROT_READ, &error);
    if (m == NULL) {
	error = EFAULT;
	goto done;
    }
    offset = (vm_offset_t)uap->ptr & PAGE_MASK;
    waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);

    if (uap->count == 1) {
	wakeup_domain_one(waddr, PDOMAIN_UMTX);
    } else {
	/* XXX wakes them all up for now */
	wakeup_domain(waddr, PDOMAIN_UMTX);
    }
    vm_page_unhold(m);
    error = 0;
done:
    return(error);
}