/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/lwkt_token.c,v 1.13 2005/04/18 01:03:28 dillon Exp $
 */

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/ipl.h>
#include <machine/smp.h>

#define THREAD_STACK	(UPAGES * PAGE_SIZE)

#else

#include <sys/stdint.h>
#include <libcaps/thread.h>
#include <sys/thread.h>
#include <sys/msgport.h>
#include <sys/errno.h>
#include <libcaps/globaldata.h>
#include <machine/cpufunc.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <machine/lock.h>
#include <machine/cpu.h>

#endif

#define	MAKE_TOKENS_SPIN
/* #define MAKE_TOKENS_YIELD */

#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS	1024	/* power of 2 */
#endif
#define LWKT_MASK_POOL_TOKENS	(LWKT_NUM_POOL_TOKENS - 1)

#ifdef INVARIANTS
static int token_debug = 0;
#endif

static void lwkt_reqtoken_remote(void *data);

static lwkt_token	pool_tokens[LWKT_NUM_POOL_TOKENS];

#ifdef _KERNEL

#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, token_debug, CTLFLAG_RW, &token_debug, 0, "");
#endif

#endif

#ifdef SMP

/*
 * Determine if we own all the tokens in the token reference list.
 * Return 1 on success, 0 on failure.
 *
 * As a side effect, queue requests for tokens we want that are owned
 * by other cpus.  The magic number is used to communicate when the
 * target cpu has processed the request.  Note, however, that the
 * target cpu may not be able to assign the token to us, which is why
 * the scheduler must spin.
 */
int
lwkt_chktokens(thread_t td)
{
    globaldata_t gd = td->td_gd;	/* mycpu */
    lwkt_tokref_t refs;
    globaldata_t dgd;
    lwkt_token_t tok;
    int r = 1;

    for (refs = td->td_toks; refs; refs = refs->tr_next) {
	tok = refs->tr_tok;
	if ((dgd = tok->t_cpu) != gd) {
	    cpu_mb1();
	    r = 0;

	    /*
	     * Queue a request to the target cpu; exit the loop early if
	     * we are unable to queue the IPI message.  The magic number
	     * flags whether we have a pending ipi request queued or not.
	     * It can be set from MAGIC2 to MAGIC1 by a remote cpu but can
	     * only be set from MAGIC1 to MAGIC2 by our cpu.
	     */
	    if (refs->tr_magic == LWKT_TOKREF_MAGIC1) {
		refs->tr_magic = LWKT_TOKREF_MAGIC2;	/* MP synched slow req */
		refs->tr_reqgd = gd;
		tok->t_reqcpu = gd;	/* MP unsynchronized 'fast' req */
		if (lwkt_send_ipiq_nowait(dgd, lwkt_reqtoken_remote, refs)) {
		    /* failed */
		    refs->tr_magic = LWKT_TOKREF_MAGIC1;
		    break;
		}
	    }
	}
    }
    return(r);
}

#endif
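
/*
 * Illustrative summary (a sketch derived from the code above and from
 * lwkt_reqtoken_remote()/lwkt_drain_token_requests() below, not original
 * commentary): a cross-cpu token request proceeds roughly as follows.
 *
 *	1. The requesting cpu sets tr_magic to MAGIC2 and sends an IPI
 *	   carrying the tokref to the owning cpu.
 *	2. The owning cpu either hands the token over immediately
 *	   (tok->t_cpu = tr_reqgd) or queues the tokref on its
 *	   gd_tokreqbase list for lwkt_drain_token_requests().
 *	3. In either case it eventually sets tr_magic back to MAGIC1 to
 *	   acknowledge that the request has been processed.
 *	4. The requesting cpu keeps calling lwkt_chktokens() (spinning or
 *	   yielding, see MAKE_TOKENS_*) until tok->t_cpu == mycpu.
 */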

/*
 * Check if we already own the token.  Return 1 on success, 0 on failure.
 */
int
lwkt_havetoken(lwkt_token_t tok)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    lwkt_tokref_t ref;

    for (ref = td->td_toks; ref; ref = ref->tr_next) {
        if (ref->tr_tok == tok)
            return(1);
    }
    return(0);
}

int
lwkt_havetokref(lwkt_tokref_t xref)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    lwkt_tokref_t ref;

    for (ref = td->td_toks; ref; ref = ref->tr_next) {
        if (ref == xref)
            return(1);
    }
    return(0);
}
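
/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * routine that requires its caller to already hold a token can assert
 * ownership with lwkt_havetoken(), e.g.
 *
 *	KKASSERT(lwkt_havetoken(&some_subsys_token));
 *
 * where 'some_subsys_token' stands in for whatever lwkt_token the
 * subsystem uses to serialize the data being touched.
 */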

#ifdef SMP

/*
 * Returns 1 if it is ok to give a token away, 0 if it is not.
 */
static int
lwkt_oktogiveaway_token(lwkt_token_t tok)
{
    globaldata_t gd = mycpu;
    lwkt_tokref_t ref;
    thread_t td;

    for (td = gd->gd_curthread; td; td = td->td_preempted) {
	for (ref = td->td_toks; ref; ref = ref->tr_next) {
	    if (ref->tr_tok == tok)
		return(0);
	}
    }
    return(1);
}

#endif

/*
 * Acquire a serializing token
 */

static __inline
void
_lwkt_gettokref(lwkt_tokref_t ref)
{
    lwkt_token_t tok;
    globaldata_t gd;
    thread_t td;

    gd = mycpu;			/* our cpu */
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
    td = gd->gd_curthread;	/* our thread */

    /*
     * Link the request into our thread's list.  This interlocks against
     * remote requests from other cpus and prevents the token from being
     * given away if our cpu already owns it.  This also allows us to
     * avoid using a critical section.
     */
    ref->tr_next = td->td_toks;
    cpu_mb1();		/* order memory / we can be interrupted */
    td->td_toks = ref;

    /*
     * If our cpu does not own the token then let the scheduler deal with
     * it.  We are guaranteed to own the tokens on our thread's token
     * list when we are switched back in.
     *
     * Otherwise make sure the token is not held by a thread we are
     * preempting.  If it is, let the scheduler deal with it.
     */
    tok = ref->tr_tok;
#ifdef SMP
    if (tok->t_cpu != gd) {
	/*
	 * Temporarily operate on tokens synchronously.  We have to fix
	 * a number of interlocks and especially the softupdates code to
	 * be able to properly yield.  ZZZ
	 */
#if defined(MAKE_TOKENS_SPIN)
	int x = 40000000;
	int y = 10;
	crit_enter();
	while (lwkt_chktokens(td) == 0) {
	    lwkt_process_ipiq();
	    lwkt_drain_token_requests();
	    if (--x == 0) {
		x = 40000000;
		printf("CHKTOKEN looping on cpu %d\n", gd->gd_cpuid);
#ifdef _KERNEL
		if (--y == 0)
			panic("CHKTOKEN looping on cpu %d", gd->gd_cpuid);
#endif
	    }
	    splz();
	}
	crit_exit();
#elif defined(MAKE_TOKENS_YIELD)
	lwkt_yield();
#else
#error MAKE_TOKENS_XXX ?
#endif
	KKASSERT(tok->t_cpu == gd);
    } else /* NOTE CONDITIONAL */
#endif
    if (td->td_preempted) {
	while ((td = td->td_preempted) != NULL) {
	    lwkt_tokref_t scan;
	    for (scan = td->td_toks; scan; scan = scan->tr_next) {
		if (scan->tr_tok == tok) {
		    lwkt_yield();
		    KKASSERT(tok->t_cpu == gd);
		    goto breakout;
		}
	    }
	}
breakout: ;
    }
    /* 'td' variable no longer valid due to preempt loop above */
}
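
/*
 * A note on usage (an added observation, following from the comments
 * above): token ownership is tied to the cpu and is revalidated by the
 * scheduler when a thread is switched back in, so holding a token is not
 * the same as holding a lock across a blocking operation.  A thread that
 * blocks may temporarily lose the token to another cpu and will own it
 * again on resume, which means state protected by a token generally has
 * to be re-checked after any blocking call.
 */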


/*
 * Attempt to acquire a serializing token
 */
static __inline
int
_lwkt_trytokref(lwkt_tokref_t ref)
{
    lwkt_token_t tok;
    globaldata_t gd;
    thread_t td;

    gd = mycpu;			/* our cpu */
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
    td = gd->gd_curthread;	/* our thread */

    /*
     * Link the request into our thread's list.  This interlocks against
     * remote requests from other cpus and prevents the token from being
     * given away if our cpu already owns it.  This also allows us to
     * avoid using a critical section.
     */
    ref->tr_next = td->td_toks;
    cpu_mb1();		/* order memory / we can be interrupted */
    td->td_toks = ref;

    /*
     * If our cpu does not own the token then stop now.
     *
     * Otherwise make sure the token is not held by a thread we are
     * preempting.  If it is, stop.
     */
    tok = ref->tr_tok;
#ifdef SMP
    if (tok->t_cpu != gd) {
	td->td_toks = ref->tr_next;	/* remove ref */
	return(0);
    } else /* NOTE CONDITIONAL */
#endif
    if (td->td_preempted) {
	while ((td = td->td_preempted) != NULL) {
	    lwkt_tokref_t scan;
	    for (scan = td->td_toks; scan; scan = scan->tr_next) {
		if (scan->tr_tok == tok) {
		    td = gd->gd_curthread;	/* our thread */
		    td->td_toks = ref->tr_next;	/* remove ref */
		    return(0);
		}
	    }
	}
    }
    /* 'td' variable no longer valid */
    return(1);
}

void
lwkt_gettoken(lwkt_tokref_t ref, lwkt_token_t tok)
{
    lwkt_tokref_init(ref, tok);
    _lwkt_gettokref(ref);
}

void
lwkt_gettokref(lwkt_tokref_t ref)
{
    _lwkt_gettokref(ref);
}

int
lwkt_trytoken(lwkt_tokref_t ref, lwkt_token_t tok)
{
    lwkt_tokref_init(ref, tok);
    return(_lwkt_trytokref(ref));
}

int
lwkt_trytokref(lwkt_tokref_t ref)
{
    return(_lwkt_trytokref(ref));
}

/*
 * Release a serializing token
 */
void
lwkt_reltoken(lwkt_tokref *_ref)
{
    lwkt_tokref *ref;
    lwkt_tokref **pref;
    lwkt_token_t tok;
    globaldata_t gd;
    thread_t td;

    /*
     * Guard check and stack check (if in the same stack page).  We must
     * also wait for any action pending on remote cpus, which we do by
     * checking the magic number and yielding in a loop.
     */
    ref = _ref;
#ifdef INVARIANTS
    if ((((intptr_t)ref ^ (intptr_t)&_ref) & ~(intptr_t)PAGE_MASK) == 0)
	KKASSERT((char *)ref > (char *)&_ref);
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1 ||
	     ref->tr_magic == LWKT_TOKREF_MAGIC2);
#endif
    /*
     * Locate and unlink the token.  Interlock with the token's cpureq
     * to give the token away before we release it from our thread list,
     * which allows us to avoid using a critical section.
     */
    gd = mycpu;
    td = gd->gd_curthread;
    for (pref = &td->td_toks; (ref = *pref) != _ref; pref = &ref->tr_next) {
	KKASSERT(ref != NULL);
    }
    tok = ref->tr_tok;
    KKASSERT(tok->t_cpu == gd);
    tok->t_cpu = tok->t_reqcpu;	/* we do not own 'tok' after this */
    *pref = ref->tr_next;	/* note: also removes giveaway interlock */

    /*
     * If we had gotten the token opportunistically and it still happens to
     * be queued to a target cpu, we have to wait for the target cpu
     * to finish processing it.  This does not happen very often and does
     * not need to be optimal.
     */
    while (ref->tr_magic == LWKT_TOKREF_MAGIC2) {
#if defined(MAKE_TOKENS_SPIN)
	crit_enter();
#ifdef SMP
	lwkt_process_ipiq();
#endif
	splz();
	crit_exit();
#elif defined(MAKE_TOKENS_YIELD)
	lwkt_yield();
#else
#error MAKE_TOKENS_XXX ?
#endif
    }
}
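
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a tokref is normally declared on the caller's stack and paired with a
 * matching release, e.g.
 *
 *	lwkt_tokref ilock;
 *
 *	lwkt_gettoken(&ilock, &some_token);
 *	... manipulate the data that 'some_token' serializes ...
 *	lwkt_reltoken(&ilock);
 *
 * 'some_token' stands in for whatever lwkt_token the subsystem declares.
 * lwkt_trytoken() follows the same pattern but may fail and return 0, in
 * which case the reference was not acquired and no release is performed.
 */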

/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we set up the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
    int i;

    for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
	lwkt_token_init(&pool_tokens[i]);
}

lwkt_token_t
lwkt_token_pool_get(void *ptraddr)
{
    int i;

    i = ((int)(intptr_t)ptraddr >> 2) ^ ((int)(intptr_t)ptraddr >> 12);
    return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
}
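
/*
 * Illustrative sketch (hypothetical caller): the pool lets code serialize
 * on a structure for which it only has an address, without embedding a
 * token in the structure itself.  The address is hashed into one of the
 * LWKT_NUM_POOL_TOKENS pool tokens, so unrelated addresses may share a
 * token; that only adds contention, never breaks correctness.
 *
 *	lwkt_tokref ilock;
 *
 *	lwkt_gettoken(&ilock, lwkt_token_pool_get(obj));
 *	... access *obj ...
 *	lwkt_reltoken(&ilock);
 *
 * 'obj' is a placeholder for any pointer-sized address.
 */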

#ifdef SMP

/*
 * This is the receiving side of a remote IPI requesting a token.  If we
 * cannot immediately hand the token off to another cpu we queue it.
 *
 * NOTE!  we 'own' the ref structure, but we only 'own' the token if
 * t_cpu == mycpu.
 */
static void
lwkt_reqtoken_remote(void *data)
{
    lwkt_tokref_t ref = data;
    globaldata_t gd = mycpu;
    lwkt_token_t tok = ref->tr_tok;

    /*
     * We do not have to queue the token if we can give it away
     * immediately.  Otherwise we queue it to our globaldata structure.
     */
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
    if (lwkt_oktogiveaway_token(tok)) {
	if (tok->t_cpu == gd)
	    tok->t_cpu = ref->tr_reqgd;
	cpu_mb1();
	ref->tr_magic = LWKT_TOKREF_MAGIC1;
    } else {
	ref->tr_gdreqnext = gd->gd_tokreqbase;
	gd->gd_tokreqbase = ref;
    }
}

/*
 * Must be called from a critical section.  Satisfy all remote token
 * requests that are pending on our globaldata structure.  The request
 * does not have to be satisfied with a successful change of ownership
 * but we do have to acknowledge that we have completed processing the
 * request by setting the magic number back to MAGIC1.
 *
 * NOTE!  we 'own' the ref structure, but we only 'own' the token if
 * t_cpu == mycpu.
 */
void
lwkt_drain_token_requests(void)
{
    globaldata_t gd = mycpu;
    lwkt_tokref_t ref;

    while ((ref = gd->gd_tokreqbase) != NULL) {
	gd->gd_tokreqbase = ref->tr_gdreqnext;
	KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
	if (ref->tr_tok->t_cpu == gd)
	    ref->tr_tok->t_cpu = ref->tr_reqgd;
	cpu_mb1();
	ref->tr_magic = LWKT_TOKREF_MAGIC1;
    }
}

#endif

/*
 * Initialize the owner and release-to cpu to the current cpu.
 */
void
lwkt_token_init(lwkt_token_t tok)
{
    tok->t_cpu = tok->t_reqcpu = mycpu;
}
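
/*
 * Illustrative sketch (hypothetical names): a subsystem normally embeds a
 * token in its own long-lived structure and initializes it once before use:
 *
 *	struct mystruct {
 *		lwkt_token	ms_token;
 *		...
 *	};
 *
 *	lwkt_token_init(&ms->ms_token);
 *
 * after which threads serialize through lwkt_gettoken()/lwkt_reltoken()
 * on &ms->ms_token.
 */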

void
lwkt_token_uninit(lwkt_token_t tok)
{
    /* empty */
}