1 /* 2 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * $DragonFly: src/sys/kern/lwkt_token.c,v 1.15 2005/06/03 23:57:32 dillon Exp $ 35 */ 36 37 #ifdef _KERNEL 38 39 #include <sys/param.h> 40 #include <sys/systm.h> 41 #include <sys/kernel.h> 42 #include <sys/proc.h> 43 #include <sys/rtprio.h> 44 #include <sys/queue.h> 45 #include <sys/thread2.h> 46 #include <sys/sysctl.h> 47 #include <sys/kthread.h> 48 #include <machine/cpu.h> 49 #include <sys/lock.h> 50 #include <sys/caps.h> 51 52 #include <vm/vm.h> 53 #include <vm/vm_param.h> 54 #include <vm/vm_kern.h> 55 #include <vm/vm_object.h> 56 #include <vm/vm_page.h> 57 #include <vm/vm_map.h> 58 #include <vm/vm_pager.h> 59 #include <vm/vm_extern.h> 60 #include <vm/vm_zone.h> 61 62 #include <machine/stdarg.h> 63 #include <machine/ipl.h> 64 #include <machine/smp.h> 65 66 #define THREAD_STACK (UPAGES * PAGE_SIZE) 67 68 #else 69 70 #include <sys/stdint.h> 71 #include <libcaps/thread.h> 72 #include <sys/thread.h> 73 #include <sys/msgport.h> 74 #include <sys/errno.h> 75 #include <libcaps/globaldata.h> 76 #include <machine/cpufunc.h> 77 #include <sys/thread2.h> 78 #include <sys/msgport2.h> 79 #include <stdio.h> 80 #include <stdlib.h> 81 #include <string.h> 82 #include <machine/lock.h> 83 #include <machine/cpu.h> 84 85 #endif 86 87 #define MAKE_TOKENS_SPIN 88 /* #define MAKE_TOKENS_YIELD */ 89 90 #ifndef LWKT_NUM_POOL_TOKENS 91 #define LWKT_NUM_POOL_TOKENS 1024 /* power of 2 */ 92 #endif 93 #define LWKT_MASK_POOL_TOKENS (LWKT_NUM_POOL_TOKENS - 1) 94 95 #ifdef INVARIANTS 96 static int token_debug = 0; 97 #endif 98 99 #ifdef SMP 100 static void lwkt_reqtoken_remote(void *data); 101 #endif 102 103 static lwkt_token pool_tokens[LWKT_NUM_POOL_TOKENS]; 104 105 #ifdef _KERNEL 106 107 #ifdef INVARIANTS 108 SYSCTL_INT(_lwkt, OID_AUTO, token_debug, CTLFLAG_RW, &token_debug, 0, ""); 109 #endif 110 111 #endif 112 113 #ifdef SMP 114 115 /* 116 * Determine if we own all the tokens in the token reference list. 117 * Return 1 on success, 0 on failure. 
 *
 * As a side effect, queue requests for tokens we want which are owned
 * by other cpus.  The magic number is used to communicate when the
 * target cpu has processed the request.  Note, however, that the
 * target cpu may not be able to assign the token to us which is why
 * the scheduler must spin.
 */
int
lwkt_chktokens(thread_t td)
{
    globaldata_t gd = td->td_gd;	/* mycpu */
    lwkt_tokref_t refs;			/* iterator over td's token refs */
    globaldata_t dgd;			/* cpu that currently owns a token */
    lwkt_token_t tok;
    int r = 1;				/* assume we own everything */

    /*
     * Scan every token reference held by 'td'.  A token owned by another
     * cpu clears 'r', but we keep scanning so that a request gets queued
     * for each token we are missing, not just the first one.
     */
    for (refs = td->td_toks; refs; refs = refs->tr_next) {
	tok = refs->tr_tok;
	if ((dgd = tok->t_cpu) != gd) {
	    cpu_ccfence();	/* don't let the compiler reload tok->t_cpu */
	    r = 0;

	    /*
	     * Queue a request to the target cpu, exit the loop early if
	     * we are unable to queue the IPI message.  The magic number
	     * flags whether we have a pending ipi request queued or not.
	     * It can be set from MAGIC2 to MAGIC1 by a remote cpu but can
	     * only be set from MAGIC1 to MAGIC2 by our cpu.
	     */
	    if (refs->tr_magic == LWKT_TOKREF_MAGIC1) {
		refs->tr_magic = LWKT_TOKREF_MAGIC2;	/* MP synched slowreq*/
		refs->tr_reqgd = gd;
		tok->t_reqcpu = gd;	/* MP unsynchronized 'fast' req */
		if (lwkt_send_ipiq_nowait(dgd, lwkt_reqtoken_remote, refs)) {
		    /* failed: retract the request so it can be retried */
		    refs->tr_magic = LWKT_TOKREF_MAGIC1;
		    break;
		}
	    }
	}
    }
    return(r);
}

#endif

/*
 * Check if we already own the token.  Return 1 on success, 0 on failure.
 */
int
lwkt_havetoken(lwkt_token_t tok)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    lwkt_tokref_t ref;

    /* walk the current thread's token reference list looking for 'tok' */
    for (ref = td->td_toks; ref; ref = ref->tr_next) {
	if (ref->tr_tok == tok)
	    return(1);
    }
    return(0);
}

/*
 * Check if the specific token *reference* (not just the token) is on the
 * current thread's token list.  Return 1 on success, 0 on failure.
 */
int
lwkt_havetokref(lwkt_tokref_t xref)
{
    globaldata_t gd = mycpu;
    thread_t td = gd->gd_curthread;
    lwkt_tokref_t ref;

    for (ref = td->td_toks; ref; ref = ref->tr_next) {
	if (ref == xref)
	    return(1);
    }
    return(0);
}

#ifdef SMP

/*
 * Returns 1 if it is ok to give a token away, 0 if it is not.
 *
 * A token may not be given away while it is referenced by the current
 * thread or by any thread the current thread has preempted (we walk
 * the td_preempted chain).
 */
static int
lwkt_oktogiveaway_token(lwkt_token_t tok)
{
    globaldata_t gd = mycpu;
    lwkt_tokref_t ref;
    thread_t td;

    for (td = gd->gd_curthread; td; td = td->td_preempted) {
	for (ref = td->td_toks; ref; ref = ref->tr_next) {
	    if (ref->tr_tok == tok)
		return(0);
	}
    }
    return(1);
}

#endif

/*
 * Acquire a serializing token.  Common code for lwkt_gettoken() and
 * lwkt_gettokref().  On return our cpu owns the token referenced by
 * 'ref' and the ref is linked on the current thread's token list.
 */

static __inline
void
_lwkt_gettokref(lwkt_tokref_t ref)
{
    lwkt_token_t tok;
    globaldata_t gd;
    thread_t td;

    gd = mycpu;			/* our cpu */
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
    td = gd->gd_curthread;	/* our thread */

    /*
     * Link the request into our thread's list.  This interlocks against
     * remote requests from other cpus and prevents the token from being
     * given away if our cpu already owns it.  This also allows us to
     * avoid using a critical section.
     *
     * The ordering matters: tr_next must be valid before td_toks is
     * updated, hence the compiler fence between the two stores.
     */
    ref->tr_next = td->td_toks;
    cpu_ccfence();	/* prevent compiler reordering */
    td->td_toks = ref;

    /*
     * If our cpu does not own the token then let the scheduler deal with
     * it.  We are guaranteed to own the tokens on our thread's token
     * list when we are switched back in.
     *
     * Otherwise make sure the token is not held by a thread we are
     * preempting.  If it is, let the scheduler deal with it.
     */
    tok = ref->tr_tok;
#ifdef SMP
    if (tok->t_cpu != gd) {
	/*
	 * Temporarily operate on tokens synchronously.  We have to fix
	 * a number of interlocks and especially the softupdates code to
	 * be able to properly yield.  ZZZ
	 */
#if defined(MAKE_TOKENS_SPIN)
	int x = 40000000;	/* spins between diagnostic printfs */
	int y = 10;		/* diagnostic printfs before panicing */
	crit_enter();
	while (lwkt_chktokens(td) == 0) {
	    /* service incoming IPIs and remote token requests while we spin */
	    lwkt_process_ipiq();
	    lwkt_drain_token_requests();
	    if (--x == 0) {
		x = 40000000;
		printf("CHKTOKEN looping on cpu %d\n", gd->gd_cpuid);
#ifdef _KERNEL
		if (--y == 0)
		    panic("CHKTOKEN looping on cpu %d", gd->gd_cpuid);
#endif
	    }
	    splz();
	}
	crit_exit();
#elif defined(MAKE_TOKENS_YIELD)
	lwkt_yield();
#else
#error MAKE_TOKENS_XXX ?
#endif
	KKASSERT(tok->t_cpu == gd);
    } else /* NOTE CONDITIONAL */
#endif
    if (td->td_preempted) {
	/*
	 * Our cpu owns the token, but a thread we preempted may hold it.
	 * If so, yield back to the scheduler until it is released.
	 */
	while ((td = td->td_preempted) != NULL) {
	    lwkt_tokref_t scan;
	    for (scan = td->td_toks; scan; scan = scan->tr_next) {
		if (scan->tr_tok == tok) {
		    lwkt_yield();
		    KKASSERT(tok->t_cpu == gd);
		    goto breakout;
		}
	    }
	}
breakout: ;
    }
    /* 'td' variable no longer valid due to preempt loop above */
}


/*
 * Attempt to acquire a serializing token.  Common code for
 * lwkt_trytoken() and lwkt_trytokref().  Returns 1 on success and
 * 0 on failure; on failure the ref is unlinked again.  Never blocks.
 */
static __inline
int
_lwkt_trytokref(lwkt_tokref_t ref)
{
    lwkt_token_t tok;
    globaldata_t gd;
    thread_t td;

    gd = mycpu;			/* our cpu */
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1);
    td = gd->gd_curthread;	/* our thread */

    /*
     * Link the request into our thread's list.  This interlocks against
     * remote requests from other cpus and prevents the token from being
     * given away if our cpu already owns it.  This also allows us to
     * avoid using a critical section.
 */
    ref->tr_next = td->td_toks;
    cpu_ccfence();	/* prevent compiler reordering */
    td->td_toks = ref;

    /*
     * If our cpu does not own the token then stop now.
     *
     * Otherwise make sure the token is not held by a thread we are
     * preempting.  If it is, stop.
     */
    tok = ref->tr_tok;
#ifdef SMP
    if (tok->t_cpu != gd) {
	td->td_toks = ref->tr_next;	/* remove ref */
	return(0);
    } else /* NOTE CONDITIONAL */
#endif
    if (td->td_preempted) {
	while ((td = td->td_preempted) != NULL) {
	    lwkt_tokref_t scan;
	    for (scan = td->td_toks; scan; scan = scan->tr_next) {
		if (scan->tr_tok == tok) {
		    /* a preempted thread holds it: back out and fail */
		    td = gd->gd_curthread;	/* our thread */
		    td->td_toks = ref->tr_next;	/* remove ref */
		    return(0);
		}
	    }
	}
    }
    /* 'td' variable no longer valid */
    return(1);
}

/*
 * Acquire a serializing token, initializing 'ref' from 'tok'.
 * Does not return until the token is owned.
 */
void
lwkt_gettoken(lwkt_tokref_t ref, lwkt_token_t tok)
{
    lwkt_tokref_init(ref, tok);
    _lwkt_gettokref(ref);
}

/*
 * Acquire a serializing token using a pre-initialized token reference.
 */
void
lwkt_gettokref(lwkt_tokref_t ref)
{
    _lwkt_gettokref(ref);
}

/*
 * Attempt to acquire a serializing token, initializing 'ref' from
 * 'tok'.  Returns 1 on success, 0 on failure.  Never blocks.
 */
int
lwkt_trytoken(lwkt_tokref_t ref, lwkt_token_t tok)
{
    lwkt_tokref_init(ref, tok);
    return(_lwkt_trytokref(ref));
}

/*
 * Attempt to acquire a serializing token using a pre-initialized
 * token reference.  Returns 1 on success, 0 on failure.  Never blocks.
 */
int
lwkt_trytokref(lwkt_tokref_t ref)
{
    return(_lwkt_trytokref(ref));
}

/*
 * Release a serializing token
 */
void
lwkt_reltoken(lwkt_tokref *_ref)
{
    lwkt_tokref *ref;
    lwkt_tokref **pref;
    lwkt_token_t tok;
    globaldata_t gd;
    thread_t td;

    /*
     * Guard check and stack check (if in the same stack page).  We must
     * also wait for any action pending on remote cpus which we do by
     * checking the magic number and yielding in a loop.
 */
    ref = _ref;
#ifdef INVARIANTS
    /*
     * If 'ref' lives in the same stack page as our frame it must be at
     * a higher address than our local (i.e. in a caller's frame,
     * presuming a downward-growing stack).
     */
    if ((((intptr_t)ref ^ (intptr_t)&_ref) & ~(intptr_t)PAGE_MASK) == 0)
	KKASSERT((char *)ref > (char *)&_ref);
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC1 ||
	     ref->tr_magic == LWKT_TOKREF_MAGIC2);
#endif
    /*
     * Locate and unlink the token.  Interlock with the token's cpureq
     * to give the token away before we release it from our thread list,
     * which allows us to avoid using a critical section.
     */
    gd = mycpu;
    td = gd->gd_curthread;
    for (pref = &td->td_toks; (ref = *pref) != _ref; pref = &ref->tr_next) {
	KKASSERT(ref != NULL);	/* _ref must be on our thread's list */
    }
    tok = ref->tr_tok;
    KKASSERT(tok->t_cpu == gd);
    tok->t_cpu = tok->t_reqcpu;	/* we do not own 'tok' after this */
    *pref = ref->tr_next;	/* note: also removes giveaway interlock */

    /*
     * If we had gotten the token opportunistically and it still happens to
     * be queued to a target cpu, we have to wait for the target cpu
     * to finish processing it.  This does not happen very often and does
     * not need to be optimal.
     */
    while (ref->tr_magic == LWKT_TOKREF_MAGIC2) {
#if defined(MAKE_TOKENS_SPIN)
	crit_enter();
#ifdef SMP
	lwkt_process_ipiq();
#endif
	splz();
	crit_exit();
#elif defined(MAKE_TOKENS_YIELD)
	lwkt_yield();
#else
#error MAKE_TOKENS_XXX ?
#endif
    }
}

/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we setup the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
    int i;

    for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
	lwkt_token_init(&pool_tokens[i]);
}

/*
 * Hash an arbitrary address into the pool token array and return the
 * associated token.  Distinct addresses may map to the same token;
 * the mapping is stable for a given address.
 */
lwkt_token_t
lwkt_token_pool_get(void *ptraddr)
{
    int i;

    /* xor low and mid address bits so nearby structures spread out */
    i = ((int)(intptr_t)ptraddr >> 2) ^ ((int)(intptr_t)ptraddr >> 12);
    return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
}

#ifdef SMP

/*
 * This is the receiving side of a remote IPI requesting a token.  If we
 * cannot immediately hand the token off to another cpu we queue it.
 *
 * NOTE!  we 'own' the ref structure, but we only 'own' the token if
 * t_cpu == mycpu.
 */
static void
lwkt_reqtoken_remote(void *data)
{
    lwkt_tokref_t ref = data;
    globaldata_t gd = mycpu;
    lwkt_token_t tok = ref->tr_tok;

    /*
     * We do not have to queue the token if we can give it away
     * immediately.  Otherwise we queue it to our globaldata structure.
     */
    KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
    if (lwkt_oktogiveaway_token(tok)) {
	if (tok->t_cpu == gd)
	    tok->t_cpu = ref->tr_reqgd;
	cpu_ccfence();	/* prevent compiler reordering */
	ref->tr_magic = LWKT_TOKREF_MAGIC1;	/* ack: request processed */
    } else {
	/* defer; lwkt_drain_token_requests() will service the queue */
	ref->tr_gdreqnext = gd->gd_tokreqbase;
	gd->gd_tokreqbase = ref;
    }
}

/*
 * Must be called from a critical section.  Satisfy all remote token
 * requests that are pending on our globaldata structure.  The request
 * does not have to be satisfied with a successful change of ownership
 * but we do have to acknowledge that we have completed processing the
 * request by setting the magic number back to MAGIC1.
 *
 * NOTE!  we 'own' the ref structure, but we only 'own' the token if
 * t_cpu == mycpu.
 */
void
lwkt_drain_token_requests(void)
{
    globaldata_t gd = mycpu;
    lwkt_tokref_t ref;

    while ((ref = gd->gd_tokreqbase) != NULL) {
	gd->gd_tokreqbase = ref->tr_gdreqnext;
	KKASSERT(ref->tr_magic == LWKT_TOKREF_MAGIC2);
	if (ref->tr_tok->t_cpu == gd)
	    ref->tr_tok->t_cpu = ref->tr_reqgd;	/* hand the token off */
	cpu_ccfence();	/* prevent compiler reordering */
	ref->tr_magic = LWKT_TOKREF_MAGIC1;	/* ack even if not handed off */
    }
}

#endif

/*
 * Initialize the owner and release-to cpu to the current cpu
 * and reset the generation count.
 *
 * NOTE(review): no generation count is touched here -- only t_cpu and
 * t_reqcpu are initialized.  The comment above may be stale; confirm
 * against the lwkt_token structure definition.
 */
void
lwkt_token_init(lwkt_token_t tok)
{
    tok->t_cpu = tok->t_reqcpu = mycpu;
}

/*
 * Deinitialize a token.  Currently a no-op.
 */
void
lwkt_token_uninit(lwkt_token_t tok)
{
    /* empty */
}