Lines matching defs:td in FreeBSD sys/kern/kern_thread.c (the number at the start of each match is its line in that file)

148 static void thread_unsuspend_one(struct thread *td, struct proc *p,
150 static void thread_free_batched(struct thread *td);
306 tidbatch_add(struct tidbatch *tb, struct thread *td)
311 tb->tab[tb->n] = td->td_tid;
353 tdcountbatch_add(struct tdcountbatch *tb, struct thread *td __unused)
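
Lines 306-353 are the batching helpers used during process teardown: rather than paying one allocator call per thread ID, tidbatch_add buffers IDs in a small fixed array and the batch is flushed in one call when it fills. A minimal userspace sketch of the same pattern, with hypothetical names (idbatch, idbatch_flush):

    #include <stdio.h>
    #include <stddef.h>

    #define IDBATCH_MAX 8

    struct idbatch {
            long    tab[IDBATCH_MAX];
            size_t  n;
    };

    static void
    idbatch_flush(struct idbatch *b)
    {
            /* One expensive call covers every buffered ID. */
            for (size_t i = 0; i < b->n; i++)
                    printf("free id %ld\n", b->tab[i]);
            b->n = 0;
    }

    static void
    idbatch_add(struct idbatch *b, long id)
    {
            if (b->n == IDBATCH_MAX)        /* full: flush before adding */
                    idbatch_flush(b);
            b->tab[b->n++] = id;
    }

Callers flush once more at the end to drain a partial batch, as the kernel's final-pass helper does.
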
384 struct thread *td;
386 td = (struct thread *)mem;
387 TD_SET_STATE(td, TDS_INACTIVE);
388 td->td_lastcpu = td->td_oncpu = NOCPU;
395 td->td_critnest = 1;
396 td->td_lend_user_pri = PRI_MAX;
398 audit_thread_alloc(td);
401 kdtrace_thread_ctor(td);
403 umtx_thread_alloc(td);
404 MPASS(td->td_sel == NULL);
414 struct thread *td;
416 td = (struct thread *)mem;
420 switch (TD_GET_STATE(td)) {
439 audit_thread_free(td);
442 kdtrace_thread_dtor(td);
445 osd_thread_exit(td);
446 ast_kclear(td);
447 seltdfini(td);
456 struct thread *td;
458 td = (struct thread *)mem;
460 td->td_allocdomain = vm_phys_domain(vtophys(td));
461 td->td_sleepqueue = sleepq_alloc();
462 td->td_turnstile = turnstile_alloc();
463 EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
464 umtx_thread_init(td);
465 td->td_kstack = 0;
466 td->td_sel = NULL;
476 struct thread *td;
478 td = (struct thread *)mem;
479 EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
480 turnstile_free(td->td_turnstile);
481 sleepq_free(td->td_sleepqueue);
482 umtx_thread_fini(td);
483 MPASS(td->td_sel == NULL);
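
Lines 384-483 are the four UMA callbacks for the thread zone. The split is deliberate: thread_init/thread_fini run once per backing allocation and build the expensive, reusable pieces (sleepqueue, turnstile), while thread_ctor/thread_dtor run on every uma_zalloc/uma_zfree and only touch cheap per-use state. A userspace sketch of that two-level lifecycle (the obj_* names are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            char    *big_buffer;    /* reusable: init/fini pair */
            int     per_use;        /* per-allocation: ctor/dtor pair */
    };

    static int
    obj_init(struct obj *o)         /* once, when the cache grows */
    {
            o->big_buffer = malloc(4096);
            return (o->big_buffer != NULL);
    }

    static void obj_fini(struct obj *o) { free(o->big_buffer); } /* once, cache shrinks */
    static void obj_ctor(struct obj *o) { o->per_use = 1; }      /* every alloc */
    static void obj_dtor(struct obj *o) { o->per_use = 0; }      /* every free */

    int
    main(void)
    {
            struct obj o;

            if (!obj_init(&o))
                    return (1);
            obj_ctor(&o);           /* object handed out */
            printf("per_use=%d\n", o.per_use);
            obj_dtor(&o);           /* returned to cache; big_buffer survives */
            obj_fini(&o);           /* cache releases the backing memory */
            return (0);
    }

A thread returned to the zone therefore keeps its sleepqueue and turnstile across reuse and only re-runs the cheap ctor work.
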
495 proc_linkup0(struct proc *p, struct thread *td)
498 proc_linkup(p, td);
502 proc_linkup(struct proc *p, struct thread *td)
513 thread_link(td, p);
517 ast_suspend(struct thread *td, int tda __unused)
521 p = td->td_proc;
597 thread_zombie(struct thread *td)
602 tdd = &thread_domain_data[td->td_allocdomain];
605 td->td_zombie = ztd;
607 (uintptr_t *)&ztd, (uintptr_t)td))
617 thread_stash(struct thread *td)
619 atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
620 thread_zombie(td);
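
Lines 597-620: thread_zombie pushes an exiting thread onto a per-domain lock-free LIFO with a compare-and-swap loop (the fcmpset at 605-607); thread_stash additionally drops the owning process's p_exitthreads count first. A C11 sketch of the push loop:

    #include <stdatomic.h>
    #include <stddef.h>

    struct zthread {
            struct zthread *next;           /* stands in for td_zombie */
    };

    static _Atomic(struct zthread *) zombie_head;

    static void
    zombie_push(struct zthread *t)
    {
            struct zthread *old;

            old = atomic_load_explicit(&zombie_head, memory_order_relaxed);
            do {
                    t->next = old;          /* link before publishing */
            } while (!atomic_compare_exchange_weak_explicit(&zombie_head,
                &old, t, memory_order_release, memory_order_relaxed));
    }

The consumer side can take the whole list with a single atomic swap of the head to NULL, which sidesteps the ABA problem that popping single nodes from a lock-free LIFO would raise.
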
781 struct thread *td;
789 td = uma_zalloc(thread_zone, M_WAITOK);
790 KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
791 if (!vm_thread_new(td, pages)) {
792 uma_zfree(thread_zone, td);
797 td->td_tid = tid;
798 bzero(&td->td_sa.args, sizeof(td->td_sa.args));
799 kasan_thread_alloc(td);
800 kmsan_thread_alloc(td);
801 cpu_thread_alloc(td);
802 EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
803 return (td);
807 thread_recycle(struct thread *td, int pages)
809 if (td->td_kstack == 0 || td->td_kstack_pages != pages) {
810 if (td->td_kstack != 0)
811 vm_thread_dispose(td);
812 if (!vm_thread_new(td, pages))
814 cpu_thread_alloc(td);
816 kasan_thread_alloc(td);
817 kmsan_thread_alloc(td);
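
Lines 781-817: thread_alloc always builds a fresh kernel stack, while thread_recycle keeps the existing one when its size still matches the request and reallocates only on mismatch, re-running the per-CPU and sanitizer setup either way. A sketch of the reuse test (the worker/stack names are hypothetical; assumes 4 KiB pages):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdlib.h>

    struct worker {
            void    *stack;
            size_t  stack_pages;
    };

    static bool
    worker_recycle(struct worker *w, size_t pages)
    {
            /* Reuse the old stack only if present and correctly sized. */
            if (w->stack == NULL || w->stack_pages != pages) {
                    free(w->stack);                  /* vm_thread_dispose() */
                    w->stack = malloc(pages * 4096); /* vm_thread_new() */
                    if (w->stack == NULL)
                            return (false);
                    w->stack_pages = pages;
            }
            return (true);
    }
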
825 thread_free_batched(struct thread *td)
828 lock_profile_thread_exit(td);
829 if (td->td_cpuset)
830 cpuset_rel(td->td_cpuset);
831 td->td_cpuset = NULL;
832 cpu_thread_free(td);
833 if (td->td_kstack != 0)
834 vm_thread_dispose(td);
835 callout_drain(&td->td_slpcallout);
839 td->td_tid = -1;
840 kmsan_thread_free(td);
841 uma_zfree(thread_zone, td);
845 thread_free(struct thread *td)
849 EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
850 tid = td->td_tid;
851 thread_free_batched(td);
868 thread_cow_get(struct thread *newtd, struct thread *td)
871 MPASS(td->td_realucred == td->td_ucred);
872 newtd->td_realucred = crcowget(td->td_realucred);
874 newtd->td_limit = lim_hold(td->td_limit);
875 newtd->td_cowgen = td->td_cowgen;
879 thread_cow_free(struct thread *td)
882 if (td->td_realucred != NULL)
883 crcowfree(td);
884 if (td->td_limit != NULL)
885 lim_free(td->td_limit);
889 thread_cow_update(struct thread *td)
895 p = td->td_proc;
899 td->td_cowgen = p->p_cowgen;
908 thread_cow_synced(struct thread *td)
912 p = td->td_proc;
914 MPASS(td->td_cowgen != p->p_cowgen);
915 MPASS(td->td_ucred == p->p_ucred);
916 MPASS(td->td_limit == p->p_limit);
917 td->td_cowgen = p->p_cowgen;
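
Lines 868-917 implement the copy-on-write view of process-wide data: each thread caches references to the process credentials and limits and compares its td_cowgen against p_cowgen; only a generation mismatch forces the locked refresh in thread_cow_update, and thread_cow_synced merely resyncs the counter once the pointers already match. A sketch of the generation scheme (thr/shared are hypothetical; the kernel drives the check from its AST machinery rather than polling):

    #include <pthread.h>
    #include <stdatomic.h>

    struct shared {
            pthread_mutex_t lock;
            atomic_int      cowgen;         /* bumped when cred is replaced */
            const void      *cred;
    };

    struct thr {
            int             cowgen;         /* generation last seen */
            const void      *cred;          /* cached reference */
    };

    static void
    thr_cow_update(struct thr *t, struct shared *s)
    {
            /* Unlocked fast path: the common case is "nothing changed". */
            if (t->cowgen == atomic_load(&s->cowgen))
                    return;
            pthread_mutex_lock(&s->lock);
            t->cred = s->cred;              /* real code moves refcounts */
            t->cowgen = atomic_load(&s->cowgen);
            pthread_mutex_unlock(&s->lock);
    }
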
933 struct thread *td;
937 td = curthread;
938 p = td->td_proc;
945 CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
946 (long)p->p_pid, td->td_name);
948 KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
949 MPASS(td->td_realucred == td->td_ucred);
956 cpu_thread_exit(td);
968 atomic_add_int(&td->td_proc->p_exitthreads, 1);
969 thread_unlink(td);
971 sched_exit_thread(td2, td);
986 PCPU_SET(deadthread, td);
999 if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
1000 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1001 PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
1003 PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
1007 thread_lock(td);
1013 td->td_runtime += runtime;
1014 td->td_incruntime += runtime;
1020 td->td_ru.ru_nvcsw++;
1021 ruxagg_locked(p, td);
1022 rucollect(&p->p_ru, &td->td_ru);
1025 TD_SET_STATE(td, TDS_INACTIVE);
1027 witness_thread_exit(td);
1029 CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
1030 sched_throw(td);
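
Lines 933-1030: before its final switch, thread_exit folds the thread's accumulated runtime and context-switch counts into the process totals under the process locks (1013-1022), marks itself TDS_INACTIVE, and calls sched_throw, which never returns. A sketch of the fold-then-retire accounting step (pstats/tstats are hypothetical):

    #include <pthread.h>
    #include <stdint.h>

    struct pstats {
            pthread_mutex_t lock;
            uint64_t        runtime;        /* process-wide totals */
            uint64_t        nvcsw;
    };

    struct tstats {
            uint64_t        runtime;        /* this thread's share */
            uint64_t        nvcsw;
    };

    static void
    thr_exit_account(struct pstats *p, struct tstats *t)
    {
            pthread_mutex_lock(&p->lock);
            p->runtime += t->runtime;
            p->nvcsw += t->nvcsw + 1;       /* count the final switch, too */
            pthread_mutex_unlock(&p->lock);
    }
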
1042 struct thread *td;
1047 td = FIRST_THREAD_IN_PROC(p);
1049 thread_lock(td);
1050 thread_unlock(td);
1051 lock_profile_thread_exit(td);
1052 cpuset_rel(td->td_cpuset);
1053 td->td_cpuset = NULL;
1054 cpu_thread_clean(td);
1055 thread_cow_free(td);
1056 callout_drain(&td->td_slpcallout);
1066 thread_link(struct thread *td, struct proc *p)
1074 TD_SET_STATE(td, TDS_INACTIVE);
1075 td->td_proc = p;
1076 td->td_flags = TDF_INMEM;
1078 LIST_INIT(&td->td_contested);
1079 LIST_INIT(&td->td_lprof[0]);
1080 LIST_INIT(&td->td_lprof[1]);
1082 SLIST_INIT(&td->td_epochs);
1084 sigqueue_init(&td->td_sigqueue, p);
1085 callout_init(&td->td_slpcallout, 1);
1086 TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
1095 thread_unlink(struct thread *td)
1097 struct proc *p = td->td_proc;
1101 MPASS(SLIST_EMPTY(&td->td_epochs));
1104 TAILQ_REMOVE(&p->p_threads, td, td_plist);
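
Lines 1066-1104: thread_link initializes the per-thread lists, signal queue, and sleep callout, then appends the thread to the process's p_threads tail queue; thread_unlink removes it again. The queue(3) macros behave identically in userspace:

    #include <sys/queue.h>
    #include <stdio.h>

    struct thr {
            int             id;
            TAILQ_ENTRY(thr) t_plist;       /* stands in for td_plist */
    };

    TAILQ_HEAD(thrlist, thr);

    int
    main(void)
    {
            struct thrlist threads = TAILQ_HEAD_INITIALIZER(threads);
            struct thr a = { .id = 1 }, b = { .id = 2 }, *t;

            TAILQ_INSERT_TAIL(&threads, &a, t_plist); /* thread_link() */
            TAILQ_INSERT_TAIL(&threads, &b, t_plist);
            TAILQ_REMOVE(&threads, &a, t_plist);      /* thread_unlink() */
            TAILQ_FOREACH(t, &threads, t_plist)       /* FOREACH_THREAD_IN_PROC() */
                    printf("thr %d\n", t->id);
            return (0);
    }
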
1215 struct thread *td;
1219 td = curthread;
1226 * adjusted to also account for td->td_proc != p. For now
1229 KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
1230 (mode != SINGLE_ALLPROC && td->td_proc == p),
1231 ("mode %d proc %p curproc %p", mode, p, td->td_proc));
1250 if (p->p_singlethread != NULL && p->p_singlethread != td)
1267 p->p_singlethread = td;
1273 if (td2 == td)
1300 thread_suspend_switch(td, p);
1319 sched_relinquish(td);
1337 if (td2 == td)
1341 ("td %p not on boundary", td2));
1343 ("td %p is not suspended", td2));
1355 struct thread *td;
1357 td = curthread;
1358 p = td->td_proc;
1361 (td->td_dbgflags & TDB_SUSPEND) != 0));
1400 struct thread *td;
1403 td = curthread;
1404 p = td->td_proc;
1417 if (p->p_singlethread == td)
1431 if ((td->td_flags & TDF_SBDRY) != 0) {
1434 KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
1437 return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
1445 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1453 (p->p_sysent->sv_thread_detach)(td);
1454 umtx_thread_exit(td);
1455 kern_thr_exit(td);
1469 thread_lock(td);
1474 thread_suspend_one(td);
1477 td->td_flags |= TDF_BOUNDARY;
1506 thread_check_susp(struct thread *td, bool sleep)
1515 if (!td_ast_pending(td, TDA_SUSPEND))
1518 p = td->td_proc;
1523 ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND)))
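
Lines 1506-1523: thread_check_susp is the cheap entry point: it tests the pending-suspension AST flag with no locks held (1515) and falls through to the locked path only when a request is actually pending. The same "unlocked flag, locked slow path" shape in C11 (check_susp and suspend_check_slow are hypothetical stand-ins):

    #include <pthread.h>
    #include <stdatomic.h>

    static atomic_bool susp_pending;
    static pthread_mutex_t susp_lock = PTHREAD_MUTEX_INITIALIZER;

    static int
    suspend_check_slow(void)
    {
            /* ... park, return an errno, or clear the request ... */
            atomic_store(&susp_pending, false);
            return (0);
    }

    static int
    check_susp(void)
    {
            int error;

            /* Fast path: one atomic load, taken on nearly every call. */
            if (!atomic_load_explicit(&susp_pending, memory_order_acquire))
                    return (0);
            pthread_mutex_lock(&susp_lock);
            error = suspend_check_slow();
            pthread_mutex_unlock(&susp_lock);
            return (error);
    }
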
1530 thread_suspend_switch(struct thread *td, struct proc *p)
1533 KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
1540 if (p == td->td_proc) {
1545 thread_lock(td);
1546 ast_unsched_locked(td, TDA_SUSPEND);
1547 TD_SET_SUSPENDED(td);
1548 sched_sleep(td, 0);
1558 thread_suspend_one(struct thread *td)
1562 p = td->td_proc;
1564 THREAD_LOCK_ASSERT(td, MA_OWNED);
1565 KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
1567 ast_unsched_locked(td, TDA_SUSPEND);
1568 TD_SET_SUSPENDED(td);
1569 sched_sleep(td, 0);
1573 thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
1576 THREAD_LOCK_ASSERT(td, MA_OWNED);
1577 KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
1578 TD_CLR_SUSPENDED(td);
1579 td->td_flags &= ~TDF_ALLPROCSUSP;
1580 if (td->td_proc == p) {
1583 if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
1584 td->td_flags &= ~TDF_BOUNDARY;
1588 setrunnable(td, 0);
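
Lines 1530-1588 are the symmetric halves of suspension: thread_suspend_one clears the pending request, marks the thread suspended, and puts it to sleep; thread_unsuspend_one clears the mark, handles the boundary flag, and makes the thread runnable again. A much-simplified condition-variable sketch of the protocol (the kernel marks the target from the requesting thread and talks to the scheduler directly):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static bool suspend_requested;

    static void
    request_suspend(void)                   /* post the request */
    {
            pthread_mutex_lock(&lock);
            suspend_requested = true;
            pthread_mutex_unlock(&lock);
    }

    static void
    suspend_self(void)                      /* target parks at a safe point */
    {
            pthread_mutex_lock(&lock);
            while (suspend_requested)
                    pthread_cond_wait(&cv, &lock);  /* sched_sleep() */
            pthread_mutex_unlock(&lock);
    }

    static void
    resume_one(void)                        /* thread_unsuspend_one() */
    {
            pthread_mutex_lock(&lock);
            suspend_requested = false;
            pthread_cond_broadcast(&cv);    /* setrunnable() */
            pthread_mutex_unlock(&lock);
    }
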
1592 thread_run_flash(struct thread *td)
1596 p = td->td_proc;
1599 if (TD_ON_SLEEPQ(td))
1600 sleepq_remove_nested(td);
1602 thread_lock(td);
1604 THREAD_LOCK_ASSERT(td, MA_OWNED);
1605 KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
1607 TD_CLR_SUSPENDED(td);
1612 setrunnable(td, 0);
1621 struct thread *td;
1626 FOREACH_THREAD_IN_PROC(p, td) {
1627 thread_lock(td);
1628 if (TD_IS_SUSPENDED(td))
1629 thread_unsuspend_one(td, p, true);
1631 thread_unlock(td);
1653 struct thread *td;
1680 FOREACH_THREAD_IN_PROC(p, td) {
1681 thread_lock(td);
1682 if (TD_IS_SUSPENDED(td))
1683 thread_unsuspend_one(td, p, true);
1685 thread_unlock(td);
1710 struct thread *td;
1717 LIST_FOREACH(td, TIDHASH(tid), td_hash) {
1718 if (td->td_tid != tid) {
1722 p = td->td_proc;
1724 td = NULL;
1729 LIST_REMOVE(td, td_hash);
1730 LIST_INSERT_HEAD(TIDHASH(td->td_tid),
1731 td, td_hash);
1741 if (td == NULL)
1744 *tdp = td;
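
Lines 1710-1744: tdfind_hash walks one hash bucket and, on a hit found deep in the chain, moves the entry to the bucket head (1729-1731) so repeated lookups of a hot TID get cheaper; the kernel additionally rate-limits the move behind a run-length threshold and a read-to-write lock upgrade. The move-to-front step with the LIST macros:

    #include <sys/queue.h>
    #include <stddef.h>

    struct thr {
            long            tid;
            LIST_ENTRY(thr) t_hash;         /* stands in for td_hash */
    };

    LIST_HEAD(thrbucket, thr);

    static struct thr *
    bucket_find(struct thrbucket *b, long tid)
    {
            struct thr *t;

            LIST_FOREACH(t, b, t_hash) {
                    if (t->tid != tid)
                            continue;
                    if (t != LIST_FIRST(b)) {   /* move-to-front on a hit */
                            LIST_REMOVE(t, t_hash);
                            LIST_INSERT_HEAD(b, t, t_hash);
                    }
                    return (t);
            }
            return (NULL);
    }
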
1752 struct thread *td;
1754 td = curthread;
1755 if (td->td_tid == tid) {
1756 if (pid != -1 && td->td_proc->p_pid != pid)
1758 PROC_LOCK(td->td_proc);
1759 return (td);
1763 if (!tdfind_hash(tid, pid, &p, &td))
1766 if (td->td_tid != tid) {
1770 if (td->td_proc != p) {
1778 return (td);
1783 tidhash_add(struct thread *td)
1785 rw_wlock(TIDHASHLOCK(td->td_tid));
1786 LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
1787 rw_wunlock(TIDHASHLOCK(td->td_tid));
1791 tidhash_remove(struct thread *td)
1794 rw_wlock(TIDHASHLOCK(td->td_tid));
1795 LIST_REMOVE(td, td_hash);
1796 rw_wunlock(TIDHASHLOCK(td->td_tid));
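
Lines 1783-1796: tidhash_add and tidhash_remove each take the bucket's lock for writing, while lookups (tdfind_hash) take it read-only, so concurrent lookups never serialize against each other. A pthread rwlock sketch of the same shape (the kernel stripes a smaller set of locks across the buckets rather than using one per bucket):

    #include <pthread.h>
    #include <sys/queue.h>

    #define NBUCKETS        64
    #define BUCKET(tid)     ((size_t)(tid) % NBUCKETS)

    struct thr {
            long            tid;
            LIST_ENTRY(thr) t_hash;
    };

    static LIST_HEAD(, thr) hash[NBUCKETS];
    static pthread_rwlock_t hash_lock[NBUCKETS];

    static void
    hash_init(void)
    {
            for (size_t i = 0; i < NBUCKETS; i++)
                    pthread_rwlock_init(&hash_lock[i], NULL);
    }

    static void
    hash_add(struct thr *t)                 /* tidhash_add() */
    {
            pthread_rwlock_wrlock(&hash_lock[BUCKET(t->tid)]);
            LIST_INSERT_HEAD(&hash[BUCKET(t->tid)], t, t_hash);
            pthread_rwlock_unlock(&hash_lock[BUCKET(t->tid)]);
    }

    static void
    hash_remove(struct thr *t)              /* tidhash_remove() */
    {
            pthread_rwlock_wrlock(&hash_lock[BUCKET(t->tid)]);
            LIST_REMOVE(t, t_hash);
            pthread_rwlock_unlock(&hash_lock[BUCKET(t->tid)]);
    }
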