10e32b8c5SMatthew Dillon /*
20e32b8c5SMatthew Dillon * Copyright (c) 2019 The DragonFly Project. All rights reserved.
30e32b8c5SMatthew Dillon *
40e32b8c5SMatthew Dillon * This code is derived from software contributed to The DragonFly Project
50e32b8c5SMatthew Dillon * by Matthew Dillon <dillon@backplane.com>
60e32b8c5SMatthew Dillon *
70e32b8c5SMatthew Dillon * Redistribution and use in source and binary forms, with or without
80e32b8c5SMatthew Dillon * modification, are permitted provided that the following conditions
90e32b8c5SMatthew Dillon * are met:
100e32b8c5SMatthew Dillon *
110e32b8c5SMatthew Dillon * 1. Redistributions of source code must retain the above copyright
120e32b8c5SMatthew Dillon * notice, this list of conditions and the following disclaimer.
130e32b8c5SMatthew Dillon * 2. Redistributions in binary form must reproduce the above copyright
140e32b8c5SMatthew Dillon * notice, this list of conditions and the following disclaimer in
150e32b8c5SMatthew Dillon * the documentation and/or other materials provided with the
160e32b8c5SMatthew Dillon * distribution.
170e32b8c5SMatthew Dillon * 3. Neither the name of The DragonFly Project nor the names of its
180e32b8c5SMatthew Dillon * contributors may be used to endorse or promote products derived
190e32b8c5SMatthew Dillon * from this software without specific, prior written permission.
200e32b8c5SMatthew Dillon *
210e32b8c5SMatthew Dillon * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
220e32b8c5SMatthew Dillon * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
230e32b8c5SMatthew Dillon * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
240e32b8c5SMatthew Dillon * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
250e32b8c5SMatthew Dillon * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
260e32b8c5SMatthew Dillon * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
270e32b8c5SMatthew Dillon * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
280e32b8c5SMatthew Dillon * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
290e32b8c5SMatthew Dillon * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
300e32b8c5SMatthew Dillon * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
310e32b8c5SMatthew Dillon * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
320e32b8c5SMatthew Dillon * SUCH DAMAGE.
330e32b8c5SMatthew Dillon */
340e32b8c5SMatthew Dillon #include <sys/cdefs.h>
350e32b8c5SMatthew Dillon
360e32b8c5SMatthew Dillon #include <sys/condvar.h>
370e32b8c5SMatthew Dillon #include <sys/queue.h>
380e32b8c5SMatthew Dillon #include <sys/lock.h>
390e32b8c5SMatthew Dillon
400e32b8c5SMatthew Dillon #include <linux/compiler.h>
410e32b8c5SMatthew Dillon
420e32b8c5SMatthew Dillon #include <linux/atomic.h>
430e32b8c5SMatthew Dillon #include <linux/errno.h>
440e32b8c5SMatthew Dillon #include <linux/kref.h>
456559babbSFrançois Tigeot #include <linux/dma-fence.h>
460e32b8c5SMatthew Dillon #include <linux/sched.h>
470e32b8c5SMatthew Dillon #include <linux/slab.h>
480e32b8c5SMatthew Dillon #include <linux/spinlock.h>
490e32b8c5SMatthew Dillon
/*
 * Called when curthread->td_linux_task is NULL.  We must allocate,
 * initialize, and install a task_struct in td (the current thread).
 *
 * All threads belonging to the same process share a common mm_struct,
 * which is stored as p->p_linux_mm.  This must be allocated, initialized,
 * and installed if necessary.
 */
struct task_struct *
linux_task_alloc(struct thread *td)
{
	struct task_struct *task;
	struct mm_struct *mm;
	struct proc *p;

	/* GFP_KERNEL allocation; zeroed, so all unset fields start at 0/NULL */
	task = kzalloc(sizeof(*task), GFP_KERNEL);
	task->dfly_td = td;
	task->pid = -1;		/* -1 until we know this thread has a proc */
	spin_init(&task->kt_spin, "tspin2");

	if ((p = td->td_proc) != NULL) {
		task->pid = td->td_proc->p_pid;
		if ((mm = p->p_linux_mm) == NULL) {
			/*
			 * Speculatively allocate and initialize an mm_struct
			 * outside the token, then install it under p_token.
			 */
			mm = kzalloc(sizeof(*mm), GFP_KERNEL);
			mm->refs = 1;	/* the proc's own reference */
			lockinit(&mm->mmap_sem, "drmmms", 0, LK_CANRECURSE);
			lwkt_gettoken(&p->p_token);
			if (p->p_linux_mm == NULL) {
				p->p_linux_mm = mm;
			} else {
				/*
				 * Lost the race: another thread installed an
				 * mm first.  Drop ours (refs==1, so it is
				 * freed) and use the winner's.
				 */
				linux_mm_drop(mm);
				mm = p->p_linux_mm;
			}
			lwkt_reltoken(&p->p_token);
		}
		/* Each task holds its own reference on the shared mm */
		task->mm = mm;
		atomic_add_long(&mm->refs, 1);
	}
	td->td_linux_task = task;

	return task;
}
920e32b8c5SMatthew Dillon
930e32b8c5SMatthew Dillon /*
940e32b8c5SMatthew Dillon * Called at thread exit
950e32b8c5SMatthew Dillon */
960e32b8c5SMatthew Dillon void
linux_task_drop(struct thread * td)970e32b8c5SMatthew Dillon linux_task_drop(struct thread *td)
980e32b8c5SMatthew Dillon {
990e32b8c5SMatthew Dillon struct task_struct *task;
1000e32b8c5SMatthew Dillon struct mm_struct *mm;
1010e32b8c5SMatthew Dillon
1020e32b8c5SMatthew Dillon task = td->td_linux_task;
1030e32b8c5SMatthew Dillon td->td_linux_task = NULL;
1040e32b8c5SMatthew Dillon if ((mm = task->mm) != NULL) {
1050e32b8c5SMatthew Dillon atomic_add_long(&mm->refs, -1); /* proc ref always remains */
1060e32b8c5SMatthew Dillon task->mm = NULL;
1070e32b8c5SMatthew Dillon }
1080e32b8c5SMatthew Dillon kfree(task);
1090e32b8c5SMatthew Dillon }
1100e32b8c5SMatthew Dillon
1110e32b8c5SMatthew Dillon void
linux_proc_drop(struct proc * p)1120e32b8c5SMatthew Dillon linux_proc_drop(struct proc *p)
1130e32b8c5SMatthew Dillon {
1140e32b8c5SMatthew Dillon struct mm_struct *mm;
1150e32b8c5SMatthew Dillon
1160e32b8c5SMatthew Dillon if ((mm = p->p_linux_mm) != NULL) {
1170e32b8c5SMatthew Dillon p->p_linux_mm = NULL;
1180e32b8c5SMatthew Dillon linux_mm_drop(mm);
1190e32b8c5SMatthew Dillon }
1200e32b8c5SMatthew Dillon }
1210e32b8c5SMatthew Dillon
1220e32b8c5SMatthew Dillon void
linux_mm_drop(struct mm_struct * mm)1230e32b8c5SMatthew Dillon linux_mm_drop(struct mm_struct *mm)
1240e32b8c5SMatthew Dillon {
1250e32b8c5SMatthew Dillon long refs;
1260e32b8c5SMatthew Dillon
1270e32b8c5SMatthew Dillon refs = atomic_fetchadd_long(&mm->refs, -1);
1280e32b8c5SMatthew Dillon KKASSERT(refs > 0);
1290e32b8c5SMatthew Dillon if (refs == 1) {
1300e32b8c5SMatthew Dillon lockuninit(&mm->mmap_sem);
1310e32b8c5SMatthew Dillon kfree(mm);
1320e32b8c5SMatthew Dillon }
1330e32b8c5SMatthew Dillon }
134