Lines matching defs:mm (definitions and uses of the identifier mm in the amdkfd SVM code, drivers/gpu/drm/amd/amdkfd/kfd_svm.c)
109 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
114 mmu_interval_notifier_insert_locked(&prange->notifier, mm,
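
svm_range_add_notifier_locked() (line 109) hooks a range into the core MM via mmu_interval_notifier_insert_locked() (line 114). Below is a minimal sketch of that registration step under assumed names: struct my_range, my_notifier_ops and my_range_add_notifier_locked() are stand-ins for the driver's struct svm_range and its notifier ops, not the real code.

  #include <linux/mmu_notifier.h>
  #include <linux/mm_types.h>

  /* Stand-in for the driver's per-range object (struct svm_range). */
  struct my_range {
          struct mmu_interval_notifier notifier;
          unsigned long start;            /* first byte covered by the range */
          unsigned long length;           /* size of the range in bytes */
  };

  /* Invalidate callback(s), assumed to be defined elsewhere. */
  extern const struct mmu_interval_notifier_ops my_notifier_ops;

  /*
   * The _locked variant requires the caller to already hold mmap_lock for
   * write; afterwards the notifier fires for any CPU-side invalidation of
   * [start, start + length).
   */
  static int my_range_add_notifier_locked(struct mm_struct *mm, struct my_range *r)
  {
          return mmu_interval_notifier_insert_locked(&r->notifier, mm,
                                                     r->start, r->length,
                                                     &my_notifier_ops);
  }
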
395 if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
398 struct mm_struct *mm;
400 mm = svm_bo->eviction_fence->mm;
405 p = kfd_lookup_process_by_mm(mm);
412 mmput(mm);
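
Lines 395-412 show the pattern for using an mm pointer that was stashed earlier (here in the eviction fence): the raw pointer does not keep the mm alive, so it is only dereferenced after mmget_not_zero() succeeds, and the reference is dropped with mmput(). A hedged sketch of that pattern, with a hypothetical my_work_ctx in place of the driver's svm_bo/eviction-fence state:

  #include <linux/sched/mm.h>
  #include <linux/mm_types.h>

  /* Hypothetical worker payload; the driver stores the mm in its eviction fence. */
  struct my_work_ctx {
          struct mm_struct *mm;   /* saved earlier; the owner may have exited */
  };

  static void my_do_work(struct my_work_ctx *ctx)
  {
          struct mm_struct *mm = ctx->mm;

          /*
           * The saved pointer is only a hint: the process may already be
           * exiting.  mmget_not_zero() takes a reference only if mm_users
           * is still non-zero; otherwise the work is skipped.
           */
          if (!mmget_not_zero(mm))
                  return;

          /* ... safe to use mm here (look up the process, take mmap_lock, ...) */

          mmput(mm);              /* pairs with the successful mmget_not_zero() */
  }
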
548 struct mm_struct *mm;
563 mm = get_task_mm(p->lead_thread);
564 if (!mm) {
565 pr_debug("failed to get mm\n");
572 mm,
574 mmput(mm);
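
Lines 548-574 (and the restore and CRIU paths further down) take the mm from the process' lead thread with get_task_mm() and pair it with mmput(). A small sketch of that reference pattern, with a generic task argument standing in for p->lead_thread:

  #include <linux/sched.h>
  #include <linux/sched/mm.h>
  #include <linux/printk.h>
  #include <linux/errno.h>

  /* Sketch only: 'task' stands in for the KFD process' lead_thread. */
  static int my_use_task_mm(struct task_struct *task)
  {
          struct mm_struct *mm;

          mm = get_task_mm(task); /* takes a reference, or NULL if the task has no mm */
          if (!mm) {
                  pr_debug("failed to get mm\n");
                  return -ESRCH;
          }

          /* ... use mm, e.g. take mmap_lock, walk VMAs, register notifiers ... */

          mmput(mm);              /* drop the reference taken by get_task_mm() */
          return 0;
  }
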
1133 svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
1139 pchild->work_item.mm = mm;
1148 * @mm: mm structure
1162 svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
1185 svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
1192 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
1616 static int svm_range_validate_and_map(struct mm_struct *mm,
1707 vma = vma_lookup(mm, addr);
1772 * @mm: the mm structure
1779 struct mm_struct *mm)
1783 mmap_write_lock(mm);
1787 mmap_write_unlock(mm);
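
Lines 1779-1787 belong to svm_range_list_lock_and_flush_work(), which returns with mmap_lock held for write only once the deferred range work has drained. A sketch of that flush-and-retry loop under assumed names (my_range_list, deferred_work and deferred_list stand in for the driver's svm_range_list fields):

  #include <linux/mmap_lock.h>
  #include <linux/workqueue.h>
  #include <linux/list.h>

  /* Hypothetical range-list object; the driver's is struct svm_range_list. */
  struct my_range_list {
          struct work_struct deferred_work;
          struct list_head deferred_list;
  };

  /*
   * Flushing the deferred work may itself need mmap_lock, so it has to run
   * with the lock dropped; the check is then retried until the list stays
   * empty.  On return, mmap_lock is held for write and the caller must
   * eventually call mmap_write_unlock().
   */
  static void my_lock_and_flush_work(struct my_range_list *rl, struct mm_struct *mm)
  {
  retry:
          flush_work(&rl->deferred_work);
          mmap_write_lock(mm);

          if (list_empty(&rl->deferred_list))
                  return;         /* lock stays held for write */

          mmap_write_unlock(mm);
          goto retry;
  }
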
1799 struct mm_struct *mm;
1814 /* Keep mm reference when svm_range_validate_and_map ranges */
1815 mm = get_task_mm(p->lead_thread);
1816 if (!mm) {
1817 pr_debug("svms 0x%p process mm gone\n", svms);
1822 svm_range_list_lock_and_flush_work(svms, mm);
1841 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
1861 r = kgd2kfd_resume_mm(mm);
1873 mmap_write_unlock(mm);
1882 kfd_smi_event_queue_restore_rescheduled(mm);
1884 mmput(mm);
1890 * @mm: current process mm_struct
1904 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1950 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
2201 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2225 svm_range_add_notifier_locked(mm, prange);
2230 struct mm_struct *mm)
2247 svm_range_update_notifier_and_interval_tree(mm, prange);
2252 svm_range_update_notifier_and_interval_tree(mm, prange);
2259 svm_range_add_notifier_locked(mm, prange);
2265 svm_range_add_notifier_locked(mm, prange);
2315 struct mm_struct *mm;
2329 mm = prange->work_item.mm;
2331 mmap_write_lock(mm);
2337 mmap_write_unlock(mm);
2364 svm_range_handle_list_op(svms, pchild, mm);
2368 svm_range_handle_list_op(svms, prange, mm);
2370 mmap_write_unlock(mm);
2373 * last mm refcount, schedule release work to avoid circular locking
2375 mmput_async(mm);
2385 struct mm_struct *mm, enum svm_work_list_ops op)
2391 WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
2399 mmget(mm);
2400 prange->work_item.mm = mm;
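
Lines 2315-2400 pair mmget() in svm_range_add_list_work() with mmput_async() at the end of the deferred worker; the comment at line 2373 notes that dropping the last mm refcount from the worker is avoided by scheduling the release, to prevent circular locking. A sketch of that hand-off, with hypothetical my_work_item/my_add_list_work/my_deferred_work names:

  #include <linux/kernel.h>
  #include <linux/sched/mm.h>
  #include <linux/mmap_lock.h>
  #include <linux/workqueue.h>

  /* Hypothetical deferred work item; the driver embeds its state in struct svm_range. */
  struct my_work_item {
          struct work_struct work;
          struct mm_struct *mm;
  };

  /* Queuing side: pin the mm so it cannot go away before the worker runs. */
  static void my_add_list_work(struct my_work_item *wi, struct mm_struct *mm)
  {
          mmget(mm);              /* the reference is handed over to the worker */
          wi->mm = mm;
          schedule_work(&wi->work);
  }

  /* Worker side: use the mm, then drop the reference asynchronously. */
  static void my_deferred_work(struct work_struct *work)
  {
          struct my_work_item *wi = container_of(work, struct my_work_item, work);
          struct mm_struct *mm = wi->mm;

          mmap_write_lock(mm);
          /* ... process the deferred range operations ... */
          mmap_write_unlock(mm);

          /*
           * A plain mmput() here could drop the last mm reference and run the
           * full mm release from this worker; mmput_async() instead schedules
           * the release to a separate work item, avoiding circular locking
           * (cf. the comment at line 2373).
           */
          mmput_async(mm);
  }
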
2418 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2440 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2441 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2443 svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2445 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2452 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2462 p = kfd_lookup_process_by_mm(mm);
2484 svm_range_unmap_split(mm, prange, pchild, start, last);
2491 svm_range_unmap_split(mm, prange, prange, start, last);
2494 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2496 svm_range_add_list_work(svms, prange, mm,
2534 if (!mmget_not_zero(mni->mm))
2554 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2557 svm_range_evict(prange, mni->mm, start, last, range->event);
2562 mmput(mni->mm);
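
Lines 2534-2562 are the body of the MMU interval notifier invalidate callback: it bails out unless mmget_not_zero(mni->mm) succeeds, distinguishes MMU_NOTIFY_UNMAP from other events, and drops the reference with mmput(). A skeleton of such a callback follows; my_invalidate and my_notifier_ops are illustrative names, and the actual range handling is elided.

  #include <linux/mmu_notifier.h>
  #include <linux/sched/mm.h>

  static bool my_invalidate(struct mmu_interval_notifier *mni,
                            const struct mmu_notifier_range *range,
                            unsigned long cur_seq)
  {
          /* The driver recovers its range object with container_of(mni, ...). */

          /* The mm may be exiting; only proceed if a reference can be taken. */
          if (!mmget_not_zero(mni->mm))
                  return true;

          mmu_interval_set_seq(mni, cur_seq);     /* mark cached mappings stale */

          if (range->event == MMU_NOTIFY_UNMAP) {
                  /* CPU mapping is gone for good: tear the range down
                   * (cf. svm_range_unmap_from_cpu(), line 2452). */
          } else {
                  /* Temporary invalidation: stop GPU access / evict
                   * (cf. svm_range_evict(), line 1904). */
          }

          mmput(mni->mm);
          return true;
  }

  static const struct mmu_interval_notifier_ops my_notifier_ops = {
          .invalidate = my_invalidate,
  };
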
2689 vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
2786 struct mm_struct *mm,
2829 svm_range_add_notifier_locked(mm, prange);
2917 struct mm_struct *mm = NULL;
2959 mm = get_task_mm(p->lead_thread);
2960 if (!mm) {
2961 pr_debug("svms 0x%p failed to get mm\n", svms);
2973 mmap_read_lock(mm);
2986 mmap_read_unlock(mm);
2987 mmap_write_lock(mm);
2991 prange = svm_range_create_unregistered_range(node, p, mm, addr);
2995 mmap_write_downgrade(mm);
3001 mmap_write_downgrade(mm);
3023 vma = vma_lookup(mm, addr << PAGE_SHIFT);
3055 r = svm_migrate_to_vram(prange, best_loc, mm,
3064 r = svm_migrate_vram_to_ram(prange, mm,
3071 r = svm_migrate_vram_to_ram(prange, mm,
3082 r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
3094 mmap_read_unlock(mm);
3098 mmput(mm);
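
The fault-recovery path (lines 2917-3098) takes the mm from the lead thread, walks it under mmap_read_lock(), and uses vma_lookup() (line 3023) to confirm the faulting address is still mapped before migrating or revalidating. A minimal sketch of that address check; my_check_fault_addr is a made-up helper, and the real code works with page-frame addresses (addr << PAGE_SHIFT):

  #include <linux/mm.h>
  #include <linux/mmap_lock.h>
  #include <linux/errno.h>

  static int my_check_fault_addr(struct mm_struct *mm, unsigned long addr)
  {
          struct vm_area_struct *vma;
          int r = 0;

          mmap_read_lock(mm);
          vma = vma_lookup(mm, addr);     /* exact lookup: NULL if addr is unmapped */
          if (!vma || !(vma->vm_flags & VM_READ))
                  r = -EFAULT;            /* not a valid, readable mapping */
          mmap_read_unlock(mm);

          return r;
  }
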
3182 * not find kfd process and take mm lock to recover fault.
3304 vma = vma_lookup(p->mm, start);
3398 * @mm: current process mm_struct
3422 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3436 r = svm_migrate_vram_to_ram(prange, mm,
3442 r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
3468 struct mm_struct *mm;
3473 if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
3474 mm = svm_bo->eviction_fence->mm;
3480 mmap_read_lock(mm);
3496 r = svm_migrate_vram_to_ram(prange, mm,
3513 mmap_read_unlock(mm);
3514 mmput(mm);
3526 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3552 svm_range_list_lock_and_flush_work(svms, mm);
3557 mmap_write_unlock(mm);
3568 mmap_write_unlock(mm);
3574 svm_range_add_notifier_locked(mm, prange);
3589 mmap_write_downgrade(mm);
3600 r = svm_range_trigger_migration(mm, prange, &migrated);
3619 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
3633 mmap_read_unlock(mm);
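
svm_range_set_attr() (lines 3526-3633) updates the range list with mmap_lock held for write (taken via svm_range_list_lock_and_flush_work(), line 3552), then calls mmap_write_downgrade() (line 3589) so the slower migration and validate-and-map phase runs under the read lock, and finishes with mmap_read_unlock(). The shape of that locking, with the driver-specific work elided:

  #include <linux/mmap_lock.h>

  /*
   * Sketch only: the write-locked phase covers interval-tree and notifier
   * updates; downgrading instead of unlock+relock keeps the address space
   * stable across the transition to the read-mostly phase.
   */
  static void my_set_attr(struct mm_struct *mm)
  {
          mmap_write_lock(mm);

          /* ... insert/split ranges, register notifiers (write lock needed) ... */

          mmap_write_downgrade(mm);       /* now held for read */

          /* ... trigger migrations, (re)validate and map (read lock suffices) ... */

          mmap_read_unlock(mm);           /* releases the lock taken above */
  }
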
3644 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3679 mmap_read_lock(mm);
3681 mmap_read_unlock(mm);
3830 struct mm_struct *mm;
3837 mm = get_task_mm(p->lead_thread);
3838 if (!mm) {
3839 pr_err("failed to get mm for the target process\n");
3900 ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
3918 mmput(mm);
4046 struct mm_struct *mm;
4052 mm = get_task_mm(p->lead_thread);
4053 if (!mm) {
4054 pr_err("failed to get mm for the target process\n");
4100 ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
4126 mmput(mm);
4134 struct mm_struct *mm = current->mm;
4142 r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
4145 r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);