Lines Matching defs:svms

53 #define dynamic_svm_range_dump(svms) \
54 _dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
56 #define dynamic_svm_range_dump(svms) \
57 do { if (0) svm_range_debug_dump(svms); } while (0)
87 * Remove the svm_range from the svms and svm_bo lists and the svms
90 * Context: The caller must hold svms->lock
94 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
105 interval_tree_remove(&prange->it_node, &prange->svms->objects);
111 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
121 * svm_range_add_to_svms - add svm range to svms
124 Add the svm range to the svms interval tree and linked list
126 * Context: The caller must hold svms->lock
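
The Context line at 126 is the locking contract for svm_range_add_to_svms(). A hedged sketch of the expected caller pattern (illustrative, not a verbatim call site):

    mutex_lock(&svms->lock);
    svm_range_add_to_svms(prange);  /* links prange into svms->list and svms->objects */
    mutex_unlock(&svms->lock);
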
130 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
133 list_move_tail(&prange->list, &prange->svms->list);
136 interval_tree_insert(&prange->it_node, &prange->svms->objects);
141 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
142 prange->svms, prange,
213 p = container_of(prange->svms, struct kfd_process, svms);
260 p = container_of(prange->svms, struct kfd_process, svms);
283 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
285 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
313 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
324 p = container_of(svms, struct kfd_process, svms);
333 prange->svms = svms;
347 bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
354 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
385 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
501 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
502 prange->svms, prange->start, prange->last);
551 p = container_of(prange->svms, struct kfd_process, svms);
552 pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
669 p = container_of(prange->svms, struct kfd_process, svms);
684 p = container_of(prange->svms, struct kfd_process, svms);
738 !test_bit(gpuidx, p->svms.bitmap_supported)) {
855 * svm_range_debug_dump - print all range information from svms
856 * @svms: svm range list header
858 debug output of svm range start, end and prefetch location from svms
861 * Context: The caller must hold svms->lock
863 static void svm_range_debug_dump(struct svm_range_list *svms)
868 pr_debug("dump svms 0x%p list\n", svms);
871 list_for_each_entry(prange, &svms->list, list) {
878 pr_debug("dump svms 0x%p interval tree\n", svms);
880 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
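
Lines 878-880 walk svms->objects, the interval tree keyed by a range's page-aligned [start, last]. A self-contained sketch of that traversal with the generic interval-tree iterator, assuming svms->lock is held:

    struct interval_tree_node *node;

    node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
    while (node) {
            struct svm_range *prange;

            prange = container_of(node, struct svm_range, it_node);
            pr_debug("0x%p [0x%lx 0x%lx]\n", prange, prange->start, prange->last);
            node = interval_tree_iter_next(node, 0, ~0ULL);
    }
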
977 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
978 new->svms, new, new->start, start, last);
1018 pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
1019 new->svms, new->start, old->start, old->last, start, last);
1078 struct svm_range_list *svms;
1081 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1089 svms = prange->svms;
1091 *new = svm_range_new(svms, last + 1, old_last, false);
1093 *new = svm_range_new(svms, old_start, start - 1, false);
1147 @p: the process that owns the svms list
1178 pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
1179 prange->svms, prange->start, prange->last, start, last, size);
1345 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1351 p = container_of(prange->svms, struct kfd_process, svms);
1399 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1420 pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
1421 prange->svms, last_start, prange->start + i,
1475 p = container_of(prange->svms, struct kfd_process, svms);
1630 ctx->process = container_of(prange->svms, struct kfd_process, svms);
1687 p = container_of(prange->svms, struct kfd_process, svms);
1713 WRITE_ONCE(p->svms.faulting_task, current);
1717 WRITE_ONCE(p->svms.faulting_task, NULL);
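
Lines 1713/1717 bracket a sleepable fault walk with WRITE_ONCE() so other contexts can observe which task is faulting without taking a lock. A hedged sketch of the pairing (the reader side is illustrative):

    WRITE_ONCE(p->svms.faulting_task, current);
    /* ... hmm range walk that may sleep while faulting pages in ... */
    WRITE_ONCE(p->svms.faulting_task, NULL);

    /* elsewhere, a lockless observer: */
    struct task_struct *task = READ_ONCE(p->svms.faulting_task);
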
1771 * @svms: the svm range list
1778 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1782 flush_work(&svms->deferred_list_work);
1785 if (list_empty(&svms->deferred_range_list))
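
Lines 1782/1785 are the core of svm_range_list_lock_and_flush_work(): flush the deferred work, take the mmap write lock, and retry if new deferred entries slipped in between. A hedged reconstruction of that flush/lock/recheck loop (lock_and_flush is a stand-in name; details may differ from the file):

    static void lock_and_flush(struct svm_range_list *svms, struct mm_struct *mm)
    {
    retry_flush_work:
            flush_work(&svms->deferred_list_work);
            mmap_write_lock(mm);

            if (list_empty(&svms->deferred_range_list))
                    return;                 /* stable: mmap lock held on return */

            mmap_write_unlock(mm);
            goto retry_flush_work;          /* deferred work raced in; flush again */
    }
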
1796 struct svm_range_list *svms;
1804 svms = container_of(dwork, struct svm_range_list, restore_work);
1805 evicted_ranges = atomic_read(&svms->evicted_ranges);
1811 p = container_of(svms, struct kfd_process, svms);
1817 pr_debug("svms 0x%p process mm gone\n", svms);
1822 svm_range_list_lock_and_flush_work(svms, mm);
1823 mutex_lock(&svms->lock);
1825 evicted_ranges = atomic_read(&svms->evicted_ranges);
1827 list_for_each_entry(prange, &svms->list, list) {
1832 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1833 prange->svms, prange, prange->start, prange->last,
1855 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1872 mutex_unlock(&svms->lock);
1879 schedule_delayed_work(&svms->restore_work,
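
Lines 1804-1879 outline the restore worker: sample evicted_ranges, restore every range on svms->list, then clear the counter with atomic_cmpxchg() only if no new eviction raced in, rescheduling otherwise. Condensed into a hedged sketch (the out_reschedule label is illustrative):

    evicted_ranges = atomic_read(&svms->evicted_ranges);
    /* ... validate and map every prange on svms->list ... */
    if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
        evicted_ranges)
            goto out_reschedule;    /* new evictions arrived; run the worker again */
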
1908 struct svm_range_list *svms = prange->svms;
1913 p = container_of(svms, struct kfd_process, svms);
1915 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1916 svms, prange->start, prange->last, start, last);
1942 evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
1946 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
1947 prange->svms, prange->start, prange->last);
1954 pr_debug("schedule to restore svm %p ranges\n", svms);
1955 schedule_delayed_work(&svms->restore_work,
1966 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
1967 prange->svms, start, last);
1989 new = svm_range_new(old->svms, old->start, old->last, false);
2041 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
2054 prange = svm_range_new(svms, start, l, true);
2067 @p: the process whose svms the range is added to
2073 @insert_list: output, the ranges that need to be inserted into svms
2074 @remove_list: output, the ranges that are replaced and need to be removed from svms
2089 * Context: Process context, caller must hold svms->lock
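
Per the kernel-doc at 2067-2089, svm_range_add() reports its decisions through output lists that the caller applies while holding svms->lock. A hedged sketch of consuming those lists (parameter order and the list member used for iteration are approximate, not verbatim):

    LIST_HEAD(update_list);
    LIST_HEAD(insert_list);
    LIST_HEAD(remove_list);
    struct svm_range *prange, *next;

    r = svm_range_add(p, start, size, nattr, attrs,
                      &update_list, &insert_list, &remove_list);
    if (!r) {
            list_for_each_entry_safe(prange, next, &insert_list, list)
                    svm_range_add_to_svms(prange);  /* new/split ranges enter svms */
            list_for_each_entry_safe(prange, next, &remove_list, list)
                    svm_range_unlink(prange);       /* replaced ranges leave svms */
    }
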
2101 struct svm_range_list *svms = &p->svms;
2108 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
2115 node = interval_tree_iter_first(&svms->objects, start, last);
2170 r = svm_range_split_new(svms, start, node->start - 1,
2183 r = svm_range_split_new(svms, start, last,
2214 prange->svms, prange, start, last, prange->start,
2218 interval_tree_remove(&prange->it_node, &prange->svms->objects);
2224 interval_tree_insert(&prange->it_node, &prange->svms->objects);
2229 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2235 svms, prange, prange->start, prange->last);
2239 svms, prange, prange->start, prange->last);
2246 svms, prange, prange->start, prange->last);
2251 svms, prange, prange->start, prange->last);
2256 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2262 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2274 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2281 p = container_of(svms, struct kfd_process, svms);
2284 drain = atomic_read(&svms->drain_pagefaults);
2288 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2293 pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2305 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2307 if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
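
Lines 2284-2307 show the drain loop: sample drain_pagefaults, drain the retry-fault ring of every supported GPU, and restart if more faults were queued meanwhile. A hedged sketch of that loop (the drain body is elided):

    restart:
            drain = atomic_read(&svms->drain_pagefaults);
            if (!drain)
                    return;

            for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
                    /* ... drain the retry-fault interrupt ring for GPU i ... */
            }
            if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
                    goto restart;   /* more faults were queued while draining */
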
2313 struct svm_range_list *svms;
2317 svms = container_of(work, struct svm_range_list, deferred_list_work);
2318 pr_debug("enter svms 0x%p\n", svms);
2320 spin_lock(&svms->deferred_list_lock);
2321 while (!list_empty(&svms->deferred_range_list)) {
2322 prange = list_first_entry(&svms->deferred_range_list,
2324 spin_unlock(&svms->deferred_list_lock);
2336 if (unlikely(atomic_read(&svms->drain_pagefaults))) {
2338 svm_range_drain_retry_fault(svms);
2350 spin_lock(&svms->deferred_list_lock);
2352 spin_unlock(&svms->deferred_list_lock);
2354 mutex_lock(&svms->lock);
2364 svm_range_handle_list_op(svms, pchild, mm);
2368 svm_range_handle_list_op(svms, prange, mm);
2369 mutex_unlock(&svms->lock);
2377 spin_lock(&svms->deferred_list_lock);
2379 spin_unlock(&svms->deferred_list_lock);
2380 pr_debug("exit svms 0x%p\n", svms);
2384 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2387 spin_lock(&svms->deferred_list_lock);
2402 &prange->svms->deferred_range_list);
2406 spin_unlock(&svms->deferred_list_lock);
2409 void schedule_deferred_list_work(struct svm_range_list *svms)
2411 spin_lock(&svms->deferred_list_lock);
2412 if (!list_empty(&svms->deferred_range_list))
2413 schedule_work(&svms->deferred_list_work);
2414 spin_unlock(&svms->deferred_list_lock);
2456 struct svm_range_list *svms;
2465 svms = &p->svms;
2467 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2474 atomic_inc(&svms->drain_pagefaults);
2494 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2496 svm_range_add_list_work(svms, prange, mm,
2498 schedule_deferred_list_work(svms);
2512 * For unmap event, unmap range from GPUs, remove prange from svms in a delayed
2569 * @svms: svm range list header
2573 * Context: The caller must hold svms->lock
2578 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2585 node = interval_tree_iter_first(&svms->objects, addr, addr);
2641 p = container_of(prange->svms, struct kfd_process, svms);
2702 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2711 rb_node = rb_last(&p->svms.objects.rb_root);
2814 prange = svm_range_new(&p->svms, start, last, true);
2848 struct svm_range_list *svms = prange->svms;
2850 spin_lock(&svms->deferred_list_lock);
2853 spin_unlock(&svms->deferred_list_lock);
2856 spin_unlock(&svms->deferred_list_lock);
2859 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2860 svms, prange, prange->start, prange->last);
2865 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2866 svms, prange, prange->start, prange->last);
2918 struct svm_range_list *svms;
2940 svms = &p->svms;
2942 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2944 if (atomic_read(&svms->drain_pagefaults)) {
2961 pr_debug("svms 0x%p failed to get mm\n", svms);
2975 mutex_lock(&svms->lock);
2976 prange = svm_range_from_addr(svms, addr, NULL);
2978 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2979 svms, addr);
2985 mutex_unlock(&svms->lock);
2993 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2994 svms, addr);
3014 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
3015 svms, prange->start, prange->last);
3039 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
3040 svms, prange->start, prange->last);
3045 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
3046 svms, prange->start, prange->last, best_loc,
3076 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
3077 r, svms, prange->start, prange->last);
3084 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
3085 r, svms, prange->start, prange->last);
3093 mutex_unlock(&svms->lock);
3120 mutex_lock(&p->svms.lock);
3122 list_for_each_entry(prange, &p->svms.list, list) {
3159 /* Change xnack mode must be inside svms lock, to avoid race with
3164 mutex_unlock(&p->svms.lock);
3173 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
3175 cancel_delayed_work_sync(&p->svms.restore_work);
3178 flush_work(&p->svms.deferred_list_work);
3184 atomic_inc(&p->svms.drain_pagefaults);
3185 svm_range_drain_retry_fault(&p->svms);
3187 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3193 mutex_destroy(&p->svms.lock);
3195 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
3200 struct svm_range_list *svms = &p->svms;
3203 svms->objects = RB_ROOT_CACHED;
3204 mutex_init(&svms->lock);
3205 INIT_LIST_HEAD(&svms->list);
3206 atomic_set(&svms->evicted_ranges, 0);
3207 atomic_set(&svms->drain_pagefaults, 0);
3208 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3209 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3210 INIT_LIST_HEAD(&svms->deferred_range_list);
3211 INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3212 spin_lock_init(&svms->deferred_list_lock);
3216 bitmap_set(svms->bitmap_supported, i, 1);
3350 p = container_of(prange->svms, struct kfd_process, svms);
3390 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3391 p->xnack_enabled, &p->svms, prange->start, prange->last,
3491 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3534 struct svm_range_list *svms;
3541 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3542 p->pasid, &p->svms, start, start + size - 1, size);
3548 svms = &p->svms;
3552 svm_range_list_lock_and_flush_work(svms, mm);
3561 mutex_lock(&svms->lock);
3567 mutex_unlock(&svms->lock);
3582 prange->svms, prange, prange->start,
3630 dynamic_svm_range_dump(svms);
3632 mutex_unlock(&svms->lock);
3637 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3638 &p->svms, start, start + size - 1, r);
3658 struct svm_range_list *svms;
3668 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3677 flush_work(&p->svms.deferred_list_work);
3714 svms = &p->svms;
3716 mutex_lock(&svms->lock);
3718 node = interval_tree_iter_first(&svms->objects, start, last);
3725 bitmap_copy(bitmap_access, svms->bitmap_supported,
3732 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3733 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
3780 mutex_unlock(&svms->lock);
3825 struct svm_range_list *svms = &p->svms;
3832 if (list_empty(&svms->criu_svm_metadata_list)) {
3846 list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
3912 list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
3931 struct svm_range_list *svms = &p->svms;
3966 list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
3982 struct svm_range_list *svms;
3988 svms = &p->svms;
3989 if (!svms)
3992 mutex_lock(&svms->lock);
3993 list_for_each_entry(prange, &svms->list, list) {
3999 mutex_unlock(&svms->lock);
4043 struct svm_range_list *svms;
4048 svms = &p->svms;
4049 if (!svms)
4089 list_for_each_entry(prange, &svms->list, list) {