Lines Matching defs:prange
85 * @prange: svm range structure to be removed
92 static void svm_range_unlink(struct svm_range *prange)
94 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
95 prange, prange->start, prange->last);
97 if (prange->svm_bo) {
98 spin_lock(&prange->svm_bo->list_lock);
99 list_del(&prange->svm_bo_list);
100 spin_unlock(&prange->svm_bo->list_lock);
103 list_del(&prange->list);
104 if (prange->it_node.start != 0 && prange->it_node.last != 0)
105 interval_tree_remove(&prange->it_node, &prange->svms->objects);
109 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
111 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
112 prange, prange->start, prange->last);
114 mmu_interval_notifier_insert_locked(&prange->notifier, mm,
115 prange->start << PAGE_SHIFT,
116 prange->npages << PAGE_SHIFT,
122 * @prange: svm range structure to be added
128 static void svm_range_add_to_svms(struct svm_range *prange)
130 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
131 prange, prange->start, prange->last);
133 list_move_tail(&prange->list, &prange->svms->list);
134 prange->it_node.start = prange->start;
135 prange->it_node.last = prange->last;
136 interval_tree_insert(&prange->it_node, &prange->svms->objects);
139 static void svm_range_remove_notifier(struct svm_range *prange)
141 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
142 prange->svms, prange,
143 prange->notifier.interval_tree.start >> PAGE_SHIFT,
144 prange->notifier.interval_tree.last >> PAGE_SHIFT);
146 if (prange->notifier.interval_tree.start != 0 &&
147 prange->notifier.interval_tree.last != 0)
148 mmu_interval_notifier_remove(&prange->notifier);
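Note: the interval-tree node filled in at lines 134-136 is keyed in page units (prange->start/prange->last), while the MMU notifier registered at lines 114-116 takes a byte address and length, hence the << PAGE_SHIFT there and the >> PAGE_SHIFT in the debug print at lines 143-144. A minimal standalone sketch of that unit conversion (PAGE_SHIFT hard-coded to 12 purely for illustration; the real value is architecture dependent):

#include <stdio.h>

#define PAGE_SHIFT 12 /* illustrative only; the kernel value is per-arch */

int main(void)
{
        /* inclusive page-indexed interval, as kept in prange->start/last */
        unsigned long long start = 0x100, last = 0x1ff;
        unsigned long long npages = last - start + 1;

        /* byte-addressed start and length handed to the MMU notifier */
        unsigned long long addr = start << PAGE_SHIFT;
        unsigned long long length = npages << PAGE_SHIFT;

        printf("pages [0x%llx 0x%llx] -> addr 0x%llx length 0x%llx\n",
               start, last, addr, length);
        return 0;
}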
159 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
164 dma_addr_t *addr = prange->dma_addr[gpuidx];
170 addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
173 prange->dma_addr[gpuidx] = addr;
183 struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
205 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
213 p = container_of(prange->svms, struct kfd_process, svms);
225 r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
252 void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma)
260 p = container_of(prange->svms, struct kfd_process, svms);
263 dma_addr = prange->dma_addr[gpuidx];
274 svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
276 prange->dma_addr[gpuidx] = NULL;
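Note: prange->dma_addr is a per-GPU table of per-page DMA addresses, allocated lazily on first map (kvcalloc at line 170) and released one device at a time (lines 263-276). A user-space model of that lazy table is sketched below; the names range_dma, range_dma_get and range_dma_put are hypothetical, and NUM_GPUS merely stands in for the kernel's MAX_GPU_INSTANCE:

#include <stdio.h>
#include <stdlib.h>

#define NUM_GPUS 8                       /* stands in for MAX_GPU_INSTANCE */
typedef unsigned long long dma_addr_t;   /* stand-in for the kernel type */

struct range_dma {
        unsigned long npages;
        dma_addr_t *dma_addr[NUM_GPUS];  /* lazily allocated per GPU */
};

/* allocate the per-page address array for one GPU on first use */
dma_addr_t *range_dma_get(struct range_dma *r, int gpuidx)
{
        if (!r->dma_addr[gpuidx])
                r->dma_addr[gpuidx] = calloc(r->npages, sizeof(dma_addr_t));
        return r->dma_addr[gpuidx];
}

/* drop one GPU's array, like svm_range_free_dma_mappings does per device */
void range_dma_put(struct range_dma *r, int gpuidx)
{
        free(r->dma_addr[gpuidx]);
        r->dma_addr[gpuidx] = NULL;
}

int main(void)
{
        struct range_dma r = { .npages = 512 };
        dma_addr_t *addrs = range_dma_get(&r, 0);

        if (addrs)
                addrs[0] = 0x1000;       /* pretend the first page was mapped */
        range_dma_put(&r, 0);
        printf("table released: %p\n", (void *)r.dma_addr[0]);
        return 0;
}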
280 static void svm_range_free(struct svm_range *prange, bool do_unmap)
282 uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
283 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
285 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
286 prange->start, prange->last);
288 svm_range_vram_node_free(prange);
289 svm_range_free_dma_mappings(prange, do_unmap);
292 pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
296 mutex_destroy(&prange->lock);
297 mutex_destroy(&prange->migrate_mutex);
298 kfree(prange);
317 struct svm_range *prange;
320 prange = kzalloc(sizeof(*prange), GFP_KERNEL);
321 if (!prange)
329 kfree(prange);
332 prange->npages = size;
333 prange->svms = svms;
334 prange->start = start;
335 prange->last = last;
336 INIT_LIST_HEAD(&prange->list);
337 INIT_LIST_HEAD(&prange->update_list);
338 INIT_LIST_HEAD(&prange->svm_bo_list);
339 INIT_LIST_HEAD(&prange->deferred_list);
340 INIT_LIST_HEAD(&prange->child_list);
341 atomic_set(&prange->invalid, 0);
342 prange->validate_timestamp = 0;
343 mutex_init(&prange->migrate_mutex);
344 mutex_init(&prange->lock);
347 bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
350 svm_range_set_default_attributes(&prange->preferred_loc,
351 &prange->prefetch_loc,
352 &prange->granularity, &prange->flags);
356 return prange;
376 struct svm_range *prange =
382 list_del_init(&prange->svm_bo_list);
385 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
386 prange->start, prange->last);
387 mutex_lock(&prange->lock);
388 prange->svm_bo = NULL;
389 mutex_unlock(&prange->lock);
453 svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
455 mutex_lock(&prange->lock);
456 if (!prange->svm_bo) {
457 mutex_unlock(&prange->lock);
460 if (prange->ttm_res) {
462 mutex_unlock(&prange->lock);
465 if (svm_bo_ref_unless_zero(prange->svm_bo)) {
471 if (prange->svm_bo->node != node) {
472 mutex_unlock(&prange->lock);
474 spin_lock(&prange->svm_bo->list_lock);
475 list_del_init(&prange->svm_bo_list);
476 spin_unlock(&prange->svm_bo->list_lock);
478 svm_range_bo_unref(prange->svm_bo);
481 if (READ_ONCE(prange->svm_bo->evicting)) {
487 mutex_unlock(&prange->lock);
488 svm_bo = prange->svm_bo;
490 svm_range_bo_unref(prange->svm_bo);
500 mutex_unlock(&prange->lock);
502 prange->svms, prange->start, prange->last);
504 prange->ttm_res = prange->svm_bo->bo->tbo.resource;
509 mutex_unlock(&prange->lock);
514 * its range list and set prange->svm_bo to null. After this,
517 while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
539 svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
551 p = container_of(prange->svms, struct kfd_process, svms);
552 pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
553 prange->start, prange->last);
555 if (svm_range_validate_svm_bo(node, prange))
578 bp.size = prange->npages * PAGE_SIZE;
626 prange->svm_bo = svm_bo;
627 prange->ttm_res = bo->tbo.resource;
628 prange->offset = 0;
631 list_add(&prange->svm_bo_list, &svm_bo->range_list);
634 pdd = svm_range_get_pdd_by_node(prange, node);
645 prange->ttm_res = NULL;
650 void svm_range_vram_node_free(struct svm_range *prange)
652 /* serialize prange->svm_bo unref */
653 mutex_lock(&prange->lock);
654 /* prange->svm_bo has not been unreferenced yet */
655 if (prange->ttm_res) {
656 prange->ttm_res = NULL;
657 mutex_unlock(&prange->lock);
658 svm_range_bo_unref(prange->svm_bo);
660 mutex_unlock(&prange->lock);
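Note: lines 650-660 follow a common pattern: test and clear the resource pointer while holding prange->lock, then drop the BO reference only after unlocking, so the potentially heavy release work never runs under the range lock. A small pthread-based model of that pattern (struct and function names are hypothetical, and a plain counter stands in for the kernel's refcounting):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct shared_bo {
        int refcount;                 /* plain counter; the kernel uses kref */
};

struct range {
        pthread_mutex_t lock;
        struct shared_bo *bo;         /* plays the role of prange->svm_bo */
        void *res;                    /* plays the role of prange->ttm_res */
};

/* drop one reference; free the object when the last one goes away */
void bo_unref(struct shared_bo *bo)
{
        if (bo && --bo->refcount == 0)
                free(bo);
}

void range_vram_node_free(struct range *r)
{
        pthread_mutex_lock(&r->lock);
        if (r->res) {
                r->res = NULL;                /* clear under the lock */
                pthread_mutex_unlock(&r->lock);
                bo_unref(r->bo);              /* heavy release runs unlocked */
                return;
        }
        pthread_mutex_unlock(&r->lock);
}

int main(void)
{
        struct shared_bo *bo = calloc(1, sizeof(*bo));
        struct range r = { .lock = PTHREAD_MUTEX_INITIALIZER, .bo = bo,
                           .res = (void *)0x1 };

        bo->refcount = 1;
        range_vram_node_free(&r);
        printf("res cleared: %p\n", r.res);
        return 0;
}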
664 svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
669 p = container_of(prange->svms, struct kfd_process, svms);
680 svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
684 p = container_of(prange->svms, struct kfd_process, svms);
748 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
758 prange->preferred_loc = attrs[i].value;
761 prange->prefetch_loc = attrs[i].value;
772 bitmap_clear(prange->bitmap_access, gpuidx, 1);
773 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
775 bitmap_set(prange->bitmap_access, gpuidx, 1);
776 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
778 bitmap_clear(prange->bitmap_access, gpuidx, 1);
779 bitmap_set(prange->bitmap_aip, gpuidx, 1);
784 prange->flags |= attrs[i].value;
788 prange->flags &= ~attrs[i].value;
791 prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
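Note: lines 772-779 keep bitmap_access and bitmap_aip mutually exclusive per GPU: "no access" clears both bits, "access" sets only the first, "access in place" sets only the second. A standalone model of that three-state encoding over two bitmasks (names local to the sketch):

#include <assert.h>
#include <stdio.h>

enum access_type { NO_ACCESS, ACCESS, ACCESS_IN_PLACE };

struct access_bits {
        unsigned long access;   /* plays the role of prange->bitmap_access */
        unsigned long aip;      /* plays the role of prange->bitmap_aip */
};

/* encode one GPU's access type into the two exclusive bitmaps */
void set_access(struct access_bits *b, int gpuidx, enum access_type t)
{
        unsigned long bit = 1UL << gpuidx;

        b->access &= ~bit;
        b->aip &= ~bit;
        if (t == ACCESS)
                b->access |= bit;
        else if (t == ACCESS_IN_PLACE)
                b->aip |= bit;
}

int main(void)
{
        struct access_bits b = { 0, 0 };

        set_access(&b, 2, ACCESS_IN_PLACE);
        set_access(&b, 5, ACCESS);
        set_access(&b, 2, NO_ACCESS);

        assert(!(b.access & b.aip));        /* never both set for one GPU */
        printf("access=0x%lx aip=0x%lx\n", b.access, b.aip);
        return 0;
}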
800 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
809 if (prange->preferred_loc != attrs[i].value)
823 if (test_bit(gpuidx, prange->bitmap_access) ||
824 test_bit(gpuidx, prange->bitmap_aip))
827 if (!test_bit(gpuidx, prange->bitmap_access))
830 if (!test_bit(gpuidx, prange->bitmap_aip))
835 if ((prange->flags & attrs[i].value) != attrs[i].value)
839 if ((prange->flags & attrs[i].value) != 0)
843 if (prange->granularity != attrs[i].value)
866 struct svm_range *prange;
871 list_for_each_entry(prange, &svms->list, list) {
873 prange, prange->start, prange->npages,
874 prange->start + prange->npages - 1,
875 prange->actual_loc);
882 prange = container_of(node, struct svm_range, it_node);
884 prange, prange->start, prange->npages,
885 prange->start + prange->npages - 1,
886 prange->actual_loc);
977 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
1055 * @prange: the svm range to split
1061 * case 1: if start == prange->start
1062 * prange ==> prange[start, last]
1063 * new range [last + 1, prange->last]
1065 * case 2: if last == prange->last
1066 * prange ==> prange[start, last]
1067 * new range [prange->start, start - 1]
1073 svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
1076 uint64_t old_start = prange->start;
1077 uint64_t old_last = prange->last;
1081 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1089 svms = prange->svms;
1097 r = svm_range_split_adjust(*new, prange, start, last);
1109 svm_range_split_tail(struct svm_range *prange,
1113 int r = svm_range_split(prange, prange->start, new_last, &tail);
1121 svm_range_split_head(struct svm_range *prange,
1125 int r = svm_range_split(prange, new_start, prange->last, &head);
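Note: per the comment at lines 1061-1067, svm_range_split() keeps [start, last] in the existing prange and returns the cut-off remainder in a new range, which is how split_tail() yields the trailing piece and split_head() the leading one. A standalone sketch of just that boundary arithmetic on inclusive page numbers (only the two supported cases are modeled):

#include <assert.h>
#include <stdio.h>

struct span { unsigned long start, last; };   /* inclusive page interval */

/*
 * Keep [start, last] in *old and return the cut-off remainder, mirroring
 * the two cases in the svm_range_split() comment above.
 */
struct span span_split(struct span *old, unsigned long start, unsigned long last)
{
        struct span rest;

        if (start == old->start)
                rest = (struct span){ last + 1, old->last };   /* tail piece */
        else
                rest = (struct span){ old->start, start - 1 }; /* head piece */

        old->start = start;
        old->last = last;
        return rest;
}

int main(void)
{
        struct span r = { 0x100, 0x1ff };
        struct span tail = span_split(&r, 0x100, 0x17f);  /* like split_tail */

        assert(r.start == 0x100 && r.last == 0x17f);
        assert(tail.start == 0x180 && tail.last == 0x1ff);
        printf("kept [0x%lx 0x%lx] new [0x%lx 0x%lx]\n",
               r.start, r.last, tail.start, tail.last);
        return 0;
}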
1133 svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
1136 pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
1137 pchild, pchild->start, pchild->last, prange, op);
1141 list_add_tail(&pchild->child_list, &prange->child_list);
1149 * @addr: the vm fault address in pages, to split the prange
1150 * @parent: parent range if prange is from child list
1151 * @prange: prange to split
1153 * Trims @prange to be a single aligned block of prange->granularity if
1156 * Context: caller must hold mmap_read_lock and prange->lock
1164 struct svm_range *prange)
1174 size = 1UL << prange->granularity;
1179 prange->svms, prange->start, prange->last, start, last, size);
1181 if (start > prange->start) {
1182 r = svm_range_split(prange, start, prange->last, &head);
1188 if (last < prange->last) {
1189 r = svm_range_split(prange, prange->start, last, &tail);
1196 if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
1197 prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
1198 pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
1199 prange, prange->start, prange->last,
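Note: svm_range_split_by_granularity() trims the range to the single granularity-aligned block containing the fault address (size = 1UL << prange->granularity at line 1174), splitting off a head if that block starts after prange->start and a tail if it ends before prange->last (lines 1181-1189). A standalone sketch of the alignment arithmetic, assuming the block is simply addr rounded down to a power-of-two block size:

#include <assert.h>
#include <stdio.h>

/* inclusive page interval of the aligned block containing addr */
void granularity_block(unsigned long addr, unsigned int granularity,
                       unsigned long *start, unsigned long *last)
{
        unsigned long size = 1UL << granularity;

        *start = addr & ~(size - 1);   /* round down to the block boundary */
        *last = *start + size - 1;     /* inclusive end of the same block */
}

int main(void)
{
        unsigned long start, last;

        granularity_block(0x1234, 9, &start, &last);   /* 512-page blocks */
        assert(start == 0x1200 && last == 0x13ff);

        /* the caller then splits: head if start > prange->start,
         * tail if last < prange->last, as at lines 1181-1189 */
        printf("fault page 0x1234 -> block [0x%lx 0x%lx]\n", start, last);
        return 0;
}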
1213 struct svm_range *prange, int domain)
1216 uint32_t flags = prange->flags;
1225 bo_node = prange->svm_bo->node;
1328 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1338 if (!prange->mapped_to_gpu) {
1339 pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
1340 prange, prange->start, prange->last);
1344 if (prange->start == start && prange->last == last) {
1345 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1346 prange->mapped_to_gpu = false;
1349 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
1351 p = container_of(prange->svms, struct kfd_process, svms);
1384 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
1397 last_start = prange->start + offset;
1399 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1414 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1416 pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
1421 prange->svms, last_start, prange->start + i,
1430 last_start, prange->start + i,
1432 (last_start - prange->start) << PAGE_SHIFT,
1436 for (j = last_start - prange->start; j <= i; j++)
1440 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1443 last_start = prange->start + i + 1;
1449 prange->start);
1461 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1472 if (prange->svm_bo && prange->ttm_res)
1473 bo_adev = prange->svm_bo->node->adev;
1475 p = container_of(prange->svms, struct kfd_process, svms);
1494 r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
1495 prange->dma_addr[gpuidx],
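Note: svm_range_map_to_gpu() batches consecutive pages that resolve to the same domain ("GPU" vs "CPU" in the debug print at line 1414) into a single page-table update, tracking the run start in last_start and restarting it after each update (line 1443). A standalone model of that run-batching loop; the per-page domain array below merely stands in for whatever the driver derives from the DMA addresses:

#include <stdio.h>

/* issue one batched update for the inclusive page run [first, last] */
void map_run(unsigned long first, unsigned long last, int domain)
{
        printf("map [0x%lx 0x%lx] as %s\n", first, last,
               domain ? "GPU" : "CPU");
}

/* batch consecutive pages with the same domain into one update each */
void map_pages(unsigned long start, const int *domain, unsigned long npages)
{
        unsigned long i, last_start = start;

        for (i = 0; i < npages; i++) {
                /* flush the run when the next page switches domains */
                if (i + 1 < npages && domain[i + 1] == domain[i])
                        continue;
                map_run(last_start, start + i, domain[i]);
                last_start = start + i + 1;
        }
}

int main(void)
{
        int domain[] = { 1, 1, 1, 0, 0, 1 };   /* 1 = GPU (VRAM), 0 = CPU */

        map_pages(0x100, domain, 6);
        return 0;
}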
1519 struct svm_range *prange;
1599 * prange->migrate_mutex must be held.
1617 struct svm_range *prange, int32_t gpuidx,
1630 ctx->process = container_of(prange->svms, struct kfd_process, svms);
1631 ctx->prange = prange;
1638 bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1644 if (prange->actual_loc) {
1646 prange->actual_loc);
1649 prange->actual_loc);
1653 if (test_bit(gpuidx, prange->bitmap_access))
1658 * If prange is already mapped or has the always-mapped flag,
1662 if (prange->mapped_to_gpu ||
1663 prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
1664 bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
1667 bitmap_or(ctx->bitmap, prange->bitmap_access,
1668 prange->bitmap_aip, MAX_GPU_INSTANCE);
1676 if (prange->actual_loc && !prange->ttm_res) {
1687 p = container_of(prange->svms, struct kfd_process, svms);
1697 start = prange->start << PAGE_SHIFT;
1698 end = (prange->last + 1) << PAGE_SHIFT;
1714 r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
1729 r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
1735 svm_range_lock(prange);
1741 if (!r && !list_empty(&prange->child_list)) {
1747 r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1751 prange->mapped_to_gpu = true;
1753 svm_range_unlock(prange);
1760 prange->validate_timestamp = ktime_get_boottime();
1797 struct svm_range *prange;
1827 list_for_each_entry(prange, &svms->list, list) {
1828 invalid = atomic_read(&prange->invalid);
1832 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1833 prange->svms, prange, prange->start, prange->last,
1839 mutex_lock(&prange->migrate_mutex);
1841 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
1845 prange->start);
1847 mutex_unlock(&prange->migrate_mutex);
1851 if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1889 * @prange: svm range structure
1904 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1908 struct svm_range_list *svms = prange->svms;
1915 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1916 svms, prange->start, prange->last, start, last);
1919 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
1921 bool mapped = prange->mapped_to_gpu;
1923 list_for_each_entry(pchild, &prange->child_list, child_list) {
1939 if (prange->start <= last && prange->last >= start)
1940 atomic_inc(&prange->invalid);
1947 prange->svms, prange->start, prange->last);
1967 prange->svms, start, last);
1968 list_for_each_entry(pchild, &prange->child_list, child_list) {
1976 s = max(start, prange->start);
1977 l = min(last, prange->last);
1979 svm_range_unmap_from_gpus(prange, s, l, trigger);
2045 struct svm_range *prange;
2054 prange = svm_range_new(svms, start, l, true);
2055 if (!prange)
2057 list_add(&prange->list, insert_list);
2058 list_add(&prange->update_list, update_list);
2103 struct svm_range *prange;
2123 prange = container_of(node, struct svm_range, it_node);
2127 if (svm_range_is_same_attrs(p, prange, nattr, attrs) &&
2128 prange->mapped_to_gpu) {
2135 struct svm_range *old = prange;
2137 prange = svm_range_clone(old);
2138 if (!prange) {
2144 list_add(&prange->list, insert_list);
2145 list_add(&prange->update_list, update_list);
2149 r = svm_range_split_head(prange, start,
2156 r = svm_range_split_tail(prange, last,
2165 list_add(&prange->update_list, update_list);
2189 list_for_each_entry_safe(prange, tmp, insert_list, list)
2190 svm_range_free(prange, false);
2191 list_for_each_entry_safe(prange, tmp, &new_list, list)
2192 svm_range_free(prange, true);
2202 struct svm_range *prange)
2207 start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2208 last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2210 if (prange->start == start && prange->last == last)
2213 pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2214 prange->svms, prange, start, last, prange->start,
2215 prange->last);
2218 interval_tree_remove(&prange->it_node, &prange->svms->objects);
2219 svm_range_remove_notifier(prange);
2221 prange->it_node.start = prange->start;
2222 prange->it_node.last = prange->last;
2224 interval_tree_insert(&prange->it_node, &prange->svms->objects);
2225 svm_range_add_notifier_locked(mm, prange);
2229 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2232 switch (prange->work_item.op) {
2234 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2235 svms, prange, prange->start, prange->last);
2238 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2239 svms, prange, prange->start, prange->last);
2240 svm_range_unlink(prange);
2241 svm_range_remove_notifier(prange);
2242 svm_range_free(prange, true);
2245 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2246 svms, prange, prange->start, prange->last);
2247 svm_range_update_notifier_and_interval_tree(mm, prange);
2250 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2251 svms, prange, prange->start, prange->last);
2252 svm_range_update_notifier_and_interval_tree(mm, prange);
2256 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2257 prange->start, prange->last);
2258 svm_range_add_to_svms(prange);
2259 svm_range_add_notifier_locked(mm, prange);
2262 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2263 prange, prange->start, prange->last);
2264 svm_range_add_to_svms(prange);
2265 svm_range_add_notifier_locked(mm, prange);
2269 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2270 prange->work_item.op);
2314 struct svm_range *prange;
2322 prange = list_first_entry(&svms->deferred_range_list,
2326 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2327 prange->start, prange->last, prange->work_item.op);
2329 mm = prange->work_item.mm;
2351 list_del_init(&prange->deferred_list);
2355 mutex_lock(&prange->migrate_mutex);
2356 while (!list_empty(&prange->child_list)) {
2359 pchild = list_first_entry(&prange->child_list,
2361 pr_debug("child prange 0x%p op %d\n", pchild,
2366 mutex_unlock(&prange->migrate_mutex);
2368 svm_range_handle_list_op(svms, prange, mm);
2384 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2388 /* if prange is on the deferred list */
2389 if (!list_empty(&prange->deferred_list)) {
2390 pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2391 WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
2393 prange->work_item.op != SVM_OP_UNMAP_RANGE)
2394 prange->work_item.op = op;
2396 prange->work_item.op = op;
2400 prange->work_item.mm = mm;
2401 list_add_tail(&prange->deferred_list,
2402 &prange->svms->deferred_range_list);
2403 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2404 prange, prange->start, prange->last, op);
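Note: svm_range_add_list_work() either queues the prange on the deferred list or, when it is already queued (line 2389), only merges the op, and a pending SVM_OP_UNMAP_RANGE is never overwritten (lines 2393-2394). A standalone model of that merge rule; the enum values are local to the sketch and merge_op() is a hypothetical helper:

#include <stdio.h>

enum svm_work_op {
        SVM_OP_NULL,
        SVM_OP_UNMAP_RANGE,
        SVM_OP_ADD_RANGE,
        SVM_OP_ADD_RANGE_AND_MAP,
};

/*
 * Merge a new deferred op into an already queued one: once a range is
 * marked for unmap, later updates must not override that.
 */
enum svm_work_op merge_op(enum svm_work_op queued, enum svm_work_op op)
{
        if (queued == SVM_OP_UNMAP_RANGE)
                return queued;
        return op;
}

int main(void)
{
        enum svm_work_op q = SVM_OP_ADD_RANGE;

        q = merge_op(q, SVM_OP_UNMAP_RANGE);          /* becomes unmap */
        q = merge_op(q, SVM_OP_ADD_RANGE_AND_MAP);    /* unmap still wins */
        printf("queued op = %d (SVM_OP_UNMAP_RANGE = %d)\n",
               q, SVM_OP_UNMAP_RANGE);
        return 0;
}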
2419 struct svm_range *prange, unsigned long start,
2425 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2426 pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
2427 prange->start, prange->last);
2430 if (start > prange->last || last < prange->start)
2433 head = tail = prange;
2434 if (start > prange->start)
2435 svm_range_split(prange, prange->start, start - 1, &tail);
2439 if (head != prange && tail != prange) {
2442 } else if (tail != prange) {
2444 } else if (head != prange) {
2446 } else if (parent != prange) {
2447 prange->work_item.op = SVM_OP_UNMAP_RANGE;
2452 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2467 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2468 prange, prange->start, prange->last, start, last);
2476 unmap_parent = start <= prange->start && last >= prange->last;
2478 list_for_each_entry(pchild, &prange->child_list, child_list) {
2484 svm_range_unmap_split(mm, prange, pchild, start, last);
2487 s = max(start, prange->start);
2488 l = min(last, prange->last);
2490 svm_range_unmap_from_gpus(prange, s, l, trigger);
2491 svm_range_unmap_split(mm, prange, prange, start, last);
2494 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
2496 svm_range_add_list_work(svms, prange, mm,
2512 * For an unmap event, unmap the range from GPUs and remove the prange from svms in a delayed
2513 * work thread, splitting the prange if only part of it is unmapped.
2521 * for invalidate event, prange lock is held if this is from migration
2528 struct svm_range *prange;
2547 prange = container_of(mni, struct svm_range, notifier);
2549 svm_range_lock(prange);
2554 svm_range_unmap_from_cpu(mni->mm, prange, start, last);
2557 svm_range_evict(prange, mni->mm, start, last, range->event);
2561 svm_range_unlock(prange);
2582 struct svm_range *prange;
2589 prange = container_of(node, struct svm_range, it_node);
2590 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2591 addr, prange->start, prange->last, node->start, node->last);
2593 if (addr >= prange->start && addr <= prange->last) {
2595 *parent = prange;
2596 return prange;
2598 list_for_each_entry(pchild, &prange->child_list, child_list)
2603 *parent = prange;
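Note: svm_range_from_addr() resolves a faulting page in two steps: an interval-tree lookup on svms->objects, then, if the hit does not itself cover the address, a walk of its child_list for ranges split off but not yet relinked (lines 2589-2603). A reduced kernel-style sketch of that lookup shape is below; it assumes the kernel interval_tree and list helpers plus the struct definitions from this file, and omits locking and error handling:

/* kernel-style sketch; assumes <linux/interval_tree.h> and <linux/list.h> */
static struct svm_range *
range_from_addr(struct svm_range_list *svms, unsigned long addr,
                struct svm_range **parent)
{
        struct interval_tree_node *node;
        struct svm_range *prange, *pchild;

        node = interval_tree_iter_first(&svms->objects, addr, addr);
        if (!node)
                return NULL;

        prange = container_of(node, struct svm_range, it_node);
        if (addr >= prange->start && addr <= prange->last) {
                if (parent)
                        *parent = prange;
                return prange;
        }

        /* the address may live in a child split off but not relinked yet */
        list_for_each_entry(pchild, &prange->child_list, child_list) {
                if (addr >= pchild->start && addr <= pchild->last) {
                        if (parent)
                                *parent = prange;
                        return pchild;
                }
        }
        return NULL;
}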
2611 * @prange: svm range structure
2632 svm_range_best_restore_location(struct svm_range *prange,
2641 p = container_of(prange->svms, struct kfd_process, svms);
2652 if (prange->preferred_loc == gpuid ||
2653 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2654 return prange->preferred_loc;
2655 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2656 preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
2658 return prange->preferred_loc;
2662 if (test_bit(*gpuidx, prange->bitmap_access))
2665 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2666 if (!prange->actual_loc)
2669 bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
2671 return prange->actual_loc;
2789 struct svm_range *prange = NULL;
2814 prange = svm_range_new(&p->svms, start, last, true);
2815 if (!prange) {
2816 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2821 svm_range_free(prange, true);
2826 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2828 svm_range_add_to_svms(prange);
2829 svm_range_add_notifier_locked(mm, prange);
2831 return prange;
2834 /* svm_range_skip_recover - decide if prange can be recovered
2835 * @prange: svm range structure
2838 * 1. prange is on the deferred list to be removed after unmap; it is a stale fault, and the
2839 * deferred list work will drain the stale fault before freeing the prange.
2840 * 2. prange is on the deferred list to add an interval notifier after a split, or
2841 * 3. prange is a child range split from a parent prange; recover it later
2846 static bool svm_range_skip_recover(struct svm_range *prange)
2848 struct svm_range_list *svms = prange->svms;
2851 if (list_empty(&prange->deferred_list) &&
2852 list_empty(&prange->child_list)) {
2858 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2859 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2860 svms, prange, prange->start, prange->last);
2863 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2864 prange->work_item.op == SVM_OP_ADD_RANGE) {
2865 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2866 svms, prange, prange->start, prange->last);
2919 struct svm_range *prange;
2976 prange = svm_range_from_addr(svms, addr, NULL);
2977 if (!prange) {
2978 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2991 prange = svm_range_create_unregistered_range(node, p, mm, addr);
2992 if (!prange) {
3003 mutex_lock(&prange->migrate_mutex);
3005 if (svm_range_skip_recover(prange)) {
3012 if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
3015 svms, prange->start, prange->last);
3037 best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
3040 svms, prange->start, prange->last);
3046 svms, prange->start, prange->last, best_loc,
3047 prange->actual_loc);
3052 if (prange->actual_loc != best_loc) {
3055 r = svm_migrate_to_vram(prange, best_loc, mm,
3063 if (prange->actual_loc)
3064 r = svm_migrate_vram_to_ram(prange, mm,
3071 r = svm_migrate_vram_to_ram(prange, mm,
3077 r, svms, prange->start, prange->last);
3082 r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
3085 r, svms, prange->start, prange->last);
3091 mutex_unlock(&prange->migrate_mutex);
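Note: the fault path at lines 3037-3082 picks best_loc first and migrates only when it differs from prange->actual_loc: towards VRAM when best_loc names a GPU, back to system memory when best_loc is 0 (KFD_IOCTL_SVM_LOCATION_SYSMEM), then validates and maps. A standalone model of just that decision; the location encoding and the helper name decide() are the sketch's own assumptions:

#include <stdio.h>

#define LOCATION_SYSMEM 0u   /* assumed value of KFD_IOCTL_SVM_LOCATION_SYSMEM */

enum migrate_action { MIGRATE_NONE, MIGRATE_TO_VRAM, MIGRATE_TO_RAM };

/* decide how to move the range so that it ends up at best_loc */
enum migrate_action decide(unsigned int actual_loc, unsigned int best_loc)
{
        if (actual_loc == best_loc)
                return MIGRATE_NONE;          /* already where it should be */
        if (best_loc != LOCATION_SYSMEM)
                return MIGRATE_TO_VRAM;       /* like svm_migrate_to_vram() */
        return MIGRATE_TO_RAM;                /* like svm_migrate_vram_to_ram() */
}

int main(void)
{
        printf("%d %d %d\n",
               decide(0, 0),          /* stays in system memory */
               decide(0, 0x8700),     /* move to a GPU's VRAM */
               decide(0x8700, 0));    /* evict back to system memory */
        return 0;
}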
3113 struct svm_range *prange, *pchild;
3122 list_for_each_entry(prange, &p->svms.list, list) {
3123 svm_range_lock(prange);
3124 list_for_each_entry(pchild, &prange->child_list, child_list) {
3138 size = (prange->last - prange->start + 1) << PAGE_SHIFT;
3150 svm_range_unlock(prange);
3170 struct svm_range *prange;
3187 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
3188 svm_range_unlink(prange);
3189 svm_range_remove_notifier(prange);
3190 svm_range_free(prange, true);
3316 * @prange: svm range structure
3341 svm_range_best_prefetch_location(struct svm_range *prange)
3344 uint32_t best_loc = prange->prefetch_loc;
3350 p = container_of(prange->svms, struct kfd_process, svms);
3355 bo_node = svm_range_get_node_by_id(prange, best_loc);
3368 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3370 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3391 p->xnack_enabled, &p->svms, prange->start, prange->last,
3399 * @prange: svm range structure
3412 * a. svm_range_validate_vram takes prange->migrate_mutex
3422 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3429 best_loc = svm_range_best_prefetch_location(prange);
3432 best_loc == prange->actual_loc)
3436 r = svm_migrate_vram_to_ram(prange, mm,
3442 r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
3483 struct svm_range *prange =
3488 list_del_init(&prange->svm_bo_list);
3491 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3492 prange->start, prange->last);
3494 mutex_lock(&prange->migrate_mutex);
3496 r = svm_migrate_vram_to_ram(prange, mm,
3498 } while (!r && prange->actual_loc && --retries);
3500 if (!r && prange->actual_loc)
3503 if (!prange->actual_loc) {
3504 mutex_lock(&prange->lock);
3505 prange->svm_bo = NULL;
3506 mutex_unlock(&prange->lock);
3508 mutex_unlock(&prange->migrate_mutex);
3535 struct svm_range *prange;
3572 list_for_each_entry_safe(prange, next, &insert_list, list) {
3573 svm_range_add_to_svms(prange);
3574 svm_range_add_notifier_locked(mm, prange);
3576 list_for_each_entry(prange, &update_list, update_list) {
3577 svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3580 list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3581 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3582 prange->svms, prange, prange->start,
3583 prange->last);
3584 svm_range_unlink(prange);
3585 svm_range_remove_notifier(prange);
3586 svm_range_free(prange, false);
3595 list_for_each_entry(prange, &update_list, update_list) {
3598 mutex_lock(&prange->migrate_mutex);
3600 r = svm_range_trigger_migration(mm, prange, &migrated);
3605 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
3606 prange->mapped_to_gpu) {
3608 mutex_unlock(&prange->migrate_mutex);
3613 mutex_unlock(&prange->migrate_mutex);
3617 flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
3619 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
3625 mutex_unlock(&prange->migrate_mutex);
3659 struct svm_range *prange;
3738 prange = container_of(node, struct svm_range, it_node);
3742 if (prange->preferred_loc ==
3745 location != prange->preferred_loc)) {
3749 location = prange->preferred_loc;
3753 if (prange->prefetch_loc ==
3756 prefetch_loc != prange->prefetch_loc)) {
3760 prefetch_loc = prange->prefetch_loc;
3765 prange->bitmap_access, MAX_GPU_INSTANCE);
3767 prange->bitmap_aip, MAX_GPU_INSTANCE);
3770 flags_and &= prange->flags;
3771 flags_or |= prange->flags;
3774 if (get_granularity && prange->granularity < granularity)
3775 granularity = prange->granularity;
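Note: when svm_range_get_attr() reports on a region spanning several pranges it aggregates: flags present in every range (flags_and), flags present in any range (flags_or), and the minimum granularity (lines 3770-3775). A standalone model of that accumulation (starting values chosen for illustration; 0x3F matches the clamp at line 791):

#include <stdio.h>

struct range_attrs {
        unsigned int flags;
        unsigned int granularity;
};

int main(void)
{
        struct range_attrs ranges[] = {
                { .flags = 0x3, .granularity = 9 },
                { .flags = 0x1, .granularity = 12 },
                { .flags = 0x5, .granularity = 10 },
        };
        unsigned int flags_and = ~0u;    /* start with everything set */
        unsigned int flags_or = 0;
        unsigned int granularity = 0x3f; /* illustrative upper bound */
        unsigned int i;

        for (i = 0; i < 3; i++) {
                flags_and &= ranges[i].flags;
                flags_or |= ranges[i].flags;
                if (ranges[i].granularity < granularity)
                        granularity = ranges[i].granularity;
        }
        printf("flags_and=0x%x flags_or=0x%x granularity=%u\n",
               flags_and, flags_or, granularity);
        return 0;
}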
3983 struct svm_range *prange;
3993 list_for_each_entry(prange, &svms->list, list) {
3994 pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
3995 prange, prange->start, prange->npages,
3996 prange->start + prange->npages - 1);
4045 struct svm_range *prange;
4089 list_for_each_entry(prange, &svms->list, list) {
4092 svm_priv->start_addr = prange->start;
4093 svm_priv->size = prange->npages;
4095 pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
4096 prange, prange->start, prange->npages,
4097 prange->start + prange->npages - 1,
4098 prange->npages * PAGE_SIZE);