/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2015 Intel Corporation
 */

#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>

#include "gt/intel_gt_requests.h"

#include "i915_trace.h"

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/*
	 * We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

static bool drop_pages(struct drm_i915_gem_object *obj,
		       unsigned long shrink, bool trylock_vm)
{
	unsigned long flags;

	flags = 0;
	if (shrink & I915_SHRINK_ACTIVE)
		flags |= I915_GEM_OBJECT_UNBIND_ACTIVE;
	if (!(shrink & I915_SHRINK_BOUND))
		flags |= I915_GEM_OBJECT_UNBIND_TEST;
	if (trylock_vm)
		flags |= I915_GEM_OBJECT_UNBIND_VM_TRYLOCK;

	if (i915_gem_object_unbind(obj, flags) == 0)
		return true;

	return false;
}

static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags)
{
	if (obj->ops->shrink) {
		unsigned int shrink_flags = 0;

		if (!(flags & I915_SHRINK_ACTIVE))
			shrink_flags |= I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT;

		if (flags & I915_SHRINK_WRITEBACK)
			shrink_flags |= I915_GEM_OBJECT_SHRINK_WRITEBACK;

		return obj->ops->shrink(obj, shrink_flags);
	}

	return 0;
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @ww: i915 gem ww acquire ctx, or NULL
 * @i915: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @shrink: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @shrink. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core might have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker code
 * having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct i915_gem_ww_ctx *ww,
		struct drm_i915_private *i915,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned int shrink)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &i915->mm.purge_list, ~0u },
		{
			&i915->mm.shrink_list,
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND
		},
		{ NULL, 0 },
	}, *phase;
	intel_wakeref_t wakeref = 0;
	unsigned long count = 0;
	unsigned long scanned = 0;
	int err = 0;

	/* The CHV + VTD workaround uses stop_machine(); need to trylock vm->mutex */
	bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);

	trace_i915_gem_shrink(i915, target, shrink);

	/*
	 * Unbinding of objects will require HW access; Let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if (shrink & I915_SHRINK_BOUND) {
		wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
		if (!wakeref)
			shrink &= ~I915_SHRINK_BOUND;
	}

	/*
	 * When shrinking the active list, we should also consider active
	 * contexts. Active contexts are pinned until they are retired, and
	 * so can not be simply unbound to retire and unpin their pages. To
	 * shrink the contexts, we must wait until the gpu is idle and
	 * completed its switch to the kernel context. In short, we do
	 * not have a good mechanism for idling a specific context, but
	 * what we can do is give them a kick so that we do not keep idle
	 * contexts around longer than is necessary.
	 */
	if (shrink & I915_SHRINK_ACTIVE)
		/* Retire requests to unpin all idle contexts */
		intel_gt_retire_requests(to_gt(i915));

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equal to 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;
		unsigned long flags;

		if ((shrink & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);

		/*
		 * We serialize our access to unreferenced objects through
		 * the use of the struct_mutex. While the objects are not
		 * yet freed (due to RCU then a workqueue) we still want
		 * to be able to shrink their pages, so they remain on
		 * the unbound/bound list until actually freed.
		 */
		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

			if (shrink & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(shrink & I915_SHRINK_ACTIVE) &&
			    i915_gem_object_is_framebuffer(obj))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			/* May arrive from get_pages on another bo */
			if (!ww) {
				if (!i915_gem_object_trylock(obj, NULL))
					goto skip;
			} else {
				err = i915_gem_object_lock(obj, ww);
				if (err)
					goto skip;
			}

			if (drop_pages(obj, shrink, trylock_vm) &&
			    !__i915_gem_object_put_pages(obj) &&
			    !try_to_writeback(obj, shrink))
				count += obj->base.size >> PAGE_SHIFT;

			if (!ww)
				i915_gem_object_unlock(obj);

			scanned += obj->base.size >> PAGE_SHIFT;
skip:
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
			if (err)
				break;
		}
		list_splice_tail(&still_in_list, phase->list);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
		if (err)
			break;
	}

	if (shrink & I915_SHRINK_BOUND)
		intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	if (err)
		return err;

	if (nr_scanned)
		*nr_scanned += scanned;
	return count;
}
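
/*
 * Editorial usage sketch (illustrative, not part of the driver): a caller
 * wanting to release roughly "nr" pages (a hypothetical target chosen by the
 * caller) outside of any ww transaction might invoke the shrinker like so:
 *
 *	unsigned long scanned = 0;
 *	unsigned long freed;
 *
 *	freed = i915_gem_shrink(NULL, i915, nr, &scanned,
 *				I915_SHRINK_BOUND |
 *				I915_SHRINK_UNBOUND |
 *				I915_SHRINK_WRITEBACK);
 *
 * If the device is not already awake, I915_SHRINK_BOUND is dropped and bound
 * objects are skipped; callers that must reclaim as much as possible fall
 * back to i915_gem_shrink_all(i915).
 */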

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @i915: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests so that backing storage for active objects can be released as well.
 *
 * This should only be used in code to intentionally quiesce the gpu or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	unsigned long freed = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		freed = i915_gem_shrink(NULL, i915, -1UL, NULL,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND);
	}

	return freed;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long num_objects;
	unsigned long count;

	count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
	num_objects = READ_ONCE(i915->mm.shrink_count);

	/*
	 * Update our preferred vmscan batch size for the next pass.
	 * Our rough guess for an effective batch size is roughly 2
	 * available GEM objects worth of pages. That is, we don't want
	 * the shrinker to fire until it is worth the cost of freeing an
	 * entire GEM object.
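	 *
	 * Rough worked example (illustrative numbers only, not measured):
	 * with 1000 shrinkable objects backing 512000 pages, avg is
	 * 2 * 512000 / 1000 = 1024 pages, so the next batch becomes
	 * max((previous batch + 1024) / 2, 128).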
	 */
	if (num_objects) {
		unsigned long avg = 2 * count / num_objects;

		i915->mm.shrinker.batch =
			max((i915->mm.shrinker.batch + avg) >> 1,
			    128ul /* default SHRINK_BATCH */);
	}

	return count;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;

	sc->nr_scanned = 0;

	freed = i915_gem_shrink(NULL, i915,
				sc->nr_to_scan,
				&sc->nr_scanned,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND);
	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
			freed += i915_gem_shrink(NULL, i915,
						 sc->nr_to_scan - sc->nr_scanned,
						 &sc->nr_scanned,
						 I915_SHRINK_ACTIVE |
						 I915_SHRINK_BOUND |
						 I915_SHRINK_UNBOUND |
						 I915_SHRINK_WRITEBACK);
		}
	}

	return sc->nr_scanned ? freed : SHRINK_STOP;
}

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, available, freed_pages;
	intel_wakeref_t wakeref;
	unsigned long flags;

	freed_pages = 0;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_WRITEBACK);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	available = unevictable = 0;
	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			available += obj->base.size >> PAGE_SHIFT;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	if (freed_pages || available)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned, %lu pages left available.\n",
			freed_pages, unevictable, available);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_VMAPS);

	/* We also want to clear any cached iomaps as they wrap vmap */
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
	list_for_each_entry_safe(vma, next,
				 &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
		unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
		struct drm_i915_gem_object *obj = vma->obj;

		if (!vma->iomap || i915_vma_is_active(vma))
			continue;

		if (!i915_gem_object_trylock(obj, NULL))
			continue;

		if (__i915_vma_unbind(vma) == 0)
			freed_pages += count;

		i915_gem_object_unlock(obj);
	}
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
	i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
	i915->mm.shrinker.batch = 4096;
	drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker,
						  "drm-i915_gem"));

#ifdef notyet
	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));

	i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	drm_WARN_ON(&i915->drm,
		    register_vmap_purge_notifier(&i915->mm.vmap_notifier));
#endif
}

void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
#ifdef notyet
	drm_WARN_ON(&i915->drm,
		    unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
	drm_WARN_ON(&i915->drm,
		    unregister_oom_notifier(&i915->mm.oom_notifier));
#endif
	unregister_shrinker(&i915->mm.shrinker);
}

void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
				    struct rwlock *mutex)
{
#ifdef notyet
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);

	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
	mutex_release(&mutex->dep_map, _RET_IP_);

	fs_reclaim_release(GFP_KERNEL);
#endif
}

/**
 * i915_gem_object_make_unshrinkable - Hide the object from the shrinker. By
 * default, all object types that support shrinking (see IS_SHRINKABLE) will
 * also make the object visible to the shrinker after allocating the system
 * memory pages.
 * @obj: The GEM object.
 *
 * This is typically used for special kernel internal objects that can't be
 * easily processed by the shrinker, for example if they are perma-pinned.
 */
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	/*
	 * We can only be called while the pages are pinned or when
	 * the pages are released. If pinned, we should only be called
	 * from a single caller under controlled conditions; and on release
	 * only one caller may release us. Neither the two may cross.
	 */
	if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
	    !list_empty(&obj->mm.link)) {
		list_del_init(&obj->mm.link);
		i915->mm.shrink_count--;
		i915->mm.shrink_memory -= obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

static void ___i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
					       struct list_head *head)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	if (!i915_gem_object_is_shrinkable(obj))
		return;

	if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	GEM_BUG_ON(!kref_read(&obj->base.refcount));
	if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
		GEM_BUG_ON(!list_empty(&obj->mm.link));

		list_add_tail(&obj->mm.link, head);
		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

/**
 * __i915_gem_object_make_shrinkable - Move the object to the tail of the
 * shrinkable list. Objects on this list might be swapped out. Used with
 * WILLNEED objects.
 * @obj: The GEM object.
 *
 * DO NOT USE. This is intended to be called on very special objects that don't
 * yet have mm.pages, but are guaranteed to have potentially reclaimable pages
 * underneath.
 */
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	___i915_gem_object_make_shrinkable(obj,
					   &obj_to_i915(obj)->mm.shrink_list);
}

/**
 * __i915_gem_object_make_purgeable - Move the object to the tail of the
 * purgeable list. Objects on this list might be swapped out. Used with
 * DONTNEED objects.
 * @obj: The GEM object.
 *
 * DO NOT USE. This is intended to be called on very special objects that don't
 * yet have mm.pages, but are guaranteed to have potentially reclaimable pages
 * underneath.
 */
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	___i915_gem_object_make_shrinkable(obj,
					   &obj_to_i915(obj)->mm.purge_list);
}

/**
 * i915_gem_object_make_shrinkable - Move the object to the tail of the
 * shrinkable list. Objects on this list might be swapped out. Used with
 * WILLNEED objects.
 * @obj: The GEM object.
 *
 * MUST only be called on objects which have backing pages.
 *
 * MUST be balanced with previous call to i915_gem_object_make_unshrinkable().
 */
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	__i915_gem_object_make_shrinkable(obj);
}

/**
 * i915_gem_object_make_purgeable - Move the object to the tail of the purgeable
 * list. Used with DONTNEED objects. Unlike with shrinkable objects, the
 * shrinker will attempt to discard the backing pages, instead of trying to swap
 * them out.
 * @obj: The GEM object.
 *
 * MUST only be called on objects which have backing pages.
 *
 * MUST be balanced with previous call to i915_gem_object_make_unshrinkable().
 */
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	__i915_gem_object_make_purgeable(obj);
}
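
/*
 * Editorial usage sketch (illustrative, not part of the driver): a
 * perma-pinned kernel-internal object hides itself from reclaim with
 *
 *	i915_gem_object_make_unshrinkable(obj);
 *
 * and, once its pages may be reclaimed again, balances that with
 *
 *	i915_gem_object_make_shrinkable(obj);
 *
 * Objects the user marks I915_MADV_DONTNEED are instead moved with
 * i915_gem_object_make_purgeable(), which places them on the purge list that
 * i915_gem_shrink() scans before the regular shrink list.
 */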