/*	$OpenBSD: i915_gem_evict.c,v 1.7 2015/09/23 23:12:12 kettenis Exp $	*/
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

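/*
 * Add @vma to the current drm_mm eviction scan and to the caller's unwind
 * list.  Pinned objects and vmas that are already on an exec list are
 * skipped.  Returns true once the scan has accumulated a large enough
 * hole for the pending allocation.
 */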
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (vma->obj->pin_count)
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}

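/**
 * i915_gem_evict_something - Evict vmas to make room for a new binding
 * @dev: drm device
 * @vm: address space to evict from
 * @min_size: minimum size of the hole that must be freed
 * @alignment: required alignment of the hole
 * @cache_level: cache level the new binding will have
 * @mappable: if true, restrict the scan to the mappable range of the GGTT
 * @nonblocking: if true, only consider already-idle (inactive) vmas
 *
 * Scans @vm in LRU order and unbinds the oldest vmas until a hole large
 * enough for the requested allocation has been freed.  Returns 0 on
 * success, -ENOSPC if no suitable hole could be created, -EAGAIN if a
 * pending pageflip must complete first, or an error code from idling the
 * GPU or unbinding a vma.
 */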
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
			 bool mappable, bool nonblocking)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	int ret = 0;
	int pass = 0;

	trace_i915_gem_evict(dev, min_size, alignment, mappable);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Clean active objects
	 *   3. Flushing list
	 *   4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

	INIT_LIST_HEAD(&unwind_list);
	if (mappable) {
		BUG_ON(!i915_is_ggtt(vm));
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level, 0,
					    dev_priv->gtt.mappable_end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

search_again:
	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

	if (nonblocking)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(vma, &vm->active_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		list_del_init(&vma->exec_list);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips?
	 */
	if (nonblocking)
		return -ENOSPC;

	/* Only idle the GPU and repeat the search once */
	if (pass++ == 0) {
		ret = i915_gpu_idle(dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev);
		goto search_again;
	}

	/* If we still have pending pageflip completions, drop
	 * back to userspace to give our workqueues time to
	 * acquire our locks and unpin the old scanouts.
	 */
	return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store the objects to be evicted
	 * on a temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		if (drm_mm_scan_remove_block(&vma->node)) {
			list_move(&vma->exec_list, &eviction_list);
			drm_gem_object_reference(&vma->obj->base);
			continue;
		}
		list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		struct drm_gem_object *obj;
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		obj = &vma->obj->base;
		list_del_init(&vma->exec_list);
		if (ret == 0)
			ret = i915_vma_unbind(vma);

		drm_gem_object_unreference(obj);
	}

	return ret;
}

/**
 * i915_gem_evict_vm - Try to free up VM space
 *
 * @vm: Address space to evict from
 * @do_idle: Boolean directing whether to idle first.
 *
 * VM eviction is about freeing up virtual address space. For fine-grained
 * eviction, see i915_gem_evict_something(). In terms of freeing up actual
 * system memory, this function may not accomplish the desired result. An
 * object may be shared across multiple address spaces, and this function
 * does not guarantee that such objects are freed.
 *
 * Using @do_idle results in a more complete eviction because it first
 * retires outstanding requests, moving currently active BOs to the
 * inactive list.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		ret = i915_gpu_idle(vm->dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(vm->dev);
	}

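	/* Unbind every vma on the inactive list that is not pinned. */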
	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
		if (vma->obj->pin_count == 0)
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}

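/**
 * i915_gem_evict_everything - Try to evict all objects
 * @dev: Device to evict objects for
 *
 * Idles the GPU, retires outstanding requests and then unbinds every
 * unpinned vma in every address space via i915_gem_evict_vm().  Returns
 * -ENOSPC when there is nothing bound to evict, or an error from idling
 * the GPU.
 */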
int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_address_space *vm;
	bool lists_empty = true;
	int ret;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		lists_empty = (list_empty(&vm->inactive_list) &&
			       list_empty(&vm->active_list));
		if (!lists_empty)
			break;
	}

	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev);

	/* The gpu_idle will flush everything in the write domain to the
	 * active list. Then we must move everything off the active list
	 * with retire requests.
	 */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		WARN_ON(i915_gem_evict_vm(vm, false));

	return 0;
}
253