/*	$NetBSD: i915_gem_clflush.c,v 1.5 2021/12/19 11:33:30 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem_clflush.c,v 1.5 2021/12/19 11:33:30 riastradh Exp $");

#include "display/intel_frontbuffer.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

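/*
 * Deferred clflush: a dma_fence_work that flushes the CPU caches over
 * the pages of obj once the fences it waits on have signalled.
 */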
struct clflush {
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};

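/*
 * Flush the CPU cache lines backing every page of the object and
 * notify frontbuffer tracking that the CPU has written to the object.
 */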
static void __do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}

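/* Fence work callback: pin the object's pages, flush them, then unpin. */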
static int clflush_work(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);
	struct drm_i915_gem_object *obj = clflush->obj;
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	__do_clflush(obj);
	i915_gem_object_unpin_pages(obj);

	return 0;
}

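/*
 * Fence work release callback: drop the object reference taken in
 * clflush_work_create().
 */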
static void clflush_release(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	i915_gem_object_put(clflush->obj);
}

static const struct dma_fence_work_ops clflush_ops = {
	.name = "clflush",
	.work = clflush_work,
	.release = clflush_release,
};

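/*
 * Allocate a clflush work item holding a reference to the object.
 * Returns NULL on allocation failure so the caller can fall back to a
 * synchronous flush.
 */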
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
	struct clflush *clflush;

	GEM_BUG_ON(!obj->cache_dirty);

	clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (!clflush)
		return NULL;

	dma_fence_work_init(&clflush->base, &clflush_ops);
	clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

	return clflush;
}

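/*
 * Flush any dirty CPU cache lines over the object's backing pages,
 * either asynchronously behind the object's outstanding fences or
 * synchronously if I915_CLFLUSH_SYNC is set.  Returns false if the
 * object did not need flushing, true otherwise.
 */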
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct clflush *clflush;

	assert_object_held(obj);

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * anything not backed by physical memory we consider to be always
	 * coherent and not to need clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/*
	 * If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

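	/*
	 * Prefer to flush asynchronously: wait for the fences already
	 * attached to the object, install our fence as the new exclusive
	 * fence, and commit the work.  If the work could not be allocated
	 * (or a synchronous flush was requested), flush inline while the
	 * pages are resident.
	 */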
	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC))
		clflush = clflush_work_create(obj);
	if (clflush) {
		i915_sw_fence_await_reservation(&clflush->base.chain,
						obj->base.resv, NULL, true,
						I915_FENCE_TIMEOUT,
						I915_FENCE_GFP);
		dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
		dma_fence_work_commit(&clflush->base);
	} else if (obj->mm.pages) {
		__do_clflush(obj);
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	obj->cache_dirty = false;
	return true;
}