/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "display/intel_frontbuffer.h"

#include "i915_config.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

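/*
 * A clflush pass is tracked as a small piece of asynchronous work: the
 * dma_fence_work provides the fence other users of the object can wait on,
 * and we hold a reference to the object whose pages are being flushed.
 */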
struct clflush {
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};

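/*
 * Flush the CPU cache lines covering all backing pages of @obj, then tell
 * the frontbuffer tracking that any CPU writes have been pushed out.
 */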
static void __do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}

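/* Worker callback: performs the actual flush once the fence work runs. */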
static void clflush_work(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	__do_clflush(clflush->obj);
}

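/*
 * Release callback: drop the page pin and the object reference taken in
 * clflush_work_create() once the flush work is retired.
 */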
static void clflush_release(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	i915_gem_object_unpin_pages(clflush->obj);
	i915_gem_object_put(clflush->obj);
}

static const struct dma_fence_work_ops clflush_ops = {
	.name = "clflush",
	.work = clflush_work,
	.release = clflush_release,
};

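/*
 * Build the asynchronous flush work for @obj: pin its pages, take a
 * reference on the object and initialise the dma_fence_work. Returns NULL
 * if the allocation or the page pinning fails, in which case the caller
 * falls back to flushing synchronously.
 */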
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
	struct clflush *clflush;

	GEM_BUG_ON(!obj->cache_dirty);

	clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (!clflush)
		return NULL;

	if (__i915_gem_object_get_pages(obj) < 0) {
		kfree(clflush);
		return NULL;
	}

	dma_fence_work_init(&clflush->base, &clflush_ops);
	clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

	return clflush;
}

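/*
 * Flush any CPU-cache-dirty data for @obj back to memory so that it is
 * visible to the GPU. Unless I915_CLFLUSH_SYNC is set (or the async worker
 * cannot be created), the flush runs asynchronously behind a fence added
 * to the object's reservation. Returns false when no flush is required
 * (the object is already coherent for reads, has no struct pages, or we
 * are on a discrete GPU); returns true otherwise.
 */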
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct clflush *clflush;

	assert_object_held(obj);

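	/*
	 * Discrete GPUs are not expected to ever present a cache-dirty
	 * object here; warn if one shows up and skip the flush.
	 */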
	if (IS_DGFX(i915)) {
		WARN_ON_ONCE(obj->cache_dirty);
		return false;
	}

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * anything not backed by physical memory we consider to be always
	 * coherent and not need clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/*
	 * If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

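	/*
	 * Prefer an asynchronous flush behind a fence on the object's
	 * reservation; fall back to flushing inline if the caller asked for
	 * a synchronous flush or the worker could not be created. With no
	 * pages there is nothing to flush, and the object must still be in
	 * the CPU write domain.
	 */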
	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC) &&
	    dma_resv_reserve_fences(obj->base.resv, 1) == 0)
		clflush = clflush_work_create(obj);
	if (clflush) {
		i915_sw_fence_await_reservation(&clflush->base.chain,
						obj->base.resv, true,
						i915_fence_timeout(i915),
						I915_FENCE_GFP);
		dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_work_commit(&clflush->base);
		/*
		 * We must have successfully populated the pages (since we are
		 * holding a pin on the pages as per the flush worker) to
		 * reach this point, which must mean we have already done the
		 * required flush-on-acquire, hence resetting cache_dirty here
		 * should be safe.
		 */
		obj->cache_dirty = false;
	} else if (obj->mm.pages) {
		__do_clflush(obj);
		obj->cache_dirty = false;
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	return true;
}