/*	$NetBSD: drm_cache.c,v 1.14 2020/09/05 07:45:44 maxv Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_cache.c,v 1.14 2020/09/05 07:45:44 maxv Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/xcall.h>

#include <uvm/uvm_extern.h>

#include <linux/mm_types.h>

#include <drm/drmP.h>

#if !defined(__arm__) && !defined(__aarch64__)
#define DRM_CLFLUSH	1
#endif
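
/*
 * On architectures other than arm/aarch64, DRM_CLFLUSH is defined and
 * each machine-dependent section below supplies:
 *
 *	drm_md_clflush_finegrained_p()	can we flush individual pages or
 *					virtual address ranges?
 *	drm_md_clflush_begin()		start a batch of fine-grained flushes
 *	drm_md_clflush_page()		flush the cache lines of one page
 *	drm_md_clflush_virt_range()	flush the cache lines of a va range
 *	drm_md_clflush_commit()		finish a batch of fine-grained flushes
 *	drm_md_clflush_all()		fall back to flushing all CPU caches
 *
 * Without DRM_CLFLUSH the public drm_clflush_* entry points compile to
 * no-ops.  A typical (hypothetical) caller flushes a CPU-written buffer
 * before handing it to the device, e.g.
 *
 *	drm_clflush_virt_range(vaddr, size);
 */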

#if defined(DRM_CLFLUSH)
static bool		drm_md_clflush_finegrained_p(void);
static void		drm_md_clflush_all(void);
static void		drm_md_clflush_begin(void);
static void		drm_md_clflush_commit(void);
static void		drm_md_clflush_page(struct page *);
static void		drm_md_clflush_virt_range(const void *, size_t);
#endif

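/*
 * drm_clflush_pages(pages, npages)
 *
 *	Flush any dirty CPU cache lines covering each of the npages
 *	pages in the array pages, typically so that a device (e.g. the
 *	GPU) observes what the CPU has written.
 */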
void
drm_clflush_pages(struct page **pages, unsigned long npages)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		drm_md_clflush_begin();
		while (npages--)
			drm_md_clflush_page(pages[npages]);
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}

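/*
 * drm_clflush_pglist(list)
 *
 *	Like drm_clflush_pages, but flushes a TAILQ of NetBSD vm_pages
 *	(struct pglist) rather than an array of Linux struct page.
 */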
void
drm_clflush_pglist(struct pglist *list)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		struct vm_page *page;

		drm_md_clflush_begin();
		TAILQ_FOREACH(page, list, pageq.queue)
			drm_md_clflush_page(container_of(page, struct page,
				p_vmp));
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}

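/*
 * drm_clflush_page(page)
 *
 *	Flush any dirty CPU cache lines covering a single page.
 */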
void
drm_clflush_page(struct page *page)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		drm_md_clflush_begin();
		drm_md_clflush_page(page);
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}

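/*
 * drm_clflush_virt_range(vaddr, nbytes)
 *
 *	Flush any dirty CPU cache lines covering the nbytes bytes of
 *	kernel virtual memory starting at vaddr.
 */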
void
drm_clflush_virt_range(const void *vaddr, size_t nbytes)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		drm_md_clflush_begin();
		drm_md_clflush_virt_range(vaddr, nbytes);
		drm_md_clflush_commit();
	} else {
		drm_md_clflush_all();
	}
#endif
}

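/*
 * x86: flush by cache line with CLFLUSH when the CPU advertises it
 * (CPUID CLFSH feature bit), bracketing the flushes with MFENCE to
 * order them; otherwise fall back to broadcasting WBINVD, which writes
 * back and invalidates all caches, to every CPU.
 */
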
#if defined(__i386__) || defined(__x86_64__)

#include <machine/cpufunc.h>

static bool
drm_md_clflush_finegrained_p(void)
{
	return ISSET(cpu_info_primary.ci_feat_val[0], CPUID_CLFSH);
}

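/*
 * WBINVD only writes back and invalidates the caches of the CPU that
 * executes it, so drm_md_clflush_all broadcasts drm_x86_clflush_xc as
 * a cross-call to every CPU and waits for all of them to finish.
 */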
static void
drm_x86_clflush_xc(void *arg0 __unused, void *arg1 __unused)
{
	wbinvd();
}

static void
drm_md_clflush_all(void)
{
	xc_wait(xc_broadcast(0, &drm_x86_clflush_xc, NULL, NULL));
}

static void
drm_md_clflush_begin(void)
{
	/* Support for CLFLUSH implies support for MFENCE.  */
	x86_mfence();
}

static void
drm_md_clflush_commit(void)
{
	x86_mfence();
}

static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

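/*
 * Round the range out to CLFLUSH line boundaries (ci_cflush_lsize) and
 * issue CLFLUSH for each line; the MFENCEs in drm_md_clflush_begin and
 * drm_md_clflush_commit order the flushes with respect to surrounding
 * loads and stores.
 */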
static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
	const unsigned clflush_size = cpu_info_primary.ci_cflush_lsize;
	const vaddr_t vaddr = (vaddr_t)ptr;
	const vaddr_t start = rounddown(vaddr, clflush_size);
	const vaddr_t end = roundup(vaddr + nbytes, clflush_size);
	vaddr_t va;

	for (va = start; va < end; va += clflush_size)
		asm volatile ("clflush %0" : : "m" (*(const char *)va));
}

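/*
 * sparc/sparc64: use the cache flush primitives from the port's cache
 * headers.  On sparc64 a page is flushed by physical address, and for
 * arbitrary virtual ranges there is no per-address flush used here, so
 * the whole D-cache is blasted instead.  On 32-bit sparc, cache_flush
 * operates on virtual addresses directly.
 */
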
#elif defined(__sparc__) || defined(__sparc64__)

#ifdef __sparc64__
#include <sparc64/sparc64/cache.h>
#else
#include <sparc/sparc/cache.h>
#endif

static bool
drm_md_clflush_finegrained_p(void)
{
	return true;
}

static void
drm_md_clflush_all(void)
{
	panic("don't know how to flush entire cache on sparc64");
}

static void
drm_md_clflush_begin(void)
{
	membar_Sync();		/* unsure if needed */
}

static void
drm_md_clflush_commit(void)
{
	membar_Sync();		/* unsure if needed */
}

static void
drm_md_clflush_page(struct page *page)
{
#ifdef __sparc64__
	paddr_t pa = VM_PAGE_TO_PHYS(&page->p_vmp);

	cache_flush_phys(pa, PAGE_SIZE, 0);
#else
	void *const vaddr = kmap_atomic(page);

	cache_flush(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
#endif
}

static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
#ifdef __sparc64__
	/* XXX Mega-kludge -- doesn't seem to be a way to flush by vaddr.  */
	blast_dcache();
#else
	cache_flush(ptr, nbytes);
#endif
}

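/*
 * powerpc: flush each data cache block with dcbf, then order the
 * flushes with a sync in drm_md_clflush_commit.
 */
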
#elif defined(__powerpc__)

static bool
drm_md_clflush_finegrained_p(void)
{
	return true;
}

static void
drm_md_clflush_all(void)
{
	panic("don't know how to flush entire cache on powerpc");
}

static void
drm_md_clflush_begin(void)
{
}

static void
drm_md_clflush_commit(void)
{
	asm volatile ("sync" ::: "memory");
}

static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

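/*
 * Round the range out to data-cache-line boundaries (dcache_line_size)
 * and issue dcbf (data cache block flush) for each line.
 */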
static void
drm_md_clflush_virt_range(const void *ptr, size_t nbytes)
{
	const unsigned dcsize = curcpu()->ci_ci.dcache_line_size;
	vaddr_t va = (vaddr_t)ptr;
	vaddr_t start = rounddown(va, dcsize);
	vaddr_t end = roundup(va + nbytes, dcsize);
	vsize_t len = end - start;
	vsize_t off;

	for (off = 0; off < len; off += dcsize)
		asm volatile ("dcbf\t%0,%1" : : "b"(start), "r"(off));
}

#endif