/*	$NetBSD: drm_cache.c,v 1.8 2015/10/17 21:11:56 jmcneill Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_cache.c,v 1.8 2015/10/17 21:11:56 jmcneill Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/xcall.h>

#include <uvm/uvm_extern.h>

#include <linux/mm_types.h>

#include <drm/drmP.h>

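/*
 * DRM_CLFLUSH enables the cache flush operations below.  It is not
 * defined on arm, so there each of these routines compiles to a no-op.
 */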
#if !defined(__arm__)
#define DRM_CLFLUSH	1
#endif

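/*
 * Machine-dependent cache flush primitives.  Definitions for x86
 * follow at the bottom of this file; other architectures that define
 * DRM_CLFLUSH must supply their own.
 */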
#if defined(DRM_CLFLUSH)
static bool		drm_md_clflush_finegrained_p(void);
static void		drm_md_clflush_all(void);
static void		drm_md_clflush_page(struct page *);
static void		drm_md_clflush_virt_range(const void *, size_t);
#endif

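/*
 * drm_clflush_pages: Flush the CPU cache for each of the npages pages
 * in the array pages, or flush everything if fine-grained flushing is
 * not available.
 */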
void
drm_clflush_pages(struct page **pages, unsigned long npages)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		while (npages--)
			drm_md_clflush_page(pages[npages]);
	} else {
		drm_md_clflush_all();
	}
#endif
}

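/*
 * drm_clflush_pglist: Flush the CPU cache for each page on the uvm
 * page list.
 */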
void
drm_clflush_pglist(struct pglist *list)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p()) {
		struct vm_page *page;

		TAILQ_FOREACH(page, list, pageq.queue)
			drm_md_clflush_page(container_of(page, struct page,
				p_vmp));
	} else {
		drm_md_clflush_all();
	}
#endif
}

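/*
 * drm_clflush_page: Flush the CPU cache for a single page.
 */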
void
drm_clflush_page(struct page *page)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p())
		drm_md_clflush_page(page);
	else
		drm_md_clflush_all();
#endif
}

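/*
 * drm_clflush_virt_range: Flush the CPU cache for the nbytes bytes of
 * virtual memory starting at vaddr.
 */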
void
drm_clflush_virt_range(const void *vaddr, size_t nbytes)
{
#if defined(DRM_CLFLUSH)
	if (drm_md_clflush_finegrained_p())
		drm_md_clflush_virt_range(vaddr, nbytes);
	else
		drm_md_clflush_all();
#endif
}

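/*
 * x86 implementation: flush individual cache lines with CLFLUSH when
 * the CPU supports it; otherwise write back and invalidate the entire
 * cache of every CPU with WBINVD.
 */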
#if defined(__i386__) || defined(__x86_64__)

#include <machine/cpufunc.h>

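/*
 * drm_md_clflush_finegrained_p: True if we can flush individual cache
 * lines with CLFLUSH, false if we can flush only entire caches.
 */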
static bool
drm_md_clflush_finegrained_p(void)
{
	return ISSET(cpu_info_primary.ci_feat_val[0], CPUID_CFLUSH);
}

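/*
 * drm_x86_clflush: Flush the cache line containing the address vaddr.
 */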
static void
drm_x86_clflush(const void *vaddr)
{
	asm volatile ("clflush %0" : : "m" (*(const char *)vaddr));
}

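/*
 * drm_x86_clflush_size: Return the size in bytes of the cache lines
 * flushed by CLFLUSH, as recorded for the primary CPU at boot.
 */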
static size_t
drm_x86_clflush_size(void)
{
	KASSERT(drm_md_clflush_finegrained_p());
	return cpu_info_primary.ci_cflush_lsize;
}

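/*
 * drm_x86_clflush_xc: Cross-call handler: write back and invalidate
 * the entire cache of whichever CPU it runs on.
 */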
static void
drm_x86_clflush_xc(void *arg0 __unused, void *arg1 __unused)
{
	wbinvd();
}

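/*
 * drm_md_clflush_all: Flush everything: broadcast a WBINVD cross-call
 * to all CPUs and wait for it to complete.
 */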
static void
drm_md_clflush_all(void)
{
	xc_wait(xc_broadcast(0, &drm_x86_clflush_xc, NULL, NULL));
}

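/*
 * drm_md_clflush_page: Flush the cache lines of one page by mapping it
 * temporarily and flushing the mapped virtual range.
 */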
static void
drm_md_clflush_page(struct page *page)
{
	void *const vaddr = kmap_atomic(page);

	drm_md_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

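/*
 * drm_md_clflush_virt_range: Flush every cache line overlapping the
 * nbytes bytes starting at vaddr.  The range is widened to cache line
 * boundaries, and the CLFLUSHes are bracketed by MFENCE so that they
 * are ordered with surrounding loads and stores.
 */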
static void
drm_md_clflush_virt_range(const void *vaddr, size_t nbytes)
{
	const unsigned clflush_size = drm_x86_clflush_size();
	const vaddr_t va = (vaddr_t)vaddr;
	const char *const start = (const void *)rounddown(va, clflush_size);
	const char *const end = (const void *)roundup(va + nbytes,
	    clflush_size);
	const char *p;

	/* Support for CLFLUSH implies support for MFENCE.  */
	KASSERT(drm_md_clflush_finegrained_p());
	x86_mfence();
	for (p = start; p < end; p += clflush_size)
		drm_x86_clflush(p);
	x86_mfence();
}

#endif	/* defined(__i386__) || defined(__x86_64__) */