/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#include <linux/cc_platform.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/ioport.h>
#include <linux/iosys-map.h>
#include <xen/xen.h>

#include <drm/drm_cache.h>

/* A small bounce buffer that fits on the stack. */
#define MEMCPY_BOUNCE_SIZE 128

#if defined(CONFIG_X86)
#include <asm/smp.h>

/*
 * clflushopt is an unordered instruction which needs fencing with mfence or
 * sfence to avoid ordering issues. For drm_clflush_page this fencing happens
 * in the caller.
 */
static void
drm_clflush_page(struct vm_page *page)
{
	uint8_t *page_virtual;
	unsigned int i;
	const int size = curcpu()->ci_cflushsz;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page);
	for (i = 0; i < PAGE_SIZE; i += size)
		clflushopt(page_virtual + i);
	kunmap_atomic(page_virtual);
}

static void drm_cache_flush_clflush(struct vm_page *pages[],
				    unsigned long num_pages)
{
	unsigned long i;

	mb(); /* Full memory barrier used before so that CLFLUSH is ordered. */
	for (i = 0; i < num_pages; i++)
		drm_clflush_page(*pages++);
	mb(); /* Also used after CLFLUSH so that all cache is flushed. */
}
#endif

/**
 * drm_clflush_pages - Flush dcache lines of a set of pages.
 * @pages: List of pages to be flushed.
 * @num_pages: Number of pages in the array.
 *
 * Flush every data cache line entry that points to an address belonging
 * to a page in the array.
 */
void
drm_clflush_pages(struct vm_page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		drm_cache_flush_clflush(pages, num_pages);
		return;
	}

	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");

#elif defined(__powerpc__) && defined(__linux__)
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		struct vm_page *page = pages[i];
		void *page_virtual;

		if (unlikely(page == NULL))
			continue;

		page_virtual = kmap_atomic(page);
		flush_dcache_range((unsigned long)page_virtual,
				   (unsigned long)page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
#else
	WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
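
/*
 * A minimal usage sketch (illustrative only, not compiled): after the CPU
 * has written to an object's backing pages through a cached mapping,
 * flush those lines before handing the pages to the GPU. The names obj,
 * obj->pages and obj->npages are hypothetical.
 *
 *	// CPU stores through a cached kernel mapping ...
 *	drm_clflush_pages(obj->pages, obj->npages);
 *	// ... the data is now visible to an uncached/GPU consumer.
 */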

/**
 * drm_clflush_sg - Flush dcache lines pointing to a scatter-gather.
 * @st: struct sg_table.
 *
 * Flush every data cache line entry that points to an address in the
 * sg.
 */
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		struct sg_page_iter sg_iter;

		mb(); /* CLFLUSH is ordered only by using memory barriers. */
		for_each_sgtable_page(st, &sg_iter, 0)
			drm_clflush_page(sg_page_iter_page(&sg_iter));
		mb(); /* Make sure that every cache line entry is flushed. */

		return;
	}

	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);
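
/*
 * A sketch of a typical call site (illustrative only): a driver that
 * tracks an object's backing store in a struct sg_table can flush the
 * whole table in one call. The fields obj->cache_dirty and obj->sgt are
 * hypothetical.
 *
 *	if (obj->cache_dirty)
 *		drm_clflush_sg(obj->sgt);
 */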

/**
 * drm_clflush_virt_range - Flush dcache lines of a region
 * @addr: Initial kernel memory address.
 * @length: Region size.
 *
 * Flush every data cache line entry that points to an address in the
 * region requested.
 */
void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		const int size = curcpu()->ci_cflushsz;
		void *end = addr + length;

		addr = (void *)(((unsigned long)addr) & -size);
		mb(); /* CLFLUSH is only ordered with a full memory barrier. */
		for (; addr < end; addr += size)
			clflushopt(addr);
		clflushopt(end - 1); /* force serialisation */
		mb(); /* Ensure that every data cache line entry is flushed. */
		return;
	}

	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	WARN_ONCE(1, "Architecture has no drm_cache.c support\n");
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);
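
/*
 * A usage sketch (illustrative only): flush just the bytes the CPU wrote
 * through a kernel mapping, e.g. one updated entry rather than a whole
 * page. vaddr and entry are hypothetical; the loop above already rounds
 * the start address down to a cache-line boundary.
 *
 *	memcpy(vaddr, &entry, sizeof(entry));
 *	drm_clflush_virt_range(vaddr, sizeof(entry));
 */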

bool drm_need_swiotlb(int dma_bits)
{
	return false;
#ifdef notyet
	struct resource *tmp;
	resource_size_t max_iomem = 0;

	/*
	 * Xen paravirtual hosts require swiotlb regardless of requested dma
	 * transfer size.
	 *
	 * NOTE: Really, what it requires is use of the dma_alloc_coherent
	 * allocator used in ttm_dma_populate() instead of
	 * ttm_populate_and_map_pages(), which bounce buffers so much in
	 * Xen it leads to swiotlb buffer exhaustion.
	 */
	if (xen_pv_domain())
		return true;

	/*
	 * Enforce dma_alloc_coherent when memory encryption is active as well
	 * for the same reasons as for Xen paravirtual hosts.
	 */
	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return true;

	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
		max_iomem = max(max_iomem, tmp->end);

	return max_iomem > ((u64)1 << dma_bits);
#endif
}
EXPORT_SYMBOL(drm_need_swiotlb);
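
/*
 * A sketch of how a caller might use this (illustrative only; note the
 * function is currently stubbed to return false on this platform): at
 * init time, fall back to coherent DMA allocations when I/O memory lies
 * above the device's DMA reach. The 44-bit width and drv->use_dma_alloc
 * are hypothetical.
 *
 *	if (drm_need_swiotlb(44))
 *		drv->use_dma_alloc = true;
 */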

static void memcpy_fallback(struct iosys_map *dst,
			    const struct iosys_map *src,
			    unsigned long len)
{
	if (!dst->is_iomem && !src->is_iomem) {
		memcpy(dst->vaddr, src->vaddr, len);
	} else if (!src->is_iomem) {
		iosys_map_memcpy_to(dst, 0, src->vaddr, len);
	} else if (!dst->is_iomem) {
		memcpy_fromio(dst->vaddr, src->vaddr_iomem, len);
	} else {
		/*
		 * Bounce size is not performance tuned, but using a
		 * bounce buffer like this is significantly faster than
		 * resorting to ioreadxx() + iowritexx().
		 */
		char bounce[MEMCPY_BOUNCE_SIZE];
		void __iomem *_src = src->vaddr_iomem;
		void __iomem *_dst = dst->vaddr_iomem;

		while (len >= MEMCPY_BOUNCE_SIZE) {
			memcpy_fromio(bounce, _src, MEMCPY_BOUNCE_SIZE);
			memcpy_toio(_dst, bounce, MEMCPY_BOUNCE_SIZE);
			_src += MEMCPY_BOUNCE_SIZE;
			_dst += MEMCPY_BOUNCE_SIZE;
			len -= MEMCPY_BOUNCE_SIZE;
		}
		if (len) {
			/*
			 * Copy only the remaining tail bytes; copying a
			 * full bounce chunk here would read and write past
			 * the end of the mappings.
			 */
			memcpy_fromio(bounce, _src, len);
			memcpy_toio(_dst, bounce, len);
		}
	}
}

#ifdef CONFIG_X86

#ifdef __linux__
static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
#else
static int has_movntdqa;

#include <asm/fpu/api.h>

static inline void
static_branch_enable(int *x)
{
	*x = 1;
}

static inline int
static_branch_likely(int *x)
{
	return (likely(*x == 1));
}

#endif

static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
{
	kernel_fpu_begin();

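	/*
	 * Here len counts 16-byte blocks, not bytes: the caller passes
	 * len >> 4. The unrolled loop streams four XMM registers (64
	 * bytes) per pass; the second loop drains the remainder one
	 * 16-byte block at a time.
	 */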
	while (len >= 4) {
		asm("movntdqa (%0), %%xmm0\n"
		    "movntdqa 16(%0), %%xmm1\n"
		    "movntdqa 32(%0), %%xmm2\n"
		    "movntdqa 48(%0), %%xmm3\n"
		    "movaps %%xmm0, (%1)\n"
		    "movaps %%xmm1, 16(%1)\n"
		    "movaps %%xmm2, 32(%1)\n"
		    "movaps %%xmm3, 48(%1)\n"
		    :: "r" (src), "r" (dst) : "memory");
		src += 64;
		dst += 64;
		len -= 4;
	}
	while (len--) {
		asm("movntdqa (%0), %%xmm0\n"
		    "movaps %%xmm0, (%1)\n"
		    :: "r" (src), "r" (dst) : "memory");
		src += 16;
		dst += 16;
	}

	kernel_fpu_end();
}

/*
 * __drm_memcpy_from_wc copies @len bytes from @src to @dst using
 * non-temporal instructions where available. Note that all arguments
 * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
 * of 16.
 */
static void __drm_memcpy_from_wc(void *dst, const void *src, unsigned long len)
{
	if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
		memcpy(dst, src, len);
	else if (likely(len))
		__memcpy_ntdqa(dst, src, len >> 4);
}

/**
 * drm_memcpy_from_wc - Perform the fastest available memcpy from a source
 * that may be WC.
 * @dst: The destination pointer
 * @src: The source pointer
 * @len: The size of the area to transfer in bytes
 *
 * Tries an arch-optimized memcpy that prefetches when reading out of a WC
 * region, and if no such beast is available, falls back to a normal memcpy.
 */
void drm_memcpy_from_wc(struct iosys_map *dst,
			const struct iosys_map *src,
			unsigned long len)
{
	if (WARN_ON(in_interrupt())) {
		memcpy_fallback(dst, src, len);
		return;
	}

	if (static_branch_likely(&has_movntdqa)) {
		__drm_memcpy_from_wc(dst->is_iomem ?
				     (void __force *)dst->vaddr_iomem :
				     dst->vaddr,
				     src->is_iomem ?
				     (void const __force *)src->vaddr_iomem :
				     src->vaddr,
				     len);
		return;
	}

	memcpy_fallback(dst, src, len);
}
EXPORT_SYMBOL(drm_memcpy_from_wc);
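
/*
 * A usage sketch (illustrative only): copy from a write-combined BO
 * mapping into system memory, wrapping both sides in struct iosys_map as
 * the API expects. map_wc, dst_buf and size are hypothetical.
 *
 *	struct iosys_map sys_map;
 *
 *	iosys_map_set_vaddr(&sys_map, dst_buf);
 *	drm_memcpy_from_wc(&sys_map, &map_wc, size);
 */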

/*
 * drm_memcpy_init_early - One-time initialization of the WC memcpy code
 */
void drm_memcpy_init_early(void)
{
	/*
	 * Some hypervisors (e.g. KVM) don't support emulation of
	 * VEX-prefixed instructions, so don't enable movntdqa in
	 * hypervisor guests.
	 */
	if (static_cpu_has(X86_FEATURE_XMM4_1) &&
	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_enable(&has_movntdqa);
}
#else
void drm_memcpy_from_wc(struct iosys_map *dst,
			const struct iosys_map *src,
			unsigned long len)
{
	WARN_ON(in_interrupt());

	memcpy_fallback(dst, src, len);
}
EXPORT_SYMBOL(drm_memcpy_from_wc);

void drm_memcpy_init_early(void)
{
}
#endif /* CONFIG_X86 */