xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/i915/i915_memcpy.c (revision 9c351cd577713afdb6755fa574728be2cd2a19fa)
1*9c351cd5Sriastradh /*	$NetBSD: i915_memcpy.c,v 1.4 2021/12/19 11:33:49 riastradh Exp $	*/
24e390cabSriastradh 
34e390cabSriastradh /*
44e390cabSriastradh  * Copyright © 2016 Intel Corporation
54e390cabSriastradh  *
64e390cabSriastradh  * Permission is hereby granted, free of charge, to any person obtaining a
74e390cabSriastradh  * copy of this software and associated documentation files (the "Software"),
84e390cabSriastradh  * to deal in the Software without restriction, including without limitation
94e390cabSriastradh  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
104e390cabSriastradh  * and/or sell copies of the Software, and to permit persons to whom the
114e390cabSriastradh  * Software is furnished to do so, subject to the following conditions:
124e390cabSriastradh  *
134e390cabSriastradh  * The above copyright notice and this permission notice (including the next
144e390cabSriastradh  * paragraph) shall be included in all copies or substantial portions of the
154e390cabSriastradh  * Software.
164e390cabSriastradh  *
174e390cabSriastradh  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
184e390cabSriastradh  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
194e390cabSriastradh  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
204e390cabSriastradh  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
214e390cabSriastradh  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
224e390cabSriastradh  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
234e390cabSriastradh  * IN THE SOFTWARE.
244e390cabSriastradh  *
254e390cabSriastradh  */
264e390cabSriastradh 
274e390cabSriastradh #include <sys/cdefs.h>
28*9c351cd5Sriastradh __KERNEL_RCSID(0, "$NetBSD: i915_memcpy.c,v 1.4 2021/12/19 11:33:49 riastradh Exp $");
294e390cabSriastradh 
304e390cabSriastradh #include <linux/kernel.h>
314e390cabSriastradh #include <asm/fpu/api.h>
324e390cabSriastradh 
334e390cabSriastradh #include "i915_memcpy.h"
344e390cabSriastradh 
35*9c351cd5Sriastradh #include <linux/nbsd-namespace.h>
36*9c351cd5Sriastradh 
374e390cabSriastradh #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
384e390cabSriastradh #define CI_BUG_ON(expr) BUG_ON(expr)
394e390cabSriastradh #else
404e390cabSriastradh #define CI_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
414e390cabSriastradh #endif
424e390cabSriastradh 
434e390cabSriastradh static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
444e390cabSriastradh 
454e390cabSriastradh #ifdef CONFIG_AS_MOVNTDQA
/*
 * __memcpy_ntdqa - streaming copy using non-temporal loads and *aligned*
 * stores.
 * @dst: destination, must be 16-byte aligned (movaps faults otherwise)
 * @src: source, must be 16-byte aligned (movntdqa requires alignment)
 * @len: number of 16-byte chunks to copy — NOT bytes; the caller
 *       (i915_memcpy_from_wc) passes len >> 4
 *
 * movntdqa is a non-temporal (streaming) load: on WC memory it pulls whole
 * cache lines through the streaming-load buffers without polluting the
 * cache, which is dramatically faster than ordinary uncached reads.
 *
 * NOTE(review): %xmm0-%xmm3 are not listed as clobbers in the asm
 * constraints; this relies on kernel_fpu_begin()/kernel_fpu_end() owning
 * the FPU state for the whole section — confirm that holds on this port.
 */
static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
{
	kernel_fpu_begin();

	/* Main loop: four 16-byte chunks (64 bytes) per iteration. */
	while (len >= 4) {
		asm("movntdqa   (%0), %%xmm0\n"
		    "movntdqa 16(%0), %%xmm1\n"
		    "movntdqa 32(%0), %%xmm2\n"
		    "movntdqa 48(%0), %%xmm3\n"
		    "movaps %%xmm0,   (%1)\n"
		    "movaps %%xmm1, 16(%1)\n"
		    "movaps %%xmm2, 32(%1)\n"
		    "movaps %%xmm3, 48(%1)\n"
		    :: "r" (src), "r" (dst) : "memory");
		/* void * arithmetic is a GCC extension (1-byte units). */
		src += 64;
		dst += 64;
		len -= 4;
	}
	/* Tail: remaining chunks, one 16-byte transfer at a time. */
	while (len--) {
		asm("movntdqa (%0), %%xmm0\n"
		    "movaps %%xmm0, (%1)\n"
		    :: "r" (src), "r" (dst) : "memory");
		src += 16;
		dst += 16;
	}

	kernel_fpu_end();
}
744e390cabSriastradh 
/*
 * __memcpy_ntdqu - streaming copy using non-temporal loads and *unaligned*
 * stores.
 * @dst: destination; may be unaligned (movups tolerates any alignment)
 * @src: source, must be 16-byte aligned (movntdqa requires alignment)
 * @len: number of 16-byte chunks to copy — NOT bytes; the caller
 *       (i915_unaligned_memcpy_from_wc) passes DIV_ROUND_UP(len, 16),
 *       so the last chunk may read up to 15 bytes past the logical end
 *
 * Identical to __memcpy_ntdqa() except the stores are movups instead of
 * movaps, trading a little store bandwidth for an unaligned-capable dst.
 *
 * NOTE(review): %xmm0-%xmm3 are not listed as clobbers; relies on
 * kernel_fpu_begin()/kernel_fpu_end() owning FPU state — confirm.
 */
static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len)
{
	kernel_fpu_begin();

	/* Main loop: four 16-byte chunks (64 bytes) per iteration. */
	while (len >= 4) {
		asm("movntdqa   (%0), %%xmm0\n"
		    "movntdqa 16(%0), %%xmm1\n"
		    "movntdqa 32(%0), %%xmm2\n"
		    "movntdqa 48(%0), %%xmm3\n"
		    "movups %%xmm0,   (%1)\n"
		    "movups %%xmm1, 16(%1)\n"
		    "movups %%xmm2, 32(%1)\n"
		    "movups %%xmm3, 48(%1)\n"
		    :: "r" (src), "r" (dst) : "memory");
		src += 64;
		dst += 64;
		len -= 4;
	}
	/* Tail: remaining chunks, one 16-byte transfer at a time. */
	while (len--) {
		asm("movntdqa (%0), %%xmm0\n"
		    "movups %%xmm0, (%1)\n"
		    :: "r" (src), "r" (dst) : "memory");
		src += 16;
		dst += 16;
	}

	kernel_fpu_end();
}
1034e390cabSriastradh #else
/*
 * Stubs for toolchains whose assembler cannot emit MOVNTDQA.  They are
 * intentionally empty: has_movntdqa is only ever enabled inside
 * #ifdef CONFIG_AS_MOVNTDQA in i915_memcpy_init_early(), so
 * i915_memcpy_from_wc() returns false before __memcpy_ntdqa() is reached;
 * presumably callers likewise gate i915_unaligned_memcpy_from_wc() on
 * i915_has_memcpy_from_wc() (see its CI_BUG_ON) — verify against callers.
 */
static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) {}
static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len) {}
1064e390cabSriastradh #endif
1074e390cabSriastradh 
1084e390cabSriastradh /**
1094e390cabSriastradh  * i915_memcpy_from_wc: perform an accelerated *aligned* read from WC
1104e390cabSriastradh  * @dst: destination pointer
1114e390cabSriastradh  * @src: source pointer
1124e390cabSriastradh  * @len: how many bytes to copy
1134e390cabSriastradh  *
1144e390cabSriastradh  * i915_memcpy_from_wc copies @len bytes from @src to @dst using
1154e390cabSriastradh  * non-temporal instructions where available. Note that all arguments
1164e390cabSriastradh  * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple
1174e390cabSriastradh  * of 16.
1184e390cabSriastradh  *
1194e390cabSriastradh  * To test whether accelerated reads from WC are supported, use
1204e390cabSriastradh  * i915_memcpy_from_wc(NULL, NULL, 0);
1214e390cabSriastradh  *
1224e390cabSriastradh  * Returns true if the copy was successful, false if the preconditions
1234e390cabSriastradh  * are not met.
1244e390cabSriastradh  */
i915_memcpy_from_wc(void * dst,const void * src,unsigned long len)1254e390cabSriastradh bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
1264e390cabSriastradh {
1274e390cabSriastradh 	if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
1284e390cabSriastradh 		return false;
1294e390cabSriastradh 
1304e390cabSriastradh 	if (static_branch_likely(&has_movntdqa)) {
1314e390cabSriastradh 		if (likely(len))
1324e390cabSriastradh 			__memcpy_ntdqa(dst, src, len >> 4);
1334e390cabSriastradh 		return true;
1344e390cabSriastradh 	}
1354e390cabSriastradh 
1364e390cabSriastradh 	return false;
1374e390cabSriastradh }
1384e390cabSriastradh 
1394e390cabSriastradh /**
1404e390cabSriastradh  * i915_unaligned_memcpy_from_wc: perform a mostly accelerated read from WC
1414e390cabSriastradh  * @dst: destination pointer
1424e390cabSriastradh  * @src: source pointer
1434e390cabSriastradh  * @len: how many bytes to copy
1444e390cabSriastradh  *
1454e390cabSriastradh  * Like i915_memcpy_from_wc(), the unaligned variant copies @len bytes from
1464e390cabSriastradh  * @src to @dst using * non-temporal instructions where available, but
1474e390cabSriastradh  * accepts that its arguments may not be aligned, but are valid for the
1484e390cabSriastradh  * potential 16-byte read past the end.
1494e390cabSriastradh  */
/**
 * i915_unaligned_memcpy_from_wc - mostly-accelerated read from WC memory.
 * @dst: destination pointer (any alignment)
 * @src: source pointer (any alignment)
 * @len: byte count
 *
 * Copies a plain-memcpy head until @src reaches 16-byte alignment, then
 * streams the remainder with non-temporal loads.  Because the bulk copy
 * rounds the final chunk up to 16 bytes, @src must remain valid for a
 * 16-byte read past the end of the buffer.
 */
void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len)
{
	const unsigned long addr = (unsigned long)src;

	CI_BUG_ON(!i915_has_memcpy_from_wc());

	/* Peel off the unaligned head with an ordinary memcpy. */
	if (!IS_ALIGNED(addr, 16)) {
		const unsigned long head = min(ALIGN(addr, 16) - addr, len);

		memcpy(dst, src, head);
		dst += head;
		src += head;
		len -= head;
	}

	/* Stream the 16-byte-aligned remainder, rounding up to whole chunks. */
	if (likely(len))
		__memcpy_ntdqu(dst, src, DIV_ROUND_UP(len, 16));
}
1704e390cabSriastradh 
/**
 * i915_memcpy_init_early - detect MOVNTDQA support once at driver init.
 * @dev_priv: device private (currently unused by this function)
 *
 * Flips the has_movntdqa static branch when the CPU advertises SSE4.1
 * and we are running on bare metal.  Compiles to an empty function when
 * the assembler cannot emit MOVNTDQA.
 */
void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_AS_MOVNTDQA
	/* movntdqa needs SSE4.1. */
	if (!static_cpu_has(X86_FEATURE_XMM4_1))
		return;

	/*
	 * Some hypervisors (e.g. KVM) don't support VEX-prefix instruction
	 * emulation, so leave movntdqa disabled in a hypervisor guest.
	 */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	static_branch_enable(&has_movntdqa);
#endif
}
183