/*	$NetBSD: elf_support.h,v 1.1 2018/03/29 13:23:40 joerg Exp $	*/

/*-
 * Copyright (c) 2000 Eduardo Horvath.
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _SPARC64_ELF_SUPPORT_H
#define _SPARC64_ELF_SUPPORT_H

#ifdef __arch64__
/*
 * Create a jump to the location `target` starting at `where`.
 * This requires up to 6 instructions.
 * The first instruction is written last as it replaces a branch
 * in the PLT during lazy binding.
 * The resulting code can trash %g1 and %g5.
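 * Each word is flushed from the instruction cache (iflush) as soon as
 * it is stored, so a CPU binding lazily never fetches a stale or
 * half-written stub.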
 */
static inline void
sparc_write_branch(void *where_, void *target)
{
	const unsigned int BAA     = 0x30800000U; /* ba,a  (offset / 4) */
	const unsigned int SETHI   = 0x03000000U; /* sethi %hi(0), %g1 */
	const unsigned int JMP     = 0x81c06000U; /* jmpl  %g1+%lo(0), %g0 */
	const unsigned int OR      = 0x82106000U; /* or    %g1, 0, %g1 */
	const unsigned int XOR     = 0x82186000U; /* xor   %g1, 0, %g1 */
	const unsigned int MOV71   = 0x8213e000U; /* or    %o7, 0, %g1 */
	const unsigned int MOV17   = 0x9e106000U; /* or    %g1, 0, %o7 */
	const unsigned int CALL    = 0x40000000U; /* call  0 */
	const unsigned int SLLX    = 0x83287000U; /* sllx  %g1, 0, %g1 */
	const unsigned int NEG     = 0x82200001U; /* neg   %g1 */
	const unsigned int SETHIG5 = 0x0b000000U; /* sethi %hi(0), %g5 */
	const unsigned int ORG5    = 0x82104005U; /* or    %g1, %g5, %g1 */

	unsigned int *where = (unsigned int *)where_;
	unsigned long value = (unsigned long)target;
	unsigned long offset = value - (unsigned long)where;

#define	HIVAL(v, s)	(((v) >> (s)) & 0x003fffffU)
#define	LOVAL(v, s)	(((v) >> (s)) & 0x000003ffU)
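	/*
	 * HIVAL extracts the 22bit field of a sethi, LOVAL the low 10 bits
	 * that fit the simm13 of the or/jmpl paired with that sethi.
	 */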
	if (offset + 0x800000 <= 0xfffffc) {
		/* Displacement is within 8MB, use a direct branch. */
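		/*
		 * ba,a carries a 22bit word displacement (+-8MB) and
		 * annuls its delay slot, so a single word suffices.
		 */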
		where[0] = BAA | ((offset >> 2) & 0x3fffff);
		__asm volatile("iflush %0+0" : : "r" (where));
		return;
	}

	if (value <= 0xffffffffUL) {
		/*
		 * The absolute address is a 32bit value.
		 * This can be encoded as:
		 *	sethi	%hi(value), %g1
		 *	jmp	%g1+%lo(value)
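		 *
		 * sethi fills bits 31..10 of %g1 and the jmpl immediate
		 * supplies the low 10 bits; the upper half is known zero.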
		 */
		where[1] = JMP   | LOVAL(value, 0);
		__asm volatile("iflush %0+4" : : "r" (where));
		where[0] = SETHI | HIVAL(value, 10);
		__asm volatile("iflush %0+0" : : "r" (where));
		return;
	}

	if (value >= 0xffffffff00000000UL) {
		/*
		 * The top 32bit address range can be encoded as:
		 *	sethi	%hix(addr), %g1
		 *	xor	%g1, %lox(addr), %g1
		 *	jmp	%g1
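		 *
		 * %hix(addr) is %hi(~addr) and %lox(addr) is
		 * (addr & 0x3ff) | 0x1c00, a negative simm13; xor-ing the
		 * two recreates the sign-extended 64bit address.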
		 */
		where[2] = JMP;
		where[1] = XOR | (value & 0x00003ff) | 0x1c00;
		__asm volatile("iflush %0+4" : : "r" (where));
		__asm volatile("iflush %0+8" : : "r" (where));
		where[0] = SETHI | HIVAL(~value, 10);
		__asm volatile("iflush %0+0" : : "r" (where));
		return;
	}

	if ((offset - 4) + 0x80000000UL <= 0xfffffffcUL) {
		/*
		 * Displacement of the second instruction is within
		 * +-2GB. This can use a direct call instruction:
		 *	mov	%o7, %g1
		 *	call	(value - .)
		 *	 mov	%g1, %o7
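		 *
		 * call overwrites %o7 with its own address, so %o7 is
		 * saved to %g1 first and restored in the delay slot
		 * before the target starts executing.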
		 */
		where[1] = CALL | (((offset - 4) >> 2) & 0x3fffffffU);
		where[2] = MOV17;
		__asm volatile("iflush %0+4" : : "r" (where));
		__asm volatile("iflush %0+8" : : "r" (where));
		where[0] = MOV71;
		__asm volatile("iflush %0+0" : : "r" (where));
		return;
	}

	if (value < 0x100000000000UL) {
		/*
		 * The absolute address is a 44bit value.
		 * This can be encoded as:
		 *	sethi	%h44(addr), %g1
		 *	or	%g1, %m44(addr), %g1
		 *	sllx	%g1, 12, %g1
		 *	jmp	%g1+%l44(addr)
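		 *
		 * %h44/%m44/%l44 split the 44bit address into 22 + 10 + 12
		 * bits; the low 12 bits travel in the jmpl immediate.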
		 */
		where[1] = OR    | LOVAL(value, 12);
		where[2] = SLLX  | 12;
		where[3] = JMP   | (value & 0x0fff);
		__asm volatile("iflush %0+4" : : "r" (where));
		__asm volatile("iflush %0+8" : : "r" (where));
		__asm volatile("iflush %0+12" : : "r" (where));
		where[0] = SETHI | HIVAL(value, 22);
		__asm volatile("iflush %0+0" : : "r" (where));
		return;
	}

	if (value > 0xfffff00000000000UL) {
		/*
		 * The top 44bit address range can be encoded as:
		 *	sethi	%hi((-addr)>>12), %g1
		 *	or	%g1, %lo((-addr)>>12), %g1
		 *	neg	%g1
		 *	sllx	%g1, 12, %g1
		 *	jmp	%g1+(addr&0x0fff)
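		 *
		 * %g1 is built from the negated address shifted right by
		 * 12 so the constant fits the sethi/or pattern; the +1 on
		 * the or compensates for the low 12 bits the jmp adds back.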
		 */
		unsigned long neg = (-value)>>12;
		where[1] = OR    | (LOVAL(neg, 0)+1);
		where[2] = NEG;
		where[3] = SLLX  | 12;
		where[4] = JMP   | (value & 0x0fff);
		__asm volatile("iflush %0+4" : : "r" (where));
		__asm volatile("iflush %0+8" : : "r" (where));
		__asm volatile("iflush %0+12" : : "r" (where));
		__asm volatile("iflush %0+16" : : "r" (where));
		where[0] = SETHI | HIVAL(neg, 10);
		__asm volatile("iflush %0+0" : : "r" (where));
		return;
	}

	/*
	 * The general case of a 64bit address is encoded as:
	 *	sethi	%hh(addr), %g1
	 *	sethi	%lm(addr), %g5
	 *	or	%g1, %hm(addr), %g1
	 *	sllx	%g1, 32, %g1
	 *	or	%g1, %g5, %g1
	 *	jmp	%g1+%lo(addr)
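	 *
	 * The high 32 bits are assembled in %g1 while bits 31..10 are
	 * staged in %g5 and merged after the shift, which is why both
	 * registers may be trashed.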
	 */
	where[1] = SETHIG5 | HIVAL(value, 10);
	where[2] = OR      | LOVAL(value, 32);
	where[3] = SLLX    | 32;
	where[4] = ORG5;
	where[5] = JMP     | LOVAL(value, 0);
	__asm volatile("iflush %0+4" : : "r" (where));
	__asm volatile("iflush %0+8" : : "r" (where));
	__asm volatile("iflush %0+12" : : "r" (where));
	__asm volatile("iflush %0+16" : : "r" (where));
	__asm volatile("iflush %0+20" : : "r" (where));
	where[0] = SETHI   | HIVAL(value, 42);
	__asm volatile("iflush %0+0" : : "r" (where));
#undef	HIVAL
#undef	LOVAL
}
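
/*
 * Minimal usage sketch (hypothetical, not part of the original header):
 * a JMP_SLOT-style fixup resolves the target address and then patches
 * the PLT entry in place, e.g.
 *
 *	unsigned int *pltent = ...;	(address of the PLT entry, assumed)
 *	void *resolved = ...;		(address the entry should reach)
 *	sparc_write_branch(pltent, resolved);
 *
 * sparc_write_branch performs the needed iflush instructions itself,
 * so callers need no additional cache maintenance.
 */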
#else
#include <sparc/elf_support.h>
#endif
#endif