xref: /openbsd-src/gnu/llvm/libunwind/src/UnwindRegistersSave.S (revision 202cdb0e0a5b97857d0b77e650500ce112f967da)
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "assembly.h"
10
11#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
12#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
13
14#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
15#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63
16
17#if defined(_AIX)
18    .toc
19#else
20    .text
21#endif
22
23#if !defined(__USING_SJLJ_EXCEPTIONS__)
24
25#if defined(__i386__)
26
27#
28# extern int __unw_getcontext(unw_context_t* thread_state)
29#
30# On entry:
31#   +                       +
32#   +-----------------------+
33#   + thread_state pointer  +
34#   +-----------------------+
35#   + return address        +
36#   +-----------------------+   <-- SP
37#   +                       +
38#
39DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
40
41  _LIBUNWIND_CET_ENDBR
# %eax is pushed so it can serve as the context pointer; its entry value
# is recovered from the stack slot just before returning.
42  push  %eax
43  movl  8(%esp), %eax
44  movl  %ebx,  4(%eax)
45  movl  %ecx,  8(%eax)
46  movl  %edx, 12(%eax)
47  movl  %edi, 16(%eax)
48  movl  %esi, 20(%eax)
49  movl  %ebp, 24(%eax)
# +8 undoes the push above and the return address, recovering the
# caller's stack pointer at the call site.
50  movl  %esp, %edx
51  addl  $8, %edx
52  movl  %edx, 28(%eax)  # store what sp was at call site as esp
53  # skip ss
54  # skip eflags
55  movl  4(%esp), %edx
56  movl  %edx, 40(%eax)  # store return address as eip
57  # skip cs
58  # skip ds
59  # skip es
60  # skip fs
61  # skip gs
62  movl  (%esp), %edx
63  movl  %edx, (%eax)  # store original eax
64  popl  %eax
65  xorl  %eax, %eax    # return UNW_ESUCCESS
66  ret
67
68#elif defined(__x86_64__)
69
70#
71# extern int __unw_getcontext(unw_context_t* thread_state)
72#
73# On entry:
74#  thread_state pointer is in rdi
75#
76DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
# The first integer argument register differs between the Windows x64
# ABI (rcx) and the SysV AMD64 ABI (rdi); TMP is a matching scratch
# register that is volatile in the respective ABI.
77#if defined(_WIN64)
78#define PTR %rcx
79#define TMP %rdx
80#else
81#define PTR %rdi
82#define TMP %rsi
83#endif
84
85  _LIBUNWIND_CET_ENDBR
86  movq  %rax,   (PTR)
87  movq  %rbx,  8(PTR)
88  movq  %rcx, 16(PTR)
89  movq  %rdx, 24(PTR)
90  movq  %rdi, 32(PTR)
91  movq  %rsi, 40(PTR)
92  movq  %rbp, 48(PTR)
# The stored rsp is adjusted by 8 to skip the return address, yielding
# the stack pointer as it was at the call site.
93  movq  %rsp, 56(PTR)
94  addq  $8,   56(PTR)
95  movq  %r8,  64(PTR)
96  movq  %r9,  72(PTR)
97  movq  %r10, 80(PTR)
98  movq  %r11, 88(PTR)
99  movq  %r12, 96(PTR)
100  movq  %r13,104(PTR)
101  movq  %r14,112(PTR)
102  movq  %r15,120(PTR)
103  movq  (%rsp),TMP
104  movq  TMP,128(PTR) # store return address as rip
105  # skip rflags
106  # skip cs
107  # skip fs
108  # skip gs
109
# xmm6-xmm15 are callee-saved under the Windows x64 ABI, so the full
# xmm set is captured there; SysV treats all xmm registers as volatile.
110#if defined(_WIN64)
111  movdqu %xmm0,176(PTR)
112  movdqu %xmm1,192(PTR)
113  movdqu %xmm2,208(PTR)
114  movdqu %xmm3,224(PTR)
115  movdqu %xmm4,240(PTR)
116  movdqu %xmm5,256(PTR)
117  movdqu %xmm6,272(PTR)
118  movdqu %xmm7,288(PTR)
119  movdqu %xmm8,304(PTR)
120  movdqu %xmm9,320(PTR)
121  movdqu %xmm10,336(PTR)
122  movdqu %xmm11,352(PTR)
123  movdqu %xmm12,368(PTR)
124  movdqu %xmm13,384(PTR)
125  movdqu %xmm14,400(PTR)
126  movdqu %xmm15,416(PTR)
127#endif
128  xorl  %eax, %eax    # return UNW_ESUCCESS
129  ret
130
131#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32
132
133#
134# extern int __unw_getcontext(unw_context_t* thread_state)
135#
136# On entry:
137#  thread_state pointer is in a0 ($4)
138#
139DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
# Save all GPRs $1-$31 at word slots 1-31 of the context ($4 = a0 =
# context pointer).  noat/noreorder/nomacro let $1 (the assembler
# temporary) be stored and keep the delay-slot scheduling explicit.
140  .set push
141  .set noat
142  .set noreorder
143  .set nomacro
144  sw    $1, (4 * 1)($4)
145  sw    $2, (4 * 2)($4)
146  sw    $3, (4 * 3)($4)
147  sw    $4, (4 * 4)($4)
148  sw    $5, (4 * 5)($4)
149  sw    $6, (4 * 6)($4)
150  sw    $7, (4 * 7)($4)
151  sw    $8, (4 * 8)($4)
152  sw    $9, (4 * 9)($4)
153  sw    $10, (4 * 10)($4)
154  sw    $11, (4 * 11)($4)
155  sw    $12, (4 * 12)($4)
156  sw    $13, (4 * 13)($4)
157  sw    $14, (4 * 14)($4)
158  sw    $15, (4 * 15)($4)
159  sw    $16, (4 * 16)($4)
160  sw    $17, (4 * 17)($4)
161  sw    $18, (4 * 18)($4)
162  sw    $19, (4 * 19)($4)
163  sw    $20, (4 * 20)($4)
164  sw    $21, (4 * 21)($4)
165  sw    $22, (4 * 22)($4)
166  sw    $23, (4 * 23)($4)
167  sw    $24, (4 * 24)($4)
168  sw    $25, (4 * 25)($4)
169  sw    $26, (4 * 26)($4)
170  sw    $27, (4 * 27)($4)
171  sw    $28, (4 * 28)($4)
172  sw    $29, (4 * 29)($4)
173  sw    $30, (4 * 30)($4)
174  sw    $31, (4 * 31)($4)
175  # Store return address ($31 = ra) to pc
176  sw    $31, (4 * 32)($4)
177  # hi and lo
178  mfhi  $8
179  sw    $8,  (4 * 33)($4)
180  mflo  $8
181  sw    $8,  (4 * 34)($4)
182#ifdef __mips_hard_float
# With 32-bit FP registers (__mips_fpr != 64) only the even-numbered
# registers exist as 64-bit even/odd pairs, so only even ones are saved.
183#if __mips_fpr != 64
184  sdc1  $f0, (4 * 36 + 8 * 0)($4)
185  sdc1  $f2, (4 * 36 + 8 * 2)($4)
186  sdc1  $f4, (4 * 36 + 8 * 4)($4)
187  sdc1  $f6, (4 * 36 + 8 * 6)($4)
188  sdc1  $f8, (4 * 36 + 8 * 8)($4)
189  sdc1  $f10, (4 * 36 + 8 * 10)($4)
190  sdc1  $f12, (4 * 36 + 8 * 12)($4)
191  sdc1  $f14, (4 * 36 + 8 * 14)($4)
192  sdc1  $f16, (4 * 36 + 8 * 16)($4)
193  sdc1  $f18, (4 * 36 + 8 * 18)($4)
194  sdc1  $f20, (4 * 36 + 8 * 20)($4)
195  sdc1  $f22, (4 * 36 + 8 * 22)($4)
196  sdc1  $f24, (4 * 36 + 8 * 24)($4)
197  sdc1  $f26, (4 * 36 + 8 * 26)($4)
198  sdc1  $f28, (4 * 36 + 8 * 28)($4)
199  sdc1  $f30, (4 * 36 + 8 * 30)($4)
200#else
201  sdc1  $f0, (4 * 36 + 8 * 0)($4)
202  sdc1  $f1, (4 * 36 + 8 * 1)($4)
203  sdc1  $f2, (4 * 36 + 8 * 2)($4)
204  sdc1  $f3, (4 * 36 + 8 * 3)($4)
205  sdc1  $f4, (4 * 36 + 8 * 4)($4)
206  sdc1  $f5, (4 * 36 + 8 * 5)($4)
207  sdc1  $f6, (4 * 36 + 8 * 6)($4)
208  sdc1  $f7, (4 * 36 + 8 * 7)($4)
209  sdc1  $f8, (4 * 36 + 8 * 8)($4)
210  sdc1  $f9, (4 * 36 + 8 * 9)($4)
211  sdc1  $f10, (4 * 36 + 8 * 10)($4)
212  sdc1  $f11, (4 * 36 + 8 * 11)($4)
213  sdc1  $f12, (4 * 36 + 8 * 12)($4)
214  sdc1  $f13, (4 * 36 + 8 * 13)($4)
215  sdc1  $f14, (4 * 36 + 8 * 14)($4)
216  sdc1  $f15, (4 * 36 + 8 * 15)($4)
217  sdc1  $f16, (4 * 36 + 8 * 16)($4)
218  sdc1  $f17, (4 * 36 + 8 * 17)($4)
219  sdc1  $f18, (4 * 36 + 8 * 18)($4)
220  sdc1  $f19, (4 * 36 + 8 * 19)($4)
221  sdc1  $f20, (4 * 36 + 8 * 20)($4)
222  sdc1  $f21, (4 * 36 + 8 * 21)($4)
223  sdc1  $f22, (4 * 36 + 8 * 22)($4)
224  sdc1  $f23, (4 * 36 + 8 * 23)($4)
225  sdc1  $f24, (4 * 36 + 8 * 24)($4)
226  sdc1  $f25, (4 * 36 + 8 * 25)($4)
227  sdc1  $f26, (4 * 36 + 8 * 26)($4)
228  sdc1  $f27, (4 * 36 + 8 * 27)($4)
229  sdc1  $f28, (4 * 36 + 8 * 28)($4)
230  sdc1  $f29, (4 * 36 + 8 * 29)($4)
231  sdc1  $f30, (4 * 36 + 8 * 30)($4)
232  sdc1  $f31, (4 * 36 + 8 * 31)($4)
233#endif
234#endif
235  jr	$31
  # the next instruction executes in the jr delay slot (.set noreorder)
236  # return UNW_ESUCCESS
237  or    $2, $0, $0
238  .set pop
239
240#elif defined(__mips64)
241
242#
243# extern int __unw_getcontext(unw_context_t* thread_state)
244#
245# On entry:
246#  thread_state pointer is in a0 ($4)
247#
248DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
# Save GPRs $1-$31 to doubleword slots 1-31 of the context ($4 = a0 =
# context pointer), using an .irp loop instead of 31 explicit stores.
249  .set push
250  .set noat
251  .set noreorder
252  .set nomacro
253  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
254    sd $\i, (8 * \i)($4)
255  .endr
256  # Store return address ($31 = ra) to pc
257  sd    $31, (8 * 32)($4)
258  # hi and lo
259  mfhi  $8
260  sd    $8,  (8 * 33)($4)
261  mflo  $8
262  sd    $8,  (8 * 34)($4)
263#ifdef __mips_hard_float
# FP registers start at byte offset 280 (= 8 * 35), after pc/hi/lo.
264  .irp i,FROM_0_TO_31
265    sdc1 $f\i, (280+8*\i)($4)
266  .endr
267#endif
268  jr	$31
  # the next instruction executes in the jr delay slot (.set noreorder)
269  # return UNW_ESUCCESS
270  or    $2, $0, $0
271  .set pop
272
273# elif defined(__mips__)
274
275#
276# extern int __unw_getcontext(unw_context_t* thread_state)
277#
278# Just trap for the time being.
279DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
# Unsupported MIPS ABI: trap unconditionally ($0 == $0 is always true)
# rather than silently returning a bogus context.
280  teq $0, $0
281
282#elif defined(__powerpc64__)
283
284//
285// extern int __unw_getcontext(unw_context_t* thread_state)
286//
287// On entry:
288//  thread_state pointer is in r3
289//
290#if defined(_AIX)
291DEFINE_LIBUNWIND_FUNCTION_AND_WEAK_ALIAS(__unw_getcontext, unw_getcontext)
292#else
293DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
294#endif
295// store register (GPR); GPR n lives at doubleword slot n+2 of the context
296#define PPC64_STR(n) \
297  std   n, (8 * (n + 2))(3)
298
299  // save GPRs
300  PPC64_STR(0)
301  mflr  0
302  std   0, PPC64_OFFS_SRR0(3) // store lr as srr0 (resume pc)
303  PPC64_STR(1)
304  PPC64_STR(2)
305  PPC64_STR(3)
306  PPC64_STR(4)
307  PPC64_STR(5)
308  PPC64_STR(6)
309  PPC64_STR(7)
310  PPC64_STR(8)
311  PPC64_STR(9)
312  PPC64_STR(10)
313  PPC64_STR(11)
314  PPC64_STR(12)
315  PPC64_STR(13)
316  PPC64_STR(14)
317  PPC64_STR(15)
318  PPC64_STR(16)
319  PPC64_STR(17)
320  PPC64_STR(18)
321  PPC64_STR(19)
322  PPC64_STR(20)
323  PPC64_STR(21)
324  PPC64_STR(22)
325  PPC64_STR(23)
326  PPC64_STR(24)
327  PPC64_STR(25)
328  PPC64_STR(26)
329  PPC64_STR(27)
330  PPC64_STR(28)
331  PPC64_STR(29)
332  PPC64_STR(30)
333  PPC64_STR(31)
334
// save special-purpose registers (r0 is free as scratch; its entry
// value was stored first above)
335  mfcr  0
336  std   0,  PPC64_OFFS_CR(3)
337  mfxer 0
338  std   0,  PPC64_OFFS_XER(3)
339  mflr  0
340  std   0,  PPC64_OFFS_LR(3)
341  mfctr 0
342  std   0,  PPC64_OFFS_CTR(3)
343  mfvrsave    0
344  std   0,  PPC64_OFFS_VRSAVE(3)
345
346#if defined(__VSX__)
347  // save VS registers
348  // (note that this also saves floating point registers and V registers,
349  // because part of VS is mapped to these registers)
350
  // r4 walks through the FP/VS area of the context in 16-byte steps
351  addi  4, 3, PPC64_OFFS_FP
352
353// store VS register
354#ifdef __LITTLE_ENDIAN__
355// For little-endian targets, we need a swap since stxvd2x will store the
356// register in the incorrect doubleword order.
357// FIXME: when supporting targets older than Power9 on LE is no longer required
358//        this can be changed to simply `stxv n, 16 * n(4)`.
359#define PPC64_STVS(n)      \
360  xxswapd n, n            ;\
361  stxvd2x n, 0, 4         ;\
362  addi    4, 4, 16
363#else
364#define PPC64_STVS(n)      \
365  stxvd2x n, 0, 4         ;\
366  addi    4, 4, 16
367#endif
368
369  PPC64_STVS(0)
370  PPC64_STVS(1)
371  PPC64_STVS(2)
372  PPC64_STVS(3)
373  PPC64_STVS(4)
374  PPC64_STVS(5)
375  PPC64_STVS(6)
376  PPC64_STVS(7)
377  PPC64_STVS(8)
378  PPC64_STVS(9)
379  PPC64_STVS(10)
380  PPC64_STVS(11)
381  PPC64_STVS(12)
382  PPC64_STVS(13)
383  PPC64_STVS(14)
384  PPC64_STVS(15)
385  PPC64_STVS(16)
386  PPC64_STVS(17)
387  PPC64_STVS(18)
388  PPC64_STVS(19)
389  PPC64_STVS(20)
390  PPC64_STVS(21)
391  PPC64_STVS(22)
392  PPC64_STVS(23)
393  PPC64_STVS(24)
394  PPC64_STVS(25)
395  PPC64_STVS(26)
396  PPC64_STVS(27)
397  PPC64_STVS(28)
398  PPC64_STVS(29)
399  PPC64_STVS(30)
400  PPC64_STVS(31)
401  PPC64_STVS(32)
402  PPC64_STVS(33)
403  PPC64_STVS(34)
404  PPC64_STVS(35)
405  PPC64_STVS(36)
406  PPC64_STVS(37)
407  PPC64_STVS(38)
408  PPC64_STVS(39)
409  PPC64_STVS(40)
410  PPC64_STVS(41)
411  PPC64_STVS(42)
412  PPC64_STVS(43)
413  PPC64_STVS(44)
414  PPC64_STVS(45)
415  PPC64_STVS(46)
416  PPC64_STVS(47)
417  PPC64_STVS(48)
418  PPC64_STVS(49)
419  PPC64_STVS(50)
420  PPC64_STVS(51)
421  PPC64_STVS(52)
422  PPC64_STVS(53)
423  PPC64_STVS(54)
424  PPC64_STVS(55)
425  PPC64_STVS(56)
426  PPC64_STVS(57)
427  PPC64_STVS(58)
428  PPC64_STVS(59)
429  PPC64_STVS(60)
430  PPC64_STVS(61)
431  PPC64_STVS(62)
432  PPC64_STVS(63)
433
434#else
435
436// store FP register
437#define PPC64_STF(n) \
438  stfd  n, (PPC64_OFFS_FP + n * 16)(3)
439
440  // save float registers
441  PPC64_STF(0)
442  PPC64_STF(1)
443  PPC64_STF(2)
444  PPC64_STF(3)
445  PPC64_STF(4)
446  PPC64_STF(5)
447  PPC64_STF(6)
448  PPC64_STF(7)
449  PPC64_STF(8)
450  PPC64_STF(9)
451  PPC64_STF(10)
452  PPC64_STF(11)
453  PPC64_STF(12)
454  PPC64_STF(13)
455  PPC64_STF(14)
456  PPC64_STF(15)
457  PPC64_STF(16)
458  PPC64_STF(17)
459  PPC64_STF(18)
460  PPC64_STF(19)
461  PPC64_STF(20)
462  PPC64_STF(21)
463  PPC64_STF(22)
464  PPC64_STF(23)
465  PPC64_STF(24)
466  PPC64_STF(25)
467  PPC64_STF(26)
468  PPC64_STF(27)
469  PPC64_STF(28)
470  PPC64_STF(29)
471  PPC64_STF(30)
472  PPC64_STF(31)
473
474#if defined(__ALTIVEC__)
475  // save vector registers
476
477  // Use 16-bytes below the stack pointer as an
478  // aligned buffer to save each vector register.
479  // Note that the stack pointer is always 16-byte aligned.
480  subi  4, 1, 16
481
// stvx requires 16-byte alignment, so bounce each vector through the
// aligned stack buffer and copy it into the (possibly unaligned)
// context with two doubleword loads/stores.
482#define PPC64_STV_UNALIGNED(n)             \
483  stvx  n, 0, 4                           ;\
484  ld    5, 0(4)                           ;\
485  std   5, (PPC64_OFFS_V + n * 16)(3)     ;\
486  ld    5, 8(4)                           ;\
487  std   5, (PPC64_OFFS_V + n * 16 + 8)(3)
488
489  PPC64_STV_UNALIGNED(0)
490  PPC64_STV_UNALIGNED(1)
491  PPC64_STV_UNALIGNED(2)
492  PPC64_STV_UNALIGNED(3)
493  PPC64_STV_UNALIGNED(4)
494  PPC64_STV_UNALIGNED(5)
495  PPC64_STV_UNALIGNED(6)
496  PPC64_STV_UNALIGNED(7)
497  PPC64_STV_UNALIGNED(8)
498  PPC64_STV_UNALIGNED(9)
499  PPC64_STV_UNALIGNED(10)
500  PPC64_STV_UNALIGNED(11)
501  PPC64_STV_UNALIGNED(12)
502  PPC64_STV_UNALIGNED(13)
503  PPC64_STV_UNALIGNED(14)
504  PPC64_STV_UNALIGNED(15)
505  PPC64_STV_UNALIGNED(16)
506  PPC64_STV_UNALIGNED(17)
507  PPC64_STV_UNALIGNED(18)
508  PPC64_STV_UNALIGNED(19)
509  PPC64_STV_UNALIGNED(20)
510  PPC64_STV_UNALIGNED(21)
511  PPC64_STV_UNALIGNED(22)
512  PPC64_STV_UNALIGNED(23)
513  PPC64_STV_UNALIGNED(24)
514  PPC64_STV_UNALIGNED(25)
515  PPC64_STV_UNALIGNED(26)
516  PPC64_STV_UNALIGNED(27)
517  PPC64_STV_UNALIGNED(28)
518  PPC64_STV_UNALIGNED(29)
519  PPC64_STV_UNALIGNED(30)
520  PPC64_STV_UNALIGNED(31)
521
522#endif
523#endif
524
525  li    3,  0   // return UNW_ESUCCESS
526  blr
527
528
529#elif defined(__powerpc__)
530
531//
532// extern int unw_getcontext(unw_context_t* thread_state)
533//
534// On entry:
535//  thread_state pointer is in r3
536//
537#if defined(_AIX)
538DEFINE_LIBUNWIND_FUNCTION_AND_WEAK_ALIAS(__unw_getcontext, unw_getcontext)
539#else
540DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
541#endif
// GPR n is stored at byte offset 8 + 4*n; offset 0 holds the resume pc.
542  stw     0,   8(3)
543  mflr    0
544  stw     0,   0(3) // store lr as srr0 (resume pc)
545  stw     1,  12(3)
546  stw     2,  16(3)
547  stw     3,  20(3)
548  stw     4,  24(3)
549  stw     5,  28(3)
550  stw     6,  32(3)
551  stw     7,  36(3)
552  stw     8,  40(3)
553  stw     9,  44(3)
554  stw     10, 48(3)
555  stw     11, 52(3)
556  stw     12, 56(3)
557  stw     13, 60(3)
558  stw     14, 64(3)
559  stw     15, 68(3)
560  stw     16, 72(3)
561  stw     17, 76(3)
562  stw     18, 80(3)
563  stw     19, 84(3)
564  stw     20, 88(3)
565  stw     21, 92(3)
566  stw     22, 96(3)
567  stw     23,100(3)
568  stw     24,104(3)
569  stw     25,108(3)
570  stw     26,112(3)
571  stw     27,116(3)
572  stw     28,120(3)
573  stw     29,124(3)
574  stw     30,128(3)
575  stw     31,132(3)
576
577#if defined(__ALTIVEC__)
578  // save VRSave register (SPR 256)
579  mfspr   0, 256
580  stw     0, 156(3)
581#endif
582  // save CR registers
583  mfcr    0
584  stw     0, 136(3)
585  // save CTR register
586  mfctr   0
587  stw     0, 148(3)
588
589#if !defined(__NO_FPRS__)
590  // save float registers
591  stfd    0, 160(3)
592  stfd    1, 168(3)
593  stfd    2, 176(3)
594  stfd    3, 184(3)
595  stfd    4, 192(3)
596  stfd    5, 200(3)
597  stfd    6, 208(3)
598  stfd    7, 216(3)
599  stfd    8, 224(3)
600  stfd    9, 232(3)
601  stfd    10,240(3)
602  stfd    11,248(3)
603  stfd    12,256(3)
604  stfd    13,264(3)
605  stfd    14,272(3)
606  stfd    15,280(3)
607  stfd    16,288(3)
608  stfd    17,296(3)
609  stfd    18,304(3)
610  stfd    19,312(3)
611  stfd    20,320(3)
612  stfd    21,328(3)
613  stfd    22,336(3)
614  stfd    23,344(3)
615  stfd    24,352(3)
616  stfd    25,360(3)
617  stfd    26,368(3)
618  stfd    27,376(3)
619  stfd    28,384(3)
620  stfd    29,392(3)
621  stfd    30,400(3)
622  stfd    31,408(3)
623#endif
624
625#if defined(__ALTIVEC__)
626  // save vector registers
627
628  subi    4, 1, 16
629  rlwinm  4, 4, 0, 0, 27  // mask low 4-bits
630  // r4 is now a 16-byte aligned pointer into the red zone
631
// stvx requires 16-byte alignment, so bounce each vector through the
// aligned red-zone buffer and copy it word-by-word into the context.
632#define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
633  stvx    _vec, 0, 4               SEPARATOR \
634  lwz     5, 0(4)                  SEPARATOR \
635  stw     5, _offset(3)            SEPARATOR \
636  lwz     5, 4(4)                  SEPARATOR \
637  stw     5, _offset+4(3)          SEPARATOR \
638  lwz     5, 8(4)                  SEPARATOR \
639  stw     5, _offset+8(3)          SEPARATOR \
640  lwz     5, 12(4)                 SEPARATOR \
641  stw     5, _offset+12(3)
642
643  SAVE_VECTOR_UNALIGNED( 0, 424+0x000)
644  SAVE_VECTOR_UNALIGNED( 1, 424+0x010)
645  SAVE_VECTOR_UNALIGNED( 2, 424+0x020)
646  SAVE_VECTOR_UNALIGNED( 3, 424+0x030)
647  SAVE_VECTOR_UNALIGNED( 4, 424+0x040)
648  SAVE_VECTOR_UNALIGNED( 5, 424+0x050)
649  SAVE_VECTOR_UNALIGNED( 6, 424+0x060)
650  SAVE_VECTOR_UNALIGNED( 7, 424+0x070)
651  SAVE_VECTOR_UNALIGNED( 8, 424+0x080)
652  SAVE_VECTOR_UNALIGNED( 9, 424+0x090)
653  SAVE_VECTOR_UNALIGNED(10, 424+0x0A0)
654  SAVE_VECTOR_UNALIGNED(11, 424+0x0B0)
655  SAVE_VECTOR_UNALIGNED(12, 424+0x0C0)
656  SAVE_VECTOR_UNALIGNED(13, 424+0x0D0)
657  SAVE_VECTOR_UNALIGNED(14, 424+0x0E0)
658  SAVE_VECTOR_UNALIGNED(15, 424+0x0F0)
659  SAVE_VECTOR_UNALIGNED(16, 424+0x100)
660  SAVE_VECTOR_UNALIGNED(17, 424+0x110)
661  SAVE_VECTOR_UNALIGNED(18, 424+0x120)
662  SAVE_VECTOR_UNALIGNED(19, 424+0x130)
663  SAVE_VECTOR_UNALIGNED(20, 424+0x140)
664  SAVE_VECTOR_UNALIGNED(21, 424+0x150)
665  SAVE_VECTOR_UNALIGNED(22, 424+0x160)
666  SAVE_VECTOR_UNALIGNED(23, 424+0x170)
667  SAVE_VECTOR_UNALIGNED(24, 424+0x180)
668  SAVE_VECTOR_UNALIGNED(25, 424+0x190)
669  SAVE_VECTOR_UNALIGNED(26, 424+0x1A0)
670  SAVE_VECTOR_UNALIGNED(27, 424+0x1B0)
671  SAVE_VECTOR_UNALIGNED(28, 424+0x1C0)
672  SAVE_VECTOR_UNALIGNED(29, 424+0x1D0)
673  SAVE_VECTOR_UNALIGNED(30, 424+0x1E0)
674  SAVE_VECTOR_UNALIGNED(31, 424+0x1F0)
675#endif
676
677  li      3, 0  // return UNW_ESUCCESS
678  blr
679
680
681#elif defined(__aarch64__)
682
683//
684// extern int __unw_getcontext(unw_context_t* thread_state)
685//
686// On entry:
687//  thread_state pointer is in x0
688//
689  .p2align 2
690DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
// x0/x1 are stored by the first pair store, so both entry values are
// captured before x1 is reused as scratch for sp below.
691  stp    x0, x1,  [x0, #0x000]
692  stp    x2, x3,  [x0, #0x010]
693  stp    x4, x5,  [x0, #0x020]
694  stp    x6, x7,  [x0, #0x030]
695  stp    x8, x9,  [x0, #0x040]
696  stp    x10,x11, [x0, #0x050]
697  stp    x12,x13, [x0, #0x060]
698  stp    x14,x15, [x0, #0x070]
699  stp    x16,x17, [x0, #0x080]
700  stp    x18,x19, [x0, #0x090]
701  stp    x20,x21, [x0, #0x0A0]
702  stp    x22,x23, [x0, #0x0B0]
703  stp    x24,x25, [x0, #0x0C0]
704  stp    x26,x27, [x0, #0x0D0]
705  stp    x28,x29, [x0, #0x0E0]
706  str    x30,     [x0, #0x0F0]
707  mov    x1,sp
708  str    x1,      [x0, #0x0F8]
709  str    x30,     [x0, #0x100]    // store return address (lr) as pc
710  // skip cpsr
711  stp    d0, d1,  [x0, #0x110]
712  stp    d2, d3,  [x0, #0x120]
713  stp    d4, d5,  [x0, #0x130]
714  stp    d6, d7,  [x0, #0x140]
715  stp    d8, d9,  [x0, #0x150]
716  stp    d10,d11, [x0, #0x160]
717  stp    d12,d13, [x0, #0x170]
718  stp    d14,d15, [x0, #0x180]
719  stp    d16,d17, [x0, #0x190]
720  stp    d18,d19, [x0, #0x1A0]
721  stp    d20,d21, [x0, #0x1B0]
722  stp    d22,d23, [x0, #0x1C0]
723  stp    d24,d25, [x0, #0x1D0]
724  stp    d26,d27, [x0, #0x1E0]
725  stp    d28,d29, [x0, #0x1F0]
726  str    d30,     [x0, #0x200]
727  str    d31,     [x0, #0x208]
728  mov    x0, #0                   // return UNW_ESUCCESS
729  ret
730
731#elif defined(__arm__) && !defined(__APPLE__)
732
733#if !defined(__ARM_ARCH_ISA_ARM)
734#if (__ARM_ARCH_ISA_THUMB == 2)
735  .syntax unified
736#endif
737  .thumb
738#endif
739
740@
741@ extern int __unw_getcontext(unw_context_t* thread_state)
742@
743@ On entry:
744@  thread_state pointer is in r0
745@
746@ Per EHABI #4.7 this only saves the core integer registers.
747@ EHABI #7.4.5 notes that in general all VRS registers should be restored
748@ however this is very hard to do for VFP registers because it is unknown
749@ to the library how many registers are implemented by the architecture.
750@ Instead, VFP registers are demand saved by logic external to __unw_getcontext.
751@
752  .p2align 2
753DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
754#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
@ Thumb-1: stm can only store low registers, so high registers are
@ shuffled through r1-r3 in two batches.
755  stm r0!, {r0-r7}
756  mov r1, r8
757  mov r2, r9
758  mov r3, r10
759  stm r0!, {r1-r3}
760  mov r1, r11
761  mov r2, sp
762  mov r3, lr
763  str r1, [r0, #0]   @ r11
764  @ r12 does not need storing, it is the intra-procedure-call scratch register
765  str r2, [r0, #8]   @ sp
766  str r3, [r0, #12]  @ lr
767  str r3, [r0, #16]  @ store return address as pc
768  @ T1 does not have a non-cpsr-clobbering register-zeroing instruction.
769  @ It is safe to use here though because we are about to return, and cpsr is
770  @ not expected to be preserved.
771  movs r0, #0        @ return UNW_ESUCCESS
772#else
773  @ 32bit thumb-2 restrictions for stm:
774  @ . the sp (r13) cannot be in the list
775  @ . the pc (r15) cannot be in the list in an STM instruction
776  stm r0, {r0-r12}
777  str sp, [r0, #52]
778  str lr, [r0, #56]
779  str lr, [r0, #60]  @ store return address as pc
780  mov r0, #0         @ return UNW_ESUCCESS
781#endif
782  JMP(lr)
783
784@
785@ static void libunwind::Registers_arm::saveVFPWithFSTMD(unw_fpreg_t* values)
786@
787@ On entry:
788@  values pointer is in r0
789@
790  .p2align 2
791#if defined(__ELF__)
792  .fpu vfpv3-d16
793#endif
@ Saves d0-d15 in FSTMD (standard doubleword) format to *values (r0).
794DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMDEPv)
795  vstmia r0, {d0-d15}
796  JMP(lr)
797
798@
799@ static void libunwind::Registers_arm::saveVFPWithFSTMX(unw_fpreg_t* values)
800@
801@ On entry:
802@  values pointer is in r0
803@
804  .p2align 2
805#if defined(__ELF__)
806  .fpu vfpv3-d16
807#endif
@ Saves d0-d15 for the legacy FSTMX format; on ARMv7+ the deprecated
@ fstmiax behaves like vstmia, so plain vstmia is used.
808DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMXEPv)
809  vstmia r0, {d0-d15} @ fstmiax is deprecated in ARMv7+ and now behaves like vstmia
810  JMP(lr)
811
812@
813@ static void libunwind::Registers_arm::saveVFPv3(unw_fpreg_t* values)
814@
815@ On entry:
816@  values pointer is in r0
817@
818  .p2align 2
819#if defined(__ELF__)
820  .fpu vfpv3
821#endif
@ Saves the upper VFPv3 bank d16-d31 to *values (r0).
822DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPv)
823  @ VFP and iwMMX instructions are only available when compiling with the flags
824  @ that enable them. We do not want to do that in the library (because we do not
825  @ want the compiler to generate instructions that access those) but this is
826  @ only accessed if the personality routine needs these registers. Use of
827  @ these registers implies they are, actually, available on the target, so
828  @ it's ok to execute.
829  @ So, generate the instructions using the corresponding coprocessor mnemonic.
830  vstmia r0, {d16-d31}
831  JMP(lr)
832
833#if defined(_LIBUNWIND_ARM_WMMX)
834
835@
836@ static void libunwind::Registers_arm::saveiWMMX(unw_fpreg_t* values)
837@
838@ On entry:
839@  values pointer is in r0
840@
841  .p2align 2
842#if defined(__ELF__)
843  .arch armv5te
844#endif
@ Saves iwMMX data registers wR0-wR15 to *values (r0), 8 bytes each,
@ using raw coprocessor stores (p1) so no iwMMX build flags are needed.
845DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveiWMMXEPv)
846  stcl p1, cr0, [r0], #8  @ wstrd wR0, [r0], #8
847  stcl p1, cr1, [r0], #8  @ wstrd wR1, [r0], #8
848  stcl p1, cr2, [r0], #8  @ wstrd wR2, [r0], #8
849  stcl p1, cr3, [r0], #8  @ wstrd wR3, [r0], #8
850  stcl p1, cr4, [r0], #8  @ wstrd wR4, [r0], #8
851  stcl p1, cr5, [r0], #8  @ wstrd wR5, [r0], #8
852  stcl p1, cr6, [r0], #8  @ wstrd wR6, [r0], #8
853  stcl p1, cr7, [r0], #8  @ wstrd wR7, [r0], #8
854  stcl p1, cr8, [r0], #8  @ wstrd wR8, [r0], #8
855  stcl p1, cr9, [r0], #8  @ wstrd wR9, [r0], #8
856  stcl p1, cr10, [r0], #8  @ wstrd wR10, [r0], #8
857  stcl p1, cr11, [r0], #8  @ wstrd wR11, [r0], #8
858  stcl p1, cr12, [r0], #8  @ wstrd wR12, [r0], #8
859  stcl p1, cr13, [r0], #8  @ wstrd wR13, [r0], #8
860  stcl p1, cr14, [r0], #8  @ wstrd wR14, [r0], #8
861  stcl p1, cr15, [r0], #8  @ wstrd wR15, [r0], #8
862  JMP(lr)
863
864@
865@ static void libunwind::Registers_arm::saveiWMMXControl(unw_uint32_t* values)
866@
867@ On entry:
868@  values pointer is in r0
869@
870  .p2align 2
871#if defined(__ELF__)
872  .arch armv5te
873#endif
@ Saves iwMMX control registers wCGR0-wCGR3 to *values (r0), 4 bytes
@ each, via raw coprocessor stores (p1).
874DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveiWMMXControlEPj)
875  stc2 p1, cr8, [r0], #4  @ wstrw wCGR0, [r0], #4
876  stc2 p1, cr9, [r0], #4  @ wstrw wCGR1, [r0], #4
877  stc2 p1, cr10, [r0], #4  @ wstrw wCGR2, [r0], #4
878  stc2 p1, cr11, [r0], #4  @ wstrw wCGR3, [r0], #4
879  JMP(lr)
880
881#endif
882
883#elif defined(__or1k__)
884
885#
886# extern int __unw_getcontext(unw_context_t* thread_state)
887#
888# On entry:
889#  thread_state pointer is in r3
890#
891DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
# Save r0-r31 at word slots 0-31 of the context (r3 = context pointer).
# r9 is the OR1K link register, so it is also stored as pc below.
892  l.sw       0(r3), r0
893  l.sw       4(r3), r1
894  l.sw       8(r3), r2
895  l.sw      12(r3), r3
896  l.sw      16(r3), r4
897  l.sw      20(r3), r5
898  l.sw      24(r3), r6
899  l.sw      28(r3), r7
900  l.sw      32(r3), r8
901  l.sw      36(r3), r9
902  l.sw      40(r3), r10
903  l.sw      44(r3), r11
904  l.sw      48(r3), r12
905  l.sw      52(r3), r13
906  l.sw      56(r3), r14
907  l.sw      60(r3), r15
908  l.sw      64(r3), r16
909  l.sw      68(r3), r17
910  l.sw      72(r3), r18
911  l.sw      76(r3), r19
912  l.sw      80(r3), r20
913  l.sw      84(r3), r21
914  l.sw      88(r3), r22
915  l.sw      92(r3), r23
916  l.sw      96(r3), r24
917  l.sw     100(r3), r25
918  l.sw     104(r3), r26
919  l.sw     108(r3), r27
920  l.sw     112(r3), r28
921  l.sw     116(r3), r29
922  l.sw     120(r3), r30
923  l.sw     124(r3), r31
924  # store ra to pc
925  l.sw     128(r3), r9
926  # zero epcr
927  l.sw     132(r3), r0
# NOTE(review): no explicit return (l.jr r9) or success-value setup is
# visible here -- confirm against upstream whether this is intentional.
928
929#elif defined(__hexagon__)
930#
931# extern int unw_getcontext(unw_context_t* thread_state)
932#
933# On entry:
934#  thread_state pointer is in r0
935#
# NOTE(review): the OFFSET macro below is defined but not used in this
# block as visible here.
936#define OFFSET(offset) (offset/4)
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
# Saves r8-r31 at word offsets 32-124 of the context (r0 = context
# pointer).  NOTE(review): r0-r7 are not stored here -- presumably the
# caller-saved lower registers are handled elsewhere; confirm upstream.
937  memw(r0+#32) = r8
938  memw(r0+#36) = r9
939  memw(r0+#40) = r10
940  memw(r0+#44) = r11
941
942  memw(r0+#48) = r12
943  memw(r0+#52) = r13
944  memw(r0+#56) = r14
945  memw(r0+#60) = r15
946
947  memw(r0+#64) = r16
948  memw(r0+#68) = r17
949  memw(r0+#72) = r18
950  memw(r0+#76) = r19
951
952  memw(r0+#80) = r20
953  memw(r0+#84) = r21
954  memw(r0+#88) = r22
955  memw(r0+#92) = r23
956
957  memw(r0+#96) = r24
958  memw(r0+#100) = r25
959  memw(r0+#104) = r26
960  memw(r0+#108) = r27
961
962  memw(r0+#112) = r28
963  memw(r0+#116) = r29
964  memw(r0+#120) = r30
965  memw(r0+#124) = r31
966  r1 = c4   // Predicate register
967  memw(r0+#128) = r1
# NOTE(review): the load of *FP into r1 below is immediately overwritten
# by r1 = r31, so only r31 (the return address) is stored at #132 --
# confirm whether the frame-pointer load is intentionally dead.
968  r1 = memw(r30)           // *FP == Saved FP
969  r1 = r31
970  memw(r0+#132) = r1
971
972  jumpr r31
974
975#elif defined(__sparc__) && defined(__arch64__)
976
977#
978# extern int __unw_getcontext(unw_context_t* thread_state)
979#
980# On entry:
981#  thread_state pointer is in %o0
982#
983DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
# Tell the assembler the application uses g2/g3/g6/g7 as scratch so it
# does not warn about these normally reserved globals.
984  .register %g2, #scratch
985  .register %g3, #scratch
986  .register %g6, #scratch
987  .register %g7, #scratch
# Save the global, out, local, and in register sets at 8-byte slots of
# the context (%o0 = context pointer); slot 0 (for %g0) is skipped.
988  stx  %g1, [%o0 + 0x08]
989  stx  %g2, [%o0 + 0x10]
990  stx  %g3, [%o0 + 0x18]
991  stx  %g4, [%o0 + 0x20]
992  stx  %g5, [%o0 + 0x28]
993  stx  %g6, [%o0 + 0x30]
994  stx  %g7, [%o0 + 0x38]
995  stx  %o0, [%o0 + 0x40]
996  stx  %o1, [%o0 + 0x48]
997  stx  %o2, [%o0 + 0x50]
998  stx  %o3, [%o0 + 0x58]
999  stx  %o4, [%o0 + 0x60]
1000  stx  %o5, [%o0 + 0x68]
1001  stx  %o6, [%o0 + 0x70]
1002  stx  %o7, [%o0 + 0x78]
1003  stx  %l0, [%o0 + 0x80]
1004  stx  %l1, [%o0 + 0x88]
1005  stx  %l2, [%o0 + 0x90]
1006  stx  %l3, [%o0 + 0x98]
1007  stx  %l4, [%o0 + 0xa0]
1008  stx  %l5, [%o0 + 0xa8]
1009  stx  %l6, [%o0 + 0xb0]
1010  stx  %l7, [%o0 + 0xb8]
1011  stx  %i0, [%o0 + 0xc0]
1012  stx  %i1, [%o0 + 0xc8]
1013  stx  %i2, [%o0 + 0xd0]
1014  stx  %i3, [%o0 + 0xd8]
1015  stx  %i4, [%o0 + 0xe0]
1016  stx  %i5, [%o0 + 0xe8]
1017  stx  %i6, [%o0 + 0xf0]
1018  stx  %i7, [%o0 + 0xf8]
1019
# OpenBSD StackGhost XORs return addresses on the stack with a per-
# process cookie; recover the cookie by XORing the in-register %i7 with
# the (possibly encrypted) copy flushed to the stack save area.
1020  # save StackGhost cookie
1021  mov  %i7, %g4
1022  save %sp, -176, %sp
1023  # register window flush necessary even without StackGhost
1024  flushw
1025  restore
1026  ldx  [%sp + 2047 + 0x78], %g5
1027  xor  %g4, %g5, %g4
1028  stx  %g4, [%o0 + 0x100]
1029  retl
1030  # return UNW_ESUCCESS (executes in the retl delay slot)
1031   clr %o0
1032
1033#elif defined(__sparc__)
1034
1035#
1036# extern int __unw_getcontext(unw_context_t* thread_state)
1037#
1038# On entry:
1039#  thread_state pointer is in o0
1040#
1041DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
# ta 3 (ST_FLUSH_WINDOWS) flushes the register windows to the stack so
# the saved in/local registers below reflect the caller's frames.
1042  ta 3
# Skip the trap instruction on return: %o7 still holds the call's
# address, so bias it by a further 8 bytes.
1043  add %o7, 8, %o7
# std stores an even/odd register pair per 8-byte slot.
1044  std %g0, [%o0 +   0]
1045  std %g2, [%o0 +   8]
1046  std %g4, [%o0 +  16]
1047  std %g6, [%o0 +  24]
1048  std %o0, [%o0 +  32]
1049  std %o2, [%o0 +  40]
1050  std %o4, [%o0 +  48]
1051  std %o6, [%o0 +  56]
1052  std %l0, [%o0 +  64]
1053  std %l2, [%o0 +  72]
1054  std %l4, [%o0 +  80]
1055  std %l6, [%o0 +  88]
1056  std %i0, [%o0 +  96]
1057  std %i2, [%o0 + 104]
1058  std %i4, [%o0 + 112]
1059  std %i6, [%o0 + 120]
1060  jmp %o7
1061   clr %o0                   // return UNW_ESUCCESS (delay slot)
1062
1063#elif defined(__riscv)
1064
1065#
1066# extern int __unw_getcontext(unw_context_t* thread_state)
1067#
1068# On entry:
1069#  thread_state pointer is in a0
1070#
1071DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
# Slot 0 receives ra (x1) as the resume pc; the loop then stores
# x1-x31 at their own slots.  ISTORE/FSTORE and the size/offset
# constants come from assembly.h and select RV32/RV64 widths.
1072  ISTORE    x1, (RISCV_ISIZE * 0)(a0) // store ra as pc
1073  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
1074    ISTORE x\i, (RISCV_ISIZE * \i)(a0)
1075  .endr
1076
1077# if defined(__riscv_flen)
1078  .irp i,FROM_0_TO_31
1079    FSTORE f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0)
1080  .endr
1081# endif
1082
1083  li     a0, 0  // return UNW_ESUCCESS
1084  ret           // jump to ra
1085
1086#elif defined(__s390x__)
1087
1088//
1089// extern int __unw_getcontext(unw_context_t* thread_state)
1090//
1091// On entry:
1092//  thread_state pointer is in r2
1093//
1094DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
1095
  // Save GPRs r0-r15 as a block at offset 16 (r2 = context pointer)
1096  // Save GPRs
1097  stmg %r0, %r15, 16(%r2)
1098
1099  // Save PSWM
1100  epsw %r0, %r1
1101  stm %r0, %r1, 0(%r2)
1102
1103  // Store return address (r14) as PSWA
1104  stg %r14, 8(%r2)
1105
1106  // Save FPRs f0-f15 at offset 144, 8 bytes each
1107  .irp i,FROM_0_TO_15
1108    std %f\i, (144+8*\i)(%r2)
1109  .endr
1110
1111  // Return UNW_ESUCCESS
1112  lghi %r2, 0
1113  br %r14
1114
1115#elif defined(__loongarch__) && __loongarch_grlen == 64
1116
1117#
1118# extern int __unw_getcontext(unw_context_t* thread_state)
1119#
1120# On entry:
1121#  thread_state pointer is in $a0($r4)
1122#
1123DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
# Save GPRs $r1-$r31 at doubleword slots 1-31 of the context
# ($a0 = $r4 = context pointer); slot 32 gets $ra as the resume pc.
1124  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
1125    st.d $r\i, $a0, (8*\i)
1126  .endr
1127  st.d    $r1,  $a0, (8 * 32) // store $ra to pc
1128
# FP registers are saved only for the 64-bit FP ABI (frlen == 64).
1129# if __loongarch_frlen == 64
1130  .irp i,FROM_0_TO_31
1131    fst.d $f\i, $a0, (8 * 33 + 8 * \i)
1132  .endr
1133# endif
1134
1135  move     $a0, $zero  // UNW_ESUCCESS
1136  jr       $ra
1137
1138#endif
1139
1140  WEAK_ALIAS(__unw_getcontext, unw_getcontext)
1141
1142#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */
1143
1144NO_EXEC_STACK_DIRECTIVE
1145