/*	$NetBSD: exception_vector.S,v 1.53 2021/07/26 21:43:11 andvar Exp $	*/

/*-
 * Copyright (c) 2002, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"
#include "opt_ddb.h"
#include "opt_ptrace.h"

#include "assym.h"

#include <sh3/param.h>
#include <sh3/locore.h>
#include <sh3/exception.h>
#include <sh3/ubcreg.h>
#include <sh3/pte.h>
#include <sh3/mmu_sh3.h>
#include <sh3/mmu_sh4.h>

/*
 * Align vectors more strictly here (where we don't really care) so
 * that .align 5 (i.e. 32B cache line) before data block does the
 * right thing w.r.t. final destinations after vectors are copied.
 */
#define _ALIGN_TEXT	.align 5
#include <sh3/asm.h>

__KERNEL_RCSID(0, "$NetBSD: exception_vector.S,v 1.53 2021/07/26 21:43:11 andvar Exp $")


/*
 * Exception vectors.
 * The following routines are copied to vector addresses.
 *	sh_vector_generic:	VBR + 0x100
 *	sh_vector_tlbmiss:	VBR + 0x400
 *	sh_vector_interrupt:	VBR + 0x600
 */

#define VECTOR_END_MARKER(sym)			\
		.globl	_C_LABEL(sym);		\
	_C_LABEL(sym):

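/*
 * The *_end markers defined with VECTOR_END_MARKER below let the CPU
 * setup code compute how many bytes of each vector body to copy to
 * the VBR offsets listed above.
 */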

/*
 * LINTSTUB: Var: char sh_vector_generic[1];
 *
 * void sh_vector_generic(void);
 *	Copied to VBR+0x100.  This code should be position independent
 *	and maximum 768 bytes long (== 0x400 - 0x100).
 */
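/*
 * Rough C-level sketch of the flow below (illustration only, not the
 * actual handler code):
 *
 *	tf->tf_expevt = EXPEVT;
 *	if (EXPEVT <= EXPEVT_TLB_PROT_ST)	// TLB-related exception
 *		tlb_exception(curlwp, tf, TEA);
 *	else					// everything else
 *		general_exception(curlwp, tf, TEA);
 *	ast(curlwp, tf);			// on the way out
 */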
NENTRY(sh_vector_generic)
	__EXCEPTION_ENTRY
	/* Identify exception cause */
	MOV	(EXPEVT, r0)
	mov.l	@r0, r0
	mov.l	r0, @(TF_EXPEVT, r14)	/* tf->tf_expevt = EXPEVT */
	/* Get curlwp */
	mov.l	.Lg_curlwp, r1
	mov.l	@r1, r4			/* 1st arg */
	/* Get TEA */
	MOV	(TEA, r1)
	mov.l	@r1, r6			/* 3rd arg */
	/* Check TLB exception or not */
	mov.l	.Lg_TLB_PROT_ST, r1
	cmp/hi	r1, r0
	bt/s	1f
	 mov	r4, r8	/* preserve curlwp across call */

	/* tlb_exception(curlwp, tf, TEA); */
	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
	mov.l	.Lg_tlb_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */

	/* Check for ASTs on exit to user mode. */
	__INTR_MASK(r0, r1)
	mov.l	.Lg_ast, r0
	mov	r8, r4
	jsr	@r0
	 mov	r14, r5
	bra	.Lg_return_from_exception
	 nop

1:	/* general_exception(curlwp, tf, TEA); */
#if defined(PTRACE_HOOKS) || defined(DDB)
	mov	#0, r2
	MOV	(BBRA, r1)
	mov.l	r2, @(TF_UBC, r14)	/* clear tf->tf_ubc */
	mov.w	r2, @r1			/* disable UBC channel A */
#endif
	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
	mov.l	.Lg_general_exception, r0
	jsr	@r0
	 mov	r14, r5			/* 2nd arg */

	/* Check for ASTs on exit to user mode. */
	__INTR_MASK(r0, r1)
	mov.l	.Lg_ast, r0
	mov	r8, r4
	jsr	@r0
	 mov	r14, r5

#if defined(PTRACE_HOOKS) || defined(DDB)
	mov.l	@(TF_UBC, r14), r2
	tst	r2, r2			! single-step == 0?
	bt	.Lg_return_from_exception

	!! We are returning from DDB to do single step.  Channel A in
	!! UBC is already rigged, we just need to enable it.
	MOV	(BBRA, r3)
	MOV	(BARA, r5)
	__EXCEPTION_BLOCK(r0, r1)
	mov.l	@(TF_SPC, r14), r4
	mov.l	r4, @r5			! BARA = tf->tf_spc
	mov.w	r2, @r3			! BBRA = tf->tf_ubc
#endif /* PTRACE || DDB */
.Lg_return_from_exception:
	__EXCEPTION_RETURN

	.align	5
REG_SYMBOL(EXPEVT)
.Lg_curlwp:		.long	_C_LABEL(curlwp)
REG_SYMBOL(TEA)
.Lg_TLB_PROT_ST:	.long	EXPEVT_TLB_PROT_ST
.Lg_tlb_exception:	.long	_C_LABEL(tlb_exception)
.Lg_general_exception:	.long	_C_LABEL(general_exception)
.Lg_ast:		.long	_C_LABEL(ast)
REG_SYMBOL(BBRA)
REG_SYMBOL(BARA)

/* LINTSTUB: Var: char sh_vector_generic_end[1]; */
VECTOR_END_MARKER(sh_vector_generic_end)
	SET_ENTRY_SIZE(sh_vector_generic)


#ifdef SH3
/*
 * LINTSTUB: Var: char sh3_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp or kernel pmap.  We do fast path
 * with exceptions disabled, so no P3 addresses please (including no
 * kernel stack, as we cannot wire TLB entries on sh3).  We can only
 * use BANK1 registers, and of those r6 and r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we do context
 * save and call tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
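/*
 * The fast path below mirrors the C lookup helpers in pmap (rough
 * sketch for illustration only; see __pmap_pte_lookup and
 * __pmap_kpte_lookup for the authoritative versions):
 *
 *	va = *SH3_PTEH & ~0x00000c00;	// 4K-align VPN, keep ASID
 *	if (va >= 0)			// P0: user space address
 *		ptd = curptd;
 *	else {				// kernel address
 *		ptd = __pmap_kernel.pm_ptp;
 *		va -= VM_MIN_KERNEL_ADDRESS;
 *	}
 *	ptp = ptd[__PMAP_PTP_INDEX(va)];
 *	pte = (ptp != NULL) ? ptp[__PMAP_PTP_OFSET(va)] : 0;
 *	if (pte & PG_V)
 *		enter { VPN, pte & PG_HW_BITS } into the TLB with ldtlb;
 *	else
 *		tlb_exception(curlwp, tf, *SH3_TEA);	// slow path
 */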
NENTRY(sh3_vector_tlbmiss)
	mov	#(SH3_PTEH & 0xff), r4
	mov.l	.L3_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH3_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L3_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L3_kernel_va:
	mov.l	.L3_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L3_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L3_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L3_user_va:
	mov.l	.L3_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L3_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L3_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L3_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L3_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L3_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH3_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L3_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L3_call_tlb_exception
	 nop

	mov.l	.L3_PG_HW_BITS, r1
	cmp/pz	r5		! user space address?
	and	r1, r3		! pte &= PG_HW_BITS
	bf/s	.L3_load_kernel
	 mov.l	r3, @(0x04, r4)	! *SH3_PTEL = pte

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L3_load_user:
	mov.l	r5, @r4		! *SH3_PTEH = { VPN, ASID }
	ldtlb			! needs 2 insns padding before RTE
	nop
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L3_load_kernel:
	mov.l	.L3_clear_ASID, r1
	and	r5, r1		! *SH3_PTEH & ~SH3_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH3_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	nop
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L3_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L3_SH3_EXPEVT, r2
	mov.l	.L3_curlwp, r1
	mov	#(SH3_TEA & 0xff), r0
	mov.l	@r2, r2			! *SH3_EXPEVT
	mov.l	@r0, r6			! arg3: va = *SH3_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
	mov.l	.L3_tlb_exception, r0
	mov	r4, r8			! save curlwp across the call
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe

	/* Check for ASTs on exit to user mode. */
	__INTR_MASK(r0, r1)
	mov.l	.L3_ast, r0
	mov	r8, r4			! arg1: curlwp
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	4
.L3_VPN_cleanup:		.long	~0x00000c00
.L3_curptd:			.long	_C_LABEL(curptd)
.L3_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L3_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L3_ptp_index_mask:		.long	0x1ff
.L3_ptp_offset_mask:		.long	0x3ff << 2
.L3_PG_HW_BITS:			.long	PG_HW_BITS
.L3_PG_V:			.long	PG_V
.L3_clear_ASID:			.long	~SH3_PTEH_ASID_MASK
.L3_SH3_EXPEVT:			.long	SH3_EXPEVT
.L3_curlwp:			.long	_C_LABEL(curlwp)
.L3_tlb_exception:		.long	_C_LABEL(tlb_exception)
.L3_ast:			.long	_C_LABEL(ast)

/* LINTSTUB: Var: char sh3_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh3_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh3_vector_tlbmiss)

#endif /* SH3 */


#ifdef SH4
/*
 * LINTSTUB: Var: char sh4_vector_tlbmiss[1];
 *
 * TLB miss vector.  We run through the fast path first, checking if
 * there's a valid mapping in curlwp or kernel pmap.  We do fast path
 * with exceptions disabled, so no P3 addresses please (though we can
 * use kernel stack if need be, as its TLB entries are wired).  We can
 * only use BANK1 registers, and of those r6 and r7 are already taken.
 *
 * If we don't find a valid mapping in the fast path, we do context
 * save and call tlb exception handler.
 *
 * Copied to VBR+0x400.  This code should be position independent
 * and maximum 512 bytes long (== 0x600 - 0x400).
 */
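/*
 * Same two-level lookup as the SH3 version above.  The SH4 extras are,
 * roughly (illustration only):
 *
 *	*SH4_PTEL = pte & PG_HW_BITS;
 *	*SH4_PTEA = (pte >> _PG_PCMCIA_SHIFT) & SH4_PTEA_SA_MASK;
 *
 * and ldtlb needs only one instruction of padding before rte.
 */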
NENTRY(sh4_vector_tlbmiss)
	mov.l	.L4_SH4_PTEH, r4
	mov.l	.L4_VPN_cleanup, r0
	mov.l	@r4, r5
	and	r0, r5		! trim vpn to 4K page boundary
	!! For the duration of fast path we keep
	!! r4: SH4_PTEH - other PTE regs are addressable as @(offset, r4)
	!! r5: { VPN, ASID } that caused the miss

	cmp/pz	r5		! user space address?
	bt/s	.L4_user_va
	 mov	r5, r2		! copy of vpn to compute indices into ptd/ptp

	!! kernel space address, use pmap_kernel(), adjust vpn for indexing
	!! see __pmap_kpte_lookup
.L4_kernel_va:
	mov.l	.L4_VM_MIN_KERNEL_ADDRESS, r0
	mov.l	.L4_kernptd,  r1 ! pmap_kernel()->pm_ptp
	bra	.L4_fetch_pte
	 sub	r0, r2		! vpn -= VM_MIN_KERNEL_ADDRESS

	!! user space address, use curlwp's pmap
.L4_user_va:
	mov.l	.L4_curptd,  r1	! curlwp->...->pm_ptp

	!! see __pmap_pte_lookup
.L4_fetch_pte:
	mov.l	@r1, r3		! fetch ptd

	!! r2: vpn, prepared for indexing into ptd
	!! r3: pt_entry_t **ptd => pt_entry_t *ptp => pt_entry_t pte
#ifdef DEBUG
	tst	r3, r3		! ptd == NULL  - cannot happen
	bt/s	.L4_call_tlb_exception
#endif
	 mov	#-22, r1	! __PMAP_PTP_SHIFT

	!! __PMAP_PTP_INDEX(vpn)
	mov	r2, r0
	shld	r1, r0		! vpn >> __PMAP_PTP_SHIFT
	mov.l	.L4_ptp_index_mask, r1
	and	r1, r0		! ... & (__PMAP_PTP_N - 1)
	shll2	r0		! array index -> array offset
	mov.l	@(r0, r3), r3	! ptp = ptd[idx]
	tst	r3, r3		! if (ptp == NULL)
	bt/s	.L4_call_tlb_exception
	 mov	#-(PGSHIFT - 2), r1

	!! __PMAP_PTP_OFSET(vpn) - except we pre-shift 2 bits left to
	!! get the array offset directly, as we know bits 10 and 11
	!! are zero (we cleaned them in r5 to get 4K aligned VPN)
	shld	r1, r2		! vpn >> (PGSHIFT - 2)
	mov.l	.L4_ptp_offset_mask, r0
	and	r2, r0		! ... & ((__PMAP_PTP_PG_N - 1) << 2)
	mov.l	@(r0, r3), r3	! pte = ptp[idx]


	!! r3: pte
	!! r4: SH4_PTEH
	!! r5: { VPN, ASID }

	mov.l	.L4_PG_V, r0
	tst	r0, r3		! if ((pte & PG_V) == 0)
	bt/s	.L4_call_tlb_exception
	 mov	r3, r0		! prepare PCMCIA SA bits for SH4_PTEA

	mov.l	.L4_PG_HW_BITS, r1
	shlr8	r0
	and	r1, r3		! pte &= PG_HW_BITS
	shlr	r0		! pte >> _PG_PCMCIA_SHIFT
	cmp/pz	r5		! user space address?
	and	#SH4_PTEA_SA_MASK, r0
	mov.l	r3, @(0x04, r4)	! *SH4_PTEL = pte
	bf/s	.L4_load_kernel
	 mov.l	r0, @(0x34, r4)	! *SH4_PTEA = PCMCIA space attrs

	!! load mapping for a user space page
	!! we reload PTEH to enter VPN aligned to 4K page boundary
.L4_load_user:
	mov.l	r5, @r4		! *SH4_PTEH = { VPN, ASID }
	ldtlb			! needs 1 insn padding before RTE
	nop
	rte
	 nop

	!! load mapping for a kernel space page
	!! we need to temporarily set ASID to 0
.L4_load_kernel:
	mov.l	.L4_clear_ASID, r1
	and	r5, r1		! *SH4_PTEH & ~SH4_PTEH_ASID_MASK
	mov.l	r1, @r4		! *SH4_PTEH = { VPN, ASID = 0 }
	ldtlb
	mov.l	r5, @r4		! restore ASID
	rte
	 nop


	!! if we haven't found a valid mapping in the fast path
	!!     tlb_exception(curlwp, trapframe, tea)
.L4_call_tlb_exception:
	__EXCEPTION_ENTRY
	mov.l	.L4_SH4_PTEH, r0
	mov.l	.L4_curlwp, r1
	mov.l	@(0x24, r0), r2		! *SH4_EXPEVT
	mov.l	@(0x0c, r0), r6		! arg3: va = *SH4_TEA
	mov.l	@r1, r4			! arg1: curlwp
	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
	mov.l	.L4_tlb_exception, r0
	mov	r4, r8			! save curlwp across the call
	mov.l	r2, @(TF_EXPEVT, r14)	! tf->tf_expevt = EXPEVT
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe

	/* Check for ASTs on exit to user mode. */
	__INTR_MASK(r0, r1)
	mov.l	.L4_ast, r0
	mov	r8, r4			! arg1: curlwp
	jsr	@r0
	 mov	r14, r5			! arg2: trapframe
	__EXCEPTION_RETURN

	.align	5
.L4_SH4_PTEH:			.long	SH4_PTEH
.L4_VPN_cleanup:		.long	~0x00000c00
.L4_curptd:			.long	_C_LABEL(curptd)
.L4_kernptd:			.long	_C_LABEL(__pmap_kernel)
.L4_VM_MIN_KERNEL_ADDRESS:	.long	VM_MIN_KERNEL_ADDRESS
.L4_ptp_index_mask:		.long	0x1ff
.L4_ptp_offset_mask:		.long	0x3ff << 2
.L4_PG_HW_BITS:			.long	PG_HW_BITS
.L4_PG_V:			.long	PG_V
.L4_clear_ASID:			.long	~SH4_PTEH_ASID_MASK
.L4_curlwp:			.long	_C_LABEL(curlwp)
.L4_tlb_exception:		.long	_C_LABEL(tlb_exception)
.L4_ast:			.long	_C_LABEL(ast)

/* LINTSTUB: Var: char sh4_vector_tlbmiss_end[1]; */
VECTOR_END_MARKER(sh4_vector_tlbmiss_end)
	SET_ENTRY_SIZE(sh4_vector_tlbmiss)

#endif /* SH4 */


/*
 * LINTSTUB: Var: char sh_vector_interrupt[1];
 *
 * void sh_vector_interrupt(void);
 *	Copied to VBR+0x600.  This code should be position independent.
 */
483	__EXCEPTION_ENTRY
484	!! arguments for intc_intr(): for struct clockframe
485	stc	ssr, r4
486	stc	spc, r5
487	stc	r0_bank, r6		! ssp
488	__INTR_MASK_EXCEPTION_UNBLOCK(r0, r1, r3)
489
490	mov.l	.Li_ci_idepth, r8	! callee-saved
491	mov.l	.Li_intc_intr, r0
492	mov.l	@r8, r9			! callee-saved
493	mov	#0, r1
494	add	#1, r9			! curcpu()->ci_idepth++
495	mov.l	r1, @(TF_EXPEVT, r14)	! tf->tf_expevt = 0 (for debug)?
496	jsr	@r0			! intc_intr(ssr, spc, ssp)
497	 mov.l	r9, @r8
498
499	cmp/pl	r9			! curcpu()->ci_idepth > 0
500	add	#-1, r9			! curcpu()->ci_idepth--
501	bt/s	.Li_return_to_kernel	! returning from a nested interrupt
502	 mov.l	r9, @r8
503
504	mov.l	@(TF_SSR, r14), r2
505	mov.l	.Li_PSL_MD, r1
506	tst	r1, r2			! tf->tf_ssr & PSL_MD == 0 ?
507	bt	.Li_return_to_user
508
509.Li_return_to_kernel:
510	!! Check for interrupted kernel RAS when returning to kernel
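	!! If the interrupt hit inside the _lock_cas restartable atomic
	!! sequence, roll SPC back to its start so the compare-and-swap
	!! is re-executed from scratch on return.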
	mov.l	@(TF_SPC, r14), r2
	mov.l	.Li_ras_start, r3
	cmp/hi	r3, r2			! spc > _lock_cas_ras_start ?
	bf	.Li_return_from_interrupt

	mov.l	.Li_ras_end, r1
	cmp/hs	r1, r2			! spc >= _lock_cas_ras_end ?
	bt	.Li_return_from_interrupt

	bra	.Li_return_from_interrupt
	 mov.l	r3, @(TF_SPC, r14)	! spc = _lock_cas_ras_start

.Li_return_to_user:
	/* Check for ASTs on exit to user mode. */
	mov.l	.Li_ast, r0
	mov.l	.Li_curlwp, r1
	mov	r14, r5		/* 2nd arg */
	jsr	@r0
	 mov.l	@r1, r4		/* 1st arg */

.Li_return_from_interrupt:
	__EXCEPTION_RETURN

	.align	5
.Li_ci_idepth:		.long	_C_LABEL(cpu_info_store) + CI_IDEPTH
.Li_intc_intr:		.long	_C_LABEL(intc_intr)
.Li_PSL_MD:		.long	0x40000000 /* PSL_MD */
.Li_ras_start:		.long	_C_LABEL(_lock_cas_ras_start)
.Li_ras_end:		.long	_C_LABEL(_lock_cas_ras_end)
.Li_ast:		.long	_C_LABEL(ast)
.Li_curlwp:		.long	_C_LABEL(curlwp)


/* LINTSTUB: Var: char sh_vector_interrupt_end[1]; */
VECTOR_END_MARKER(sh_vector_interrupt_end)
	SET_ENTRY_SIZE(sh_vector_interrupt)