/*	$NetBSD: cpufunc.h,v 1.17 2012/05/21 07:42:51 skrll Exp $	*/

/*	$OpenBSD: cpufunc.h,v 1.17 2000/05/15 17:22:40 mickey Exp $	*/

/*
 * Copyright (c) 1998-2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 *  (c) Copyright 1988 HEWLETT-PACKARD COMPANY
 *
 *  To anyone who acknowledges that this file is provided "AS IS"
 *  without any express or implied warranty:
 *      permission to use, copy, modify, and distribute this file
 *  for any purpose is hereby granted without fee, provided that
 *  the above copyright notice and this notice appears in all
 *  copies, and that the name of Hewlett-Packard Company not be
 *  used in advertising or publicity pertaining to distribution
 *  of the software without specific, written prior permission.
 *  Hewlett-Packard Company makes no representations about the
 *  suitability of this software for any purpose.
 */
/*
 * Copyright (c) 1990,1994 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * THE UNIVERSITY OF UTAH AND CSL PROVIDE THIS SOFTWARE IN ITS "AS IS"
 * CONDITION, AND DISCLAIM ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM ITS USE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 * 	Utah $Hdr: c_support.s 1.8 94/12/14$
 *	Author: Bob Wheeler, University of Utah CSL
 */

#ifndef _HPPA_CPUFUNC_H_
#define _HPPA_CPUFUNC_H_

#include <machine/psl.h>
#include <machine/pte.h>

#define tlbbtop(b) ((b) >> (PGSHIFT - 5))
#define tlbptob(p) ((p) << (PGSHIFT - 5))

#define hptbtop(b) ((b) >> 17)
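
/*
 * tlbbtop()/tlbptob() convert between a byte address and the
 * page-frame operand taken by the TLB insert functions below
 * (iitlba()/idtlba()), i.e. the physical page number shifted left
 * by 5.  A worked example, assuming the usual PGSHIFT of 12 (4KB
 * pages), so the shift is 7:
 *
 *	tlbbtop(0x00123000) == 0x2460
 *
 * hptbtop() appears to yield a hashed page table index base, in
 * 128KB (1 << 17) units.
 */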
69 
70 /* Get space register for an address */
71 static __inline register_t
ldsid(vaddr_t p)72 ldsid(vaddr_t p) {
73 	register_t ret;
74 	__asm volatile("ldsid (%1),%0" : "=r" (ret) : "r" (p));
75 	return ret;
76 }
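
/*
 * An illustrative pairing (a sketch, not from this file): the id
 * returned by ldsid() can be loaded into a space register with
 * mtsp() below, so that sr-qualified instructions reference the
 * same space as p:
 *
 *	register_t sid = ldsid(va);
 *	mtsp(sid, 1);
 */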

#define mtctl(v,r) __asm volatile("mtctl %0,%1":: "r" (v), "i" (r))
#define mfctl(r,v) __asm volatile("mfctl %1,%0": "=r" (v): "i" (r))
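
/*
 * The control register number r must be a compile-time constant
 * ("i" constraint) because it is encoded in the instruction.  A
 * sketch, assuming CR_ITMR from <machine/reg.h> names the interval
 * timer control register:
 *
 *	u_int itmr;
 *	mfctl(CR_ITMR, itmr);
 */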

#define mfcpu(r,v)	/* XXX for the lack of the mnemonics */		\
	__asm volatile("diag  %1\n\t"					\
			 "copy  %%r22, %0"				\
	: "=r" (v) : "i" ((0x1400 | ((r) << 21) | (22))) : "r22")
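
/*
 * mfcpu() hand-encodes a diag operation: reading the magic constant,
 * the diagnostic register number goes in at bit 21 and the literal
 * 22 selects %r22 as the target, so the value is bounced through
 * %r22 (hence the clobber) into v.
 */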

#define mtsp(v,r) __asm volatile("mtsp %0,%1":: "r" (v), "i" (r))
#define mfsp(r,v) __asm volatile("mfsp %1,%0": "=r" (v): "i" (r))
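
/*
 * As with mtctl()/mfctl(), the space register number is an
 * immediate; the inlines below all use mtsp(sp, 1) to make %sr1
 * name the space they operate on.
 */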

#define ssm(v,r) __asm volatile("ssm %1,%0": "=r" (r): "i" (v))
#define rsm(v,r) __asm volatile("rsm %1,%0": "=r" (r): "i" (v))
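
/*
 * ssm/rsm set/reset the PSW system mask bits given by the immediate
 * v and leave the old mask in r.  A save/restore sketch around a
 * critical section (illustrative only; mtsm() is defined below):
 *
 *	register_t opsw;
 *	rsm(PSW_I, opsw);
 *	...
 *	mtsm(opsw);
 */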


/* Get coherence index for an address */
static __inline register_t
lci(pa_space_t sp, vaddr_t va) {
	register_t ret;

	mtsp(sp, 1);
	__asm volatile("lci 0(%%sr1, %1), %0" : "=r" (ret) : "r" (va));

	return ret;
}
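
/*
 * The coherence index identifies where a space/offset pair lands in
 * the cache and is what the pmap compares when checking for virtual
 * aliases.  Note lci() (like the TLB inlines below) clobbers %sr1.
 * A sketch, assuming HPPA_SID_KERNEL names the kernel space id:
 *
 *	register_t ci = lci(HPPA_SID_KERNEL, va);
 */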


/* Move to system mask. Old value of system mask is returned. */
static __inline register_t mtsm(register_t mask) {
	register_t ret;
	__asm volatile(
	    "ssm 0,%0\n\t"
	    "mtsm %1": "=&r" (ret) : "r" (mask));
	return ret;
}
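
/*
 * The "ssm 0,%0" reads the current system mask without setting any
 * bits; "mtsm %1" then installs the new mask.  The earlyclobber
 * ("=&r") keeps the output from being assigned the same register as
 * mask, which would corrupt the value before mtsm uses it.
 */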

#define	fdce(sp,off) __asm volatile("fdce 0(%0,%1)":: "i" (sp), "r" (off))
#define	fice(sp,off) __asm volatile("fice 0(%0,%1)":: "i" (sp), "r" (off))
#define sync_caches() \
    __asm volatile("sync\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop":::"memory")
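
/*
 * fdce()/fice() flush a single data/instruction cache entry for the
 * given space and offset; the space register number is again an
 * immediate.  sync_caches() pads the sync with seven nops because
 * PA-RISC permits up to seven instructions after a sync to complete
 * before it takes effect.  A hypothetical flush loop (the line size
 * and its source are assumptions):
 *
 *	for (; va < eva; va += line_size)
 *		fdce(0, va);
 *	sync_caches();
 */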

static __inline void
iitlba(u_int pg, pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("iitlba %0,(%%sr1, %1)":: "r" (pg), "r" (va));
}

static __inline void
idtlba(u_int pg, pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("idtlba %0,(%%sr1, %1)":: "r" (pg), "r" (va));
}

static __inline void
iitlbp(u_int prot, pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("iitlbp %0,(%%sr1, %1)":: "r" (prot), "r" (va));
}

static __inline void
idtlbp(u_int prot, pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("idtlbp %0,(%%sr1, %1)":: "r" (prot), "r" (va));
}

static __inline void
pitlb(pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("pitlb %%r0(%%sr1, %0)":: "r" (va));
}

static __inline void
pdtlb(pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("pdtlb %%r0(%%sr1, %0)":: "r" (va));
}

static __inline void
pitlbe(pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("pitlbe %%r0(%%sr1, %0)":: "r" (va));
}

static __inline void
pdtlbe(pa_space_t sp, vaddr_t va)
{
	mtsp(sp, 1);
	__asm volatile("pdtlbe %%r0(%%sr1, %0)":: "r" (va));
}
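
/*
 * The architected sequence for entering a translation is the address
 * insert followed by the protection insert, both through %sr1
 * (loaded from sp by each inline), with pg in the tlbbtop() format
 * above.  A sketch for one data entry, pa/va/prot being hypothetical
 * inputs:
 *
 *	idtlba(tlbbtop(pa), sp, va);
 *	idtlbp(prot, sp, va);
 *	...
 *	pdtlb(sp, va);
 *
 * The pitlbe()/pdtlbe() forms purge by TLB slot in an
 * implementation-dependent way and are what a flush-everything loop
 * like ptlball() (declared below) would use.
 */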

static __inline void
hppa_disable_irq(void)
{
	__asm volatile("rsm %0, %%r0" :: "i" (PSW_I) : "memory");
}

static __inline void
hppa_enable_irq(void)
{
	__asm volatile("ssm %0, %%r0" :: "i" (PSW_I) : "memory");
}
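
/*
 * These are unconditional:
 *
 *	hppa_disable_irq();
 *	... touch state that must not be interrupted ...
 *	hppa_enable_irq();
 *
 * always leaves PSW_I set, so where interrupts may already be off,
 * saving and restoring the mask with rsm()/mtsm() above is the safer
 * pattern.
 */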

#ifdef _KERNEL
extern int (*cpu_hpt_init)(vaddr_t, vsize_t);

void ficache(pa_space_t, vaddr_t, vsize_t);
void fdcache(pa_space_t, vaddr_t, vsize_t);
void pdcache(pa_space_t, vaddr_t, vsize_t);
void fcacheall(void);
void ptlball(void);

#define PCXL2_ACCEL_IO_START		0xf4000000
#define PCXL2_ACCEL_IO_END		(0xfc000000 - 1)
#define PCXL2_ACCEL_IO_ADDR2MASK(a)	(0x8 >> ((((a) >> 25) - 2) & 3))
void eaio_l2(int);
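
/*
 * The PCX-L2 accelerated I/O window covers four 32MB (1 << 25)
 * chunks from 0xf4000000 to 0xfbffffff, and
 * PCXL2_ACCEL_IO_ADDR2MASK() maps an address in the window to a
 * one-bit-per-chunk mask, presumably the argument eaio_l2() takes:
 *
 *	PCXL2_ACCEL_IO_ADDR2MASK(0xf4000000) == 0x8
 *	PCXL2_ACCEL_IO_ADDR2MASK(0xf6000000) == 0x4
 */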

#endif /* _KERNEL */

#endif /* _HPPA_CPUFUNC_H_ */