/*	$NetBSD: ia64_cpu.h,v 1.4 2023/10/06 11:45:37 skrll Exp $	*/

/*-
 * Copyright (c) 2007 Marcel Moolenaar
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/10.1/sys/ia64/include/ia64_cpu.h 223170 2011-06-17 04:26:03Z marcel $
 */

#ifndef _MACHINE_IA64_CPU_H_
#define _MACHINE_IA64_CPU_H_

/*
 * Local Interrupt ID.
 */
#define	IA64_LID_GET_SAPIC_ID(x)	((u_int)((x) >> 16) & 0xffff)
#define	IA64_LID_SET_SAPIC_ID(x)	((u_int)((x) & 0xffff) << 16)
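
/*
 * For example, the sapic id of the local processor lives in bits
 * 16-31 of cr.lid and can be extracted with:
 *
 *	u_int sid = IA64_LID_GET_SAPIC_ID(ia64_get_lid());
 *
 * (ia64_get_lid() is generated by IA64_CR(lid) below.)
 */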

/*
 * Definition of DCR bits.
 */
#define	IA64_DCR_PP		0x0000000000000001
#define	IA64_DCR_BE		0x0000000000000002
#define	IA64_DCR_LC		0x0000000000000004
#define	IA64_DCR_DM		0x0000000000000100
#define	IA64_DCR_DP		0x0000000000000200
#define	IA64_DCR_DK		0x0000000000000400
#define	IA64_DCR_DX		0x0000000000000800
#define	IA64_DCR_DR		0x0000000000001000
#define	IA64_DCR_DA		0x0000000000002000
#define	IA64_DCR_DD		0x0000000000004000

#define	IA64_DCR_DEFAULT					\
    (IA64_DCR_DM | IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX |	\
     IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD)
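
/*
 * Per the architecture, the default value sets only the fault
 * deferral bits (DM..DD), which defer the corresponding faults for
 * speculative loads; PP, BE and LC are left clear.
 */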

/*
 * Definition of PSR and IPSR bits.
 */
#define IA64_PSR_BE		0x0000000000000002
#define IA64_PSR_UP		0x0000000000000004
#define IA64_PSR_AC		0x0000000000000008
#define IA64_PSR_MFL		0x0000000000000010
#define IA64_PSR_MFH		0x0000000000000020
#define IA64_PSR_IC		0x0000000000002000
#define IA64_PSR_I		0x0000000000004000
#define IA64_PSR_PK		0x0000000000008000
#define IA64_PSR_DT		0x0000000000020000
#define IA64_PSR_DFL		0x0000000000040000
#define IA64_PSR_DFH		0x0000000000080000
#define IA64_PSR_SP		0x0000000000100000
#define IA64_PSR_PP		0x0000000000200000
#define IA64_PSR_DI		0x0000000000400000
#define IA64_PSR_SI		0x0000000000800000
#define IA64_PSR_DB		0x0000000001000000
#define IA64_PSR_LP		0x0000000002000000
#define IA64_PSR_TB		0x0000000004000000
#define IA64_PSR_RT		0x0000000008000000
#define IA64_PSR_CPL		0x0000000300000000
#define IA64_PSR_CPL_KERN	0x0000000000000000
#define IA64_PSR_CPL_1		0x0000000100000000
#define IA64_PSR_CPL_2		0x0000000200000000
#define IA64_PSR_CPL_USER	0x0000000300000000
#define IA64_PSR_IS		0x0000000400000000
#define IA64_PSR_MC		0x0000000800000000
#define IA64_PSR_IT		0x0000001000000000
#define IA64_PSR_ID		0x0000002000000000
#define IA64_PSR_DA		0x0000004000000000
#define IA64_PSR_DD		0x0000008000000000
#define IA64_PSR_SS		0x0000010000000000
#define IA64_PSR_RI		0x0000060000000000
#define IA64_PSR_RI_0		0x0000000000000000
#define IA64_PSR_RI_1		0x0000020000000000
#define IA64_PSR_RI_2		0x0000040000000000
#define IA64_PSR_ED		0x0000080000000000
#define IA64_PSR_BN		0x0000100000000000
#define IA64_PSR_IA		0x0000200000000000
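
/*
 * For example, whether an interruption came from user mode can be
 * determined from the privilege level field of the saved ipsr:
 *
 *	if ((ipsr & IA64_PSR_CPL) == IA64_PSR_CPL_USER)
 *		...
 */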

/*
 * Definition of ISR bits.
 */
#define IA64_ISR_CODE		0x000000000000ffff
#define IA64_ISR_VECTOR		0x0000000000ff0000
#define IA64_ISR_X		0x0000000100000000
#define IA64_ISR_W		0x0000000200000000
#define IA64_ISR_R		0x0000000400000000
#define IA64_ISR_NA		0x0000000800000000
#define IA64_ISR_SP		0x0000001000000000
#define IA64_ISR_RS		0x0000002000000000
#define IA64_ISR_IR		0x0000004000000000
#define IA64_ISR_NI		0x0000008000000000
#define IA64_ISR_SO		0x0000010000000000
#define IA64_ISR_EI		0x0000060000000000
#define IA64_ISR_EI_0		0x0000000000000000
#define IA64_ISR_EI_1		0x0000020000000000
#define IA64_ISR_EI_2		0x0000040000000000
#define IA64_ISR_ED		0x0000080000000000
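
/*
 * The fault code and vector fields can be extracted from cr.isr
 * with the masks above, e.g.:
 *
 *	u_int code = isr & IA64_ISR_CODE;
 *	u_int vector = (isr & IA64_ISR_VECTOR) >> 16;
 */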

/*
 * Vector numbers for various ia64 interrupts.
 */
#define IA64_VEC_VHPT			0
#define IA64_VEC_ITLB			1
#define IA64_VEC_DTLB			2
#define IA64_VEC_ALT_ITLB		3
#define IA64_VEC_ALT_DTLB		4
#define IA64_VEC_NESTED_DTLB		5
#define IA64_VEC_IKEY_MISS		6
#define IA64_VEC_DKEY_MISS		7
#define IA64_VEC_DIRTY_BIT		8
#define IA64_VEC_INST_ACCESS		9
#define IA64_VEC_DATA_ACCESS		10
#define IA64_VEC_BREAK			11
#define IA64_VEC_EXT_INTR		12
#define IA64_VEC_PAGE_NOT_PRESENT	20
#define IA64_VEC_KEY_PERMISSION		21
#define IA64_VEC_INST_ACCESS_RIGHTS	22
#define IA64_VEC_DATA_ACCESS_RIGHTS	23
#define IA64_VEC_GENERAL_EXCEPTION	24
#define IA64_VEC_DISABLED_FP		25
#define IA64_VEC_NAT_CONSUMPTION	26
#define IA64_VEC_SPECULATION		27
#define IA64_VEC_DEBUG			29
#define IA64_VEC_UNALIGNED_REFERENCE	30
#define IA64_VEC_UNSUPP_DATA_REFERENCE	31
#define IA64_VEC_FLOATING_POINT_FAULT	32
#define IA64_VEC_FLOATING_POINT_TRAP	33
#define IA64_VEC_LOWER_PRIVILEGE_TRANSFER 34
#define IA64_VEC_TAKEN_BRANCH_TRAP	35
#define IA64_VEC_SINGLE_STEP_TRAP	36
#define IA64_VEC_IA32_EXCEPTION		45
#define IA64_VEC_IA32_INTERCEPT		46
#define IA64_VEC_IA32_INTERRUPT		47
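
/*
 * Each number identifies the entry in the Interruption Vector Table
 * (rooted at cr.iva) through which the processor delivers the
 * corresponding interruption.
 */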

/*
 * IA-32 exceptions.
 */
#define IA32_EXCEPTION_DIVIDE		0
#define IA32_EXCEPTION_DEBUG		1
#define IA32_EXCEPTION_BREAK		3
#define IA32_EXCEPTION_OVERFLOW		4
#define IA32_EXCEPTION_BOUND		5
#define IA32_EXCEPTION_DNA		7
#define IA32_EXCEPTION_NOT_PRESENT	11
#define IA32_EXCEPTION_STACK_FAULT	12
#define IA32_EXCEPTION_GPFAULT		13
#define IA32_EXCEPTION_FPERROR		16
#define IA32_EXCEPTION_ALIGNMENT_CHECK	17
#define IA32_EXCEPTION_STREAMING_SIMD	19

#define IA32_INTERCEPT_INSTRUCTION	0
#define IA32_INTERCEPT_GATE		1
#define IA32_INTERCEPT_SYSTEM_FLAG	2
#define IA32_INTERCEPT_LOCK		4

#ifndef _LOCORE

/*
 * Various special ia64 instructions.
 */

/*
 * Memory Fence.
 */
static __inline void
ia64_mf(void)
{
	__asm __volatile("mf");
}

static __inline void
ia64_mf_a(void)
{
	__asm __volatile("mf.a");
}

/*
 * Flush Cache.
 */
static __inline void
ia64_fc(uint64_t va)
{
	__asm __volatile("fc %0" :: "r"(va));
}

static __inline void
ia64_fc_i(uint64_t va)
{
	__asm __volatile("fc.i %0" :: "r"(va));
}

/*
 * Sync instruction stream.
 */
static __inline void
ia64_sync_i(void)
{
	__asm __volatile("sync.i");
}

/*
 * Calculate address in VHPT for va.
 */
static __inline uint64_t
ia64_thash(uint64_t va)
{
	uint64_t result;
	__asm __volatile("thash %0=%1" : "=r" (result) : "r" (va));
	return result;
}

/*
 * Calculate VHPT tag for va.
 */
static __inline uint64_t
ia64_ttag(uint64_t va)
{
	uint64_t result;
	__asm __volatile("ttag %0=%1" : "=r" (result) : "r" (va));
	return result;
}

/*
 * Convert virtual address to physical.
 */
static __inline uint64_t
ia64_tpa(uint64_t va)
{
	uint64_t result;
	__asm __volatile("tpa %0=%1" : "=r" (result) : "r" (va));
	return result;
}

/*
 * Generate a ptc.e instruction.
 */
static __inline void
ia64_ptc_e(uint64_t v)
{
	__asm __volatile("ptc.e %0;; srlz.i;;" :: "r"(v));
}

/*
 * Generate a ptc.g instruction.
 */
static __inline void
ia64_ptc_g(uint64_t va, uint64_t log2size)
{
	__asm __volatile("ptc.g %0,%1;;" :: "r"(va), "r"(log2size));
}

/*
 * Generate a ptc.ga instruction.
 */
static __inline void
ia64_ptc_ga(uint64_t va, uint64_t log2size)
{
	__asm __volatile("ptc.ga %0,%1;;" :: "r"(va), "r"(log2size));
}

/*
 * Generate a ptc.l instruction.
 */
static __inline void
ia64_ptc_l(uint64_t va, uint64_t log2size)
{
	__asm __volatile("ptc.l %0,%1;; srlz.i;;" :: "r"(va), "r"(log2size));
}
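
/*
 * Note on the ptc variants above: ptc.l purges only the local TLB,
 * while ptc.g and ptc.ga broadcast the purge to all processors in
 * the coherence domain; ptc.ga also purges matching ALAT entries on
 * the remote processors.
 */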

/*
 * Invalidate the ALAT on the local processor.
 */
static __inline void
ia64_invala(void)
{
	__asm __volatile("invala;;");
}

/*
 * Unordered memory load.
 */

static __inline uint8_t
ia64_ld1(uint8_t *p)
{
	uint8_t v;

	__asm __volatile("ld1 %0=[%1];;" : "=r"(v) : "r"(p));
	return (v);
}

static __inline uint16_t
ia64_ld2(uint16_t *p)
{
	uint16_t v;

	__asm __volatile("ld2 %0=[%1];;" : "=r"(v) : "r"(p));
	return (v);
}

static __inline uint32_t
ia64_ld4(uint32_t *p)
{
	uint32_t v;

	__asm __volatile("ld4 %0=[%1];;" : "=r"(v) : "r"(p));
	return (v);
}

static __inline uint64_t
ia64_ld8(uint64_t *p)
{
	uint64_t v;

	__asm __volatile("ld8 %0=[%1];;" : "=r"(v) : "r"(p));
	return (v);
}

/*
 * Unordered memory store.
 */

static __inline void
ia64_st1(uint8_t *p, uint8_t v)
{
	__asm __volatile("st1 [%0]=%1;;" :: "r"(p), "r"(v));
}

static __inline void
ia64_st2(uint16_t *p, uint16_t v)
{
	__asm __volatile("st2 [%0]=%1;;" :: "r"(p), "r"(v));
}

static __inline void
ia64_st4(uint32_t *p, uint32_t v)
{
	__asm __volatile("st4 [%0]=%1;;" :: "r"(p), "r"(v));
}

static __inline void
ia64_st8(uint64_t *p, uint64_t v)
{
	__asm __volatile("st8 [%0]=%1;;" :: "r"(p), "r"(v));
}

/*
 * Read the value of psr.
 */
static __inline uint64_t
ia64_get_psr(void)
{
	uint64_t result;
	__asm __volatile("mov %0=psr;;" : "=r" (result));
	return result;
}

/*
 * Define accessors for application registers.
 */

#define IA64_AR(name)						\
								\
static __inline uint64_t					\
ia64_get_##name(void)						\
{								\
	uint64_t result;					\
	__asm __volatile("mov %0=ar." #name : "=r" (result));	\
	return result;						\
}								\
								\
static __inline void						\
ia64_set_##name(uint64_t v)					\
{								\
	__asm __volatile("mov ar." #name "=%0;;" :: "r" (v));	\
}
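
/*
 * For example, IA64_AR(itc) below produces ia64_get_itc() and
 * ia64_set_itc(), which read and write the interval time counter:
 *
 *	uint64_t now = ia64_get_itc();
 */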
IA64_AR(k0)
IA64_AR(k1)
IA64_AR(k2)
IA64_AR(k3)
IA64_AR(k4)
IA64_AR(k5)
IA64_AR(k6)
IA64_AR(k7)

IA64_AR(rsc)
IA64_AR(bsp)
IA64_AR(bspstore)
IA64_AR(rnat)

IA64_AR(fcr)

IA64_AR(eflag)
IA64_AR(csd)
IA64_AR(ssd)
IA64_AR(cflg)
IA64_AR(fsr)
IA64_AR(fir)
IA64_AR(fdr)

IA64_AR(ccv)

IA64_AR(unat)

IA64_AR(fpsr)

IA64_AR(itc)

IA64_AR(pfs)
IA64_AR(lc)
IA64_AR(ec)

/*
 * Define accessors for control registers.
 */

#define IA64_CR(name)						\
								\
static __inline uint64_t					\
ia64_get_##name(void)						\
{								\
	uint64_t result;					\
	__asm __volatile("mov %0=cr." #name : "=r" (result));	\
	return result;						\
}								\
								\
static __inline void						\
ia64_set_##name(uint64_t v)					\
{								\
	__asm __volatile("mov cr." #name "=%0;;" :: "r" (v));	\
}
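
/*
 * Likewise, IA64_CR(ivr) below produces ia64_get_ivr(), used to
 * fetch the vector of the highest priority pending external
 * interrupt.
 */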
IA64_CR(dcr)
IA64_CR(itm)
IA64_CR(iva)

IA64_CR(pta)

IA64_CR(ipsr)
IA64_CR(isr)

IA64_CR(iip)
IA64_CR(ifa)
IA64_CR(itir)
IA64_CR(iipa)
IA64_CR(ifs)
IA64_CR(iim)
IA64_CR(iha)

IA64_CR(lid)
IA64_CR(ivr)
IA64_CR(tpr)
IA64_CR(eoi)
IA64_CR(irr0)
IA64_CR(irr1)
IA64_CR(irr2)
IA64_CR(irr3)
IA64_CR(itv)
IA64_CR(pmv)
IA64_CR(cmcv)

IA64_CR(lrr0)
IA64_CR(lrr1)

/*
 * Write a region register.
 */
static __inline void
ia64_set_rr(uint64_t rrbase, uint64_t v)
{
	__asm __volatile("mov rr[%0]=%1"
			 :: "r"(rrbase), "r"(v) : "memory");
}
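
/*
 * The region register is selected by bits 63:61 of rrbase, so
 * callers pass an address inside the region in question (typically
 * the region's base address).
 */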

/*
 * Read a CPUID register.
 */
static __inline uint64_t
ia64_get_cpuid(int i)
{
	uint64_t result;
	__asm __volatile("mov %0=cpuid[%1]"
			 : "=r" (result) : "r"(i));
	return result;
}
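
/*
 * Per the architecture, cpuid[0] and cpuid[1] hold the vendor
 * string, and cpuid[3] the version information (family, model,
 * revision, and the index of the last implemented cpuid register).
 */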

static __inline void
ia64_disable_highfp(void)
{
	__asm __volatile("ssm psr.dfh;; srlz.d");
}

static __inline void
ia64_enable_highfp(void)
{
	__asm __volatile("rsm psr.dfh;; srlz.d");
}

/*
 * Avoid inline functions for the following so that they still work
 * correctly when inlining is not enabled (e.g. -O0). Function calls
 * need data serialization after setting psr, which results in a
 * hazard.
 */
#define	ia64_srlz_d()	__asm __volatile("srlz.d")
#define	ia64_srlz_i()	__asm __volatile("srlz.i;;")
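
/*
 * Typical use, after updating psr or a control register:
 *
 *	ia64_set_dcr(v);
 *	ia64_srlz_d();
 */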

#endif /* !_LOCORE */

#endif /* _MACHINE_IA64_CPU_H_ */