/* Simulator for Analog Devices Blackfin processors.

   Copyright (C) 2005-2024 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef _BFIN_SIM_H_
#define _BFIN_SIM_H_

#include <stdbool.h>
#include <stdint.h>

typedef uint8_t bu8;
typedef uint16_t bu16;
typedef uint32_t bu32;
typedef uint64_t bu40;
typedef uint64_t bu64;
typedef int8_t bs8;
typedef int16_t bs16;
typedef int32_t bs32;
typedef int64_t bs40;
typedef int64_t bs64;

#include "machs.h"

/* For dealing with parallel instructions, we must avoid changing our register
   file until all parallel insns have been simulated.  This queue of stores
   can be used to delay a modification.
   XXX: Should go and convert all 32 bit insns to use this.  */
struct store {
  bu32 *addr;
  bu32 val;
};
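
/* An illustrative sketch (not part of this header) of how such a store
   queue might be used: an insn in a parallel slot queues its register
   write, and the interpreter flushes the queue once every slot has been
   simulated.  The helper names are hypothetical; the real queue lives in
   struct bfin_cpu_state further down.

     static void
     queue_store (struct store *q, int *n, bu32 *addr, bu32 val)
     {
       q[*n].addr = addr;
       q[*n].val = val;
       ++*n;
     }

     static void
     flush_stores (struct store *q, int *n)
     {
       int i;
       for (i = 0; i < *n; ++i)
         *q[i].addr = q[i].val;
       *n = 0;
     }
*/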

enum bfin_parallel_group {
  BFIN_PARALLEL_NONE,
  BFIN_PARALLEL_GROUP0,	/* 32bit slot.  */
  BFIN_PARALLEL_GROUP1,	/* 16bit group1.  */
  BFIN_PARALLEL_GROUP2,	/* 16bit group2.  */
};

/* The KSP/USP handling wrt SP may not follow the hardware exactly (the hw
   looks at the current mode and uses either SP or USP based on that).  We
   instead always operate on SP and mirror things in KSP and USP.  During a
   CEC transition, we take care of syncing the values.  This lowers the
   simulation complexity and speeds things up a bit.  */
struct bfin_cpu_state
{
  bu32 dpregs[16], iregs[4], mregs[4], bregs[4], lregs[4], cycles[3];
  bu32 ax[2], aw[2];
  bu32 lt[2], lc[2], lb[2];
  bu32 ksp, usp, seqstat, syscfg, rets, reti, retx, retn, rete;
  bu32 pc, emudat[2];
  /* These ASTAT flags need not be bu32, but it makes pointers easier.  */
  bu32 ac0, ac0_copy, ac1, an, aq;
  union { struct { bu32 av0;  bu32 av1;  }; bu32 av [2]; };
  union { struct { bu32 av0s; bu32 av1s; }; bu32 avs[2]; };
  bu32 az, cc, v, v_copy, vs;
  bu32 rnd_mod;
  bu32 v_internal;
  bu32 astat_reserved;

  /* Set by an instruction emulation function if we performed a jump.  We
     cannot compare oldpc to newpc as this ignores the "jump 0;" case.  */
  bool did_jump;

  /* Used by the CEC to figure out where to return to.  */
  bu32 insn_len;

  /* How many cycles did this insn take to complete?  */
  bu32 cycle_delay;

  /* The pc currently being interpreted in parallel insns.  */
  bu32 multi_pc;

  /* Some insns are valid in group1, and others in group2, so we
     need to keep track of the exact slot we're processing.  */
  enum bfin_parallel_group group;

  /* Needed for supporting the DISALGNEXCPT instruction.  */
  int dis_algn_expt;

  /* See notes above for struct store.  */
  struct store stores[20];
  int n_stores;

#if (WITH_HW)
  /* Cache heavily used CPU-specific device pointers.  */
  void *cec_cache;
  void *evt_cache;
  void *mmu_cache;
  void *trace_cache;
#endif
};
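
/* A minimal sketch, assuming the KSP/USP comment above, of what the sync on
   a CEC mode transition might look like using the SET_*REG helpers defined
   below.  The helper name cec_sync_sp is hypothetical, not part of the API:

     static void
     cec_sync_sp (SIM_CPU *cpu, bool entering_user_mode)
     {
       if (entering_user_mode)
         {
           SET_KSPREG (SPREG);   save the supervisor stack pointer
           SET_SPREG (USPREG);   run on the user stack from now on
         }
       else
         {
           SET_USPREG (SPREG);   save the user stack pointer
           SET_SPREG (KSPREG);   run on the kernel stack from now on
         }
     }
*/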

#define REG_H_L(h, l)	(((h) & 0xffff0000) | ((l) & 0x0000ffff))

#define DREG(x)		(BFIN_CPU_STATE.dpregs[x])
#define PREG(x)		(BFIN_CPU_STATE.dpregs[x + 8])
#define SPREG		PREG (6)
#define FPREG		PREG (7)
#define IREG(x)		(BFIN_CPU_STATE.iregs[x])
#define MREG(x)		(BFIN_CPU_STATE.mregs[x])
#define BREG(x)		(BFIN_CPU_STATE.bregs[x])
#define LREG(x)		(BFIN_CPU_STATE.lregs[x])
#define AXREG(x)	(BFIN_CPU_STATE.ax[x])
#define AWREG(x)	(BFIN_CPU_STATE.aw[x])
#define CCREG		(BFIN_CPU_STATE.cc)
#define LCREG(x)	(BFIN_CPU_STATE.lc[x])
#define LTREG(x)	(BFIN_CPU_STATE.lt[x])
#define LBREG(x)	(BFIN_CPU_STATE.lb[x])
#define CYCLESREG	(BFIN_CPU_STATE.cycles[0])
#define CYCLES2REG	(BFIN_CPU_STATE.cycles[1])
#define CYCLES2SHDREG	(BFIN_CPU_STATE.cycles[2])
#define KSPREG		(BFIN_CPU_STATE.ksp)
#define USPREG		(BFIN_CPU_STATE.usp)
#define SEQSTATREG	(BFIN_CPU_STATE.seqstat)
#define SYSCFGREG	(BFIN_CPU_STATE.syscfg)
#define RETSREG		(BFIN_CPU_STATE.rets)
#define RETIREG		(BFIN_CPU_STATE.reti)
#define RETXREG		(BFIN_CPU_STATE.retx)
#define RETNREG		(BFIN_CPU_STATE.retn)
#define RETEREG		(BFIN_CPU_STATE.rete)
#define PCREG		(BFIN_CPU_STATE.pc)
#define EMUDAT_INREG	(BFIN_CPU_STATE.emudat[0])
#define EMUDAT_OUTREG	(BFIN_CPU_STATE.emudat[1])
#define INSN_LEN	(BFIN_CPU_STATE.insn_len)
#define PARALLEL_GROUP	(BFIN_CPU_STATE.group)
#define CYCLE_DELAY	(BFIN_CPU_STATE.cycle_delay)
#define DIS_ALGN_EXPT	(BFIN_CPU_STATE.dis_algn_expt)

#define EXCAUSE_SHIFT		0
#define EXCAUSE_MASK		(0x3f << EXCAUSE_SHIFT)
#define EXCAUSE			((SEQSTATREG & EXCAUSE_MASK) >> EXCAUSE_SHIFT)
#define HWERRCAUSE_SHIFT	14
#define HWERRCAUSE_MASK		(0x1f << HWERRCAUSE_SHIFT)
#define HWERRCAUSE		((SEQSTATREG & HWERRCAUSE_MASK) >> HWERRCAUSE_SHIFT)
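
/* Worked example of the field layout above (value purely illustrative):
   with SEQSTAT = 0x00004021, EXCAUSE is (0x00004021 & 0x3f) >> 0 = 0x21,
   and HWERRCAUSE is (0x00004021 & (0x1f << 14)) >> 14 = 0x1.  */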

#define _SET_CORE32REG_IDX(reg, p, x, val) \
  do { \
    bu32 __v = (val); \
    TRACE_REGISTER (cpu, "wrote "#p"%i = %#x", x, __v); \
    reg = __v; \
  } while (0)
#define SET_DREG(x, val) _SET_CORE32REG_IDX (DREG (x), R, x, val)
#define SET_PREG(x, val) _SET_CORE32REG_IDX (PREG (x), P, x, val)
#define SET_IREG(x, val) _SET_CORE32REG_IDX (IREG (x), I, x, val)
#define SET_MREG(x, val) _SET_CORE32REG_IDX (MREG (x), M, x, val)
#define SET_BREG(x, val) _SET_CORE32REG_IDX (BREG (x), B, x, val)
#define SET_LREG(x, val) _SET_CORE32REG_IDX (LREG (x), L, x, val)
#define SET_LCREG(x, val) _SET_CORE32REG_IDX (LCREG (x), LC, x, val)
#define SET_LTREG(x, val) _SET_CORE32REG_IDX (LTREG (x), LT, x, val)
#define SET_LBREG(x, val) _SET_CORE32REG_IDX (LBREG (x), LB, x, val)

#define SET_DREG_L_H(x, l, h) SET_DREG (x, REG_H_L (h, l))
#define SET_DREG_L(x, l) SET_DREG (x, REG_H_L (DREG (x), l))
#define SET_DREG_H(x, h) SET_DREG (x, REG_H_L (h, DREG (x)))
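
/* Worked example of the half-register helpers (values illustrative): if
   DREG (0) is 0xAABBCCDD, then SET_DREG_L (0, 0x1234) yields 0xAABB1234
   (high half preserved).  Note that SET_DREG_H keeps only the high 16 bits
   of its argument, so the new half must already be shifted into place:
   SET_DREG_H (0, 0x1234 << 16) yields 0x1234CCDD (low half preserved).  */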

#define _SET_CORE32REG_ALU(reg, p, x, val) \
  do { \
    bu32 __v = (val); \
    TRACE_REGISTER (cpu, "wrote A%i"#p" = %#x", x, __v); \
    reg = __v; \
  } while (0)
#define SET_AXREG(x, val) _SET_CORE32REG_ALU (AXREG (x), X, x, val)
#define SET_AWREG(x, val) _SET_CORE32REG_ALU (AWREG (x), W, x, val)

#define SET_AREG(x, val) \
  do { \
    bu40 __a = (val); \
    SET_AXREG (x, (__a >> 32) & 0xff); \
    SET_AWREG (x, __a); \
  } while (0)
#define SET_AREG32(x, val) \
  do { \
    SET_AWREG (x, val); \
    SET_AXREG (x, -(AWREG (x) >> 31)); \
  } while (0)
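
/* Worked example of the accumulator split: the 40-bit accumulators are kept
   as a 32-bit A.W part plus an 8-bit A.X extension.  SET_AREG (0, 0x123456789A)
   stores A0.X = 0x12 and A0.W = 0x3456789A.  SET_AREG32 sign-extends a 32-bit
   value into A.X: for val = 0x80000000, AWREG (0) >> 31 is 1, so A0.X is set
   to all ones; for val = 0x7fffffff it stays zero.  */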

#define _SET_CORE32REG(reg, val) \
  do { \
    bu32 __v = (val); \
    TRACE_REGISTER (cpu, "wrote "#reg" = %#x", __v); \
    reg##REG = __v; \
  } while (0)
#define SET_FPREG(val) _SET_CORE32REG (FP, val)
#define SET_SPREG(val) _SET_CORE32REG (SP, val)
#define SET_CYCLESREG(val) _SET_CORE32REG (CYCLES, val)
#define SET_CYCLES2REG(val) _SET_CORE32REG (CYCLES2, val)
#define SET_CYCLES2SHDREG(val) _SET_CORE32REG (CYCLES2SHD, val)
#define SET_KSPREG(val) _SET_CORE32REG (KSP, val)
#define SET_USPREG(val) _SET_CORE32REG (USP, val)
#define SET_SYSCFGREG(val) _SET_CORE32REG (SYSCFG, val)
#define SET_RETSREG(val) _SET_CORE32REG (RETS, val)
#define SET_RETIREG(val) _SET_CORE32REG (RETI, val)
#define SET_RETXREG(val) _SET_CORE32REG (RETX, val)
#define SET_RETNREG(val) _SET_CORE32REG (RETN, val)
#define SET_RETEREG(val) _SET_CORE32REG (RETE, val)
#define SET_PCREG(val) _SET_CORE32REG (PC, val)

#define _SET_CORE32REGFIELD(reg, field, val, mask, shift) \
  do { \
    bu32 __f = (val); \
    bu32 __v = ((reg##REG) & ~(mask)) | (__f << (shift)); \
    TRACE_REGISTER (cpu, "wrote "#field" = %#x ("#reg" = %#x)", __f, __v); \
    reg##REG = __v; \
  } while (0)
#define SET_SEQSTATREG(val)   _SET_CORE32REG (SEQSTAT, val)
#define SET_EXCAUSE(excp)     _SET_CORE32REGFIELD (SEQSTAT, EXCAUSE, excp, EXCAUSE_MASK, EXCAUSE_SHIFT)
#define SET_HWERRCAUSE(hwerr) _SET_CORE32REGFIELD (SEQSTAT, HWERRCAUSE, hwerr, HWERRCAUSE_MASK, HWERRCAUSE_SHIFT)
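
/* For instance, SET_EXCAUSE (0x21) expands to a read-modify-write of
   SEQSTAT: the old EXCAUSE bits are cleared with ~EXCAUSE_MASK, then
   0x21 << EXCAUSE_SHIFT is OR'd in, leaving HWERRCAUSE and the other
   SEQSTAT bits untouched.  */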

#define AZ_BIT		0
#define AN_BIT		1
#define AC0_COPY_BIT	2
#define V_COPY_BIT	3
#define CC_BIT		5
#define AQ_BIT		6
#define RND_MOD_BIT	8
#define AC0_BIT		12
#define AC1_BIT		13
#define AV0_BIT		16
#define AV0S_BIT	17
#define AV1_BIT		18
#define AV1S_BIT	19
#define V_BIT		24
#define VS_BIT		25
#define ASTAT_DEFINED_BITS \
  ((1 << AZ_BIT) | (1 << AN_BIT) | (1 << AC0_COPY_BIT) | (1 << V_COPY_BIT) \
  |(1 << CC_BIT) | (1 << AQ_BIT) \
  |(1 << RND_MOD_BIT) \
  |(1 << AC0_BIT) | (1 << AC1_BIT) \
  |(1 << AV0_BIT) | (1 << AV0S_BIT) | (1 << AV1_BIT) | (1 << AV1S_BIT) \
  |(1 << V_BIT) | (1 << VS_BIT))

#define ASTATREG(field) (BFIN_CPU_STATE.field)
#define ASTAT_DEPOSIT(field, bit) (ASTATREG(field) << (bit))
#define ASTAT \
  (ASTAT_DEPOSIT(az,       AZ_BIT)       \
  |ASTAT_DEPOSIT(an,       AN_BIT)       \
  |ASTAT_DEPOSIT(ac0_copy, AC0_COPY_BIT) \
  |ASTAT_DEPOSIT(v_copy,   V_COPY_BIT)   \
  |ASTAT_DEPOSIT(cc,       CC_BIT)       \
  |ASTAT_DEPOSIT(aq,       AQ_BIT)       \
  |ASTAT_DEPOSIT(rnd_mod,  RND_MOD_BIT)  \
  |ASTAT_DEPOSIT(ac0,      AC0_BIT)      \
  |ASTAT_DEPOSIT(ac1,      AC1_BIT)      \
  |ASTAT_DEPOSIT(av0,      AV0_BIT)      \
  |ASTAT_DEPOSIT(av0s,     AV0S_BIT)     \
  |ASTAT_DEPOSIT(av1,      AV1_BIT)      \
  |ASTAT_DEPOSIT(av1s,     AV1S_BIT)     \
  |ASTAT_DEPOSIT(v,        V_BIT)        \
  |ASTAT_DEPOSIT(vs,       VS_BIT)       \
  |ASTATREG(astat_reserved))
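
/* The flags are stored unpacked (one bu32 per flag) so insns can take the
   address of a single flag; ASTAT repacks them on demand.  For example, if
   only cc and az are set, ASTAT evaluates to (1 << 5) | (1 << 0) = 0x21,
   plus whatever reserved bits were last written via SET_ASTAT below.  */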

#define ASTAT_EXTRACT(a, bit)     (((a) >> bit) & 1)
#define _SET_ASTAT(a, field, bit) (ASTATREG(field) = ASTAT_EXTRACT(a, bit))
#define SET_ASTAT(a) \
  do { \
    TRACE_REGISTER (cpu, "wrote ASTAT = %#x", a); \
    _SET_ASTAT(a, az,       AZ_BIT); \
    _SET_ASTAT(a, an,       AN_BIT); \
    _SET_ASTAT(a, ac0_copy, AC0_COPY_BIT); \
    _SET_ASTAT(a, v_copy,   V_COPY_BIT); \
    _SET_ASTAT(a, cc,       CC_BIT); \
    _SET_ASTAT(a, aq,       AQ_BIT); \
    _SET_ASTAT(a, rnd_mod,  RND_MOD_BIT); \
    _SET_ASTAT(a, ac0,      AC0_BIT); \
    _SET_ASTAT(a, ac1,      AC1_BIT); \
    _SET_ASTAT(a, av0,      AV0_BIT); \
    _SET_ASTAT(a, av0s,     AV0S_BIT); \
    _SET_ASTAT(a, av1,      AV1_BIT); \
    _SET_ASTAT(a, av1s,     AV1S_BIT); \
    _SET_ASTAT(a, v,        V_BIT); \
    _SET_ASTAT(a, vs,       VS_BIT); \
    ASTATREG(astat_reserved) = (a) & ~ASTAT_DEFINED_BITS; \
  } while (0)
#define SET_ASTATREG(field, val) \
  do { \
    int __v = !!(val); \
    TRACE_REGISTER (cpu, "wrote ASTAT["#field"] = %i", __v); \
    ASTATREG (field) = __v; \
    if (&ASTATREG (field) == &ASTATREG (ac0)) \
      { \
	TRACE_REGISTER (cpu, "wrote ASTAT["#field"_copy] = %i", __v); \
	ASTATREG (ac0_copy) = __v; \
      } \
    else if (&ASTATREG (field) == &ASTATREG (v)) \
      { \
	TRACE_REGISTER (cpu, "wrote ASTAT["#field"_copy] = %i", __v); \
	ASTATREG (v_copy) = __v; \
      } \
  } while (0)
#define SET_CCREG(val) SET_ASTATREG (cc, val)
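
/* Usage note: SET_ASTATREG (ac0, 1) writes both the ac0 flag and its
   ac0_copy shadow, mirroring the hardware behavior where AC0_COPY tracks
   AC0 (and V_COPY tracks V).  Writing any other flag, e.g.
   SET_ASTATREG (az, 1), touches only that flag.  */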

#define SYSCFG_SSSTEP	(1 << 0)
#define SYSCFG_CCEN	(1 << 1)
#define SYSCFG_SNEN	(1 << 2)

#define __PUT_MEM(taddr, v, size) \
do { \
  bu##size __v = (v); \
  bu32 __taddr = (taddr); \
  int __cnt, __bytes = size / 8; \
  mmu_check_addr (cpu, __taddr, true, false, __bytes); \
  __cnt = sim_core_write_buffer (CPU_STATE(cpu), cpu, write_map, \
				 (void *)&__v, __taddr, __bytes); \
  if (__cnt != __bytes) \
    mmu_process_fault (cpu, __taddr, true, false, false, true); \
  BFIN_TRACE_CORE (cpu, __taddr, __bytes, write_map, __v); \
} while (0)
#define PUT_BYTE(taddr, v) __PUT_MEM(taddr, v, 8)
#define PUT_WORD(taddr, v) __PUT_MEM(taddr, v, 16)
#define PUT_LONG(taddr, v) __PUT_MEM(taddr, v, 32)
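
/* The size argument is pasted onto "bu" to pick the value type, so
   PUT_WORD (addr, v) truncates v to a bu16 and writes 2 bytes.  A typical
   (illustrative) use in an insn handler, with cpu in scope:

     PUT_LONG (SPREG - 4, RETSREG);   4-byte store, MMU-checked and traced
*/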

#define __GET_MEM(taddr, size, inst, map) \
({ \
  bu##size __ret; \
  bu32 __taddr = (taddr); \
  int __cnt, __bytes = size / 8; \
  mmu_check_addr (cpu, __taddr, false, inst, __bytes); \
  __cnt = sim_core_read_buffer (CPU_STATE(cpu), cpu, map, \
				(void *)&__ret, __taddr, __bytes); \
  if (__cnt != __bytes) \
    mmu_process_fault (cpu, __taddr, false, inst, false, true); \
  BFIN_TRACE_CORE (cpu, __taddr, __bytes, map, __ret); \
  __ret; \
})
#define _GET_MEM(taddr, size) __GET_MEM(taddr, size, false, read_map)
#define GET_BYTE(taddr) _GET_MEM(taddr, 8)
#define GET_WORD(taddr) _GET_MEM(taddr, 16)
#define GET_LONG(taddr) _GET_MEM(taddr, 32)
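
/* __GET_MEM is a GNU C statement expression, so the loads can be used as
   rvalues directly, e.g. (illustrative):

     bu32 val = GET_LONG (PREG (0));        data read via read_map
     SET_DREG (0, GET_WORD (IREG (0)));     16-bit read, zero-extended to bu32
*/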

#define IFETCH(taddr) __GET_MEM(taddr, 16, true, exec_map)
#define IFETCH_CHECK(taddr) mmu_check_addr (cpu, taddr, false, true, 2)

extern void bfin_syscall (SIM_CPU *);
extern bu32 interp_insn_bfin (SIM_CPU *, bu32);
extern bu32 hwloop_get_next_pc (SIM_CPU *, bu32, bu32);

/* Defines for Blackfin memory layouts.  */
#define BFIN_ASYNC_BASE           0x20000000
#define BFIN_SYSTEM_MMR_BASE      0xFFC00000
#define BFIN_CORE_MMR_BASE        0xFFE00000
#define BFIN_L1_SRAM_SCRATCH      0xFFB00000
#define BFIN_L1_SRAM_SCRATCH_SIZE 0x1000
#define BFIN_L1_SRAM_SCRATCH_END  (BFIN_L1_SRAM_SCRATCH + BFIN_L1_SRAM_SCRATCH_SIZE)

#define BFIN_L1_CACHE_BYTES       32

#define BFIN_CPU_STATE (*(struct bfin_cpu_state *) CPU_ARCH_DATA (cpu))
#define STATE_BOARD_DATA(sd) ((struct bfin_board_data *) STATE_ARCH_DATA (sd))

#include "dv-bfin_trace.h"

#undef CLAMP
#define CLAMP(a, b, c) min (max (a, b), c)
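
/* CLAMP bounds a into [b, c]: CLAMP (5, 0, 3) is min (max (5, 0), 3) = 3,
   and CLAMP (-2, 0, 3) is 0.  It relies on the min/max helpers provided
   elsewhere by the sim.  */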

/* TODO: Move all this trace logic to the common code.  */
#define BFIN_TRACE_CORE(cpu, addr, size, map, val) \
  do { \
    TRACE_CORE (cpu, "%cBUS %s %i bytes @ 0x%08x: 0x%0*x", \
		map == exec_map ? 'I' : 'D', \
		map == write_map ? "STORE" : "FETCH", \
		size, addr, size * 2, val); \
    PROFILE_COUNT_CORE (cpu, addr, size, map); \
  } while (0)
#define BFIN_TRACE_BRANCH(cpu, oldpc, newpc, hwloop, fmt, ...) \
  do { \
    TRACE_BRANCH (cpu, fmt " to %#x", ## __VA_ARGS__, newpc); \
    if (STATE_ENVIRONMENT (CPU_STATE (cpu)) == OPERATING_ENVIRONMENT) \
      bfin_trace_queue (cpu, oldpc, newpc, hwloop); \
  } while (0)

/* Default memory size.  */
#define BFIN_DEFAULT_MEM_SIZE (128 * 1024 * 1024)

#endif