xref: /netbsd-src/external/gpl3/gdb/dist/sim/frv/memory.c (revision 2a9ac006fc2a82cb2082b71083430235e8210ae2)
1 /* frv memory model.
2    Copyright (C) 1999-2024 Free Software Foundation, Inc.
3    Contributed by Red Hat
4 
5 This file is part of the GNU simulators.
6 
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11 
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 GNU General Public License for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
19 
20 /* This must come before any other includes.  */
21 #include "defs.h"
22 
23 #define WANT_CPU frvbf
24 #define WANT_CPU_FRVBF
25 
26 #include "sim-main.h"
27 #include "cgen-mem.h"
28 #include "bfd.h"
29 #include <stdlib.h>
30 
31 /* Check for alignment and access restrictions.  Return the corrected address.
32  */
33 static SI
34 fr400_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
35 {
36   /* Check access restrictions for double word loads only.  */
37   if (align_mask == 7)
38     {
39       if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
40 	frv_queue_data_access_error_interrupt (current_cpu, address);
41     }
42   return address;
43 }
44 
45 static SI
46 fr500_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
47 {
48   if (address & align_mask)
49     {
50       frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
51       address &= ~align_mask;
52     }
53 
54   if (((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff)
55       || ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff))
56     frv_queue_data_access_error_interrupt (current_cpu, address);
57 
58   return address;
59 }
60 
61 static SI
62 fr550_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
63 {
64   if (((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff)
65       || (align_mask > 0x3
66 	  && ((USI)address >= 0xfeff0000 && (USI)address <= 0xfeffffff)))
67     frv_queue_data_access_error_interrupt (current_cpu, address);
68 
69   return address;
70 }
71 
72 static SI
73 check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
74 {
75   SIM_DESC sd = CPU_STATE (current_cpu);
76   switch (STATE_ARCHITECTURE (sd)->mach)
77     {
78     case bfd_mach_fr400:
79     case bfd_mach_fr450:
80       address = fr400_check_data_read_address (current_cpu, address,
81 					       align_mask);
82       break;
83     case bfd_mach_frvtomcat:
84     case bfd_mach_fr500:
85     case bfd_mach_frv:
86       address = fr500_check_data_read_address (current_cpu, address,
87 					       align_mask);
88       break;
89     case bfd_mach_fr550:
90       address = fr550_check_data_read_address (current_cpu, address,
91 					       align_mask);
92       break;
93     default:
94       break;
95     }
96 
97   return address;
98 }
99 
100 static SI
101 fr400_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
102 {
103   if (address & align_mask)
104     {
105       /* Make sure that this exception is not masked.  */
106       USI isr = GET_ISR ();
107       if (! GET_ISR_EMAM (isr))
108 	{
109 	  /* Bad alignment causes a data_access_error on fr400.  */
110 	  frv_queue_data_access_error_interrupt (current_cpu, address);
111 	}
112       address &= ~align_mask;
113     }
114   /* Nothing to check.  */
115   return address;
116 }
117 
118 static SI
119 fr500_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
120 {
121   if (((USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff)
122       || ((USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff)
123       || ((USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff)
124       || ((USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff))
125     frv_queue_data_access_exception_interrupt (current_cpu);
126 
127   return address;
128 }
129 
130 static SI
131 fr550_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
132 {
133   /* No alignment restrictions on fr550 */
134 
135   if (((USI)address >= 0xfe000000 && (USI)address <= 0xfe3fffff)
136       || ((USI)address >= 0xfe408000 && (USI)address <= 0xfe7fffff))
137     frv_queue_data_access_exception_interrupt (current_cpu);
138   else
139     {
140       USI hsr0 = GET_HSR0 ();
141       if (! GET_HSR0_RME (hsr0)
142 	  && ((USI)address >= 0xfe400000 && (USI)address <= 0xfe407fff))
143 	frv_queue_data_access_exception_interrupt (current_cpu);
144     }
145 
146   return address;
147 }
148 
149 static SI
150 check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
151 {
152   SIM_DESC sd = CPU_STATE (current_cpu);
153   switch (STATE_ARCHITECTURE (sd)->mach)
154     {
155     case bfd_mach_fr400:
156     case bfd_mach_fr450:
157       address = fr400_check_readwrite_address (current_cpu, address,
158 						    align_mask);
159       break;
160     case bfd_mach_frvtomcat:
161     case bfd_mach_fr500:
162     case bfd_mach_frv:
163       address = fr500_check_readwrite_address (current_cpu, address,
164 						    align_mask);
165       break;
166     case bfd_mach_fr550:
167       address = fr550_check_readwrite_address (current_cpu, address,
168 					       align_mask);
169       break;
170     default:
171       break;
172     }
173 
174   return address;
175 }
176 
177 static PCADDR
178 fr400_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
179 			       int align_mask)
180 {
181   if (address & align_mask)
182     {
183       frv_queue_instruction_access_error_interrupt (current_cpu);
184       address &= ~align_mask;
185     }
186   else if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
187     frv_queue_instruction_access_error_interrupt (current_cpu);
188 
189   return address;
190 }
191 
192 static PCADDR
193 fr500_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
194 			       int align_mask)
195 {
196   if (address & align_mask)
197     {
198       frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
199       address &= ~align_mask;
200     }
201 
202   if (((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff)
203       || ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff))
204     frv_queue_instruction_access_error_interrupt (current_cpu);
205   else if (((USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff)
206 	   || ((USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff)
207 	   || ((USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff))
208     frv_queue_instruction_access_exception_interrupt (current_cpu);
209   else
210     {
211       USI hsr0 = GET_HSR0 ();
212       if (! GET_HSR0_RME (hsr0)
213 	  && ((USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff))
214 	frv_queue_instruction_access_exception_interrupt (current_cpu);
215     }
216 
217   return address;
218 }
219 
220 static PCADDR
221 fr550_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
222 			       int align_mask)
223 {
224   address &= ~align_mask;
225 
226   if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
227     frv_queue_instruction_access_error_interrupt (current_cpu);
228   else if ((USI)address >= 0xfe008000 && (USI)address <= 0xfe7fffff)
229     frv_queue_instruction_access_exception_interrupt (current_cpu);
230   else
231     {
232       USI hsr0 = GET_HSR0 ();
233       if (! GET_HSR0_RME (hsr0)
234 	  && (USI)address >= 0xfe000000 && (USI)address <= 0xfe007fff)
235 	frv_queue_instruction_access_exception_interrupt (current_cpu);
236     }
237 
238   return address;
239 }
240 
241 static PCADDR
242 check_insn_read_address (SIM_CPU *current_cpu, PCADDR address, int align_mask)
243 {
244   SIM_DESC sd = CPU_STATE (current_cpu);
245   switch (STATE_ARCHITECTURE (sd)->mach)
246     {
247     case bfd_mach_fr400:
248     case bfd_mach_fr450:
249       address = fr400_check_insn_read_address (current_cpu, address,
250 					       align_mask);
251       break;
252     case bfd_mach_frvtomcat:
253     case bfd_mach_fr500:
254     case bfd_mach_frv:
255       address = fr500_check_insn_read_address (current_cpu, address,
256 					       align_mask);
257       break;
258     case bfd_mach_fr550:
259       address = fr550_check_insn_read_address (current_cpu, address,
260 					       align_mask);
261       break;
262     default:
263       break;
264     }
265 
266   return address;
267 }
268 
/* Memory reads.  */

/* Read a signed byte from ADDRESS, going through the data cache when it
   is enabled (HSR0.DCE).  */
QI
frvbf_read_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the load parameters for the modeling pass; the value
	 returned here is a placeholder and is not used.  */
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 1;
      CPU_LOAD_SIGNED (current_cpu) = 1;
      return 0xb7; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      /* A nonzero cycle count means the data was found in the cache.  */
      int cycles;
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
	return CACHE_RETURN_DATA (cache, 0, address, QI, 1);
    }

  return GETMEMQI (current_cpu, pc, address);
}
301 
/* Read an unsigned byte from ADDRESS, going through the data cache when it
   is enabled (HSR0.DCE).  */
UQI
frvbf_read_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the load parameters for the modeling pass; the value
	 returned here is a placeholder and is not used.  */
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 1;
      CPU_LOAD_SIGNED (current_cpu) = 0;
      return 0xb7; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      /* A nonzero cycle count means the data was found in the cache.  */
      int cycles;
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
	return CACHE_RETURN_DATA (cache, 0, address, UQI, 1);
    }

  return GETMEMUQI (current_cpu, pc, address);
}
333 
334 /* Read a HI which spans two cache lines */
335 static HI
336 read_mem_unaligned_HI (SIM_CPU *current_cpu, IADDR pc, SI address)
337 {
338   HI value = frvbf_read_mem_QI (current_cpu, pc, address);
339   value <<= 8;
340   value |= frvbf_read_mem_UQI (current_cpu, pc, address + 1);
341   return T2H_2 (value);
342 }
343 
/* Read a signed halfword from ADDRESS, going through the data cache when
   it is enabled (HSR0.DCE).  */
HI
frvbf_read_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      /* Record the load parameters for the modeling pass; the value
	 returned here is a placeholder and is not used.  */
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 2;
      CPU_LOAD_SIGNED (current_cpu) = 1;
      return 0xb711; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
	    return read_mem_unaligned_HI (current_cpu, pc, address);
	}
      /* A nonzero cycle count means the data was found in the cache.  */
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
	return CACHE_RETURN_DATA (cache, 0, address, HI, 2);
    }

  return GETMEMHI (current_cpu, pc, address);
}
384 
/* Read an unsigned halfword from ADDRESS, going through the data cache
   when it is enabled (HSR0.DCE).  */
UHI
frvbf_read_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      /* Record the load parameters for the modeling pass; the value
	 returned here is a placeholder and is not used.  */
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 2;
      CPU_LOAD_SIGNED (current_cpu) = 0;
      return 0xb711; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
	    return read_mem_unaligned_HI (current_cpu, pc, address);
	}
      /* A nonzero cycle count means the data was found in the cache.  */
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
	return CACHE_RETURN_DATA (cache, 0, address, UHI, 2);
    }

  return GETMEMUHI (current_cpu, pc, address);
}
425 
426 /* Read a SI which spans two cache lines */
427 static SI
428 read_mem_unaligned_SI (SIM_CPU *current_cpu, IADDR pc, SI address)
429 {
430   FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
431   unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
432   char valarray[4];
433   SI SIvalue;
434   HI HIvalue;
435 
436   switch (hi_len)
437     {
438     case 1:
439       valarray[0] = frvbf_read_mem_QI (current_cpu, pc, address);
440       SIvalue = frvbf_read_mem_SI (current_cpu, pc, address + 1);
441       SIvalue = H2T_4 (SIvalue);
442       memcpy (valarray + 1, (char*)&SIvalue, 3);
443       break;
444     case 2:
445       HIvalue = frvbf_read_mem_HI (current_cpu, pc, address);
446       HIvalue = H2T_2 (HIvalue);
447       memcpy (valarray, (char*)&HIvalue, 2);
448       HIvalue = frvbf_read_mem_HI (current_cpu, pc, address + 2);
449       HIvalue = H2T_2 (HIvalue);
450       memcpy (valarray + 2, (char*)&HIvalue, 2);
451       break;
452     case 3:
453       SIvalue = frvbf_read_mem_SI (current_cpu, pc, address - 1);
454       SIvalue = H2T_4 (SIvalue);
455       memcpy (valarray, (char*)&SIvalue, 3);
456       valarray[3] = frvbf_read_mem_QI (current_cpu, pc, address + 3);
457       break;
458     default:
459       abort (); /* can't happen */
460     }
461   return T2H_4 (*(SI*)valarray);
462 }
463 
/* Read a word from ADDRESS, going through the data cache when it is
   enabled (HSR0.DCE).  */
SI
frvbf_read_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache;
  USI hsr0;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 3);
  address = check_readwrite_address (current_cpu, address, 3);

  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the load parameters for the modeling pass; the value
	 returned here is a placeholder and is not used.  */
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 4;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 4))
	    return read_mem_unaligned_SI (current_cpu, pc, address);
	}
      /* A nonzero cycle count means the data was found in the cache.  */
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
	return CACHE_RETURN_DATA (cache, 0, address, SI, 4);
    }

  return GETMEMSI (current_cpu, pc, address);
}
503 
504 SI
505 frvbf_read_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address)
506 {
507   return frvbf_read_mem_SI (current_cpu, pc, address);
508 }
509 
/* Read a DI which spans two cache lines.  The 8 bytes are assembled in
   target byte order from two smaller reads split at the cache line
   boundary; where no read primitive matches the split exactly, a wider
   read overlapping the boundary is used and the unwanted bytes are masked
   off.  NOTE(review): the left shifts below operate on signed DI values
   and can shift into the sign bit, which is undefined in ISO C —
   presumably relying on GCC's two's-complement behavior; confirm before
   restructuring.  */
static DI
read_mem_unaligned_DI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  /* Number of bytes of this access which lie in the first cache line.  */
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  DI value, value1;

  switch (hi_len)
    {
    case 1:
      value = frvbf_read_mem_QI (current_cpu, pc, address);
      value <<= 56;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 1);
      value1 = H2T_8 (value1);
      value |= value1 & ((DI)0x00ffffff << 32);
      value |= value1 & 0xffffffffu;
      break;
    case 2:
      value = frvbf_read_mem_HI (current_cpu, pc, address);
      value = H2T_2 (value);
      value <<= 48;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 2);
      value1 = H2T_8 (value1);
      value |= value1 & ((DI)0x0000ffff << 32);
      value |= value1 & 0xffffffffu;
      break;
    case 3:
      value = frvbf_read_mem_SI (current_cpu, pc, address - 1);
      value = H2T_4 (value);
      value <<= 40;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 3);
      value1 = H2T_8 (value1);
      value |= value1 & ((DI)0x000000ff << 32);
      value |= value1 & 0xffffffffu;
      break;
    case 4:
      /* Even split: two word reads, one on each side of the boundary.  */
      value = frvbf_read_mem_SI (current_cpu, pc, address);
      value = H2T_4 (value);
      value <<= 32;
      value1 = frvbf_read_mem_SI (current_cpu, pc, address + 4);
      value1 = H2T_4 (value1);
      value |= value1 & 0xffffffffu;
      break;
    case 5:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 3);
      value = H2T_8 (value);
      value <<= 24;
      value1 = frvbf_read_mem_SI (current_cpu, pc, address + 5);
      value1 = H2T_4 (value1);
      value |= value1 & 0x00ffffff;
      break;
    case 6:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 2);
      value = H2T_8 (value);
      value <<= 16;
      value1 = frvbf_read_mem_HI (current_cpu, pc, address + 6);
      value1 = H2T_2 (value1);
      value |= value1 & 0x0000ffff;
      break;
    case 7:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 1);
      value = H2T_8 (value);
      value <<= 8;
      value1 = frvbf_read_mem_QI (current_cpu, pc, address + 7);
      value |= value1 & 0x000000ff;
      break;
    default:
      abort (); /* can't happen */
    }
  return T2H_8 (value);
}
582 
/* Read a doubleword from ADDRESS, going through the data cache when it is
   enabled (HSR0.DCE).  */
DI
frvbf_read_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      /* Record the load parameters for the modeling pass; the value
	 returned here is a placeholder and is not used.  */
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 8;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
	    return read_mem_unaligned_DI (current_cpu, pc, address);
	}
      /* A nonzero cycle count means the data was found in the cache.  */
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
	return CACHE_RETURN_DATA (cache, 0, address, DI, 8);
    }

  return GETMEMDI (current_cpu, pc, address);
}
622 
/* Read a double-precision float from ADDRESS, going through the data cache
   when it is enabled (HSR0.DCE).  Mirrors frvbf_read_mem_DI except for the
   returned type.  */
DF
frvbf_read_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      /* Record the load parameters for the modeling pass; the value
	 returned here is a placeholder and is not used.  */
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 8;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
	    return read_mem_unaligned_DI (current_cpu, pc, address);
	}
      /* A nonzero cycle count means the data was found in the cache.  */
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
	return CACHE_RETURN_DATA (cache, 0, address, DF, 8);
    }

  return GETMEMDF (current_cpu, pc, address);
}
662 
/* Fetch the instruction word at VPC.  When the insn cache is enabled
   (HSR0.ICE), the cache is read passively so this fetch does not perturb
   the cache statistics.  */
USI
frvbf_read_imem_USI (SIM_CPU *current_cpu, PCADDR vpc)
{
  USI hsr0;
  vpc = check_insn_read_address (current_cpu, vpc, 3);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      FRV_CACHE *cache;
      SI value;

      /* We don't want this to show up in the cache statistics.  That read
	 is done in frvbf_simulate_insn_prefetch.  So read the cache or memory
	 passively here.  */
      cache = CPU_INSN_CACHE (current_cpu);
      if (frv_cache_read_passive_SI (cache, vpc, &value))
	return value;
    }
  /* Cache disabled or miss: read straight from the simulator core.  */
  return sim_core_read_unaligned_4 (current_cpu, vpc, read_map, vpc);
}
684 
685 static SI
686 fr400_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
687 {
688   if (align_mask == 7
689       && address >= 0xfe800000 && address <= 0xfeffffff)
690     frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);
691 
692   return address;
693 }
694 
695 static SI
696 fr500_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
697 {
698   if (address & align_mask)
699     {
700       struct frv_interrupt_queue_element *item =
701 	frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
702       /* Record the correct vliw slot with the interrupt.  */
703       if (item != NULL)
704 	item->slot = frv_interrupt_state.slot;
705       address &= ~align_mask;
706     }
707   if ((address >= 0xfeff0600 && address <= 0xfeff7fff)
708       || (address >= 0xfe800000 && address <= 0xfefeffff))
709     frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);
710 
711   return address;
712 }
713 
714 static SI
715 fr550_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
716 {
717   if (((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff)
718       || (align_mask > 0x3
719 	  && ((USI)address >= 0xfeff0000 && (USI)address <= 0xfeffffff)))
720     frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);
721 
722   return address;
723 }
724 
725 static SI
726 check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
727 {
728   SIM_DESC sd = CPU_STATE (current_cpu);
729   switch (STATE_ARCHITECTURE (sd)->mach)
730     {
731     case bfd_mach_fr400:
732     case bfd_mach_fr450:
733       address = fr400_check_write_address (current_cpu, address, align_mask);
734       break;
735     case bfd_mach_frvtomcat:
736     case bfd_mach_fr500:
737     case bfd_mach_frv:
738       address = fr500_check_write_address (current_cpu, address, align_mask);
739       break;
740     case bfd_mach_fr550:
741       address = fr550_check_write_address (current_cpu, address, align_mask);
742       break;
743     default:
744       break;
745     }
746   return address;
747 }
748 
749 void
750 frvbf_write_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
751 {
752   USI hsr0;
753   hsr0 = GET_HSR0 ();
754   if (GET_HSR0_DCE (hsr0))
755     sim_queue_fn_mem_qi_write (current_cpu, frvbf_mem_set_QI, address, value);
756   else
757     sim_queue_mem_qi_write (current_cpu, address, value);
758   frv_set_write_queue_slot (current_cpu);
759 }
760 
761 void
762 frvbf_write_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address, UQI value)
763 {
764   frvbf_write_mem_QI (current_cpu, pc, address, value);
765 }
766 
767 void
768 frvbf_write_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
769 {
770   USI hsr0;
771   hsr0 = GET_HSR0 ();
772   if (GET_HSR0_DCE (hsr0))
773     sim_queue_fn_mem_hi_write (current_cpu, frvbf_mem_set_HI, address, value);
774   else
775     sim_queue_mem_hi_write (current_cpu, address, value);
776   frv_set_write_queue_slot (current_cpu);
777 }
778 
779 void
780 frvbf_write_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address, UHI value)
781 {
782   frvbf_write_mem_HI (current_cpu, pc, address, value);
783 }
784 
785 void
786 frvbf_write_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
787 {
788   USI hsr0;
789   hsr0 = GET_HSR0 ();
790   if (GET_HSR0_DCE (hsr0))
791     sim_queue_fn_mem_si_write (current_cpu, frvbf_mem_set_SI, address, value);
792   else
793     sim_queue_mem_si_write (current_cpu, address, value);
794   frv_set_write_queue_slot (current_cpu);
795 }
796 
797 void
798 frvbf_write_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
799 {
800   frvbf_write_mem_SI (current_cpu, pc, address, value);
801 }
802 
803 void
804 frvbf_write_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
805 {
806   USI hsr0;
807   hsr0 = GET_HSR0 ();
808   if (GET_HSR0_DCE (hsr0))
809     sim_queue_fn_mem_di_write (current_cpu, frvbf_mem_set_DI, address, value);
810   else
811     sim_queue_mem_di_write (current_cpu, address, value);
812   frv_set_write_queue_slot (current_cpu);
813 }
814 
815 void
816 frvbf_write_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
817 {
818   USI hsr0;
819   hsr0 = GET_HSR0 ();
820   if (GET_HSR0_DCE (hsr0))
821     sim_queue_fn_mem_df_write (current_cpu, frvbf_mem_set_DF, address, value);
822   else
823     sim_queue_mem_df_write (current_cpu, address, value);
824   frv_set_write_queue_slot (current_cpu);
825 }
826 
/* Memory writes.  These do the actual writing through the cache.  */

/* Write the byte VALUE to ADDRESS through the data cache.  */
void
frvbf_mem_set_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot, (char *)&value,
			       sizeof (value));
    }
  else
    frv_cache_write (cache, address, (char *)&value, sizeof (value));
}
848 
849 /* Write a HI which spans two cache lines */
850 static void
851 mem_set_unaligned_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
852 {
853   FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
854   /* value is already in target byte order */
855   frv_cache_write (cache, address, (char *)&value, 1);
856   frv_cache_write (cache, address + 1, ((char *)&value + 1), 1);
857 }
858 
/* Write the halfword VALUE to ADDRESS through the data cache, handling a
   store which crosses a cache line boundary on fr550.  */
void
frvbf_mem_set_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  value = H2T_2 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
			       (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
	    {
	      mem_set_unaligned_HI (current_cpu, pc, address, value);
	      return;
	    }
	}
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}
893 
894 /* Write a SI which spans two cache lines */
895 static void
896 mem_set_unaligned_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
897 {
898   FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
899   unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
900   /* value is already in target byte order */
901   frv_cache_write (cache, address, (char *)&value, hi_len);
902   frv_cache_write (cache, address + hi_len, (char *)&value + hi_len, 4 - hi_len);
903 }
904 
/* Write the word VALUE to ADDRESS through the data cache, handling a store
   which crosses a cache line boundary on fr550.  */
void
frvbf_mem_set_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 3);
  address = check_readwrite_address (current_cpu, address, 3);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  cache = CPU_DATA_CACHE (current_cpu);
  value = H2T_4 (value);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
			       (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 4))
	    {
	      mem_set_unaligned_SI (current_cpu, pc, address, value);
	      return;
	    }
	}
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}
939 
940 /* Write a DI which spans two cache lines */
941 static void
942 mem_set_unaligned_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
943 {
944   FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
945   unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
946   /* value is already in target byte order */
947   frv_cache_write (cache, address, (char *)&value, hi_len);
948   frv_cache_write (cache, address + hi_len, (char *)&value + hi_len, 8 - hi_len);
949 }
950 
/* Write the doubleword VALUE to ADDRESS through the data cache, handling a
   store which crosses a cache line boundary on fr550.  */
void
frvbf_mem_set_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  value = H2T_8 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
			       (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
	    {
	      mem_set_unaligned_DI (current_cpu, pc, address, value);
	      return;
	    }
	}
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}
985 
/* Write the double-float VALUE to ADDRESS through the data cache, handling
   a store which crosses a cache line boundary on fr550.
   NOTE(review): H2T_8 is applied to a DF operand, and the unaligned path
   passes the DF to mem_set_unaligned_DI, whose DI parameter implies a
   floating-point to integer value conversion rather than a bit copy —
   confirm this is the intended behavior.  */
void
frvbf_mem_set_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  value = H2T_8 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
			       (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
	{
	  if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
	    {
	      mem_set_unaligned_DI (current_cpu, pc, address, value);
	      return;
	    }
	}
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}
1020 
/* Write the 16-byte (XI) value pointed to by VALUE to ADDRESS through the
   data cache.  The four SI words are converted to target byte order in
   place, so the caller's buffer is modified.  */
void
frvbf_mem_set_XI (SIM_CPU *current_cpu, IADDR pc, SI address, SI *value)
{
  int i;
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 0xf);
  address = check_readwrite_address (current_cpu, address, 0xf);

  /* TODO -- reverse word order as well?  */
  for (i = 0; i < 4; ++i)
    value[i] = H2T_4 (value[i]);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot, (char*)value, 16);
    }
  else
    frv_cache_write (cache, address, (char*)value, 16);
}
1046 
/* Record the current VLIW slot on the element at the top of the write queue.
*/
void
frv_set_write_queue_slot (SIM_CPU *current_cpu)
{
  FRV_VLIW *vliw = CPU_VLIW (current_cpu);
  /* next_slot has already been advanced past the current insn's slot.  */
  int slot = vliw->next_slot - 1;
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
  /* The most recently queued write is at index - 1.  */
  int ix = CGEN_WRITE_QUEUE_INDEX (q) - 1;
  CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix);
  CGEN_WRITE_QUEUE_ELEMENT_PIPE (item) = (*vliw->current_vliw)[slot];
}
1059