xref: /openbsd-src/gnu/gcc/gcc/config/m32c/m32c.c (revision 404b540a9034ac75a6199ad1a32d1bbc7a0d4210)
1 /* Target Code for R8C/M16C/M32C
2    Copyright (C) 2005
3    Free Software Foundation, Inc.
4    Contributed by Red Hat.
5 
6    This file is part of GCC.
7 
8    GCC is free software; you can redistribute it and/or modify it
9    under the terms of the GNU General Public License as published
10    by the Free Software Foundation; either version 2, or (at your
11    option) any later version.
12 
13    GCC is distributed in the hope that it will be useful, but WITHOUT
14    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
16    License for more details.
17 
18    You should have received a copy of the GNU General Public License
19    along with GCC; see the file COPYING.  If not, write to the Free
20    Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21    02110-1301, USA.  */
22 
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "real.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "insn-flags.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "reload.h"
39 #include "toplev.h"
40 #include "obstack.h"
41 #include "tree.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "except.h"
45 #include "function.h"
46 #include "ggc.h"
47 #include "target.h"
48 #include "target-def.h"
49 #include "tm_p.h"
50 #include "langhooks.h"
51 #include "tree-gimple.h"
52 
53 /* Prototypes */
54 
55 /* Used by m32c_pushm_popm.  */
56 typedef enum
57 {
58   PP_pushm,
59   PP_popm,
60   PP_justcount
61 } Push_Pop_Type;
62 
63 static tree interrupt_handler (tree *, tree, tree, int, bool *);
64 static int interrupt_p (tree node);
65 static bool m32c_asm_integer (rtx, unsigned int, int);
66 static int m32c_comp_type_attributes (tree, tree);
67 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
68 static struct machine_function *m32c_init_machine_status (void);
69 static void m32c_insert_attributes (tree, tree *);
70 static bool m32c_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
71 				    tree, bool);
72 static bool m32c_promote_prototypes (tree);
73 static int m32c_pushm_popm (Push_Pop_Type);
74 static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
75 static rtx m32c_struct_value_rtx (tree, int);
76 static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
77 static int need_to_save (int);
78 
79 #define streq(a,b) (strcmp ((a), (b)) == 0)
80 
81 /* Internal support routines */
82 
83 /* Debugging statements are tagged with DEBUG0 only so that they can
84    be easily enabled individually, by replacing the '0' with '1' as
85    needed.  */
86 #define DEBUG0 0
87 #define DEBUG1 1
88 
89 #if DEBUG0
90 /* This is needed by some of the commented-out debug statements
91    below.  */
92 static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
93 #endif
94 static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
95 
96 /* These are all to support encode_pattern().  */
97 static char pattern[30], *patternp;
98 static GTY(()) rtx patternr[30];
99 #define RTX_IS(x) (streq (pattern, x))
100 
101 /* Some macros to simplify the logic throughout this file.  */
102 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
103 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
104 
105 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
106 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
107 
108 /* We do most RTX matching by converting the RTX into a string, and
109    using string compares.  This vastly simplifies the logic in many of
110    the functions in this file.
111 
112    On exit, pattern[] has the encoded string (use RTX_IS("...") to
113    compare it) and patternr[] has pointers to the nodes in the RTX
114    corresponding to each character in the encoded string.  The latter
115    is mostly used by print_operand().
116 
117    Unrecognized patterns have '?' in them; this shows up when the
118    assembler complains about syntax errors.
119 */
120 
121 static void
122 encode_pattern_1 (rtx x)
123 {
124   int i;
125 
126   if (patternp == pattern + sizeof (pattern) - 2)
127     {
128       patternp[-1] = '?';
129       return;
130     }
131 
132   patternr[patternp - pattern] = x;
133 
134   switch (GET_CODE (x))
135     {
136     case REG:
137       *patternp++ = 'r';
138       break;
139     case SUBREG:
140       if (GET_MODE_SIZE (GET_MODE (x)) !=
141 	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
142 	*patternp++ = 'S';
143       encode_pattern_1 (XEXP (x, 0));
144       break;
145     case MEM:
146       *patternp++ = 'm';
147     case CONST:
148       encode_pattern_1 (XEXP (x, 0));
149       break;
150     case PLUS:
151       *patternp++ = '+';
152       encode_pattern_1 (XEXP (x, 0));
153       encode_pattern_1 (XEXP (x, 1));
154       break;
155     case PRE_DEC:
156       *patternp++ = '>';
157       encode_pattern_1 (XEXP (x, 0));
158       break;
159     case POST_INC:
160       *patternp++ = '<';
161       encode_pattern_1 (XEXP (x, 0));
162       break;
163     case LO_SUM:
164       *patternp++ = 'L';
165       encode_pattern_1 (XEXP (x, 0));
166       encode_pattern_1 (XEXP (x, 1));
167       break;
168     case HIGH:
169       *patternp++ = 'H';
170       encode_pattern_1 (XEXP (x, 0));
171       break;
172     case SYMBOL_REF:
173       *patternp++ = 's';
174       break;
175     case LABEL_REF:
176       *patternp++ = 'l';
177       break;
178     case CODE_LABEL:
179       *patternp++ = 'c';
180       break;
181     case CONST_INT:
182     case CONST_DOUBLE:
183       *patternp++ = 'i';
184       break;
185     case UNSPEC:
186       *patternp++ = 'u';
187       *patternp++ = '0' + XCINT (x, 1, UNSPEC);
188       for (i = 0; i < XVECLEN (x, 0); i++)
189 	encode_pattern_1 (XVECEXP (x, 0, i));
190       break;
191     case USE:
192       *patternp++ = 'U';
193       break;
194     case PARALLEL:
195       *patternp++ = '|';
196       for (i = 0; i < XVECLEN (x, 0); i++)
197 	encode_pattern_1 (XVECEXP (x, 0, i));
198       break;
199     case EXPR_LIST:
200       *patternp++ = 'E';
201       encode_pattern_1 (XEXP (x, 0));
202       if (XEXP (x, 1))
203 	encode_pattern_1 (XEXP (x, 1));
204       break;
205     default:
206       *patternp++ = '?';
207 #if DEBUG0
208       fprintf (stderr, "can't encode pattern %s\n",
209 	       GET_RTX_NAME (GET_CODE (x)));
210       debug_rtx (x);
211       gcc_unreachable ();
212 #endif
213       break;
214     }
215 }
216 
217 static void
218 encode_pattern (rtx x)
219 {
220   patternp = pattern;
221   encode_pattern_1 (x);
222   *patternp = 0;
223 }
224 
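/* An illustrative sketch (not part of the build) of how the encoding
   is typically consumed.  A MEM of a REG encodes as "mr" per the
   cases above, with patternr[1] left pointing at the inner REG; the
   helper name below is hypothetical.  */
#if 0
static int
example_is_a0_indirect (rtx operand)
{
  encode_pattern (operand);	/* fills pattern[] and patternr[] */
  return RTX_IS ("mr") && REGNO (patternr[1]) == A0_REGNO;
}
#endif
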
225 /* Since register names indicate the mode they're used in, we need a
226    way to determine which name to refer to the register with.  Called
227    by print_operand().  */
228 
229 static const char *
230 reg_name_with_mode (int regno, enum machine_mode mode)
231 {
232   int mlen = GET_MODE_SIZE (mode);
233   if (regno == R0_REGNO && mlen == 1)
234     return "r0l";
235   if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
236     return "r2r0";
237   if (regno == R0_REGNO && mlen == 6)
238     return "r2r1r0";
239   if (regno == R0_REGNO && mlen == 8)
240     return "r3r1r2r0";
241   if (regno == R1_REGNO && mlen == 1)
242     return "r1l";
243   if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
244     return "r3r1";
245   if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
246     return "a1a0";
247   return reg_names[regno];
248 }
249 
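/* Worked example (a sketch): R0_REGNO comes back as "r0l" in QImode,
   as "r2r0" in SImode, and falls through to the plain reg_names[]
   entry ("r0") for HImode.  */
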
250 /* How many bytes a register uses on stack when it's pushed.  We need
251    to know this because the push opcode needs to explicitly indicate
252    the size of the register, even though the name of the register
253    already tells it that.  Used by m32c_output_reg_{push,pop}, which
254    are only used through calls to ASM_OUTPUT_REG_{PUSH,POP}.  */
255 
256 static int
257 reg_push_size (int regno)
258 {
259   switch (regno)
260     {
261     case R0_REGNO:
262     case R1_REGNO:
263       return 2;
264     case R2_REGNO:
265     case R3_REGNO:
266     case FLG_REGNO:
267       return 2;
268     case A0_REGNO:
269     case A1_REGNO:
270     case SB_REGNO:
271     case FB_REGNO:
272     case SP_REGNO:
273       if (TARGET_A16)
274 	return 2;
275       else
276 	return 3;
277     default:
278       gcc_unreachable ();
279     }
280 }
281 
282 static int *class_sizes = 0;
283 
284 /* Given two register classes, find the largest intersection between
285    them.  If there is no intersection, return RETURNED_IF_EMPTY
286    instead.  */
287 static int
288 reduce_class (int original_class, int limiting_class, int returned_if_empty)
289 {
290   int cc = class_contents[original_class][0];
291   int i, best = NO_REGS;
292   int best_size = 0;
293 
294   if (original_class == limiting_class)
295     return original_class;
296 
297   if (!class_sizes)
298     {
299       int r;
300       class_sizes = (int *) xmalloc (LIM_REG_CLASSES * sizeof (int));
301       for (i = 0; i < LIM_REG_CLASSES; i++)
302 	{
303 	  class_sizes[i] = 0;
304 	  for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
305 	    if (class_contents[i][0] & (1 << r))
306 	      class_sizes[i]++;
307 	}
308     }
309 
310   cc &= class_contents[limiting_class][0];
311   for (i = 0; i < LIM_REG_CLASSES; i++)
312     {
313       int ic = class_contents[i][0];
314 
315       if ((~cc & ic) == 0)
316 	if (best_size < class_sizes[i])
317 	  {
318 	    best = i;
319 	    best_size = class_sizes[i];
320 	  }
321 
322     }
323   if (best == NO_REGS)
324     return returned_if_empty;
325   return best;
326 }
327 
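/* Worked example (a sketch; the real bit patterns come from
   REG_CLASS_CONTENTS): if ORIGINAL_CLASS and LIMITING_CLASS overlap
   only in the address registers, the intersection mask CC covers just
   a0/a1 and the loop above returns the largest enumerated class whose
   members all lie inside that mask (A_REGS, in this case); if the two
   classes share no registers at all, RETURNED_IF_EMPTY is returned
   instead.  */
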
328 /* Returns TRUE if there are any registers that exist in both register
329    classes.  */
330 static int
331 classes_intersect (int class1, int class2)
332 {
333   return class_contents[class1][0] & class_contents[class2][0];
334 }
335 
336 /* Used by m32c_register_move_cost to determine if a move is
337    impossibly expensive.  */
338 static int
339 class_can_hold_mode (int class, enum machine_mode mode)
340 {
341   /* Cache the results:  0=untested  1=no  2=yes */
342   static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
343   if (results[class][mode] == 0)
344     {
345       int r, n, i;
346       results[class][mode] = 1;
347       for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
348 	if (class_contents[class][0] & (1 << r)
349 	    && HARD_REGNO_MODE_OK (r, mode))
350 	  {
351 	    int ok = 1;
352 	    n = HARD_REGNO_NREGS (r, mode);
353 	    for (i = 1; i < n; i++)
354 	      if (!(class_contents[class][0] & (1 << (r + i))))
355 		ok = 0;
356 	    if (ok)
357 	      {
358 		results[class][mode] = 2;
359 		break;
360 	      }
361 	  }
362     }
363 #if DEBUG0
364   fprintf (stderr, "class %s can hold %s? %s\n",
365 	   class_names[class], mode_name[mode],
366 	   (results[class][mode] == 2) ? "yes" : "no");
367 #endif
368   return results[class][mode] == 2;
369 }
370 
371 /* Run-time Target Specification.  */
372 
373 /* Memregs are memory locations that gcc treats like general
374    registers, as there are a limited number of true registers and the
375    m32c families can use memory in most places that registers can be
376    used.
377 
378    However, since memory accesses are more expensive than registers,
379    we allow the user to limit the number of memregs available, in
380    order to try to persuade gcc to try harder to use real registers.
381 
382    Memregs are provided by m32c-lib1.S.
383 */
384 
385 int target_memregs = 16;
386 static bool target_memregs_set = FALSE;
387 int ok_to_change_target_memregs = TRUE;
388 
389 #undef  TARGET_HANDLE_OPTION
390 #define TARGET_HANDLE_OPTION m32c_handle_option
391 static bool
392 m32c_handle_option (size_t code,
393 		    const char *arg ATTRIBUTE_UNUSED,
394 		    int value ATTRIBUTE_UNUSED)
395 {
396   if (code == OPT_memregs_)
397     {
398       target_memregs_set = TRUE;
399       target_memregs = atoi (arg);
400     }
401   return TRUE;
402 }
403 
404 /* Implements OVERRIDE_OPTIONS.  We limit memregs to 0..16, and
405    provide a default.  */
406 void
407 m32c_override_options (void)
408 {
409   if (target_memregs_set)
410     {
411       if (target_memregs < 0 || target_memregs > 16)
412 	error ("invalid target memregs value '%d'", target_memregs);
413     }
414   else
415     target_memregs = 16;
416 }
417 
418 /* Defining data structures for per-function information */
419 
420 /* The usual; we set up our machine_function data.  */
421 static struct machine_function *
422 m32c_init_machine_status (void)
423 {
424   struct machine_function *machine;
425   machine =
426     (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
427 
428   return machine;
429 }
430 
431 /* Implements INIT_EXPANDERS.  We just set up to call the above
432    function.  */
433 void
434 m32c_init_expanders (void)
435 {
436   init_machine_status = m32c_init_machine_status;
437 }
438 
439 /* Storage Layout */
440 
441 #undef TARGET_PROMOTE_FUNCTION_RETURN
442 #define TARGET_PROMOTE_FUNCTION_RETURN m32c_promote_function_return
443 bool
444 m32c_promote_function_return (tree fntype ATTRIBUTE_UNUSED)
445 {
446   return false;
447 }
448 
449 /* Register Basics */
450 
451 /* Basic Characteristics of Registers */
452 
453 /* Whether a mode fits in a register is complex enough to warrant a
454    table.  */
455 static struct
456 {
457   char qi_regs;
458   char hi_regs;
459   char pi_regs;
460   char si_regs;
461   char di_regs;
462 } nregs_table[FIRST_PSEUDO_REGISTER] =
463 {
464   { 1, 1, 2, 2, 4 },		/* r0 */
465   { 0, 1, 0, 0, 0 },		/* r2 */
466   { 1, 1, 2, 2, 0 },		/* r1 */
467   { 0, 1, 0, 0, 0 },		/* r3 */
468   { 0, 1, 1, 0, 0 },		/* a0 */
469   { 0, 1, 1, 0, 0 },		/* a1 */
470   { 0, 1, 1, 0, 0 },		/* sb */
471   { 0, 1, 1, 0, 0 },		/* fb */
472   { 0, 1, 1, 0, 0 },		/* sp */
473   { 1, 1, 1, 0, 0 },		/* pc */
474   { 0, 0, 0, 0, 0 },		/* fl */
475   { 1, 1, 1, 0, 0 },		/* ap */
476   { 1, 1, 2, 2, 4 },		/* mem0 */
477   { 1, 1, 2, 2, 4 },		/* mem1 */
478   { 1, 1, 2, 2, 4 },		/* mem2 */
479   { 1, 1, 2, 2, 4 },		/* mem3 */
480   { 1, 1, 2, 2, 4 },		/* mem4 */
481   { 1, 1, 2, 2, 0 },		/* mem5 */
482   { 1, 1, 2, 2, 0 },		/* mem6 */
483   { 1, 1, 0, 0, 0 },		/* mem7 */
484 };
485 
486 /* Implements CONDITIONAL_REGISTER_USAGE.  We adjust the number of
487    available memregs, and select which registers need to be preserved
488    across calls based on the chip family.  */
489 
490 void
491 m32c_conditional_register_usage (void)
492 {
493   int i;
494 
495   if (0 <= target_memregs && target_memregs <= 16)
496     {
497       /* The command line option is bytes, but our "registers" are
498 	 16-bit words.  */
499       for (i = target_memregs/2; i < 8; i++)
500 	{
501 	  fixed_regs[MEM0_REGNO + i] = 1;
502 	  CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
503 	}
504     }
505 
506   /* M32CM and M32C preserve more registers across function calls.  */
507   if (TARGET_A24)
508     {
509       call_used_regs[R1_REGNO] = 0;
510       call_used_regs[R2_REGNO] = 0;
511       call_used_regs[R3_REGNO] = 0;
512       call_used_regs[A0_REGNO] = 0;
513       call_used_regs[A1_REGNO] = 0;
514     }
515 }
516 
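/* Worked example for the memreg trimming above (a sketch):
   -memregs=6 means six bytes, i.e. three 16-bit memreg words, so the
   loop runs for i = 3..7 and marks mem3 through mem7 as fixed,
   leaving mem0..mem2 for the register allocator.  */
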
517 /* How Values Fit in Registers */
518 
519 /* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
520    different registers are different sizes from each other, *and* may
521    be different sizes in different chip families.  */
522 int
523 m32c_hard_regno_nregs (int regno, enum machine_mode mode)
524 {
525   if (regno == FLG_REGNO && mode == CCmode)
526     return 1;
527   if (regno >= FIRST_PSEUDO_REGISTER)
528     return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
529 
530   if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
531     return (GET_MODE_SIZE (mode) + 1) / 2;
532 
533   if (GET_MODE_SIZE (mode) <= 1)
534     return nregs_table[regno].qi_regs;
535   if (GET_MODE_SIZE (mode) <= 2)
536     return nregs_table[regno].hi_regs;
537   if (regno == A0_REGNO && mode == PSImode && TARGET_A16)
538     return 2;
539   if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
540     return nregs_table[regno].pi_regs;
541   if (GET_MODE_SIZE (mode) <= 4)
542     return nregs_table[regno].si_regs;
543   if (GET_MODE_SIZE (mode) <= 8)
544     return nregs_table[regno].di_regs;
545   return 0;
546 }
547 
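/* Worked example (a sketch): SImode (4 bytes) in R0_REGNO looks up
   nregs_table[R0_REGNO].si_regs == 2, i.e. the value occupies the
   r2r0 pair, while the same mode in A0_REGNO yields 0, so
   m32c_hard_regno_ok below rejects that combination.  */
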
548 /* Implements HARD_REGNO_MODE_OK.  The above function does the work
549    already; just test its return value.  */
550 int
551 m32c_hard_regno_ok (int regno, enum machine_mode mode)
552 {
553   return m32c_hard_regno_nregs (regno, mode) != 0;
554 }
555 
556 /* Implements MODES_TIEABLE_P.  In general, modes aren't tieable since
557    registers are all different sizes.  However, since most modes are
558    bigger than our registers anyway, it's easier to implement this
559    function that way, leaving QImode as the only unique case.  */
560 int
561 m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
562 {
563   if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
564     return 1;
565 
566 #if 0
567   if (m1 == QImode || m2 == QImode)
568     return 0;
569 #endif
570 
571   return 1;
572 }
573 
574 /* Register Classes */
575 
576 /* Implements REGNO_REG_CLASS.  */
577 enum machine_mode
578 m32c_regno_reg_class (int regno)
579 {
580   switch (regno)
581     {
582     case R0_REGNO:
583       return R0_REGS;
584     case R1_REGNO:
585       return R1_REGS;
586     case R2_REGNO:
587       return R2_REGS;
588     case R3_REGNO:
589       return R3_REGS;
590     case A0_REGNO:
591     case A1_REGNO:
592       return A_REGS;
593     case SB_REGNO:
594       return SB_REGS;
595     case FB_REGNO:
596       return FB_REGS;
597     case SP_REGNO:
598       return SP_REGS;
599     case FLG_REGNO:
600       return FLG_REGS;
601     default:
602       if (IS_MEM_REGNO (regno))
603 	return MEM_REGS;
604       return ALL_REGS;
605     }
606 }
607 
608 /* Implements REG_CLASS_FROM_CONSTRAINT.  Note that some constraints only match
609    for certain chip families.  */
610 int
611 m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
612 {
613   if (memcmp (s, "Rsp", 3) == 0)
614     return SP_REGS;
615   if (memcmp (s, "Rfb", 3) == 0)
616     return FB_REGS;
617   if (memcmp (s, "Rsb", 3) == 0)
618     return SB_REGS;
619   if (memcmp (s, "Rcr", 3) == 0)
620     return TARGET_A16 ? CR_REGS : NO_REGS;
621   if (memcmp (s, "Rcl", 3) == 0)
622     return TARGET_A24 ? CR_REGS : NO_REGS;
623   if (memcmp (s, "R0w", 3) == 0)
624     return R0_REGS;
625   if (memcmp (s, "R1w", 3) == 0)
626     return R1_REGS;
627   if (memcmp (s, "R2w", 3) == 0)
628     return R2_REGS;
629   if (memcmp (s, "R3w", 3) == 0)
630     return R3_REGS;
631   if (memcmp (s, "R02", 3) == 0)
632     return R02_REGS;
633   if (memcmp (s, "R03", 3) == 0)
634     return R03_REGS;
635   if (memcmp (s, "Rdi", 3) == 0)
636     return DI_REGS;
637   if (memcmp (s, "Rhl", 3) == 0)
638     return HL_REGS;
639   if (memcmp (s, "R23", 3) == 0)
640     return R23_REGS;
641   if (memcmp (s, "Ra0", 3) == 0)
642     return A0_REGS;
643   if (memcmp (s, "Ra1", 3) == 0)
644     return A1_REGS;
645   if (memcmp (s, "Raa", 3) == 0)
646     return A_REGS;
647   if (memcmp (s, "Raw", 3) == 0)
648     return TARGET_A16 ? A_REGS : NO_REGS;
649   if (memcmp (s, "Ral", 3) == 0)
650     return TARGET_A24 ? A_REGS : NO_REGS;
651   if (memcmp (s, "Rqi", 3) == 0)
652     return QI_REGS;
653   if (memcmp (s, "Rad", 3) == 0)
654     return AD_REGS;
655   if (memcmp (s, "Rsi", 3) == 0)
656     return SI_REGS;
657   if (memcmp (s, "Rhi", 3) == 0)
658     return HI_REGS;
659   if (memcmp (s, "Rhc", 3) == 0)
660     return HC_REGS;
661   if (memcmp (s, "Rra", 3) == 0)
662     return RA_REGS;
663   if (memcmp (s, "Rfl", 3) == 0)
664     return FLG_REGS;
665   if (memcmp (s, "Rmm", 3) == 0)
666     {
667       if (fixed_regs[MEM0_REGNO])
668 	return NO_REGS;
669       return MEM_REGS;
670     }
671 
672   /* PSImode registers - i.e. whatever can hold a pointer.  */
673   if (memcmp (s, "Rpi", 3) == 0)
674     {
675       if (TARGET_A16)
676 	return HI_REGS;
677       else
678 	return RA_REGS; /* r2r0 and r3r1 can hold pointers.  */
679     }
680 
681   /* We handle this one as an EXTRA_CONSTRAINT.  */
682   if (memcmp (s, "Rpa", 3) == 0)
683     return NO_REGS;
684 
685   if (*s == 'R')
686     {
687       fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
688       gcc_unreachable();
689     }
690 
691   return NO_REGS;
692 }
693 
694 /* Implements REGNO_OK_FOR_BASE_P.  */
695 int
696 m32c_regno_ok_for_base_p (int regno)
697 {
698   if (regno == A0_REGNO
699       || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
700     return 1;
701   return 0;
702 }
703 
704 #define DEBUG_RELOAD 0
705 
706 /* Implements PREFERRED_RELOAD_CLASS.  In general, prefer general
707    registers of the appropriate size.  */
708 int
709 m32c_preferred_reload_class (rtx x, int rclass)
710 {
711   int newclass = rclass;
712 
713 #if DEBUG_RELOAD
714   fprintf (stderr, "\npreferred_reload_class for %s is ",
715 	   class_names[rclass]);
716 #endif
717   if (rclass == NO_REGS)
718     rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
719 
720   if (classes_intersect (rclass, CR_REGS))
721     {
722       switch (GET_MODE (x))
723 	{
724 	case QImode:
725 	  newclass = HL_REGS;
726 	  break;
727 	default:
728 	  /*      newclass = HI_REGS; */
729 	  break;
730 	}
731     }
732 
733   else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
734     newclass = SI_REGS;
735   else if (GET_MODE_SIZE (GET_MODE (x)) > 4
736 	   && ~class_contents[rclass][0] & 0x000f)
737     newclass = DI_REGS;
738 
739   rclass = reduce_class (rclass, newclass, rclass);
740 
741   if (GET_MODE (x) == QImode)
742     rclass = reduce_class (rclass, HL_REGS, rclass);
743 
744 #if DEBUG_RELOAD
745   fprintf (stderr, "%s\n", class_names[rclass]);
746   debug_rtx (x);
747 
748   if (GET_CODE (x) == MEM
749       && GET_CODE (XEXP (x, 0)) == PLUS
750       && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
751     fprintf (stderr, "Glorm!\n");
752 #endif
753   return rclass;
754 }
755 
756 /* Implements PREFERRED_OUTPUT_RELOAD_CLASS.  */
757 int
758 m32c_preferred_output_reload_class (rtx x, int rclass)
759 {
760   return m32c_preferred_reload_class (x, rclass);
761 }
762 
763 /* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
764    address registers for ordinary reloads, since they're needed for
765    address reloads.  */
766 int
767 m32c_limit_reload_class (enum machine_mode mode, int rclass)
768 {
769 #if DEBUG_RELOAD
770   fprintf (stderr, "limit_reload_class for %s: %s ->",
771 	   mode_name[mode], class_names[rclass]);
772 #endif
773 
774   if (mode == QImode)
775     rclass = reduce_class (rclass, HL_REGS, rclass);
776   else if (mode == HImode)
777     rclass = reduce_class (rclass, HI_REGS, rclass);
778   else if (mode == SImode)
779     rclass = reduce_class (rclass, SI_REGS, rclass);
780 
781   if (rclass != A_REGS)
782     rclass = reduce_class (rclass, DI_REGS, rclass);
783 
784 #if DEBUG_RELOAD
785   fprintf (stderr, " %s\n", class_names[rclass]);
786 #endif
787   return rclass;
788 }
789 
790 /* Implements SECONDARY_RELOAD_CLASS.  QImode values have to be reloaded in
791    r0 or r1, as those are the only real QImode registers.  CR regs get
792    reloaded through appropriately sized general or address
793    registers.  */
794 int
795 m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
796 {
797   int cc = class_contents[rclass][0];
798 #if DEBUG0
799   fprintf (stderr, "\nsecondary reload class %s %s\n",
800 	   class_names[rclass], mode_name[mode]);
801   debug_rtx (x);
802 #endif
803   if (mode == QImode
804       && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
805     return QI_REGS;
806   if (classes_intersect (rclass, CR_REGS)
807       && GET_CODE (x) == REG
808       && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
809     return TARGET_A16 ? HI_REGS : A_REGS;
810   return NO_REGS;
811 }
812 
813 /* Implements CLASS_LIKELY_SPILLED_P.  A_REGS is needed for address
814    reloads.  */
815 int
816 m32c_class_likely_spilled_p (int regclass)
817 {
818   if (regclass == A_REGS)
819     return 1;
820   return reg_class_size[regclass] == 1;
821 }
822 
823 /* Implements CLASS_MAX_NREGS.  We calculate this according to its
824    documented meaning, to avoid potential inconsistencies with actual
825    class definitions.  */
826 int
827 m32c_class_max_nregs (int regclass, enum machine_mode mode)
828 {
829   int rn, max = 0;
830 
831   for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
832     if (class_contents[regclass][0] & (1 << rn))
833       {
834 	int n = m32c_hard_regno_nregs (rn, mode);
835 	if (max < n)
836 	  max = n;
837       }
838   return max;
839 }
840 
841 /* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
842    QI (r0l, r1l) because the chip doesn't support QI ops on other
843    registers (well, it does on a0/a1 but if we let gcc do that, reload
844    suffers).  Otherwise, we allow changes to larger modes.  */
845 int
846 m32c_cannot_change_mode_class (enum machine_mode from,
847 			       enum machine_mode to, int rclass)
848 {
849 #if DEBUG0
850   fprintf (stderr, "cannot change from %s to %s in %s\n",
851 	   mode_name[from], mode_name[to], class_names[rclass]);
852 #endif
853 
854   if (to == QImode)
855     return (class_contents[rclass][0] & 0x1ffa);
856 
857   if (class_contents[rclass][0] & 0x0005	/* r0, r1 */
858       && GET_MODE_SIZE (from) > 1)
859     return 0;
860   if (GET_MODE_SIZE (from) > 2)	/* all other regs */
861     return 0;
862 
863   return 1;
864 }
865 
866 /* Helpers for the rest of the file.  */
867 /* TRUE if the rtx is a REG rtx for the given register.  */
868 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
869 			   && REGNO (rtx) == regno)
870 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
871    base register in address calculations (hence the "strict"
872    argument).  */
873 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
874 			       && (REGNO (rtx) == AP_REGNO \
875 				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
876 
877 /* Implements CONST_OK_FOR_CONSTRAINT_P.  Currently, all constant
878    constraints start with 'I', with the next two characters indicating
879    the type and size of the range allowed.  */
880 int
881 m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
882 				char c ATTRIBUTE_UNUSED, const char *str)
883 {
884   /* s=signed u=unsigned n=nonzero m=minus l=log2able,
885      [sun] bits [SUN] bytes, p=pointer size
886      I[-0-9][0-9] matches that number */
887   if (memcmp (str, "Is3", 3) == 0)
888     {
889       return (-8 <= value && value <= 7);
890     }
891   if (memcmp (str, "IS1", 3) == 0)
892     {
893       return (-128 <= value && value <= 127);
894     }
895   if (memcmp (str, "IS2", 3) == 0)
896     {
897       return (-32768 <= value && value <= 32767);
898     }
899   if (memcmp (str, "IU2", 3) == 0)
900     {
901       return (0 <= value && value <= 65535);
902     }
903   if (memcmp (str, "IU3", 3) == 0)
904     {
905       return (0 <= value && value <= 0x00ffffff);
906     }
907   if (memcmp (str, "In4", 3) == 0)
908     {
909       return (-8 <= value && value && value <= 8);
910     }
911   if (memcmp (str, "In5", 3) == 0)
912     {
913       return (-16 <= value && value && value <= 16);
914     }
915   if (memcmp (str, "In6", 3) == 0)
916     {
917       return (-32 <= value && value && value <= 32);
918     }
919   if (memcmp (str, "IM2", 3) == 0)
920     {
921       return (-65536 <= value && value && value <= -1);
922     }
923   if (memcmp (str, "Ilb", 3) == 0)
924     {
925       int b = exact_log2 (value);
926       return (b >= 0 && b <= 7);
927     }
928   if (memcmp (str, "Imb", 3) == 0)
929     {
930       int b = exact_log2 ((value ^ 0xff) & 0xff);
931       return (b >= 0 && b <= 7);
932     }
933   if (memcmp (str, "Ilw", 3) == 0)
934     {
935       int b = exact_log2 (value);
936       return (b >= 0 && b <= 15);
937     }
938   if (memcmp (str, "Imw", 3) == 0)
939     {
940       int b = exact_log2 ((value ^ 0xffff) & 0xffff);
941       return (b >= 0 && b <= 15);
942     }
943   if (memcmp (str, "I00", 3) == 0)
944     {
945       return (value == 0);
946     }
947   return 0;
948 }
949 
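/* An illustrative sketch, not part of the build: "Is3" above is the
   signed 4-bit immediate range, so -8 and 7 are accepted and 8 is
   rejected.  */
#if 0
  gcc_assert (m32c_const_ok_for_constraint_p (-8, 'I', "Is3"));
  gcc_assert (m32c_const_ok_for_constraint_p (7, 'I', "Is3"));
  gcc_assert (!m32c_const_ok_for_constraint_p (8, 'I', "Is3"));
#endif
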
950 /* Implements EXTRA_CONSTRAINT_STR (see next function too).  'S' is
951    for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
952    call return values.  */
953 int
954 m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
955 {
956   encode_pattern (value);
957   if (memcmp (str, "Sd", 2) == 0)
958     {
959       /* This is the common "src/dest" address */
960       rtx r;
961       if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
962 	return 1;
963       if (RTX_IS ("ms") || RTX_IS ("m+si"))
964 	return 1;
965       if (RTX_IS ("m++rii"))
966 	{
967 	  if (REGNO (patternr[3]) == FB_REGNO
968 	      && INTVAL (patternr[4]) == 0)
969 	    return 1;
970 	}
971       if (RTX_IS ("mr"))
972 	r = patternr[1];
973       else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
974 	r = patternr[2];
975       else
976 	return 0;
977       if (REGNO (r) == SP_REGNO)
978 	return 0;
979       return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
980     }
981   else if (memcmp (str, "Sa", 2) == 0)
982     {
983       rtx r;
984       if (RTX_IS ("mr"))
985 	r = patternr[1];
986       else if (RTX_IS ("m+ri"))
987 	r = patternr[2];
988       else
989 	return 0;
990       return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
991     }
992   else if (memcmp (str, "Si", 2) == 0)
993     {
994       return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
995     }
996   else if (memcmp (str, "Ss", 2) == 0)
997     {
998       return ((RTX_IS ("mr")
999 	       && (IS_REG (patternr[1], SP_REGNO)))
1000 	      || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
1001     }
1002   else if (memcmp (str, "Sf", 2) == 0)
1003     {
1004       return ((RTX_IS ("mr")
1005 	       && (IS_REG (patternr[1], FB_REGNO)))
1006 	      || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
1007     }
1008   else if (memcmp (str, "Sb", 2) == 0)
1009     {
1010       return ((RTX_IS ("mr")
1011 	       && (IS_REG (patternr[1], SB_REGNO)))
1012 	      || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
1013     }
1014   else if (memcmp (str, "Sp", 2) == 0)
1015     {
1016       /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
1017       return (RTX_IS ("mi")
1018 	      && !(INTVAL (patternr[1]) & ~0x1fff));
1019     }
1020   else if (memcmp (str, "S1", 2) == 0)
1021     {
1022       return r1h_operand (value, QImode);
1023     }
1024 
1025   gcc_assert (str[0] != 'S');
1026 
1027   if (memcmp (str, "Rpa", 2) == 0)
1028     return GET_CODE (value) == PARALLEL;
1029 
1030   return 0;
1031 }
1032 
1033 /* This is for when we're debugging the above.  */
1034 int
1035 m32c_extra_constraint_p (rtx value, char c, const char *str)
1036 {
1037   int rv = m32c_extra_constraint_p2 (value, c, str);
1038 #if DEBUG0
1039   fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
1040 	   rv);
1041   debug_rtx (value);
1042 #endif
1043   return rv;
1044 }
1045 
1046 /* Implements EXTRA_MEMORY_CONSTRAINT.  Currently, we only use strings
1047    starting with 'S'.  */
1048 int
1049 m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1050 {
1051   return c == 'S';
1052 }
1053 
1054 /* Implements EXTRA_ADDRESS_CONSTRAINT.  We reserve 'A' strings for these,
1055    but don't currently define any.  */
1056 int
1057 m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1058 {
1059   return c == 'A';
1060 }
1061 
1062 /* STACK AND CALLING */
1063 
1064 /* Frame Layout */
1065 
1066 /* Implements RETURN_ADDR_RTX.  Note that R8C and M16C push 24 bits
1067    (yes, THREE bytes) onto the stack for the return address, but we
1068    don't support pointers bigger than 16 bits on those chips.  This
1069    will likely wreak havoc with exception unwinding.  FIXME.  */
1070 rtx
1071 m32c_return_addr_rtx (int count)
1072 {
1073   enum machine_mode mode;
1074   int offset;
1075   rtx ra_mem;
1076 
1077   if (count)
1078     return NULL_RTX;
1079   /* we want 2[$fb] */
1080 
1081   if (TARGET_A24)
1082     {
1083       mode = SImode;
1084       offset = 4;
1085     }
1086   else
1087     {
1088       /* FIXME: it's really 3 bytes */
1089       mode = HImode;
1090       offset = 2;
1091     }
1092 
1093   ra_mem =
1094     gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
1095   return copy_to_mode_reg (mode, ra_mem);
1096 }
1097 
1098 /* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  */
1099 rtx
1100 m32c_incoming_return_addr_rtx (void)
1101 {
1102   /* we want [sp] */
1103   return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1104 }
1105 
1106 /* Exception Handling Support */
1107 
1108 /* Implements EH_RETURN_DATA_REGNO.  Choose registers able to hold
1109    pointers.  */
1110 int
1111 m32c_eh_return_data_regno (int n)
1112 {
1113   switch (n)
1114     {
1115     case 0:
1116       return A0_REGNO;
1117     case 1:
1118       return A1_REGNO;
1119     default:
1120       return INVALID_REGNUM;
1121     }
1122 }
1123 
1124 /* Implements EH_RETURN_STACKADJ_RTX.  Saved and used later in
1125    m32c_emit_eh_epilogue.  */
1126 rtx
1127 m32c_eh_return_stackadj_rtx (void)
1128 {
1129   if (!cfun->machine->eh_stack_adjust)
1130     {
1131       rtx sa;
1132 
1133       sa = gen_reg_rtx (Pmode);
1134       cfun->machine->eh_stack_adjust = sa;
1135     }
1136   return cfun->machine->eh_stack_adjust;
1137 }
1138 
1139 /* Registers That Address the Stack Frame */
1140 
1141 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER.  Note that
1142    the original spec called for dwarf numbers to vary with register
1143    width as well, for example, r0l, r0, and r2r0 would each have
1144    different dwarf numbers.  GCC doesn't support this, and we don't do
1145    it, and gdb seems to like it this way anyway.  */
1146 unsigned int
1147 m32c_dwarf_frame_regnum (int n)
1148 {
1149   switch (n)
1150     {
1151     case R0_REGNO:
1152       return 5;
1153     case R1_REGNO:
1154       return 6;
1155     case R2_REGNO:
1156       return 7;
1157     case R3_REGNO:
1158       return 8;
1159     case A0_REGNO:
1160       return 9;
1161     case A1_REGNO:
1162       return 10;
1163     case FB_REGNO:
1164       return 11;
1165     case SB_REGNO:
1166       return 19;
1167 
1168     case SP_REGNO:
1169       return 12;
1170     case PC_REGNO:
1171       return 13;
1172     default:
1173       return DWARF_FRAME_REGISTERS + 1;
1174     }
1175 }
1176 
1177 /* The frame looks like this:
1178 
1179    ap -> +------------------------------
1180          | Return address (3 or 4 bytes)
1181 	 | Saved FB (2 or 4 bytes)
1182    fb -> +------------------------------
1183 	 | local vars
1184          | register saves fb
1185 	 |        through r0 as needed
1186    sp -> +------------------------------
1187 */
1188 
1189 /* We use this to wrap all emitted insns in the prologue.  */
1190 static rtx
1191 F (rtx x)
1192 {
1193   RTX_FRAME_RELATED_P (x) = 1;
1194   return x;
1195 }
1196 
1197 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1198    how much the stack pointer moves for each, for each cpu family.  */
1199 static struct
1200 {
1201   int reg1;
1202   int bit;
1203   int a16_bytes;
1204   int a24_bytes;
1205 } pushm_info[] =
1206 {
1207   /* These are in reverse push (nearest-to-sp) order.  */
1208   { R0_REGNO, 0x80, 2, 2 },
1209   { R1_REGNO, 0x40, 2, 2 },
1210   { R2_REGNO, 0x20, 2, 2 },
1211   { R3_REGNO, 0x10, 2, 2 },
1212   { A0_REGNO, 0x08, 2, 4 },
1213   { A1_REGNO, 0x04, 2, 4 },
1214   { SB_REGNO, 0x02, 2, 4 },
1215   { FB_REGNO, 0x01, 2, 4 }
1216 };
1217 
1218 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1219 
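/* Worked example for the table above (a sketch): a PUSHM mask of 0x81
   names r0 and fb.  On an A16 part that push moves the stack pointer
   by 2 + 2 = 4 bytes; on an A24 part fb is pushed as 4 bytes, so the
   same mask costs 2 + 4 = 6 bytes.  */
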
1220 /* Returns TRUE if we need to save/restore the given register.  We
1221    save everything for exception handlers, so that any register can be
1222    unwound.  For interrupt handlers, we save everything if the handler
1223    calls something else (because we don't know what *that* function
1224    might do), but try to be a bit smarter if the handler is a leaf
1225    function.  We always save $a0, though, because we use that in the
1226    epilog to copy $fb to $sp.  */
1227 static int
1228 need_to_save (int regno)
1229 {
1230   if (fixed_regs[regno])
1231     return 0;
1232   if (cfun->calls_eh_return)
1233     return 1;
1234   if (regno == FP_REGNO)
1235     return 0;
1236   if (cfun->machine->is_interrupt
1237       && (!cfun->machine->is_leaf || regno == A0_REGNO))
1238     return 1;
1239   if (regs_ever_live[regno]
1240       && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1241     return 1;
1242   return 0;
1243 }
1244 
1245 /* This function contains all the intelligence about saving and
1246    restoring registers.  It always figures out the register save set.
1247    When called with PP_justcount, it merely returns the size of the
1248    save set (for eliminating the frame pointer, for example).  When
1249    called with PP_pushm or PP_popm, it emits the appropriate
1250    instructions for saving (pushm) or restoring (popm) the
1251    registers.  */
1252 static int
1253 m32c_pushm_popm (Push_Pop_Type ppt)
1254 {
1255   int reg_mask = 0;
1256   int byte_count = 0, bytes;
1257   int i;
1258   rtx dwarf_set[PUSHM_N];
1259   int n_dwarfs = 0;
1260   int nosave_mask = 0;
1261 
1262   if (cfun->return_rtx
1263       && GET_CODE (cfun->return_rtx) == PARALLEL
1264       && !(cfun->calls_eh_return || cfun->machine->is_interrupt))
1265     {
1266       rtx exp = XVECEXP (cfun->return_rtx, 0, 0);
1267       rtx rv = XEXP (exp, 0);
1268       int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1269 
1270       if (rv_bytes > 2)
1271 	nosave_mask |= 0x20;	/* PSI, SI */
1272       else
1273 	nosave_mask |= 0xf0;	/* DF */
1274       if (rv_bytes > 4)
1275 	nosave_mask |= 0x50;	/* DI */
1276     }
1277 
1278   for (i = 0; i < (int) PUSHM_N; i++)
1279     {
1280       /* Skip if neither register needs saving.  */
1281       if (!need_to_save (pushm_info[i].reg1))
1282 	continue;
1283 
1284       if (pushm_info[i].bit & nosave_mask)
1285 	continue;
1286 
1287       reg_mask |= pushm_info[i].bit;
1288       bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1289 
1290       if (ppt == PP_pushm)
1291 	{
1292 	  enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1293 	  rtx addr;
1294 
1295 	  /* Always use stack_pointer_rtx instead of calling
1296 	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
1297 	     that there is a single rtx representing the stack pointer,
1298 	     namely stack_pointer_rtx, and uses == to recognize it.  */
1299 	  addr = stack_pointer_rtx;
1300 
1301 	  if (byte_count != 0)
1302 	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1303 
1304 	  dwarf_set[n_dwarfs++] =
1305 	    gen_rtx_SET (VOIDmode,
1306 			 gen_rtx_MEM (mode, addr),
1307 			 gen_rtx_REG (mode, pushm_info[i].reg1));
1308 	  F (dwarf_set[n_dwarfs - 1]);
1309 
1310 	}
1311       byte_count += bytes;
1312     }
1313 
1314   if (cfun->machine->is_interrupt)
1315     {
1316       cfun->machine->intr_pushm = reg_mask & 0xfe;
1317       reg_mask = 0;
1318       byte_count = 0;
1319     }
1320 
1321   if (cfun->machine->is_interrupt)
1322     for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1323       if (need_to_save (i))
1324 	{
1325 	  byte_count += 2;
1326 	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1327 	}
1328 
1329   if (ppt == PP_pushm && byte_count)
1330     {
1331       rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1332       rtx pushm;
1333 
1334       if (reg_mask)
1335 	{
1336 	  XVECEXP (note, 0, 0)
1337 	    = gen_rtx_SET (VOIDmode,
1338 			   stack_pointer_rtx,
1339 			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1340 					 stack_pointer_rtx,
1341 					 GEN_INT (-byte_count)));
1342 	  F (XVECEXP (note, 0, 0));
1343 
1344 	  for (i = 0; i < n_dwarfs; i++)
1345 	    XVECEXP (note, 0, i + 1) = dwarf_set[i];
1346 
1347 	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1348 
1349 	  REG_NOTES (pushm) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, note,
1350 						 REG_NOTES (pushm));
1351 	}
1352 
1353       if (cfun->machine->is_interrupt)
1354 	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1355 	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1356 	    {
1357 	      if (TARGET_A16)
1358 		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1359 	      else
1360 		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1361 	      F (pushm);
1362 	    }
1363     }
1364   if (ppt == PP_popm && byte_count)
1365     {
1366       if (cfun->machine->is_interrupt)
1367 	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1368 	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1369 	    {
1370 	      if (TARGET_A16)
1371 		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
1372 	      else
1373 		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1374 	    }
1375       if (reg_mask)
1376 	emit_insn (gen_popm (GEN_INT (reg_mask)));
1377     }
1378 
1379   return byte_count;
1380 }
1381 
1382 /* Implements INITIAL_ELIMINATION_OFFSET.  See the comment above that
1383    diagrams our call frame.  */
1384 int
1385 m32c_initial_elimination_offset (int from, int to)
1386 {
1387   int ofs = 0;
1388 
1389   if (from == AP_REGNO)
1390     {
1391       if (TARGET_A16)
1392 	ofs += 5;
1393       else
1394 	ofs += 8;
1395     }
1396 
1397   if (to == SP_REGNO)
1398     {
1399       ofs += m32c_pushm_popm (PP_justcount);
1400       ofs += get_frame_size ();
1401     }
1402 
1403   /* Account for push rounding.  */
1404   if (TARGET_A24)
1405     ofs = (ofs + 1) & ~1;
1406 #if DEBUG0
1407   fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1408 	   to, ofs);
1409 #endif
1410   return ofs;
1411 }
1412 
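/* Worked example (a sketch): eliminating AP into FB on an A16 part
   yields just the 5-byte frame header (3-byte return address plus the
   2-byte saved fb); eliminating AP into SP additionally adds the
   pushed-register bytes reported by m32c_pushm_popm and the local
   frame size, with the total rounded up to a word on A24 parts.  */
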
1413 /* Passing Function Arguments on the Stack */
1414 
1415 #undef TARGET_PROMOTE_PROTOTYPES
1416 #define TARGET_PROMOTE_PROTOTYPES m32c_promote_prototypes
1417 static bool
1418 m32c_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
1419 {
1420   return 0;
1421 }
1422 
1423 /* Implements PUSH_ROUNDING.  The R8C and M16C have byte stacks, the
1424    M32C has word stacks.  */
1425 int
1426 m32c_push_rounding (int n)
1427 {
1428   if (TARGET_R8C || TARGET_M16C)
1429     return n;
1430   return (n + 1) & ~1;
1431 }
1432 
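/* Worked example (a sketch): pushing a 3-byte PSImode value stays 3
   bytes on the byte-stacked R8C/M16C but rounds to 4 bytes on the
   word-stacked M32C, since (3 + 1) & ~1 == 4.  */
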
1433 /* Passing Arguments in Registers */
1434 
1435 /* Implements FUNCTION_ARG.  Arguments are passed partly in registers,
1436    partly on stack.  If our function returns a struct, a pointer to a
1437    buffer for it is at the top of the stack (last thing pushed).  The
1438    first few real arguments may be in registers as follows:
1439 
1440    R8C/M16C:	arg1 in r1 if it's QI or HI (else it's pushed on stack)
1441 		arg2 in r2 if it's HI (else pushed on stack)
1442 		rest on stack
1443    M32C:        arg1 in r0 if it's QI or HI (else it's pushed on stack)
1444 		rest on stack
1445 
1446    Structs are not passed in registers, even if they fit.  Only
1447    integer and pointer types are passed in registers.
1448 
1449    Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1450    r2 if it fits.  */
1451 rtx
1452 m32c_function_arg (CUMULATIVE_ARGS * ca,
1453 		   enum machine_mode mode, tree type, int named)
1454 {
1455   /* Can return a reg, parallel, or 0 for stack */
1456   rtx rv = NULL_RTX;
1457 #if DEBUG0
1458   fprintf (stderr, "func_arg %d (%s, %d)\n",
1459 	   ca->parm_num, mode_name[mode], named);
1460   debug_tree (type);
1461 #endif
1462 
1463   if (mode == VOIDmode)
1464     return GEN_INT (0);
1465 
1466   if (ca->force_mem || !named)
1467     {
1468 #if DEBUG0
1469       fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1470 	       named);
1471 #endif
1472       return NULL_RTX;
1473     }
1474 
1475   if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1476     return NULL_RTX;
1477 
1478   if (type && AGGREGATE_TYPE_P (type))
1479     return NULL_RTX;
1480 
1481   switch (ca->parm_num)
1482     {
1483     case 1:
1484       if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1485 	rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1486       break;
1487 
1488     case 2:
1489       if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1490 	rv = gen_rtx_REG (mode, R2_REGNO);
1491       break;
1492     }
1493 
1494 #if DEBUG0
1495   debug_rtx (rv);
1496 #endif
1497   return rv;
1498 }
1499 
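/* An illustrative sketch (not part of the build), assuming the usual
   16-bit int on these parts: for "void f (int a, int b, long c)" on
   an A16 chip the rules above place A in r1 and B in r2 (both
   HImode), while C is wider than two bytes and is passed on the
   stack (a NULL_RTX return).  */
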
1500 #undef TARGET_PASS_BY_REFERENCE
1501 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1502 static bool
1503 m32c_pass_by_reference (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED,
1504 			enum machine_mode mode ATTRIBUTE_UNUSED,
1505 			tree type ATTRIBUTE_UNUSED,
1506 			bool named ATTRIBUTE_UNUSED)
1507 {
1508   return 0;
1509 }
1510 
1511 /* Implements INIT_CUMULATIVE_ARGS.  */
1512 void
1513 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1514 			   tree fntype,
1515 			   rtx libname ATTRIBUTE_UNUSED,
1516 			   tree fndecl,
1517 			   int n_named_args ATTRIBUTE_UNUSED)
1518 {
1519   if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1520     ca->force_mem = 1;
1521   else
1522     ca->force_mem = 0;
1523   ca->parm_num = 1;
1524 }
1525 
1526 /* Implements FUNCTION_ARG_ADVANCE.  force_mem is set for functions
1527    returning structures, so we always reset that.  Otherwise, we only
1528    need to know the sequence number of the argument to know what to do
1529    with it.  */
1530 void
1531 m32c_function_arg_advance (CUMULATIVE_ARGS * ca,
1532 			   enum machine_mode mode ATTRIBUTE_UNUSED,
1533 			   tree type ATTRIBUTE_UNUSED,
1534 			   int named ATTRIBUTE_UNUSED)
1535 {
1536   if (ca->force_mem)
1537     ca->force_mem = 0;
1538   else
1539     ca->parm_num++;
1540 }
1541 
1542 /* Implements FUNCTION_ARG_REGNO_P.  */
1543 int
1544 m32c_function_arg_regno_p (int r)
1545 {
1546   if (TARGET_A24)
1547     return (r == R0_REGNO);
1548   return (r == R1_REGNO || r == R2_REGNO);
1549 }
1550 
1551 /* HImode and PSImode are the two "native" modes as far as GCC is
1552    concerned, but the chips also support a 32 bit mode which is used
1553    for some opcodes in R8C/M16C and for reset vectors and such.  */
1554 #undef TARGET_VALID_POINTER_MODE
1555 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1556 static bool
1557 m32c_valid_pointer_mode (enum machine_mode mode)
1558 {
1559   if (mode == HImode
1560       || mode == PSImode
1561       || mode == SImode
1562       )
1563     return 1;
1564   return 0;
1565 }
1566 
1567 /* How Scalar Function Values Are Returned */
1568 
1569 /* Implements LIBCALL_VALUE.  Most values are returned in $r0, or some
1570    combination of registers starting there (r2r0 for longs, r3r1r2r0
1571    for long long, r3r2r1r0 for doubles), except that that ABI
1572    currently doesn't work because it ends up using all available
1573    general registers and gcc often can't compile it.  So, instead, we
1574    return anything bigger than 16 bits in "mem0" (effectively, a
1575    memory location).  */
1576 rtx
1577 m32c_libcall_value (enum machine_mode mode)
1578 {
1579   /* return reg or parallel */
1580 #if 0
1581   /* FIXME: GCC has difficulty returning large values in registers,
1582      because that ties up most of the general registers and gives the
1583      register allocator little to work with.  Until we can resolve
1584      this, large values are returned in memory.  */
1585   if (mode == DFmode)
1586     {
1587       rtx rv;
1588 
1589       rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1590       XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1591 					      gen_rtx_REG (HImode,
1592 							   R0_REGNO),
1593 					      GEN_INT (0));
1594       XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1595 					      gen_rtx_REG (HImode,
1596 							   R1_REGNO),
1597 					      GEN_INT (2));
1598       XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1599 					      gen_rtx_REG (HImode,
1600 							   R2_REGNO),
1601 					      GEN_INT (4));
1602       XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1603 					      gen_rtx_REG (HImode,
1604 							   R3_REGNO),
1605 					      GEN_INT (6));
1606       return rv;
1607     }
1608 
1609   if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1610     {
1611       rtx rv;
1612 
1613       rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1614       XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1615 					      gen_rtx_REG (mode,
1616 							   R0_REGNO),
1617 					      GEN_INT (0));
1618       return rv;
1619     }
1620 #endif
1621 
1622   if (GET_MODE_SIZE (mode) > 2)
1623     return gen_rtx_REG (mode, MEM0_REGNO);
1624   return gen_rtx_REG (mode, R0_REGNO);
1625 }
1626 
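/* Worked example (a sketch): with the fallback path above, an HImode
   libcall result comes back in r0, while an SImode or DFmode result
   is wider than two bytes and is returned in the mem0 memreg
   instead.  */
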
1627 /* Implements FUNCTION_VALUE.  Functions and libcalls have the same
1628    conventions.  */
1629 rtx
1630 m32c_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
1631 {
1632   /* return reg or parallel */
1633   enum machine_mode mode = TYPE_MODE (valtype);
1634   return m32c_libcall_value (mode);
1635 }
1636 
1637 /* How Large Values Are Returned */
1638 
1639 /* We return structures by pushing the address on the stack, even if
1640    we use registers for the first few "real" arguments.  */
1641 #undef TARGET_STRUCT_VALUE_RTX
1642 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1643 static rtx
1644 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1645 		       int incoming ATTRIBUTE_UNUSED)
1646 {
1647   return 0;
1648 }
1649 
1650 /* Function Entry and Exit */
1651 
1652 /* Implements EPILOGUE_USES.  Interrupts restore all registers.  */
1653 int
1654 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1655 {
1656   if (cfun->machine->is_interrupt)
1657     return 1;
1658   return 0;
1659 }
1660 
1661 /* Implementing the Varargs Macros */
1662 
1663 #undef TARGET_STRICT_ARGUMENT_NAMING
1664 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1665 static bool
1666 m32c_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1667 {
1668   return 1;
1669 }
1670 
1671 /* Trampolines for Nested Functions */
1672 
1673 /*
1674    m16c:
1675    1 0000 75C43412              mov.w   #0x1234,a0
1676    2 0004 FC000000              jmp.a   label
1677 
1678    m32c:
1679    1 0000 BC563412              mov.l:s #0x123456,a0
1680    2 0004 CC000000              jmp.a   label
1681 */
1682 
1683 /* Implements TRAMPOLINE_SIZE.  */
1684 int
1685 m32c_trampoline_size (void)
1686 {
1687   /* Allocate extra space so we can avoid the messy shifts when we
1688      initialize the trampoline; we just write past the end of the
1689      opcode.  */
1690   return TARGET_A16 ? 8 : 10;
1691 }
1692 
1693 /* Implements TRAMPOLINE_ALIGNMENT.  */
1694 int
1695 m32c_trampoline_alignment (void)
1696 {
1697   return 2;
1698 }
1699 
1700 /* Implements INITIALIZE_TRAMPOLINE.  */
1701 void
1702 m32c_initialize_trampoline (rtx tramp, rtx function, rtx chainval)
1703 {
1704 #define A0(m,i) gen_rtx_MEM (m, plus_constant (tramp, i))
1705   if (TARGET_A16)
1706     {
1707       /* Note: we subtract a "word" because the moves want signed
1708 	 constants, not unsigned constants.  */
1709       emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1710       emit_move_insn (A0 (HImode, 2), chainval);
1711       emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1712       /* We use 16 bit addresses here, but store the zero to turn it
1713 	 into a 24 bit offset.  */
1714       emit_move_insn (A0 (HImode, 5), function);
1715       emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1716     }
1717   else
1718     {
1719       /* Note that the PSI moves actually write 4 bytes.  Make sure we
1720 	 write stuff out in the right order, and leave room for the
1721 	 extra byte at the end.  */
1722       emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1723       emit_move_insn (A0 (PSImode, 1), chainval);
1724       emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1725       emit_move_insn (A0 (PSImode, 5), function);
1726     }
1727 #undef A0
1728 }
1729 
1730 /* Implicit Calls to Library Routines */
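/* Worked example (a sketch, assuming the little-endian byte order the
   listings above imply) for the A16 trampoline just written: bytes
   0-1 hold the mov.w opcode 0x75 0xc4, bytes 2-3 the static chain,
   byte 4 the jmp.a opcode 0xfc, bytes 5-6 the 16-bit function
   address, and byte 7 the zero that widens it to a 24-bit
   destination.  */
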
1731 
1732 #undef TARGET_INIT_LIBFUNCS
1733 #define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
1734 static void
1735 m32c_init_libfuncs (void)
1736 {
1737   if (TARGET_A24)
1738     {
1739       /* We do this because the M32C has an HImode operand, but the
1740 	 M16C has an 8 bit operand.  Since gcc looks at the match data
1741 	 and not the expanded rtl, we have to reset the array so that
1742 	 the right modes are found. */
1743       setcc_gen_code[EQ] = CODE_FOR_seq_24;
1744       setcc_gen_code[NE] = CODE_FOR_sne_24;
1745       setcc_gen_code[GT] = CODE_FOR_sgt_24;
1746       setcc_gen_code[GE] = CODE_FOR_sge_24;
1747       setcc_gen_code[LT] = CODE_FOR_slt_24;
1748       setcc_gen_code[LE] = CODE_FOR_sle_24;
1749       setcc_gen_code[GTU] = CODE_FOR_sgtu_24;
1750       setcc_gen_code[GEU] = CODE_FOR_sgeu_24;
1751       setcc_gen_code[LTU] = CODE_FOR_sltu_24;
1752       setcc_gen_code[LEU] = CODE_FOR_sleu_24;
1753     }
1754 }
1755 
1756 /* Addressing Modes */
1757 
1758 /* Used by GO_IF_LEGITIMATE_ADDRESS.  The r8c/m32c family supports a
1759    wide range of non-orthogonal addressing modes, including the
1760    ability to double-indirect on *some* of them.  Not all insns
1761    support all modes, either, but we rely on predicates and
1762    constraints to deal with that.  */
1763 int
1764 m32c_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1765 {
1766   int mode_adjust;
1767   if (CONSTANT_P (x))
1768     return 1;
1769 
1770   /* Wide references to memory will be split after reload, so we must
1771      ensure that all parts of such splits remain legitimate
1772      addresses.  */
1773   mode_adjust = GET_MODE_SIZE (mode) - 1;
1774 
1775   /* Allowing PLUS here yields mem:HI (plus:SI (mem:SI (plus:SI ...))) in m32c_split_move.  */
1776   if (GET_CODE (x) == PRE_DEC
1777       || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1778     {
1779       return (GET_CODE (XEXP (x, 0)) == REG
1780 	      && REGNO (XEXP (x, 0)) == SP_REGNO);
1781     }
1782 
1783 #if 0
1784   /* This is the double indirection detection, but it currently
1785      doesn't work as cleanly as this code implies, so until we've had
1786      a chance to debug it, leave it disabled.  */
1787   if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1788     {
1789 #if DEBUG_DOUBLE
1790       fprintf (stderr, "double indirect\n");
1791 #endif
1792       x = XEXP (x, 0);
1793     }
1794 #endif
1795 
1796   encode_pattern (x);
1797   if (RTX_IS ("r"))
1798     {
1799       /* Most indexable registers can be used without displacements,
1800 	 although some of them will be emitted with an explicit zero
1801 	 to please the assembler.  */
1802       switch (REGNO (patternr[0]))
1803 	{
1804 	case A0_REGNO:
1805 	case A1_REGNO:
1806 	case SB_REGNO:
1807 	case FB_REGNO:
1808 	case SP_REGNO:
1809 	  return 1;
1810 
1811 	default:
1812 	  if (IS_PSEUDO (patternr[0], strict))
1813 	    return 1;
1814 	  return 0;
1815 	}
1816     }
1817   if (RTX_IS ("+ri"))
1818     {
1819       /* This is more interesting, because different base registers
1820 	 allow for different displacements - both range and signedness
1821 	 - and it differs from chip series to chip series too.  */
1822       int rn = REGNO (patternr[1]);
1823       HOST_WIDE_INT offs = INTVAL (patternr[2]);
1824       switch (rn)
1825 	{
1826 	case A0_REGNO:
1827 	case A1_REGNO:
1828 	case SB_REGNO:
1829 	  /* The syntax only allows positive offsets, but when the
1830 	     offsets span the entire memory range, we can simulate
1831 	     negative offsets by wrapping.  */
1832 	  if (TARGET_A16)
1833 	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
1834 	  if (rn == SB_REGNO)
1835 	    return (offs >= 0 && offs <= 65535 - mode_adjust);
1836 	  /* A0 or A1 */
1837 	  return (offs >= -16777216 && offs <= 16777215);
1838 
1839 	case FB_REGNO:
1840 	  if (TARGET_A16)
1841 	    return (offs >= -128 && offs <= 127 - mode_adjust);
1842 	  return (offs >= -65536 && offs <= 65535 - mode_adjust);
1843 
1844 	case SP_REGNO:
1845 	  return (offs >= -128 && offs <= 127 - mode_adjust);
1846 
1847 	default:
1848 	  if (IS_PSEUDO (patternr[1], strict))
1849 	    return 1;
1850 	  return 0;
1851 	}
1852     }
1853   if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1854     {
1855       rtx reg = patternr[1];
1856 
1857       /* We don't know where the symbol is, so only allow base
1858 	 registers which support displacements spanning the whole
1859 	 address range.  */
1860       switch (REGNO (reg))
1861 	{
1862 	case A0_REGNO:
1863 	case A1_REGNO:
1864 	  /* $sb needs a secondary reload, but since it's involved in
1865 	     memory address reloads too, we don't deal with it very
1866 	     well.  */
1867 	  /*    case SB_REGNO: */
1868 	  return 1;
1869 	default:
1870 	  if (IS_PSEUDO (reg, strict))
1871 	    return 1;
1872 	  return 0;
1873 	}
1874     }
1875   return 0;
1876 }
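
#if 0
/* Illustrative sketch only (not part of the original port): one way
   to probe the predicate above with a hand-built $fb-relative
   address.  The mode and offset are arbitrary examples.  */
static int
example_probe_fb_address (void)
{
  rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
  rtx addr = gen_rtx_PLUS (Pmode, fb, GEN_INT (-4));
  /* -4 is inside the -128..127 window the FB_REGNO case allows on
     the A16, so this returns nonzero on both chip families.  */
  return m32c_legitimate_address_p (HImode, addr, 1);
}
#endif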
1877 
1878 /* Implements REG_OK_FOR_BASE_P.  */
1879 int
1880 m32c_reg_ok_for_base_p (rtx x, int strict)
1881 {
1882   if (GET_CODE (x) != REG)
1883     return 0;
1884   switch (REGNO (x))
1885     {
1886     case A0_REGNO:
1887     case A1_REGNO:
1888     case SB_REGNO:
1889     case FB_REGNO:
1890     case SP_REGNO:
1891       return 1;
1892     default:
1893       if (IS_PSEUDO (x, strict))
1894 	return 1;
1895       return 0;
1896     }
1897 }
1898 
1899 /* We have three choices for choosing fb->aN offsets.  If we choose -128,
1900    we need one MOVA -128[fb],aN opcode and 16 bit aN displacements,
1901    like this:
1902        EB 4B FF    mova    -128[$fb],$a0
1903        D8 0C FF FF mov.w:Q #0,-1[$a0]
1904 
1905    Alternately, we subtract the frame size, and hopefully use 8 bit aN
1906    displacements:
1907        7B F4       stc $fb,$a0
1908        77 54 00 01 sub #256,$a0
1909        D8 08 01    mov.w:Q #0,1[$a0]
1910 
1911    If we don't offset (i.e. offset by zero), we end up with:
1912        7B F4       stc $fb,$a0
1913        D8 0C 00 FF mov.w:Q #0,-256[$a0]
1914 
1915    We have to subtract *something* so that we have a PLUS rtx to mark
1916    that we've done this reload.  The -128 offset will never result in
1917    an 8 bit aN offset, and the payoff for the second case is five
1918    loads *if* those loads are within 256 bytes of the other end of the
1919    frame, so the third case seems best.  Note that we subtract the
1920    zero, but detect that in the addhi3 pattern.  */
1921 
1922 #define BIG_FB_ADJ 0
1923 
1924 /* Implements LEGITIMIZE_ADDRESS.  The only address we really have to
1925    worry about is frame base offsets, as $fb has a limited
1926    displacement range.  We deal with this by attempting to reload $fb
1927    itself into an address register; that seems to result in the best
1928    code.  */
1929 int
1930 m32c_legitimize_address (rtx * x ATTRIBUTE_UNUSED,
1931 			 rtx oldx ATTRIBUTE_UNUSED,
1932 			 enum machine_mode mode ATTRIBUTE_UNUSED)
1933 {
1934 #if DEBUG0
1935   fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1936   debug_rtx (*x);
1937   fprintf (stderr, "\n");
1938 #endif
1939 
1940   if (GET_CODE (*x) == PLUS
1941       && GET_CODE (XEXP (*x, 0)) == REG
1942       && REGNO (XEXP (*x, 0)) == FB_REGNO
1943       && GET_CODE (XEXP (*x, 1)) == CONST_INT
1944       && (INTVAL (XEXP (*x, 1)) < -128
1945 	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1946     {
1947       /* reload FB to A_REGS */
1948       rtx temp = gen_reg_rtx (Pmode);
1949       *x = copy_rtx (*x);
1950       emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (*x, 0)));
1951       XEXP (*x, 0) = temp;
1952       return 1;
1953     }
1954 
1955   return 0;
1956 }
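
/* A worked example of the transformation above (illustrative only):
   an access such as (mem:HI (plus (reg fb) (const_int 300))) is out
   of $fb's displacement range on the A16, so the code rewrites it as

       (set (reg temp) (reg fb))
       (mem:HI (plus (reg temp) (const_int 300)))

   and lets the new pseudo end up in an address register, which
   accepts much larger displacements.  */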
1957 
1958 /* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  */
1959 int
1960 m32c_legitimize_reload_address (rtx * x,
1961 				enum machine_mode mode,
1962 				int opnum,
1963 				int type, int ind_levels ATTRIBUTE_UNUSED)
1964 {
1965 #if DEBUG0
1966   fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1967 	   mode_name[mode]);
1968   debug_rtx (*x);
1969 #endif
1970 
1971   /* At one point, this function tried to get $fb copied to an address
1972      register, which in theory would maximize sharing, but gcc was
1973      *also* still trying to reload the whole address, and we'd run out
1974      of address registers.  So we let gcc do the naive (but safe)
1975      reload instead, when the above function doesn't handle it for
1976      us.
1977 
1978      The code below is a second attempt at the above.  */
1979 
1980   if (GET_CODE (*x) == PLUS
1981       && GET_CODE (XEXP (*x, 0)) == REG
1982       && REGNO (XEXP (*x, 0)) == FB_REGNO
1983       && GET_CODE (XEXP (*x, 1)) == CONST_INT
1984       && (INTVAL (XEXP (*x, 1)) < -128
1985 	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1986     {
1987       rtx sum;
1988       int offset = INTVAL (XEXP (*x, 1));
1989       int adjustment = -BIG_FB_ADJ;
1990 
1991       sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1992 			  GEN_INT (adjustment));
1993       *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1994       if (type == RELOAD_OTHER)
1995 	type = RELOAD_FOR_OTHER_ADDRESS;
1996       push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1997 		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1998 		   type);
1999       return 1;
2000     }
2001 
2002   if (GET_CODE (*x) == PLUS
2003       && GET_CODE (XEXP (*x, 0)) == PLUS
2004       && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
2005       && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
2006       && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
2007       && GET_CODE (XEXP (*x, 1)) == CONST_INT
2008       )
2009     {
2010       if (type == RELOAD_OTHER)
2011 	type = RELOAD_FOR_OTHER_ADDRESS;
2012       push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
2013 		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2014 		   type);
2015       return 1;
2016     }
2017 
2018   return 0;
2019 }
2020 
2021 /* Used in GO_IF_MODE_DEPENDENT_ADDRESS.  */
2022 int
2023 m32c_mode_dependent_address (rtx addr)
2024 {
2025   if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == PRE_DEC)
2026     return 1;
2027   return 0;
2028 }
2029 
2030 /* Implements LEGITIMATE_CONSTANT_P.  We split large constants anyway,
2031    so we can allow anything.  */
2032 int
2033 m32c_legitimate_constant_p (rtx x ATTRIBUTE_UNUSED)
2034 {
2035   return 1;
2036 }
2037 
2038 
2039 /* Condition Code Status */
2040 
2041 #undef TARGET_FIXED_CONDITION_CODE_REGS
2042 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2043 static bool
2044 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2045 {
2046   *p1 = FLG_REGNO;
2047   *p2 = INVALID_REGNUM;
2048   return true;
2049 }
2050 
2051 /* Describing Relative Costs of Operations */
2052 
2053 /* Implements REGISTER_MOVE_COST.  We make impossible moves
2054    prohibitively expensive, like trying to put QIs in r2/r3 (there are
2055    no opcodes to do that).  We also discourage use of mem* registers
2056    since they're really memory.  */
2057 int
2058 m32c_register_move_cost (enum machine_mode mode, int from, int to)
2059 {
2060   int cost = COSTS_N_INSNS (3);
2061   int cc = class_contents[from][0] | class_contents[to][0];
2062   /* FIXME: pick real values, but not 2 for now.  */
2063   if (mode == QImode && (cc & class_contents[R23_REGS][0]))
2064     {
2065       if (!(cc & ~class_contents[R23_REGS][0]))
2066 	cost = COSTS_N_INSNS (1000);
2067       else
2068 	cost = COSTS_N_INSNS (80);
2069     }
2070 
2071   if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2072     cost = COSTS_N_INSNS (1000);
2073 
2074   if (classes_intersect (from, CR_REGS))
2075     cost += COSTS_N_INSNS (5);
2076 
2077   if (classes_intersect (to, CR_REGS))
2078     cost += COSTS_N_INSNS (5);
2079 
2080   if (from == MEM_REGS || to == MEM_REGS)
2081     cost += COSTS_N_INSNS (50);
2082   else if (classes_intersect (from, MEM_REGS)
2083 	   || classes_intersect (to, MEM_REGS))
2084     cost += COSTS_N_INSNS (10);
2085 
2086 #if DEBUG0
2087   fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2088 	   mode_name[mode], class_names[from], class_names[to], cost);
2089 #endif
2090   return cost;
2091 }
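
/* Worked examples of the costs above (illustrative only): a QImode
   move whose source and destination classes both reduce to r2/r3
   costs COSTS_N_INSNS (1000), effectively forbidding it, since no
   opcode can do it; a move involving the mem0..mem7 pseudo-registers
   picks up an extra COSTS_N_INSNS (50), or (10) if the class merely
   intersects MEM_REGS, because those "registers" are really memory.  */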
2092 
2093 /*  Implements MEMORY_MOVE_COST.  */
2094 int
2095 m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2096 		       int reg_class ATTRIBUTE_UNUSED,
2097 		       int in ATTRIBUTE_UNUSED)
2098 {
2099   /* FIXME: pick real values.  */
2100   return COSTS_N_INSNS (10);
2101 }
2102 
2103 /* Here we try to describe when we use multiple opcodes for one RTX so
2104    that gcc knows when to use them.  */
2105 #undef TARGET_RTX_COSTS
2106 #define TARGET_RTX_COSTS m32c_rtx_costs
2107 static bool
2108 m32c_rtx_costs (rtx x, int code, int outer_code, int *total)
2109 {
2110   switch (code)
2111     {
2112     case REG:
2113       if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2114 	*total += COSTS_N_INSNS (500);
2115       else
2116 	*total += COSTS_N_INSNS (1);
2117       return true;
2118 
2119     case ASHIFT:
2120     case LSHIFTRT:
2121     case ASHIFTRT:
2122       if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2123 	{
2124 	  /* mov.b r1l, r1h */
2125 	  *total +=  COSTS_N_INSNS (1);
2126 	  return true;
2127 	}
2128       if (INTVAL (XEXP (x, 1)) > 8
2129 	  || INTVAL (XEXP (x, 1)) < -8)
2130 	{
2131 	  /* mov.b #N, r1l */
2132 	  /* mov.b r1l, r1h */
2133 	  *total +=  COSTS_N_INSNS (2);
2134 	  return true;
2135 	}
2136       return true;
2137 
2138     case LE:
2139     case LEU:
2140     case LT:
2141     case LTU:
2142     case GT:
2143     case GTU:
2144     case GE:
2145     case GEU:
2146     case NE:
2147     case EQ:
2148       if (outer_code == SET)
2149 	{
2150 	  *total += COSTS_N_INSNS (2);
2151 	  return true;
2152 	}
2153       break;
2154 
2155     case ZERO_EXTRACT:
2156       {
2157 	rtx dest = XEXP (x, 0);
2158 	rtx addr = XEXP (dest, 0);
2159 	switch (GET_CODE (addr))
2160 	  {
2161 	  case CONST_INT:
2162 	    *total += COSTS_N_INSNS (1);
2163 	    break;
2164 	  case SYMBOL_REF:
2165 	    *total += COSTS_N_INSNS (3);
2166 	    break;
2167 	  default:
2168 	    *total += COSTS_N_INSNS (2);
2169 	    break;
2170 	  }
2171 	return true;
2172       }
2173       break;
2174 
2175     default:
2176       /* Reasonable default.  */
2177       if (TARGET_A16 && GET_MODE(x) == SImode)
2178 	*total += COSTS_N_INSNS (2);
2179       break;
2180     }
2181   return false;
2182 }
2183 
2184 #undef TARGET_ADDRESS_COST
2185 #define TARGET_ADDRESS_COST m32c_address_cost
2186 static int
2187 m32c_address_cost (rtx addr)
2188 {
2189   /*  fprintf(stderr, "\naddress_cost\n");
2190       debug_rtx(addr);*/
2191   switch (GET_CODE (addr))
2192     {
2193     case CONST_INT:
2194       return COSTS_N_INSNS(1);
2195     case SYMBOL_REF:
2196       return COSTS_N_INSNS(3);
2197     case REG:
2198       return COSTS_N_INSNS(2);
2199     default:
2200       return 0;
2201     }
2202 }
2203 
2204 /* Defining the Output Assembler Language */
2205 
2206 /* The Overall Framework of an Assembler File */
2207 
2208 #undef TARGET_HAVE_NAMED_SECTIONS
2209 #define TARGET_HAVE_NAMED_SECTIONS true
2210 
2211 /* Output of Data */
2212 
2213 /* We may have 24 bit sizes, which is the native address size.
2214    Currently unused, but provided for completeness.  */
2215 #undef TARGET_ASM_INTEGER
2216 #define TARGET_ASM_INTEGER m32c_asm_integer
2217 static bool
2218 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2219 {
2220   switch (size)
2221     {
2222     case 3:
2223       fprintf (asm_out_file, "\t.3byte\t");
2224       output_addr_const (asm_out_file, x);
2225       fputc ('\n', asm_out_file);
2226       return true;
2227     case 4:
2228       if (GET_CODE (x) == SYMBOL_REF)
2229 	{
2230 	  fprintf (asm_out_file, "\t.long\t");
2231 	  output_addr_const (asm_out_file, x);
2232 	  fputc ('\n', asm_out_file);
2233 	  return true;
2234 	}
2235       break;
2236     }
2237   return default_assemble_integer (x, size, aligned_p);
2238 }
2239 
2240 /* Output of Assembler Instructions */
2241 
2242 /* We use a lookup table because the addressing modes are non-orthogonal.  */
2243 
2244 static struct
2245 {
2246   char code;
2247   char const *pattern;
2248   char const *format;
2249 }
2250 const conversions[] = {
2251   { 0, "r", "0" },
2252 
2253   { 0, "mr", "z[1]" },
2254   { 0, "m+ri", "3[2]" },
2255   { 0, "m+rs", "3[2]" },
2256   { 0, "m+r+si", "4+5[2]" },
2257   { 0, "ms", "1" },
2258   { 0, "mi", "1" },
2259   { 0, "m+si", "2+3" },
2260 
2261   { 0, "mmr", "[z[2]]" },
2262   { 0, "mm+ri", "[4[3]]" },
2263   { 0, "mm+rs", "[4[3]]" },
2264   { 0, "mm+r+si", "[5+6[3]]" },
2265   { 0, "mms", "[[2]]" },
2266   { 0, "mmi", "[[2]]" },
2267   { 0, "mm+si", "[4[3]]" },
2268 
2269   { 0, "i", "#0" },
2270   { 0, "s", "#0" },
2271   { 0, "+si", "#1+2" },
2272   { 0, "l", "#0" },
2273 
2274   { 'l', "l", "0" },
2275   { 'd', "i", "0" },
2276   { 'd', "s", "0" },
2277   { 'd', "+si", "1+2" },
2278   { 'D', "i", "0" },
2279   { 'D', "s", "0" },
2280   { 'D', "+si", "1+2" },
2281   { 'x', "i", "#0" },
2282   { 'X', "i", "#0" },
2283   { 'm', "i", "#0" },
2284   { 'b', "i", "#0" },
2285   { 'B', "i", "0" },
2286   { 'p', "i", "0" },
2287 
2288   { 0, 0, 0 }
2289 };
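
/* A worked example of how the table drives output (illustrative
   only): for an operand like (mem (plus (reg a0) (const_int 4))),
   encode_pattern() yields "m+ri".  With no operand code, the row
   { 0, "m+ri", "3[2]" } matches: digit 3 prints patternr[3] (the
   constant), the brackets are copied literally, and digit 2 prints
   patternr[2] (the base register), producing "4[a0]".  */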
2290 
2291 /* This is in order according to the bitfield that pushm/popm use.  */
2292 static char const *pushm_regs[] = {
2293   "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2294 };
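
/* A worked example for the 'p' operand code below (illustrative
   only): a mask of 0xC0 (bits 7 and 6 set) prints as "r0,r1", since
   the printing loop walks from bit 7 down to bit 0 and bit 7 indexes
   the last name in this table.  */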
2295 
2296 /* Implements PRINT_OPERAND.  */
2297 void
2298 m32c_print_operand (FILE * file, rtx x, int code)
2299 {
2300   int i, j, b;
2301   const char *comma;
2302   HOST_WIDE_INT ival;
2303   int unsigned_const = 0;
2304   int force_sign;
2305 
2306   /* Multiplies; constants are converted to sign-extended format but
2307    we need unsigned, so 'u' and 'U' tell us what size unsigned we
2308    need.  */
2309   if (code == 'u')
2310     {
2311       unsigned_const = 2;
2312       code = 0;
2313     }
2314   if (code == 'U')
2315     {
2316       unsigned_const = 1;
2317       code = 0;
2318     }
2319   /* This one is only for debugging; you can put it in a pattern to
2320      force this error.  */
2321   if (code == '!')
2322     {
2323       fprintf (stderr, "dj: unreviewed pattern:");
2324       if (current_output_insn)
2325 	debug_rtx (current_output_insn);
2326       gcc_unreachable ();
2327     }
2328   /* PSImode operations are either .w or .l depending on the target.  */
2329   if (code == '&')
2330     {
2331       if (TARGET_A16)
2332 	fprintf (file, "w");
2333       else
2334 	fprintf (file, "l");
2335       return;
2336     }
2337   /* Inverted conditionals.  */
2338   if (code == 'C')
2339     {
2340       switch (GET_CODE (x))
2341 	{
2342 	case LE:
2343 	  fputs ("gt", file);
2344 	  break;
2345 	case LEU:
2346 	  fputs ("gtu", file);
2347 	  break;
2348 	case LT:
2349 	  fputs ("ge", file);
2350 	  break;
2351 	case LTU:
2352 	  fputs ("geu", file);
2353 	  break;
2354 	case GT:
2355 	  fputs ("le", file);
2356 	  break;
2357 	case GTU:
2358 	  fputs ("leu", file);
2359 	  break;
2360 	case GE:
2361 	  fputs ("lt", file);
2362 	  break;
2363 	case GEU:
2364 	  fputs ("ltu", file);
2365 	  break;
2366 	case NE:
2367 	  fputs ("eq", file);
2368 	  break;
2369 	case EQ:
2370 	  fputs ("ne", file);
2371 	  break;
2372 	default:
2373 	  gcc_unreachable ();
2374 	}
2375       return;
2376     }
2377   /* Regular conditionals.  */
2378   if (code == 'c')
2379     {
2380       switch (GET_CODE (x))
2381 	{
2382 	case LE:
2383 	  fputs ("le", file);
2384 	  break;
2385 	case LEU:
2386 	  fputs ("leu", file);
2387 	  break;
2388 	case LT:
2389 	  fputs ("lt", file);
2390 	  break;
2391 	case LTU:
2392 	  fputs ("ltu", file);
2393 	  break;
2394 	case GT:
2395 	  fputs ("gt", file);
2396 	  break;
2397 	case GTU:
2398 	  fputs ("gtu", file);
2399 	  break;
2400 	case GE:
2401 	  fputs ("ge", file);
2402 	  break;
2403 	case GEU:
2404 	  fputs ("geu", file);
2405 	  break;
2406 	case NE:
2407 	  fputs ("ne", file);
2408 	  break;
2409 	case EQ:
2410 	  fputs ("eq", file);
2411 	  break;
2412 	default:
2413 	  gcc_unreachable ();
2414 	}
2415       return;
2416     }
2417   /* Used in negsi2 to do HImode ops on the two parts of an SImode
2418      operand.  */
2419   if (code == 'h' && GET_MODE (x) == SImode)
2420     {
2421       x = m32c_subreg (HImode, x, SImode, 0);
2422       code = 0;
2423     }
2424   if (code == 'H' && GET_MODE (x) == SImode)
2425     {
2426       x = m32c_subreg (HImode, x, SImode, 2);
2427       code = 0;
2428     }
2429   if (code == 'h' && GET_MODE (x) == HImode)
2430     {
2431       x = m32c_subreg (QImode, x, HImode, 0);
2432       code = 0;
2433     }
2434   if (code == 'H' && GET_MODE (x) == HImode)
2435     {
2436       /* We can't actually represent this as an rtx.  Do it here.  */
2437       if (GET_CODE (x) == REG)
2438 	{
2439 	  switch (REGNO (x))
2440 	    {
2441 	    case R0_REGNO:
2442 	      fputs ("r0h", file);
2443 	      return;
2444 	    case R1_REGNO:
2445 	      fputs ("r1h", file);
2446 	      return;
2447 	    default:
2448 	      gcc_unreachable();
2449 	    }
2450 	}
2451       /* This should be a MEM.  */
2452       x = m32c_subreg (QImode, x, HImode, 1);
2453       code = 0;
2454     }
2455   /* This is for BMcond, which always wants word register names.  */
2456   if (code == 'h' && GET_MODE (x) == QImode)
2457     {
2458       if (GET_CODE (x) == REG)
2459 	x = gen_rtx_REG (HImode, REGNO (x));
2460       code = 0;
2461     }
2462   /* 'x' and 'X' need to be ignored for non-immediates.  */
2463   if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2464     code = 0;
2465 
2466   encode_pattern (x);
2467   force_sign = 0;
2468   for (i = 0; conversions[i].pattern; i++)
2469     if (conversions[i].code == code
2470 	&& streq (conversions[i].pattern, pattern))
2471       {
2472 	for (j = 0; conversions[i].format[j]; j++)
2473 	  /* backslash quotes the next character in the output pattern.  */
2474 	  if (conversions[i].format[j] == '\\')
2475 	    {
2476 	      fputc (conversions[i].format[j + 1], file);
2477 	      j++;
2478 	    }
2479 	  /* Digits in the output pattern indicate that the
2480 	     corresponding RTX is to be output at that point.  */
2481 	  else if (ISDIGIT (conversions[i].format[j]))
2482 	    {
2483 	      rtx r = patternr[conversions[i].format[j] - '0'];
2484 	      switch (GET_CODE (r))
2485 		{
2486 		case REG:
2487 		  fprintf (file, "%s",
2488 			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
2489 		  break;
2490 		case CONST_INT:
2491 		  switch (code)
2492 		    {
2493 		    case 'b':
2494 		    case 'B':
2495 		      {
2496 			int v = INTVAL (r);
2497 			int i = (int) exact_log2 (v);
2498 			if (i == -1)
2499 			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2500 			if (i == -1)
2501 			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2502 			/* Bit position.  */
2503 			fprintf (file, "%d", i);
2504 		      }
2505 		      break;
2506 		    case 'x':
2507 		      /* Unsigned byte.  */
2508 		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2509 			       INTVAL (r) & 0xff);
2510 		      break;
2511 		    case 'X':
2512 		      /* Unsigned word.  */
2513 		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2514 			       INTVAL (r) & 0xffff);
2515 		      break;
2516 		    case 'p':
2517 		      /* pushm and popm encode a register set into a single byte.  */
2518 		      comma = "";
2519 		      for (b = 7; b >= 0; b--)
2520 			if (INTVAL (r) & (1 << b))
2521 			  {
2522 			    fprintf (file, "%s%s", comma, pushm_regs[b]);
2523 			    comma = ",";
2524 			  }
2525 		      break;
2526 		    case 'm':
2527 		      /* "Minus".  Output -X  */
2528 		      ival = (-INTVAL (r) & 0xffff);
2529 		      if (ival & 0x8000)
2530 			ival = ival - 0x10000;
2531 		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2532 		      break;
2533 		    default:
2534 		      ival = INTVAL (r);
2535 		      if (conversions[i].format[j + 1] == '[' && ival < 0)
2536 			{
2537 			  /* We can simulate negative displacements by
2538 			     taking advantage of address space
2539 			     wrapping when the offset can span the
2540 			     entire address range.  */
2541 			  rtx base =
2542 			    patternr[conversions[i].format[j + 2] - '0'];
2543 			  if (GET_CODE (base) == REG)
2544 			    switch (REGNO (base))
2545 			      {
2546 			      case A0_REGNO:
2547 			      case A1_REGNO:
2548 				if (TARGET_A24)
2549 				  ival = 0x1000000 + ival;
2550 				else
2551 				  ival = 0x10000 + ival;
2552 				break;
2553 			      case SB_REGNO:
2554 				if (TARGET_A16)
2555 				  ival = 0x10000 + ival;
2556 				break;
2557 			      }
2558 			}
2559 		      else if (code == 'd' && ival < 0 && j == 0)
2560 			/* The "mova" opcode is used to do addition by
2561 			   computing displacements, but again, we need
2562 			   displacements to be unsigned *if* they're
2563 			   the only component of the displacement
2564 			   (i.e. no "symbol-4" type displacement).  */
2565 			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2566 
2567 		      if (conversions[i].format[j] == '0')
2568 			{
2569 			  /* More conversions to unsigned.  */
2570 			  if (unsigned_const == 2)
2571 			    ival &= 0xffff;
2572 			  if (unsigned_const == 1)
2573 			    ival &= 0xff;
2574 			}
2575 		      if (streq (conversions[i].pattern, "mi")
2576 			  || streq (conversions[i].pattern, "mmi"))
2577 			{
2578 			  /* Integers used as addresses are unsigned.  */
2579 			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2580 			}
2581 		      if (force_sign && ival >= 0)
2582 			fputc ('+', file);
2583 		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2584 		      break;
2585 		    }
2586 		  break;
2587 		case CONST_DOUBLE:
2588 		  /* We don't have const_double constants.  If it
2589 		     happens, make it obvious.  */
2590 		  fprintf (file, "[const_double 0x%lx]",
2591 			   (unsigned long) CONST_DOUBLE_HIGH (r));
2592 		  break;
2593 		case SYMBOL_REF:
2594 		  assemble_name (file, XSTR (r, 0));
2595 		  break;
2596 		case LABEL_REF:
2597 		  output_asm_label (r);
2598 		  break;
2599 		default:
2600 		  fprintf (stderr, "don't know how to print this operand:");
2601 		  debug_rtx (r);
2602 		  gcc_unreachable ();
2603 		}
2604 	    }
2605 	  else
2606 	    {
2607 	      if (conversions[i].format[j] == 'z')
2608 		{
2609 		  /* Some addressing modes *must* have a displacement,
2610 		     so insert a zero here if needed.  */
2611 		  int k;
2612 		  for (k = j + 1; conversions[i].format[k]; k++)
2613 		    if (ISDIGIT (conversions[i].format[k]))
2614 		      {
2615 			rtx reg = patternr[conversions[i].format[k] - '0'];
2616 			if (GET_CODE (reg) == REG
2617 			    && (REGNO (reg) == SB_REGNO
2618 				|| REGNO (reg) == FB_REGNO
2619 				|| REGNO (reg) == SP_REGNO))
2620 			  fputc ('0', file);
2621 		      }
2622 		  continue;
2623 		}
2624 	      /* Signed displacements off symbols need to have signs
2625 		 blended cleanly.  */
2626 	      if (conversions[i].format[j] == '+'
2627 		  && (!code || code == 'D' || code == 'd')
2628 		  && ISDIGIT (conversions[i].format[j + 1])
2629 		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2630 		      == CONST_INT))
2631 		{
2632 		  force_sign = 1;
2633 		  continue;
2634 		}
2635 	      fputc (conversions[i].format[j], file);
2636 	    }
2637 	break;
2638       }
2639   if (!conversions[i].pattern)
2640     {
2641       fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2642 	       pattern);
2643       debug_rtx (x);
2644       fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2645     }
2646 
2647   return;
2648 }
2649 
2650 /* Implements PRINT_OPERAND_PUNCT_VALID_P.  See m32c_print_operand
2651    above for descriptions of what these do.  */
2652 int
2653 m32c_print_operand_punct_valid_p (int c)
2654 {
2655   if (c == '&' || c == '!')
2656     return 1;
2657   return 0;
2658 }
2659 
2660 /* Implements PRINT_OPERAND_ADDRESS.  Nothing unusual here.  */
2661 void
2662 m32c_print_operand_address (FILE * stream, rtx address)
2663 {
2664   gcc_assert (GET_CODE (address) == MEM);
2665   m32c_print_operand (stream, XEXP (address, 0), 0);
2666 }
2667 
2668 /* Implements ASM_OUTPUT_REG_PUSH.  Control registers are pushed
2669    differently than general registers.  */
2670 void
2671 m32c_output_reg_push (FILE * s, int regno)
2672 {
2673   if (regno == FLG_REGNO)
2674     fprintf (s, "\tpushc\tflg\n");
2675   else
2676     fprintf (s, "\tpush.%c\t%s\n",
2677 	     " bwll"[reg_push_size (regno)], reg_names[regno]);
2678 }
2679 
2680 /* Likewise for ASM_OUTPUT_REG_POP.  */
2681 void
2682 m32c_output_reg_pop (FILE * s, int regno)
2683 {
2684   if (regno == FLG_REGNO)
2685     fprintf (s, "\tpopc\tflg\n");
2686   else
2687     fprintf (s, "\tpop.%c\t%s\n",
2688 	     " bwll"[reg_push_size (regno)], reg_names[regno]);
2689 }
2690 
2691 /* Defining target-specific uses of `__attribute__' */
2692 
2693 /* Used to simplify the logic below.  Find the attributes wherever
2694    they may be.  */
2695 #define M32C_ATTRIBUTES(decl) \
2696   (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2697                 : DECL_ATTRIBUTES (decl) \
2698                   ? (DECL_ATTRIBUTES (decl)) \
2699 		  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2700 
2701 /* Returns TRUE if the given tree has the "interrupt" attribute.  */
2702 static int
2703 interrupt_p (tree node ATTRIBUTE_UNUSED)
2704 {
2705   tree list = M32C_ATTRIBUTES (node);
2706   while (list)
2707     {
2708       if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2709 	return 1;
2710       list = TREE_CHAIN (list);
2711     }
2712   return 0;
2713 }
2714 
2715 static tree
2716 interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2717 		   tree name ATTRIBUTE_UNUSED,
2718 		   tree args ATTRIBUTE_UNUSED,
2719 		   int flags ATTRIBUTE_UNUSED,
2720 		   bool * no_add_attrs ATTRIBUTE_UNUSED)
2721 {
2722   return NULL_TREE;
2723 }
2724 
2725 #undef TARGET_ATTRIBUTE_TABLE
2726 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2727 static const struct attribute_spec m32c_attribute_table[] = {
2728   {"interrupt", 0, 0, false, false, false, interrupt_handler},
2729   {0, 0, 0, 0, 0, 0, 0}
2730 };
2731 
2732 #undef TARGET_COMP_TYPE_ATTRIBUTES
2733 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
2734 static int
2735 m32c_comp_type_attributes (tree type1 ATTRIBUTE_UNUSED,
2736 			   tree type2 ATTRIBUTE_UNUSED)
2737 {
2738   /* 0=incompatible 1=compatible 2=warning */
2739   return 1;
2740 }
2741 
2742 #undef TARGET_INSERT_ATTRIBUTES
2743 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
2744 static void
2745 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
2746 			tree * attr_ptr ATTRIBUTE_UNUSED)
2747 {
2748   /* Nothing to do here.  */
2749 }
2750 
2751 /* Predicates */
2752 
2753 /* Returns TRUE if we support a move between the first two operands.
2754    At the moment, we just want to discourage mem to mem moves until
2755    after reload, because reload has a hard time with our limited
2756    number of address registers, and we can get into a situation where
2757    we need three of them when we only have two.  */
2758 bool
2759 m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
2760 {
2761   rtx op0 = operands[0];
2762   rtx op1 = operands[1];
2763 
2764   if (TARGET_A24)
2765     return true;
2766 
2767 #define DEBUG_MOV_OK 0
2768 #if DEBUG_MOV_OK
2769   fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
2770   debug_rtx (op0);
2771   debug_rtx (op1);
2772 #endif
2773 
2774   if (GET_CODE (op0) == SUBREG)
2775     op0 = XEXP (op0, 0);
2776   if (GET_CODE (op1) == SUBREG)
2777     op1 = XEXP (op1, 0);
2778 
2779   if (GET_CODE (op0) == MEM
2780       && GET_CODE (op1) == MEM
2781       && ! reload_completed)
2782     {
2783 #if DEBUG_MOV_OK
2784       fprintf (stderr, " - no, mem to mem\n");
2785 #endif
2786       return false;
2787     }
2788 
2789 #if DEBUG_MOV_OK
2790   fprintf (stderr, " - ok\n");
2791 #endif
2792   return true;
2793 }
2794 
2795 /* Returns TRUE if two consecutive HImode mov instructions, generated
2796    to move an immediate into a doubleword (SImode) variable, can be
2797    combined into a single SImode mov instruction.  */
2798 bool
2799 m32c_immd_dbl_mov (rtx * operands,
2800 		   enum machine_mode mode ATTRIBUTE_UNUSED)
2801 {
2802   int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
2803   const char *str1;
2804   const char *str2;
2805 
2806   if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
2807       && MEM_SCALAR_P (operands[0])
2808       && !MEM_IN_STRUCT_P (operands[0])
2809       && GET_CODE (XEXP (operands[2], 0)) == CONST
2810       && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
2811       && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
2812       && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
2813       && MEM_SCALAR_P (operands[2])
2814       && !MEM_IN_STRUCT_P (operands[2]))
2815     flag = 1;
2816 
2817   else if (GET_CODE (XEXP (operands[0], 0)) == CONST
2818            && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
2819            && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
2820            && MEM_SCALAR_P (operands[0])
2821            && !MEM_IN_STRUCT_P (operands[0])
2822            && !(XINT (XEXP (XEXP (XEXP (operands[0], 0), 0), 1), 0) %4)
2823            && GET_CODE (XEXP (operands[2], 0)) == CONST
2824            && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
2825            && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
2826            && MEM_SCALAR_P (operands[2])
2827            && !MEM_IN_STRUCT_P (operands[2]))
2828     flag = 2;
2829 
2830   else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
2831            &&  GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
2832            &&  REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
2833            &&  GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
2834            &&  MEM_SCALAR_P (operands[0])
2835            &&  !MEM_IN_STRUCT_P (operands[0])
2836            &&  !(XINT (XEXP (XEXP (operands[0], 0), 1), 0) %4)
2837            &&  REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
2838            &&  GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
2839            &&  MEM_SCALAR_P (operands[2])
2840            &&  !MEM_IN_STRUCT_P (operands[2]))
2841     flag = 3;
2842 
2843   else
2844     return false;
2845 
2846   switch (flag)
2847     {
2848     case 1:
2849       str1 = XSTR (XEXP (operands[0], 0), 0);
2850       str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
2851       if (strcmp (str1, str2) == 0)
2852 	okflag = 1;
2853       else
2854 	okflag = 0;
2855       break;
2856     case 2:
2857       str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
2858       str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
2859       if (strcmp(str1,str2) == 0)
2860 	okflag = 1;
2861       else
2862 	okflag = 0;
2863       break;
2864     case 3:
2865       offset1 = XINT (XEXP (XEXP (operands[0], 0), 1), 0);
2866       offset2 = XINT (XEXP (XEXP (operands[2], 0), 1), 0);
2867       offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
2868       if (((offset2-offset1) == 2) && offsetsign != 0)
2869 	okflag = 1;
2870       else
2871 	okflag = 0;
2872       break;
2873     default:
2874       okflag = 0;
2875     }
2876 
2877   if (okflag == 1)
2878     {
2879       HOST_WIDE_INT val;
2880       operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));
2881 
2882       val = (XINT (operands[3], 0) << 16) + (XINT (operands[1], 0) & 0xFFFF);
2883       operands[5] = gen_rtx_CONST_INT (VOIDmode, val);
2884 
2885       return true;
2886     }
2887 
2888   return false;
2889 }
2890 
2891 /* Expanders */
2892 
2893 /* Subregs are non-orthogonal for us, because our registers are all
2894    different sizes.  */
2895 static rtx
2896 m32c_subreg (enum machine_mode outer,
2897 	     rtx x, enum machine_mode inner, int byte)
2898 {
2899   int r, nr = -1;
2900 
2901   /* When converting MEMs to different modes of the same size, we
2902      just rewrite them.  */
2903   if (GET_CODE (x) == SUBREG
2904       && SUBREG_BYTE (x) == 0
2905       && GET_CODE (SUBREG_REG (x)) == MEM
2906       && (GET_MODE_SIZE (GET_MODE (x))
2907 	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
2908     {
2909       rtx oldx = x;
2910       x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
2911       MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
2912     }
2913 
2914   /* Push/pop get done as smaller push/pops.  */
2915   if (GET_CODE (x) == MEM
2916       && (GET_CODE (XEXP (x, 0)) == PRE_DEC
2917 	  || GET_CODE (XEXP (x, 0)) == POST_INC))
2918     return gen_rtx_MEM (outer, XEXP (x, 0));
2919   if (GET_CODE (x) == SUBREG
2920       && GET_CODE (XEXP (x, 0)) == MEM
2921       && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
2922 	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
2923     return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
2924 
2925   if (GET_CODE (x) != REG)
2926     return simplify_gen_subreg (outer, x, inner, byte);
2927 
2928   r = REGNO (x);
2929   if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
2930     return simplify_gen_subreg (outer, x, inner, byte);
2931 
2932   if (IS_MEM_REGNO (r))
2933     return simplify_gen_subreg (outer, x, inner, byte);
2934 
2935   /* This is where the complexities of our register layout are
2936      described.  */
2937   if (byte == 0)
2938     nr = r;
2939   else if (outer == HImode)
2940     {
2941       if (r == R0_REGNO && byte == 2)
2942 	nr = R2_REGNO;
2943       else if (r == R0_REGNO && byte == 4)
2944 	nr = R1_REGNO;
2945       else if (r == R0_REGNO && byte == 6)
2946 	nr = R3_REGNO;
2947       else if (r == R1_REGNO && byte == 2)
2948 	nr = R3_REGNO;
2949       else if (r == A0_REGNO && byte == 2)
2950 	nr = A1_REGNO;
2951     }
2952   else if (outer == SImode)
2953     {
2954       if (r == R0_REGNO && byte == 0)
2955 	nr = R0_REGNO;
2956       else if (r == R0_REGNO && byte == 4)
2957 	nr = R1_REGNO;
2958     }
2959   if (nr == -1)
2960     {
2961       fprintf (stderr, "m32c_subreg %s %s %d\n",
2962 	       mode_name[outer], mode_name[inner], byte);
2963       debug_rtx (x);
2964       gcc_unreachable ();
2965     }
2966   return gen_rtx_REG (outer, nr);
2967 }
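
/* A worked example of the mapping above (illustrative only): the
   HImode subreg of r0 at byte offset 2 is r2, at offset 4 it is r1,
   and at offset 6 it is r3, reflecting the r0/r2/r1/r3 layout of
   wide values rather than a simple numeric progression.  */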
2968 
2969 /* Used to emit move instructions.  We split some moves,
2970    and avoid mem-mem moves.  */
2971 int
2972 m32c_prepare_move (rtx * operands, enum machine_mode mode)
2973 {
2974   if (TARGET_A16 && mode == PSImode)
2975     return m32c_split_move (operands, mode, 1);
2976   if ((GET_CODE (operands[0]) == MEM)
2977       && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
2978     {
2979       rtx pmv = XEXP (operands[0], 0);
2980       rtx dest_reg = XEXP (pmv, 0);
2981       rtx dest_mod = XEXP (pmv, 1);
2982 
2983       emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
2984       operands[0] = gen_rtx_MEM (mode, dest_reg);
2985     }
2986   if (!no_new_pseudos && MEM_P (operands[0]) && MEM_P (operands[1]))
2987     operands[1] = copy_to_mode_reg (mode, operands[1]);
2988   return 0;
2989 }
2990 
2991 #define DEBUG_SPLIT 0
2992 
2993 /* Returns TRUE if the given PSImode move should be split.  We split
2994    all r8c/m16c moves, since those chips don't support them, and
2995    POP.L, as we can only *push* SImode.  */
2996 int
2997 m32c_split_psi_p (rtx * operands)
2998 {
2999 #if DEBUG_SPLIT
3000   fprintf (stderr, "\nm32c_split_psi_p\n");
3001   debug_rtx (operands[0]);
3002   debug_rtx (operands[1]);
3003 #endif
3004   if (TARGET_A16)
3005     {
3006 #if DEBUG_SPLIT
3007       fprintf (stderr, "yes, A16\n");
3008 #endif
3009       return 1;
3010     }
3011   if (GET_CODE (operands[1]) == MEM
3012       && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3013     {
3014 #if DEBUG_SPLIT
3015       fprintf (stderr, "yes, pop.l\n");
3016 #endif
3017       return 1;
3018     }
3019 #if DEBUG_SPLIT
3020   fprintf (stderr, "no, default\n");
3021 #endif
3022   return 0;
3023 }
3024 
3025 /* Split the given move.  SPLIT_ALL is 0 if splitting is optional
3026    (define_expand), 1 if it is not optional (define_insn_and_split),
3027    and 3 for define_split (alternate api). */
3028 int
3029 m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
3030 {
3031   rtx s[4], d[4];
3032   int parts, si, di, rev = 0;
3033   int rv = 0, opi = 2;
3034   enum machine_mode submode = HImode;
3035   rtx *ops, local_ops[10];
3036 
3037   /* define_split modifies the existing operands, but the other two
3038      emit new insns.  OPS is where we store the operand pairs, which
3039      we emit later.  */
3040   if (split_all == 3)
3041     ops = operands;
3042   else
3043     ops = local_ops;
3044 
3045   /* Else HImode.  */
3046   if (mode == DImode)
3047     submode = SImode;
3048 
3049   /* Before splitting mem-mem moves, force one operand into a
3050      register.  */
3051   if (!no_new_pseudos && MEM_P (operands[0]) && MEM_P (operands[1]))
3052     {
3053 #if DEBUG0
3054       fprintf (stderr, "force_reg...\n");
3055       debug_rtx (operands[1]);
3056 #endif
3057       operands[1] = force_reg (mode, operands[1]);
3058 #if DEBUG0
3059       debug_rtx (operands[1]);
3060 #endif
3061     }
3062 
3063   parts = 2;
3064 
3065 #if DEBUG_SPLIT
3066   fprintf (stderr, "\nsplit_move %d all=%d\n", no_new_pseudos, split_all);
3067   debug_rtx (operands[0]);
3068   debug_rtx (operands[1]);
3069 #endif
3070 
3071   /* Note that split_all is not used to select the api after this
3072      point, so it's safe to set it to 3 even with define_insn.  */
3073   /* None of the chips can move SI operands to sp-relative addresses,
3074      so we always split those.  */
3075   if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
3076     split_all = 3;
3077 
3078   /* We don't need to split these.  */
3079   if (TARGET_A24
3080       && split_all != 3
3081       && (mode == SImode || mode == PSImode)
3082       && !(GET_CODE (operands[1]) == MEM
3083 	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3084     return 0;
3085 
3086   /* First, enumerate the subregs we'll be dealing with.  */
3087   for (si = 0; si < parts; si++)
3088     {
3089       d[si] =
3090 	m32c_subreg (submode, operands[0], mode,
3091 		     si * GET_MODE_SIZE (submode));
3092       s[si] =
3093 	m32c_subreg (submode, operands[1], mode,
3094 		     si * GET_MODE_SIZE (submode));
3095     }
3096 
3097   /* Split pushes by emitting a sequence of smaller pushes.  */
3098   if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3099     {
3100       for (si = parts - 1; si >= 0; si--)
3101 	{
3102 	  ops[opi++] = gen_rtx_MEM (submode,
3103 				    gen_rtx_PRE_DEC (Pmode,
3104 						     gen_rtx_REG (Pmode,
3105 								  SP_REGNO)));
3106 	  ops[opi++] = s[si];
3107 	}
3108 
3109       rv = 1;
3110     }
3111   /* Likewise for pops.  */
3112   else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3113     {
3114       for (di = 0; di < parts; di++)
3115 	{
3116 	  ops[opi++] = d[di];
3117 	  ops[opi++] = gen_rtx_MEM (submode,
3118 				    gen_rtx_POST_INC (Pmode,
3119 						      gen_rtx_REG (Pmode,
3120 								   SP_REGNO)));
3121 	}
3122       rv = 1;
3123     }
3124   else if (split_all)
3125     {
3126       /* if d[di] == s[si] for any di < si, we'll early clobber. */
3127       for (di = 0; di < parts - 1; di++)
3128 	for (si = di + 1; si < parts; si++)
3129 	  if (reg_mentioned_p (d[di], s[si]))
3130 	    rev = 1;
3131 
3132       if (rev)
3133 	for (si = 0; si < parts; si++)
3134 	  {
3135 	    ops[opi++] = d[si];
3136 	    ops[opi++] = s[si];
3137 	  }
3138       else
3139 	for (si = parts - 1; si >= 0; si--)
3140 	  {
3141 	    ops[opi++] = d[si];
3142 	    ops[opi++] = s[si];
3143 	  }
3144       rv = 1;
3145     }
3146   /* Now emit any moves we may have accumulated.  */
3147   if (rv && split_all != 3)
3148     {
3149       int i;
3150       for (i = 2; i < opi; i += 2)
3151 	emit_move_insn (ops[i], ops[i + 1]);
3152     }
3153   return rv;
3154 }
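
/* A worked example of the splitting above (illustrative only): on
   the A16, an SImode register move from the r1/r3 pair to the r0/r2
   pair becomes two HImode moves, r2 = r3 and then r0 = r1, with the
   reg_mentioned_p check choosing an order in which no destination
   part is overwritten before it has been read as a source.  */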
3155 
3156 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3157    the like.  For the R8C they expect one of the addresses to be in
3158    R1L:An so we need to arrange for that.  Otherwise, it's just a
3159    matter of picking out the operands we want and emitting the right
3160    pattern for them.  All these expanders, which correspond to
3161    patterns in blkmov.md, must return nonzero if they expand the insn,
3162    or zero if they should FAIL.  */
3163 
3164 /* This is a memset() opcode.  All operands are implied, so we need to
3165    arrange for them to be in the right registers.  The opcode wants
3166    addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
3167    the count (HI), and $2 the value (QI).  */
3168 int
3169 m32c_expand_setmemhi(rtx *operands)
3170 {
3171   rtx desta, count, val;
3172   rtx desto, counto;
3173 
3174   desta = XEXP (operands[0], 0);
3175   count = operands[1];
3176   val = operands[2];
3177 
3178   desto = gen_reg_rtx (Pmode);
3179   counto = gen_reg_rtx (HImode);
3180 
3181   if (GET_CODE (desta) != REG
3182       || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3183     desta = copy_to_mode_reg (Pmode, desta);
3184 
3185   /* This looks like an arbitrary restriction, but this is by far the
3186      most common case.  For counts 8..14 this actually results in
3187      smaller code with no speed penalty because the half-sized
3188      constant can be loaded with a shorter opcode.  */
3189   if (GET_CODE (count) == CONST_INT
3190       && GET_CODE (val) == CONST_INT
3191       && ! (INTVAL (count) & 1)
3192       && (INTVAL (count) > 1)
3193       && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3194     {
3195       unsigned v = INTVAL (val) & 0xff;
3196       v = v | (v << 8);
3197       count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3198       val = copy_to_mode_reg (HImode, GEN_INT (v));
3199       if (TARGET_A16)
3200 	emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3201       else
3202 	emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3203       return 1;
3204     }
3205 
3206   /* This is the generalized memset() case.  */
3207   if (GET_CODE (val) != REG
3208       || REGNO (val) < FIRST_PSEUDO_REGISTER)
3209     val = copy_to_mode_reg (QImode, val);
3210 
3211   if (GET_CODE (count) != REG
3212       || REGNO (count) < FIRST_PSEUDO_REGISTER)
3213     count = copy_to_mode_reg (HImode, count);
3214 
3215   if (TARGET_A16)
3216     emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3217   else
3218     emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3219 
3220   return 1;
3221 }
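
/* A worked example of the even-count fast path above (illustrative
   only): memset of 16 bytes to the value 3 becomes a halfword fill,
   storing 8 copies of 0x0303, since the count is even and 3 fits the
   -8..7 range that keeps the immediate in the short form.  */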
3222 
3223 /* This is a memcpy() opcode.  All operands are implied, so we need to
3224    arrange for them to be in the right registers.  The opcode wants
3225    addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
3226    is the source (MEM:BLK), and $2 the count (HI).  */
3227 int
3228 m32c_expand_movmemhi(rtx *operands)
3229 {
3230   rtx desta, srca, count;
3231   rtx desto, srco, counto;
3232 
3233   desta = XEXP (operands[0], 0);
3234   srca = XEXP (operands[1], 0);
3235   count = operands[2];
3236 
3237   desto = gen_reg_rtx (Pmode);
3238   srco = gen_reg_rtx (Pmode);
3239   counto = gen_reg_rtx (HImode);
3240 
3241   if (GET_CODE (desta) != REG
3242       || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3243     desta = copy_to_mode_reg (Pmode, desta);
3244 
3245   if (GET_CODE (srca) != REG
3246       || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3247     srca = copy_to_mode_reg (Pmode, srca);
3248 
3249   /* Similar to setmem, but we don't need to check the value.  */
3250   if (GET_CODE (count) == CONST_INT
3251       && ! (INTVAL (count) & 1)
3252       && (INTVAL (count) > 1))
3253     {
3254       count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3255       if (TARGET_A16)
3256 	emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3257       else
3258 	emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3259       return 1;
3260     }
3261 
3262   /* This is the generalized memcpy() case.  */
3263   if (GET_CODE (count) != REG
3264       || REGNO (count) < FIRST_PSEUDO_REGISTER)
3265     count = copy_to_mode_reg (HImode, count);
3266 
3267   if (TARGET_A16)
3268     emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3269   else
3270     emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3271 
3272   return 1;
3273 }
3274 
3275 /* This is a stpcpy() opcode.  $0 is the destination (MEM:BLK) after
3276    the copy, which should point to the NUL at the end of the string,
3277    $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3278    Since our opcode leaves the destination pointing *after* the NUL,
3279    we must emit an adjustment.  */
3280 int
3281 m32c_expand_movstr(rtx *operands)
3282 {
3283   rtx desta, srca;
3284   rtx desto, srco;
3285 
3286   desta = XEXP (operands[1], 0);
3287   srca = XEXP (operands[2], 0);
3288 
3289   desto = gen_reg_rtx (Pmode);
3290   srco = gen_reg_rtx (Pmode);
3291 
3292   if (GET_CODE (desta) != REG
3293       || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3294     desta = copy_to_mode_reg (Pmode, desta);
3295 
3296   if (GET_CODE (srca) != REG
3297       || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3298     srca = copy_to_mode_reg (Pmode, srca);
3299 
3300   emit_insn (gen_movstr_op (desto, srco, desta, srca));
3301   /* desto ends up being a1, which allows this type of add through MOVA.  */
3302   emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3303 
3304   return 1;
3305 }
3306 
3307 /* This is a strcmp() opcode.  $0 is the destination (HI) which holds
3308    <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3309    $2 is the other (MEM:BLK).  We must do the comparison, and then
3310    convert the flags to a signed integer result.  */
3311 int
3312 m32c_expand_cmpstr(rtx *operands)
3313 {
3314   rtx src1a, src2a;
3315 
3316   src1a = XEXP (operands[1], 0);
3317   src2a = XEXP (operands[2], 0);
3318 
3319   if (GET_CODE (src1a) != REG
3320       || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3321     src1a = copy_to_mode_reg (Pmode, src1a);
3322 
3323   if (GET_CODE (src2a) != REG
3324       || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3325     src2a = copy_to_mode_reg (Pmode, src2a);
3326 
3327   emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3328   emit_insn (gen_cond_to_int (operands[0]));
3329 
3330   return 1;
3331 }
3332 
3333 
3334 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3335 
3336 static shift_gen_func
3337 shift_gen_func_for (int mode, int code)
3338 {
3339 #define GFF(m,c,f) if (mode == m && code == c) return f
3340   GFF(QImode,  ASHIFT,   gen_ashlqi3_i);
3341   GFF(QImode,  ASHIFTRT, gen_ashrqi3_i);
3342   GFF(QImode,  LSHIFTRT, gen_lshrqi3_i);
3343   GFF(HImode,  ASHIFT,   gen_ashlhi3_i);
3344   GFF(HImode,  ASHIFTRT, gen_ashrhi3_i);
3345   GFF(HImode,  LSHIFTRT, gen_lshrhi3_i);
3346   GFF(PSImode, ASHIFT,   gen_ashlpsi3_i);
3347   GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3348   GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3349   GFF(SImode,  ASHIFT,   TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3350   GFF(SImode,  ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3351   GFF(SImode,  LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3352 #undef GFF
3353   gcc_unreachable ();
3354 }
3355 
3356 /* The m32c only has one shift, but it takes a signed count.  GCC
3357    doesn't want this, so we fake it by negating any shift count when
3358    we're pretending to shift the other way.  Also, the shift count is
3359    limited to -8..8.  It's slightly better to use two shifts for 9..15
3360    than to load the count into r1h, so we do that too.  */
3361 int
3362 m32c_prepare_shift (rtx * operands, int scale, int shift_code)
3363 {
3364   enum machine_mode mode = GET_MODE (operands[0]);
3365   shift_gen_func func = shift_gen_func_for (mode, shift_code);
3366   rtx temp;
3367 
3368   if (GET_CODE (operands[2]) == CONST_INT)
3369     {
3370       int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3371       int count = INTVAL (operands[2]) * scale;
3372 
3373       while (count > maxc)
3374 	{
3375 	  temp = gen_reg_rtx (mode);
3376 	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3377 	  operands[1] = temp;
3378 	  count -= maxc;
3379 	}
3380       while (count < -maxc)
3381 	{
3382 	  temp = gen_reg_rtx (mode);
3383 	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3384 	  operands[1] = temp;
3385 	  count += maxc;
3386 	}
3387       emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3388       return 1;
3389     }
3390 
3391   temp = gen_reg_rtx (QImode);
3392   if (scale < 0)
3393     /* The pattern has a NEG that corresponds to this. */
3394     emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3395   else if (TARGET_A16 && mode == SImode)
3396     /* We do this because the code below may modify this, we don't
3397        want to modify the origin of this value.  */
3398     emit_move_insn (temp, operands[2]);
3399   else
3400     /* We'll only use it for the shift, no point emitting a move.  */
3401     temp = operands[2];
3402 
3403   if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3404     {
3405       /* The m16c has a limit of -16..16 for SI shifts, even when the
3406 	 shift count is in a register.  Since there are so many targets
3407 	 of these shifts, it's better to expand the RTL here than to
3408 	 call a helper function.
3409 
3410 	 The resulting code looks something like this:
3411 
3412 		cmp.b	r1h,-16
3413 		jge.b	1f
3414 		shl.l	-16,dest
3415 		add.b	r1h,16
3416 	1f:	cmp.b	r1h,16
3417 		jle.b	1f
3418 		shl.l	16,dest
3419 		sub.b	r1h,16
3420 	1f:	shl.l	r1h,dest
3421 
3422 	 We take advantage of the fact that "negative" shifts are
3423 	 undefined to skip one of the comparisons.  */
3424 
3425       rtx count;
3426       rtx label, lref, insn, tempvar;
3427 
3428       emit_move_insn (operands[0], operands[1]);
3429 
3430       count = temp;
3431       label = gen_label_rtx ();
3432       lref = gen_rtx_LABEL_REF (VOIDmode, label);
3433       LABEL_NUSES (label) ++;
3434 
3435       tempvar = gen_reg_rtx (mode);
3436 
3437       if (shift_code == ASHIFT)
3438 	{
3439 	  /* This is a left shift.  We only need check positive counts.  */
3440 	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3441 					  count, GEN_INT (16), label));
3442 	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3443 	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
3444 	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3445 	  emit_label_after (label, insn);
3446 	}
3447       else
3448 	{
3449 	  /* This is a right shift.  We only need check negative counts.  */
3450 	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3451 					  count, GEN_INT (-16), label));
3452 	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3453 	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
3454 	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3455 	  emit_label_after (label, insn);
3456 	}
3457       operands[1] = operands[0];
3458       emit_insn (func (operands[0], operands[0], count));
3459       return 1;
3460     }
3461 
3462   operands[2] = temp;
3463   return 0;
3464 }
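
/* A worked example of the constant-count path above (illustrative
   only): an SImode left shift by 20 on the A16 (where maxc is 8) is
   emitted as three shifts, by 8, by 8, and then by 4, the first two
   going through fresh temporaries.  */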
3465 
3466 /* The m32c has a limited range of operations that work on PSImode
3467    values; we have to expand to SI, do the math, and truncate back to
3468    PSI.  Yes, this is expensive, but hopefully gcc will learn to avoid
3469    those cases.  */
3470 void
3471 m32c_expand_neg_mulpsi3 (rtx * operands)
3472 {
3473   /* operands: a = b * i */
3474   rtx temp1; /* b as SI */
3475   rtx scale /* i as SI */;
3476   rtx temp2; /* a*b as SI */
3477 
3478   temp1 = gen_reg_rtx (SImode);
3479   temp2 = gen_reg_rtx (SImode);
3480   if (GET_CODE (operands[2]) != CONST_INT)
3481     {
3482       scale = gen_reg_rtx (SImode);
3483       emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3484     }
3485   else
3486     scale = copy_to_mode_reg (SImode, operands[2]);
3487 
3488   emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3489   temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3490   emit_insn (gen_truncsipsi2 (operands[0], temp2));
3491 }
3492 
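/* Operands of the most recent compare, recorded by m32c_pend_compare
   (presumably called from the cmpM expanders in m32c.md).  The compare
   itself is emitted later by m32c_unpend_compare, or the operands are
   consumed directly when expanding scc and conditional-move patterns.  */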
3493 static rtx compare_op0, compare_op1;
3494 
3495 void
3496 m32c_pend_compare (rtx *operands)
3497 {
3498   compare_op0 = operands[0];
3499   compare_op1 = operands[1];
3500 }
3501 
3502 void
3503 m32c_unpend_compare (void)
3504 {
3505   switch (GET_MODE (compare_op0))
3506     {
3507     case QImode:
3508       emit_insn (gen_cmpqi_op (compare_op0, compare_op1)); break;
3509     case HImode:
3510       emit_insn (gen_cmphi_op (compare_op0, compare_op1)); break;
3511     case PSImode:
3512       emit_insn (gen_cmppsi_op (compare_op0, compare_op1)); break;
3513     }
3514 }
3515 
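/* Expand a store-condition (scc) pattern: set operands[0] to the result
   of applying CODE to the pending compare operands recorded above.  */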
3516 void
3517 m32c_expand_scc (int code, rtx *operands)
3518 {
3519   enum machine_mode mode = TARGET_A16 ? QImode : HImode;
3520 
3521   emit_insn (gen_rtx_SET (mode,
3522 			  operands[0],
3523 			  gen_rtx_fmt_ee (code,
3524 					  mode,
3525 					  compare_op0,
3526 					  compare_op1)));
3527 }
3528 
3529 /* Pattern Output Functions */
3530 
3531 /* Given a comparison rtx CMP (e.g. from a match_operand), return
3532    (OP (reg:CC FLG_REGNO) (const_int 0)) with the same code and mode.  */
3533 rtx
3534 m32c_cmp_flg_0 (rtx cmp)
3535 {
3536   return gen_rtx_fmt_ee (GET_CODE (cmp),
3537 			 GET_MODE (cmp),
3538 			 gen_rtx_REG (CCmode, FLG_REGNO),
3539 			 GEN_INT (0));
3540 }
3541 
3542 int
3543 m32c_expand_movcc (rtx *operands)
3544 {
3545   rtx rel = operands[1];
3546   rtx cmp;
3547 
3548   if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3549     return 1;
3550   if (GET_CODE (operands[2]) != CONST_INT
3551       || GET_CODE (operands[3]) != CONST_INT)
3552     return 1;
3553   emit_insn (gen_cmpqi(XEXP (rel, 0), XEXP (rel, 1)));
3554   if (GET_CODE (rel) == NE)
3555     {
3556       rtx tmp = operands[2];
3557       operands[2] = operands[3];
3558       operands[3] = tmp;
3559     }
3560 
3561   cmp = gen_rtx_fmt_ee (GET_CODE (rel),
3562 			GET_MODE (rel),
3563 			compare_op0,
3564 			compare_op1);
3565 
3566   emit_move_insn (operands[0],
3567 		  gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3568 					cmp,
3569 					operands[2],
3570 					operands[3]));
3571   return 0;
3572 }
3573 
3574 /* Used for the "insv" pattern.  Return nonzero to fail, zero when done.  */
3575 int
3576 m32c_expand_insv (rtx *operands)
3577 {
3578   rtx op0, src0, p;
3579   int mask;
3580 
3581   if (INTVAL (operands[1]) != 1)
3582     return 1;
3583 
3584   /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
3585   if (GET_CODE (operands[3]) != CONST_INT)
3586     return 1;
3587   if (INTVAL (operands[3]) != 0
3588       && INTVAL (operands[3]) != 1
3589       && INTVAL (operands[3]) != -1)
3590     return 1;
3591 
3592   mask = 1 << INTVAL (operands[2]);
3593 
3594   op0 = operands[0];
3595   if (GET_CODE (op0) == SUBREG
3596       && SUBREG_BYTE (op0) == 0)
3597     {
3598       rtx sub = SUBREG_REG (op0);
3599       if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3600 	op0 = sub;
3601     }
3602 
3603   if (no_new_pseudos
3604       || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3605     src0 = op0;
3606   else
3607     {
3608       src0 = gen_reg_rtx (GET_MODE (op0));
3609       emit_move_insn (src0, op0);
3610     }
3611 
3612   if (GET_MODE (op0) == HImode
3613       && INTVAL (operands[2]) >= 8
3614       && GET_MODE (op0) == MEM)
3615     {
3616       /* We are little endian.  */
3617       rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
3618       MEM_COPY_ATTRIBUTES (new_mem, op0);
3619       mask >>= 8;
3620     }
3621 
3622   /* First, we generate a mask with the correct polarity.  If we are
3623      storing a zero, we want an AND mask, so invert it.  */
3624   if (INTVAL (operands[3]) == 0)
3625     {
3626       /* Storing a zero, use an AND mask */
3627       if (GET_MODE (op0) == HImode)
3628 	mask ^= 0xffff;
3629       else
3630 	mask ^= 0xff;
3631     }
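  /* For example, inserting into bit 3 of a QImode operand gives
     mask == 0x08: storing a 1 uses an OR with 0x08, while storing a 0
     uses an AND with the inverted mask 0xf7.  */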
3632   /* Now we need to properly sign-extend the mask in case we need to
3633      fall back to an AND or OR opcode.  */
3634   if (GET_MODE (op0) == HImode)
3635     {
3636       if (mask & 0x8000)
3637 	mask -= 0x10000;
3638     }
3639   else
3640     {
3641       if (mask & 0x80)
3642 	mask -= 0x100;
3643     }
3644 
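  /* Pick the insn: bit 2 of the index selects IOR (storing a one)
     vs. AND (storing a zero), bit 1 selects HImode vs. QImode, and
     bit 0 selects the A24 vs. A16 variant of the pattern.  */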
3645   switch (  (INTVAL (operands[3]) ? 4 : 0)
3646 	  + ((GET_MODE (op0) == HImode) ? 2 : 0)
3647 	  + (TARGET_A24 ? 1 : 0))
3648     {
3649     case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3650     case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3651     case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3652     case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3653     case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3654     case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3655     case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3656     case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
3657     }
3658 
3659   emit_insn (p);
3660   return 0;
3661 }
3662 
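/* Return the assembler template that stores condition CODE into
   operands[0].  For EQ and NE into r0l, a single stzx stores one of
   two immediates depending on the Z flag; otherwise the condition is
   copied into bit 0 of the destination with a bmCND insn and the
   remaining bits are masked off with and.b.  */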
3663 const char *
3664 m32c_scc_pattern(rtx *operands, RTX_CODE code)
3665 {
3666   static char buf[30];
3667   if (GET_CODE (operands[0]) == REG
3668       && REGNO (operands[0]) == R0_REGNO)
3669     {
3670       if (code == EQ)
3671 	return "stzx\t#1,#0,r0l";
3672       if (code == NE)
3673 	return "stzx\t#0,#1,r0l";
3674     }
3675   sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3676   return buf;
3677 }
3678 
3679 /* Returns TRUE if the current function is a leaf, and thus we can
3680    determine which registers an interrupt function really needs to
3681    save.  The logic below is mostly about finding the insn sequence
3682    that makes up the function body, as opposed to any sequence that
3683    might currently be open for emitting new insns.  */
3684 static int
3685 m32c_leaf_function_p (void)
3686 {
3687   rtx saved_first, saved_last;
3688   struct sequence_stack *seq;
3689   int rv;
3690 
3691   saved_first = cfun->emit->x_first_insn;
3692   saved_last = cfun->emit->x_last_insn;
3693   for (seq = cfun->emit->sequence_stack; seq && seq->next; seq = seq->next)
3694     ;
3695   if (seq)
3696     {
3697       cfun->emit->x_first_insn = seq->first;
3698       cfun->emit->x_last_insn = seq->last;
3699     }
3700 
3701   rv = leaf_function_p ();
3702 
3703   cfun->emit->x_first_insn = saved_first;
3704   cfun->emit->x_last_insn = saved_last;
3705   return rv;
3706 }
3707 
3708 /* Returns TRUE if the current function needs to use the ENTER/EXIT
3709    opcodes.  If the function doesn't need the frame base or stack
3710    pointer, it can use the simpler RTS opcode.  */
3711 static bool
3712 m32c_function_needs_enter (void)
3713 {
3714   rtx insn;
3715   struct sequence_stack *seq;
3716   rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
3717   rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
3718 
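  /* As in m32c_leaf_function_p, walk down the sequence stack so that we
     scan the insns of the function body rather than whatever sequence
     happens to be open at the moment.  */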
3719   insn = get_insns ();
3720   for (seq = cfun->emit->sequence_stack;
3721        seq;
3722        insn = seq->first, seq = seq->next);
3723 
3724   while (insn)
3725     {
3726       if (reg_mentioned_p (sp, insn))
3727 	return true;
3728       if (reg_mentioned_p (fb, insn))
3729 	return true;
3730       insn = NEXT_INSN (insn);
3731     }
3732   return false;
3733 }
3734 
3735 /* Mark all the subexpressions of the PARALLEL rtx PAR as
3736    frame-related.  Return PAR.
3737 
3738    dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
3739    PARALLEL rtx other than the first if they do not have the
3740    FRAME_RELATED flag set on them.  So this function is handy for
3741    marking up 'enter' instructions.  */
3742 static rtx
3743 m32c_all_frame_related (rtx par)
3744 {
3745   int len = XVECLEN (par, 0);
3746   int i;
3747 
3748   for (i = 0; i < len; i++)
3749     F (XVECEXP (par, 0, i));
3750 
3751   return par;
3752 }
3753 
3754 /* Emits the prologue.  See the frame layout comment earlier in this
3755    file.  We can reserve up to 256 bytes with the ENTER opcode; beyond
3756    that we manually update sp.  */
3757 void
3758 m32c_emit_prologue (void)
3759 {
3760   int frame_size, extra_frame_size = 0, reg_save_size;
3761   int complex_prologue = 0;
3762 
3763   cfun->machine->is_leaf = m32c_leaf_function_p ();
3764   if (interrupt_p (cfun->decl))
3765     {
3766       cfun->machine->is_interrupt = 1;
3767       complex_prologue = 1;
3768     }
3769 
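  /* Size of the register save area that pushm will create; it is
     deducted from the FB-to-SP offset below, since ENTER only has to
     allocate the remainder of the frame.  */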
3770   reg_save_size = m32c_pushm_popm (PP_justcount);
3771 
3772   if (interrupt_p (cfun->decl))
3773     emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
3774 
3775   frame_size =
3776     m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
3777   if (frame_size == 0
3778       && !cfun->machine->is_interrupt
3779       && !m32c_function_needs_enter ())
3780     cfun->machine->use_rts = 1;
3781 
3782   if (frame_size > 254)
3783     {
3784       extra_frame_size = frame_size - 254;
3785       frame_size = 254;
3786     }
3787   if (cfun->machine->use_rts == 0)
3788     F (emit_insn (m32c_all_frame_related
3789 		  (TARGET_A16
3790 		   ? gen_prologue_enter_16 (GEN_INT (frame_size))
3791 		   : gen_prologue_enter_24 (GEN_INT (frame_size)))));
3792 
3793   if (extra_frame_size)
3794     {
3795       complex_prologue = 1;
3796       if (TARGET_A16)
3797 	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
3798 				  gen_rtx_REG (HImode, SP_REGNO),
3799 				  GEN_INT (-extra_frame_size))));
3800       else
3801 	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
3802 				   gen_rtx_REG (PSImode, SP_REGNO),
3803 				   GEN_INT (-extra_frame_size))));
3804     }
3805 
3806   complex_prologue += m32c_pushm_popm (PP_pushm);
3807 
3808   /* This just emits a comment into the .s file for debugging.  */
3809   if (complex_prologue)
3810     emit_insn (gen_prologue_end ());
3811 }
3812 
3813 /* Likewise, for the epilogue.  The only exception is that, for
3814    interrupts, we must manually unwind the frame as the REIT opcode
3815    doesn't do that.  */
3816 void
3817 m32c_emit_epilogue (void)
3818 {
3819   /* This just emits a comment into the .s file for debugging.  */
3820   if (m32c_pushm_popm (PP_justcount) > 0 || cfun->machine->is_interrupt)
3821     emit_insn (gen_epilogue_start ());
3822 
3823   m32c_pushm_popm (PP_popm);
3824 
3825   if (cfun->machine->is_interrupt)
3826     {
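      /* Restore sp from the frame pointer (via a0), pop the saved
         frame pointer, pop the registers saved by the interrupt
         prologue, then return with REIT.  */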
3827       enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
3828 
3829       emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
3830 		      gen_rtx_REG (spmode, FP_REGNO));
3831       emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
3832 		      gen_rtx_REG (spmode, A0_REGNO));
3833       if (TARGET_A16)
3834 	emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
3835       else
3836 	emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
3837       emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
3838       emit_jump_insn (gen_epilogue_reit (GEN_INT (TARGET_A16 ? 4 : 6)));
3839     }
3840   else if (cfun->machine->use_rts)
3841     emit_jump_insn (gen_epilogue_rts ());
3842   else
3843     emit_jump_insn (gen_epilogue_exitd (GEN_INT (TARGET_A16 ? 2 : 4)));
3844   emit_barrier ();
3845 }
3846 
3847 void
3848 m32c_emit_eh_epilogue (rtx ret_addr)
3849 {
3850   /* R0[R2] has the stack adjustment.  R1[R3] has the address to
3851      return to.  We have to fudge the stack, pop everything, pop SP
3852      (fudged), and return (fudged).  This is actually easier to do in
3853      assembler, so punt to libgcc.  */
3854   emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
3855   /*  emit_insn (gen_rtx_CLOBBER (HImode, gen_rtx_REG (HImode, R0L_REGNO))); */
3856   emit_barrier ();
3857 }
3858 
3859 /* Indicate which flags must be properly set for a given conditional.  */
3860 static int
3861 flags_needed_for_conditional (rtx cond)
3862 {
3863   switch (GET_CODE (cond))
3864     {
3865     case LE:
3866     case GT:
3867       return FLAGS_OSZ;
3868     case LEU:
3869     case GTU:
3870       return FLAGS_ZC;
3871     case LT:
3872     case GE:
3873       return FLAGS_OS;
3874     case LTU:
3875     case GEU:
3876       return FLAGS_C;
3877     case EQ:
3878     case NE:
3879       return FLAGS_Z;
3880     default:
3881       return FLAGS_N;
3882     }
3883 }
3884 
3885 #define DEBUG_CMP 0
3886 
3887 /* Returns true if a compare insn is redundant because it would only
3888    set flags that are already set correctly.  */
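/* The strategy: look forward to the conditional that consumes the
   flags to see which flag bits it needs, then walk backward over insns
   that leave the flags alone to the most recent flag-setting insn, and
   check (via its "flags" attribute) whether it already sets those bits
   correctly: either it is an identical compare, or operands[1] is zero
   and the insn sets or uses operands[0].  */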
3889 static bool
3890 m32c_compare_redundant (rtx cmp, rtx *operands)
3891 {
3892   int flags_needed;
3893   int pflags;
3894   rtx prev, pp, next;
3895   rtx op0, op1, op2;
3896 #if DEBUG_CMP
3897   int prev_icode, i;
3898 #endif
3899 
3900   op0 = operands[0];
3901   op1 = operands[1];
3902   op2 = operands[2];
3903 
3904 #if DEBUG_CMP
3905   fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
3906   debug_rtx(cmp);
3907   for (i=0; i<2; i++)
3908     {
3909       fprintf(stderr, "operands[%d] = ", i);
3910       debug_rtx(operands[i]);
3911     }
3912 #endif
3913 
3914   next = next_nonnote_insn (cmp);
3915   if (!next || !INSN_P (next))
3916     {
3917 #if DEBUG_CMP
3918       fprintf(stderr, "compare not followed by insn\n");
3919       debug_rtx(next);
3920 #endif
3921       return false;
3922     }
3923   if (GET_CODE (PATTERN (next)) == SET
3924       && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
3925     {
3926       next = XEXP (XEXP (PATTERN (next), 1), 0);
3927     }
3928   else if (GET_CODE (PATTERN (next)) == SET)
3929     {
3930       /* If this is a conditional, flags_needed will be something
3931 	 other than FLAGS_N, which we test below.  */
3932       next = XEXP (PATTERN (next), 1);
3933     }
3934   else
3935     {
3936 #if DEBUG_CMP
3937       fprintf(stderr, "compare not followed by conditional\n");
3938       debug_rtx(next);
3939 #endif
3940       return false;
3941     }
3942 #if DEBUG_CMP
3943   fprintf(stderr, "conditional is: ");
3944   debug_rtx(next);
3945 #endif
3946 
3947   flags_needed = flags_needed_for_conditional (next);
3948   if (flags_needed == FLAGS_N)
3949     {
3950 #if DEBUG_CMP
3951       fprintf(stderr, "compare not followed by conditional\n");
3952       debug_rtx(next);
3953 #endif
3954       return false;
3955     }
3956 
3957   /* Compare doesn't set overflow and carry the same way that
3958      arithmetic instructions do, so we can't replace those.  */
3959   if (flags_needed & FLAGS_OC)
3960     return false;
3961 
3962   prev = cmp;
3963   do {
3964     prev = prev_nonnote_insn (prev);
3965     if (!prev)
3966       {
3967 #if DEBUG_CMP
3968 	fprintf(stderr, "No previous insn.\n");
3969 #endif
3970 	return false;
3971       }
3972     if (!INSN_P (prev))
3973       {
3974 #if DEBUG_CMP
3975 	fprintf(stderr, "Previous insn is a non-insn.\n");
3976 #endif
3977 	return false;
3978       }
3979     pp = PATTERN (prev);
3980     if (GET_CODE (pp) != SET)
3981       {
3982 #if DEBUG_CMP
3983 	fprintf(stderr, "Previous insn is not a SET.\n");
3984 #endif
3985 	return false;
3986       }
3987     pflags = get_attr_flags (prev);
3988 
3989     /* Looking up attributes of previous insns corrupted the recog
3990        tables.  */
3991     INSN_UID (cmp) = -1;
3992     recog (PATTERN (cmp), cmp, 0);
3993 
3994     if (pflags == FLAGS_N
3995 	&& reg_mentioned_p (op0, pp))
3996       {
3997 #if DEBUG_CMP
3998 	fprintf(stderr, "intermediate non-flags insn uses op:\n");
3999 	debug_rtx(prev);
4000 #endif
4001 	return false;
4002       }
4003   } while (pflags == FLAGS_N);
4004 #if DEBUG_CMP
4005   fprintf(stderr, "previous flag-setting insn:\n");
4006   debug_rtx(prev);
4007   debug_rtx(pp);
4008 #endif
4009 
4010   if (GET_CODE (pp) == SET
4011       && GET_CODE (XEXP (pp, 0)) == REG
4012       && REGNO (XEXP (pp, 0)) == FLG_REGNO
4013       && GET_CODE (XEXP (pp, 1)) == COMPARE)
4014     {
4015       /* Adjacent cbranches must have the same operands to be
4016 	 redundant.  */
4017       rtx pop0 = XEXP (XEXP (pp, 1), 0);
4018       rtx pop1 = XEXP (XEXP (pp, 1), 1);
4019 #if DEBUG_CMP
4020       fprintf(stderr, "adjacent cbranches\n");
4021       debug_rtx(pop0);
4022       debug_rtx(pop1);
4023 #endif
4024       if (rtx_equal_p (op0, pop0)
4025 	  && rtx_equal_p (op1, pop1))
4026 	return true;
4027 #if DEBUG_CMP
4028       fprintf(stderr, "prev cmp not same\n");
4029 #endif
4030       return false;
4031     }
4032 
4033   /* Else the previous insn must be a SET, with either the source or
4034      dest equal to operands[0], and operands[1] must be zero.  */
4035 
4036   if (!rtx_equal_p (op1, const0_rtx))
4037     {
4038 #if DEBUG_CMP
4039       fprintf(stderr, "operands[1] not const0_rtx\n");
4040 #endif
4041       return false;
4042     }
4043   if (GET_CODE (pp) != SET)
4044     {
4045 #if DEBUG_CMP
4046       fprintf (stderr, "pp not set\n");
4047 #endif
4048       return false;
4049     }
4050   if (!rtx_equal_p (op0, SET_SRC (pp))
4051       && !rtx_equal_p (op0, SET_DEST (pp)))
4052     {
4053 #if DEBUG_CMP
4054       fprintf(stderr, "operands[0] not found in set\n");
4055 #endif
4056       return false;
4057     }
4058 
4059 #if DEBUG_CMP
4060   fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4061 #endif
4062   if ((pflags & flags_needed) == flags_needed)
4063     return true;
4064 
4065   return false;
4066 }
4067 
4068 /* Return the pattern for a compare.  This will be commented out if
4069    the compare is redundant, else a normal pattern is returned.  Thus,
4070    the assembler output says where the compare would have been.  */
4071 char *
4072 m32c_output_compare (rtx insn, rtx *operands)
4073 {
4074   static char template[] = ";cmp.b\t%1,%0";
4075   /*                             ^ 5  */
4076 
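  /* Patch in the size suffix (b, w or l) to match the operand's mode,
     then either return the whole template (the leading ';' turns it
     into an assembler comment when the compare is redundant) or skip
     the ';' so a real cmp is emitted.  */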
4077   template[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4078   if (m32c_compare_redundant (insn, operands))
4079     {
4080 #if DEBUG_CMP
4081       fprintf(stderr, "cbranch: cmp not needed\n");
4082 #endif
4083       return template;
4084     }
4085 
4086 #if DEBUG_CMP
4087   fprintf(stderr, "cbranch: cmp needed: `%s'\n", template);
4088 #endif
4089   return template + 1;
4090 }
4091 
4092 /* The Global `targetm' Variable. */
4093 
4094 struct gcc_target targetm = TARGET_INITIALIZER;
4095 
4096 #include "gt-m32c.h"
4097