/* Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */


#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "insn-config.h"
#include "expr.h"
#include "optabs.h"
#include "real.h"
#include "recog.h"
#include "langhooks.h"

static void store_fixed_bit_field	PARAMS ((rtx, unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT, rtx));
static void store_split_bit_field	PARAMS ((rtx, unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT, rtx));
static rtx extract_fixed_bit_field	PARAMS ((enum machine_mode, rtx,
						 unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT,
						 rtx, int));
static rtx mask_rtx			PARAMS ((enum machine_mode, int,
						 int, int));
static rtx lshift_value			PARAMS ((enum machine_mode, rtx,
						 int, int));
static rtx extract_split_bit_field	PARAMS ((rtx, unsigned HOST_WIDE_INT,
						 unsigned HOST_WIDE_INT, int));
static void do_cmp_and_jump		PARAMS ((rtx, rtx, enum rtx_code,
						 enum machine_mode, rtx));
/* Nonzero means divides or modulus operations are relatively cheap for
   powers of two, so don't use branches; emit the operation instead.
   Usually, this will mean that the MD file will emit non-branch
   sequences.  */

static int sdiv_pow2_cheap, smod_pow2_cheap;

#ifndef SLOW_UNALIGNED_ACCESS
#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
#endif

/* For compilers that support multiple targets with different word sizes,
   MAX_BITS_PER_WORD contains the biggest value of BITS_PER_WORD.  An example
   is the H8/300(H) compiler.  */

#ifndef MAX_BITS_PER_WORD
#define MAX_BITS_PER_WORD BITS_PER_WORD
#endif

/* Reduce conditional compilation elsewhere.  */
#ifndef HAVE_insv
#define HAVE_insv	0
#define CODE_FOR_insv	CODE_FOR_nothing
#define gen_insv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extv
#define HAVE_extv	0
#define CODE_FOR_extv	CODE_FOR_nothing
#define gen_extv(a,b,c,d) NULL_RTX
#endif
#ifndef HAVE_extzv
#define HAVE_extzv	0
#define CODE_FOR_extzv	CODE_FOR_nothing
#define gen_extzv(a,b,c,d) NULL_RTX
#endif
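
/* With the stub definitions above, code below can test HAVE_insv and
   call gen_insv unconditionally.  An illustrative (hypothetical) use:

     if (HAVE_insv)
       pat = gen_insv (dest, GEN_INT (size), GEN_INT (pos), value);

   On a target without the named pattern, HAVE_insv is the constant 0,
   the branch folds away at compile time, and gen_insv expands to
   NULL_RTX.  */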

/* Cost of various pieces of RTL.  Note that some of these are indexed by
   shift count and some by mode.  */
static int add_cost, negate_cost, zero_cost;
static int shift_cost[MAX_BITS_PER_WORD];
static int shiftadd_cost[MAX_BITS_PER_WORD];
static int shiftsub_cost[MAX_BITS_PER_WORD];
static int mul_cost[NUM_MACHINE_MODES];
static int div_cost[NUM_MACHINE_MODES];
static int mul_widen_cost[NUM_MACHINE_MODES];
static int mul_highpart_cost[NUM_MACHINE_MODES];

void
init_expmed ()
{
  rtx reg, shift_insn, shiftadd_insn, shiftsub_insn;
  int dummy;
  int m;
  enum machine_mode mode, wider_mode;

  start_sequence ();

  /* This is "some random pseudo register" for purposes of calling recog
     to see what insns exist.  */
  reg = gen_rtx_REG (word_mode, 10000);

  zero_cost = rtx_cost (const0_rtx, 0);
  add_cost = rtx_cost (gen_rtx_PLUS (word_mode, reg, reg), SET);

  shift_insn = emit_insn (gen_rtx_SET (VOIDmode, reg,
				       gen_rtx_ASHIFT (word_mode, reg,
						       const0_rtx)));

  shiftadd_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_PLUS (word_mode,
					    gen_rtx_MULT (word_mode,
							  reg, const0_rtx),
					    reg)));

  shiftsub_insn
    = emit_insn (gen_rtx_SET (VOIDmode, reg,
			      gen_rtx_MINUS (word_mode,
					     gen_rtx_MULT (word_mode,
							   reg, const0_rtx),
					     reg)));

  init_recog ();

  shift_cost[0] = 0;
  shiftadd_cost[0] = shiftsub_cost[0] = add_cost;

  for (m = 1; m < MAX_BITS_PER_WORD; m++)
    {
      rtx c_int = GEN_INT ((HOST_WIDE_INT) 1 << m);
      shift_cost[m] = shiftadd_cost[m] = shiftsub_cost[m] = 32000;

      XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
      if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
	shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftadd_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftadd_insn), shiftadd_insn, &dummy) >= 0)
	shiftadd_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftadd_insn)), SET);

      XEXP (XEXP (SET_SRC (PATTERN (shiftsub_insn)), 0), 1) = c_int;
      if (recog (PATTERN (shiftsub_insn), shiftsub_insn, &dummy) >= 0)
	shiftsub_cost[m] = rtx_cost (SET_SRC (PATTERN (shiftsub_insn)), SET);
    }

  negate_cost = rtx_cost (gen_rtx_NEG (word_mode, reg), SET);

  sdiv_pow2_cheap
    = (rtx_cost (gen_rtx_DIV (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);
  smod_pow2_cheap
    = (rtx_cost (gen_rtx_MOD (word_mode, reg, GEN_INT (32)), SET)
       <= 2 * add_cost);

  for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      reg = gen_rtx_REG (mode, 10000);
      div_cost[(int) mode] = rtx_cost (gen_rtx_UDIV (mode, reg, reg), SET);
      mul_cost[(int) mode] = rtx_cost (gen_rtx_MULT (mode, reg, reg), SET);
      wider_mode = GET_MODE_WIDER_MODE (mode);
      if (wider_mode != VOIDmode)
	{
	  mul_widen_cost[(int) wider_mode]
	    = rtx_cost (gen_rtx_MULT (wider_mode,
				      gen_rtx_ZERO_EXTEND (wider_mode, reg),
				      gen_rtx_ZERO_EXTEND (wider_mode, reg)),
			SET);
	  mul_highpart_cost[(int) mode]
	    = rtx_cost (gen_rtx_TRUNCATE
			(mode,
			 gen_rtx_LSHIFTRT (wider_mode,
					   gen_rtx_MULT (wider_mode,
							 gen_rtx_ZERO_EXTEND
							 (wider_mode, reg),
							 gen_rtx_ZERO_EXTEND
							 (wider_mode, reg)),
					   GEN_INT (GET_MODE_BITSIZE (mode)))),
			SET);
	}
    }

  end_sequence ();
}
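
/* The probing idiom used above, in isolation: patch the candidate
   shift count into an already-recognizable SET pattern, ask recog
   whether the target really has such an insn, and only then ask the
   backend what it costs.  A minimal sketch of one probe:

     XEXP (SET_SRC (PATTERN (shift_insn)), 1) = GEN_INT (m);
     if (recog (PATTERN (shift_insn), shift_insn, &dummy) >= 0)
       shift_cost[m] = rtx_cost (SET_SRC (PATTERN (shift_insn)), SET);

   When recog fails, the 32000 sentinel assigned beforehand survives,
   making that shift count look prohibitively expensive to any later
   cost comparison.  */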

/* Return an rtx representing minus the value of X.
   MODE is the intended mode of the result,
   useful if X is a CONST_INT.  */

rtx
negate_rtx (mode, x)
     enum machine_mode mode;
     rtx x;
{
  rtx result = simplify_unary_operation (NEG, mode, x, mode);

  if (result == 0)
    result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);

  return result;
}
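
/* For example, negate_rtx (SImode, GEN_INT (5)) folds immediately to
   (const_int -5) via simplify_unary_operation, while negating a pseudo
   register emits a NEG (or equivalent) insn through expand_unop.  */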

/* Report on the availability of insv/extv/extzv and the desired mode
   of each of their operands.  Returns MAX_MACHINE_MODE if HAVE_foo
   is false; else the mode of the specified operand.  If OPNO is -1,
   all the caller cares about is whether the insn is available.  */
enum machine_mode
mode_for_extraction (pattern, opno)
     enum extraction_pattern pattern;
     int opno;
{
  const struct insn_data *data;

  switch (pattern)
    {
    case EP_insv:
      if (HAVE_insv)
	{
	  data = &insn_data[CODE_FOR_insv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extv:
      if (HAVE_extv)
	{
	  data = &insn_data[CODE_FOR_extv];
	  break;
	}
      return MAX_MACHINE_MODE;

    case EP_extzv:
      if (HAVE_extzv)
	{
	  data = &insn_data[CODE_FOR_extzv];
	  break;
	}
      return MAX_MACHINE_MODE;

    default:
      abort ();
    }

  if (opno == -1)
    return VOIDmode;

  /* Everyone who uses this function used to follow it with
     if (result == VOIDmode) result = word_mode; */
  if (data->operand[opno].mode == VOIDmode)
    return word_mode;
  return data->operand[opno].mode;
}
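
/* Typical use, as in store_bit_field below:

     enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

   which yields the mode insv wants for its value operand, word_mode if
   the pattern leaves that operand's mode unspecified, and
   MAX_MACHINE_MODE if the target has no insv at all.  */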


/* Generate code to store value from rtx VALUE
   into a bit-field within structure STR_RTX
   containing BITSIZE bits starting at bit BITNUM.
   FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
   ALIGN is the alignment that STR_RTX is known to have.
   TOTAL_SIZE is the size of the structure in bytes, or -1 if varying.  */

/* ??? Note that there are two different ideas here for how
   to determine the size to count bits within, for a register.
   One is BITS_PER_WORD, and the other is the size of operand 3
   of the insv pattern.

   If operand 3 of the insv pattern is VOIDmode, then we will use BITS_PER_WORD
   else, we use the mode of operand 3.  */

rtx
store_bit_field (str_rtx, bitsize, bitnum, fieldmode, value, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     enum machine_mode fieldmode;
     rtx value;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  int byte_offset;
  rtx orig_value;

  enum machine_mode op_mode = mode_for_extraction (EP_insv, 3);

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  while (GET_CODE (op0) == SUBREG)
    {
      /* The following line once was done only if WORDS_BIG_ENDIAN,
	 but I think that is a mistake.  WORDS_BIG_ENDIAN is
	 meaningful at a much higher level; when structures are copied
	 between memory and regs, the higher-numbered regs
	 always get higher addresses.  */
      offset += (SUBREG_BYTE (op0) / UNITS_PER_WORD);
      /* We used to adjust BITPOS here, but now we do the whole adjustment
	 right after the loop.  */
      op0 = SUBREG_REG (op0);
    }

  value = protect_from_queue (value, 0);

  if (flag_force_mem)
    {
      int old_generating_concat_p = generating_concat_p;
      generating_concat_p = 0;
      value = force_not_mem (value);
      generating_concat_p = old_generating_concat_p;
    }

  /* If the target is a register overwriting the entire object, or if we are
     storing a full-word or multi-word field, the store can be done with just
     a SUBREG.

     If the target is memory, storing any naturally aligned field can be
     done with a simple store.  For targets that support fast unaligned
     memory access, any naturally sized, unit aligned field can be done
     directly.  */

  byte_offset = (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
                + (offset * UNITS_PER_WORD);

  if (bitpos == 0
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (GET_CODE (op0) != MEM
	  ? ((GET_MODE_SIZE (fieldmode) >= UNITS_PER_WORD
	     || GET_MODE_SIZE (GET_MODE (op0)) == GET_MODE_SIZE (fieldmode))
	     && byte_offset % GET_MODE_SIZE (fieldmode) == 0)
	  : (! SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
	     || (offset * BITS_PER_UNIT % bitsize == 0
		 && MEM_ALIGN (op0) % GET_MODE_BITSIZE (fieldmode) == 0))))
    {
      if (GET_MODE (op0) != fieldmode)
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == fieldmode
		  || GET_MODE_CLASS (fieldmode) == MODE_INT
		  || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		abort ();
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (fieldmode, op0, byte_offset);
	  else
	    op0 = adjust_address (op0, fieldmode, offset);
	}
      emit_move_insn (op0, value);
      return value;
    }
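
  /* Worked example of the fast path above, assuming a 32-bit target:
     storing an SImode field at bitnum 32 of a DImode register gives
     bitpos == 0, bitsize == GET_MODE_BITSIZE (SImode), and
     byte_offset == 4, a multiple of GET_MODE_SIZE (SImode), so the
     store collapses to one move into (subreg:SI (reg:DI ...) 4).  */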

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  This must come after the entire register case above,
     since that case is valid for any mode.  The following cases are only
     valid for integral modes.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = adjust_address (op0, imode, 0);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));
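
  /* Example of the adjustment above, assuming a 32-bit big-endian
     target: if OP0 has been narrowed to an HImode register, unit is
     still 32 while OP0 is only 16 bits wide, so BITPOS gains 16 to be
     expressed relative to the msb of a full word again.  */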

  /* Storing an lsb-aligned field in a register
     can be done with a movestrict instruction.  */

  if (GET_CODE (op0) != MEM
      && (BYTES_BIG_ENDIAN ? bitpos + bitsize == unit : bitpos == 0)
      && bitsize == GET_MODE_BITSIZE (fieldmode)
      && (movstrict_optab->handlers[(int) fieldmode].insn_code
	  != CODE_FOR_nothing))
    {
      int icode = movstrict_optab->handlers[(int) fieldmode].insn_code;

      /* Get appropriate low part of the value being stored.  */
      if (GET_CODE (value) == CONST_INT || GET_CODE (value) == REG)
	value = gen_lowpart (fieldmode, value);
      else if (!(GET_CODE (value) == SYMBOL_REF
		 || GET_CODE (value) == LABEL_REF
		 || GET_CODE (value) == CONST))
	value = convert_to_mode (fieldmode, value, 0);

      if (! (*insn_data[icode].operand[1].predicate) (value, fieldmode))
	value = copy_to_mode_reg (fieldmode, value);

      if (GET_CODE (op0) == SUBREG)
	{
	  if (GET_MODE (SUBREG_REG (op0)) == fieldmode
	      || GET_MODE_CLASS (fieldmode) == MODE_INT
	      || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT)
	    op0 = SUBREG_REG (op0);
	  else
	    /* Else we've got some float mode source being extracted into
	       a different float mode destination -- this combination of
	       subregs results in Severe Tire Damage.  */
	    abort ();
	}

      emit_insn (GEN_FCN (icode)
		 (gen_rtx_SUBREG (fieldmode, op0,
				  (bitnum % BITS_PER_WORD) / BITS_PER_UNIT
				  + (offset * UNITS_PER_WORD)),
				  value));

      return value;
    }

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.
	 However, only do that if the value is not BLKmode.  */

      unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      /* This is the mode we must force value to, so that there will be enough
	 subwords to extract.  Note that fieldmode will often (always?) be
	 VOIDmode, because that is what store_field uses to indicate that this
	 is a bit field, but passing VOIDmode to operand_subword_force will
	 result in an abort.  */
      fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  unsigned int wordnum = (backwards ? nwords - i - 1 : i);
	  unsigned int bit_offset = (backwards
				     ? MAX ((int) bitsize - ((int) i + 1)
					    * BITS_PER_WORD,
					    0)
				     : (int) i * BITS_PER_WORD);

	  store_bit_field (op0, MIN (BITS_PER_WORD,
				     bitsize - i * BITS_PER_WORD),
			   bitnum + bit_offset, word_mode,
			   operand_subword_force (value, wordnum,
						  (GET_MODE (value) == VOIDmode
						   ? fieldmode
						   : GET_MODE (value))),
			   total_size);
	}
      return value;
    }
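
  /* Worked example of the multi-word loop above, assuming 32-bit words
     and WORDS_BIG_ENDIAN: a 50-bit field is stored as nwords == 2
     pieces.  Iteration 0 stores the full low-order word of VALUE
     (wordnum 1) at bit_offset 18; iteration 1 stores the remaining 18
     high-order bits (wordnum 0) at bit_offset 0.  Least significant
     first, because only the most significant piece can be partial.  */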

  /* From here on we can assume that the field to be stored in is
     contained within a full word, since it is shorter than a word.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    {
	      /* Since this is a destination (lvalue), we can't copy it to a
		 pseudo.  We can trivially remove a SUBREG that does not
		 change the size of the operand.  Such a SUBREG may have been
		 added above.  Otherwise, abort.  */
	      if (GET_CODE (op0) == SUBREG
		  && (GET_MODE_SIZE (GET_MODE (op0))
		      == GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
		op0 = SUBREG_REG (op0);
	      else
		abort ();
	    }
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
		                op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }
  else
    op0 = protect_from_queue (op0, 1);

  /* If VALUE is a floating-point mode, access it as an integer of the
     corresponding size.  This can occur on a machine with 64 bit registers
     that uses SFmode for float.  This can also occur for unaligned float
     structure fields.  */
  orig_value = value;
  if (GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
      && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
    value = gen_lowpart ((GET_MODE (value) == VOIDmode
			  ? word_mode : int_mode_for_mode (GET_MODE (value))),
			 value);

  /* Now OFFSET is nonzero only if OP0 is memory
     and is therefore always measured in bytes.  */

  if (HAVE_insv
      && GET_MODE (value) != BLKmode
      && !(bitsize == 1 && GET_CODE (value) == CONST_INT)
      /* Ensure insv's size is wide enough for this field.  */
      && (GET_MODE_BITSIZE (op_mode) >= bitsize)
      && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
	    && (bitsize + bitpos > GET_MODE_BITSIZE (op_mode))))
    {
      int xbitpos = bitpos;
      rtx value1;
      rtx xop0 = op0;
      rtx last = get_last_insn ();
      rtx pat;
      enum machine_mode maxmode = mode_for_extraction (EP_insv, 3);
      int save_volatile_ok = volatile_ok;

      volatile_ok = 1;

      /* If this machine's insv can only insert into a register, copy OP0
	 into a register and save it back later.  */
      /* This used to check flag_force_mem, but that was a serious
	 de-optimization now that flag_force_mem is enabled by -O2.  */
      if (GET_CODE (op0) == MEM
	  && ! ((*insn_data[(int) CODE_FOR_insv].operand[0].predicate)
		(op0, VOIDmode)))
	{
	  rtx tempreg;
	  enum machine_mode bestmode;

	  /* Get the mode to use for inserting into this field.  If OP0 is
	     BLKmode, get the smallest mode consistent with the alignment. If
	     OP0 is a non-BLKmode object that is no wider than MAXMODE, use its
	     mode. Otherwise, use the smallest mode containing the field.  */

	  if (GET_MODE (op0) == BLKmode
	      || GET_MODE_SIZE (GET_MODE (op0)) > GET_MODE_SIZE (maxmode))
	    bestmode
	      = get_best_mode (bitsize, bitnum, MEM_ALIGN (op0), maxmode,
			       MEM_VOLATILE_P (op0));
	  else
	    bestmode = GET_MODE (op0);

	  if (bestmode == VOIDmode
	      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (op0))
		  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (op0)))
	    goto insv_loses;

	  /* Adjust address to point to the containing unit of that mode.
	     Compute offset as multiple of this unit, counting in bytes.  */
	  unit = GET_MODE_BITSIZE (bestmode);
	  offset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
	  bitpos = bitnum % unit;
	  op0 = adjust_address (op0, bestmode, offset);

	  /* Fetch that unit, store the bitfield in it, then store
	     the unit.  */
	  tempreg = copy_to_reg (op0);
	  store_bit_field (tempreg, bitsize, bitpos, fieldmode, orig_value,
			   total_size);
	  emit_move_insn (op0, tempreg);
	  return value;
	}
      volatile_ok = save_volatile_ok;

      /* Add OFFSET into OP0's address.  */
      if (GET_CODE (xop0) == MEM)
	xop0 = adjust_address (xop0, byte_mode, offset);

      /* If xop0 is a register, we need it in MAXMODE
	 to make it acceptable to the format of insv.  */
      if (GET_CODE (xop0) == SUBREG)
	/* We can't just change the mode, because this might clobber op0,
	   and we will need the original value of op0 if insv fails.  */
	xop0 = gen_rtx_SUBREG (maxmode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
      if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

      /* On big-endian machines, we count bits from the most significant.
	 If the bit field insn does not, we must invert.  */

      if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	xbitpos = unit - bitsize - xbitpos;

      /* We have been counting XBITPOS within UNIT.
	 Count instead within the size of the register.  */
      if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

      unit = GET_MODE_BITSIZE (maxmode);

      /* Convert VALUE to maxmode (which insv insn wants) in VALUE1.  */
      value1 = value;
      if (GET_MODE (value) != maxmode)
	{
	  if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
	    {
	      /* Optimization: Don't bother really extending VALUE
		 if it has all the bits we will actually use.  However,
		 if we must narrow it, be sure we do it correctly.  */

	      if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (maxmode))
		{
		  rtx tmp;

		  tmp = simplify_subreg (maxmode, value1, GET_MODE (value), 0);
		  if (! tmp)
		    tmp = simplify_gen_subreg (maxmode,
					       force_reg (GET_MODE (value),
							  value1),
					       GET_MODE (value), 0);
		  value1 = tmp;
		}
	      else
		value1 = gen_lowpart (maxmode, value1);
	    }
	  else if (GET_CODE (value) == CONST_INT)
	    value1 = gen_int_mode (INTVAL (value), maxmode);
	  else if (!CONSTANT_P (value))
	    /* Parse phase is supposed to make VALUE's data type
	       match that of the component reference, which is a type
	       at least as wide as the field; so VALUE should have
	       a mode that corresponds to that type.  */
	    abort ();
	}

      /* If this machine's insv insists on a register,
	 get VALUE1 into a register.  */
      if (! ((*insn_data[(int) CODE_FOR_insv].operand[3].predicate)
	     (value1, maxmode)))
	value1 = force_reg (maxmode, value1);

      pat = gen_insv (xop0, GEN_INT (bitsize), GEN_INT (xbitpos), value1);
      if (pat)
	emit_insn (pat);
      else
	{
	  delete_insns_since (last);
	  store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
	}
    }
  else
    insv_loses:
    /* Insv is not available; store using shifts and boolean ops.  */
    store_fixed_bit_field (op0, offset, bitsize, bitpos, value);
  return value;
}

/* Use shifts and boolean operations to store VALUE
   into a bit field of width BITSIZE
   in a memory location specified by OP0 except offset by OFFSET bytes.
     (OFFSET must be 0 if OP0 is a register.)
   The field starts at position BITPOS within the byte.
    (If OP0 is a register, it may be a full word or a narrower mode,
     but BITPOS still counts within a full word,
     which is significant on bigendian machines.)

   Note that protect_from_queue has already been done on OP0 and VALUE.  */

static void
store_fixed_bit_field (op0, offset, bitsize, bitpos, value)
     rtx op0;
     unsigned HOST_WIDE_INT offset, bitsize, bitpos;
     rtx value;
{
  enum machine_mode mode;
  unsigned int total_bits = BITS_PER_WORD;
  rtx subtarget, temp;
  int all_zero = 0;
  int all_one = 0;

  /* There is a case not handled here:
     a structure with a known alignment of just a halfword
     and a field split across two aligned halfwords within the structure.
     Or likewise a structure with a known alignment of just a byte
     and a field split across two bytes.
     Such cases are not supposed to be able to occur.  */

  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    {
      if (offset != 0)
	abort ();
      /* Special treatment for a bit field split across two registers.  */
      if (bitsize + bitpos > BITS_PER_WORD)
	{
	  store_split_bit_field (op0, bitsize, bitpos, value);
	  return;
	}
    }
  else
    {
      /* Get the proper mode to use for this field.  We want a mode that
	 includes the entire field.  If such a mode would be larger than
	 a word, we won't be doing the extraction the normal way.
	 We don't want a mode bigger than the destination.  */

      mode = GET_MODE (op0);
      if (GET_MODE_BITSIZE (mode) == 0
	  || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
	mode = word_mode;
      mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
			    MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));

      if (mode == VOIDmode)
	{
	  /* The only way this should occur is if the field spans word
	     boundaries.  */
	  store_split_bit_field (op0, bitsize, bitpos + offset * BITS_PER_UNIT,
				 value);
	  return;
	}

      total_bits = GET_MODE_BITSIZE (mode);

      /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
	 be in the range 0 to total_bits-1, and put any excess bytes in
	 OFFSET.  */
      if (bitpos >= total_bits)
	{
	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
		     * BITS_PER_UNIT);
	}

      /* Get ref to an aligned byte, halfword, or word containing the field.
	 Adjust BITPOS to be position within a word,
	 and OFFSET to be the offset of that word.
	 Then alter OP0 to refer to that word.  */
      bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
      offset -= (offset % (total_bits / BITS_PER_UNIT));
      op0 = adjust_address (op0, mode, offset);
    }

  mode = GET_MODE (op0);

  /* Now MODE is either some integral mode for a MEM as OP0,
     or is a full-word for a REG as OP0.  TOTAL_BITS corresponds.
     The bit field is contained entirely within OP0.
     BITPOS is the starting bit number within OP0.
     (OP0's mode may actually be narrower than MODE.)  */

  if (BYTES_BIG_ENDIAN)
      /* BITPOS is the distance between our msb
	 and that of the containing datum.
	 Convert it to the distance from the lsb.  */
      bitpos = total_bits - bitsize - bitpos;

  /* Now BITPOS is always the distance between our lsb
     and that of OP0.  */

  /* Shift VALUE left by BITPOS bits.  If VALUE is not constant,
     we must first convert its mode to MODE.  */

  if (GET_CODE (value) == CONST_INT)
    {
      HOST_WIDE_INT v = INTVAL (value);

      if (bitsize < HOST_BITS_PER_WIDE_INT)
	v &= ((HOST_WIDE_INT) 1 << bitsize) - 1;

      if (v == 0)
	all_zero = 1;
      else if ((bitsize < HOST_BITS_PER_WIDE_INT
		&& v == ((HOST_WIDE_INT) 1 << bitsize) - 1)
	       || (bitsize == HOST_BITS_PER_WIDE_INT && v == -1))
	all_one = 1;

      value = lshift_value (mode, value, bitpos, bitsize);
    }
  else
    {
      int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
		      && bitpos + bitsize != GET_MODE_BITSIZE (mode));

      if (GET_MODE (value) != mode)
	{
	  if ((GET_CODE (value) == REG || GET_CODE (value) == SUBREG)
	      && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (value)))
	    value = gen_lowpart (mode, value);
	  else
	    value = convert_to_mode (mode, value, 1);
	}

      if (must_and)
	value = expand_binop (mode, and_optab, value,
			      mask_rtx (mode, 0, bitsize, 0),
			      NULL_RTX, 1, OPTAB_LIB_WIDEN);
      if (bitpos > 0)
	value = expand_shift (LSHIFT_EXPR, mode, value,
			      build_int_2 (bitpos, 0), NULL_RTX, 1);
    }

  /* Now clear the chosen bits in OP0,
     except that if VALUE is -1 we need not bother.  */

  subtarget = (GET_CODE (op0) == REG || ! flag_force_mem) ? op0 : 0;

  if (! all_one)
    {
      temp = expand_binop (mode, and_optab, op0,
			   mask_rtx (mode, bitpos, bitsize, 1),
			   subtarget, 1, OPTAB_LIB_WIDEN);
      subtarget = temp;
    }
  else
    temp = op0;

  /* Now logical-or VALUE into OP0, unless it is zero.  */

  if (! all_zero)
    temp = expand_binop (mode, ior_optab, temp, value,
			 subtarget, 1, OPTAB_LIB_WIDEN);
  if (op0 != temp)
    emit_move_insn (op0, temp);
}
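
/* In miniature, the function above computes

     op0 = (op0 & ~(MASK << BITPOS)) | ((VALUE & MASK) << BITPOS)

   where MASK has BITSIZE low-order one bits.  E.g. storing 5 into a
   3-bit field at lsb-relative bitpos 4 of a byte B is
   B = (B & ~0x70) | 0x50; the AND is skipped when VALUE fills the
   field with ones and the IOR when VALUE is zero.  */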

/* Store a bit field that is split across multiple accessible memory objects.

   OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
   BITSIZE is the field width; BITPOS the position of its first bit
   (within the word).
   VALUE is the value to store.

   This does not yet handle fields wider than BITS_PER_WORD.  */

static void
store_split_bit_field (op0, bitsize, bitpos, value)
     rtx op0;
     unsigned HOST_WIDE_INT bitsize, bitpos;
     rtx value;
{
  unsigned int unit;
  unsigned int bitsdone = 0;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* If VALUE is a constant other than a CONST_INT, get it into a register in
     WORD_MODE.  If we can do this using gen_lowpart_common, do so.  Note
     that VALUE might be a floating-point constant.  */
  if (CONSTANT_P (value) && GET_CODE (value) != CONST_INT)
    {
      rtx word = gen_lowpart_common (word_mode, value);

      if (word && (value != word))
	value = word;
      else
	value = gen_lowpart_common (word_mode,
				    force_reg (GET_MODE (value) != VOIDmode
					       ? GET_MODE (value)
					       : word_mode, value));
    }
  else if (GET_CODE (value) == ADDRESSOF)
    value = copy_to_reg (value);

  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part, word;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 store_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      if (BYTES_BIG_ENDIAN)
	{
	  int total_bits;

	  /* We must do an endian conversion exactly the same way as it is
	     done in extract_bit_field, so that the two calls to
	     extract_fixed_bit_field will have comparable arguments.  */
	  if (GET_CODE (value) != MEM || GET_MODE (value) == BLKmode)
	    total_bits = BITS_PER_WORD;
	  else
	    total_bits = GET_MODE_BITSIZE (GET_MODE (value));

	  /* Fetch successively less significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> (bitsize - bitsdone - thissize))
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    /* The args are chosen so that the last part includes the
	       lsb.  Give extract_bit_field the value it needs (with
	       endianness compensation) to fetch the piece we want.  */
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    total_bits - bitsize + bitsdone,
					    NULL_RTX, 1);
	}
      else
	{
	  /* Fetch successively more significant portions.  */
	  if (GET_CODE (value) == CONST_INT)
	    part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
			     >> bitsdone)
			    & (((HOST_WIDE_INT) 1 << thissize) - 1));
	  else
	    part = extract_fixed_bit_field (word_mode, value, 0, thissize,
					    bitsdone, NULL_RTX, 1);
	}

      /* If OP0 is a register, then handle OFFSET here.

	 When handling multiword bitfields, extract_bit_field may pass
	 down a word_mode SUBREG of a larger REG for a bitfield that actually
	 crosses a word boundary.  Thus, for a SUBREG, we must find
	 the current word starting from the base register.  */
      if (GET_CODE (op0) == SUBREG)
	{
	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
					GET_MODE (SUBREG_REG (op0)));
	  offset = 0;
	}
      else if (GET_CODE (op0) == REG)
	{
	  word = operand_subword_force (op0, offset, GET_MODE (op0));
	  offset = 0;
	}
      else
	word = op0;

      /* OFFSET is in UNITs, and UNIT is in bits.
         store_fixed_bit_field wants offset in bytes.  */
      store_fixed_bit_field (word, offset * unit / BITS_PER_UNIT, thissize,
			     thispos, part);
      bitsdone += thissize;
    }
}
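
/* Worked example for the splitting loop above, little-endian, with
   UNIT == 8 (a byte-aligned MEM): a 12-bit field at BITPOS 6 goes out
   in three pieces -- 2 bits at thispos 6 of byte 0, 8 bits at thispos
   0 of byte 1, and 2 bits at thispos 0 of byte 2.  Each piece fits
   within one unit, so store_fixed_bit_field cannot recurse back.  */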

/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.
   It may be a QUEUED.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   TOTAL_SIZE is the size in bytes of the containing structure,
   or -1 if varying.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.  */

rtx
extract_bit_field (str_rtx, bitsize, bitnum, unsignedp,
		   target, mode, tmode, total_size)
     rtx str_rtx;
     unsigned HOST_WIDE_INT bitsize;
     unsigned HOST_WIDE_INT bitnum;
     int unsignedp;
     rtx target;
     enum machine_mode mode, tmode;
     HOST_WIDE_INT total_size;
{
  unsigned int unit
    = (GET_CODE (str_rtx) == MEM) ? BITS_PER_UNIT : BITS_PER_WORD;
  unsigned HOST_WIDE_INT offset = bitnum / unit;
  unsigned HOST_WIDE_INT bitpos = bitnum % unit;
  rtx op0 = str_rtx;
  rtx spec_target = target;
  rtx spec_target_subreg = 0;
  enum machine_mode int_mode;
  enum machine_mode extv_mode = mode_for_extraction (EP_extv, 0);
  enum machine_mode extzv_mode = mode_for_extraction (EP_extzv, 0);
  enum machine_mode mode1;
  int byte_offset;

  /* Discount the part of the structure before the desired byte.
     We need to know how many bytes are safe to reference after it.  */
  if (total_size >= 0)
    total_size -= (bitpos / BIGGEST_ALIGNMENT
		   * (BIGGEST_ALIGNMENT / BITS_PER_UNIT));

  if (tmode == VOIDmode)
    tmode = mode;

  while (GET_CODE (op0) == SUBREG)
    {
      bitpos += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      if (bitpos > unit)
	{
	  offset += (bitpos / unit);
	  bitpos %= unit;
	}
      op0 = SUBREG_REG (op0);
    }

  if (GET_CODE (op0) == REG
      && mode == GET_MODE (op0)
      && bitnum == 0
      && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
    {
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  {
    enum machine_mode imode = int_mode_for_mode (GET_MODE (op0));
    if (imode != GET_MODE (op0))
      {
	if (GET_CODE (op0) == MEM)
	  op0 = adjust_address (op0, imode, 0);
	else if (imode != BLKmode)
	  op0 = gen_lowpart (imode, op0);
	else
	  abort ();
      }
  }

  /* We may be accessing data outside the field, which means
     we can alias adjacent data.  */
  if (GET_CODE (op0) == MEM)
    {
      op0 = shallow_copy_rtx (op0);
      set_mem_alias_set (op0, 0);
      set_mem_expr (op0, 0);
    }

  /* Extraction of a full-word or multi-word value from a structure
     in a register or aligned memory can be done with just a SUBREG.
     A subword value in the least significant part of a register
     can also be extracted with a SUBREG.  For this, we need the
     byte offset of the value in op0.  */

  byte_offset = bitpos / BITS_PER_UNIT + offset * UNITS_PER_WORD;

  /* If OP0 is a register, BITPOS must count within a word.
     But as we have it, it counts within whatever size OP0 now has.
     On a bigendian machine, these are not the same, so convert.  */
  if (BYTES_BIG_ENDIAN
      && GET_CODE (op0) != MEM
      && unit > GET_MODE_BITSIZE (GET_MODE (op0)))
    bitpos += unit - GET_MODE_BITSIZE (GET_MODE (op0));

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  mode1 = (VECTOR_MODE_P (tmode)
	   ? mode
	   : mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0));

  if (((GET_CODE (op0) != MEM
	&& TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op0)))
	&& GET_MODE_SIZE (mode1) != 0
	&& byte_offset % GET_MODE_SIZE (mode1) == 0)
       || (GET_CODE (op0) == MEM
	   && (! SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
	       || (offset * BITS_PER_UNIT % bitsize == 0
		   && MEM_ALIGN (op0) % bitsize == 0))))
      && ((bitsize >= BITS_PER_WORD && bitsize == GET_MODE_BITSIZE (mode)
	   && bitpos % BITS_PER_WORD == 0)
	  || (mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0) != BLKmode
	      /* ??? The big endian test here is wrong.  This is correct
		 if the value is in a register, and if mode_for_size is not
		 the same mode as op0.  This causes us to get unnecessarily
		 inefficient code from the Thumb port when -mbig-endian.  */
	      && (BYTES_BIG_ENDIAN
		  ? bitpos + bitsize == BITS_PER_WORD
		  : bitpos == 0))))
    {
      if (mode1 != GET_MODE (op0))
	{
	  if (GET_CODE (op0) == SUBREG)
	    {
	      if (GET_MODE (SUBREG_REG (op0)) == mode1
		  || GET_MODE_CLASS (mode1) == MODE_INT
		  || GET_MODE_CLASS (mode1) == MODE_PARTIAL_INT)
		op0 = SUBREG_REG (op0);
	      else
		/* Else we've got some float mode source being extracted into
		   a different float mode destination -- this combination of
		   subregs results in Severe Tire Damage.  */
		goto no_subreg_mode_swap;
	    }
	  if (GET_CODE (op0) == REG)
	    op0 = gen_rtx_SUBREG (mode1, op0, byte_offset);
	  else
	    op0 = adjust_address (op0, mode1, offset);
	}
      if (mode1 != mode)
	return convert_to_mode (tmode, op0, unsignedp);
      return op0;
    }
 no_subreg_mode_swap:

  /* Handle fields bigger than a word.  */

  if (bitsize > BITS_PER_WORD)
    {
      /* Here we transfer the words of the field
	 in the order least significant first.
	 This is because the most significant word is the one which may
	 be less than full.  */

      unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
      unsigned int i;

      if (target == 0 || GET_CODE (target) != REG)
	target = gen_reg_rtx (mode);

      /* Indicate for flow that the entire target reg is being set.  */
      emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

      for (i = 0; i < nwords; i++)
	{
	  /* If I is 0, use the low-order word in both field and target;
	     if I is 1, use the next to lowest word; and so on.  */
	  /* Word number in TARGET to use.  */
	  unsigned int wordnum
	    = (WORDS_BIG_ENDIAN
	       ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
	       : i);
	  /* Offset from start of field in OP0.  */
	  unsigned int bit_offset = (WORDS_BIG_ENDIAN
				     ? MAX (0, ((int) bitsize - ((int) i + 1)
						* (int) BITS_PER_WORD))
				     : (int) i * BITS_PER_WORD);
	  rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
	  rtx result_part
	    = extract_bit_field (op0, MIN (BITS_PER_WORD,
					   bitsize - i * BITS_PER_WORD),
				 bitnum + bit_offset, 1, target_part, mode,
				 word_mode, total_size);

	  if (target_part == 0)
	    abort ();

	  if (result_part != target_part)
	    emit_move_insn (target_part, result_part);
	}

      if (unsignedp)
	{
	  /* Unless we've filled TARGET, the upper regs in a multi-reg value
	     need to be zero'd out.  */
	  if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
	    {
	      unsigned int i, total_words;

	      total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
	      for (i = nwords; i < total_words; i++)
		emit_move_insn
		  (operand_subword (target,
				    WORDS_BIG_ENDIAN ? total_words - i - 1 : i,
				    1, VOIDmode),
		   const0_rtx);
	    }
	  return target;
	}

      /* Signed bit field: sign-extend with two arithmetic shifts.  */
      target = expand_shift (LSHIFT_EXPR, mode, target,
			     build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			     NULL_RTX, 0);
      return expand_shift (RSHIFT_EXPR, mode, target,
			   build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
			   NULL_RTX, 0);
    }
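
  /* The shift pair above is the usual sign-extension idiom: shift left
     so the field's sign bit becomes the mode's msb, then shift
     arithmetically back, smearing the sign bit across the vacated
     positions.  For a 5-bit field in a 32-bit mode: (x << 27) >> 27
     with an arithmetic right shift.  */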

  /* From here on we know the desired field is smaller than a word.  */

  /* Check if there is a correspondingly-sized integer field, so we can
     safely extract it as one size of integer, if necessary; then
     truncate or extend to the size that is wanted; then use SUBREGs or
     convert_to_mode to get one of the modes we really wanted.  */

  int_mode = int_mode_for_mode (tmode);
  if (int_mode == BLKmode)
    int_mode = int_mode_for_mode (mode);
  if (int_mode == BLKmode)
    abort ();    /* Should probably push op0 out to memory and then
		    do a load.  */

  /* OFFSET is the number of words or bytes (UNIT says which)
     from STR_RTX to the first word or byte containing part of the field.  */

  if (GET_CODE (op0) != MEM)
    {
      if (offset != 0
	  || GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
	{
	  if (GET_CODE (op0) != REG)
	    op0 = copy_to_reg (op0);
	  op0 = gen_rtx_SUBREG (mode_for_size (BITS_PER_WORD, MODE_INT, 0),
		                op0, (offset * UNITS_PER_WORD));
	}
      offset = 0;
    }
  else
    op0 = protect_from_queue (str_rtx, 1);

  /* Now OFFSET is nonzero only for memory operands.  */

  if (unsignedp)
    {
      if (HAVE_extzv
	  && (GET_MODE_BITSIZE (extzv_mode) >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > GET_MODE_BITSIZE (extzv_mode))))
	{
	  unsigned HOST_WIDE_INT xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0;
	  rtx xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode = mode_for_extraction (EP_extzv, 0);

	  if (GET_CODE (xop0) == MEM)
	    {
	      int save_volatile_ok = volatile_ok;
	      volatile_ok = 1;

	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extzv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment. If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode. Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      MEM_ALIGN (xop0), maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
			  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
		    goto extzv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = adjust_address (xop0, bestmode, xoffset);

		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = adjust_address (xop0, byte_mode, xoffset);

	      volatile_ok = save_volatile_ok;
	    }
	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extzv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extzv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* Now convert from counting within UNIT to counting in MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += GET_MODE_BITSIZE (maxmode) - unit;

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extzv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extzv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extzv (protect_from_queue (xtarget, 1),
			   xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 1);
	    }
	}
      else
      extzv_loses:
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 1);
    }
  else
    {
      if (HAVE_extv
	  && (GET_MODE_BITSIZE (extv_mode) >= bitsize)
	  && ! ((GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
		&& (bitsize + bitpos > GET_MODE_BITSIZE (extv_mode))))
	{
	  int xbitpos = bitpos, xoffset = offset;
	  rtx bitsize_rtx, bitpos_rtx;
	  rtx last = get_last_insn ();
	  rtx xop0 = op0, xtarget = target;
	  rtx xspec_target = spec_target;
	  rtx xspec_target_subreg = spec_target_subreg;
	  rtx pat;
	  enum machine_mode maxmode = mode_for_extraction (EP_extv, 0);

	  if (GET_CODE (xop0) == MEM)
	    {
	      /* Is the memory operand acceptable?  */
	      if (! ((*insn_data[(int) CODE_FOR_extv].operand[1].predicate)
		     (xop0, GET_MODE (xop0))))
		{
		  /* No, load into a reg and extract from there.  */
		  enum machine_mode bestmode;

		  /* Get the mode to use for inserting into this field.  If
		     OP0 is BLKmode, get the smallest mode consistent with the
		     alignment. If OP0 is a non-BLKmode object that is no
		     wider than MAXMODE, use its mode. Otherwise, use the
		     smallest mode containing the field.  */

		  if (GET_MODE (xop0) == BLKmode
		      || (GET_MODE_SIZE (GET_MODE (op0))
			  > GET_MODE_SIZE (maxmode)))
		    bestmode = get_best_mode (bitsize, bitnum,
					      MEM_ALIGN (xop0), maxmode,
					      MEM_VOLATILE_P (xop0));
		  else
		    bestmode = GET_MODE (xop0);

		  if (bestmode == VOIDmode
		      || (SLOW_UNALIGNED_ACCESS (bestmode, MEM_ALIGN (xop0))
			  && GET_MODE_BITSIZE (bestmode) > MEM_ALIGN (xop0)))
		    goto extv_loses;

		  /* Compute offset as multiple of this unit,
		     counting in bytes.  */
		  unit = GET_MODE_BITSIZE (bestmode);
		  xoffset = (bitnum / unit) * GET_MODE_SIZE (bestmode);
		  xbitpos = bitnum % unit;
		  xop0 = adjust_address (xop0, bestmode, xoffset);

		  /* Fetch it to a register in that size.  */
		  xop0 = force_reg (bestmode, xop0);

		  /* XBITPOS counts within UNIT, which is what is expected.  */
		}
	      else
		/* Get ref to first byte containing part of the field.  */
		xop0 = adjust_address (xop0, byte_mode, xoffset);
	    }

	  /* If op0 is a register, we need it in MAXMODE (which is usually
	     SImode) to make it acceptable to the format of extv.  */
	  if (GET_CODE (xop0) == SUBREG && GET_MODE (xop0) != maxmode)
	    goto extv_loses;
	  if (GET_CODE (xop0) == REG && GET_MODE (xop0) != maxmode)
	    xop0 = gen_rtx_SUBREG (maxmode, xop0, 0);

	  /* On big-endian machines, we count bits from the most significant.
	     If the bit field insn does not, we must invert.  */
	  if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
	    xbitpos = unit - bitsize - xbitpos;

	  /* XBITPOS counts within a size of UNIT.
	     Adjust to count within a size of MAXMODE.  */
	  if (BITS_BIG_ENDIAN && GET_CODE (xop0) != MEM)
	    xbitpos += (GET_MODE_BITSIZE (maxmode) - unit);

	  unit = GET_MODE_BITSIZE (maxmode);

	  if (xtarget == 0
	      || (flag_force_mem && GET_CODE (xtarget) == MEM))
	    xtarget = xspec_target = gen_reg_rtx (tmode);

	  if (GET_MODE (xtarget) != maxmode)
	    {
	      if (GET_CODE (xtarget) == REG)
		{
		  int wider = (GET_MODE_SIZE (maxmode)
			       > GET_MODE_SIZE (GET_MODE (xtarget)));
		  xtarget = gen_lowpart (maxmode, xtarget);
		  if (wider)
		    xspec_target_subreg = xtarget;
		}
	      else
		xtarget = gen_reg_rtx (maxmode);
	    }

	  /* If this machine's extv insists on a register target,
	     make sure we have one.  */
	  if (! ((*insn_data[(int) CODE_FOR_extv].operand[0].predicate)
		 (xtarget, maxmode)))
	    xtarget = gen_reg_rtx (maxmode);

	  bitsize_rtx = GEN_INT (bitsize);
	  bitpos_rtx = GEN_INT (xbitpos);

	  pat = gen_extv (protect_from_queue (xtarget, 1),
			  xop0, bitsize_rtx, bitpos_rtx);
	  if (pat)
	    {
	      emit_insn (pat);
	      target = xtarget;
	      spec_target = xspec_target;
	      spec_target_subreg = xspec_target_subreg;
	    }
	  else
	    {
	      delete_insns_since (last);
	      target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
						bitpos, target, 0);
	    }
	}
      else
      extv_loses:
	target = extract_fixed_bit_field (int_mode, op0, offset, bitsize,
					  bitpos, target, 0);
    }
  if (target == spec_target)
    return target;
  if (target == spec_target_subreg)
    return spec_target;
  if (GET_MODE (target) != tmode && GET_MODE (target) != mode)
    {
      /* If the target mode is floating-point, first convert to the
	 integer mode of that size and then access it as a floating-point
	 value via a SUBREG.  */
      if (GET_MODE_CLASS (tmode) != MODE_INT
	  && GET_MODE_CLASS (tmode) != MODE_PARTIAL_INT)
	{
	  target = convert_to_mode (mode_for_size (GET_MODE_BITSIZE (tmode),
						   MODE_INT, 0),
				    target, unsignedp);
	  return gen_lowpart (tmode, target);
	}
      else
	return convert_to_mode (tmode, target, unsignedp);
    }
  return target;
}
1545 
1546 /* Extract a bit field using shifts and boolean operations.
1547    Returns an rtx to represent the value.
1548    OP0 addresses a register (word) or memory (byte).
1549    BITPOS says which bit within the word or byte the bit field starts in.
1550    OFFSET says how many bytes farther the bit field starts;
1551     it is 0 if OP0 is a register.
1552    BITSIZE says how many bits long the bit field is.
1553     (If OP0 is a register, it may be narrower than a full word,
1554      but BITPOS still counts within a full word,
1555      which is significant on bigendian machines.)
1556 
1557    UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1558    If TARGET is nonzero, attempts to store the value there
1559    and return TARGET, but this is not guaranteed.
1560    If TARGET is not used, create a pseudo-reg of mode TMODE for the value.  */
1561 
1562 static rtx
1563 extract_fixed_bit_field (tmode, op0, offset, bitsize, bitpos,
1564 			 target, unsignedp)
1565      enum machine_mode tmode;
1566      rtx op0, target;
1567      unsigned HOST_WIDE_INT offset, bitsize, bitpos;
1568      int unsignedp;
1569 {
1570   unsigned int total_bits = BITS_PER_WORD;
1571   enum machine_mode mode;
1572 
1573   if (GET_CODE (op0) == SUBREG || GET_CODE (op0) == REG)
1574     {
1575       /* Special treatment for a bit field split across two registers.  */
1576       if (bitsize + bitpos > BITS_PER_WORD)
1577 	return extract_split_bit_field (op0, bitsize, bitpos, unsignedp);
1578     }
1579   else
1580     {
1581       /* Get the proper mode to use for this field.  We want a mode that
1582 	 includes the entire field.  If such a mode would be larger than
1583 	 a word, we won't be doing the extraction the normal way.  */
1584 
1585       mode = get_best_mode (bitsize, bitpos + offset * BITS_PER_UNIT,
1586 			    MEM_ALIGN (op0), word_mode, MEM_VOLATILE_P (op0));
1587 
1588       if (mode == VOIDmode)
1589 	/* The only way this should occur is if the field spans word
1590 	   boundaries.  */
1591 	return extract_split_bit_field (op0, bitsize,
1592 					bitpos + offset * BITS_PER_UNIT,
1593 					unsignedp);
1594 
1595       total_bits = GET_MODE_BITSIZE (mode);
1596 
1597       /* Make sure bitpos is valid for the chosen mode.  Adjust BITPOS to
1598 	 be in the range 0 to total_bits-1, and put any excess bytes in
1599 	 OFFSET.  */
1600       if (bitpos >= total_bits)
1601 	{
1602 	  offset += (bitpos / total_bits) * (total_bits / BITS_PER_UNIT);
1603 	  bitpos -= ((bitpos / total_bits) * (total_bits / BITS_PER_UNIT)
1604 		     * BITS_PER_UNIT);
1605 	}
1606 
1607       /* Get ref to an aligned byte, halfword, or word containing the field.
1608 	 Adjust BITPOS to be position within a word,
1609 	 and OFFSET to be the offset of that word.
1610 	 Then alter OP0 to refer to that word.  */
1611       bitpos += (offset % (total_bits / BITS_PER_UNIT)) * BITS_PER_UNIT;
1612       offset -= (offset % (total_bits / BITS_PER_UNIT));
1613       op0 = adjust_address (op0, mode, offset);
1614     }
1615 
1616   mode = GET_MODE (op0);
1617 
1618   if (BYTES_BIG_ENDIAN)
1619     /* BITPOS is the distance between our msb and that of OP0.
1620        Convert it to the distance from the lsb.  */
1621     bitpos = total_bits - bitsize - bitpos;
1622 
1623   /* Now BITPOS is always the distance between the field's lsb and that of OP0.
1624      We have reduced the big-endian case to the little-endian case.  */
1625 
1626   if (unsignedp)
1627     {
1628       if (bitpos)
1629 	{
1630 	  /* If the field does not already start at the lsb,
1631 	     shift it so it does.  */
1632 	  tree amount = build_int_2 (bitpos, 0);
1633 	  /* Maybe propagate the target for the shift.  */
1634 	  /* But not if we will return it--could confuse integrate.c.  */
1635 	  rtx subtarget = (target != 0 && GET_CODE (target) == REG
1636 			   && !REG_FUNCTION_VALUE_P (target)
1637 			   ? target : 0);
1638 	  if (tmode != mode) subtarget = 0;
1639 	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1640 	}
1641       /* Convert the value to the desired mode.  */
1642       if (mode != tmode)
1643 	op0 = convert_to_mode (tmode, op0, 1);
1644 
1645       /* Unless the msb of the field used to be the msb when we shifted,
1646 	 mask out the upper bits.  */
1647 
1648       if (GET_MODE_BITSIZE (mode) != bitpos + bitsize)
1649 	return expand_binop (GET_MODE (op0), and_optab, op0,
1650 			     mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1651 			     target, 1, OPTAB_LIB_WIDEN);
1652       return op0;
1653     }
1654 
1655   /* To extract a signed bit-field, first shift its msb to the msb of the word,
1656      then arithmetic-shift its lsb to the lsb of the word.  */
1657   op0 = force_reg (mode, op0);
1658   if (mode != tmode)
1659     target = 0;
1660 
1661   /* Find the narrowest integer mode that contains the field.  */
1662 
1663   for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1664        mode = GET_MODE_WIDER_MODE (mode))
1665     if (GET_MODE_BITSIZE (mode) >= bitsize + bitpos)
1666       {
1667 	op0 = convert_to_mode (mode, op0, 0);
1668 	break;
1669       }
1670 
1671   if (GET_MODE_BITSIZE (mode) != (bitsize + bitpos))
1672     {
1673       tree amount
1674 	= build_int_2 (GET_MODE_BITSIZE (mode) - (bitsize + bitpos), 0);
1675       /* Maybe propagate the target for the shift.  */
1676       /* But not if we will return the result--could confuse integrate.c.  */
1677       rtx subtarget = (target != 0 && GET_CODE (target) == REG
1678 		       && ! REG_FUNCTION_VALUE_P (target)
1679 		       ? target : 0);
1680       op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1681     }
1682 
1683   return expand_shift (RSHIFT_EXPR, mode, op0,
1684 		       build_int_2 (GET_MODE_BITSIZE (mode) - bitsize, 0),
1685 		       target, 0);
1686 }
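/* Editor's aside -- an illustrative host-level sketch, not part of the
   original file: the sign-extraction idiom above, shifting the field's
   msb up to the msb of the word and then arithmetic-shifting its lsb
   down to bit 0.  Assumes a 32-bit int and a signed right shift that
   is arithmetic, as on GCC's targets.  */
#if 0
static int
extract_signed_field_sketch (word, bitpos, bitsize)
     unsigned int word;
     int bitpos, bitsize;
{
  /* E.g. bitpos 3, bitsize 5 extracts bits 3..7, sign-extended.  */
  return ((int) (word << (32 - bitsize - bitpos))) >> (32 - bitsize);
}
#endif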
1687 
1688 /* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
1689    of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
1690    complement of that if COMPLEMENT.  The mask is truncated if
1691    necessary to the width of mode MODE.  The mask is zero-extended if
1692    BITSIZE+BITPOS is too small for MODE.  */
1693 
1694 static rtx
1695 mask_rtx (mode, bitpos, bitsize, complement)
1696      enum machine_mode mode;
1697      int bitpos, bitsize, complement;
1698 {
1699   HOST_WIDE_INT masklow, maskhigh;
1700 
1701   if (bitpos < HOST_BITS_PER_WIDE_INT)
1702     masklow = (HOST_WIDE_INT) -1 << bitpos;
1703   else
1704     masklow = 0;
1705 
1706   if (bitpos + bitsize < HOST_BITS_PER_WIDE_INT)
1707     masklow &= ((unsigned HOST_WIDE_INT) -1
1708 		>> (HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1709 
1710   if (bitpos <= HOST_BITS_PER_WIDE_INT)
1711     maskhigh = -1;
1712   else
1713     maskhigh = (HOST_WIDE_INT) -1 << (bitpos - HOST_BITS_PER_WIDE_INT);
1714 
1715   if (bitpos + bitsize > HOST_BITS_PER_WIDE_INT)
1716     maskhigh &= ((unsigned HOST_WIDE_INT) -1
1717 		 >> (2 * HOST_BITS_PER_WIDE_INT - bitpos - bitsize));
1718   else
1719     maskhigh = 0;
1720 
1721   if (complement)
1722     {
1723       maskhigh = ~maskhigh;
1724       masklow = ~masklow;
1725     }
1726 
1727   return immed_double_const (masklow, maskhigh, mode);
1728 }
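/* Editor's example, not in the original source: with a 64-bit
   HOST_WIDE_INT, mask_rtx (SImode, 4, 8, 0) builds the constant
   0x00000ff0 -- eight ones followed by four zeros -- while
   mask_rtx (SImode, 4, 8, 1) builds its SImode complement 0xfffff00f.  */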
1729 
1730 /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1731    VALUE truncated to BITSIZE bits and then shifted left BITPOS bits.  */
1732 
1733 static rtx
1734 lshift_value (mode, value, bitpos, bitsize)
1735      enum machine_mode mode;
1736      rtx value;
1737      int bitpos, bitsize;
1738 {
1739   unsigned HOST_WIDE_INT v = INTVAL (value);
1740   HOST_WIDE_INT low, high;
1741 
1742   if (bitsize < HOST_BITS_PER_WIDE_INT)
1743     v &= ~((HOST_WIDE_INT) -1 << bitsize);
1744 
1745   if (bitpos < HOST_BITS_PER_WIDE_INT)
1746     {
1747       low = v << bitpos;
1748       high = (bitpos > 0 ? (v >> (HOST_BITS_PER_WIDE_INT - bitpos)) : 0);
1749     }
1750   else
1751     {
1752       low = 0;
1753       high = v << (bitpos - HOST_BITS_PER_WIDE_INT);
1754     }
1755 
1756   return immed_double_const (low, high, mode);
1757 }
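/* Editor's example, not in the original source: lshift_value (SImode,
   GEN_INT (0x7b), 4, 8) truncates 0x7b to its low 8 bits and shifts
   left by 4, giving the constant 0x7b0.  */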
1758 
1759 /* Extract a bit field that is split across two words
1760    and return an RTX for the result.
1761 
1762    OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1763    BITSIZE is the field width; BITPOS, position of its first bit, in the word.
1764    UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.  */
1765 
1766 static rtx
1767 extract_split_bit_field (op0, bitsize, bitpos, unsignedp)
1768      rtx op0;
1769      unsigned HOST_WIDE_INT bitsize, bitpos;
1770      int unsignedp;
1771 {
1772   unsigned int unit;
1773   unsigned int bitsdone = 0;
1774   rtx result = NULL_RTX;
1775   int first = 1;
1776 
1777   /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1778      much at a time.  */
1779   if (GET_CODE (op0) == REG || GET_CODE (op0) == SUBREG)
1780     unit = BITS_PER_WORD;
1781   else
1782     unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1783 
1784   while (bitsdone < bitsize)
1785     {
1786       unsigned HOST_WIDE_INT thissize;
1787       rtx part, word;
1788       unsigned HOST_WIDE_INT thispos;
1789       unsigned HOST_WIDE_INT offset;
1790 
1791       offset = (bitpos + bitsdone) / unit;
1792       thispos = (bitpos + bitsdone) % unit;
1793 
1794       /* THISSIZE must not overrun a word boundary.  Otherwise,
1795 	 extract_fixed_bit_field will call us again, and we will mutually
1796 	 recurse forever.  */
1797       thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1798       thissize = MIN (thissize, unit - thispos);
1799 
1800       /* If OP0 is a register, then handle OFFSET here.
1801 
1802 	 When handling multiword bitfields, extract_bit_field may pass
1803 	 down a word_mode SUBREG of a larger REG for a bitfield that actually
1804 	 crosses a word boundary.  Thus, for a SUBREG, we must find
1805 	 the current word starting from the base register.  */
1806       if (GET_CODE (op0) == SUBREG)
1807 	{
1808 	  int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1809 	  word = operand_subword_force (SUBREG_REG (op0), word_offset,
1810 					GET_MODE (SUBREG_REG (op0)));
1811 	  offset = 0;
1812 	}
1813       else if (GET_CODE (op0) == REG)
1814 	{
1815 	  word = operand_subword_force (op0, offset, GET_MODE (op0));
1816 	  offset = 0;
1817 	}
1818       else
1819 	word = op0;
1820 
1821       /* Extract the parts in bit-counting order,
1822 	 whose meaning is determined by BYTES_PER_UNIT.
1823 	 OFFSET is in UNITs, and UNIT is in bits.
1824 	 extract_fixed_bit_field wants offset in bytes.  */
1825       part = extract_fixed_bit_field (word_mode, word,
1826 				      offset * unit / BITS_PER_UNIT,
1827 				      thissize, thispos, 0, 1);
1828       bitsdone += thissize;
1829 
1830       /* Shift this part into place for the result.  */
1831       if (BYTES_BIG_ENDIAN)
1832 	{
1833 	  if (bitsize != bitsdone)
1834 	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
1835 				 build_int_2 (bitsize - bitsdone, 0), 0, 1);
1836 	}
1837       else
1838 	{
1839 	  if (bitsdone != thissize)
1840 	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
1841 				 build_int_2 (bitsdone - thissize, 0), 0, 1);
1842 	}
1843 
1844       if (first)
1845 	result = part;
1846       else
1847 	/* Combine the parts with bitwise or.  This works
1848 	   because we extracted each part as an unsigned bit field.  */
1849 	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
1850 			       OPTAB_LIB_WIDEN);
1851 
1852       first = 0;
1853     }
1854 
1855   /* Unsigned bit field: we are done.  */
1856   if (unsignedp)
1857     return result;
1858   /* Signed bit field: sign-extend with two arithmetic shifts.  */
1859   result = expand_shift (LSHIFT_EXPR, word_mode, result,
1860 			 build_int_2 (BITS_PER_WORD - bitsize, 0),
1861 			 NULL_RTX, 0);
1862   return expand_shift (RSHIFT_EXPR, word_mode, result,
1863 		       build_int_2 (BITS_PER_WORD - bitsize, 0), NULL_RTX, 0);
1864 }
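/* Editor's worked example, not in the original source: with 32-bit
   words, a 12-bit field at bit position 26 of a register pair is
   extracted in two passes: 6 bits at position 26 of word 0, then
   6 bits at position 0 of word 1.  On a little-endian target the
   second part is shifted left by 6 before being IORed in, and the
   12-bit result is then sign- or zero-extended as requested.  */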
1865 
1866 /* Add INC into TARGET.  */
1867 
1868 void
1869 expand_inc (target, inc)
1870      rtx target, inc;
1871 {
1872   rtx value = expand_binop (GET_MODE (target), add_optab,
1873 			    target, inc,
1874 			    target, 0, OPTAB_LIB_WIDEN);
1875   if (value != target)
1876     emit_move_insn (target, value);
1877 }
1878 
1879 /* Subtract DEC from TARGET.  */
1880 
1881 void
1882 expand_dec (target, dec)
1883      rtx target, dec;
1884 {
1885   rtx value = expand_binop (GET_MODE (target), sub_optab,
1886 			    target, dec,
1887 			    target, 0, OPTAB_LIB_WIDEN);
1888   if (value != target)
1889     emit_move_insn (target, value);
1890 }
1891 
1892 /* Output a shift instruction for expression code CODE,
1893    with SHIFTED being the rtx for the value to shift,
1894    and AMOUNT the tree for the amount to shift by.
1895    Store the result in the rtx TARGET, if that is convenient.
1896    If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
1897    Return the rtx for where the value is.  */
1898 
1899 rtx
1900 expand_shift (code, mode, shifted, amount, target, unsignedp)
1901      enum tree_code code;
1902      enum machine_mode mode;
1903      rtx shifted;
1904      tree amount;
1905      rtx target;
1906      int unsignedp;
1907 {
1908   rtx op1, temp = 0;
1909   int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
1910   int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
1911   int try;
1912 
1913   /* Previously detected shift-counts computed by NEGATE_EXPR
1914      and shifted in the other direction; but that does not work
1915      on all machines.  */
1916 
1917   op1 = expand_expr (amount, NULL_RTX, VOIDmode, 0);
1918 
1919 #ifdef SHIFT_COUNT_TRUNCATED
1920   if (SHIFT_COUNT_TRUNCATED)
1921     {
1922       if (GET_CODE (op1) == CONST_INT
1923 	  && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
1924 	      (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))
1925 	op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
1926 		       % GET_MODE_BITSIZE (mode));
1927       else if (GET_CODE (op1) == SUBREG
1928 	       && subreg_lowpart_p (op1))
1929 	op1 = SUBREG_REG (op1);
1930     }
1931 #endif
1932 
1933   if (op1 == const0_rtx)
1934     return shifted;
1935 
1936   for (try = 0; temp == 0 && try < 3; try++)
1937     {
1938       enum optab_methods methods;
1939 
1940       if (try == 0)
1941 	methods = OPTAB_DIRECT;
1942       else if (try == 1)
1943 	methods = OPTAB_WIDEN;
1944       else
1945 	methods = OPTAB_LIB_WIDEN;
1946 
1947       if (rotate)
1948 	{
1949 	  /* Widening does not work for rotation.  */
1950 	  if (methods == OPTAB_WIDEN)
1951 	    continue;
1952 	  else if (methods == OPTAB_LIB_WIDEN)
1953 	    {
1954 	      /* If we have been unable to open-code this by a rotation,
1955 		 do it as the IOR of two shifts.  I.e., to rotate A
1956 		 by N bits, compute (A << N) | ((unsigned) A >> (C - N))
1957 		 where C is the bitsize of A.
1958 
1959 		 It is theoretically possible that the target machine might
1960 		 not be able to perform either shift and hence we would
1961 		 be making two libcalls rather than just the one for the
1962 		 shift (similarly if IOR could not be done).  We will allow
1963 		 this extremely unlikely lossage to avoid complicating the
1964 		 code below.  */
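	      /* Editor's illustration, not part of the original file:
		 for a 32-bit mode and 0 < N < 32 the identity used
		 here reads, in C on an unsigned operand,
		     rotl: (a << n) | (a >> (32 - n))
		     rotr: (a >> n) | (a << (32 - n));
		 N == 0 never reaches this point thanks to the
		 const0_rtx check above.  */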
1965 
1966 	      rtx subtarget = target == shifted ? 0 : target;
1967 	      rtx temp1;
1968 	      tree type = TREE_TYPE (amount);
1969 	      tree new_amount = make_tree (type, op1);
1970 	      tree other_amount
1971 		= fold (build (MINUS_EXPR, type,
1972 			       convert (type,
1973 					build_int_2 (GET_MODE_BITSIZE (mode),
1974 						     0)),
1975 			       amount));
1976 
1977 	      shifted = force_reg (mode, shifted);
1978 
1979 	      temp = expand_shift (left ? LSHIFT_EXPR : RSHIFT_EXPR,
1980 				   mode, shifted, new_amount, subtarget, 1);
1981 	      temp1 = expand_shift (left ? RSHIFT_EXPR : LSHIFT_EXPR,
1982 				    mode, shifted, other_amount, 0, 1);
1983 	      return expand_binop (mode, ior_optab, temp, temp1, target,
1984 				   unsignedp, methods);
1985 	    }
1986 
1987 	  temp = expand_binop (mode,
1988 			       left ? rotl_optab : rotr_optab,
1989 			       shifted, op1, target, unsignedp, methods);
1990 
1991 	  /* If we don't have the rotate, but we are rotating by a constant
1992 	     that is in range, try a rotate in the opposite direction.  */
1993 
1994 	  if (temp == 0 && GET_CODE (op1) == CONST_INT
1995 	      && INTVAL (op1) > 0
1996 	      && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1997 	    temp = expand_binop (mode,
1998 				 left ? rotr_optab : rotl_optab,
1999 				 shifted,
2000 				 GEN_INT (GET_MODE_BITSIZE (mode)
2001 					  - INTVAL (op1)),
2002 				 target, unsignedp, methods);
2003 	}
2004       else if (unsignedp)
2005 	temp = expand_binop (mode,
2006 			     left ? ashl_optab : lshr_optab,
2007 			     shifted, op1, target, unsignedp, methods);
2008 
2009       /* Do arithmetic shifts.
2010 	 Also, if we are going to widen the operand, we can just as well
2011 	 use an arithmetic right-shift instead of a logical one.  */
2012       if (temp == 0 && ! rotate
2013 	  && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2014 	{
2015 	  enum optab_methods methods1 = methods;
2016 
2017 	  /* If trying to widen a log shift to an arithmetic shift,
2018 	     don't accept an arithmetic shift of the same size.  */
2019 	  if (unsignedp)
2020 	    methods1 = OPTAB_MUST_WIDEN;
2021 
2022 	  /* Arithmetic shift */
2023 
2024 	  temp = expand_binop (mode,
2025 			       left ? ashl_optab : ashr_optab,
2026 			       shifted, op1, target, unsignedp, methods1);
2027 	}
2028 
2029       /* We used to try extzv here for logical right shifts, but that was
2030 	 only useful for one machine, the VAX, and caused poor code
2031 	 generation there for lshrdi3, so the code was deleted and a
2032 	 define_expand for lshrsi3 was added to vax.md.  */
2033     }
2034 
2035   if (temp == 0)
2036     abort ();
2037   return temp;
2038 }
2039 
2040 enum alg_code { alg_zero, alg_m, alg_shift,
2041 		  alg_add_t_m2, alg_sub_t_m2,
2042 		  alg_add_factor, alg_sub_factor,
2043 		  alg_add_t2_m, alg_sub_t2_m,
2044 		  alg_add, alg_subtract, alg_factor, alg_shiftop };
2045 
2046 /* This structure records a sequence of operations.
2047    `ops' is the number of operations recorded.
2048    `cost' is their total cost.
2049    The operations are stored in `op' and the corresponding
2050    logarithms of the integer coefficients in `log'.
2051 
2052    These are the operations:
2053    alg_zero		total := 0;
2054    alg_m		total := multiplicand;
2055    alg_shift		total := total * coeff;
2056    alg_add_t_m2		total := total + multiplicand * coeff;
2057    alg_sub_t_m2		total := total - multiplicand * coeff;
2058    alg_add_factor	total := total * coeff + total;
2059    alg_sub_factor	total := total * coeff - total;
2060    alg_add_t2_m		total := total * coeff + multiplicand;
2061    alg_sub_t2_m		total := total * coeff - multiplicand;
2062 
2063    The first operand must be either alg_zero or alg_m.  */
2064 
2065 struct algorithm
2066 {
2067   short cost;
2068   short ops;
2069   /* The size of the OP and LOG fields are not directly related to the
2070      word size, but the worst-case algorithms will be if we have few
2071      consecutive ones or zeros, i.e., a multiplicand like 10101010101...
2072      In that case we will generate shift-by-2, add, shift-by-2, add,...,
2073      in total wordsize operations.  */
2074   enum alg_code op[MAX_BITS_PER_WORD];
2075   char log[MAX_BITS_PER_WORD];
2076 };
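/* Editor's example, not in the original source: multiplying by 10 can
   be recorded as the sequence  alg_m,  alg_add_t2_m (log 2),
   alg_shift (log 1), i.e.
       total := x;  total := (total << 2) + x;  total := total << 1;
   so that x * 10 == ((x << 2) + x) << 1.  */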
2077 
2078 static void synth_mult			PARAMS ((struct algorithm *,
2079 						 unsigned HOST_WIDE_INT,
2080 						 int));
2081 static unsigned HOST_WIDE_INT choose_multiplier PARAMS ((unsigned HOST_WIDE_INT,
2082 							 int, int,
2083 							 unsigned HOST_WIDE_INT *,
2084 							 int *, int *));
2085 static unsigned HOST_WIDE_INT invert_mod2n	PARAMS ((unsigned HOST_WIDE_INT,
2086 							 int));
2087 /* Compute and return the best algorithm for multiplying by T.
2088    The algorithm must cost less than COST_LIMIT.
2089    If retval.cost >= COST_LIMIT, no algorithm was found and all
2090    other fields of the returned struct are undefined.  */
2091 
2092 static void
2093 synth_mult (alg_out, t, cost_limit)
2094      struct algorithm *alg_out;
2095      unsigned HOST_WIDE_INT t;
2096      int cost_limit;
2097 {
2098   int m;
2099   struct algorithm *alg_in, *best_alg;
2100   int cost;
2101   unsigned HOST_WIDE_INT q;
2102 
2103   /* Indicate that no algorithm is yet found.  If no algorithm
2104      is found, this value will be returned and indicate failure.  */
2105   alg_out->cost = cost_limit;
2106 
2107   if (cost_limit <= 0)
2108     return;
2109 
2110   /* t == 1 can be done in zero cost.  */
2111   if (t == 1)
2112     {
2113       alg_out->ops = 1;
2114       alg_out->cost = 0;
2115       alg_out->op[0] = alg_m;
2116       return;
2117     }
2118 
2119   /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
2120      fail now.  */
2121   if (t == 0)
2122     {
2123       if (zero_cost >= cost_limit)
2124 	return;
2125       else
2126 	{
2127 	  alg_out->ops = 1;
2128 	  alg_out->cost = zero_cost;
2129 	  alg_out->op[0] = alg_zero;
2130 	  return;
2131 	}
2132     }
2133 
2134   /* We'll be needing a couple extra algorithm structures now.  */
2135 
2136   alg_in = (struct algorithm *)alloca (sizeof (struct algorithm));
2137   best_alg = (struct algorithm *)alloca (sizeof (struct algorithm));
2138 
2139   /* If we have a group of zero bits at the low-order part of T, try
2140      multiplying by the remaining bits and then doing a shift.  */
2141 
2142   if ((t & 1) == 0)
2143     {
2144       m = floor_log2 (t & -t);	/* m = number of low zero bits */
2145       if (m < BITS_PER_WORD)
2146 	{
2147 	  q = t >> m;
2148 	  cost = shift_cost[m];
2149 	  synth_mult (alg_in, q, cost_limit - cost);
2150 
2151 	  cost += alg_in->cost;
2152 	  if (cost < cost_limit)
2153 	    {
2154 	      struct algorithm *x;
2155 	      x = alg_in, alg_in = best_alg, best_alg = x;
2156 	      best_alg->log[best_alg->ops] = m;
2157 	      best_alg->op[best_alg->ops] = alg_shift;
2158 	      cost_limit = cost;
2159 	    }
2160 	}
2161     }
2162 
2163   /* If we have an odd number, add or subtract one.  */
2164   if ((t & 1) != 0)
2165     {
2166       unsigned HOST_WIDE_INT w;
2167 
2168       for (w = 1; (w & t) != 0; w <<= 1)
2169 	;
2170       /* If T was -1, then W will be zero after the loop.  This is another
2171 	 case where T ends with ...111.  Handling this with (T + 1) and
2172 	 subtracting 1 produces slightly better code and results in
2173 	 algorithm selection that is much faster than treating it like
2174 	 the ...0111 case below.  */
2175       if (w == 0
2176 	  || (w > 2
2177 	      /* Reject the case where t is 3.
2178 		 Thus we prefer addition in that case.  */
2179 	      && t != 3))
2180 	{
2181 	  /* T ends with ...111.  Multiply by (T + 1) and subtract 1.  */
2182 
2183 	  cost = add_cost;
2184 	  synth_mult (alg_in, t + 1, cost_limit - cost);
2185 
2186 	  cost += alg_in->cost;
2187 	  if (cost < cost_limit)
2188 	    {
2189 	      struct algorithm *x;
2190 	      x = alg_in, alg_in = best_alg, best_alg = x;
2191 	      best_alg->log[best_alg->ops] = 0;
2192 	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
2193 	      cost_limit = cost;
2194 	    }
2195 	}
2196       else
2197 	{
2198 	  /* T ends with ...01 or ...011.  Multiply by (T - 1) and add 1.  */
2199 
2200 	  cost = add_cost;
2201 	  synth_mult (alg_in, t - 1, cost_limit - cost);
2202 
2203 	  cost += alg_in->cost;
2204 	  if (cost < cost_limit)
2205 	    {
2206 	      struct algorithm *x;
2207 	      x = alg_in, alg_in = best_alg, best_alg = x;
2208 	      best_alg->log[best_alg->ops] = 0;
2209 	      best_alg->op[best_alg->ops] = alg_add_t_m2;
2210 	      cost_limit = cost;
2211 	    }
2212 	}
2213     }
2214 
2215   /* Look for factors of t of the form
2216      t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2217      If we find such a factor, we can multiply by t using an algorithm that
2218      multiplies by q, shift the result by m and add/subtract it to itself.
2219 
2220      We search for large factors first and loop down, even if large factors
2221      are less probable than small; if we find a large factor we will find a
2222      good sequence quickly, and therefore be able to prune (by decreasing
2223      COST_LIMIT) the search.  */
2224 
2225   for (m = floor_log2 (t - 1); m >= 2; m--)
2226     {
2227       unsigned HOST_WIDE_INT d;
2228 
2229       d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2230       if (t % d == 0 && t > d && m < BITS_PER_WORD)
2231 	{
2232 	  cost = MIN (shiftadd_cost[m], add_cost + shift_cost[m]);
2233 	  synth_mult (alg_in, t / d, cost_limit - cost);
2234 
2235 	  cost += alg_in->cost;
2236 	  if (cost < cost_limit)
2237 	    {
2238 	      struct algorithm *x;
2239 	      x = alg_in, alg_in = best_alg, best_alg = x;
2240 	      best_alg->log[best_alg->ops] = m;
2241 	      best_alg->op[best_alg->ops] = alg_add_factor;
2242 	      cost_limit = cost;
2243 	    }
2244 	  /* Other factors will have been taken care of in the recursion.  */
2245 	  break;
2246 	}
2247 
2248       d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2249       if (t % d == 0 && t > d && m < BITS_PER_WORD)
2250 	{
2251 	  cost = MIN (shiftsub_cost[m], add_cost + shift_cost[m]);
2252 	  synth_mult (alg_in, t / d, cost_limit - cost);
2253 
2254 	  cost += alg_in->cost;
2255 	  if (cost < cost_limit)
2256 	    {
2257 	      struct algorithm *x;
2258 	      x = alg_in, alg_in = best_alg, best_alg = x;
2259 	      best_alg->log[best_alg->ops] = m;
2260 	      best_alg->op[best_alg->ops] = alg_sub_factor;
2261 	      cost_limit = cost;
2262 	    }
2263 	  break;
2264 	}
2265     }
2266 
2267   /* Try shift-and-add (load effective address) instructions,
2268      i.e. do a*3, a*5, a*9.  */
2269   if ((t & 1) != 0)
2270     {
2271       q = t - 1;
2272       q = q & -q;
2273       m = exact_log2 (q);
2274       if (m >= 0 && m < BITS_PER_WORD)
2275 	{
2276 	  cost = shiftadd_cost[m];
2277 	  synth_mult (alg_in, (t - 1) >> m, cost_limit - cost);
2278 
2279 	  cost += alg_in->cost;
2280 	  if (cost < cost_limit)
2281 	    {
2282 	      struct algorithm *x;
2283 	      x = alg_in, alg_in = best_alg, best_alg = x;
2284 	      best_alg->log[best_alg->ops] = m;
2285 	      best_alg->op[best_alg->ops] = alg_add_t2_m;
2286 	      cost_limit = cost;
2287 	    }
2288 	}
2289 
2290       q = t + 1;
2291       q = q & -q;
2292       m = exact_log2 (q);
2293       if (m >= 0 && m < BITS_PER_WORD)
2294 	{
2295 	  cost = shiftsub_cost[m];
2296 	  synth_mult (alg_in, (t + 1) >> m, cost_limit - cost);
2297 
2298 	  cost += alg_in->cost;
2299 	  if (cost < cost_limit)
2300 	    {
2301 	      struct algorithm *x;
2302 	      x = alg_in, alg_in = best_alg, best_alg = x;
2303 	      best_alg->log[best_alg->ops] = m;
2304 	      best_alg->op[best_alg->ops] = alg_sub_t2_m;
2305 	      cost_limit = cost;
2306 	    }
2307 	}
2308     }
2309 
2310   /* If cost_limit has not decreased since we stored it in alg_out->cost,
2311      we have not found any algorithm.  */
2312   if (cost_limit == alg_out->cost)
2313     return;
2314 
2315   /* If we are getting a too long sequence for `struct algorithm'
2316      to record, make this search fail.  */
2317   if (best_alg->ops == MAX_BITS_PER_WORD)
2318     return;
2319 
2320   /* Copy the algorithm from temporary space to the space at alg_out.
2321      We avoid using structure assignment because the majority of
2322      best_alg is normally undefined, and this is a critical function.  */
2323   alg_out->ops = best_alg->ops + 1;
2324   alg_out->cost = cost_limit;
2325   memcpy (alg_out->op, best_alg->op,
2326 	  alg_out->ops * sizeof *alg_out->op);
2327   memcpy (alg_out->log, best_alg->log,
2328 	  alg_out->ops * sizeof *alg_out->log);
2329 }
2330 
2331 /* Perform a multiplication and return an rtx for the result.
2332    MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
2333    TARGET is a suggestion for where to store the result (an rtx).
2334 
2335    We check specially for a constant integer as OP1.
2336    If you want this check for OP0 as well, then before calling
2337    you should swap the two operands if OP0 would be constant.  */
2338 
2339 rtx
2340 expand_mult (mode, op0, op1, target, unsignedp)
2341      enum machine_mode mode;
2342      rtx op0, op1, target;
2343      int unsignedp;
2344 {
2345   rtx const_op1 = op1;
2346 
2347   /* synth_mult does an `unsigned int' multiply.  As long as the mode is
2348      less than or equal in size to `unsigned int' this doesn't matter.
2349      If the mode is larger than `unsigned int', then synth_mult works only
2350      if the constant value exactly fits in an `unsigned int' without any
2351      truncation.  This means that multiplying by negative values does
2352      not work; results are off by 2^32 on a 32 bit machine.  */
2353 
2354   /* If we are multiplying in DImode, it may still be a win
2355      to try to work with shifts and adds.  */
2356   if (GET_CODE (op1) == CONST_DOUBLE
2357       && GET_MODE_CLASS (GET_MODE (op1)) == MODE_INT
2358       && HOST_BITS_PER_INT >= BITS_PER_WORD
2359       && CONST_DOUBLE_HIGH (op1) == 0)
2360     const_op1 = GEN_INT (CONST_DOUBLE_LOW (op1));
2361   else if (HOST_BITS_PER_INT < GET_MODE_BITSIZE (mode)
2362 	   && GET_CODE (op1) == CONST_INT
2363 	   && INTVAL (op1) < 0)
2364     const_op1 = 0;
2365 
2366   /* We used to test optimize here, on the grounds that it's better to
2367      produce a smaller program when -O is not used.
2368      But this causes such a terrible slowdown sometimes
2369      that it seems better to use synth_mult always.  */
2370 
2371   if (const_op1 && GET_CODE (const_op1) == CONST_INT
2372       && (unsignedp || ! flag_trapv))
2373     {
2374       struct algorithm alg;
2375       struct algorithm alg2;
2376       HOST_WIDE_INT val = INTVAL (op1);
2377       HOST_WIDE_INT val_so_far;
2378       rtx insn;
2379       int mult_cost;
2380       enum {basic_variant, negate_variant, add_variant} variant = basic_variant;
2381 
2382       /* op0 must be a register to make mult_cost match the precomputed
2383          shiftadd_cost array.  */
2384       op0 = force_reg (mode, op0);
2385 
2386       /* Try to do the computation three ways: multiply by the negative of OP1
2387 	 and then negate, do the multiplication directly, or do multiplication
2388 	 by OP1 - 1.  */
2389 
2390       mult_cost = rtx_cost (gen_rtx_MULT (mode, op0, op1), SET);
2391       mult_cost = MIN (12 * add_cost, mult_cost);
2392 
2393       synth_mult (&alg, val, mult_cost);
2394 
2395       /* This works only if the inverted value actually fits in an
2396 	 `unsigned int' */
2397       if (HOST_BITS_PER_INT >= GET_MODE_BITSIZE (mode))
2398 	{
2399 	  synth_mult (&alg2, - val,
2400 		      (alg.cost < mult_cost ? alg.cost : mult_cost) - negate_cost);
2401 	  if (alg2.cost + negate_cost < alg.cost)
2402 	    alg = alg2, variant = negate_variant;
2403 	}
2404 
2405       /* This proves very useful for division-by-constant.  */
2406       synth_mult (&alg2, val - 1,
2407 		  (alg.cost < mult_cost ? alg.cost : mult_cost) - add_cost);
2408       if (alg2.cost + add_cost < alg.cost)
2409 	alg = alg2, variant = add_variant;
2410 
2411       if (alg.cost < mult_cost)
2412 	{
2413 	  /* We found something cheaper than a multiply insn.  */
2414 	  int opno;
2415 	  rtx accum, tem;
2416 	  enum machine_mode nmode;
2417 
2418 	  op0 = protect_from_queue (op0, 0);
2419 
2420 	  /* Avoid referencing memory over and over.
2421 	     For speed, but also for correctness when mem is volatile.  */
2422 	  if (GET_CODE (op0) == MEM)
2423 	    op0 = force_reg (mode, op0);
2424 
2425 	  /* ACCUM starts out either as OP0 or as a zero, depending on
2426 	     the first operation.  */
2427 
2428 	  if (alg.op[0] == alg_zero)
2429 	    {
2430 	      accum = copy_to_mode_reg (mode, const0_rtx);
2431 	      val_so_far = 0;
2432 	    }
2433 	  else if (alg.op[0] == alg_m)
2434 	    {
2435 	      accum = copy_to_mode_reg (mode, op0);
2436 	      val_so_far = 1;
2437 	    }
2438 	  else
2439 	    abort ();
2440 
2441 	  for (opno = 1; opno < alg.ops; opno++)
2442 	    {
2443 	      int log = alg.log[opno];
2444 	      int preserve = preserve_subexpressions_p ();
2445 	      rtx shift_subtarget = preserve ? 0 : accum;
2446 	      rtx add_target
2447 		= (opno == alg.ops - 1 && target != 0 && variant != add_variant
2448 		   && ! preserve)
2449 		  ? target : 0;
2450 	      rtx accum_target = preserve ? 0 : accum;
2451 
2452 	      switch (alg.op[opno])
2453 		{
2454 		case alg_shift:
2455 		  accum = expand_shift (LSHIFT_EXPR, mode, accum,
2456 					build_int_2 (log, 0), NULL_RTX, 0);
2457 		  val_so_far <<= log;
2458 		  break;
2459 
2460 		case alg_add_t_m2:
2461 		  tem = expand_shift (LSHIFT_EXPR, mode, op0,
2462 				      build_int_2 (log, 0), NULL_RTX, 0);
2463 		  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2464 					 add_target
2465 					 ? add_target : accum_target);
2466 		  val_so_far += (HOST_WIDE_INT) 1 << log;
2467 		  break;
2468 
2469 		case alg_sub_t_m2:
2470 		  tem = expand_shift (LSHIFT_EXPR, mode, op0,
2471 				      build_int_2 (log, 0), NULL_RTX, 0);
2472 		  accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
2473 					 add_target
2474 					 ? add_target : accum_target);
2475 		  val_so_far -= (HOST_WIDE_INT) 1 << log;
2476 		  break;
2477 
2478 		case alg_add_t2_m:
2479 		  accum = expand_shift (LSHIFT_EXPR, mode, accum,
2480 					build_int_2 (log, 0), shift_subtarget,
2481 					0);
2482 		  accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
2483 					 add_target
2484 					 ? add_target : accum_target);
2485 		  val_so_far = (val_so_far << log) + 1;
2486 		  break;
2487 
2488 		case alg_sub_t2_m:
2489 		  accum = expand_shift (LSHIFT_EXPR, mode, accum,
2490 					build_int_2 (log, 0), shift_subtarget,
2491 					0);
2492 		  accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
2493 					 add_target
2494 					 ? add_target : accum_target);
2495 		  val_so_far = (val_so_far << log) - 1;
2496 		  break;
2497 
2498 		case alg_add_factor:
2499 		  tem = expand_shift (LSHIFT_EXPR, mode, accum,
2500 				      build_int_2 (log, 0), NULL_RTX, 0);
2501 		  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
2502 					 add_target
2503 					 ? add_target : accum_target);
2504 		  val_so_far += val_so_far << log;
2505 		  break;
2506 
2507 		case alg_sub_factor:
2508 		  tem = expand_shift (LSHIFT_EXPR, mode, accum,
2509 				      build_int_2 (log, 0), NULL_RTX, 0);
2510 		  accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
2511 					 (add_target ? add_target
2512 					  : preserve ? 0 : tem));
2513 		  val_so_far = (val_so_far << log) - val_so_far;
2514 		  break;
2515 
2516 		default:
2517 		  abort ();
2518 		}
2519 
2520 	      /* Write a REG_EQUAL note on the last insn so that we can cse
2521 		 multiplication sequences.  Note that if ACCUM is a SUBREG,
2522 		 we've set the inner register and must properly indicate
2523 		 that.  */
2524 
2525 	      tem = op0, nmode = mode;
2526 	      if (GET_CODE (accum) == SUBREG)
2527 		{
2528 		  nmode = GET_MODE (SUBREG_REG (accum));
2529 		  tem = gen_lowpart (nmode, op0);
2530 		}
2531 
2532 	      insn = get_last_insn ();
2533 	      set_unique_reg_note (insn,
2534 	      			   REG_EQUAL,
2535 				   gen_rtx_MULT (nmode, tem,
2536 				   	         GEN_INT (val_so_far)));
2537 	    }
2538 
2539 	  if (variant == negate_variant)
2540 	    {
2541 	      val_so_far = - val_so_far;
2542 	      accum = expand_unop (mode, neg_optab, accum, target, 0);
2543 	    }
2544 	  else if (variant == add_variant)
2545 	    {
2546 	      val_so_far = val_so_far + 1;
2547 	      accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
2548 	    }
2549 
2550 	  if (val != val_so_far)
2551 	    abort ();
2552 
2553 	  return accum;
2554 	}
2555     }
2556 
2557   /* This used to use umul_optab if unsigned, but for non-widening multiply
2558      there is no difference between signed and unsigned.  */
2559   op0 = expand_binop (mode,
2560 		      ! unsignedp
2561 		      && flag_trapv && (GET_MODE_CLASS(mode) == MODE_INT)
2562 		      ? smulv_optab : smul_optab,
2563 		      op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
2564   if (op0 == 0)
2565     abort ();
2566   return op0;
2567 }
2568 
2569 /* Return the smallest n such that 2**n >= X.  */
2570 
2571 int
2572 ceil_log2 (x)
2573      unsigned HOST_WIDE_INT x;
2574 {
2575   return floor_log2 (x - 1) + 1;
2576 }
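/* Editor's examples, not in the original source: ceil_log2 (5) == 3
   and ceil_log2 (8) == 3, since 2**3 == 8 is the smallest power of
   two >= either argument.  */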
2577 
2578 /* Choose a minimal N + 1 bit approximation to 1/D that can be used to
2579    replace division by D, and put the least significant N bits of the result
2580    in *MULTIPLIER_PTR and return the most significant bit.
2581 
2582    The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
2583    needed precision is in PRECISION (should be <= N).
2584 
2585    PRECISION should be as small as possible so this function can choose
2586    a multiplier more freely.
2587 
2588    The rounded-up logarithm of D is placed in *lgup_ptr.  A shift count that
2589    is to be used for a final right shift is placed in *POST_SHIFT_PTR.
2590 
2591    Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
2592    where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier.  */
2593 
2594 static
2595 unsigned HOST_WIDE_INT
2596 choose_multiplier (d, n, precision, multiplier_ptr, post_shift_ptr, lgup_ptr)
2597      unsigned HOST_WIDE_INT d;
2598      int n;
2599      int precision;
2600      unsigned HOST_WIDE_INT *multiplier_ptr;
2601      int *post_shift_ptr;
2602      int *lgup_ptr;
2603 {
2604   HOST_WIDE_INT mhigh_hi, mlow_hi;
2605   unsigned HOST_WIDE_INT mhigh_lo, mlow_lo;
2606   int lgup, post_shift;
2607   int pow, pow2;
2608   unsigned HOST_WIDE_INT nl, dummy1;
2609   HOST_WIDE_INT nh, dummy2;
2610 
2611   /* lgup = ceil(log2(divisor)); */
2612   lgup = ceil_log2 (d);
2613 
2614   if (lgup > n)
2615     abort ();
2616 
2617   pow = n + lgup;
2618   pow2 = n + lgup - precision;
2619 
2620   if (pow == 2 * HOST_BITS_PER_WIDE_INT)
2621     {
2622       /* We could handle this with some effort, but this case is much better
2623 	 handled directly with a scc insn, so rely on caller using that.  */
2624       abort ();
2625     }
2626 
2627   /* mlow = 2^(N + lgup)/d */
2628   if (pow >= HOST_BITS_PER_WIDE_INT)
2629     {
2630       nh = (HOST_WIDE_INT) 1 << (pow - HOST_BITS_PER_WIDE_INT);
2631       nl = 0;
2632     }
2633   else
2634     {
2635       nh = 0;
2636       nl = (unsigned HOST_WIDE_INT) 1 << pow;
2637     }
2638   div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2639 			&mlow_lo, &mlow_hi, &dummy1, &dummy2);
2640 
2641   /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
2642   if (pow2 >= HOST_BITS_PER_WIDE_INT)
2643     nh |= (HOST_WIDE_INT) 1 << (pow2 - HOST_BITS_PER_WIDE_INT);
2644   else
2645     nl |= (unsigned HOST_WIDE_INT) 1 << pow2;
2646   div_and_round_double (TRUNC_DIV_EXPR, 1, nl, nh, d, (HOST_WIDE_INT) 0,
2647 			&mhigh_lo, &mhigh_hi, &dummy1, &dummy2);
2648 
2649   if (mhigh_hi && nh - d >= d)
2650     abort ();
2651   if (mhigh_hi > 1 || mlow_hi > 1)
2652     abort ();
2653   /* assert that mlow < mhigh.  */
2654   if (! (mlow_hi < mhigh_hi || (mlow_hi == mhigh_hi && mlow_lo < mhigh_lo)))
2655     abort ();
2656 
2657   /* If precision == N, then mlow, mhigh exceed 2^N
2658      (but they do not exceed 2^(N+1)).  */
2659 
2660   /* Reduce to lowest terms */
2661   for (post_shift = lgup; post_shift > 0; post_shift--)
2662     {
2663       unsigned HOST_WIDE_INT ml_lo = (mlow_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mlow_lo >> 1);
2664       unsigned HOST_WIDE_INT mh_lo = (mhigh_hi << (HOST_BITS_PER_WIDE_INT - 1)) | (mhigh_lo >> 1);
2665       if (ml_lo >= mh_lo)
2666 	break;
2667 
2668       mlow_hi = 0;
2669       mlow_lo = ml_lo;
2670       mhigh_hi = 0;
2671       mhigh_lo = mh_lo;
2672     }
2673 
2674   *post_shift_ptr = post_shift;
2675   *lgup_ptr = lgup;
2676   if (n < HOST_BITS_PER_WIDE_INT)
2677     {
2678       unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
2679       *multiplier_ptr = mhigh_lo & mask;
2680       return mhigh_lo >= mask;
2681     }
2682   else
2683     {
2684       *multiplier_ptr = mhigh_lo;
2685       return mhigh_hi;
2686     }
2687 }
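/* Editor's worked example, not in the original source: for d = 3 with
   n = precision = 32, lgup is 2 and the reduction loop stops with
   *POST_SHIFT_PTR == 1 and multiplier 0xAAAAAAAB, so that for
   unsigned 32-bit x

       x / 3 == (unsigned) (((unsigned long long) x * 0xAAAAAAAB) >> 32) >> 1

   i.e. a multiply-high followed by one right shift.  */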
2688 
2689 /* Compute the inverse of X mod 2**n, i.e., find Y such that X * Y is
2690    congruent to 1 (mod 2**N).  */
2691 
2692 static unsigned HOST_WIDE_INT
2693 invert_mod2n (x, n)
2694      unsigned HOST_WIDE_INT x;
2695      int n;
2696 {
2697   /* Solve x*y == 1 (mod 2^n), where x is odd.  Return y.  */
2698 
2699   /* The algorithm notes that the choice y = x satisfies
2700      x*y == 1 mod 2^3, since x is assumed odd.
2701      Each iteration doubles the number of bits of significance in y.  */
2702 
2703   unsigned HOST_WIDE_INT mask;
2704   unsigned HOST_WIDE_INT y = x;
2705   int nbit = 3;
2706 
2707   mask = (n == HOST_BITS_PER_WIDE_INT
2708 	  ? ~(unsigned HOST_WIDE_INT) 0
2709 	  : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
2710 
2711   while (nbit < n)
2712     {
2713       y = y * (2 - x*y) & mask;		/* Modulo 2^N */
2714       nbit *= 2;
2715     }
2716   return y;
2717 }
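/* Editor's example, not in the original source: invert_mod2n (3, 8)
   == 171, since 3 * 171 == 513 == 2 * 256 + 1 == 1 (mod 2**8).  */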
2718 
2719 /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
2720    flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
2721    product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
2722    to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
2723    become signed.
2724 
2725    The result is put in TARGET if that is convenient.
2726 
2727    MODE is the mode of operation.  */
2728 
2729 rtx
2730 expand_mult_highpart_adjust (mode, adj_operand, op0, op1, target, unsignedp)
2731      enum machine_mode mode;
2732      rtx adj_operand, op0, op1, target;
2733      int unsignedp;
2734 {
2735   rtx tem;
2736   enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
2737 
2738   tem = expand_shift (RSHIFT_EXPR, mode, op0,
2739 		      build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2740 		      NULL_RTX, 0);
2741   tem = expand_and (mode, tem, op1, NULL_RTX);
2742   adj_operand
2743     = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2744 		     adj_operand);
2745 
2746   tem = expand_shift (RSHIFT_EXPR, mode, op1,
2747 		      build_int_2 (GET_MODE_BITSIZE (mode) - 1, 0),
2748 		      NULL_RTX, 0);
2749   tem = expand_and (mode, tem, op0, NULL_RTX);
2750   target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
2751 			  target);
2752 
2753   return target;
2754 }
2755 
2756 /* Emit code to multiply OP0 and CNST1, putting the high half of the result
2757    in TARGET if that is convenient, and return where the result is.  If the
2758    operation can not be performed, 0 is returned.
2759 
2760    MODE is the mode of operation and result.
2761 
2762    UNSIGNEDP nonzero means unsigned multiply.
2763 
2764    MAX_COST is the total allowed cost for the expanded RTL.  */
2765 
2766 rtx
2767 expand_mult_highpart (mode, op0, cnst1, target, unsignedp, max_cost)
2768      enum machine_mode mode;
2769      rtx op0, target;
2770      unsigned HOST_WIDE_INT cnst1;
2771      int unsignedp;
2772      int max_cost;
2773 {
2774   enum machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
2775   optab mul_highpart_optab;
2776   optab moptab;
2777   rtx tem;
2778   int size = GET_MODE_BITSIZE (mode);
2779   rtx op1, wide_op1;
2780 
2781   /* We can't support modes wider than HOST_BITS_PER_WIDE_INT.  */
2782   if (size > HOST_BITS_PER_WIDE_INT)
2783     abort ();
2784 
2785   op1 = gen_int_mode (cnst1, mode);
2786 
2787   wide_op1
2788     = immed_double_const (cnst1,
2789 			  (unsignedp
2790 			   ? (HOST_WIDE_INT) 0
2791 			   : -(cnst1 >> (HOST_BITS_PER_WIDE_INT - 1))),
2792 			  wider_mode);
2793 
2794   /* expand_mult handles constant multiplication of word_mode
2795      or narrower.  It does a poor job for large modes.  */
2796   if (size < BITS_PER_WORD
2797       && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2798     {
2799       /* We have to do this, since expand_binop doesn't do conversion for
2800 	 multiply.  Maybe change expand_binop to handle widening multiply?  */
2801       op0 = convert_to_mode (wider_mode, op0, unsignedp);
2802 
2803       /* We know that this can't have signed overflow, so pretend this is
2804          an unsigned multiply.  */
2805       tem = expand_mult (wider_mode, op0, wide_op1, NULL_RTX, 0);
2806       tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2807 			  build_int_2 (size, 0), NULL_RTX, 1);
2808       return convert_modes (mode, wider_mode, tem, unsignedp);
2809     }
2810 
2811   if (target == 0)
2812     target = gen_reg_rtx (mode);
2813 
2814   /* Firstly, try using a multiplication insn that only generates the needed
2815      high part of the product, and in the sign flavor of unsignedp.  */
2816   if (mul_highpart_cost[(int) mode] < max_cost)
2817     {
2818       mul_highpart_optab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
2819       target = expand_binop (mode, mul_highpart_optab,
2820 			     op0, op1, target, unsignedp, OPTAB_DIRECT);
2821       if (target)
2822 	return target;
2823     }
2824 
2825   /* Secondly, same as above, but use sign flavor opposite of unsignedp.
2826      Need to adjust the result after the multiplication.  */
2827   if (size - 1 < BITS_PER_WORD
2828       && (mul_highpart_cost[(int) mode] + 2 * shift_cost[size-1] + 4 * add_cost
2829 	  < max_cost))
2830     {
2831       mul_highpart_optab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
2832       target = expand_binop (mode, mul_highpart_optab,
2833 			     op0, op1, target, unsignedp, OPTAB_DIRECT);
2834       if (target)
2835 	/* We used the wrong signedness.  Adjust the result.  */
2836 	return expand_mult_highpart_adjust (mode, target, op0,
2837 					    op1, target, unsignedp);
2838     }
2839 
2840   /* Try widening multiplication.  */
2841   moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
2842   if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2843       && mul_widen_cost[(int) wider_mode] < max_cost)
2844     {
2845       op1 = force_reg (mode, op1);
2846       goto try;
2847     }
2848 
2849   /* Try widening the mode and perform a non-widening multiplication.  */
2850   moptab = smul_optab;
2851   if (smul_optab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2852       && size - 1 < BITS_PER_WORD
2853       && mul_cost[(int) wider_mode] + shift_cost[size-1] < max_cost)
2854     {
2855       op1 = wide_op1;
2856       goto try;
2857     }
2858 
2859   /* Try widening multiplication of opposite signedness, and adjust.  */
2860   moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
2861   if (moptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
2862       && size - 1 < BITS_PER_WORD
2863       && (mul_widen_cost[(int) wider_mode]
2864 	  + 2 * shift_cost[size-1] + 4 * add_cost < max_cost))
2865     {
2866       rtx regop1 = force_reg (mode, op1);
2867       tem = expand_binop (wider_mode, moptab, op0, regop1,
2868 			  NULL_RTX, ! unsignedp, OPTAB_WIDEN);
2869       if (tem != 0)
2870 	{
2871 	  /* Extract the high half of the just generated product.  */
2872 	  tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2873 			      build_int_2 (size, 0), NULL_RTX, 1);
2874 	  tem = convert_modes (mode, wider_mode, tem, unsignedp);
2875 	  /* We used the wrong signedness.  Adjust the result.  */
2876 	  return expand_mult_highpart_adjust (mode, tem, op0, op1,
2877 					      target, unsignedp);
2878 	}
2879     }
2880 
2881   return 0;
2882 
2883  try:
2884   /* Pass NULL_RTX as target since TARGET has wrong mode.  */
2885   tem = expand_binop (wider_mode, moptab, op0, op1,
2886 		      NULL_RTX, unsignedp, OPTAB_WIDEN);
2887   if (tem == 0)
2888     return 0;
2889 
2890   /* Extract the high half of the just generated product.  */
2891   if (mode == word_mode)
2892     {
2893       return gen_highpart (mode, tem);
2894     }
2895   else
2896     {
2897       tem = expand_shift (RSHIFT_EXPR, wider_mode, tem,
2898 			  build_int_2 (size, 0), NULL_RTX, 1);
2899       return convert_modes (mode, wider_mode, tem, unsignedp);
2900     }
2901 }
2902 
2903 /* Emit the code to divide OP0 by OP1, putting the result in TARGET
2904    if that is convenient, and returning where the result is.
2905    You may request either the quotient or the remainder as the result;
2906    specify REM_FLAG nonzero to get the remainder.
2907 
2908    CODE is the expression code for which kind of division this is;
2909    it controls how rounding is done.  MODE is the machine mode to use.
2910    UNSIGNEDP nonzero means do unsigned division.  */
2911 
2912 /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
2913    and then correct it by or'ing in missing high bits
2914    if result of ANDI is nonzero.
2915    For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
2916    This could optimize to a bfexts instruction.
2917    But C doesn't use these operations, so their optimizations are
2918    left for later.  */
2919 /* ??? For modulo, we don't actually need the highpart of the first product,
2920    the low part will do nicely.  And for small divisors, the second multiply
2921    can also be a low-part only multiply or even be completely left out.
2922    E.g. to calculate the remainder of a division by 3 with a 32 bit
2923    multiply, multiply with 0x55555556 and extract the upper two bits;
2924    the result is exact for inputs up to 0x1fffffff.
2925    The input range can be reduced by using cross-sum rules.
2926    For odd divisors >= 3, the following table gives right shift counts
2927    so that if a number is shifted by an integer multiple of the given
2928    amount, the remainder stays the same:
2929    2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
2930    14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
2931    0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
2932    20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
2933    0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
2934 
2935    Cross-sum rules for even numbers can be derived by leaving as many bits
2936    to the right alone as the divisor has zeros to the right.
2937    E.g. if x is an unsigned 32 bit number:
2938    (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
2939    */
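/* Editor's illustration, not part of the original file: the
   remainder-by-3 trick described above, assuming a 32-bit unsigned
   int and x <= 0x1fffffff.  */
#if 0
static unsigned int
mod3_sketch (x)
     unsigned int x;
{
  /* Keep only the low 32 bits of the product; the remainder ends up
     in the top two bits.  */
  return (x * 0x55555556u) >> 30;
}
#endif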
2940 
2941 #define EXACT_POWER_OF_2_OR_ZERO_P(x) (((x) & ((x) - 1)) == 0)
2942 
2943 rtx
2944 expand_divmod (rem_flag, code, mode, op0, op1, target, unsignedp)
2945      int rem_flag;
2946      enum tree_code code;
2947      enum machine_mode mode;
2948      rtx op0, op1, target;
2949      int unsignedp;
2950 {
2951   enum machine_mode compute_mode;
2952   rtx tquotient;
2953   rtx quotient = 0, remainder = 0;
2954   rtx last;
2955   int size;
2956   rtx insn, set;
2957   optab optab1, optab2;
2958   int op1_is_constant, op1_is_pow2;
2959   int max_cost, extra_cost;
2960   static HOST_WIDE_INT last_div_const = 0;
2961 
2962   op1_is_constant = GET_CODE (op1) == CONST_INT;
2963   op1_is_pow2 = (op1_is_constant
2964 		 && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
2965 		      || (! unsignedp && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
2966 
2967   /*
2968      This is the structure of expand_divmod:
2969 
2970      First comes code to fix up the operands so we can perform the operations
2971      correctly and efficiently.
2972 
2973      Second comes a switch statement with code specific for each rounding mode.
2974      For some special operands this code emits all RTL for the desired
2975      operation, for other cases, it generates only a quotient and stores it in
2976      QUOTIENT.  The case for trunc division/remainder might leave quotient = 0,
2977      to indicate that it has not done anything.
2978 
2979      Last comes code that finishes the operation.  If QUOTIENT is set and
2980      REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1.  If
2981      QUOTIENT is not set, it is computed using trunc rounding.
2982 
2983      We try to generate special code for division and remainder when OP1 is a
2984      constant.  If |OP1| = 2**n we can use shifts and some other fast
2985      operations.  For other values of OP1, we compute a carefully selected
2986      fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
2987      by m.
2988 
2989      In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
2990      half of the product.  Different strategies for generating the product are
2991      implemented in expand_mult_highpart.
2992 
2993      If what we actually want is the remainder, we generate that by another
2994      by-constant multiplication and a subtraction.  */
2995 
2996   /* We shouldn't be called with OP1 == const1_rtx, but some of the
2997      code below will malfunction if we are, so check here and handle
2998      the special case if so.  */
2999   if (op1 == const1_rtx)
3000     return rem_flag ? const0_rtx : op0;
3001 
3002   /* When dividing by -1, we could get an overflow.
3003      negv_optab can handle overflows.  */
3004   if (! unsignedp && op1 == constm1_rtx)
3005     {
3006       if (rem_flag)
3007 	return const0_rtx;
3008       return expand_unop (mode, flag_trapv && GET_MODE_CLASS(mode) == MODE_INT
3009 			  ? negv_optab : neg_optab, op0, target, 0);
3010     }
3011 
3012   if (target
3013       /* Don't use the function value register as a target
3014 	 since we have to read it as well as write it,
3015 	 and function-inlining gets confused by this.  */
3016       && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
3017 	  /* Don't clobber an operand while doing a multi-step calculation.  */
3018 	  || ((rem_flag || op1_is_constant)
3019 	      && (reg_mentioned_p (target, op0)
3020 		  || (GET_CODE (op0) == MEM && GET_CODE (target) == MEM)))
3021 	  || reg_mentioned_p (target, op1)
3022 	  || (GET_CODE (op1) == MEM && GET_CODE (target) == MEM)))
3023     target = 0;
3024 
3025   /* Get the mode in which to perform this computation.  Normally it will
3026      be MODE, but sometimes we can't do the desired operation in MODE.
3027      If so, pick a wider mode in which we can do the operation.  Convert
3028      to that mode at the start to avoid repeated conversions.
3029 
3030      First see what operations we need.  These depend on the expression
3031      we are evaluating.  (We assume that divxx3 insns exist under the
3032      same conditions that modxx3 insns do and that these insns don't normally
3033      fail.  If these assumptions are not correct, we may generate less
3034      efficient code in some cases.)
3035 
3036      Then see if we find a mode in which we can open-code that operation
3037      (either a division, modulus, or shift).  Finally, check for the smallest
3038      mode for which we can do the operation with a library call.  */
3039 
3040   /* We might want to refine this now that we have division-by-constant
3041      optimization.  Since expand_mult_highpart tries so many variants, it is
3042      not straightforward to generalize this.  Maybe we should make an array
3043      of possible modes in init_expmed?  Save this for GCC 2.7.  */
3044 
3045   optab1 = ((op1_is_pow2 && op1 != const0_rtx)
3046 	    ? (unsignedp ? lshr_optab : ashr_optab)
3047 	    : (unsignedp ? udiv_optab : sdiv_optab));
3048   optab2 = ((op1_is_pow2 && op1 != const0_rtx)
3049 	    ? optab1
3050 	    : (unsignedp ? udivmod_optab : sdivmod_optab));
3051 
3052   for (compute_mode = mode; compute_mode != VOIDmode;
3053        compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3054     if (optab1->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing
3055 	|| optab2->handlers[(int) compute_mode].insn_code != CODE_FOR_nothing)
3056       break;
3057 
3058   if (compute_mode == VOIDmode)
3059     for (compute_mode = mode; compute_mode != VOIDmode;
3060 	 compute_mode = GET_MODE_WIDER_MODE (compute_mode))
3061       if (optab1->handlers[(int) compute_mode].libfunc
3062 	  || optab2->handlers[(int) compute_mode].libfunc)
3063 	break;
3064 
3065   /* If we still couldn't find a mode, use MODE, but we'll probably abort
3066      in expand_binop.  */
3067   if (compute_mode == VOIDmode)
3068     compute_mode = mode;
3069 
3070   if (target && GET_MODE (target) == compute_mode)
3071     tquotient = target;
3072   else
3073     tquotient = gen_reg_rtx (compute_mode);
3074 
3075   size = GET_MODE_BITSIZE (compute_mode);
3076 #if 0
3077   /* It should be possible to restrict the precision to GET_MODE_BITSIZE
3078      (mode), and thereby get better code when OP1 is a constant.  Do that
3079      later.  It will require going over all usages of SIZE below.  */
3080   size = GET_MODE_BITSIZE (mode);
3081 #endif
3082 
3083   /* Only deduct something for a REM if the last divide done was
3084      for a different constant.  Then set the constant of the last
3085      divide.  */
3086   max_cost = div_cost[(int) compute_mode]
3087     - (rem_flag && ! (last_div_const != 0 && op1_is_constant
3088 		      && INTVAL (op1) == last_div_const)
3089        ? mul_cost[(int) compute_mode] + add_cost : 0);
3090 
3091   last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
3092 
3093   /* Now convert to the best mode to use.  */
3094   if (compute_mode != mode)
3095     {
3096       op0 = convert_modes (compute_mode, mode, op0, unsignedp);
3097       op1 = convert_modes (compute_mode, mode, op1, unsignedp);
3098 
3099       /* convert_modes may have placed op1 into a register, so we
3100 	 must recompute the following.  */
3101       op1_is_constant = GET_CODE (op1) == CONST_INT;
3102       op1_is_pow2 = (op1_is_constant
3103 		     && ((EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3104 			  || (! unsignedp
3105 			      && EXACT_POWER_OF_2_OR_ZERO_P (-INTVAL (op1))))));
3106     }
3107 
3108   /* If one of the operands is a volatile MEM, copy it into a register.  */
3109 
3110   if (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0))
3111     op0 = force_reg (compute_mode, op0);
3112   if (GET_CODE (op1) == MEM && MEM_VOLATILE_P (op1))
3113     op1 = force_reg (compute_mode, op1);
3114 
3115   /* If we need the remainder or if OP1 is constant, we need to
3116      put OP0 in a register in case it has any queued subexpressions.  */
3117   if (rem_flag || op1_is_constant)
3118     op0 = force_reg (compute_mode, op0);
3119 
3120   last = get_last_insn ();
3121 
3122   /* Promote floor rounding to trunc rounding for unsigned operations.  */
3123   if (unsignedp)
3124     {
3125       if (code == FLOOR_DIV_EXPR)
3126 	code = TRUNC_DIV_EXPR;
3127       if (code == FLOOR_MOD_EXPR)
3128 	code = TRUNC_MOD_EXPR;
3129       if (code == EXACT_DIV_EXPR && op1_is_pow2)
3130 	code = TRUNC_DIV_EXPR;
3131     }
3132 
3133   if (op1 != const0_rtx)
3134     switch (code)
3135       {
3136       case TRUNC_MOD_EXPR:
3137       case TRUNC_DIV_EXPR:
3138 	if (op1_is_constant)
3139 	  {
3140 	    if (unsignedp)
3141 	      {
3142 		unsigned HOST_WIDE_INT mh, ml;
3143 		int pre_shift, post_shift;
3144 		int dummy;
3145 		unsigned HOST_WIDE_INT d = INTVAL (op1);
3146 
3147 		if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3148 		  {
3149 		    pre_shift = floor_log2 (d);
3150 		    if (rem_flag)
3151 		      {
3152 			remainder
3153 			  = expand_binop (compute_mode, and_optab, op0,
3154 					  GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3155 					  remainder, 1,
3156 					  OPTAB_LIB_WIDEN);
3157 			if (remainder)
3158 			  return gen_lowpart (mode, remainder);
3159 		      }
3160 		    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3161 					     build_int_2 (pre_shift, 0),
3162 					     tquotient, 1);
3163 		  }
3164 		else if (size <= HOST_BITS_PER_WIDE_INT)
3165 		  {
3166 		    if (d >= ((unsigned HOST_WIDE_INT) 1 << (size - 1)))
3167 		      {
3168 			/* Most significant bit of divisor is set; emit an scc
3169 			   insn.  */
3170 			quotient = emit_store_flag (tquotient, GEU, op0, op1,
3171 						    compute_mode, 1, 1);
3172 			if (quotient == 0)
3173 			  goto fail1;
3174 		      }
3175 		    else
3176 		      {
3177 			/* Find a suitable multiplier and right shift count
3178 			   instead of multiplying with D.  */
3179 
3180 			mh = choose_multiplier (d, size, size,
3181 						&ml, &post_shift, &dummy);
3182 
3183 			/* If the suggested multiplier is more than SIZE bits,
3184 			   we can do better for even divisors, using an
3185 			   initial right shift.  */
3186 			if (mh != 0 && (d & 1) == 0)
3187 			  {
3188 			    pre_shift = floor_log2 (d & -d);
3189 			    mh = choose_multiplier (d >> pre_shift, size,
3190 						    size - pre_shift,
3191 						    &ml, &post_shift, &dummy);
3192 			    if (mh)
3193 			      abort ();
3194 			  }
3195 			else
3196 			  pre_shift = 0;
3197 
3198 			if (mh != 0)
3199 			  {
3200 			    rtx t1, t2, t3, t4;
3201 
3202 			    if (post_shift - 1 >= BITS_PER_WORD)
3203 			      goto fail1;
3204 
3205 			    extra_cost = (shift_cost[post_shift - 1]
3206 					  + shift_cost[1] + 2 * add_cost);
3207 			    t1 = expand_mult_highpart (compute_mode, op0, ml,
3208 						       NULL_RTX, 1,
3209 						       max_cost - extra_cost);
3210 			    if (t1 == 0)
3211 			      goto fail1;
3212 			    t2 = force_operand (gen_rtx_MINUS (compute_mode,
3213 							       op0, t1),
3214 						NULL_RTX);
3215 			    t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3216 					       build_int_2 (1, 0), NULL_RTX, 1);
3217 			    t4 = force_operand (gen_rtx_PLUS (compute_mode,
3218 							      t1, t3),
3219 						NULL_RTX);
3220 			    quotient
3221 			      = expand_shift (RSHIFT_EXPR, compute_mode, t4,
3222 					      build_int_2 (post_shift - 1, 0),
3223 					      tquotient, 1);
3224 			  }
3225 			else
3226 			  {
3227 			    rtx t1, t2;
3228 
3229 			    if (pre_shift >= BITS_PER_WORD
3230 				|| post_shift >= BITS_PER_WORD)
3231 			      goto fail1;
3232 
3233 			    t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3234 					       build_int_2 (pre_shift, 0),
3235 					       NULL_RTX, 1);
3236 			    extra_cost = (shift_cost[pre_shift]
3237 					  + shift_cost[post_shift]);
3238 			    t2 = expand_mult_highpart (compute_mode, t1, ml,
3239 						       NULL_RTX, 1,
3240 						       max_cost - extra_cost);
3241 			    if (t2 == 0)
3242 			      goto fail1;
3243 			    quotient
3244 			      = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3245 					      build_int_2 (post_shift, 0),
3246 					      tquotient, 1);
3247 			  }
3248 		      }
3249 		  }
3250 		else		/* Mode is too wide for the tricky code.  */
3251 		  break;
3252 
3253 		insn = get_last_insn ();
3254 		if (insn != last
3255 		    && (set = single_set (insn)) != 0
3256 		    && SET_DEST (set) == quotient)
3257 		  set_unique_reg_note (insn,
3258 				       REG_EQUAL,
3259 				       gen_rtx_UDIV (compute_mode, op0, op1));
3260 	      }
3261 	    else		/* TRUNC_DIV, signed */
3262 	      {
3263 		unsigned HOST_WIDE_INT ml;
3264 		int lgup, post_shift;
3265 		HOST_WIDE_INT d = INTVAL (op1);
3266 		unsigned HOST_WIDE_INT abs_d = d >= 0 ? d : -d;
3267 
3268 		/* n rem d = n rem -d */
3269 		if (rem_flag && d < 0)
3270 		  {
3271 		    d = abs_d;
3272 		    op1 = gen_int_mode (abs_d, compute_mode);
3273 		  }
3274 
3275 		if (d == 1)
3276 		  quotient = op0;
3277 		else if (d == -1)
3278 		  quotient = expand_unop (compute_mode, neg_optab, op0,
3279 					  tquotient, 0);
3280 		else if (abs_d == (unsigned HOST_WIDE_INT) 1 << (size - 1))
3281 		  {
3282 		    /* This case is not handled correctly below.  */
3283 		    quotient = emit_store_flag (tquotient, EQ, op0, op1,
3284 						compute_mode, 1, 1);
3285 		    if (quotient == 0)
3286 		      goto fail1;
3287 		  }
3288 		else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
3289 			 && (rem_flag ? smod_pow2_cheap : sdiv_pow2_cheap)
3290 			 /* ??? The cheap metric is computed only for
3291 			    word_mode.  If this operation is wider, this may
3292 			    not be so.  Assume true if the optab has an
3293 			    expander for this mode.  */
3294 			 && (((rem_flag ? smod_optab : sdiv_optab)
3295 			      ->handlers[(int) compute_mode].insn_code
3296 			      != CODE_FOR_nothing)
3297 			     || (sdivmod_optab->handlers[(int) compute_mode]
3298 				 .insn_code != CODE_FOR_nothing)))
3299 		  ;
3300 		else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
3301 		  {
3302 		    lgup = floor_log2 (abs_d);
3303 		    if (BRANCH_COST < 1 || (abs_d != 2 && BRANCH_COST < 3))
3304 		      {
3305 			rtx label = gen_label_rtx ();
3306 			rtx t1;
3307 
3308 			t1 = copy_to_mode_reg (compute_mode, op0);
3309 			do_cmp_and_jump (t1, const0_rtx, GE,
3310 					 compute_mode, label);
3311 			expand_inc (t1, gen_int_mode (abs_d - 1,
3312 						      compute_mode));
3313 			emit_label (label);
3314 			quotient = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3315 						 build_int_2 (lgup, 0),
3316 						 tquotient, 0);
3317 		      }
3318 		    else
3319 		      {
3320 			rtx t1, t2, t3;
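			/* An illustrative sketch (not compiled) for a
			   32-bit mode with abs_d == 4, lgup == 2:
			     t1 = n >> 31;	   arithmetic: 0 or -1
			     t2 = t1 >> 30;	   logical: 0 or 3
			     q  = (n + t2) >> 2;   arithmetic
			   i.e. bias n by abs_d - 1 before the shift
			   exactly when n is negative.  */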
3321 			t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3322 					   build_int_2 (size - 1, 0),
3323 					   NULL_RTX, 0);
3324 			t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3325 					   build_int_2 (size - lgup, 0),
3326 					   NULL_RTX, 1);
3327 			t3 = force_operand (gen_rtx_PLUS (compute_mode,
3328 							  op0, t2),
3329 					    NULL_RTX);
3330 			quotient = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3331 						 build_int_2 (lgup, 0),
3332 						 tquotient, 0);
3333 		      }
3334 
3335 		    /* We have computed OP0 / abs(OP1).  If OP1 is negative, negate
3336 		       the quotient.  */
3337 		    if (d < 0)
3338 		      {
3339 			insn = get_last_insn ();
3340 			if (insn != last
3341 			    && (set = single_set (insn)) != 0
3342 			    && SET_DEST (set) == quotient
3343 			    && abs_d < ((unsigned HOST_WIDE_INT) 1
3344 					<< (HOST_BITS_PER_WIDE_INT - 1)))
3345 			  set_unique_reg_note (insn,
3346 					       REG_EQUAL,
3347 					       gen_rtx_DIV (compute_mode,
3348 							    op0,
3349 							    GEN_INT
3350 							    (trunc_int_for_mode
3351 							     (abs_d,
3352 							      compute_mode))));
3353 
3354 			quotient = expand_unop (compute_mode, neg_optab,
3355 						quotient, quotient, 0);
3356 		      }
3357 		  }
3358 		else if (size <= HOST_BITS_PER_WIDE_INT)
3359 		  {
3360 		    choose_multiplier (abs_d, size, size - 1,
3361 				       &ml, &post_shift, &lgup);
3362 		    if (ml < (unsigned HOST_WIDE_INT) 1 << (size - 1))
3363 		      {
3364 			rtx t1, t2, t3;
3365 
3366 			if (post_shift >= BITS_PER_WORD
3367 			    || size - 1 >= BITS_PER_WORD)
3368 			  goto fail1;
3369 
3370 			extra_cost = (shift_cost[post_shift]
3371 				      + shift_cost[size - 1] + add_cost);
3372 			t1 = expand_mult_highpart (compute_mode, op0, ml,
3373 						   NULL_RTX, 0,
3374 						   max_cost - extra_cost);
3375 			if (t1 == 0)
3376 			  goto fail1;
3377 			t2 = expand_shift (RSHIFT_EXPR, compute_mode, t1,
3378 					   build_int_2 (post_shift, 0), NULL_RTX, 0);
3379 			t3 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3380 					   build_int_2 (size - 1, 0), NULL_RTX, 0);
3381 			if (d < 0)
3382 			  quotient
3383 			    = force_operand (gen_rtx_MINUS (compute_mode,
3384 							    t3, t2),
3385 					     tquotient);
3386 			else
3387 			  quotient
3388 			    = force_operand (gen_rtx_MINUS (compute_mode,
3389 							    t2, t3),
3390 					     tquotient);
3391 		      }
3392 		    else
3393 		      {
3394 			rtx t1, t2, t3, t4;
3395 
3396 			if (post_shift >= BITS_PER_WORD
3397 			    || size - 1 >= BITS_PER_WORD)
3398 			  goto fail1;
3399 
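			/* Illustrative values (a sketch, not compiled),
			   assuming a 32-bit mode and abs_d == 7:
			   choose_multiplier gives ml = 0x92492493 and
			   post_shift == 2, so
			     t1 = mulhi_s (n, ml);   signed high part
			     q  = ((n + t1) >> 2) - (n >> 31);
			   followed by a negation when d < 0.  mulhi_s is
			   shorthand for the signed expand_mult_highpart.  */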
3400 			ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
3401 			extra_cost = (shift_cost[post_shift]
3402 				      + shift_cost[size - 1] + 2 * add_cost);
3403 			t1 = expand_mult_highpart (compute_mode, op0, ml,
3404 						   NULL_RTX, 0,
3405 						   max_cost - extra_cost);
3406 			if (t1 == 0)
3407 			  goto fail1;
3408 			t2 = force_operand (gen_rtx_PLUS (compute_mode,
3409 							  t1, op0),
3410 					    NULL_RTX);
3411 			t3 = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3412 					   build_int_2 (post_shift, 0),
3413 					   NULL_RTX, 0);
3414 			t4 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3415 					   build_int_2 (size - 1, 0),
3416 					   NULL_RTX, 0);
3417 			if (d < 0)
3418 			  quotient
3419 			    = force_operand (gen_rtx_MINUS (compute_mode,
3420 							    t4, t3),
3421 					     tquotient);
3422 			else
3423 			  quotient
3424 			    = force_operand (gen_rtx_MINUS (compute_mode,
3425 							    t3, t4),
3426 					     tquotient);
3427 		      }
3428 		  }
3429 		else		/* Mode is too wide for the tricky code.  */
3430 		  break;
3431 
3432 		insn = get_last_insn ();
3433 		if (insn != last
3434 		    && (set = single_set (insn)) != 0
3435 		    && SET_DEST (set) == quotient)
3436 		  set_unique_reg_note (insn,
3437 				       REG_EQUAL,
3438 				       gen_rtx_DIV (compute_mode, op0, op1));
3439 	      }
3440 	    break;
3441 	  }
3442       fail1:
3443 	delete_insns_since (last);
3444 	break;
3445 
3446       case FLOOR_DIV_EXPR:
3447       case FLOOR_MOD_EXPR:
3448 	/* We will come here only for signed operations.  */
3449 	if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3450 	  {
3451 	    unsigned HOST_WIDE_INT mh, ml;
3452 	    int pre_shift, lgup, post_shift;
3453 	    HOST_WIDE_INT d = INTVAL (op1);
3454 
3455 	    if (d > 0)
3456 	      {
3457 		/* We could just as easily deal with negative constants here,
3458 		   but it does not seem worth the trouble for GCC 2.6.  */
3459 		if (EXACT_POWER_OF_2_OR_ZERO_P (d))
3460 		  {
3461 		    pre_shift = floor_log2 (d);
3462 		    if (rem_flag)
3463 		      {
3464 			remainder = expand_binop (compute_mode, and_optab, op0,
3465 						  GEN_INT (((HOST_WIDE_INT) 1 << pre_shift) - 1),
3466 						  remainder, 0, OPTAB_LIB_WIDEN);
3467 			if (remainder)
3468 			  return gen_lowpart (mode, remainder);
3469 		      }
3470 		    quotient = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3471 					     build_int_2 (pre_shift, 0),
3472 					     tquotient, 0);
3473 		  }
3474 		else
3475 		  {
3476 		    rtx t1, t2, t3, t4;
3477 
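		    /* The sequence built below uses the identity
		       floor (n / d) = ((n ^ s) / d) ^ s for d > 0, where
		       s = n >> (size - 1) is the sign mask and the inner
		       division is unsigned (done via the multiplier ML).
		       A sketch for a 32-bit mode: n = -13, d = 5 gives
		       s = -1, n ^ s = 12, 12 / 5 = 2, 2 ^ s = -3, which
		       is floor (-13 / 5).  */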
3478 		    mh = choose_multiplier (d, size, size - 1,
3479 					    &ml, &post_shift, &lgup);
3480 		    if (mh)
3481 		      abort ();
3482 
3483 		    if (post_shift < BITS_PER_WORD
3484 			&& size - 1 < BITS_PER_WORD)
3485 		      {
3486 			t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3487 					   build_int_2 (size - 1, 0),
3488 					   NULL_RTX, 0);
3489 			t2 = expand_binop (compute_mode, xor_optab, op0, t1,
3490 					   NULL_RTX, 0, OPTAB_WIDEN);
3491 			extra_cost = (shift_cost[post_shift]
3492 				      + shift_cost[size - 1] + 2 * add_cost);
3493 			t3 = expand_mult_highpart (compute_mode, t2, ml,
3494 						   NULL_RTX, 1,
3495 						   max_cost - extra_cost);
3496 			if (t3 != 0)
3497 			  {
3498 			    t4 = expand_shift (RSHIFT_EXPR, compute_mode, t3,
3499 					       build_int_2 (post_shift, 0),
3500 					       NULL_RTX, 1);
3501 			    quotient = expand_binop (compute_mode, xor_optab,
3502 						     t4, t1, tquotient, 0,
3503 						     OPTAB_WIDEN);
3504 			  }
3505 		      }
3506 		  }
3507 	      }
3508 	    else
3509 	      {
3510 		rtx nsign, t1, t2, t3, t4;
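		/* For d < 0 (a sketch, not compiled): with
		   nsign = (n | (n - 1)) >> (size - 1), i.e. -1 iff n <= 0,
		   floor (n / d) = trunc ((n - 1 - nsign) / d) + ~nsign.
		   E.g. in a 32-bit mode, n = 13, d = -5:  nsign = 0,
		   trunc (12 / -5) + ~0 = -2 + -1 = -3 = floor (13 / -5).  */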
3511 		t1 = force_operand (gen_rtx_PLUS (compute_mode,
3512 						  op0, constm1_rtx), NULL_RTX);
3513 		t2 = expand_binop (compute_mode, ior_optab, op0, t1, NULL_RTX,
3514 				   0, OPTAB_WIDEN);
3515 		nsign = expand_shift (RSHIFT_EXPR, compute_mode, t2,
3516 				      build_int_2 (size - 1, 0), NULL_RTX, 0);
3517 		t3 = force_operand (gen_rtx_MINUS (compute_mode, t1, nsign),
3518 				    NULL_RTX);
3519 		t4 = expand_divmod (0, TRUNC_DIV_EXPR, compute_mode, t3, op1,
3520 				    NULL_RTX, 0);
3521 		if (t4)
3522 		  {
3523 		    rtx t5;
3524 		    t5 = expand_unop (compute_mode, one_cmpl_optab, nsign,
3525 				      NULL_RTX, 0);
3526 		    quotient = force_operand (gen_rtx_PLUS (compute_mode,
3527 							    t4, t5),
3528 					      tquotient);
3529 		  }
3530 	      }
3531 	  }
3532 
3533 	if (quotient != 0)
3534 	  break;
3535 	delete_insns_since (last);
3536 
3537 	/* Try using an instruction that produces both the quotient and
3538 	   remainder, using truncation.  We can easily compensate the quotient
3539 	   or remainder to get floor rounding, once we have the remainder.
3540 	   Notice that we also compute the final remainder value here,
3541 	   and return the result right away.  */
3542 	if (target == 0 || GET_MODE (target) != compute_mode)
3543 	  target = gen_reg_rtx (compute_mode);
3544 
3545 	if (rem_flag)
3546 	  {
3547 	    remainder
3548 	      = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3549 	    quotient = gen_reg_rtx (compute_mode);
3550 	  }
3551 	else
3552 	  {
3553 	    quotient
3554 	      = GET_CODE (target) == REG ? target : gen_reg_rtx (compute_mode);
3555 	    remainder = gen_reg_rtx (compute_mode);
3556 	  }
3557 
3558 	if (expand_twoval_binop (sdivmod_optab, op0, op1,
3559 				 quotient, remainder, 0))
3560 	  {
3561 	    /* This could be computed with a branch-less sequence.
3562 	       Save that for later.  */
3563 	    rtx tem;
3564 	    rtx label = gen_label_rtx ();
3565 	    do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
3566 	    tem = expand_binop (compute_mode, xor_optab, op0, op1,
3567 				NULL_RTX, 0, OPTAB_WIDEN);
3568 	    do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
3569 	    expand_dec (quotient, const1_rtx);
3570 	    expand_inc (remainder, op1);
3571 	    emit_label (label);
3572 	    return gen_lowpart (mode, rem_flag ? remainder : quotient);
3573 	  }
3574 
3575 	/* No luck with division elimination or divmod.  Have to do it
3576 	   by conditionally adjusting op0 *and* the result.  */
3577 	{
3578 	  rtx label1, label2, label3, label4, label5;
3579 	  rtx adjusted_op0;
3580 	  rtx tem;
3581 
3582 	  quotient = gen_reg_rtx (compute_mode);
3583 	  adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3584 	  label1 = gen_label_rtx ();
3585 	  label2 = gen_label_rtx ();
3586 	  label3 = gen_label_rtx ();
3587 	  label4 = gen_label_rtx ();
3588 	  label5 = gen_label_rtx ();
3589 	  do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3590 	  do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
3591 	  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3592 			      quotient, 0, OPTAB_LIB_WIDEN);
3593 	  if (tem != quotient)
3594 	    emit_move_insn (quotient, tem);
3595 	  emit_jump_insn (gen_jump (label5));
3596 	  emit_barrier ();
3597 	  emit_label (label1);
3598 	  expand_inc (adjusted_op0, const1_rtx);
3599 	  emit_jump_insn (gen_jump (label4));
3600 	  emit_barrier ();
3601 	  emit_label (label2);
3602 	  do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
3603 	  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3604 			      quotient, 0, OPTAB_LIB_WIDEN);
3605 	  if (tem != quotient)
3606 	    emit_move_insn (quotient, tem);
3607 	  emit_jump_insn (gen_jump (label5));
3608 	  emit_barrier ();
3609 	  emit_label (label3);
3610 	  expand_dec (adjusted_op0, const1_rtx);
3611 	  emit_label (label4);
3612 	  tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3613 			      quotient, 0, OPTAB_LIB_WIDEN);
3614 	  if (tem != quotient)
3615 	    emit_move_insn (quotient, tem);
3616 	  expand_dec (quotient, const1_rtx);
3617 	  emit_label (label5);
3618 	}
3619 	break;
3620 
3621       case CEIL_DIV_EXPR:
3622       case CEIL_MOD_EXPR:
3623 	if (unsignedp)
3624 	  {
3625 	    if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1)))
3626 	      {
3627 		rtx t1, t2, t3;
3628 		unsigned HOST_WIDE_INT d = INTVAL (op1);
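		/* For example (an illustrative sketch only), with a 32-bit
		   mode and d == 8 this computes
		     q = (n >> 3) + ((n & 7) != 0);
		   i.e. round the shifted quotient up whenever any
		   remainder bit is set.  */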
3629 		t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3630 				   build_int_2 (floor_log2 (d), 0),
3631 				   tquotient, 1);
3632 		t2 = expand_binop (compute_mode, and_optab, op0,
3633 				   GEN_INT (d - 1),
3634 				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
3635 		t3 = gen_reg_rtx (compute_mode);
3636 		t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3637 				      compute_mode, 1, 1);
3638 		if (t3 == 0)
3639 		  {
3640 		    rtx lab;
3641 		    lab = gen_label_rtx ();
3642 		    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3643 		    expand_inc (t1, const1_rtx);
3644 		    emit_label (lab);
3645 		    quotient = t1;
3646 		  }
3647 		else
3648 		  quotient = force_operand (gen_rtx_PLUS (compute_mode,
3649 							  t1, t3),
3650 					    tquotient);
3651 		break;
3652 	      }
3653 
3654 	    /* Try using an instruction that produces both the quotient and
3655 	       remainder, using truncation.  We can easily compensate the
3656 	       quotient or remainder to get ceiling rounding, once we have the
3657 	       remainder.  Notice that we also compute the final remainder
3658 	       value here, and return the result right away.  */
3659 	    if (target == 0 || GET_MODE (target) != compute_mode)
3660 	      target = gen_reg_rtx (compute_mode);
3661 
3662 	    if (rem_flag)
3663 	      {
3664 		remainder = (GET_CODE (target) == REG
3665 			     ? target : gen_reg_rtx (compute_mode));
3666 		quotient = gen_reg_rtx (compute_mode);
3667 	      }
3668 	    else
3669 	      {
3670 		quotient = (GET_CODE (target) == REG
3671 			    ? target : gen_reg_rtx (compute_mode));
3672 		remainder = gen_reg_rtx (compute_mode);
3673 	      }
3674 
3675 	    if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
3676 				     remainder, 1))
3677 	      {
3678 		/* This could be computed with a branch-less sequence.
3679 		   Save that for later.  */
3680 		rtx label = gen_label_rtx ();
3681 		do_cmp_and_jump (remainder, const0_rtx, EQ,
3682 				 compute_mode, label);
3683 		expand_inc (quotient, const1_rtx);
3684 		expand_dec (remainder, op1);
3685 		emit_label (label);
3686 		return gen_lowpart (mode, rem_flag ? remainder : quotient);
3687 	      }
3688 
3689 	    /* No luck with division elimination or divmod.  Have to do it
3690 	       by conditionally adjusting op0 *and* the result.  */
3691 	    {
3692 	      rtx label1, label2;
3693 	      rtx adjusted_op0, tem;
3694 
3695 	      quotient = gen_reg_rtx (compute_mode);
3696 	      adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3697 	      label1 = gen_label_rtx ();
3698 	      label2 = gen_label_rtx ();
3699 	      do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
3700 			       compute_mode, label1);
3701 	      emit_move_insn (quotient, const0_rtx);
3702 	      emit_jump_insn (gen_jump (label2));
3703 	      emit_barrier ();
3704 	      emit_label (label1);
3705 	      expand_dec (adjusted_op0, const1_rtx);
3706 	      tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
3707 				  quotient, 1, OPTAB_LIB_WIDEN);
3708 	      if (tem != quotient)
3709 		emit_move_insn (quotient, tem);
3710 	      expand_inc (quotient, const1_rtx);
3711 	      emit_label (label2);
3712 	    }
3713 	  }
3714 	else /* signed */
3715 	  {
3716 	    if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
3717 		&& INTVAL (op1) >= 0)
3718 	      {
3719 		/* This is extremely similar to the code for the unsigned case
3720 		   above.  For 2.7 we should merge these variants, but for
3721 		   2.6.1 I don't want to touch the code for unsigned since that
3722 		   gets used in C.  The signed case will only be used by other
3723 		   languages (Ada).  */
3724 
3725 		rtx t1, t2, t3;
3726 		unsigned HOST_WIDE_INT d = INTVAL (op1);
3727 		t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3728 				   build_int_2 (floor_log2 (d), 0),
3729 				   tquotient, 0);
3730 		t2 = expand_binop (compute_mode, and_optab, op0,
3731 				   GEN_INT (d - 1),
3732 				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
3733 		t3 = gen_reg_rtx (compute_mode);
3734 		t3 = emit_store_flag (t3, NE, t2, const0_rtx,
3735 				      compute_mode, 1, 1);
3736 		if (t3 == 0)
3737 		  {
3738 		    rtx lab;
3739 		    lab = gen_label_rtx ();
3740 		    do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
3741 		    expand_inc (t1, const1_rtx);
3742 		    emit_label (lab);
3743 		    quotient = t1;
3744 		  }
3745 		else
3746 		  quotient = force_operand (gen_rtx_PLUS (compute_mode,
3747 							  t1, t3),
3748 					    tquotient);
3749 		break;
3750 	      }
3751 
3752 	    /* Try using an instruction that produces both the quotient and
3753 	       remainder, using truncation.  We can easily compensate the
3754 	       quotient or remainder to get ceiling rounding, once we have the
3755 	       remainder.  Notice that we also compute the final remainder
3756 	       value here, and return the result right away.  */
3757 	    if (target == 0 || GET_MODE (target) != compute_mode)
3758 	      target = gen_reg_rtx (compute_mode);
3759 	    if (rem_flag)
3760 	      {
3761 		remainder = (GET_CODE (target) == REG
3762 			    ? target : gen_reg_rtx (compute_mode));
3763 		quotient = gen_reg_rtx (compute_mode);
3764 	      }
3765 	    else
3766 	      {
3767 		quotient = (GET_CODE (target) == REG
3768 			    ? target : gen_reg_rtx (compute_mode));
3769 		remainder = gen_reg_rtx (compute_mode);
3770 	      }
3771 
3772 	    if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
3773 				     remainder, 0))
3774 	      {
3775 		/* This could be computed with a branch-less sequence.
3776 		   Save that for later.  */
3777 		rtx tem;
3778 		rtx label = gen_label_rtx ();
3779 		do_cmp_and_jump (remainder, const0_rtx, EQ,
3780 				 compute_mode, label);
3781 		tem = expand_binop (compute_mode, xor_optab, op0, op1,
3782 				    NULL_RTX, 0, OPTAB_WIDEN);
3783 		do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
3784 		expand_inc (quotient, const1_rtx);
3785 		expand_dec (remainder, op1);
3786 		emit_label (label);
3787 		return gen_lowpart (mode, rem_flag ? remainder : quotient);
3788 	      }
3789 
3790 	    /* No luck with division elimination or divmod.  Have to do it
3791 	       by conditionally adjusting op0 *and* the result.  */
3792 	    {
3793 	      rtx label1, label2, label3, label4, label5;
3794 	      rtx adjusted_op0;
3795 	      rtx tem;
3796 
3797 	      quotient = gen_reg_rtx (compute_mode);
3798 	      adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
3799 	      label1 = gen_label_rtx ();
3800 	      label2 = gen_label_rtx ();
3801 	      label3 = gen_label_rtx ();
3802 	      label4 = gen_label_rtx ();
3803 	      label5 = gen_label_rtx ();
3804 	      do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
3805 	      do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
3806 			       compute_mode, label1);
3807 	      tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3808 				  quotient, 0, OPTAB_LIB_WIDEN);
3809 	      if (tem != quotient)
3810 		emit_move_insn (quotient, tem);
3811 	      emit_jump_insn (gen_jump (label5));
3812 	      emit_barrier ();
3813 	      emit_label (label1);
3814 	      expand_dec (adjusted_op0, const1_rtx);
3815 	      emit_jump_insn (gen_jump (label4));
3816 	      emit_barrier ();
3817 	      emit_label (label2);
3818 	      do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
3819 			       compute_mode, label3);
3820 	      tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3821 				  quotient, 0, OPTAB_LIB_WIDEN);
3822 	      if (tem != quotient)
3823 		emit_move_insn (quotient, tem);
3824 	      emit_jump_insn (gen_jump (label5));
3825 	      emit_barrier ();
3826 	      emit_label (label3);
3827 	      expand_inc (adjusted_op0, const1_rtx);
3828 	      emit_label (label4);
3829 	      tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
3830 				  quotient, 0, OPTAB_LIB_WIDEN);
3831 	      if (tem != quotient)
3832 		emit_move_insn (quotient, tem);
3833 	      expand_inc (quotient, const1_rtx);
3834 	      emit_label (label5);
3835 	    }
3836 	  }
3837 	break;
3838 
3839       case EXACT_DIV_EXPR:
3840 	if (op1_is_constant && HOST_BITS_PER_WIDE_INT >= size)
3841 	  {
3842 	    HOST_WIDE_INT d = INTVAL (op1);
3843 	    unsigned HOST_WIDE_INT ml;
3844 	    int pre_shift;
3845 	    rtx t1;
3846 
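	    /* This relies on the multiplicative inverse of the odd part
	       of D modulo 2**size.  An illustrative sketch, assuming a
	       32-bit mode and n / 9 with n a known multiple of 9:
	       invert_mod2n returns 0x38E38E39, and since
	       9 * 0x38E38E39 == 2**33 + 1 == 1 (mod 2**32), we get
	       q = n * 0x38E38E39 with one truncating multiply.  */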
3847 	    pre_shift = floor_log2 (d & -d);
3848 	    ml = invert_mod2n (d >> pre_shift, size);
3849 	    t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
3850 			       build_int_2 (pre_shift, 0), NULL_RTX, unsignedp);
3851 	    quotient = expand_mult (compute_mode, t1,
3852 				    gen_int_mode (ml, compute_mode),
3853 				    NULL_RTX, 0);
3854 
3855 	    insn = get_last_insn ();
3856 	    set_unique_reg_note (insn,
3857 				 REG_EQUAL,
3858 				 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
3859 						 compute_mode,
3860 						 op0, op1));
3861 	  }
3862 	break;
3863 
3864       case ROUND_DIV_EXPR:
3865       case ROUND_MOD_EXPR:
3866 	if (unsignedp)
3867 	  {
3868 	    rtx tem;
3869 	    rtx label;
3870 	    label = gen_label_rtx ();
3871 	    quotient = gen_reg_rtx (compute_mode);
3872 	    remainder = gen_reg_rtx (compute_mode);
3873 	    if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
3874 	      {
3875 		rtx tem;
3876 		quotient = expand_binop (compute_mode, udiv_optab, op0, op1,
3877 					 quotient, 1, OPTAB_LIB_WIDEN);
3878 		tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 1);
3879 		remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3880 					  remainder, 1, OPTAB_LIB_WIDEN);
3881 	      }
3882 	    tem = plus_constant (op1, -1);
3883 	    tem = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3884 				build_int_2 (1, 0), NULL_RTX, 1);
3885 	    do_cmp_and_jump (remainder, tem, LEU, compute_mode, label);
3886 	    expand_inc (quotient, const1_rtx);
3887 	    expand_dec (remainder, op1);
3888 	    emit_label (label);
3889 	  }
3890 	else
3891 	  {
3892 	    rtx abs_rem, abs_op1, tem, mask;
3893 	    rtx label;
3894 	    label = gen_label_rtx ();
3895 	    quotient = gen_reg_rtx (compute_mode);
3896 	    remainder = gen_reg_rtx (compute_mode);
3897 	    if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
3898 	      {
3899 		rtx tem;
3900 		quotient = expand_binop (compute_mode, sdiv_optab, op0, op1,
3901 					 quotient, 0, OPTAB_LIB_WIDEN);
3902 		tem = expand_mult (compute_mode, quotient, op1, NULL_RTX, 0);
3903 		remainder = expand_binop (compute_mode, sub_optab, op0, tem,
3904 					  remainder, 0, OPTAB_LIB_WIDEN);
3905 	      }
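	    /* Branchless rounding adjustment (a sketch of what follows):
	       with mask = (n ^ d) >> (size - 1), the value
	       (mask ^ 1) - mask is the sign of the quotient, +1 or -1.
	       When 2 * abs (rem) >= abs (d), add it to the quotient and
	       subtract (mask ^ d) - mask, i.e. sign (quotient) * d, from
	       the remainder, rounding the result away from zero.  */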
3906 	    abs_rem = expand_abs (compute_mode, remainder, NULL_RTX, 1, 0);
3907 	    abs_op1 = expand_abs (compute_mode, op1, NULL_RTX, 1, 0);
3908 	    tem = expand_shift (LSHIFT_EXPR, compute_mode, abs_rem,
3909 				build_int_2 (1, 0), NULL_RTX, 1);
3910 	    do_cmp_and_jump (tem, abs_op1, LTU, compute_mode, label);
3911 	    tem = expand_binop (compute_mode, xor_optab, op0, op1,
3912 				NULL_RTX, 0, OPTAB_WIDEN);
3913 	    mask = expand_shift (RSHIFT_EXPR, compute_mode, tem,
3914 				build_int_2 (size - 1, 0), NULL_RTX, 0);
3915 	    tem = expand_binop (compute_mode, xor_optab, mask, const1_rtx,
3916 				NULL_RTX, 0, OPTAB_WIDEN);
3917 	    tem = expand_binop (compute_mode, sub_optab, tem, mask,
3918 				NULL_RTX, 0, OPTAB_WIDEN);
3919 	    expand_inc (quotient, tem);
3920 	    tem = expand_binop (compute_mode, xor_optab, mask, op1,
3921 				NULL_RTX, 0, OPTAB_WIDEN);
3922 	    tem = expand_binop (compute_mode, sub_optab, tem, mask,
3923 				NULL_RTX, 0, OPTAB_WIDEN);
3924 	    expand_dec (remainder, tem);
3925 	    emit_label (label);
3926 	  }
3927 	return gen_lowpart (mode, rem_flag ? remainder : quotient);
3928 
3929       default:
3930 	abort ();
3931       }
3932 
3933   if (quotient == 0)
3934     {
3935       if (target && GET_MODE (target) != compute_mode)
3936 	target = 0;
3937 
3938       if (rem_flag)
3939 	{
3940 	  /* Try to produce the remainder without producing the quotient.
3941 	     If we seem to have a divmod pattern that does not require widening,
3942 	     don't try widening here.  We should really have a WIDEN argument
3943 	     to expand_twoval_binop, since what we'd really like to do here is
3944 	     1) try a mod insn in compute_mode
3945 	     2) try a divmod insn in compute_mode
3946 	     3) try a div insn in compute_mode and multiply-subtract to get
3947 	        remainder
3948 	     4) try the same things with widening allowed.  */
3949 	  remainder
3950 	    = sign_expand_binop (compute_mode, umod_optab, smod_optab,
3951 				 op0, op1, target,
3952 				 unsignedp,
3953 				 ((optab2->handlers[(int) compute_mode].insn_code
3954 				   != CODE_FOR_nothing)
3955 				  ? OPTAB_DIRECT : OPTAB_WIDEN));
3956 	  if (remainder == 0)
3957 	    {
3958 	      /* No luck there.  Can we do remainder and divide at once
3959 		 without a library call?  */
3960 	      remainder = gen_reg_rtx (compute_mode);
3961 	      if (! expand_twoval_binop ((unsignedp
3962 					  ? udivmod_optab
3963 					  : sdivmod_optab),
3964 					 op0, op1,
3965 					 NULL_RTX, remainder, unsignedp))
3966 		remainder = 0;
3967 	    }
3968 
3969 	  if (remainder)
3970 	    return gen_lowpart (mode, remainder);
3971 	}
3972 
3973       /* Produce the quotient.  Try a quotient insn, but not a library call.
3974 	 If we have a divmod in this mode, use it in preference to widening
3975 	 the div (for this test we assume it will not fail).  Note that optab2
3976 	 is set to whichever of the two optabs the call below will use.  */
3977       quotient
3978 	= sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
3979 			     op0, op1, rem_flag ? NULL_RTX : target,
3980 			     unsignedp,
3981 			     ((optab2->handlers[(int) compute_mode].insn_code
3982 			       != CODE_FOR_nothing)
3983 			      ? OPTAB_DIRECT : OPTAB_WIDEN));
3984 
3985       if (quotient == 0)
3986 	{
3987 	  /* No luck there.  Try a quotient-and-remainder insn,
3988 	     keeping the quotient alone.  */
3989 	  quotient = gen_reg_rtx (compute_mode);
3990 	  if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
3991 				     op0, op1,
3992 				     quotient, NULL_RTX, unsignedp))
3993 	    {
3994 	      quotient = 0;
3995 	      if (! rem_flag)
3996 		/* Still no luck.  If we are not computing the remainder,
3997 		   use a library call for the quotient.  */
3998 		quotient = sign_expand_binop (compute_mode,
3999 					      udiv_optab, sdiv_optab,
4000 					      op0, op1, target,
4001 					      unsignedp, OPTAB_LIB_WIDEN);
4002 	    }
4003 	}
4004     }
4005 
4006   if (rem_flag)
4007     {
4008       if (target && GET_MODE (target) != compute_mode)
4009 	target = 0;
4010 
4011       if (quotient == 0)
4012 	/* No divide instruction either.  Use library for remainder.  */
4013 	remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
4014 				       op0, op1, target,
4015 				       unsignedp, OPTAB_LIB_WIDEN);
4016       else
4017 	{
4018 	  /* We divided.  Now finish doing X - Y * (X / Y).  */
4019 	  remainder = expand_mult (compute_mode, quotient, op1,
4020 				   NULL_RTX, unsignedp);
4021 	  remainder = expand_binop (compute_mode, sub_optab, op0,
4022 				    remainder, target, unsignedp,
4023 				    OPTAB_LIB_WIDEN);
4024 	}
4025     }
4026 
4027   return gen_lowpart (mode, rem_flag ? remainder : quotient);
4028 }
4029 
4030 /* Return a tree node with data type TYPE, describing the value of X.
4031    Usually this is an RTL_EXPR, if there is no obvious better choice.
4032    X may be an expression; however, we only support those expressions
4033    generated by loop.c.  */
4034 
4035 tree
4036 make_tree (type, x)
4037      tree type;
4038      rtx x;
4039 {
4040   tree t;
4041 
4042   switch (GET_CODE (x))
4043     {
4044     case CONST_INT:
4045       t = build_int_2 (INTVAL (x),
4046 		       (TREE_UNSIGNED (type)
4047 			&& (GET_MODE_BITSIZE (TYPE_MODE (type)) < HOST_BITS_PER_WIDE_INT))
4048 		       || INTVAL (x) >= 0 ? 0 : -1);
4049       TREE_TYPE (t) = type;
4050       return t;
4051 
4052     case CONST_DOUBLE:
4053       if (GET_MODE (x) == VOIDmode)
4054 	{
4055 	  t = build_int_2 (CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
4056 	  TREE_TYPE (t) = type;
4057 	}
4058       else
4059 	{
4060 	  REAL_VALUE_TYPE d;
4061 
4062 	  REAL_VALUE_FROM_CONST_DOUBLE (d, x);
4063 	  t = build_real (type, d);
4064 	}
4065 
4066       return t;
4067 
4068     case CONST_VECTOR:
4069       {
4070 	int i, units;
4071 	rtx elt;
4072 	tree t = NULL_TREE;
4073 
4074 	units = CONST_VECTOR_NUNITS (x);
4075 
4076 	/* Build a tree with vector elements.  */
4077 	for (i = units - 1; i >= 0; --i)
4078 	  {
4079 	    elt = CONST_VECTOR_ELT (x, i);
4080 	    t = tree_cons (NULL_TREE, make_tree (type, elt), t);
4081 	  }
4082 
4083 	return build_vector (type, t);
4084       }
4085 
4086     case PLUS:
4087       return fold (build (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4088 			  make_tree (type, XEXP (x, 1))));
4089 
4090     case MINUS:
4091       return fold (build (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
4092 			  make_tree (type, XEXP (x, 1))));
4093 
4094     case NEG:
4095       return fold (build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0))));
4096 
4097     case MULT:
4098       return fold (build (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
4099 			  make_tree (type, XEXP (x, 1))));
4100 
4101     case ASHIFT:
4102       return fold (build (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
4103 			  make_tree (type, XEXP (x, 1))));
4104 
4105     case LSHIFTRT:
4106       t = (*lang_hooks.types.unsigned_type) (type);
4107       return fold (convert (type,
4108 			    build (RSHIFT_EXPR, t,
4109 				   make_tree (t, XEXP (x, 0)),
4110 				   make_tree (type, XEXP (x, 1)))));
4111 
4112     case ASHIFTRT:
4113       t = (*lang_hooks.types.signed_type) (type);
4114       return fold (convert (type,
4115 			    build (RSHIFT_EXPR, t,
4116 				   make_tree (t, XEXP (x, 0)),
4117 				   make_tree (type, XEXP (x, 1)))));
4118 
4119     case DIV:
4120       if (TREE_CODE (type) != REAL_TYPE)
4121 	t = (*lang_hooks.types.signed_type) (type);
4122       else
4123 	t = type;
4124 
4125       return fold (convert (type,
4126 			    build (TRUNC_DIV_EXPR, t,
4127 				   make_tree (t, XEXP (x, 0)),
4128 				   make_tree (t, XEXP (x, 1)))));
4129     case UDIV:
4130       t = (*lang_hooks.types.unsigned_type) (type);
4131       return fold (convert (type,
4132 			    build (TRUNC_DIV_EXPR, t,
4133 				   make_tree (t, XEXP (x, 0)),
4134 				   make_tree (t, XEXP (x, 1)))));
4135 
4136     case SIGN_EXTEND:
4137     case ZERO_EXTEND:
4138       t = (*lang_hooks.types.type_for_mode) (GET_MODE (XEXP (x, 0)),
4139 					     GET_CODE (x) == ZERO_EXTEND);
4140       return fold (convert (type, make_tree (t, XEXP (x, 0))));
4141 
4142     default:
4143       t = make_node (RTL_EXPR);
4144       TREE_TYPE (t) = type;
4145 
4146 #ifdef POINTERS_EXTEND_UNSIGNED
4147       /* If TYPE is a POINTER_TYPE, X might be Pmode with TYPE_MODE being
4148 	 ptr_mode.  So convert.  */
4149       if (POINTER_TYPE_P (type) && GET_MODE (x) != TYPE_MODE (type))
4150 	x = convert_memory_address (TYPE_MODE (type), x);
4151 #endif
4152 
4153       RTL_EXPR_RTL (t) = x;
4154       /* There are no insns to be output
4155 	 when this rtl_expr is used.  */
4156       RTL_EXPR_SEQUENCE (t) = 0;
4157       return t;
4158     }
4159 }
4160 
4161 /* Check whether the multiplication X * MULT + ADD overflows.
4162    X, MULT and ADD must be CONST_*.
4163    MODE is the machine mode for the computation.
4164    X and MULT must have mode MODE.  ADD may have a different mode;
4165    if ADD is VOIDmode, it is taken to have mode MODE.
4166    UNSIGNEDP is nonzero to do unsigned multiplication.  */
4167 
4168 bool
4169 const_mult_add_overflow_p (x, mult, add, mode, unsignedp)
4170      rtx x, mult, add;
4171      enum machine_mode mode;
4172      int unsignedp;
4173 {
4174   tree type, mult_type, add_type, result;
4175 
4176   type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
4177 
4178   /* In order to get a proper overflow indication from an unsigned
4179      type, we have to pretend that it's a sizetype.  */
4180   mult_type = type;
4181   if (unsignedp)
4182     {
4183       mult_type = copy_node (type);
4184       TYPE_IS_SIZETYPE (mult_type) = 1;
4185     }
4186 
4187   add_type = (GET_MODE (add) == VOIDmode ? mult_type
4188 	      : (*lang_hooks.types.type_for_mode) (GET_MODE (add), unsignedp));
4189 
4190   result = fold (build (PLUS_EXPR, mult_type,
4191 			fold (build (MULT_EXPR, mult_type,
4192 				     make_tree (mult_type, x),
4193 				     make_tree (mult_type, mult))),
4194 			make_tree (add_type, add)));
4195 
4196   return TREE_CONSTANT_OVERFLOW (result);
4197 }
4198 
4199 /* Return an rtx representing the value of X * MULT + ADD.
4200    TARGET is a suggestion for where to store the result (an rtx).
4201    MODE is the machine mode for the computation.
4202    X and MULT must have mode MODE.  ADD may have a different mode;
4203    if ADD is VOIDmode, it is taken to have mode MODE.
4204    UNSIGNEDP is nonzero to do unsigned multiplication.
4205    This may emit insns.  */
4206 
4207 rtx
4208 expand_mult_add (x, target, mult, add, mode, unsignedp)
4209      rtx x, target, mult, add;
4210      enum machine_mode mode;
4211      int unsignedp;
4212 {
4213   tree type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
4214   tree add_type = (GET_MODE (add) == VOIDmode
4215 		   ? type : (*lang_hooks.types.type_for_mode) (GET_MODE (add),
4216 							      unsignedp));
4217   tree result =  fold (build (PLUS_EXPR, type,
4218 			      fold (build (MULT_EXPR, type,
4219 					   make_tree (type, x),
4220 					   make_tree (type, mult))),
4221 			      make_tree (add_type, add)));
4222 
4223   return expand_expr (result, target, VOIDmode, 0);
4224 }
4225 
4226 /* Compute the logical-and of OP0 and OP1, storing it in TARGET
4227    and returning TARGET.
4228 
4229    If TARGET is 0, a pseudo-register or constant is returned.  */
4230 
4231 rtx
4232 expand_and (mode, op0, op1, target)
4233      enum machine_mode mode;
4234      rtx op0, op1, target;
4235 {
4236   rtx tem = 0;
4237 
4238   if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
4239     tem = simplify_binary_operation (AND, mode, op0, op1);
4240   if (tem == 0)
4241     tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
4242 
4243   if (target == 0)
4244     target = tem;
4245   else if (tem != target)
4246     emit_move_insn (target, tem);
4247   return target;
4248 }
4249 
4250 /* Emit a store-flags instruction for comparison CODE on OP0 and OP1
4251    and storing in TARGET.  Normally return TARGET.
4252    Return 0 if that cannot be done.
4253 
4254    MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
4255    it is VOIDmode, they cannot both be CONST_INT.
4256 
4257    UNSIGNEDP is for the case where we have to widen the operands
4258    to perform the operation.  It says to use zero-extension.
4259 
4260    NORMALIZEP is 1 if we should convert the result to be either zero
4261    or one.  NORMALIZEP is -1 if we should convert the result to be
4262    either zero or -1.  If NORMALIZEP is zero, the result will be left
4263    "raw" out of the scc insn.  */
4264 
4265 rtx
4266 emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep)
4267      rtx target;
4268      enum rtx_code code;
4269      rtx op0, op1;
4270      enum machine_mode mode;
4271      int unsignedp;
4272      int normalizep;
4273 {
4274   rtx subtarget;
4275   enum insn_code icode;
4276   enum machine_mode compare_mode;
4277   enum machine_mode target_mode = GET_MODE (target);
4278   rtx tem;
4279   rtx last = get_last_insn ();
4280   rtx pattern, comparison;
4281 
4282   /* ??? Ok to do this and then fail? */
4283   op0 = protect_from_queue (op0, 0);
4284   op1 = protect_from_queue (op1, 0);
4285 
4286   if (unsignedp)
4287     code = unsigned_condition (code);
4288 
4289   /* If one operand is constant, make it the second one.  Only do this
4290      if the other operand is not constant as well.  */
4291 
4292   if (swap_commutative_operands_p (op0, op1))
4293     {
4294       tem = op0;
4295       op0 = op1;
4296       op1 = tem;
4297       code = swap_condition (code);
4298     }
4299 
4300   if (mode == VOIDmode)
4301     mode = GET_MODE (op0);
4302 
4303   /* For some comparisons with 1 and -1, we can convert this to
4304      comparisons with zero.  This will often produce more opportunities for
4305      store-flag insns.  */
4306 
4307   switch (code)
4308     {
4309     case LT:
4310       if (op1 == const1_rtx)
4311 	op1 = const0_rtx, code = LE;
4312       break;
4313     case LE:
4314       if (op1 == constm1_rtx)
4315 	op1 = const0_rtx, code = LT;
4316       break;
4317     case GE:
4318       if (op1 == const1_rtx)
4319 	op1 = const0_rtx, code = GT;
4320       break;
4321     case GT:
4322       if (op1 == constm1_rtx)
4323 	op1 = const0_rtx, code = GE;
4324       break;
4325     case GEU:
4326       if (op1 == const1_rtx)
4327 	op1 = const0_rtx, code = NE;
4328       break;
4329     case LTU:
4330       if (op1 == const1_rtx)
4331 	op1 = const0_rtx, code = EQ;
4332       break;
4333     default:
4334       break;
4335     }
4336 
4337   /* If we are comparing a double-word integer with zero, we can convert
4338      the comparison into one involving a single word.  */
4339   if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD * 2
4340       && GET_MODE_CLASS (mode) == MODE_INT
4341       && op1 == const0_rtx
4342       && (GET_CODE (op0) != MEM || ! MEM_VOLATILE_P (op0)))
4343     {
4344       if (code == EQ || code == NE)
4345 	{
4346 	  rtx op00, op01, op0both;
4347 
4348 	  /* Do a logical OR of the two words and compare the result.  */
4349 	  op00 = simplify_gen_subreg (word_mode, op0, mode, 0);
4350 	  op01 = simplify_gen_subreg (word_mode, op0, mode, UNITS_PER_WORD);
4351 	  op0both = expand_binop (word_mode, ior_optab, op00, op01,
4352 				  NULL_RTX, unsignedp, OPTAB_DIRECT);
4353 	  if (op0both != 0)
4354 	    return emit_store_flag (target, code, op0both, op1, word_mode,
4355 				    unsignedp, normalizep);
4356 	}
4357       else if (code == LT || code == GE)
4358 	{
4359 	  rtx op0h;
4360 
4361 	  /* If testing the sign bit, can just test on high word.  */
4362 	  op0h = simplify_gen_subreg (word_mode, op0, mode,
4363 				      subreg_highpart_offset (word_mode, mode));
4364 	  return emit_store_flag (target, code, op0h, op1, word_mode,
4365 				  unsignedp, normalizep);
4366 	}
4367     }
4368 
4369   /* From now on, we won't change CODE, so set ICODE now.  */
4370   icode = setcc_gen_code[(int) code];
4371 
4372   /* If this is A < 0 or A >= 0, we can do this by taking the ones
4373      complement of A (for GE) and shifting the sign bit to the low bit.  */
4374   if (op1 == const0_rtx && (code == LT || code == GE)
4375       && GET_MODE_CLASS (mode) == MODE_INT
4376       && (normalizep || STORE_FLAG_VALUE == 1
4377 	  || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4378 	      && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4379 		  == (HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))))
4380     {
4381       subtarget = target;
4382 
4383       /* If the result is to be wider than OP0, it is best to convert it
4384 	 first.  If it is to be narrower, it is *incorrect* to convert it
4385 	 first.  */
4386       if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
4387 	{
4388 	  op0 = protect_from_queue (op0, 0);
4389 	  op0 = convert_modes (target_mode, mode, op0, 0);
4390 	  mode = target_mode;
4391 	}
4392 
4393       if (target_mode != mode)
4394 	subtarget = 0;
4395 
4396       if (code == GE)
4397 	op0 = expand_unop (mode, one_cmpl_optab, op0,
4398 			   ((STORE_FLAG_VALUE == 1 || normalizep)
4399 			    ? 0 : subtarget), 0);
4400 
4401       if (STORE_FLAG_VALUE == 1 || normalizep)
4402 	/* If we are supposed to produce a 0/1 value, we want to do
4403 	   a logical shift from the sign bit to the low-order bit; for
4404 	   a -1/0 value, we do an arithmetic shift.  */
4405 	op0 = expand_shift (RSHIFT_EXPR, mode, op0,
4406 			    size_int (GET_MODE_BITSIZE (mode) - 1),
4407 			    subtarget, normalizep != -1);
4408 
4409       if (mode != target_mode)
4410 	op0 = convert_modes (target_mode, mode, op0, 0);
4411 
4412       return op0;
4413     }
4414 
4415   if (icode != CODE_FOR_nothing)
4416     {
4417       insn_operand_predicate_fn pred;
4418 
4419       /* We think we may be able to do this with a scc insn.  Emit the
4420 	 comparison and then the scc insn.
4421 
4422 	 compare_from_rtx may call emit_queue, which would be deleted below
4423 	 if the scc insn fails.  So call it ourselves before setting LAST.
4424 	 Likewise for do_pending_stack_adjust.  */
4425 
4426       emit_queue ();
4427       do_pending_stack_adjust ();
4428       last = get_last_insn ();
4429 
4430       comparison
4431 	= compare_from_rtx (op0, op1, code, unsignedp, mode, NULL_RTX);
4432       if (GET_CODE (comparison) == CONST_INT)
4433 	return (comparison == const0_rtx ? const0_rtx
4434 		: normalizep == 1 ? const1_rtx
4435 		: normalizep == -1 ? constm1_rtx
4436 		: const_true_rtx);
4437 
4438       /* The code of COMPARISON may not match CODE if compare_from_rtx
4439 	 decided to swap its operands and reverse the original code.
4440 
4441 	 We know that compare_from_rtx returns either a CONST_INT or
4442 	 a new comparison code, so it is safe to just extract the
4443 	 code from COMPARISON.  */
4444       code = GET_CODE (comparison);
4445 
4446       /* Get a reference to the target in the proper mode for this insn.  */
4447       compare_mode = insn_data[(int) icode].operand[0].mode;
4448       subtarget = target;
4449       pred = insn_data[(int) icode].operand[0].predicate;
4450       if (preserve_subexpressions_p ()
4451 	  || ! (*pred) (subtarget, compare_mode))
4452 	subtarget = gen_reg_rtx (compare_mode);
4453 
4454       pattern = GEN_FCN (icode) (subtarget);
4455       if (pattern)
4456 	{
4457 	  emit_insn (pattern);
4458 
4459 	  /* If we are converting to a wider mode, first convert to
4460 	     TARGET_MODE, then normalize.  This produces better combining
4461 	     opportunities on machines that have a SIGN_EXTRACT when we are
4462 	     testing a single bit.  This mostly benefits the 68k.
4463 
4464 	     If STORE_FLAG_VALUE does not have the sign bit set when
4465 	     interpreted in COMPARE_MODE, we can do this conversion as
4466 	     unsigned, which is usually more efficient.  */
4467 	  if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (compare_mode))
4468 	    {
4469 	      convert_move (target, subtarget,
4470 			    (GET_MODE_BITSIZE (compare_mode)
4471 			     <= HOST_BITS_PER_WIDE_INT)
4472 			    && 0 == (STORE_FLAG_VALUE
4473 				     & ((HOST_WIDE_INT) 1
4474 					<< (GET_MODE_BITSIZE (compare_mode) - 1))));
4475 	      op0 = target;
4476 	      compare_mode = target_mode;
4477 	    }
4478 	  else
4479 	    op0 = subtarget;
4480 
4481 	  /* If we want to keep subexpressions around, don't reuse our
4482 	     last target.  */
4483 
4484 	  if (preserve_subexpressions_p ())
4485 	    subtarget = 0;
4486 
4487 	  /* Now normalize to the proper value in COMPARE_MODE.  Sometimes
4488 	     we don't have to do anything.  */
4489 	  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
4490 	    ;
4491 	  /* STORE_FLAG_VALUE might be the most negative number, so write
4492 	     the comparison this way to avoid a compile-time warning.  */
4493 	  else if (- normalizep == STORE_FLAG_VALUE)
4494 	    op0 = expand_unop (compare_mode, neg_optab, op0, subtarget, 0);
4495 
4496 	  /* We don't want to use STORE_FLAG_VALUE < 0 below since this
4497 	     makes it hard to use a value of just the sign bit due to
4498 	     ANSI integer constant typing rules.  */
4499 	  else if (GET_MODE_BITSIZE (compare_mode) <= HOST_BITS_PER_WIDE_INT
4500 		   && (STORE_FLAG_VALUE
4501 		       & ((HOST_WIDE_INT) 1
4502 			  << (GET_MODE_BITSIZE (compare_mode) - 1))))
4503 	    op0 = expand_shift (RSHIFT_EXPR, compare_mode, op0,
4504 				size_int (GET_MODE_BITSIZE (compare_mode) - 1),
4505 				subtarget, normalizep == 1);
4506 	  else if (STORE_FLAG_VALUE & 1)
4507 	    {
4508 	      op0 = expand_and (compare_mode, op0, const1_rtx, subtarget);
4509 	      if (normalizep == -1)
4510 		op0 = expand_unop (compare_mode, neg_optab, op0, op0, 0);
4511 	    }
4512 	  else
4513 	    abort ();
4514 
4515 	  /* If we were converting to a smaller mode, do the
4516 	     conversion now.  */
4517 	  if (target_mode != compare_mode)
4518 	    {
4519 	      convert_move (target, op0, 0);
4520 	      return target;
4521 	    }
4522 	  else
4523 	    return op0;
4524 	}
4525     }
4526 
4527   delete_insns_since (last);
4528 
4529   /* If doing expensive optimizations, use different pseudo registers for
4530      each insn, instead of reusing the same pseudo.  This leads to better
4531      CSE, but slows down the compiler, since there are more pseudos.  */
4532   subtarget = (!flag_expensive_optimizations
4533 	       && (target_mode == mode)) ? target : NULL_RTX;
4534 
4535   /* If we reached here, we can't do this with a scc insn.  However, there
4536      are some comparisons that can be done directly.  For example, if
4537      this is an equality comparison of integers, we can try to exclusive-or
4538      (or subtract) the two operands and use a recursive call to try the
4539      comparison with zero.  Don't do any of these cases if branches are
4540      very cheap.  */
4541 
4542   if (BRANCH_COST > 0
4543       && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
4544       && op1 != const0_rtx)
4545     {
4546       tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
4547 			  OPTAB_WIDEN);
4548 
4549       if (tem == 0)
4550 	tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
4551 			    OPTAB_WIDEN);
4552       if (tem != 0)
4553 	tem = emit_store_flag (target, code, tem, const0_rtx,
4554 			       mode, unsignedp, normalizep);
4555       if (tem == 0)
4556 	delete_insns_since (last);
4557       return tem;
4558     }
4559 
4560   /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
4561      the constant zero.  Reject all other comparisons at this point.  Only
4562      do LE and GT if branches are expensive, since these codes are costly on
4563      2-operand machines.  */
4564 
4565   if (BRANCH_COST == 0
4566       || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
4567       || (code != EQ && code != NE
4568 	  && (BRANCH_COST <= 1 || (code != LE && code != GT))))
4569     return 0;
4570 
4571   /* See what we need to return.  We can only return a 1, -1, or the
4572      sign bit.  */
4573 
4574   if (normalizep == 0)
4575     {
4576       if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
4577 	normalizep = STORE_FLAG_VALUE;
4578 
4579       else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4580 	       && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
4581 		   == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
4582 	;
4583       else
4584 	return 0;
4585     }

  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
			  OPTAB_WIDEN);
      if (tem)
	tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
			    OPTAB_WIDEN);
    }
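
  /* Minimal sketch, not in the original file and compiled out, of the
     A <= 0 identity used above: (A | (A - 1)) has its sign bit set
     exactly when A <= 0.  For A > 0 both A and A - 1 are nonnegative;
     for A < 0 the sign bit of A survives the OR; for A == 0, A - 1 is
     all ones.  Unsigned arithmetic keeps the sketch free of signed
     overflow; it assumes 8-bit bytes and two's complement, and the
     helper name is hypothetical.  */
#if 0
  static int
  demo_le_zero (int a)
  {
    unsigned int ua = (unsigned int) a;
    return (ua | (ua - 1)) >> (sizeof (int) * 8 - 1);
  }
#endif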

  /* To see if A > 0, compute (((signed) A) >> BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_shift (RSHIFT_EXPR, mode, op0,
			  size_int (GET_MODE_BITSIZE (mode) - 1),
			  subtarget, 0);
      tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
			  OPTAB_WIDEN);
    }
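
  /* Minimal sketch, not in the original file and compiled out, of the
     A > 0 identity used above: with BITS being the mode width minus
     one, the arithmetic shift A >> BITS yields 0 for A >= 0 and -1 for
     A < 0, so ((A >> BITS) - A) has its sign bit set exactly when
     A > 0.  Assumes 8-bit bytes, two's complement, and an arithmetic
     right shift on signed int; the helper name is hypothetical.  */
#if 0
  static int
  demo_gt_zero (int a)
  {
    int bits = sizeof (int) * 8 - 1;
    unsigned int sign = (unsigned int) (a >> bits);	/* 0 or ~0 */
    return (sign - (unsigned int) a) >> bits;
  }
#endif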

  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
	 that converts the operand into a positive number if it is nonzero
	 or zero if it was originally zero.  Then, for EQ, we subtract 1 and
	 for NE we negate.  This puts the result in the sign bit.  Then we
	 normalize with a shift, if needed.

	 Two operations that can do the above actions are ABS and FFS, so try
	 them.  If that doesn't work, and MODE is smaller than a full word,
	 we can use zero-extension to the wider mode (an unsigned conversion)
	 as the operation.  */

      /* Note that ABS doesn't yield a positive number for INT_MIN, but
	 that is compensated by the subsequent overflow when subtracting
	 one / negating.  */

      if (abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (ffs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
	{
	  op0 = protect_from_queue (op0, 0);
	  tem = convert_modes (word_mode, mode, op0, 1);
	  mode = word_mode;
	}

      if (tem != 0)
	{
	  if (code == EQ)
	    tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
				0, OPTAB_WIDEN);
	  else
	    tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
	}
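
      /* Illustrative sketch, not in the original file and compiled out,
	 of the ABS-based variant above for NE: abs (a) is nonnegative
	 for every a except INT_MIN, where it stays INT_MIN, so
	 -abs (a) has its sign bit set exactly when a != 0 (the INT_MIN
	 case still lands sign-set, as noted above).  Subtracting 1
	 instead of negating gives the EQ variant.  Unsigned arithmetic
	 avoids signed overflow; the helper name is hypothetical.  */
#if 0
      static int
      demo_ne_zero_via_abs (int a)
      {
	unsigned int ua = a < 0 ? 0u - (unsigned int) a : (unsigned int) a;
	return (0u - ua) >> (sizeof (int) * 8 - 1);
      }
#endif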

      /* If we couldn't do it that way, for NE we can "or" the two's complement
	 of the value with itself.  For EQ, we take the one's complement of
	 that "or", which is an extra insn, so we only handle EQ if branches
	 are expensive.  */

      if (tem == 0 && (code == NE || BRANCH_COST > 1))
	{
	  if (rtx_equal_p (subtarget, op0))
	    subtarget = 0;

	  tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
	  tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
			      OPTAB_WIDEN);

	  if (tem && code == EQ)
	    tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
	}
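
      /* Illustrative sketch, not in the original file and compiled out,
	 of the (-A | A) trick above: for nonzero a at least one of a
	 and -a has its sign bit set (both do for INT_MIN), so
	 (-a | a) is negative exactly when a != 0; complementing the
	 result gives the EQ variant.  Unsigned arithmetic avoids
	 signed overflow; the helper name is hypothetical.  */
#if 0
      static int
      demo_ne_zero_via_or (int a)
      {
	unsigned int ua = (unsigned int) a;
	return ((0u - ua) | ua) >> (sizeof (int) * 8 - 1);
      }
#endif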
    }

  if (tem && normalizep)
    tem = expand_shift (RSHIFT_EXPR, mode, tem,
			size_int (GET_MODE_BITSIZE (mode) - 1),
			subtarget, normalizep == 1);
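
  /* Minimal sketch, not in the original file and compiled out, of the
     normalization above: once the answer sits in the sign bit, a
     logical right shift by BITS - 1 produces 0 or 1 and an arithmetic
     one produces 0 or -1, matching normalizep values of 1 and -1.
     Assumes 8-bit bytes and an arithmetic right shift on signed int;
     the helper name is hypothetical.  */
#if 0
  static int
  demo_normalize (int tem, int normalizep)
  {
    int bits = sizeof (int) * 8;
    if (normalizep == 1)
      return (int) ((unsigned int) tem >> (bits - 1));	/* 0 or 1 */
    return tem >> (bits - 1);				/* 0 or -1 */
  }
#endif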

  if (tem)
    {
      if (GET_MODE (tem) != target_mode)
	{
	  convert_move (target, tem, 0);
	  tem = target;
	}
      else if (!subtarget)
	{
	  emit_move_insn (target, tem);
	  tem = target;
	}
    }
  else
    delete_insns_since (last);

  return tem;
}

/* Like emit_store_flag, but always succeeds.  */

rtx
emit_store_flag_force (target, code, op0, op1, mode, unsignedp, normalizep)
     rtx target;
     enum rtx_code code;
     rtx op0, op1;
     enum machine_mode mode;
     int unsignedp;
     int normalizep;
{
  rtx tem, label;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  if (normalizep == 0)
    normalizep = 1;

  /* If this failed, we have to do this with set/compare/jump/set code.  */

  if (GET_CODE (target) != REG
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  emit_move_insn (target, const1_rtx);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
			   NULL_RTX, label);

  emit_move_insn (target, const0_rtx);
  emit_label (label);

  return target;
}
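
/* Illustrative sketch, not part of the original source and compiled
   out: the set/compare/jump/set shape emitted above, written as
   ordinary C for a GT comparison with normalizep == 1.  Store the
   "true" value first, then branch over the store that clears it when
   the condition holds.  The helper name is hypothetical.  */
#if 0
static int
demo_store_flag_force (int op0, int op1)
{
  int target = 1;
  if (op0 > op1)
    goto done;		/* do_compare_rtx_and_jump's branch to LABEL */
  target = 0;		/* reached only when the comparison fails */
 done:
  return target;
}
#endif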

/* Perform a possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 is true, where ARG1 and ARG2 are of mode MODE.

   The algorithm is based on the code in expr.c:do_jump.

   Note that this does not perform a general comparison.  Only variants
   generated within expmed.c are correctly handled; others abort (but could
   be handled if needed).  */

static void
do_cmp_and_jump (arg1, arg2, op, mode, label)
     rtx arg1, arg2, label;
     enum rtx_code op;
     enum machine_mode mode;
{
  /* If this mode is an integer too wide to compare properly,
     compare word by word.  Rely on cse to optimize constant cases.  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && ! can_compare_p (op, mode, ccp_jump))
    {
      rtx label2 = gen_label_rtx ();

      switch (op)
	{
	case LTU:
	  do_jump_by_parts_greater_rtx (mode, 1, arg2, arg1, label2, label);
	  break;

	case LEU:
	  do_jump_by_parts_greater_rtx (mode, 1, arg1, arg2, label, label2);
	  break;

	case LT:
	  do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label2, label);
	  break;

	case GT:
	  do_jump_by_parts_greater_rtx (mode, 0, arg1, arg2, label2, label);
	  break;

	case GE:
	  do_jump_by_parts_greater_rtx (mode, 0, arg2, arg1, label, label2);
	  break;

	  /* do_jump_by_parts_equality_rtx compares with zero.  Luckily
	     those are the only equality operations we do.  */
	case EQ:
	  if (arg2 != const0_rtx || mode != GET_MODE (arg1))
	    abort ();
	  do_jump_by_parts_equality_rtx (arg1, label2, label);
	  break;

	case NE:
	  if (arg2 != const0_rtx || mode != GET_MODE (arg1))
	    abort ();
	  do_jump_by_parts_equality_rtx (arg1, label, label2);
	  break;

	default:
	  abort ();
	}

      emit_label (label2);
    }
  else
    emit_cmp_and_jump_insns (arg1, arg2, op, NULL_RTX, mode, 0, label);
}
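
/* Illustrative sketch, not part of the original source and compiled
   out: the word-by-word idea the switch above feeds into
   do_jump_by_parts_greater_rtx, shown for an unsigned two-word LTU.
   Compare the high words first and fall back to the low words on a
   tie; the helper name and the two-word split are hypothetical.  */
#if 0
static int
demo_ltu_by_parts (unsigned long a_hi, unsigned long a_lo,
		   unsigned long b_hi, unsigned long b_lo)
{
  if (a_hi != b_hi)
    return a_hi < b_hi;
  return a_lo < b_lo;
}
#endif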