1 /* Array prefetching.
2    Copyright (C) 2005-2013 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "tm_p.h"
26 #include "basic-block.h"
27 #include "tree-pretty-print.h"
28 #include "tree-flow.h"
29 #include "cfgloop.h"
30 #include "tree-pass.h"
31 #include "insn-config.h"
32 #include "hashtab.h"
33 #include "tree-chrec.h"
34 #include "tree-scalar-evolution.h"
35 #include "diagnostic-core.h"
36 #include "params.h"
37 #include "langhooks.h"
38 #include "tree-inline.h"
39 #include "tree-data-ref.h"
40 
41 
42 /* FIXME: Needed for optabs, but this should all be moved to a TBD interface
43    between the GIMPLE and RTL worlds.  */
44 #include "expr.h"
45 #include "optabs.h"
46 #include "recog.h"
47 
48 /* This pass inserts prefetch instructions to optimize cache usage during
49    accesses to arrays in loops.  It processes loops sequentially and:
50 
51    1) Gathers all memory references in the loop.
52    2) For each of the references it decides when it is profitable to prefetch
53       it.  To do this, we evaluate the reuse among the accesses and determine
54       two values: PREFETCH_BEFORE (meaning that it only makes sense to do
55       prefetching in the first PREFETCH_BEFORE iterations of the loop) and
56       PREFETCH_MOD (meaning that it only makes sense to prefetch in the
57       iterations of the loop that are zero modulo PREFETCH_MOD).  For example
58       (assuming cache line size is 64 bytes, char has size 1 byte and there
59       is no hardware sequential prefetch):
60 
61       char *a;
62       for (i = 0; i < max; i++)
63 	{
64 	  a[255] = ...;		(0)
65 	  a[i] = ...;		(1)
66 	  a[i + 64] = ...;	(2)
67 	  a[16*i] = ...;	(3)
68 	  a[187*i] = ...;	(4)
69 	  a[187*i + 50] = ...;	(5)
70 	}
71 
72        (0) obviously has PREFETCH_BEFORE 1
73        (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
74            location 64 iterations before it, and PREFETCH_MOD 64 (since
75 	   it hits the same cache line otherwise).
76        (2) has PREFETCH_MOD 64
77        (3) has PREFETCH_MOD 4
78        (4) has PREFETCH_MOD 1.  We do not set PREFETCH_BEFORE here, since
79            the cache line accessed by (5) is the same with probability only
80 	   7/32.
81        (5) has PREFETCH_MOD 1 as well.
82 
83       Additionally, we use data dependence analysis to determine for each
84       reference the distance till the first reuse; this information is used
85       to determine the temporality of the issued prefetch instruction.
86 
87    3) We determine how much ahead we need to prefetch.  The number of
88       iterations needed is time to fetch / time spent in one iteration of
89       the loop.  The problem is that we do not know either of these values,
90       so we just make a heuristic guess based on a magic (possibly)
91       target-specific constant and the size of the loop; see the example below.
92 
93    4) Determine which of the references we prefetch.  We take into account
94       that there is a maximum number of simultaneous prefetches (provided
95       by the machine description).  We issue as many prefetches as possible
96       while still within this bound (starting with those with lowest
97       prefetch_mod, since they are responsible for most of the cache
98       misses).
99 
100    5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
101       and PREFETCH_BEFORE requirements (within some bounds), and to avoid
102       prefetching nonaccessed memory.
103       TODO -- actually implement peeling.
104 
105    6) We actually emit the prefetch instructions.  ??? Perhaps emit the
106       prefetch instructions with guards in cases where 5) was not sufficient
107       to satisfy the constraints?
108 
109    A cost model is implemented to determine whether or not prefetching is
110    profitable for a given loop.  The cost model has three heuristics:
111 
112    1. Function trip_count_to_ahead_ratio_too_small_p implements a
113       heuristic that determines whether or not the loop has too few
114       iterations (compared to ahead).  Prefetching is not likely to be
115       beneficial if the trip count to ahead ratio is below a certain
116       minimum.
117 
118    2. Function mem_ref_count_reasonable_p implements a heuristic that
119       determines whether the given loop has enough CPU ops that can be
120       overlapped with cache missing memory ops.  If not, the loop
121       won't benefit from prefetching.  In the implementation,
122       prefetching is not considered beneficial if the ratio between
123       the instruction count and the mem ref count is below a certain
124       minimum.
125 
126    3. Function insn_to_prefetch_ratio_too_small_p implements a
127       heuristic that disables prefetching in a loop if the prefetching
128       cost is above a certain limit.  The relative prefetching cost is
129       estimated by taking the ratio between the prefetch count and the
130    total instruction count (this models the I-cache cost).
131 
132    The limits used in these heuristics are defined as parameters with
133    reasonable default values. Machine-specific default values will be
134    added later.
135 
136    Some other TODO:
137       -- write and use more general reuse analysis (that could be also used
138 	 in other cache aimed loop optimizations)
139       -- make it behave sanely together with the prefetches given by the user
140 	 (now we just ignore them; at the very least we should avoid
141 	 optimizing loops in which the user put their own prefetches)
142       -- we assume cache line size alignment of arrays; this could be
143 	 improved.  */
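
/* Illustration of step 3 above.  The cycle counts here are invented for
   the example (in the pass, the ahead distance is derived from the prefetch
   latency parameter and the estimated size of the loop body): if a cache
   miss costs roughly 200 cycles and one iteration of the loop takes roughly
   20 cycles, the data of iteration i must be prefetched about
   200 / 20 = 10 iterations early, i.e. AHEAD = 10.  */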
144 
145 /* Magic constants follow.  These should be replaced by machine specific
146    numbers.  */
147 
148 /* True if write can be prefetched by a read prefetch.  */
149 
150 #ifndef WRITE_CAN_USE_READ_PREFETCH
151 #define WRITE_CAN_USE_READ_PREFETCH 1
152 #endif
153 
154 /* True if read can be prefetched by a write prefetch. */
155 
156 #ifndef READ_CAN_USE_WRITE_PREFETCH
157 #define READ_CAN_USE_WRITE_PREFETCH 0
158 #endif
159 
160 /* The size of the block loaded by a single prefetch.  Usually, this is
161    the same as cache line size (at the moment, we only consider one level
162    of cache hierarchy).  */
163 
164 #ifndef PREFETCH_BLOCK
165 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
166 #endif
167 
168 /* Do we have forward hardware sequential prefetching?  */
169 
170 #ifndef HAVE_FORWARD_PREFETCH
171 #define HAVE_FORWARD_PREFETCH 0
172 #endif
173 
174 /* Do we have backward hardware sequential prefetching?  */
175 
176 #ifndef HAVE_BACKWARD_PREFETCH
177 #define HAVE_BACKWARD_PREFETCH 0
178 #endif
179 
180 /* In some cases we are only able to determine that there is a certain
181    probability that the two accesses hit the same cache line.  In this
182    case, we issue the prefetches for both of them if this probability
183    is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */
184 
185 #ifndef ACCEPTABLE_MISS_RATE
186 #define ACCEPTABLE_MISS_RATE 50
187 #endif
188 
189 #ifndef HAVE_prefetch
190 #define HAVE_prefetch 0
191 #endif
192 
193 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
194 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
195 
196 /* We consider a memory access nontemporal if it is not reused sooner than
197    after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
198    accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
199    so that we use nontemporal prefetches e.g. if a single memory location
200    is accessed several times in a single iteration of the loop.  */
201 #define NONTEMPORAL_FRACTION 16
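
/* For example (both cache sizes are target parameters; the values here are
   only illustrative): with a 32 kB L1 cache and a 512 kB L2 cache, a
   reference is considered nontemporal if at least 512 kB of memory is
   accessed between two uses of it, while reuses closer than
   32 kB / 16 = 2 kB are ignored when the reuse distance is computed.  */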
202 
203 /* In case we have to emit a memory fence instruction after the loop that
204    uses nontemporal stores, this defines the builtin to use.  */
205 
206 #ifndef FENCE_FOLLOWING_MOVNT
207 #define FENCE_FOLLOWING_MOVNT NULL_TREE
208 #endif
209 
210 /* It is not profitable to prefetch when the trip count is not at
211    least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
212    For example, in a loop with a prefetch ahead distance of 10,
213    supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
214    profitable to prefetch when the trip count is greater than or equal to
215    40.  In that case, 30 out of the 40 iterations will benefit from
216    prefetching.  */
217 
218 #ifndef TRIP_COUNT_TO_AHEAD_RATIO
219 #define TRIP_COUNT_TO_AHEAD_RATIO 4
220 #endif
221 
222 /* The group of references between which reuse may occur.  */
223 
224 struct mem_ref_group
225 {
226   tree base;			/* Base of the reference.  */
227   tree step;			/* Step of the reference.  */
228   struct mem_ref *refs;		/* References in the group.  */
229   struct mem_ref_group *next;	/* Next group of references.  */
230 };
231 
232 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */
233 
234 #define PREFETCH_ALL		(~(unsigned HOST_WIDE_INT) 0)
235 
236 /* Do not generate a prefetch if the unroll factor is significantly less
237    than what is required by the prefetch.  This is to avoid redundant
238    prefetches.  For example, when prefetch_mod is 16 and unroll_factor is
239    2, prefetching requires unrolling the loop 16 times, but
240    the loop is actually unrolled twice.  In this case (ratio = 8),
241    prefetching is not likely to be beneficial.  */
242 
243 #ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
244 #define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
245 #endif
246 
247 /* Some of the prefetch computations have quadratic complexity.  We want to
248    avoid huge compile times and, therefore, want to limit the number of
249    memory references per loop for which we consider prefetching.  */
250 
251 #ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
252 #define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
253 #endif
254 
255 /* The memory reference.  */
256 
257 struct mem_ref
258 {
259   gimple stmt;			/* Statement in which the reference appears.  */
260   tree mem;			/* The reference.  */
261   HOST_WIDE_INT delta;		/* Constant offset of the reference.  */
262   struct mem_ref_group *group;	/* The group of references it belongs to.  */
263   unsigned HOST_WIDE_INT prefetch_mod;
264 				/* Prefetch only each PREFETCH_MOD-th
265 				   iteration.  */
266   unsigned HOST_WIDE_INT prefetch_before;
267 				/* Prefetch only first PREFETCH_BEFORE
268 				   iterations.  */
269   unsigned reuse_distance;	/* The amount of data accessed before the first
270 				   reuse of this value.  */
271   struct mem_ref *next;		/* The next reference in the group.  */
272   unsigned write_p : 1;		/* Is it a write?  */
273   unsigned independent_p : 1;	/* True if the reference is independent of
274 				   all other references inside the loop.  */
275   unsigned issue_prefetch_p : 1;	/* Should we really issue the prefetch?  */
276   unsigned storent_p : 1;	/* True if we changed the store to a
277 				   nontemporal one.  */
278 };
279 
280 /* Dumps information about a memory reference to FILE.  */
281 static void
282 dump_mem_details (FILE *file, tree base, tree step,
283 	    HOST_WIDE_INT delta, bool write_p)
284 {
285   fprintf (file, "(base ");
286   print_generic_expr (file, base, TDF_SLIM);
287   fprintf (file, ", step ");
288   if (cst_and_fits_in_hwi (step))
289     fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step));
290   else
291     print_generic_expr (file, step, TDF_TREE);
292   fprintf (file, ")\n");
293   fprintf (file, "  delta ");
294   fprintf (file, HOST_WIDE_INT_PRINT_DEC, delta);
295   fprintf (file, "\n");
296   fprintf (file, "  %s\n", write_p ? "write" : "read");
297   fprintf (file, "\n");
298 }
299 
300 /* Dumps information about reference REF to FILE.  */
301 
302 static void
303 dump_mem_ref (FILE *file, struct mem_ref *ref)
304 {
305   fprintf (file, "Reference %p:\n", (void *) ref);
306 
307   fprintf (file, "  group %p ", (void *) ref->group);
308 
309   dump_mem_details (file, ref->group->base, ref->group->step, ref->delta,
310                    ref->write_p);
311 }
312 
313 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
314    exist.  */
315 
316 static struct mem_ref_group *
317 find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
318 {
319   struct mem_ref_group *group;
320 
321   for (; *groups; groups = &(*groups)->next)
322     {
323       if (operand_equal_p ((*groups)->step, step, 0)
324 	  && operand_equal_p ((*groups)->base, base, 0))
325 	return *groups;
326 
327       /* If step is an integer constant, keep the list of groups sorted
328          by decreasing step.  */
329       if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
330 	  && int_cst_value ((*groups)->step) < int_cst_value (step))
331 	break;
332     }
333 
334   group = XNEW (struct mem_ref_group);
335   group->base = base;
336   group->step = step;
337   group->refs = NULL;
338   group->next = *groups;
339   *groups = group;
340 
341   return group;
342 }
343 
344 /* Records a memory reference MEM in GROUP with offset DELTA and write status
345    WRITE_P.  The reference occurs in statement STMT.  */
346 
347 static void
348 record_ref (struct mem_ref_group *group, gimple stmt, tree mem,
349 	    HOST_WIDE_INT delta, bool write_p)
350 {
351   struct mem_ref **aref;
352 
353   /* Do not record the same address twice.  */
354   for (aref = &group->refs; *aref; aref = &(*aref)->next)
355     {
356       /* It may not be possible for a write reference to reuse a read
357 	 prefetch, or vice versa.  */
358       if (!WRITE_CAN_USE_READ_PREFETCH
359 	  && write_p
360 	  && !(*aref)->write_p)
361 	continue;
362       if (!READ_CAN_USE_WRITE_PREFETCH
363 	  && !write_p
364 	  && (*aref)->write_p)
365 	continue;
366 
367       if ((*aref)->delta == delta)
368 	return;
369     }
370 
371   (*aref) = XNEW (struct mem_ref);
372   (*aref)->stmt = stmt;
373   (*aref)->mem = mem;
374   (*aref)->delta = delta;
375   (*aref)->write_p = write_p;
376   (*aref)->prefetch_before = PREFETCH_ALL;
377   (*aref)->prefetch_mod = 1;
378   (*aref)->reuse_distance = 0;
379   (*aref)->issue_prefetch_p = false;
380   (*aref)->group = group;
381   (*aref)->next = NULL;
382   (*aref)->independent_p = false;
383   (*aref)->storent_p = false;
384 
385   if (dump_file && (dump_flags & TDF_DETAILS))
386     dump_mem_ref (dump_file, *aref);
387 }
388 
389 /* Release memory references in GROUPS.  */
390 
391 static void
392 release_mem_refs (struct mem_ref_group *groups)
393 {
394   struct mem_ref_group *next_g;
395   struct mem_ref *ref, *next_r;
396 
397   for (; groups; groups = next_g)
398     {
399       next_g = groups->next;
400       for (ref = groups->refs; ref; ref = next_r)
401 	{
402 	  next_r = ref->next;
403 	  free (ref);
404 	}
405       free (groups);
406     }
407 }
408 
409 /* A structure used to pass arguments to idx_analyze_ref.  */
410 
411 struct ar_data
412 {
413   struct loop *loop;			/* Loop of the reference.  */
414   gimple stmt;				/* Statement of the reference.  */
415   tree *step;				/* Step of the memory reference.  */
416   HOST_WIDE_INT *delta;			/* Offset of the memory reference.  */
417 };
418 
419 /* Analyzes a single INDEX of a memory reference to obtain information
420    described at analyze_ref.  Callback for for_each_index.  */
421 
422 static bool
423 idx_analyze_ref (tree base, tree *index, void *data)
424 {
425   struct ar_data *ar_data = (struct ar_data *) data;
426   tree ibase, step, stepsize;
427   HOST_WIDE_INT idelta = 0, imult = 1;
428   affine_iv iv;
429 
430   if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
431 		  *index, &iv, true))
432     return false;
433   ibase = iv.base;
434   step = iv.step;
435 
436   if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
437       && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
438     {
439       idelta = int_cst_value (TREE_OPERAND (ibase, 1));
440       ibase = TREE_OPERAND (ibase, 0);
441     }
442   if (cst_and_fits_in_hwi (ibase))
443     {
444       idelta += int_cst_value (ibase);
445       ibase = build_int_cst (TREE_TYPE (ibase), 0);
446     }
447 
448   if (TREE_CODE (base) == ARRAY_REF)
449     {
450       stepsize = array_ref_element_size (base);
451       if (!cst_and_fits_in_hwi (stepsize))
452 	return false;
453       imult = int_cst_value (stepsize);
454       step = fold_build2 (MULT_EXPR, sizetype,
455 			  fold_convert (sizetype, step),
456 			  fold_convert (sizetype, stepsize));
457       idelta *= imult;
458     }
459 
460   if (*ar_data->step == NULL_TREE)
461     *ar_data->step = step;
462   else
463     *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
464 				  fold_convert (sizetype, *ar_data->step),
465 				  fold_convert (sizetype, step));
466   *ar_data->delta += idelta;
467   *index = ibase;
468 
469   return true;
470 }
471 
472 /* Tries to express REF_P in the form &BASE + STEP * iter + DELTA, where DELTA
473    and STEP are integer constants and iter is the number of iterations of LOOP.  The
474    reference occurs in statement STMT.  Strips nonaddressable component
475    references from REF_P.  */
476 
477 static bool
478 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
479 	     tree *step, HOST_WIDE_INT *delta,
480 	     gimple stmt)
481 {
482   struct ar_data ar_data;
483   tree off;
484   HOST_WIDE_INT bit_offset;
485   tree ref = *ref_p;
486 
487   *step = NULL_TREE;
488   *delta = 0;
489 
490   /* First strip off the component references.  Ignore bitfields.
491      Also strip off the real and imaginary parts of a complex, so that
492      they can have the same base.  */
493   if (TREE_CODE (ref) == REALPART_EXPR
494       || TREE_CODE (ref) == IMAGPART_EXPR
495       || (TREE_CODE (ref) == COMPONENT_REF
496           && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
497     {
498       if (TREE_CODE (ref) == IMAGPART_EXPR)
499         *delta += int_size_in_bytes (TREE_TYPE (ref));
500       ref = TREE_OPERAND (ref, 0);
501     }
502 
503   *ref_p = ref;
504 
505   for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
506     {
507       off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
508       bit_offset = TREE_INT_CST_LOW (off);
509       gcc_assert (bit_offset % BITS_PER_UNIT == 0);
510 
511       *delta += bit_offset / BITS_PER_UNIT;
512     }
513 
514   *base = unshare_expr (ref);
515   ar_data.loop = loop;
516   ar_data.stmt = stmt;
517   ar_data.step = step;
518   ar_data.delta = delta;
519   return for_each_index (base, idx_analyze_ref, &ar_data);
520 }
521 
522 /* Record a memory reference REF to the list REFS.  The reference occurs in
523    LOOP in statement STMT and it is write if WRITE_P.  Returns true if the
524    reference was recorded, false otherwise.  */
525 
526 static bool
527 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
528 			      tree ref, bool write_p, gimple stmt)
529 {
530   tree base, step;
531   HOST_WIDE_INT delta;
532   struct mem_ref_group *agrp;
533 
534   if (get_base_address (ref) == NULL)
535     return false;
536 
537   if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
538     return false;
539   /* If analyze_ref fails, the default is NULL_TREE.  We can stop here.  */
540   if (step == NULL_TREE)
541     return false;
542 
543   /* Stop if the address of BASE could not be taken.  */
544   if (may_be_nonaddressable_p (base))
545     return false;
546 
547   /* Limit non-constant step prefetching only to the innermost loops and
548      only when the step is loop invariant in the entire loop nest. */
549   if (!cst_and_fits_in_hwi (step))
550     {
551       if (loop->inner != NULL)
552         {
553           if (dump_file && (dump_flags & TDF_DETAILS))
554             {
555               fprintf (dump_file, "Memory expression %p\n", (void *) ref);
556               print_generic_expr (dump_file, ref, TDF_TREE);
557               fprintf (dump_file, ":");
558               dump_mem_details (dump_file, base, step, delta, write_p);
559               fprintf (dump_file,
560                        "Ignoring %p, non-constant step prefetching is "
561                        "limited to innermost loops\n",
562                        (void *) ref);
563             }
564           return false;
565         }
566       else
567         {
568           if (!expr_invariant_in_loop_p (loop_outermost (loop), step))
569             {
570               if (dump_file && (dump_flags & TDF_DETAILS))
571                 {
572                   fprintf (dump_file, "Memory expression %p\n", (void *) ref);
573                   print_generic_expr (dump_file, ref, TDF_TREE);
574                   fprintf (dump_file, ":");
575                   dump_mem_details (dump_file, base, step, delta, write_p);
576                   fprintf (dump_file,
577                            "Not prefetching, ignoring %p due to "
578                            "loop variant step\n",
579                            (void *) ref);
580                 }
581               return false;
582             }
583         }
584     }
585 
586   /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
587      are integer constants.  */
588   agrp = find_or_create_group (refs, base, step);
589   record_ref (agrp, stmt, ref, delta, write_p);
590 
591   return true;
592 }
593 
594 /* Record the suitable memory references in LOOP; their number is stored to
595    REF_COUNT.  NO_OTHER_REFS is set to true if there are no other memory
       references inside the loop.  */
596 
597 static struct mem_ref_group *
598 gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
599 {
600   basic_block *body = get_loop_body_in_dom_order (loop);
601   basic_block bb;
602   unsigned i;
603   gimple_stmt_iterator bsi;
604   gimple stmt;
605   tree lhs, rhs;
606   struct mem_ref_group *refs = NULL;
607 
608   *no_other_refs = true;
609   *ref_count = 0;
610 
611   /* Scan the loop body in dominator order, so that earlier references
612      precede later ones.  */
613   for (i = 0; i < loop->num_nodes; i++)
614     {
615       bb = body[i];
616       if (bb->loop_father != loop)
617 	continue;
618 
619       for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
620 	{
621 	  stmt = gsi_stmt (bsi);
622 
623 	  if (gimple_code (stmt) != GIMPLE_ASSIGN)
624 	    {
625 	      if (gimple_vuse (stmt)
626 		  || (is_gimple_call (stmt)
627 		      && !(gimple_call_flags (stmt) & ECF_CONST)))
628 		*no_other_refs = false;
629 	      continue;
630 	    }
631 
632 	  lhs = gimple_assign_lhs (stmt);
633 	  rhs = gimple_assign_rhs1 (stmt);
634 
635 	  if (REFERENCE_CLASS_P (rhs))
636 	    {
637 	      *no_other_refs &= gather_memory_references_ref (loop, &refs,
638 							      rhs, false, stmt);
639 	      *ref_count += 1;
640 	    }
641 	  if (REFERENCE_CLASS_P (lhs))
642 	    {
643 	      *no_other_refs &= gather_memory_references_ref (loop, &refs,
644 							      lhs, true, stmt);
645 	      *ref_count += 1;
646 	    }
647 	}
648     }
649   free (body);
650 
651   return refs;
652 }
653 
654 /* Prune the prefetch candidate REF using the self-reuse.  */
655 
656 static void
657 prune_ref_by_self_reuse (struct mem_ref *ref)
658 {
659   HOST_WIDE_INT step;
660   bool backward;
661 
662   /* If the step size is non-constant, we cannot calculate prefetch_mod.  */
663   if (!cst_and_fits_in_hwi (ref->group->step))
664     return;
665 
666   step = int_cst_value (ref->group->step);
667 
668   backward = step < 0;
669 
670   if (step == 0)
671     {
672       /* Prefetch references to invariant address just once.  */
673       ref->prefetch_before = 1;
674       return;
675     }
676 
677   if (backward)
678     step = -step;
679 
680   if (step > PREFETCH_BLOCK)
681     return;
682 
683   if ((backward && HAVE_BACKWARD_PREFETCH)
684       || (!backward && HAVE_FORWARD_PREFETCH))
685     {
686       ref->prefetch_before = 1;
687       return;
688     }
689 
690   ref->prefetch_mod = PREFETCH_BLOCK / step;
691 }
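
/* Worked example for the function above, matching cases (2) and (3) of the
   overview comment at the top of this file (PREFETCH_BLOCK = 64 assumed):
   for a[i + 64] with byte-sized elements the step is 1, so
   prefetch_mod = 64 / 1 = 64; for a[16 * i] the step is 16, so
   prefetch_mod = 64 / 16 = 4.  A reference with step 0 (invariant address)
   instead gets prefetch_before = 1 and is prefetched just once.  */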
692 
693 /* Divides X by BY, rounding down.  */
694 
695 static HOST_WIDE_INT
696 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
697 {
698   gcc_assert (by > 0);
699 
700   if (x >= 0)
701     return x / by;
702   else
703     return (x + by - 1) / by;
704 }
705 
706 /* Given a CACHE_LINE_SIZE and two inductive memory references
707    with a common STEP greater than CACHE_LINE_SIZE and an address
708    difference DELTA, compute the probability that they will fall
709    in different cache lines.  Return true if the computed miss rate
710    is not greater than the ACCEPTABLE_MISS_RATE.  DISTINCT_ITERS is the
711    number of distinct iterations after which the pattern repeats itself.
712    ALIGN_UNIT is the unit of alignment in bytes.  */
713 
714 static bool
715 is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
716 			 HOST_WIDE_INT step, HOST_WIDE_INT delta,
717 			 unsigned HOST_WIDE_INT distinct_iters,
718 			 int align_unit)
719 {
720   unsigned align, iter;
721   int total_positions, miss_positions, max_allowed_miss_positions;
722   int address1, address2, cache_line1, cache_line2;
723 
724   /* It always misses if delta is greater than or equal to the cache
725      line size.  */
726   if (delta >= (HOST_WIDE_INT) cache_line_size)
727     return false;
728 
729   miss_positions = 0;
730   total_positions = (cache_line_size / align_unit) * distinct_iters;
731   max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;
732 
733   /* Iterate through all possible alignments of the first
734      memory reference within its cache line.  */
735   for (align = 0; align < cache_line_size; align += align_unit)
736 
737     /* Iterate through all distinct iterations.  */
738     for (iter = 0; iter < distinct_iters; iter++)
739       {
740 	address1 = align + step * iter;
741 	address2 = address1 + delta;
742 	cache_line1 = address1 / cache_line_size;
743 	cache_line2 = address2 / cache_line_size;
744 	if (cache_line1 != cache_line2)
745 	  {
746 	    miss_positions += 1;
747 	    if (miss_positions > max_allowed_miss_positions)
748 	      return false;
749 	  }
750       }
751   return true;
752 }
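
/* Worked example for the function above, matching cases (4) and (5) of the
   overview comment (cache_line_size = 64, step = 187, delta = 50,
   distinct_iters = 64 and align_unit = 1 assumed): two accesses at offsets
   x and x + 50 share a cache line only when x mod 64 < 14, i.e. with
   probability 14/64 = 7/32.  The resulting miss rate of 25/32 is far above
   the 5% allowed by ACCEPTABLE_MISS_RATE, so the function returns false
   and both references keep their own prefetches.  */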
753 
754 /* Prune the prefetch candidate REF using the reuse with BY.
755    If BY_IS_BEFORE is true, BY is before REF in the loop.  */
756 
757 static void
758 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
759 			  bool by_is_before)
760 {
761   HOST_WIDE_INT step;
762   bool backward;
763   HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
764   HOST_WIDE_INT delta = delta_b - delta_r;
765   HOST_WIDE_INT hit_from;
766   unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
767   HOST_WIDE_INT reduced_step;
768   unsigned HOST_WIDE_INT reduced_prefetch_block;
769   tree ref_type;
770   int align_unit;
771 
772   /* If the step is non-constant, we cannot calculate prefetch_before.  */
773   if (!cst_and_fits_in_hwi (ref->group->step))
774     return;
776 
777   step = int_cst_value (ref->group->step);
778 
779   backward = step < 0;
780 
782   if (delta == 0)
783     {
784       /* If the references have the same address, only prefetch the
785 	 former.  */
786       if (by_is_before)
787 	ref->prefetch_before = 0;
788 
789       return;
790     }
791 
792   if (!step)
793     {
794       /* If the reference addresses are invariant and fall into the
795 	 same cache line, prefetch just the first one.  */
796       if (!by_is_before)
797 	return;
798 
799       if (ddown (ref->delta, PREFETCH_BLOCK)
800 	  != ddown (by->delta, PREFETCH_BLOCK))
801 	return;
802 
803       ref->prefetch_before = 0;
804       return;
805     }
806 
807   /* Only prune the reference that is behind in the array.  */
808   if (backward)
809     {
810       if (delta > 0)
811 	return;
812 
813       /* Transform the data so that we may assume that the accesses
814 	 are forward.  */
815       delta = - delta;
816       step = -step;
817       delta_r = PREFETCH_BLOCK - 1 - delta_r;
818       delta_b = PREFETCH_BLOCK - 1 - delta_b;
819     }
820   else
821     {
822       if (delta < 0)
823 	return;
824     }
825 
826   /* Check whether the two references are likely to hit the same cache
827      line, and how far apart the iterations in which this occurs
828      are.  */
829 
830   if (step <= PREFETCH_BLOCK)
831     {
832       /* The accesses are sure to meet.  Let us check when.  */
833       hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
834       prefetch_before = (hit_from - delta_r + step - 1) / step;
835 
836       /* Do not reduce prefetch_before if we meet beyond cache size.  */
837       if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step))
838         prefetch_before = PREFETCH_ALL;
839       if (prefetch_before < ref->prefetch_before)
840 	ref->prefetch_before = prefetch_before;
841 
842       return;
843     }
844 
845   /* A more complicated case with step > prefetch_block.  First reduce
846      the ratio between the step and the cache line size to its simplest
847      terms.  The resulting denominator will then represent the number of
848      distinct iterations after which each address will go back to its
849      initial location within the cache line.  This computation assumes
850      that PREFETCH_BLOCK is a power of two.  */
851   prefetch_block = PREFETCH_BLOCK;
852   reduced_prefetch_block = prefetch_block;
853   reduced_step = step;
854   while ((reduced_step & 1) == 0
855 	 && reduced_prefetch_block > 1)
856     {
857       reduced_step >>= 1;
858       reduced_prefetch_block >>= 1;
859     }
860 
861   prefetch_before = delta / step;
862   delta %= step;
863   ref_type = TREE_TYPE (ref->mem);
864   align_unit = TYPE_ALIGN (ref_type) / 8;
865   if (is_miss_rate_acceptable (prefetch_block, step, delta,
866 			       reduced_prefetch_block, align_unit))
867     {
868       /* Do not reduce prefetch_before if we meet beyond cache size.  */
869       if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
870         prefetch_before = PREFETCH_ALL;
871       if (prefetch_before < ref->prefetch_before)
872 	ref->prefetch_before = prefetch_before;
873 
874       return;
875     }
876 
877   /* Try also the following iteration.  */
878   prefetch_before++;
879   delta = step - delta;
880   if (is_miss_rate_acceptable (prefetch_block, step, delta,
881 			       reduced_prefetch_block, align_unit))
882     {
883       if (prefetch_before < ref->prefetch_before)
884 	ref->prefetch_before = prefetch_before;
885 
886       return;
887     }
888 
889   /* REF probably does not reuse BY.  */
890   return;
891 }
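
/* Worked example for the function above, matching case (1) of the overview
   comment: for REF = a[i] and BY = a[i + 64] with byte-sized elements
   (PREFETCH_BLOCK = 64 assumed), delta = 64 and step = 1, so the accesses
   are sure to meet: hit_from = ddown (64, 64) * 64 = 64 and
   prefetch_before = (64 - 0 + 1 - 1) / 1 = 64, which is how (1) gets its
   PREFETCH_BEFORE of 64.  */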
892 
893 /* Prune the prefetch candidate REF using the reuses with other references
894    in REFS.  */
895 
896 static void
897 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
898 {
899   struct mem_ref *prune_by;
900   bool before = true;
901 
902   prune_ref_by_self_reuse (ref);
903 
904   for (prune_by = refs; prune_by; prune_by = prune_by->next)
905     {
906       if (prune_by == ref)
907 	{
908 	  before = false;
909 	  continue;
910 	}
911 
912       if (!WRITE_CAN_USE_READ_PREFETCH
913 	  && ref->write_p
914 	  && !prune_by->write_p)
915 	continue;
916       if (!READ_CAN_USE_WRITE_PREFETCH
917 	  && !ref->write_p
918 	  && prune_by->write_p)
919 	continue;
920 
921       prune_ref_by_group_reuse (ref, prune_by, before);
922     }
923 }
924 
925 /* Prune the prefetch candidates in GROUP using the reuse analysis.  */
926 
927 static void
928 prune_group_by_reuse (struct mem_ref_group *group)
929 {
930   struct mem_ref *ref_pruned;
931 
932   for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
933     {
934       prune_ref_by_reuse (ref_pruned, group->refs);
935 
936       if (dump_file && (dump_flags & TDF_DETAILS))
937 	{
938 	  fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
939 
940 	  if (ref_pruned->prefetch_before == PREFETCH_ALL
941 	      && ref_pruned->prefetch_mod == 1)
942 	    fprintf (dump_file, " no restrictions");
943 	  else if (ref_pruned->prefetch_before == 0)
944 	    fprintf (dump_file, " do not prefetch");
945 	  else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
946 	    fprintf (dump_file, " prefetch once");
947 	  else
948 	    {
949 	      if (ref_pruned->prefetch_before != PREFETCH_ALL)
950 		{
951 		  fprintf (dump_file, " prefetch before ");
952 		  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
953 			   ref_pruned->prefetch_before);
954 		}
955 	      if (ref_pruned->prefetch_mod != 1)
956 		{
957 		  fprintf (dump_file, " prefetch mod ");
958 		  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
959 			   ref_pruned->prefetch_mod);
960 		}
961 	    }
962 	  fprintf (dump_file, "\n");
963 	}
964     }
965 }
966 
967 /* Prune the list of prefetch candidates GROUPS using the reuse analysis.  */
968 
969 static void
970 prune_by_reuse (struct mem_ref_group *groups)
971 {
972   for (; groups; groups = groups->next)
973     prune_group_by_reuse (groups);
974 }
975 
976 /* Returns true if we should issue prefetch for REF.  */
977 
978 static bool
979 should_issue_prefetch_p (struct mem_ref *ref)
980 {
981   /* For now, do not issue prefetches that would be useful only for the
982      first few iterations.  */
983   if (ref->prefetch_before != PREFETCH_ALL)
984     {
985       if (dump_file && (dump_flags & TDF_DETAILS))
986         fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
987 		 (void *) ref);
988       return false;
989     }
990 
991   /* Do not prefetch nontemporal stores.  */
992   if (ref->storent_p)
993     {
994       if (dump_file && (dump_flags & TDF_DETAILS))
995         fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
996       return false;
997     }
998 
999   return true;
1000 }
1001 
1002 /* Decide which of the prefetch candidates in GROUPS to prefetch.
1003    AHEAD is the number of iterations to prefetch ahead (which corresponds
1004    to the number of simultaneous instances of one prefetch running at a
1005    time).  UNROLL_FACTOR is the factor by which the loop is going to be
1006    unrolled.  Returns true if there is anything to prefetch.  */
1007 
1008 static bool
1009 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
1010 		     unsigned ahead)
1011 {
1012   unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
1013   unsigned slots_per_prefetch;
1014   struct mem_ref *ref;
1015   bool any = false;
1016 
1017   /* At most SIMULTANEOUS_PREFETCHES should be running at the same time.  */
1018   remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
1019 
1020   /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
1021      AHEAD / UNROLL_FACTOR iterations of the unrolled loop.  In each iteration,
1022      it will need a prefetch slot.  */
1023   slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
1024   if (dump_file && (dump_flags & TDF_DETAILS))
1025     fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
1026 	     slots_per_prefetch);
1027 
1028   /* For now we just take memory references one by one and issue
1029      prefetches for as many as possible.  The groups are sorted
1030      starting with the largest step, since the references with
1031      large step are more likely to cause many cache misses.  */
1032 
1033   for (; groups; groups = groups->next)
1034     for (ref = groups->refs; ref; ref = ref->next)
1035       {
1036 	if (!should_issue_prefetch_p (ref))
1037 	  continue;
1038 
1039         /* The loop is far from being sufficiently unrolled for this
1040            prefetch.  Do not generate the prefetch to avoid many redundant
1041            prefetches.  */
1042         if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
1043           continue;
1044 
1045 	/* If we need to prefetch the reference each PREFETCH_MOD iterations,
1046 	   and we unroll the loop UNROLL_FACTOR times, we need to insert
1047 	   ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
1048 	   iteration.  */
1049 	n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1050 			/ ref->prefetch_mod);
1051 	prefetch_slots = n_prefetches * slots_per_prefetch;
1052 
1053 	/* If more than half of the prefetches would be lost anyway, do not
1054 	   issue the prefetch.  */
1055 	if (2 * remaining_prefetch_slots < prefetch_slots)
1056 	  continue;
1057 
1058 	ref->issue_prefetch_p = true;
1059 
1060 	if (remaining_prefetch_slots <= prefetch_slots)
1061 	  return true;
1062 	remaining_prefetch_slots -= prefetch_slots;
1063 	any = true;
1064       }
1065 
1066   return any;
1067 }
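
/* Arithmetic sketch of the slot accounting above (the numbers are invented;
   SIMULTANEOUS_PREFETCHES comes from the machine description): with
   ahead = 8 and unroll_factor = 4, each prefetch instruction occupies
   slots_per_prefetch = (8 + 2) / 4 = 2 slots.  A reference with
   prefetch_mod = 2 then needs n_prefetches = ceil (4 / 2) = 2 instructions
   per unrolled body, i.e. 4 slots, and is only issued while at least half
   of those 4 slots are still available.  */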
1068 
1069 /* Return TRUE if no prefetch is going to be generated in the given
1070    GROUPS.  */
1071 
1072 static bool
1073 nothing_to_prefetch_p (struct mem_ref_group *groups)
1074 {
1075   struct mem_ref *ref;
1076 
1077   for (; groups; groups = groups->next)
1078     for (ref = groups->refs; ref; ref = ref->next)
1079       if (should_issue_prefetch_p (ref))
1080 	return false;
1081 
1082   return true;
1083 }
1084 
1085 /* Estimate the number of prefetches in the given GROUPS.
1086    UNROLL_FACTOR is the factor by which LOOP was unrolled.  */
1087 
1088 static int
1089 estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
1090 {
1091   struct mem_ref *ref;
1092   unsigned n_prefetches;
1093   int prefetch_count = 0;
1094 
1095   for (; groups; groups = groups->next)
1096     for (ref = groups->refs; ref; ref = ref->next)
1097       if (should_issue_prefetch_p (ref))
1098 	{
1099 	  n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1100 			  / ref->prefetch_mod);
1101 	  prefetch_count += n_prefetches;
1102 	}
1103 
1104   return prefetch_count;
1105 }
1106 
1107 /* Issue prefetches for the reference REF into the loop as decided before.
1108    AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
1109    is the factor by which LOOP was unrolled.  */
1110 
1111 static void
1112 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
1113 {
1114   HOST_WIDE_INT delta;
1115   tree addr, addr_base, write_p, local, forward;
1116   gimple prefetch;
1117   gimple_stmt_iterator bsi;
1118   unsigned n_prefetches, ap;
1119   bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
1120 
1121   if (dump_file && (dump_flags & TDF_DETAILS))
1122     fprintf (dump_file, "Issued%s prefetch for %p.\n",
1123 	     nontemporal ? " nontemporal" : "",
1124 	     (void *) ref);
1125 
1126   bsi = gsi_for_stmt (ref->stmt);
1127 
1128   n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1129 		  / ref->prefetch_mod);
1130   addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
1131   addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
1132 					true, NULL, true, GSI_SAME_STMT);
1133   write_p = ref->write_p ? integer_one_node : integer_zero_node;
1134   local = nontemporal ? integer_zero_node : integer_three_node;
1135 
1136   for (ap = 0; ap < n_prefetches; ap++)
1137     {
1138       if (cst_and_fits_in_hwi (ref->group->step))
1139         {
1140           /* Determine the address to prefetch.  */
1141           delta = (ahead + ap * ref->prefetch_mod) *
1142 		   int_cst_value (ref->group->step);
1143           addr = fold_build_pointer_plus_hwi (addr_base, delta);
1144           addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
1145                                            true, GSI_SAME_STMT);
1146         }
1147       else
1148         {
1149           /* The step size is non-constant but loop-invariant.  We use the
1150              heuristic of simply prefetching AHEAD iterations ahead.  */
1151           forward = fold_build2 (MULT_EXPR, sizetype,
1152                                  fold_convert (sizetype, ref->group->step),
1153                                  fold_convert (sizetype, size_int (ahead)));
1154           addr = fold_build_pointer_plus (addr_base, forward);
1155           addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
1156 					   NULL, true, GSI_SAME_STMT);
1157         }
1158       /* Create the prefetch instruction.  */
1159       prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
1160 				    3, addr, write_p, local);
1161       gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
1162     }
1163 }
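
/* For a constant-step reference such as an int-sized a[i] (step 4;
   ahead = 10, prefetch_mod = 1 and unroll_factor = 4 assumed for the
   example), the loop above emits four calls per unrolled body, equivalent
   at the source level to

     __builtin_prefetch (&a[i] + 40, write_p, 3);
     __builtin_prefetch (&a[i] + 44, write_p, 3);
     ...

   since delta = (ahead + ap * prefetch_mod) * step for ap = 0 .. 3.  */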
1164 
1165 /* Issue prefetches for the references in GROUPS into the loop as decided
1166    before.  AHEAD is the number of iterations to prefetch ahead.
1167    UNROLL_FACTOR is the factor by which LOOP was unrolled.  */
1168 
1169 static void
1170 issue_prefetches (struct mem_ref_group *groups,
1171 		  unsigned unroll_factor, unsigned ahead)
1172 {
1173   struct mem_ref *ref;
1174 
1175   for (; groups; groups = groups->next)
1176     for (ref = groups->refs; ref; ref = ref->next)
1177       if (ref->issue_prefetch_p)
1178 	issue_prefetch_ref (ref, unroll_factor, ahead);
1179 }
1180 
1181 /* Returns true if REF is a memory write for which a nontemporal store insn
1182    can be used.  */
1183 
1184 static bool
1185 nontemporal_store_p (struct mem_ref *ref)
1186 {
1187   enum machine_mode mode;
1188   enum insn_code code;
1189 
1190   /* REF must be a write that is not reused.  We require it to be independent
1191      of all other memory references in the loop, as the nontemporal stores may
1192      be reordered with respect to other memory references.  */
1193   if (!ref->write_p
1194       || !ref->independent_p
1195       || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
1196     return false;
1197 
1198   /* Check that we have the storent instruction for the mode.  */
1199   mode = TYPE_MODE (TREE_TYPE (ref->mem));
1200   if (mode == BLKmode)
1201     return false;
1202 
1203   code = optab_handler (storent_optab, mode);
1204   return code != CODE_FOR_nothing;
1205 }
1206 
1207 /* If REF is a nontemporal store, we mark the corresponding modify statement
1208    and return true.  Otherwise, we return false.  */
1209 
1210 static bool
1211 mark_nontemporal_store (struct mem_ref *ref)
1212 {
1213   if (!nontemporal_store_p (ref))
1214     return false;
1215 
1216   if (dump_file && (dump_flags & TDF_DETAILS))
1217     fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
1218 	     (void *) ref);
1219 
1220   gimple_assign_set_nontemporal_move (ref->stmt, true);
1221   ref->storent_p = true;
1222 
1223   return true;
1224 }
1225 
1226 /* Issue a memory fence instruction after LOOP.  */
1227 
1228 static void
1229 emit_mfence_after_loop (struct loop *loop)
1230 {
1231   vec<edge> exits = get_loop_exit_edges (loop);
1232   edge exit;
1233   gimple call;
1234   gimple_stmt_iterator bsi;
1235   unsigned i;
1236 
1237   FOR_EACH_VEC_ELT (exits, i, exit)
1238     {
1239       call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
1240 
1241       if (!single_pred_p (exit->dest)
1242 	  /* If possible, we prefer not to insert the fence on other paths
1243 	     in cfg.  */
1244 	  && !(exit->flags & EDGE_ABNORMAL))
1245 	split_loop_exit_edge (exit);
1246       bsi = gsi_after_labels (exit->dest);
1247 
1248       gsi_insert_before (&bsi, call, GSI_NEW_STMT);
1249     }
1250 
1251   exits.release ();
1252   update_ssa (TODO_update_ssa_only_virtuals);
1253 }
1254 
1255 /* Returns true if we can use storent in loop, false otherwise.  */
1256 
1257 static bool
1258 may_use_storent_in_loop_p (struct loop *loop)
1259 {
1260   bool ret = true;
1261 
1262   if (loop->inner != NULL)
1263     return false;
1264 
1265   /* If we must issue a mfence insn after using storent, check that there
1266      is a suitable place for it at each of the loop exits.  */
1267   if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1268     {
1269       vec<edge> exits = get_loop_exit_edges (loop);
1270       unsigned i;
1271       edge exit;
1272 
1273       FOR_EACH_VEC_ELT (exits, i, exit)
1274 	if ((exit->flags & EDGE_ABNORMAL)
1275 	    && exit->dest == EXIT_BLOCK_PTR)
1276 	  ret = false;
1277 
1278       exits.release ();
1279     }
1280 
1281   return ret;
1282 }
1283 
1284 /* Marks nontemporal stores in LOOP.  GROUPS contains the description of memory
1285    references in the loop.  */
1286 
1287 static void
1288 mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1289 {
1290   struct mem_ref *ref;
1291   bool any = false;
1292 
1293   if (!may_use_storent_in_loop_p (loop))
1294     return;
1295 
1296   for (; groups; groups = groups->next)
1297     for (ref = groups->refs; ref; ref = ref->next)
1298       any |= mark_nontemporal_store (ref);
1299 
1300   if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1301     emit_mfence_after_loop (loop);
1302 }
1303 
1304 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
1305    this is the case, fills in DESC with the description of the number of
1306    iterations.  */
1307 
1308 static bool
1309 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1310 		      unsigned factor)
1311 {
1312   if (!can_unroll_loop_p (loop, factor, desc))
1313     return false;
1314 
1315   /* We only consider loops without control flow for unrolling.  This is not
1316      a hard restriction -- tree_unroll_loop works with arbitrary loops
1317      as well; but the unrolling/prefetching is usually more profitable for
1318      loops consisting of a single basic block, and we want to limit the
1319      code growth.  */
1320   if (loop->num_nodes > 2)
1321     return false;
1322 
1323   return true;
1324 }
1325 
1326 /* Determine the factor by which to unroll LOOP, from the information
1327    contained in the list of memory references REFS.  The description of the
1328    number of iterations of LOOP is stored to DESC.  NINSNS is the number of
1329    insns of the LOOP.  EST_NITER is the estimated number of iterations of
1330    the loop, or -1 if no estimate is available.  */
1331 
1332 static unsigned
1333 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
1334 			 unsigned ninsns, struct tree_niter_desc *desc,
1335 			 HOST_WIDE_INT est_niter)
1336 {
1337   unsigned upper_bound;
1338   unsigned nfactor, factor, mod_constraint;
1339   struct mem_ref_group *agp;
1340   struct mem_ref *ref;
1341 
1342   /* First check whether the loop is not too large to unroll.  We ignore
1343      PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1344      from unrolling them enough to make exactly one cache line covered by each
1345      iteration.  Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1346      us from unrolling the loops too many times in cases where we only expect
1347      gains from better scheduling and decreasing loop overhead, which is not
1348      the case here.  */
1349   upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
1350 
1351   /* If we unrolled the loop more times than it iterates, the unrolled version
1352      of the loop would never be entered.  */
1353   if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1354     upper_bound = est_niter;
1355 
1356   if (upper_bound <= 1)
1357     return 1;
1358 
1359   /* Choose the factor so that we may prefetch each cache line just once,
1360      but bound the unrolling by UPPER_BOUND.  */
1361   factor = 1;
1362   for (agp = refs; agp; agp = agp->next)
1363     for (ref = agp->refs; ref; ref = ref->next)
1364       if (should_issue_prefetch_p (ref))
1365 	{
1366 	  mod_constraint = ref->prefetch_mod;
1367 	  nfactor = least_common_multiple (mod_constraint, factor);
1368 	  if (nfactor <= upper_bound)
1369 	    factor = nfactor;
1370 	}
1371 
1372   if (!should_unroll_loop_p (loop, desc, factor))
1373     return 1;
1374 
1375   return factor;
1376 }
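
/* Example of the factor computation above (upper_bound = 8 assumed): for
   references with prefetch_mod values 2 and 8, the factor grows as
   lcm (2, 1) = 2 and then lcm (8, 2) = 8, so each prefetch_mod divides the
   final factor and no cache line needs a redundant prefetch.  A reference
   with prefetch_mod 16 would leave the factor at 8, since
   lcm (16, 8) = 16 exceeds the bound.  */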
1377 
1378 /* Returns the total volume of the memory references REFS, taking into account
1379    reuses in the innermost loop and cache line size.  TODO -- we should also
1380    take into account reuses across the iterations of the loops in the loop
1381    nest.  */
1382 
1383 static unsigned
1384 volume_of_references (struct mem_ref_group *refs)
1385 {
1386   unsigned volume = 0;
1387   struct mem_ref_group *gr;
1388   struct mem_ref *ref;
1389 
1390   for (gr = refs; gr; gr = gr->next)
1391     for (ref = gr->refs; ref; ref = ref->next)
1392       {
1393 	/* Almost always reuses another value?  */
1394 	if (ref->prefetch_before != PREFETCH_ALL)
1395 	  continue;
1396 
1397 	/* If several iterations access the same cache line, use the size of
1398 	   the line divided by this number.  Otherwise, a cache line is
1399 	   accessed in each iteration.  TODO -- in the latter case, we should
1400 	   take the size of the reference into account, rounding it up on cache
1401 	   line size multiple.  */
1402 	volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1403       }
1404   return volume;
1405 }
1406 
1407 /* Returns the volume of memory references accessed across VEC iterations of
1408    loops, whose sizes are described in the LOOP_SIZES array.  N is the number
1409    of the loops in the nest (length of VEC and LOOP_SIZES vectors).  */
1410 
1411 static unsigned
1412 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1413 {
1414   unsigned i;
1415 
1416   for (i = 0; i < n; i++)
1417     if (vec[i] != 0)
1418       break;
1419 
1420   if (i == n)
1421     return 0;
1422 
1423   gcc_assert (vec[i] > 0);
1424 
1425   /* We ignore the parts of the distance vector in subloops, since usually
1426      the numbers of iterations are much smaller.  */
1427   return loop_sizes[i] * vec[i];
1428 }
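
/* Example: in a nest of two loops where one iteration of the outer loop
   accesses loop_sizes[0] = 64000 bytes and one iteration of the inner loop
   accesses loop_sizes[1] = 640 bytes, the distance vector (0, 2) yields a
   volume of 640 * 2 = 1280 bytes and (1, 0) yields 64000 bytes; the
   components in subloops of the first nonzero entry are ignored.  */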
1429 
1430 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
1431    at the position corresponding to the loop of the step.  N is the depth
1432    of the considered loop nest, and LOOP is its innermost loop.  */
1433 
1434 static void
1435 add_subscript_strides (tree access_fn, unsigned stride,
1436 		       HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1437 {
1438   struct loop *aloop;
1439   tree step;
1440   HOST_WIDE_INT astep;
1441   unsigned min_depth = loop_depth (loop) - n;
1442 
1443   while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1444     {
1445       aloop = get_chrec_loop (access_fn);
1446       step = CHREC_RIGHT (access_fn);
1447       access_fn = CHREC_LEFT (access_fn);
1448 
1449       if ((unsigned) loop_depth (aloop) <= min_depth)
1450 	continue;
1451 
1452       if (host_integerp (step, 0))
1453 	astep = tree_low_cst (step, 0);
1454       else
1455 	astep = L1_CACHE_LINE_SIZE;
1456 
1457       strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1458 
1459     }
1460 }
1461 
1462 /* Returns the volume of memory references accessed between two consecutive
1463    self-reuses of the reference DR.  We consider the subscripts of DR in N
1464    loops, and LOOP_SIZES contains the volumes of accesses in each of the
1465    loops.  LOOP is the innermost loop of the current loop nest.  */
1466 
1467 static unsigned
1468 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1469 		     struct loop *loop)
1470 {
1471   tree stride, access_fn;
1472   HOST_WIDE_INT *strides, astride;
1473   vec<tree> access_fns;
1474   tree ref = DR_REF (dr);
1475   unsigned i, ret = ~0u;
1476 
1477   /* In the following example:
1478 
1479      for (i = 0; i < N; i++)
1480        for (j = 0; j < N; j++)
1481          use (a[j][i]);
1482      the same cache line is accessed each N steps (except if the change from
1483      i to i + 1 crosses the boundary of the cache line).  Thus, for self-reuse,
1484      we cannot rely purely on the results of the data dependence analysis.
1485 
1486      Instead, we compute the stride of the reference in each loop, and consider
1487      the innermost loop in which the stride is less than the cache line size.  */
1488 
1489   strides = XCNEWVEC (HOST_WIDE_INT, n);
1490   access_fns = DR_ACCESS_FNS (dr);
1491 
1492   FOR_EACH_VEC_ELT (access_fns, i, access_fn)
1493     {
1494       /* Keep track of the reference corresponding to the subscript, so that we
1495 	 know its stride.  */
1496       while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1497 	ref = TREE_OPERAND (ref, 0);
1498 
1499       if (TREE_CODE (ref) == ARRAY_REF)
1500 	{
1501 	  stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1502 	  if (host_integerp (stride, 1))
1503 	    astride = tree_low_cst (stride, 1);
1504 	  else
1505 	    astride = L1_CACHE_LINE_SIZE;
1506 
1507 	  ref = TREE_OPERAND (ref, 0);
1508 	}
1509       else
1510 	astride = 1;
1511 
1512       add_subscript_strides (access_fn, astride, strides, n, loop);
1513     }
1514 
1515   for (i = n; i-- > 0; )
1516     {
1517       unsigned HOST_WIDE_INT s;
1518 
1519       s = strides[i] < 0 ?  -strides[i] : strides[i];
1520 
1521       if (s < (unsigned) L1_CACHE_LINE_SIZE
1522 	  && (loop_sizes[i]
1523 	      > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1524 	{
1525 	  ret = loop_sizes[i];
1526 	  break;
1527 	}
1528     }
1529 
1530   free (strides);
1531   return ret;
1532 }
1533 
1534 /* Determines the distance till the first reuse of each reference in REFS
1535    in the loop nest of LOOP.  NO_OTHER_REFS is true if there are no other
1536    memory references in the loop.  Return false if the analysis fails.  */
1537 
1538 static bool
1539 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1540 			   bool no_other_refs)
1541 {
1542   struct loop *nest, *aloop;
1543   vec<data_reference_p> datarefs = vNULL;
1544   vec<ddr_p> dependences = vNULL;
1545   struct mem_ref_group *gr;
1546   struct mem_ref *ref, *refb;
1547   vec<loop_p> vloops = vNULL;
1548   unsigned *loop_data_size;
1549   unsigned i, j, n;
1550   unsigned volume, dist, adist;
1551   HOST_WIDE_INT vol;
1552   data_reference_p dr;
1553   ddr_p dep;
1554 
1555   if (loop->inner)
1556     return true;
1557 
1558   /* Find the outermost loop of the loop nest of loop (we require that
1559      there are no sibling loops inside the nest).  */
1560   nest = loop;
1561   while (1)
1562     {
1563       aloop = loop_outer (nest);
1564 
1565       if (aloop == current_loops->tree_root
1566 	  || aloop->inner->next)
1567 	break;
1568 
1569       nest = aloop;
1570     }
1571 
1572   /* For each loop, determine the amount of data accessed in each iteration.
1573      We use this to estimate whether the reference is evicted from the
1574      cache before its reuse.  */
1575   find_loop_nest (nest, &vloops);
1576   n = vloops.length ();
1577   loop_data_size = XNEWVEC (unsigned, n);
1578   volume = volume_of_references (refs);
1579   i = n;
1580   while (i-- != 0)
1581     {
1582       loop_data_size[i] = volume;
1583       /* Bound the volume by the L2 cache size, since above this bound,
1584 	 all dependence distances are equivalent.  */
1585       if (volume > L2_CACHE_SIZE_BYTES)
1586 	continue;
1587 
1588       aloop = vloops[i];
1589       vol = estimated_stmt_executions_int (aloop);
1590       if (vol == -1)
1591 	vol = expected_loop_iterations (aloop);
1592       volume *= vol;
1593     }
1594 
1595   /* Prepare the references in the form suitable for data dependence
1596      analysis.  We ignore unanalyzable data references (the results
1597      are used just as a heuristic to estimate the temporality of the
1598      references, hence we do not need to worry about correctness).  */
1599   for (gr = refs; gr; gr = gr->next)
1600     for (ref = gr->refs; ref; ref = ref->next)
1601       {
1602 	dr = create_data_ref (nest, loop_containing_stmt (ref->stmt),
1603 			      ref->mem, ref->stmt, !ref->write_p);
1604 
1605 	if (dr)
1606 	  {
1607 	    ref->reuse_distance = volume;
1608 	    dr->aux = ref;
1609 	    datarefs.safe_push (dr);
1610 	  }
1611 	else
1612 	  no_other_refs = false;
1613       }
1614 
1615   FOR_EACH_VEC_ELT (datarefs, i, dr)
1616     {
1617       dist = self_reuse_distance (dr, loop_data_size, n, loop);
1618       ref = (struct mem_ref *) dr->aux;
1619       if (ref->reuse_distance > dist)
1620 	ref->reuse_distance = dist;
1621 
1622       if (no_other_refs)
1623 	ref->independent_p = true;
1624     }
1625 
1626   if (!compute_all_dependences (datarefs, &dependences, vloops, true))
1627     return false;
1628 
1629   FOR_EACH_VEC_ELT (dependences, i, dep)
1630     {
1631       if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1632 	continue;
1633 
1634       ref = (struct mem_ref *) DDR_A (dep)->aux;
1635       refb = (struct mem_ref *) DDR_B (dep)->aux;
1636 
1637       if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1638 	  || DDR_NUM_DIST_VECTS (dep) == 0)
1639 	{
1640 	  /* If the dependence cannot be analyzed, assume that there might be
1641 	     a reuse.  */
1642 	  dist = 0;
1643 
1644 	  ref->independent_p = false;
1645 	  refb->independent_p = false;
1646 	}
1647       else
1648 	{
1649	  /* The distance vectors are normalized to be always lexicographically
1650	     positive, hence we cannot tell just from them whether DDR_A comes
1651	     before DDR_B or vice versa.  However, this does not matter
1652	     anyway -- if DDR_A is close to DDR_B, then it is either reused in
1653	     DDR_B (and thus is not nontemporal), or it reuses the value of
1654	     DDR_B in cache (and marking it as nontemporal would not affect
1655	     anything).  */
1656 
1657 	  dist = volume;
1658 	  for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1659 	    {
1660 	      adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1661 					     loop_data_size, n);
1662 
1663 	      /* If this is a dependence in the innermost loop (i.e., the
1664 		 distances in all superloops are zero) and it is not
1665 		 the trivial self-dependence with distance zero, record that
1666 		 the references are not completely independent.  */
1667 	      if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1668 		  && (ref != refb
1669 		      || DDR_DIST_VECT (dep, j)[n-1] != 0))
1670 		{
1671 		  ref->independent_p = false;
1672 		  refb->independent_p = false;
1673 		}
1674 
1675	      /* Ignore accesses closer than
1676		 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1677		 so that we use nontemporal prefetches e.g. if a single
1678		 memory location is accessed several times in a single
1679		 iteration of the loop.  */
1680 	      if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1681 		continue;
1682 
1683 	      if (adist < dist)
1684 		dist = adist;
1685 	    }
1686 	}
1687 
1688       if (ref->reuse_distance > dist)
1689 	ref->reuse_distance = dist;
1690       if (refb->reuse_distance > dist)
1691 	refb->reuse_distance = dist;
1692     }
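
  /* A rough illustration of the loop above: a dependence with distance
     vector (1, 0) in a two-deep nest, where one iteration of the outer loop
     touches about 32 kB, yields an adist on the order of 32 kB.  If adist
     is below L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION it is ignored, so
     tight intra-iteration reuse does not defeat nontemporal prefetching;
     otherwise it may lower the reuse distance of both endpoints.  */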
1693 
1694   free_dependence_relations (dependences);
1695   free_data_refs (datarefs);
1696   free (loop_data_size);
1697 
1698   if (dump_file && (dump_flags & TDF_DETAILS))
1699     {
1700       fprintf (dump_file, "Reuse distances:\n");
1701       for (gr = refs; gr; gr = gr->next)
1702 	for (ref = gr->refs; ref; ref = ref->next)
1703 	  fprintf (dump_file, " ref %p distance %u\n",
1704 		   (void *) ref, ref->reuse_distance);
1705     }
1706 
1707   return true;
1708 }
1709 
1710 /* Determine whether or not the trip count to ahead ratio is too small based
1711    on profitability considerations.
1712    AHEAD: the iteration ahead distance,
1713    EST_NITER: the estimated trip count.  */
1714 
1715 static bool
1716 trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
1717 {
1718   /* Assume trip count to ahead ratio is big enough if the trip count could not
1719      be estimated at compile time.  */
1720   if (est_niter < 0)
1721     return false;
1722 
1723   if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
1724     {
1725       if (dump_file && (dump_flags & TDF_DETAILS))
1726 	fprintf (dump_file,
1727 		 "Not prefetching -- loop estimated to roll only %d times\n",
1728 		 (int) est_niter);
1729       return true;
1730     }
1731 
1732   return false;
1733 }
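
/* For instance (an illustration only): if one iteration costs about 10 time
   units and the prefetch latency is 200 units, AHEAD is 20; with a
   TRIP_COUNT_TO_AHEAD_RATIO of 4, a loop estimated to roll fewer than 80
   times is rejected, since much of the prefetched data would arrive only
   after the loop has already exited.  */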
1734 
1735 /* Determine whether or not the number of memory references in the loop is
1736    reasonable based on profitability and compilation time considerations.
1737    NINSNS: estimated number of instructions in the loop,
1738    MEM_REF_COUNT: total number of memory references in the loop.  */
1739 
1740 static bool
1741 mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
1742 {
1743   int insn_to_mem_ratio;
1744 
1745   if (mem_ref_count == 0)
1746     return false;
1747 
1748   /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
1749      (compute_all_dependences) both have costs quadratic in the number of
1750      memory references.  To avoid huge compilation times, we give up
1751      prefetching if mem_ref_count is too large.  */
1752   if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
1753     return false;
1754 
1755   /* Prefetching improves performance by overlapping cache-missing
1756      memory accesses with CPU operations.  If the loop does not have
1757      enough CPU operations to overlap with memory operations, prefetching
1758      won't give a significant benefit.  One approximate way of checking
1759      this is to require the ratio of instructions to memory references to
1760      be above a certain limit.  This approximation works well in practice.
1761      TODO: Implement a more precise computation by estimating the time
1762      for each CPU or memory op in the loop. Time estimates for memory ops
1763      should account for cache misses.  */
1764   insn_to_mem_ratio = ninsns / mem_ref_count;
1765 
1766   if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1767     {
1768       if (dump_file && (dump_flags & TDF_DETAILS))
1769         fprintf (dump_file,
1770 		 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1771 		 insn_to_mem_ratio);
1772       return false;
1773     }
1774 
1775   return true;
1776 }
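
/* A numeric sketch of the check above: a loop with NINSNS = 20 and
   MEM_REF_COUNT = 8 has insn_to_mem_ratio = 2; if
   PREFETCH_MIN_INSN_TO_MEM_RATIO is, say, 3, the loop is rejected because
   there is too little computation to hide the memory latency behind.  */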
1777 
1778 /* Determine whether or not the instruction to prefetch ratio in the loop is
1779    too small based on profitability considerations.
1780    NINSNS: estimated number of instructions in the loop,
1781    PREFETCH_COUNT: an estimate of the number of prefetches,
1782    UNROLL_FACTOR:  the factor to unroll the loop if prefetching.  */
1783 
1784 static bool
1785 insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
1786                                      unsigned unroll_factor)
1787 {
1788   int insn_to_prefetch_ratio;
1789 
1790   /* Prefetching most likely causes performance degradation when the instruction
1791      to prefetch ratio is too small.  Too many prefetch instructions in a loop
1792      may reduce the I-cache performance.
1793      (unroll_factor * ninsns) is used to estimate the number of instructions in
1794      the unrolled loop.  This implementation is a bit simplistic -- the number
1795      of issued prefetch instructions is also affected by unrolling.  So,
1796      prefetch_mod and the unroll factor should be taken into account when
1797      determining prefetch_count.  Also, the number of insns of the unrolled
1798      loop will usually be significantly smaller than the number of insns of the
1799      original loop * unroll_factor (at least the induction variable increases
1800      and the exit branches will get eliminated), so it might be better to use
1801      tree_estimate_loop_size + estimated_unrolled_size.  */
1802   insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
1803   if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
1804     {
1805       if (dump_file && (dump_flags & TDF_DETAILS))
1806         fprintf (dump_file,
1807 		 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1808 		 insn_to_prefetch_ratio);
1809       return true;
1810     }
1811 
1812   return false;
1813 }
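
/* For example, with UNROLL_FACTOR = 4, NINSNS = 20 and PREFETCH_COUNT = 10,
   insn_to_prefetch_ratio is (4 * 20) / 10 = 8; if MIN_INSN_TO_PREFETCH_RATIO
   is, say, 9, prefetching is rejected as likely to hurt the I-cache more
   than the prefetches help.  */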
1814 
1815 
1816 /* Issue prefetch instructions for array references in LOOP.  Returns
1817    true if the LOOP was unrolled.  */
1818 
1819 static bool
1820 loop_prefetch_arrays (struct loop *loop)
1821 {
1822   struct mem_ref_group *refs;
1823   unsigned ahead, ninsns, time, unroll_factor;
1824   HOST_WIDE_INT est_niter;
1825   struct tree_niter_desc desc;
1826   bool unrolled = false, no_other_refs;
1827   unsigned prefetch_count;
1828   unsigned mem_ref_count;
1829 
1830   if (optimize_loop_nest_for_size_p (loop))
1831     {
1832       if (dump_file && (dump_flags & TDF_DETAILS))
1833 	fprintf (dump_file, "  ignored (cold area)\n");
1834       return false;
1835     }
1836 
1837   /* FIXME: the time should be weighted by the probabilities of the blocks in
1838      the loop body.  */
1839   time = tree_num_loop_insns (loop, &eni_time_weights);
1840   if (time == 0)
1841     return false;
1842 
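  /* AHEAD is the prefetch distance in iterations: the fetch latency divided
     by the time of one iteration, rounded up.  E.g., a latency of 200 with
     a 15-unit loop body gives ahead = (200 + 15 - 1) / 15 = 14.  */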
1843   ahead = (PREFETCH_LATENCY + time - 1) / time;
1844   est_niter = estimated_stmt_executions_int (loop);
1845   if (est_niter == -1)
1846     est_niter = max_stmt_executions_int (loop);
1847 
1848   /* Prefetching is not likely to be profitable if the trip count to ahead
1849      ratio is too small.  */
1850   if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
1851     return false;
1852 
1853   ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1854 
1855   /* Step 1: gather the memory references.  */
1856   refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
1857 
1858   /* Give up prefetching if the number of memory references in the
1859      loop is not reasonable based on profitability and compilation time
1860      considerations.  */
1861   if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
1862     goto fail;
1863 
1864   /* Step 2: estimate the reuse effects.  */
1865   prune_by_reuse (refs);
1866 
1867   if (nothing_to_prefetch_p (refs))
1868     goto fail;
1869 
1870   if (!determine_loop_nest_reuse (loop, refs, no_other_refs))
1871     goto fail;
1872 
1873   /* Step 3: determine unroll factor.  */
1874   unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1875 					   est_niter);
1876 
1877   /* Estimate prefetch count for the unrolled loop.  */
1878   prefetch_count = estimate_prefetch_count (refs, unroll_factor);
1879   if (prefetch_count == 0)
1880     goto fail;
1881 
1882   if (dump_file && (dump_flags & TDF_DETAILS))
1883     fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
1884 	     HOST_WIDE_INT_PRINT_DEC "\n"
1885 	     "insn count %d, mem ref count %d, prefetch count %d\n",
1886 	     ahead, unroll_factor, est_niter,
1887 	     ninsns, mem_ref_count, prefetch_count);
1888 
1889   /* Prefetching is not likely to be profitable if the instruction to prefetch
1890      ratio is too small.  */
1891   if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
1892 					  unroll_factor))
1893     goto fail;
1894 
1895   mark_nontemporal_stores (loop, refs);
1896 
1897   /* Step 4: what to prefetch?  */
1898   if (!schedule_prefetches (refs, unroll_factor, ahead))
1899     goto fail;
1900 
1901   /* Step 5: unroll the loop.  TODO -- peel the first and last few
1902      iterations so that we do not issue superfluous prefetches.  */
1903   if (unroll_factor != 1)
1904     {
1905       tree_unroll_loop (loop, unroll_factor,
1906 			single_dom_exit (loop), &desc);
1907       unrolled = true;
1908     }
1909 
1910   /* Step 6: issue the prefetches.  */
1911   issue_prefetches (refs, unroll_factor, ahead);
1912 
1913 fail:
1914   release_mem_refs (refs);
1915   return unrolled;
1916 }
1917 
1918 /* Issue prefetch instructions for array references in loops.  */
1919 
1920 unsigned int
1921 tree_ssa_prefetch_arrays (void)
1922 {
1923   loop_iterator li;
1924   struct loop *loop;
1925   bool unrolled = false;
1926   int todo_flags = 0;
1927 
1928   if (!HAVE_prefetch
1929       /* It is possible to ask the compiler for, say, -mtune=i486
1930	 -march=pentium4.  -mtune=i486 causes us to have PREFETCH_BLOCK 0,
1931	 since this is part of the processor costs and i486 does not have
1932	 prefetch, but -march=pentium4 causes HAVE_prefetch to be true.  Ugh.  */
1933       || PREFETCH_BLOCK == 0)
1934     return 0;
1935 
1936   if (dump_file && (dump_flags & TDF_DETAILS))
1937     {
1938       fprintf (dump_file, "Prefetching parameters:\n");
1939       fprintf (dump_file, "    simultaneous prefetches: %d\n",
1940 	       SIMULTANEOUS_PREFETCHES);
1941       fprintf (dump_file, "    prefetch latency: %d\n", PREFETCH_LATENCY);
1942       fprintf (dump_file, "    prefetch block size: %d\n", PREFETCH_BLOCK);
1943       fprintf (dump_file, "    L1 cache size: %d lines, %d kB\n",
1944 	       L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
1945       fprintf (dump_file, "    L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1946       fprintf (dump_file, "    L2 cache size: %d kB\n", L2_CACHE_SIZE);
1947       fprintf (dump_file, "    min insn-to-prefetch ratio: %d\n",
1948	       MIN_INSN_TO_PREFETCH_RATIO);
1949       fprintf (dump_file, "    min insn-to-mem ratio: %d\n",
1950	       PREFETCH_MIN_INSN_TO_MEM_RATIO);
1951       fprintf (dump_file, "\n");
1952     }
1953 
1954   initialize_original_copy_tables ();
1955 
1956   if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
1957     {
1958       tree type = build_function_type_list (void_type_node,
1959 					    const_ptr_type_node, NULL_TREE);
1960       tree decl = add_builtin_function ("__builtin_prefetch", type,
1961 					BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1962 					NULL, NULL_TREE);
1963       DECL_IS_NOVOPS (decl) = true;
1964       set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
1965     }
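
  /* The prefetches emitted later correspond to calls of the form

       __builtin_prefetch (addr, write_p, locality);

     with a read/write flag and a temporal-locality hint (0 requests a
     nontemporal prefetch); the exact arguments are built in
     issue_prefetches, defined earlier in this file.  (A sketch for
     orientation only.)  */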
1966 
1967   /* We assume that the cache line size is a power of two, so verify this
1968      here: X & (X - 1) is zero exactly when at most one bit of X is set.  */
1969   gcc_assert ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) == 0);
1970 
1971   FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
1972     {
1973       if (dump_file && (dump_flags & TDF_DETAILS))
1974 	fprintf (dump_file, "Processing loop %d:\n", loop->num);
1975 
1976       unrolled |= loop_prefetch_arrays (loop);
1977 
1978       if (dump_file && (dump_flags & TDF_DETAILS))
1979 	fprintf (dump_file, "\n\n");
1980     }
1981 
1982   if (unrolled)
1983     {
1984       scev_reset ();
1985       todo_flags |= TODO_cleanup_cfg;
1986     }
1987 
1988   free_original_copy_tables ();
1989   return todo_flags;
1990 }
1991