xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/tree-ssa-loop-prefetch.c (revision 404ee5b9334f618040b6cdef96a0ff35a6fc4636)
1 /* Array prefetching.
2    Copyright (C) 2005-2017 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "tree-pass.h"
30 #include "gimple-ssa.h"
31 #include "optabs-query.h"
32 #include "tree-pretty-print.h"
33 #include "fold-const.h"
34 #include "stor-layout.h"
35 #include "gimplify.h"
36 #include "gimple-iterator.h"
37 #include "gimplify-me.h"
38 #include "tree-ssa-loop-ivopts.h"
39 #include "tree-ssa-loop-manip.h"
40 #include "tree-ssa-loop-niter.h"
41 #include "tree-ssa-loop.h"
42 #include "ssa.h"
43 #include "tree-into-ssa.h"
44 #include "cfgloop.h"
45 #include "tree-scalar-evolution.h"
46 #include "params.h"
47 #include "langhooks.h"
48 #include "tree-inline.h"
49 #include "tree-data-ref.h"
50 #include "diagnostic-core.h"
51 
52 /* This pass inserts prefetch instructions to optimize cache usage during
53    accesses to arrays in loops.  It processes loops sequentially and:
54 
55    1) Gathers all memory references in the single loop.
56    2) For each of the references it decides when it is profitable to prefetch
57       it.  To do it, we evaluate the reuse among the accesses, and determine
58       two values: PREFETCH_BEFORE (meaning that it only makes sense to do
59       prefetching in the first PREFETCH_BEFORE iterations of the loop) and
60       PREFETCH_MOD (meaning that it only makes sense to prefetch in the
61       iterations of the loop that are zero modulo PREFETCH_MOD).  For example
62       (assuming cache line size is 64 bytes, char has size 1 byte and there
63       is no hardware sequential prefetch):
64 
65       char *a;
66       for (i = 0; i < max; i++)
67 	{
68 	  a[255] = ...;		(0)
69 	  a[i] = ...;		(1)
70 	  a[i + 64] = ...;	(2)
71 	  a[16*i] = ...;	(3)
72 	  a[187*i] = ...;	(4)
73 	  a[187*i + 50] = ...;	(5)
74 	}
75 
76        (0) obviously has PREFETCH_BEFORE 1
77        (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
78            location 64 iterations before it, and PREFETCH_MOD 64 (since
79 	   it hits the same cache line otherwise).
80        (2) has PREFETCH_MOD 64
81        (3) has PREFETCH_MOD 4
82        (4) has PREFETCH_MOD 1.  We do not set PREFETCH_BEFORE here, since
83            the cache line accessed by (5) is the same with probability only
84 	   7/32.
85        (5) has PREFETCH_MOD 1 as well.
86 
87       Additionally, we use data dependence analysis to determine for each
88       reference the distance till the first reuse; this information is used
89       to determine the temporality of the issued prefetch instruction.
90 
91    3) We determine how much ahead we need to prefetch.  The number of
92       iterations needed is time to fetch / time spent in one iteration of
93       the loop.  The problem is that we do not know either of these values,
94       so we just make a heuristic guess based on a magic (possibly)
95       target-specific constant and size of the loop.
96 
97    4) Determine which of the references we prefetch.  We take into account
98       that there is a maximum number of simultaneous prefetches (provided
99       by the machine description).  We issue as many prefetches as possible
100       while still within this bound (starting with those with lowest
101       prefetch_mod, since they are responsible for most of the cache
102       misses).
103 
104    5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
105       and PREFETCH_BEFORE requirements (within some bounds), and to avoid
106       prefetching nonaccessed memory.
107       TODO -- actually implement peeling.
108 
109    6) We actually emit the prefetch instructions.  ??? Perhaps emit the
110       prefetch instructions with guards in cases where 5) was not sufficient
111       to satisfy the constraints?
112 
113    A cost model is implemented to determine whether or not prefetching is
114    profitable for a given loop.  The cost model has three heuristics:
115 
116    1. Function trip_count_to_ahead_ratio_too_small_p implements a
117       heuristic that determines whether or not the loop has too few
118       iterations (compared to ahead).  Prefetching is not likely to be
119       beneficial if the trip count to ahead ratio is below a certain
120       minimum.
121 
122    2. Function mem_ref_count_reasonable_p implements a heuristic that
123       determines whether the given loop has enough CPU ops that can be
124       overlapped with cache missing memory ops.  If not, the loop
125       won't benefit from prefetching.  In the implementation,
126       prefetching is not considered beneficial if the ratio between
127       the instruction count and the mem ref count is below a certain
128       minimum.
129 
130    3. Function insn_to_prefetch_ratio_too_small_p implements a
131       heuristic that disables prefetching in a loop if the prefetching
132       cost is above a certain limit.  The relative prefetching cost is
133       estimated by taking the ratio between the prefetch count and the
134    total instruction count (this models the I-cache cost).
135 
136    The limits used in these heuristics are defined as parameters with
137    reasonable default values. Machine-specific default values will be
138    added later.
139 
140    Some other TODO:
141       -- write and use more general reuse analysis (that could be also used
142 	 in other cache aimed loop optimizations)
143       -- make it behave sanely together with the prefetches given by the user
144 	 (now we just ignore them; at the very least we should avoid
145 	 optimizing loops in which the user put their own prefetches)
146       -- we assume cache line size alignment of arrays; this could be
147 	 improved.  */
148 
149 /* Magic constants follow.  These should be replaced by machine specific
150    numbers.  */
151 
152 /* True if write can be prefetched by a read prefetch.  */
153 
154 #ifndef WRITE_CAN_USE_READ_PREFETCH
155 #define WRITE_CAN_USE_READ_PREFETCH 1
156 #endif
157 
158 /* True if read can be prefetched by a write prefetch. */
159 
160 #ifndef READ_CAN_USE_WRITE_PREFETCH
161 #define READ_CAN_USE_WRITE_PREFETCH 0
162 #endif
163 
164 /* The size of the block loaded by a single prefetch.  Usually, this is
165    the same as cache line size (at the moment, we only consider one level
166    of cache hierarchy).  */
167 
168 #ifndef PREFETCH_BLOCK
169 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
170 #endif
171 
172 /* Do we have a forward hardware sequential prefetching?  */
173 
174 #ifndef HAVE_FORWARD_PREFETCH
175 #define HAVE_FORWARD_PREFETCH 0
176 #endif
177 
178 /* Do we have a backward hardware sequential prefetching?  */
179 
180 #ifndef HAVE_BACKWARD_PREFETCH
181 #define HAVE_BACKWARD_PREFETCH 0
182 #endif
183 
184 /* In some cases we are only able to determine that there is a certain
185    probability that the two accesses hit the same cache line.  In this
186    case, we issue the prefetches for both of them if this probability
187    is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */
188 
189 #ifndef ACCEPTABLE_MISS_RATE
190 #define ACCEPTABLE_MISS_RATE 50
191 #endif
192 
193 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
194 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
195 
196 /* We consider a memory access nontemporal if it is not reused sooner than
197    after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
198    accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
199    so that we use nontemporal prefetches e.g. if a single memory location
200    is accessed several times in a single iteration of the loop.  */
201 #define NONTEMPORAL_FRACTION 16
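
/* For illustration: assuming a 32kB L1 cache (an L1_CACHE_SIZE of 32) and
   the NONTEMPORAL_FRACTION of 16 above, reuses closer than 32768 / 16 = 2048
   bytes of accessed data are ignored for this purpose; the actual cache
   sizes are target parameters.  */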
202 
203 /* In case we have to emit a memory fence instruction after the loop that
204    uses nontemporal stores, this defines the builtin to use.  */
205 
206 #ifndef FENCE_FOLLOWING_MOVNT
207 #define FENCE_FOLLOWING_MOVNT NULL_TREE
208 #endif
209 
210 /* It is not profitable to prefetch when the trip count is not at
211    least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
212    For example, in a loop with a prefetch ahead distance of 10,
213    supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
214    profitable to prefetch when the trip count is greater or equal to
215    40.  In that case, 30 out of the 40 iterations will benefit from
216    prefetching.  */
217 
218 #ifndef TRIP_COUNT_TO_AHEAD_RATIO
219 #define TRIP_COUNT_TO_AHEAD_RATIO 4
220 #endif
221 
222 /* The group of references between which reuse may occur.  */
223 
224 struct mem_ref_group
225 {
226   tree base;			/* Base of the reference.  */
227   tree step;			/* Step of the reference.  */
228   struct mem_ref *refs;		/* References in the group.  */
229   struct mem_ref_group *next;	/* Next group of references.  */
230 };
231 
232 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */
233 
234 #define PREFETCH_ALL		HOST_WIDE_INT_M1U
235 
236 /* Do not generate a prefetch if the unroll factor is significantly less
237    than what is required by the prefetch.  This is to avoid redundant
238    prefetches.  For example, when prefetch_mod is 16 and unroll_factor is
239    2, prefetching requires unrolling the loop 16 times, but
240    the loop is actually unrolled twice.  In this case (ratio = 8),
241    prefetching is not likely to be beneficial.  */
242 
243 #ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
244 #define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
245 #endif
246 
247 /* Some of the prefetch computations have quadratic complexity.  We want to
248    avoid huge compile times and, therefore, want to limit the amount of
249    memory references per loop where we consider prefetching.  */
250 
251 #ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
252 #define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
253 #endif
254 
255 /* The memory reference.  */
256 
257 struct mem_ref
258 {
259   gimple *stmt;			/* Statement in which the reference appears.  */
260   tree mem;			/* The reference.  */
261   HOST_WIDE_INT delta;		/* Constant offset of the reference.  */
262   struct mem_ref_group *group;	/* The group of references it belongs to.  */
263   unsigned HOST_WIDE_INT prefetch_mod;
264 				/* Prefetch only each PREFETCH_MOD-th
265 				   iteration.  */
266   unsigned HOST_WIDE_INT prefetch_before;
267 				/* Prefetch only first PREFETCH_BEFORE
268 				   iterations.  */
269   unsigned reuse_distance;	/* The amount of data accessed before the first
270 				   reuse of this value.  */
271   struct mem_ref *next;		/* The next reference in the group.  */
272   unsigned write_p : 1;		/* Is it a write?  */
273   unsigned independent_p : 1;	/* True if the reference is independent of
274 				   all other references inside the loop.  */
275   unsigned issue_prefetch_p : 1;	/* Should we really issue the prefetch?  */
276   unsigned storent_p : 1;	/* True if we changed the store to a
277 				   nontemporal one.  */
278 };
279 
280 /* Dumps information about a memory reference.  */
281 static void
282 dump_mem_details (FILE *file, tree base, tree step,
283 	    HOST_WIDE_INT delta, bool write_p)
284 {
285   fprintf (file, "(base ");
286   print_generic_expr (file, base, TDF_SLIM);
287   fprintf (file, ", step ");
288   if (cst_and_fits_in_hwi (step))
289     fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step));
290   else
291     print_generic_expr (file, step, TDF_TREE);
292   fprintf (file, ")\n");
293   fprintf (file, "  delta ");
294   fprintf (file, HOST_WIDE_INT_PRINT_DEC, delta);
295   fprintf (file, "\n");
296   fprintf (file, "  %s\n", write_p ? "write" : "read");
297   fprintf (file, "\n");
298 }
299 
300 /* Dumps information about reference REF to FILE.  */
301 
302 static void
303 dump_mem_ref (FILE *file, struct mem_ref *ref)
304 {
305   fprintf (file, "Reference %p:\n", (void *) ref);
306 
307   fprintf (file, "  group %p ", (void *) ref->group);
308 
309   dump_mem_details (file, ref->group->base, ref->group->step, ref->delta,
310                    ref->write_p);
311 }
312 
313 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
314    exist.  */
315 
316 static struct mem_ref_group *
317 find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
318 {
319   struct mem_ref_group *group;
320 
321   for (; *groups; groups = &(*groups)->next)
322     {
323       if (operand_equal_p ((*groups)->step, step, 0)
324 	  && operand_equal_p ((*groups)->base, base, 0))
325 	return *groups;
326 
327       /* If step is an integer constant, keep the list of groups sorted
328          by decreasing step.  */
329       if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
330 	  && int_cst_value ((*groups)->step) < int_cst_value (step))
331 	break;
332     }
333 
334   group = XNEW (struct mem_ref_group);
335   group->base = base;
336   group->step = step;
337   group->refs = NULL;
338   group->next = *groups;
339   *groups = group;
340 
341   return group;
342 }
343 
344 /* Records a memory reference MEM in GROUP with offset DELTA and write status
345    WRITE_P.  The reference occurs in statement STMT.  */
346 
347 static void
348 record_ref (struct mem_ref_group *group, gimple *stmt, tree mem,
349 	    HOST_WIDE_INT delta, bool write_p)
350 {
351   struct mem_ref **aref;
352 
353   /* Do not record the same address twice.  */
354   for (aref = &group->refs; *aref; aref = &(*aref)->next)
355     {
356       /* A write reference may not be able to reuse a prefetch issued for a
357 	 read, or vice versa.  */
358       if (!WRITE_CAN_USE_READ_PREFETCH
359 	  && write_p
360 	  && !(*aref)->write_p)
361 	continue;
362       if (!READ_CAN_USE_WRITE_PREFETCH
363 	  && !write_p
364 	  && (*aref)->write_p)
365 	continue;
366 
367       if ((*aref)->delta == delta)
368 	return;
369     }
370 
371   (*aref) = XNEW (struct mem_ref);
372   (*aref)->stmt = stmt;
373   (*aref)->mem = mem;
374   (*aref)->delta = delta;
375   (*aref)->write_p = write_p;
376   (*aref)->prefetch_before = PREFETCH_ALL;
377   (*aref)->prefetch_mod = 1;
378   (*aref)->reuse_distance = 0;
379   (*aref)->issue_prefetch_p = false;
380   (*aref)->group = group;
381   (*aref)->next = NULL;
382   (*aref)->independent_p = false;
383   (*aref)->storent_p = false;
384 
385   if (dump_file && (dump_flags & TDF_DETAILS))
386     dump_mem_ref (dump_file, *aref);
387 }
388 
389 /* Release memory references in GROUPS.  */
390 
391 static void
392 release_mem_refs (struct mem_ref_group *groups)
393 {
394   struct mem_ref_group *next_g;
395   struct mem_ref *ref, *next_r;
396 
397   for (; groups; groups = next_g)
398     {
399       next_g = groups->next;
400       for (ref = groups->refs; ref; ref = next_r)
401 	{
402 	  next_r = ref->next;
403 	  free (ref);
404 	}
405       free (groups);
406     }
407 }
408 
409 /* A structure used to pass arguments to idx_analyze_ref.  */
410 
411 struct ar_data
412 {
413   struct loop *loop;			/* Loop of the reference.  */
414   gimple *stmt;				/* Statement of the reference.  */
415   tree *step;				/* Step of the memory reference.  */
416   HOST_WIDE_INT *delta;			/* Offset of the memory reference.  */
417 };
418 
419 /* Analyzes a single INDEX of a memory reference to obtain information
420    described at analyze_ref.  Callback for for_each_index.  */
421 
422 static bool
423 idx_analyze_ref (tree base, tree *index, void *data)
424 {
425   struct ar_data *ar_data = (struct ar_data *) data;
426   tree ibase, step, stepsize;
427   HOST_WIDE_INT idelta = 0, imult = 1;
428   affine_iv iv;
429 
430   if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
431 		  *index, &iv, true))
432     return false;
433   ibase = iv.base;
434   step = iv.step;
435 
436   if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
437       && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
438     {
439       idelta = int_cst_value (TREE_OPERAND (ibase, 1));
440       ibase = TREE_OPERAND (ibase, 0);
441     }
442   if (cst_and_fits_in_hwi (ibase))
443     {
444       idelta += int_cst_value (ibase);
445       ibase = build_int_cst (TREE_TYPE (ibase), 0);
446     }
447 
448   if (TREE_CODE (base) == ARRAY_REF)
449     {
450       stepsize = array_ref_element_size (base);
451       if (!cst_and_fits_in_hwi (stepsize))
452 	return false;
453       imult = int_cst_value (stepsize);
454       step = fold_build2 (MULT_EXPR, sizetype,
455 			  fold_convert (sizetype, step),
456 			  fold_convert (sizetype, stepsize));
457       idelta *= imult;
458     }
459 
460   if (*ar_data->step == NULL_TREE)
461     *ar_data->step = step;
462   else
463     *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
464 				  fold_convert (sizetype, *ar_data->step),
465 				  fold_convert (sizetype, step));
466   *ar_data->delta += idelta;
467   *index = ibase;
468 
469   return true;
470 }
471 
472 /* Tries to express REF_P in shape &BASE + STEP * iter + DELTA, where DELTA and
473    STEP are integer constants and iter is number of iterations of LOOP.  The
474    reference occurs in statement STMT.  Strips nonaddressable component
475    references from REF_P.  */
476 
477 static bool
478 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
479 	     tree *step, HOST_WIDE_INT *delta,
480 	     gimple *stmt)
481 {
482   struct ar_data ar_data;
483   tree off;
484   HOST_WIDE_INT bit_offset;
485   tree ref = *ref_p;
486 
487   *step = NULL_TREE;
488   *delta = 0;
489 
490   /* First strip off the component references.  Ignore bitfields.
491      Also strip off the real and imaginary parts of a complex, so that
492      they can have the same base.  */
493   if (TREE_CODE (ref) == REALPART_EXPR
494       || TREE_CODE (ref) == IMAGPART_EXPR
495       || (TREE_CODE (ref) == COMPONENT_REF
496           && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
497     {
498       if (TREE_CODE (ref) == IMAGPART_EXPR)
499         *delta += int_size_in_bytes (TREE_TYPE (ref));
500       ref = TREE_OPERAND (ref, 0);
501     }
502 
503   *ref_p = ref;
504 
505   for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
506     {
507       off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
508       bit_offset = TREE_INT_CST_LOW (off);
509       gcc_assert (bit_offset % BITS_PER_UNIT == 0);
510 
511       *delta += bit_offset / BITS_PER_UNIT;
512     }
513 
514   *base = unshare_expr (ref);
515   ar_data.loop = loop;
516   ar_data.stmt = stmt;
517   ar_data.step = step;
518   ar_data.delta = delta;
519   return for_each_index (base, idx_analyze_ref, &ar_data);
520 }
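
/* For illustration (this comment only): for an access a[i + 5] to an array
   of 4-byte integers, where i is an induction variable running from 0 in
   steps of 1, the analysis above would typically yield STEP 4 and DELTA 20,
   i.e. the reference is &a[0] + 4 * iter + 20.  The exact trees depend on
   how the access is represented in GIMPLE.  */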
521 
522 /* Record a memory reference REF to the list REFS.  The reference occurs in
523    LOOP in statement STMT and it is a write if WRITE_P.  Returns true if the
524    reference was recorded, false otherwise.  */
525 
526 static bool
527 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
528 			      tree ref, bool write_p, gimple *stmt)
529 {
530   tree base, step;
531   HOST_WIDE_INT delta;
532   struct mem_ref_group *agrp;
533 
534   if (get_base_address (ref) == NULL)
535     return false;
536 
537   if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
538     return false;
539   /* If analyze_ref fails the default is a NULL_TREE.  We can stop here.  */
540   if (step == NULL_TREE)
541     return false;
542 
543   /* Stop if the address of BASE could not be taken.  */
544   if (may_be_nonaddressable_p (base))
545     return false;
546 
547   /* Limit non-constant step prefetching only to the innermost loops and
548      only when the step is loop invariant in the entire loop nest. */
549   if (!cst_and_fits_in_hwi (step))
550     {
551       if (loop->inner != NULL)
552         {
553           if (dump_file && (dump_flags & TDF_DETAILS))
554             {
555               fprintf (dump_file, "Memory expression %p\n",(void *) ref );
556               print_generic_expr (dump_file, ref, TDF_TREE);
557               fprintf (dump_file,":");
558               dump_mem_details (dump_file, base, step, delta, write_p);
559               fprintf (dump_file,
560                        "Ignoring %p, non-constant step prefetching is "
561                        "limited to innermost loops\n",
562                        (void *) ref);
563             }
564           return false;
565         }
566       else
567         {
568           if (!expr_invariant_in_loop_p (loop_outermost (loop), step))
569             {
570               if (dump_file && (dump_flags & TDF_DETAILS))
571                 {
572                   fprintf (dump_file, "Memory expression %p\n",(void *) ref );
573                   print_generic_expr (dump_file, ref, TDF_TREE);
574                   fprintf (dump_file,":");
575                   dump_mem_details (dump_file, base, step, delta, write_p);
576                   fprintf (dump_file,
577                            "Not prefetching, ignoring %p due to "
578                            "loop variant step\n",
579                            (void *) ref);
580                 }
581               return false;
582             }
583         }
584     }
585 
586   /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
587      are integer constants.  */
588   agrp = find_or_create_group (refs, base, step);
589   record_ref (agrp, stmt, ref, delta, write_p);
590 
591   return true;
592 }
593 
594 /* Record the suitable memory references in LOOP.  NO_OTHER_REFS is set to
595    true if there are no other memory references inside the loop.  */
596 
597 static struct mem_ref_group *
598 gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
599 {
600   basic_block *body = get_loop_body_in_dom_order (loop);
601   basic_block bb;
602   unsigned i;
603   gimple_stmt_iterator bsi;
604   gimple *stmt;
605   tree lhs, rhs;
606   struct mem_ref_group *refs = NULL;
607 
608   *no_other_refs = true;
609   *ref_count = 0;
610 
611   /* Scan the loop body in order, so that the earlier references precede the
612      later ones.  */
613   for (i = 0; i < loop->num_nodes; i++)
614     {
615       bb = body[i];
616       if (bb->loop_father != loop)
617 	continue;
618 
619       for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
620 	{
621 	  stmt = gsi_stmt (bsi);
622 
623 	  if (gimple_code (stmt) != GIMPLE_ASSIGN)
624 	    {
625 	      if (gimple_vuse (stmt)
626 		  || (is_gimple_call (stmt)
627 		      && !(gimple_call_flags (stmt) & ECF_CONST)))
628 		*no_other_refs = false;
629 	      continue;
630 	    }
631 
632 	  if (! gimple_vuse (stmt))
633 	    continue;
634 
635 	  lhs = gimple_assign_lhs (stmt);
636 	  rhs = gimple_assign_rhs1 (stmt);
637 
638 	  if (REFERENCE_CLASS_P (rhs))
639 	    {
640 	    *no_other_refs &= gather_memory_references_ref (loop, &refs,
641 							    rhs, false, stmt);
642 	    *ref_count += 1;
643 	    }
644 	  if (REFERENCE_CLASS_P (lhs))
645 	    {
646 	    *no_other_refs &= gather_memory_references_ref (loop, &refs,
647 							    lhs, true, stmt);
648 	    *ref_count += 1;
649 	    }
650 	}
651     }
652   free (body);
653 
654   return refs;
655 }
656 
657 /* Prune the prefetch candidate REF using the self-reuse.  */
658 
659 static void
660 prune_ref_by_self_reuse (struct mem_ref *ref)
661 {
662   HOST_WIDE_INT step;
663   bool backward;
664 
665   /* If the step size is non-constant, we cannot calculate prefetch_mod.  */
666   if (!cst_and_fits_in_hwi (ref->group->step))
667     return;
668 
669   step = int_cst_value (ref->group->step);
670 
671   backward = step < 0;
672 
673   if (step == 0)
674     {
675       /* Prefetch references to invariant address just once.  */
676       ref->prefetch_before = 1;
677       return;
678     }
679 
680   if (backward)
681     step = -step;
682 
683   if (step > PREFETCH_BLOCK)
684     return;
685 
686   if ((backward && HAVE_BACKWARD_PREFETCH)
687       || (!backward && HAVE_FORWARD_PREFETCH))
688     {
689       ref->prefetch_before = 1;
690       return;
691     }
692 
693   ref->prefetch_mod = PREFETCH_BLOCK / step;
694 }
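
/* For example, assuming a 64-byte PREFETCH_BLOCK (the cache line size used
   in the overview example at the top of the file) and no hardware prefetch,
   a reference like a[16*i] into a char array has step 16, so the code above
   leaves prefetch_before alone and sets prefetch_mod to 64 / 16 = 4 -- the
   PREFETCH_MOD quoted for example (3) in that overview.  */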
695 
696 /* Divides X by BY, rounding down.  */
697 
698 static HOST_WIDE_INT
699 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
700 {
701   gcc_assert (by > 0);
702 
703   if (x >= 0)
704     return x / (HOST_WIDE_INT) by;
705   else
706     return (x + (HOST_WIDE_INT) by - 1) / (HOST_WIDE_INT) by;
707 }
708 
709 /* Given a CACHE_LINE_SIZE and two inductive memory references
710    with a common STEP greater than CACHE_LINE_SIZE and an address
711    difference DELTA, compute the probability that they will fall
712    in different cache lines.  Return true if the computed miss rate
713    is not greater than the ACCEPTABLE_MISS_RATE.  DISTINCT_ITERS is the
714    number of distinct iterations after which the pattern repeats itself.
715    ALIGN_UNIT is the unit of alignment in bytes.  */
716 
717 static bool
718 is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
719 		   HOST_WIDE_INT step, HOST_WIDE_INT delta,
720 		   unsigned HOST_WIDE_INT distinct_iters,
721 		   int align_unit)
722 {
723   unsigned align, iter;
724   int total_positions, miss_positions, max_allowed_miss_positions;
725   int address1, address2, cache_line1, cache_line2;
726 
727   /* It always misses if delta is greater than or equal to the cache
728      line size.  */
729   if (delta >= (HOST_WIDE_INT) cache_line_size)
730     return false;
731 
732   miss_positions = 0;
733   total_positions = (cache_line_size / align_unit) * distinct_iters;
734   max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;
735 
736   /* Iterate through all possible alignments of the first
737      memory reference within its cache line.  */
738   for (align = 0; align < cache_line_size; align += align_unit)
739 
740     /* Iterate through all distinct iterations.  */
741     for (iter = 0; iter < distinct_iters; iter++)
742       {
743 	address1 = align + step * iter;
744 	address2 = address1 + delta;
745 	cache_line1 = address1 / cache_line_size;
746 	cache_line2 = address2 / cache_line_size;
747 	if (cache_line1 != cache_line2)
748 	  {
749 	    miss_positions += 1;
750             if (miss_positions > max_allowed_miss_positions)
751 	      return false;
752           }
753       }
754   return true;
755 }
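
/* A purely illustrative example: with cache_line_size 64, align_unit 16,
   delta 32 and distinct_iters 1, there are (64 / 16) * 1 = 4 positions and
   at most 50 * 4 / 1000 = 0 tolerated misses; the alignments 32 and 48 put
   the two addresses in different cache lines, so the function returns false.
   With delta 4 instead, none of the four alignments crosses a line boundary
   and the miss rate is acceptable.  */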
756 
757 /* Prune the prefetch candidate REF using the reuse with BY.
758    If BY_IS_BEFORE is true, BY is before REF in the loop.  */
759 
760 static void
761 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
762 			  bool by_is_before)
763 {
764   HOST_WIDE_INT step;
765   bool backward;
766   HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
767   HOST_WIDE_INT delta = delta_b - delta_r;
768   HOST_WIDE_INT hit_from;
769   unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
770   HOST_WIDE_INT reduced_step;
771   unsigned HOST_WIDE_INT reduced_prefetch_block;
772   tree ref_type;
773   int align_unit;
774 
775   /* If the step is non-constant, we cannot calculate prefetch_before.  */
776   if (!cst_and_fits_in_hwi (ref->group->step)) {
777     return;
778   }
779 
780   step = int_cst_value (ref->group->step);
781 
782   backward = step < 0;
783 
784 
785   if (delta == 0)
786     {
787       /* If the references have the same address, only prefetch the
788 	 former.  */
789       if (by_is_before)
790 	ref->prefetch_before = 0;
791 
792       return;
793     }
794 
795   if (!step)
796     {
797       /* If the reference addresses are invariant and fall into the
798 	 same cache line, prefetch just the first one.  */
799       if (!by_is_before)
800 	return;
801 
802       if (ddown (ref->delta, PREFETCH_BLOCK)
803 	  != ddown (by->delta, PREFETCH_BLOCK))
804 	return;
805 
806       ref->prefetch_before = 0;
807       return;
808     }
809 
810   /* Only prune the reference that is behind in the array.  */
811   if (backward)
812     {
813       if (delta > 0)
814 	return;
815 
816       /* Transform the data so that we may assume that the accesses
817 	 are forward.  */
818       delta = - delta;
819       step = -step;
820       delta_r = PREFETCH_BLOCK - 1 - delta_r;
821       delta_b = PREFETCH_BLOCK - 1 - delta_b;
822     }
823   else
824     {
825       if (delta < 0)
826 	return;
827     }
828 
829   /* Check whether the two references are likely to hit the same cache
830      line, and how distant the iterations in which it occurs are from
831      each other.  */
832 
833   if (step <= PREFETCH_BLOCK)
834     {
835       /* The accesses are sure to meet.  Let us check when.  */
836       hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
837       prefetch_before = (hit_from - delta_r + step - 1) / step;
838 
839       /* Do not reduce prefetch_before if we meet beyond cache size.  */
840       if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step))
841         prefetch_before = PREFETCH_ALL;
842       if (prefetch_before < ref->prefetch_before)
843 	ref->prefetch_before = prefetch_before;
844 
845       return;
846     }
847 
848   /* A more complicated case with step > prefetch_block.  First reduce
849      the ratio between the step and the cache line size to its simplest
850      terms.  The resulting denominator will then represent the number of
851      distinct iterations after which each address will go back to its
852      initial location within the cache line.  This computation assumes
853      that PREFETCH_BLOCK is a power of two.  */
854   prefetch_block = PREFETCH_BLOCK;
855   reduced_prefetch_block = prefetch_block;
856   reduced_step = step;
857   while ((reduced_step & 1) == 0
858 	 && reduced_prefetch_block > 1)
859     {
860       reduced_step >>= 1;
861       reduced_prefetch_block >>= 1;
862     }
863 
864   prefetch_before = delta / step;
865   delta %= step;
866   ref_type = TREE_TYPE (ref->mem);
867   align_unit = TYPE_ALIGN (ref_type) / 8;
868   if (is_miss_rate_acceptable (prefetch_block, step, delta,
869 			       reduced_prefetch_block, align_unit))
870     {
871       /* Do not reduce prefetch_before if we meet beyond cache size.  */
872       if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
873         prefetch_before = PREFETCH_ALL;
874       if (prefetch_before < ref->prefetch_before)
875 	ref->prefetch_before = prefetch_before;
876 
877       return;
878     }
879 
880   /* Try also the following iteration.  */
881   prefetch_before++;
882   delta = step - delta;
883   if (is_miss_rate_acceptable (prefetch_block, step, delta,
884 			       reduced_prefetch_block, align_unit))
885     {
886       if (prefetch_before < ref->prefetch_before)
887 	ref->prefetch_before = prefetch_before;
888 
889       return;
890     }
891 
892   /* REF probably does not reuse BY.  */
893   return;
894 }
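
/* Tying this back to the overview example at the top of the file: for the
   references a[i] (REF) and a[i + 64] (BY) in a char array, with a 64-byte
   PREFETCH_BLOCK, we get step 1, delta_r 0 and delta_b 64, so hit_from is 64
   and prefetch_before becomes (64 - 0 + 1 - 1) / 1 = 64 -- the
   PREFETCH_BEFORE value quoted for reference (1) in that example.  */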
895 
896 /* Prune the prefetch candidate REF using the reuses with other references
897    in REFS.  */
898 
899 static void
900 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
901 {
902   struct mem_ref *prune_by;
903   bool before = true;
904 
905   prune_ref_by_self_reuse (ref);
906 
907   for (prune_by = refs; prune_by; prune_by = prune_by->next)
908     {
909       if (prune_by == ref)
910 	{
911 	  before = false;
912 	  continue;
913 	}
914 
915       if (!WRITE_CAN_USE_READ_PREFETCH
916 	  && ref->write_p
917 	  && !prune_by->write_p)
918 	continue;
919       if (!READ_CAN_USE_WRITE_PREFETCH
920 	  && !ref->write_p
921 	  && prune_by->write_p)
922 	continue;
923 
924       prune_ref_by_group_reuse (ref, prune_by, before);
925     }
926 }
927 
928 /* Prune the prefetch candidates in GROUP using the reuse analysis.  */
929 
930 static void
931 prune_group_by_reuse (struct mem_ref_group *group)
932 {
933   struct mem_ref *ref_pruned;
934 
935   for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
936     {
937       prune_ref_by_reuse (ref_pruned, group->refs);
938 
939       if (dump_file && (dump_flags & TDF_DETAILS))
940 	{
941 	  fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
942 
943 	  if (ref_pruned->prefetch_before == PREFETCH_ALL
944 	      && ref_pruned->prefetch_mod == 1)
945 	    fprintf (dump_file, " no restrictions");
946 	  else if (ref_pruned->prefetch_before == 0)
947 	    fprintf (dump_file, " do not prefetch");
948 	  else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
949 	    fprintf (dump_file, " prefetch once");
950 	  else
951 	    {
952 	      if (ref_pruned->prefetch_before != PREFETCH_ALL)
953 		{
954 		  fprintf (dump_file, " prefetch before ");
955 		  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
956 			   ref_pruned->prefetch_before);
957 		}
958 	      if (ref_pruned->prefetch_mod != 1)
959 		{
960 		  fprintf (dump_file, " prefetch mod ");
961 		  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
962 			   ref_pruned->prefetch_mod);
963 		}
964 	    }
965 	  fprintf (dump_file, "\n");
966 	}
967     }
968 }
969 
970 /* Prune the list of prefetch candidates GROUPS using the reuse analysis.  */
971 
972 static void
973 prune_by_reuse (struct mem_ref_group *groups)
974 {
975   for (; groups; groups = groups->next)
976     prune_group_by_reuse (groups);
977 }
978 
979 /* Returns true if we should issue prefetch for REF.  */
980 
981 static bool
982 should_issue_prefetch_p (struct mem_ref *ref)
983 {
984   /* For now do not issue prefetches for only the first few of the
985      iterations.  */
986   if (ref->prefetch_before != PREFETCH_ALL)
987     {
988       if (dump_file && (dump_flags & TDF_DETAILS))
989         fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
990 		 (void *) ref);
991       return false;
992     }
993 
994   /* Do not prefetch nontemporal stores.  */
995   if (ref->storent_p)
996     {
997       if (dump_file && (dump_flags & TDF_DETAILS))
998         fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
999       return false;
1000     }
1001 
1002   return true;
1003 }
1004 
1005 /* Decide which of the prefetch candidates in GROUPS to prefetch.
1006    AHEAD is the number of iterations to prefetch ahead (which corresponds
1007    to the number of simultaneous instances of one prefetch running at a
1008    time).  UNROLL_FACTOR is the factor by which the loop is going to be
1009    unrolled.  Returns true if there is anything to prefetch.  */
1010 
1011 static bool
1012 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
1013 		     unsigned ahead)
1014 {
1015   unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
1016   unsigned slots_per_prefetch;
1017   struct mem_ref *ref;
1018   bool any = false;
1019 
1020   /* At most SIMULTANEOUS_PREFETCHES should be running at the same time.  */
1021   remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
1022 
1023   /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
1024      AHEAD / UNROLL_FACTOR iterations of the unrolled loop.  In each iteration,
1025      it will need a prefetch slot.  */
1026   slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
1027   if (dump_file && (dump_flags & TDF_DETAILS))
1028     fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
1029 	     slots_per_prefetch);
1030 
1031   /* For now we just take memory references one by one and issue
1032      prefetches for as many as possible.  The groups are sorted
1033      starting with the largest step, since the references with
1034      large step are more likely to cause many cache misses.  */
1035 
1036   for (; groups; groups = groups->next)
1037     for (ref = groups->refs; ref; ref = ref->next)
1038       {
1039 	if (!should_issue_prefetch_p (ref))
1040 	  continue;
1041 
1042         /* The loop is far from being sufficiently unrolled for this
1043            prefetch.  Do not generate a prefetch to avoid many redundant
1044            prefetches.  */
1045         if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
1046           continue;
1047 
1048 	/* If we need to prefetch the reference each PREFETCH_MOD iterations,
1049 	   and we unroll the loop UNROLL_FACTOR times, we need to insert
1050 	   ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
1051 	   iteration.  */
1052 	n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1053 			/ ref->prefetch_mod);
1054 	prefetch_slots = n_prefetches * slots_per_prefetch;
1055 
1056 	/* If more than half of the prefetches would be lost anyway, do not
1057 	   issue the prefetch.  */
1058 	if (2 * remaining_prefetch_slots < prefetch_slots)
1059 	  continue;
1060 
1061 	ref->issue_prefetch_p = true;
1062 
1063 	if (remaining_prefetch_slots <= prefetch_slots)
1064 	  return true;
1065 	remaining_prefetch_slots -= prefetch_slots;
1066 	any = true;
1067       }
1068 
1069   return any;
1070 }
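
/* Illustrative numbers (the real values are target- and loop-dependent):
   with ahead 8 and unroll_factor 4, each prefetch occupies
   (8 + 4 / 2) / 4 = 2 slots (integer division); a reference with
   prefetch_mod 1 then needs ceil (4 / 1) = 4 prefetch instructions per
   unrolled iteration, i.e. 8 of the SIMULTANEOUS_PREFETCHES slots, and is
   only issued while at least half that many slots are still free.  */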
1071 
1072 /* Return TRUE if no prefetch is going to be generated in the given
1073    GROUPS.  */
1074 
1075 static bool
1076 nothing_to_prefetch_p (struct mem_ref_group *groups)
1077 {
1078   struct mem_ref *ref;
1079 
1080   for (; groups; groups = groups->next)
1081     for (ref = groups->refs; ref; ref = ref->next)
1082       if (should_issue_prefetch_p (ref))
1083 	return false;
1084 
1085   return true;
1086 }
1087 
1088 /* Estimate the number of prefetches in the given GROUPS.
1089    UNROLL_FACTOR is the factor by which LOOP was unrolled.  */
1090 
1091 static int
1092 estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
1093 {
1094   struct mem_ref *ref;
1095   unsigned n_prefetches;
1096   int prefetch_count = 0;
1097 
1098   for (; groups; groups = groups->next)
1099     for (ref = groups->refs; ref; ref = ref->next)
1100       if (should_issue_prefetch_p (ref))
1101 	{
1102 	  n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1103 			  / ref->prefetch_mod);
1104 	  prefetch_count += n_prefetches;
1105 	}
1106 
1107   return prefetch_count;
1108 }
1109 
1110 /* Issue prefetches for the reference REF into the loop as decided before.
1111    AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
1112    is the factor by which LOOP was unrolled.  */
1113 
1114 static void
1115 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
1116 {
1117   HOST_WIDE_INT delta;
1118   tree addr, addr_base, write_p, local, forward;
1119   gcall *prefetch;
1120   gimple_stmt_iterator bsi;
1121   unsigned n_prefetches, ap;
1122   bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
1123 
1124   if (dump_file && (dump_flags & TDF_DETAILS))
1125     fprintf (dump_file, "Issued%s prefetch for %p.\n",
1126 	     nontemporal ? " nontemporal" : "",
1127 	     (void *) ref);
1128 
1129   bsi = gsi_for_stmt (ref->stmt);
1130 
1131   n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1132 		  / ref->prefetch_mod);
1133   addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
1134   addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
1135 					true, NULL, true, GSI_SAME_STMT);
1136   write_p = ref->write_p ? integer_one_node : integer_zero_node;
1137   local = nontemporal ? integer_zero_node : integer_three_node;
1138 
1139   for (ap = 0; ap < n_prefetches; ap++)
1140     {
1141       if (cst_and_fits_in_hwi (ref->group->step))
1142         {
1143           /* Determine the address to prefetch.  */
1144           delta = (ahead + ap * ref->prefetch_mod) *
1145 		   int_cst_value (ref->group->step);
1146           addr = fold_build_pointer_plus_hwi (addr_base, delta);
1147           addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
1148                                            true, GSI_SAME_STMT);
1149         }
1150       else
1151         {
1152           /* The step size is non-constant but loop-invariant.  We use the
1153              heuristic of simply prefetching AHEAD iterations ahead.  */
1154           forward = fold_build2 (MULT_EXPR, sizetype,
1155                                  fold_convert (sizetype, ref->group->step),
1156                                  fold_convert (sizetype, size_int (ahead)));
1157           addr = fold_build_pointer_plus (addr_base, forward);
1158           addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
1159 					   NULL, true, GSI_SAME_STMT);
1160         }
1161 
1162       if (addr_base != addr
1163 	  && TREE_CODE (addr_base) == SSA_NAME
1164 	  && TREE_CODE (addr) == SSA_NAME)
1165 	{
1166 	  duplicate_ssa_name_ptr_info (addr, SSA_NAME_PTR_INFO (addr_base));
1167 	  /* As this isn't a plain copy we have to reset alignment
1168 	     information.  */
1169 	  if (SSA_NAME_PTR_INFO (addr))
1170 	    mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr));
1171 	}
1172 
1173       /* Create the prefetch instruction.  */
1174       prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
1175 				    3, addr, write_p, local);
1176       gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
1177     }
1178 }
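
/* The call built above corresponds to a source-level __builtin_prefetch.
   For a constant step, the inserted statement is roughly equivalent to
   (hypothetical names, STEP measured in bytes):

     __builtin_prefetch ((char *) &MEM + ahead * step, write_p,
                         nontemporal ? 0 : 3);

   i.e. the address AHEAD iterations further along the reference's stride,
   with the locality hint lowered to 0 for nontemporal references.  */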
1179 
1180 /* Issue prefetches for the references in GROUPS into the loop as decided
1181    before.  AHEAD is the number of iterations to prefetch ahead.
1182    UNROLL_FACTOR is the factor by which LOOP was unrolled.  */
1183 
1184 static void
1185 issue_prefetches (struct mem_ref_group *groups,
1186 		  unsigned unroll_factor, unsigned ahead)
1187 {
1188   struct mem_ref *ref;
1189 
1190   for (; groups; groups = groups->next)
1191     for (ref = groups->refs; ref; ref = ref->next)
1192       if (ref->issue_prefetch_p)
1193 	issue_prefetch_ref (ref, unroll_factor, ahead);
1194 }
1195 
1196 /* Returns true if REF is a memory write for which a nontemporal store insn
1197    can be used.  */
1198 
1199 static bool
1200 nontemporal_store_p (struct mem_ref *ref)
1201 {
1202   machine_mode mode;
1203   enum insn_code code;
1204 
1205   /* REF must be a write that is not reused.  We require it to be independent
1206      of all other memory references in the loop, as the nontemporal stores may
1207      be reordered with respect to other memory references.  */
1208   if (!ref->write_p
1209       || !ref->independent_p
1210       || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
1211     return false;
1212 
1213   /* Check that we have the storent instruction for the mode.  */
1214   mode = TYPE_MODE (TREE_TYPE (ref->mem));
1215   if (mode == BLKmode)
1216     return false;
1217 
1218   code = optab_handler (storent_optab, mode);
1219   return code != CODE_FOR_nothing;
1220 }
1221 
1222 /* If REF is a nontemporal store, we mark the corresponding modify statement
1223    and return true.  Otherwise, we return false.  */
1224 
1225 static bool
1226 mark_nontemporal_store (struct mem_ref *ref)
1227 {
1228   if (!nontemporal_store_p (ref))
1229     return false;
1230 
1231   if (dump_file && (dump_flags & TDF_DETAILS))
1232     fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
1233 	     (void *) ref);
1234 
1235   gimple_assign_set_nontemporal_move (ref->stmt, true);
1236   ref->storent_p = true;
1237 
1238   return true;
1239 }
1240 
1241 /* Issue a memory fence instruction after LOOP.  */
1242 
1243 static void
1244 emit_mfence_after_loop (struct loop *loop)
1245 {
1246   vec<edge> exits = get_loop_exit_edges (loop);
1247   edge exit;
1248   gcall *call;
1249   gimple_stmt_iterator bsi;
1250   unsigned i;
1251 
1252   FOR_EACH_VEC_ELT (exits, i, exit)
1253     {
1254       call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
1255 
1256       if (!single_pred_p (exit->dest)
1257 	  /* If possible, we prefer not to insert the fence on other paths
1258 	     in cfg.  */
1259 	  && !(exit->flags & EDGE_ABNORMAL))
1260 	split_loop_exit_edge (exit);
1261       bsi = gsi_after_labels (exit->dest);
1262 
1263       gsi_insert_before (&bsi, call, GSI_NEW_STMT);
1264     }
1265 
1266   exits.release ();
1267   update_ssa (TODO_update_ssa_only_virtuals);
1268 }
1269 
1270 /* Returns true if we can use storent in loop, false otherwise.  */
1271 
1272 static bool
1273 may_use_storent_in_loop_p (struct loop *loop)
1274 {
1275   bool ret = true;
1276 
1277   if (loop->inner != NULL)
1278     return false;
1279 
1280   /* If we must issue a mfence insn after using storent, check that there
1281      is a suitable place for it at each of the loop exits.  */
1282   if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1283     {
1284       vec<edge> exits = get_loop_exit_edges (loop);
1285       unsigned i;
1286       edge exit;
1287 
1288       FOR_EACH_VEC_ELT (exits, i, exit)
1289 	if ((exit->flags & EDGE_ABNORMAL)
1290 	    && exit->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1291 	  ret = false;
1292 
1293       exits.release ();
1294     }
1295 
1296   return ret;
1297 }
1298 
1299 /* Marks nontemporal stores in LOOP.  GROUPS contains the description of memory
1300    references in the loop.  */
1301 
1302 static void
1303 mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1304 {
1305   struct mem_ref *ref;
1306   bool any = false;
1307 
1308   if (!may_use_storent_in_loop_p (loop))
1309     return;
1310 
1311   for (; groups; groups = groups->next)
1312     for (ref = groups->refs; ref; ref = ref->next)
1313       any |= mark_nontemporal_store (ref);
1314 
1315   if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1316     emit_mfence_after_loop (loop);
1317 }
1318 
1319 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
1320    this is the case, fill in DESC with the description of the number of
1321    iterations.  */
1322 
1323 static bool
1324 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1325 		      unsigned factor)
1326 {
1327   if (!can_unroll_loop_p (loop, factor, desc))
1328     return false;
1329 
1330   /* We only consider loops without control flow for unrolling.  This is not
1331      a hard restriction -- tree_unroll_loop works with arbitrary loops
1332      as well; but the unrolling/prefetching is usually more profitable for
1333      loops consisting of a single basic block, and we want to limit the
1334      code growth.  */
1335   if (loop->num_nodes > 2)
1336     return false;
1337 
1338   return true;
1339 }
1340 
1341 /* Determine the factor by which to unroll LOOP, from the information
1342    contained in the list of memory references REFS.  The description of the
1343    number of iterations of LOOP is stored to DESC.  NINSNS is the number of
1344    insns of the LOOP.  EST_NITER is the estimated number of iterations of
1345    the loop, or -1 if no estimate is available.  */
1346 
1347 static unsigned
1348 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
1349 			 unsigned ninsns, struct tree_niter_desc *desc,
1350 			 HOST_WIDE_INT est_niter)
1351 {
1352   unsigned upper_bound;
1353   unsigned nfactor, factor, mod_constraint;
1354   struct mem_ref_group *agp;
1355   struct mem_ref *ref;
1356 
1357   /* First check whether the loop is not too large to unroll.  We ignore
1358      PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1359      from unrolling them enough to make exactly one cache line covered by each
1360      iteration.  Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1361      us from unrolling the loops too many times in cases where we only expect
1362      gains from better scheduling and decreasing loop overhead, which is not
1363      the case here.  */
1364   upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
1365 
1366   /* If we unrolled the loop more times than it iterates, the unrolled version
1367      of the loop would never be entered.  */
1368   if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1369     upper_bound = est_niter;
1370 
1371   if (upper_bound <= 1)
1372     return 1;
1373 
1374   /* Choose the factor so that we may prefetch each cache line just once,
1375      but bound the unrolling by UPPER_BOUND.  */
1376   factor = 1;
1377   for (agp = refs; agp; agp = agp->next)
1378     for (ref = agp->refs; ref; ref = ref->next)
1379       if (should_issue_prefetch_p (ref))
1380 	{
1381 	  mod_constraint = ref->prefetch_mod;
1382 	  nfactor = least_common_multiple (mod_constraint, factor);
1383 	  if (nfactor <= upper_bound)
1384 	    factor = nfactor;
1385 	}
1386 
1387   if (!should_unroll_loop_p (loop, desc, factor))
1388     return 1;
1389 
1390   return factor;
1391 }
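
/* For instance (purely illustrative), two candidate references with
   prefetch_mod 2 and prefetch_mod 3 drive the factor to
   least_common_multiple (2, 3) = 6, provided 6 does not exceed UPPER_BOUND
   and should_unroll_loop_p agrees; otherwise the smaller factor (or 1) is
   kept.  */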
1392 
1393 /* Returns the total volume of the memory references REFS, taking into account
1394    reuses in the innermost loop and cache line size.  TODO -- we should also
1395    take into account reuses across the iterations of the loops in the loop
1396    nest.  */
1397 
1398 static unsigned
1399 volume_of_references (struct mem_ref_group *refs)
1400 {
1401   unsigned volume = 0;
1402   struct mem_ref_group *gr;
1403   struct mem_ref *ref;
1404 
1405   for (gr = refs; gr; gr = gr->next)
1406     for (ref = gr->refs; ref; ref = ref->next)
1407       {
1408 	/* Almost always reuses another value?  */
1409 	if (ref->prefetch_before != PREFETCH_ALL)
1410 	  continue;
1411 
1412 	/* If several iterations access the same cache line, use the size of
1413 	   the line divided by this number.  Otherwise, a cache line is
1414 	   accessed in each iteration.  TODO -- in the latter case, we should
1415 	   take the size of the reference into account, rounding it up to a
1416 	   multiple of the cache line size.  */
1417 	volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1418       }
1419   return volume;
1420 }
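
/* As an example, with a 64-byte L1_CACHE_LINE_SIZE a reference with
   prefetch_mod 4 (a new cache line touched every fourth iteration)
   contributes 64 / 4 = 16 bytes to the per-iteration volume, while a
   reference touching a new line every iteration contributes the full 64
   bytes.  */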
1421 
1422 /* Returns the volume of memory references accessed across VEC iterations of
1423    loops, whose sizes are described in the LOOP_SIZES array.  N is the number
1424    of the loops in the nest (length of VEC and LOOP_SIZES vectors).  */
1425 
1426 static unsigned
1427 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1428 {
1429   unsigned i;
1430 
1431   for (i = 0; i < n; i++)
1432     if (vec[i] != 0)
1433       break;
1434 
1435   if (i == n)
1436     return 0;
1437 
1438   gcc_assert (vec[i] > 0);
1439 
1440   /* We ignore the parts of the distance vector in subloops, since usually
1441      the numbers of iterations are much smaller.  */
1442   return loop_sizes[i] * vec[i];
1443 }
1444 
1445 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
1446    at the position corresponding to the loop of the step.  N is the depth
1447    of the considered loop nest, and LOOP is its innermost loop.  */
1448 
1449 static void
1450 add_subscript_strides (tree access_fn, unsigned stride,
1451 		       HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1452 {
1453   struct loop *aloop;
1454   tree step;
1455   HOST_WIDE_INT astep;
1456   unsigned min_depth = loop_depth (loop) - n;
1457 
1458   while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1459     {
1460       aloop = get_chrec_loop (access_fn);
1461       step = CHREC_RIGHT (access_fn);
1462       access_fn = CHREC_LEFT (access_fn);
1463 
1464       if ((unsigned) loop_depth (aloop) <= min_depth)
1465 	continue;
1466 
1467       if (tree_fits_shwi_p (step))
1468 	astep = tree_to_shwi (step);
1469       else
1470 	astep = L1_CACHE_LINE_SIZE;
1471 
1472       strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1473 
1474     }
1475 }
1476 
1477 /* Returns the volume of memory references accessed between two consecutive
1478    self-reuses of the reference DR.  We consider the subscripts of DR in N
1479    loops, and LOOP_SIZES contains the volumes of accesses in each of the
1480    loops.  LOOP is the innermost loop of the current loop nest.  */
1481 
1482 static unsigned
1483 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1484 		     struct loop *loop)
1485 {
1486   tree stride, access_fn;
1487   HOST_WIDE_INT *strides, astride;
1488   vec<tree> access_fns;
1489   tree ref = DR_REF (dr);
1490   unsigned i, ret = ~0u;
1491 
1492   /* In the following example:
1493 
1494      for (i = 0; i < N; i++)
1495        for (j = 0; j < N; j++)
1496          use (a[j][i]);
1497      the same cache line is accessed each N steps (except if the change from
1498      i to i + 1 crosses the boundary of the cache line).  Thus, for self-reuse,
1499      we cannot rely purely on the results of the data dependence analysis.
1500 
1501      Instead, we compute the stride of the reference in each loop, and consider
1502      the innermost loop in which the stride is less than the cache size.  */
1503 
1504   strides = XCNEWVEC (HOST_WIDE_INT, n);
1505   access_fns = DR_ACCESS_FNS (dr);
1506 
1507   FOR_EACH_VEC_ELT (access_fns, i, access_fn)
1508     {
1509       /* Keep track of the reference corresponding to the subscript, so that we
1510 	 know its stride.  */
1511       while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1512 	ref = TREE_OPERAND (ref, 0);
1513 
1514       if (TREE_CODE (ref) == ARRAY_REF)
1515 	{
1516 	  stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1517 	  if (tree_fits_uhwi_p (stride))
1518 	    astride = tree_to_uhwi (stride);
1519 	  else
1520 	    astride = L1_CACHE_LINE_SIZE;
1521 
1522 	  ref = TREE_OPERAND (ref, 0);
1523 	}
1524       else
1525 	astride = 1;
1526 
1527       add_subscript_strides (access_fn, astride, strides, n, loop);
1528     }
1529 
1530   for (i = n; i-- > 0; )
1531     {
1532       unsigned HOST_WIDE_INT s;
1533 
1534       s = strides[i] < 0 ?  -strides[i] : strides[i];
1535 
1536       if (s < (unsigned) L1_CACHE_LINE_SIZE
1537 	  && (loop_sizes[i]
1538 	      > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1539 	{
1540 	  ret = loop_sizes[i];
1541 	  break;
1542 	}
1543     }
1544 
1545   free (strides);
1546   return ret;
1547 }
1548 
1549 /* Determines the distance till the first reuse of each reference in REFS
1550    in the loop nest of LOOP.  NO_OTHER_REFS is true if there are no other
1551    memory references in the loop.  Return false if the analysis fails.  */
1552 
1553 static bool
1554 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1555 			   bool no_other_refs)
1556 {
1557   struct loop *nest, *aloop;
1558   vec<data_reference_p> datarefs = vNULL;
1559   vec<ddr_p> dependences = vNULL;
1560   struct mem_ref_group *gr;
1561   struct mem_ref *ref, *refb;
1562   auto_vec<loop_p> vloops;
1563   unsigned *loop_data_size;
1564   unsigned i, j, n;
1565   unsigned volume, dist, adist;
1566   HOST_WIDE_INT vol;
1567   data_reference_p dr;
1568   ddr_p dep;
1569 
1570   if (loop->inner)
1571     return true;
1572 
1573   /* Find the outermost loop of the loop nest of loop (we require that
1574      there are no sibling loops inside the nest).  */
1575   nest = loop;
1576   while (1)
1577     {
1578       aloop = loop_outer (nest);
1579 
1580       if (aloop == current_loops->tree_root
1581 	  || aloop->inner->next)
1582 	break;
1583 
1584       nest = aloop;
1585     }
1586 
1587   /* For each loop, determine the amount of data accessed in each iteration.
1588      We use this to estimate whether the reference is evicted from the
1589      cache before its reuse.  */
1590   find_loop_nest (nest, &vloops);
1591   n = vloops.length ();
1592   loop_data_size = XNEWVEC (unsigned, n);
1593   volume = volume_of_references (refs);
1594   i = n;
1595   while (i-- != 0)
1596     {
1597       loop_data_size[i] = volume;
1598       /* Bound the volume by the L2 cache size, since above this bound,
1599 	 all dependence distances are equivalent.  */
1600       if (volume > L2_CACHE_SIZE_BYTES)
1601 	continue;
1602 
1603       aloop = vloops[i];
1604       vol = estimated_stmt_executions_int (aloop);
1605       if (vol == -1)
1606 	vol = expected_loop_iterations (aloop);
1607       volume *= vol;
1608     }
1609 
1610   /* Prepare the references in the form suitable for data dependence
1611      analysis.  We ignore unanalyzable data references (the results
1612      are used just as a heuristic to estimate the temporality of the
1613      references, hence we do not need to worry about correctness).  */
1614   for (gr = refs; gr; gr = gr->next)
1615     for (ref = gr->refs; ref; ref = ref->next)
1616       {
1617 	dr = create_data_ref (nest, loop_containing_stmt (ref->stmt),
1618 			      ref->mem, ref->stmt, !ref->write_p);
1619 
1620 	if (dr)
1621 	  {
1622 	    ref->reuse_distance = volume;
1623 	    dr->aux = ref;
1624 	    datarefs.safe_push (dr);
1625 	  }
1626 	else
1627 	  no_other_refs = false;
1628       }
1629 
1630   FOR_EACH_VEC_ELT (datarefs, i, dr)
1631     {
1632       dist = self_reuse_distance (dr, loop_data_size, n, loop);
1633       ref = (struct mem_ref *) dr->aux;
1634       if (ref->reuse_distance > dist)
1635 	ref->reuse_distance = dist;
1636 
1637       if (no_other_refs)
1638 	ref->independent_p = true;
1639     }
1640 
1641   if (!compute_all_dependences (datarefs, &dependences, vloops, true))
1642     return false;
1643 
1644   FOR_EACH_VEC_ELT (dependences, i, dep)
1645     {
1646       if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1647 	continue;
1648 
1649       ref = (struct mem_ref *) DDR_A (dep)->aux;
1650       refb = (struct mem_ref *) DDR_B (dep)->aux;
1651 
1652       if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1653 	  || DDR_NUM_DIST_VECTS (dep) == 0)
1654 	{
1655 	  /* If the dependence cannot be analyzed, assume that there might be
1656 	     a reuse.  */
1657 	  dist = 0;
1658 
1659 	  ref->independent_p = false;
1660 	  refb->independent_p = false;
1661 	}
1662       else
1663 	{
1664 	  /* The distance vectors are normalized to be always lexicographically
1665 	     positive, hence we cannot tell just from them whether DDR_A comes
1666 	     before DDR_B or vice versa.  However, it does not matter
1667 	     anyway -- if DDR_A is close to DDR_B, then it is either reused in
1668 	     DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1669 	     in cache (and marking it as nontemporal would not affect
1670 	     anything).  */
1671 
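	  /* For example, a distance vector of (0, ..., 0, 1) means that the
	     two references touch the same data in consecutive iterations of
	     the innermost loop, so the corresponding volume is roughly the
	     amount of data accessed in one innermost iteration.  */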
1672 	  dist = volume;
1673 	  for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1674 	    {
1675 	      adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1676 					     loop_data_size, n);
1677 
1678 	      /* If this is a dependence in the innermost loop (i.e., the
1679 		 distances in all superloops are zero) and it is not
1680 		 the trivial self-dependence with distance zero, record that
1681 		 the references are not completely independent.  */
1682 	      if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1683 		  && (ref != refb
1684 		      || DDR_DIST_VECT (dep, j)[n-1] != 0))
1685 		{
1686 		  ref->independent_p = false;
1687 		  refb->independent_p = false;
1688 		}
1689 
1690 	      /* Ignore accesses closer than
1691 		 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1692 		 so that we use nontemporal prefetches e.g. if a single memory
1693 		 location is accessed several times in a single iteration of
1694 		 the loop.  */
1695 	      if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1696 		continue;
1697 
1698 	      if (adist < dist)
1699 		dist = adist;
1700 	    }
1701 	}
1702 
1703       if (ref->reuse_distance > dist)
1704 	ref->reuse_distance = dist;
1705       if (refb->reuse_distance > dist)
1706 	refb->reuse_distance = dist;
1707     }
1708 
1709   free_dependence_relations (dependences);
1710   free_data_refs (datarefs);
1711   free (loop_data_size);
1712 
1713   if (dump_file && (dump_flags & TDF_DETAILS))
1714     {
1715       fprintf (dump_file, "Reuse distances:\n");
1716       for (gr = refs; gr; gr = gr->next)
1717 	for (ref = gr->refs; ref; ref = ref->next)
1718 	  fprintf (dump_file, " ref %p distance %u\n",
1719 		   (void *) ref, ref->reuse_distance);
1720     }
1721 
1722   return true;
1723 }
1724 
1725 /* Determine whether or not the trip count to ahead ratio is too small based
1726    on profitability considerations.
1727    AHEAD: the iteration ahead distance,
1728    EST_NITER: the estimated trip count.  */
1729 
1730 static bool
1731 trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
1732 {
1733   /* Assume the trip count to ahead ratio is big enough if the trip count
1734      could not be estimated at compile time.  */
1735   if (est_niter < 0)
1736     return false;
1737 
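  /* For example, if AHEAD is 8 and TRIP_COUNT_TO_AHEAD_RATIO is 4, loops
     estimated to run fewer than 32 iterations are rejected: a significant
     fraction of the prefetches issued for such a loop would fetch data the
     loop never uses, so the overhead is unlikely to pay off.  */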
1738   if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
1739     {
1740       if (dump_file && (dump_flags & TDF_DETAILS))
1741 	fprintf (dump_file,
1742 		 "Not prefetching -- loop estimated to roll only %d times\n",
1743 		 (int) est_niter);
1744       return true;
1745     }
1746 
1747   return false;
1748 }
1749 
1750 /* Determine whether or not the number of memory references in the loop is
1751    reasonable based on profitability and compilation time considerations.
1752    NINSNS: estimated number of instructions in the loop,
1753    MEM_REF_COUNT: total number of memory references in the loop.  */
1754 
1755 static bool
1756 mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
1757 {
1758   int insn_to_mem_ratio;
1759 
1760   if (mem_ref_count == 0)
1761     return false;
1762 
1763   /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
1764      (compute_all_dependences) have high cost due to their quadratic complexity.
1765      To avoid huge compilation time, we give up prefetching if mem_ref_count
1766      is too large.  */
1767   if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
1768     return false;
1769 
1770   /* Prefetching improves performance by overlapping cache missing
1771      memory accesses with CPU operations.  If the loop does not have
1772      enough CPU operations to overlap with memory operations, prefetching
1773      won't give a significant benefit.  One approximate way of checking
1774      this is to require the ratio of instructions to memory references to
1775      be above a certain limit.  This approximation works well in practice.
1776      TODO: Implement a more precise computation by estimating the time
1777      for each CPU or memory op in the loop. Time estimates for memory ops
1778      should account for cache misses.  */
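  /* For example, a loop body with 40 insns and 20 memory references has a
     ratio of 2; if PREFETCH_MIN_INSN_TO_MEM_RATIO is 3, such a loop is
     considered too memory-bound for prefetching to be worthwhile.  */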
1779   insn_to_mem_ratio = ninsns / mem_ref_count;
1780 
1781   if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1782     {
1783       if (dump_file && (dump_flags & TDF_DETAILS))
1784         fprintf (dump_file,
1785 		 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1786 		 insn_to_mem_ratio);
1787       return false;
1788     }
1789 
1790   return true;
1791 }
1792 
1793 /* Determine whether or not the instruction to prefetch ratio in the loop is
1794    too small based on profitability considerations.
1795    NINSNS: estimated number of instructions in the loop,
1796    PREFETCH_COUNT: an estimate of the number of prefetches,
1797    UNROLL_FACTOR:  the factor to unroll the loop if prefetching.  */
1798 
1799 static bool
1800 insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
1801                                      unsigned unroll_factor)
1802 {
1803   int insn_to_prefetch_ratio;
1804 
1805   /* Prefetching most likely causes performance degradation when the instruction
1806      to prefetch ratio is too small.  Too many prefetch instructions in a loop
1807      may reduce the I-cache performance.
1808      (unroll_factor * ninsns) is used to estimate the number of instructions in
1809      the unrolled loop.  This implementation is a bit simplistic -- the number
1810      of issued prefetch instructions is also affected by unrolling.  So,
1811      prefetch_mod and the unroll factor should be taken into account when
1812      determining prefetch_count.  Also, the number of insns of the unrolled
1813      loop will usually be significantly smaller than the number of insns of the
1814      original loop * unroll_factor (at least the induction variable increases
1815      and the exit branches will get eliminated), so it might be better to use
1816      tree_estimate_loop_size + estimated_unrolled_size.  */
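  /* For example, with ninsns = 20, unroll_factor = 4 and prefetch_count = 10
     the ratio is (4 * 20) / 10 = 8; if MIN_INSN_TO_PREFETCH_RATIO is 9, we
     give up, since the prefetches would make up too large a fraction of the
     instruction stream.  */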
1817   insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
1818   if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
1819     {
1820       if (dump_file && (dump_flags & TDF_DETAILS))
1821         fprintf (dump_file,
1822 		 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1823 		 insn_to_prefetch_ratio);
1824       return true;
1825     }
1826 
1827   return false;
1828 }
1829 
1830 
1831 /* Issue prefetch instructions for array references in LOOP.  Returns
1832    true if the LOOP was unrolled.  */
1833 
1834 static bool
1835 loop_prefetch_arrays (struct loop *loop)
1836 {
1837   struct mem_ref_group *refs;
1838   unsigned ahead, ninsns, time, unroll_factor;
1839   HOST_WIDE_INT est_niter;
1840   struct tree_niter_desc desc;
1841   bool unrolled = false, no_other_refs;
1842   unsigned prefetch_count;
1843   unsigned mem_ref_count;
1844 
1845   if (optimize_loop_nest_for_size_p (loop))
1846     {
1847       if (dump_file && (dump_flags & TDF_DETAILS))
1848 	fprintf (dump_file, "  ignored (cold area)\n");
1849       return false;
1850     }
1851 
1852   /* FIXME: the time should be weighted by the probabilities of the blocks in
1853      the loop body.  */
1854   time = tree_num_loop_insns (loop, &eni_time_weights);
1855   if (time == 0)
1856     return false;
1857 
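  /* AHEAD is the number of iterations by which a prefetch has to precede the
     use of the data in order to hide the prefetch latency: the latency
     divided by the estimated time of one iteration, rounded up.  E.g., if
     PREFETCH_LATENCY is 200 and one iteration costs 50 time units, AHEAD
     is 4.  */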
1858   ahead = (PREFETCH_LATENCY + time - 1) / time;
1859   est_niter = estimated_stmt_executions_int (loop);
1860   if (est_niter == -1)
1861     est_niter = likely_max_stmt_executions_int (loop);
1862 
1863   /* Prefetching is not likely to be profitable if the trip count to ahead
1864      ratio is too small.  */
1865   if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
1866     return false;
1867 
1868   ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1869 
1870   /* Step 1: gather the memory references.  */
1871   refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
1872 
1873   /* Give up prefetching if the number of memory references in the
1874      loop is not reasonable based on profitability and compilation time
1875      considerations.  */
1876   if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
1877     goto fail;
1878 
1879   /* Step 2: estimate the reuse effects.  */
1880   prune_by_reuse (refs);
1881 
1882   if (nothing_to_prefetch_p (refs))
1883     goto fail;
1884 
1885   if (!determine_loop_nest_reuse (loop, refs, no_other_refs))
1886     goto fail;
1887 
1888   /* Step 3: determine unroll factor.  */
1889   unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1890 					   est_niter);
1891 
1892   /* Estimate prefetch count for the unrolled loop.  */
1893   prefetch_count = estimate_prefetch_count (refs, unroll_factor);
1894   if (prefetch_count == 0)
1895     goto fail;
1896 
1897   if (dump_file && (dump_flags & TDF_DETAILS))
1898     fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
1899 	     HOST_WIDE_INT_PRINT_DEC "\n"
1900 	     "insn count %d, mem ref count %d, prefetch count %d\n",
1901 	     ahead, unroll_factor, est_niter,
1902 	     ninsns, mem_ref_count, prefetch_count);
1903 
1904   /* Prefetching is not likely to be profitable if the instruction to prefetch
1905      ratio is too small.  */
1906   if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
1907 					  unroll_factor))
1908     goto fail;
1909 
1910   mark_nontemporal_stores (loop, refs);
1911 
1912   /* Step 4: what to prefetch?  */
1913   if (!schedule_prefetches (refs, unroll_factor, ahead))
1914     goto fail;
1915 
1916   /* Step 5: unroll the loop.  TODO -- peeling of the first and last few
1917      iterations so that we do not issue superfluous prefetches.  */
1918   if (unroll_factor != 1)
1919     {
1920       tree_unroll_loop (loop, unroll_factor,
1921 			single_dom_exit (loop), &desc);
1922       unrolled = true;
1923     }
1924 
1925   /* Step 6: issue the prefetches.  */
1926   issue_prefetches (refs, unroll_factor, ahead);
1927 
1928 fail:
1929   release_mem_refs (refs);
1930   return unrolled;
1931 }
1932 
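/* A rough sketch (not the exact GIMPLE produced) of the transformation done
   by loop_prefetch_arrays for a simple reduction, assuming the heuristics
   above pick an unroll factor of 4.  DIST stands for the prefetch distance
   derived from AHEAD and the unroll factor; the epilogue handling the
   leftover iterations is omitted.  Before:

     for (i = 0; i < n; i++)
       sum += a[i];

   After:

     for (i = 0; i < n; i += 4)
       {
	 __builtin_prefetch (&a[i + DIST], 0, 3);
	 sum += a[i];
	 sum += a[i + 1];
	 sum += a[i + 2];
	 sum += a[i + 3];
       }
*/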
1933 /* Issue prefetch instructions for array references in loops.  */
1934 
1935 unsigned int
1936 tree_ssa_prefetch_arrays (void)
1937 {
1938   struct loop *loop;
1939   bool unrolled = false;
1940   int todo_flags = 0;
1941 
1942   if (!targetm.have_prefetch ()
1943       /* It is possible to ask the compiler for, say, -mtune=i486 -march=pentium4.
1944 	 -mtune=i486 causes PREFETCH_BLOCK to be 0, since that is part of the
1945 	 processor costs and the i486 does not have prefetch, but
1946 	 -march=pentium4 causes targetm.have_prefetch to return true.  Ugh.  */
1947       || PREFETCH_BLOCK == 0)
1948     return 0;
1949 
1950   if (dump_file && (dump_flags & TDF_DETAILS))
1951     {
1952       fprintf (dump_file, "Prefetching parameters:\n");
1953       fprintf (dump_file, "    simultaneous prefetches: %d\n",
1954 	       SIMULTANEOUS_PREFETCHES);
1955       fprintf (dump_file, "    prefetch latency: %d\n", PREFETCH_LATENCY);
1956       fprintf (dump_file, "    prefetch block size: %d\n", PREFETCH_BLOCK);
1957       fprintf (dump_file, "    L1 cache size: %d lines, %d kB\n",
1958 	       L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
1959       fprintf (dump_file, "    L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1960       fprintf (dump_file, "    L2 cache size: %d kB\n", L2_CACHE_SIZE);
1961       fprintf (dump_file, "    min insn-to-prefetch ratio: %d \n",
1962 	       MIN_INSN_TO_PREFETCH_RATIO);
1963       fprintf (dump_file, "    min insn-to-mem ratio: %d \n",
1964 	       PREFETCH_MIN_INSN_TO_MEM_RATIO);
1965       fprintf (dump_file, "\n");
1966     }
1967 
1968   initialize_original_copy_tables ();
1969 
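  /* If the front end has not provided a declaration of __builtin_prefetch
     (not every front end predeclares it), create one here so that
     issue_prefetches has something to call.  */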
1970   if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
1971     {
1972       tree type = build_function_type_list (void_type_node,
1973 					    const_ptr_type_node, NULL_TREE);
1974       tree decl = add_builtin_function ("__builtin_prefetch", type,
1975 					BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1976 					NULL, NULL_TREE);
1977       DECL_IS_NOVOPS (decl) = true;
1978       set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
1979     }
1980 
1981   FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1982     {
1983       if (dump_file && (dump_flags & TDF_DETAILS))
1984 	fprintf (dump_file, "Processing loop %d:\n", loop->num);
1985 
1986       unrolled |= loop_prefetch_arrays (loop);
1987 
1988       if (dump_file && (dump_flags & TDF_DETAILS))
1989 	fprintf (dump_file, "\n\n");
1990     }
1991 
1992   if (unrolled)
1993     {
1994       scev_reset ();
1995       todo_flags |= TODO_cleanup_cfg;
1996     }
1997 
1998   free_original_copy_tables ();
1999   return todo_flags;
2000 }
2001 
2002 /* Prefetching.  */
2003 
2004 namespace {
2005 
2006 const pass_data pass_data_loop_prefetch =
2007 {
2008   GIMPLE_PASS, /* type */
2009   "aprefetch", /* name */
2010   OPTGROUP_LOOP, /* optinfo_flags */
2011   TV_TREE_PREFETCH, /* tv_id */
2012   ( PROP_cfg | PROP_ssa ), /* properties_required */
2013   0, /* properties_provided */
2014   0, /* properties_destroyed */
2015   0, /* todo_flags_start */
2016   0, /* todo_flags_finish */
2017 };
2018 
2019 class pass_loop_prefetch : public gimple_opt_pass
2020 {
2021 public:
2022   pass_loop_prefetch (gcc::context *ctxt)
2023     : gimple_opt_pass (pass_data_loop_prefetch, ctxt)
2024   {}
2025 
2026   /* opt_pass methods: */
2027   virtual bool gate (function *) { return flag_prefetch_loop_arrays > 0; }
2028   virtual unsigned int execute (function *);
2029 
2030 }; // class pass_loop_prefetch
2031 
2032 unsigned int
2033 pass_loop_prefetch::execute (function *fun)
2034 {
2035   if (number_of_loops (fun) <= 1)
2036     return 0;
2037 
2038   if ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) != 0)
2039     {
2040       static bool warned = false;
2041 
2042       if (!warned)
2043 	{
2044 	  warning (OPT_Wdisabled_optimization,
2045 		   "%<l1-cache-size%> parameter is not a power of two %d",
2046 		   PREFETCH_BLOCK);
2047 	  warned = true;
2048 	}
2049       return 0;
2050     }
2051 
2052   return tree_ssa_prefetch_arrays ();
2053 }
2054 
2055 } // anon namespace
2056 
2057 gimple_opt_pass *
2058 make_pass_loop_prefetch (gcc::context *ctxt)
2059 {
2060   return new pass_loop_prefetch (ctxt);
2061 }
2062 
2063 
2064