xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/tree-ssa-loop-prefetch.c (revision deb6f0161a9109e7de9b519dc8dfb9478668dcdd)
1 /* Array prefetching.
2    Copyright (C) 2005-2016 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "tree-pass.h"
30 #include "gimple-ssa.h"
31 #include "optabs-query.h"
32 #include "tree-pretty-print.h"
33 #include "fold-const.h"
34 #include "stor-layout.h"
35 #include "gimplify.h"
36 #include "gimple-iterator.h"
37 #include "gimplify-me.h"
38 #include "tree-ssa-loop-ivopts.h"
39 #include "tree-ssa-loop-manip.h"
40 #include "tree-ssa-loop-niter.h"
41 #include "tree-ssa-loop.h"
42 #include "tree-into-ssa.h"
43 #include "cfgloop.h"
44 #include "tree-scalar-evolution.h"
45 #include "params.h"
46 #include "langhooks.h"
47 #include "tree-inline.h"
48 #include "tree-data-ref.h"
49 #include "diagnostic-core.h"
50 
51 
52 /* FIXME: Needed for optabs, but this should all be moved to a TBD interface
53    between the GIMPLE and RTL worlds.  */
54 
55 /* This pass inserts prefetch instructions to optimize cache usage during
56    accesses to arrays in loops.  It processes loops sequentially and:
57 
58    1) Gathers all memory references in the single loop.
59    2) For each of the references it decides when it is profitable to prefetch
60       it.  To do this, we evaluate the reuse among the accesses and determine
61       two values: PREFETCH_BEFORE (meaning that it only makes sense to do
62       prefetching in the first PREFETCH_BEFORE iterations of the loop) and
63       PREFETCH_MOD (meaning that it only makes sense to prefetch in the
64       iterations of the loop that are zero modulo PREFETCH_MOD).  For example
65       (assuming cache line size is 64 bytes, char has size 1 byte and there
66       is no hardware sequential prefetch):
67 
68       char *a;
69       for (i = 0; i < max; i++)
70 	{
71 	  a[255] = ...;		(0)
72 	  a[i] = ...;		(1)
73 	  a[i + 64] = ...;	(2)
74 	  a[16*i] = ...;	(3)
75 	  a[187*i] = ...;	(4)
76 	  a[187*i + 50] = ...;	(5)
77 	}
78 
79        (0) obviously has PREFETCH_BEFORE 1
80        (1) has PREFETCH_BEFORE 64, since (2) accesses the same memory
81            location 64 iterations before it, and PREFETCH_MOD 64 (since
82 	   it hits the same cache line otherwise).
83        (2) has PREFETCH_MOD 64
84        (3) has PREFETCH_MOD 4
85        (4) has PREFETCH_MOD 1.  We do not set PREFETCH_BEFORE here, since
86            the cache line accessed by (5) is the same with probability only
87 	   7/32.
88        (5) has PREFETCH_MOD 1 as well.
89 
90       Additionally, we use data dependence analysis to determine for each
91       reference the distance till the first reuse; this information is used
92       to determine the temporality of the issued prefetch instruction.
93 
94    3) We determine how far ahead we need to prefetch.  The number of
95       iterations needed is time to fetch / time spent in one iteration of
96       the loop.  We do not know either of these values, so we just make a
97       heuristic guess based on a magic (possibly target-specific) constant
98       and the size of the loop; see the worked example after this list.
99 
100    4) Determine which of the references we prefetch.  We take into account
101       that there is a maximum number of simultaneous prefetches (provided
102       by the machine description).  We issue as many prefetches as possible
103       while still within this bound (starting with those with lowest
104       prefetch_mod, since they are responsible for most of the cache
105       misses).
106 
107    5) We unroll and peel loops so that we are able to satisfy PREFETCH_MOD
108       and PREFETCH_BEFORE requirements (within some bounds), and to avoid
109       prefetching nonaccessed memory.
110       TODO -- actually implement peeling.
111 
112    6) We actually emit the prefetch instructions.  ??? Perhaps emit the
113       prefetch instructions with guards in cases where 5) was not sufficient
114       to satisfy the constraints?
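
   To give a rough feel for step 3 (the numbers below are illustrative only,
   not taken from any particular target): with a guessed fetch latency of 200
   cycles and a loop whose iteration is estimated to take about 50 cycles, we
   would issue each prefetch about

      ahead = 200 / 50 = 4

   iterations before the corresponding memory access.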
115 
116    A cost model is implemented to determine whether or not prefetching is
117    profitable for a given loop.  The cost model has three heuristics:
118 
119    1. Function trip_count_to_ahead_ratio_too_small_p implements a
120       heuristic that determines whether or not the loop has too few
121       iterations (compared to ahead).  Prefetching is not likely to be
122       beneficial if the trip count to ahead ratio is below a certain
123       minimum.
124 
125    2. Function mem_ref_count_reasonable_p implements a heuristic that
126       determines whether the given loop has enough CPU ops that can be
127       overlapped with cache missing memory ops.  If not, the loop
128       won't benefit from prefetching.  In the implementation,
129       prefetching is not considered beneficial if the ratio between
130       the instruction count and the mem ref count is below a certain
131       minimum.
132 
133    3. Function insn_to_prefetch_ratio_too_small_p implements a
134       heuristic that disables prefetching in a loop if the prefetching
135       cost is above a certain limit.  The relative prefetching cost is
136       estimated by taking the ratio between the prefetch count and the
137       total intruction count (this models the I-cache cost).
138    total instruction count (this models the I-cache cost).
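
   Put together, the three heuristics behave roughly like the following
   sketch (the parameter names are illustrative placeholders, not necessarily
   the exact --param names used by the implementation):

      if (est_niter >= 0 && est_niter / ahead < trip_count_to_ahead_ratio)
	give up;				    (heuristic 1)
      if (ninsns / mem_ref_count < min_insn_to_mem_ratio)
	give up;				    (heuristic 2)
      if (ninsns / prefetch_count < min_insn_to_prefetch_ratio)
	give up;				    (heuristic 3)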
139    The limits used in these heuristics are defined as parameters with
140    reasonable default values. Machine-specific default values will be
141    added later.
142 
143    Some other TODO:
144       -- write and use a more general reuse analysis (that could also be used
145 	 in other cache-aimed loop optimizations)
146       -- make it behave sanely together with the prefetches given by the user
147 	 (now we just ignore them; at the very least we should avoid
148 	 optimizing loops in which the user put their own prefetches)
149       -- we assume cache line size alignment of arrays; this could be
150 	 improved.  */
151 
152 /* Magic constants follow.  These should be replaced by machine specific
153    numbers.  */
154 
155 /* True if write can be prefetched by a read prefetch.  */
156 
157 #ifndef WRITE_CAN_USE_READ_PREFETCH
158 #define WRITE_CAN_USE_READ_PREFETCH 1
159 #endif
160 
161 /* True if read can be prefetched by a write prefetch. */
162 
163 #ifndef READ_CAN_USE_WRITE_PREFETCH
164 #define READ_CAN_USE_WRITE_PREFETCH 0
165 #endif
166 
167 /* The size of the block loaded by a single prefetch.  Usually, this is
168    the same as cache line size (at the moment, we only consider one level
169    of cache hierarchy).  */
170 
171 #ifndef PREFETCH_BLOCK
172 #define PREFETCH_BLOCK L1_CACHE_LINE_SIZE
173 #endif
174 
175 /* Do we have forward hardware sequential prefetching?  */
176 
177 #ifndef HAVE_FORWARD_PREFETCH
178 #define HAVE_FORWARD_PREFETCH 0
179 #endif
180 
181 /* Do we have backward hardware sequential prefetching?  */
182 
183 #ifndef HAVE_BACKWARD_PREFETCH
184 #define HAVE_BACKWARD_PREFETCH 0
185 #endif
186 
187 /* In some cases we are only able to determine that there is a certain
188    probability that the two accesses hit the same cache line.  In this
189    case, we issue the prefetches for both of them if this probability
190    is less than (1000 - ACCEPTABLE_MISS_RATE) per thousand.  */
191 
192 #ifndef ACCEPTABLE_MISS_RATE
193 #define ACCEPTABLE_MISS_RATE 50
194 #endif
195 
196 #define L1_CACHE_SIZE_BYTES ((unsigned) (L1_CACHE_SIZE * 1024))
197 #define L2_CACHE_SIZE_BYTES ((unsigned) (L2_CACHE_SIZE * 1024))
198 
199 /* We consider a memory access nontemporal if it is not reused sooner than
200    after L2_CACHE_SIZE_BYTES of memory are accessed.  However, we ignore
201    accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
202    so that we use nontemporal prefetches e.g. if a single memory location
203    is accessed several times in a single iteration of the loop.  */
204 #define NONTEMPORAL_FRACTION 16
205 
206 /* In case we have to emit a memory fence instruction after the loop that
207    uses nontemporal stores, this defines the builtin to use.  */
208 
209 #ifndef FENCE_FOLLOWING_MOVNT
210 #define FENCE_FOLLOWING_MOVNT NULL_TREE
211 #endif
212 
213 /* It is not profitable to prefetch when the trip count is not at
214    least TRIP_COUNT_TO_AHEAD_RATIO times the prefetch ahead distance.
215    For example, in a loop with a prefetch ahead distance of 10,
216    supposing that TRIP_COUNT_TO_AHEAD_RATIO is equal to 4, it is
217    profitable to prefetch when the trip count is greater or equal to
218    40.  In that case, 30 out of the 40 iterations will benefit from
219    prefetching.  */
220 
221 #ifndef TRIP_COUNT_TO_AHEAD_RATIO
222 #define TRIP_COUNT_TO_AHEAD_RATIO 4
223 #endif
224 
225 /* A group of references between which reuse may occur.  */
226 
227 struct mem_ref_group
228 {
229   tree base;			/* Base of the reference.  */
230   tree step;			/* Step of the reference.  */
231   struct mem_ref *refs;		/* References in the group.  */
232   struct mem_ref_group *next;	/* Next group of references.  */
233 };
234 
235 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */
236 
237 #define PREFETCH_ALL		(~(unsigned HOST_WIDE_INT) 0)
238 
239 /* Do not generate a prefetch if the unroll factor is significantly less
240    than what is required by the prefetch.  This is to avoid redundant
241    prefetches.  For example, when prefetch_mod is 16 and unroll_factor is
242    2, prefetching requires unrolling the loop 16 times, but
243    the loop is actually unrolled twice.  In this case (ratio = 8),
244    prefetching is not likely to be beneficial.  */
245 
246 #ifndef PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO
247 #define PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO 4
248 #endif
249 
250 /* Some of the prefetch computations have quadratic complexity.  We want to
251    avoid huge compile times and, therefore, want to limit the number of
252    memory references per loop for which we consider prefetching.  */
253 
254 #ifndef PREFETCH_MAX_MEM_REFS_PER_LOOP
255 #define PREFETCH_MAX_MEM_REFS_PER_LOOP 200
256 #endif
257 
258 /* The memory reference.  */
259 
260 struct mem_ref
261 {
262   gimple *stmt;			/* Statement in which the reference appears.  */
263   tree mem;			/* The reference.  */
264   HOST_WIDE_INT delta;		/* Constant offset of the reference.  */
265   struct mem_ref_group *group;	/* The group of references it belongs to.  */
266   unsigned HOST_WIDE_INT prefetch_mod;
267 				/* Prefetch only each PREFETCH_MOD-th
268 				   iteration.  */
269   unsigned HOST_WIDE_INT prefetch_before;
270 				/* Prefetch only first PREFETCH_BEFORE
271 				   iterations.  */
272   unsigned reuse_distance;	/* The amount of data accessed before the first
273 				   reuse of this value.  */
274   struct mem_ref *next;		/* The next reference in the group.  */
275   unsigned write_p : 1;		/* Is it a write?  */
276   unsigned independent_p : 1;	/* True if the reference is independent of
277 				   all other references inside the loop.  */
278   unsigned issue_prefetch_p : 1;	/* Should we really issue the prefetch?  */
279   unsigned storent_p : 1;	/* True if we changed the store to a
280 				   nontemporal one.  */
281 };
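
/* As an illustration (following the example in the comment at the top of the
   file, with a 64-byte cache line and char-sized elements), the reference
   a[i + 64] from that loop would end up described roughly as

     group:  base a, step 1
     ref:    delta 64, write_p 1, prefetch_mod 64 (a new cache line is touched
	     only every 64 iterations), prefetch_before PREFETCH_ALL

   The precise values are filled in later by the reuse analysis below; this is
   only a sketch of how the fields are used.  */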
282 
283 /* Dumps information about a memory reference to FILE.  */
284 static void
285 dump_mem_details (FILE *file, tree base, tree step,
286 	    HOST_WIDE_INT delta, bool write_p)
287 {
288   fprintf (file, "(base ");
289   print_generic_expr (file, base, TDF_SLIM);
290   fprintf (file, ", step ");
291   if (cst_and_fits_in_hwi (step))
292     fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step));
293   else
294     print_generic_expr (file, step, TDF_TREE);
295   fprintf (file, ")\n");
296   fprintf (file, "  delta ");
297   fprintf (file, HOST_WIDE_INT_PRINT_DEC, delta);
298   fprintf (file, "\n");
299   fprintf (file, "  %s\n", write_p ? "write" : "read");
300   fprintf (file, "\n");
301 }
302 
303 /* Dumps information about reference REF to FILE.  */
304 
305 static void
306 dump_mem_ref (FILE *file, struct mem_ref *ref)
307 {
308   fprintf (file, "Reference %p:\n", (void *) ref);
309 
310   fprintf (file, "  group %p ", (void *) ref->group);
311 
312   dump_mem_details (file, ref->group->base, ref->group->step, ref->delta,
313                    ref->write_p);
314 }
315 
316 /* Finds a group with BASE and STEP in GROUPS, or creates one if it does not
317    exist.  */
318 
319 static struct mem_ref_group *
320 find_or_create_group (struct mem_ref_group **groups, tree base, tree step)
321 {
322   struct mem_ref_group *group;
323 
324   for (; *groups; groups = &(*groups)->next)
325     {
326       if (operand_equal_p ((*groups)->step, step, 0)
327 	  && operand_equal_p ((*groups)->base, base, 0))
328 	return *groups;
329 
330       /* If step is an integer constant, keep the list of groups sorted
331          by decreasing step.  */
332       if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step)
333 	  && int_cst_value ((*groups)->step) < int_cst_value (step))
334 	break;
335     }
336 
337   group = XNEW (struct mem_ref_group);
338   group->base = base;
339   group->step = step;
340   group->refs = NULL;
341   group->next = *groups;
342   *groups = group;
343 
344   return group;
345 }
346 
347 /* Records a memory reference MEM in GROUP with offset DELTA and write status
348    WRITE_P.  The reference occurs in statement STMT.  */
349 
350 static void
351 record_ref (struct mem_ref_group *group, gimple *stmt, tree mem,
352 	    HOST_WIDE_INT delta, bool write_p)
353 {
354   struct mem_ref **aref;
355 
356   /* Do not record the same address twice.  */
357   for (aref = &group->refs; *aref; aref = &(*aref)->next)
358     {
359       /* It may not be possible for a write reference to reuse a read
360 	 prefetch, or vice versa.  */
361       if (!WRITE_CAN_USE_READ_PREFETCH
362 	  && write_p
363 	  && !(*aref)->write_p)
364 	continue;
365       if (!READ_CAN_USE_WRITE_PREFETCH
366 	  && !write_p
367 	  && (*aref)->write_p)
368 	continue;
369 
370       if ((*aref)->delta == delta)
371 	return;
372     }
373 
374   (*aref) = XNEW (struct mem_ref);
375   (*aref)->stmt = stmt;
376   (*aref)->mem = mem;
377   (*aref)->delta = delta;
378   (*aref)->write_p = write_p;
379   (*aref)->prefetch_before = PREFETCH_ALL;
380   (*aref)->prefetch_mod = 1;
381   (*aref)->reuse_distance = 0;
382   (*aref)->issue_prefetch_p = false;
383   (*aref)->group = group;
384   (*aref)->next = NULL;
385   (*aref)->independent_p = false;
386   (*aref)->storent_p = false;
387 
388   if (dump_file && (dump_flags & TDF_DETAILS))
389     dump_mem_ref (dump_file, *aref);
390 }
391 
392 /* Release memory references in GROUPS.  */
393 
394 static void
395 release_mem_refs (struct mem_ref_group *groups)
396 {
397   struct mem_ref_group *next_g;
398   struct mem_ref *ref, *next_r;
399 
400   for (; groups; groups = next_g)
401     {
402       next_g = groups->next;
403       for (ref = groups->refs; ref; ref = next_r)
404 	{
405 	  next_r = ref->next;
406 	  free (ref);
407 	}
408       free (groups);
409     }
410 }
411 
412 /* A structure used to pass arguments to idx_analyze_ref.  */
413 
414 struct ar_data
415 {
416   struct loop *loop;			/* Loop of the reference.  */
417   gimple *stmt;				/* Statement of the reference.  */
418   tree *step;				/* Step of the memory reference.  */
419   HOST_WIDE_INT *delta;			/* Offset of the memory reference.  */
420 };
421 
422 /* Analyzes a single INDEX of a memory reference to obtain information
423    described at analyze_ref.  Callback for for_each_index.  */
424 
425 static bool
426 idx_analyze_ref (tree base, tree *index, void *data)
427 {
428   struct ar_data *ar_data = (struct ar_data *) data;
429   tree ibase, step, stepsize;
430   HOST_WIDE_INT idelta = 0, imult = 1;
431   affine_iv iv;
432 
433   if (!simple_iv (ar_data->loop, loop_containing_stmt (ar_data->stmt),
434 		  *index, &iv, true))
435     return false;
436   ibase = iv.base;
437   step = iv.step;
438 
439   if (TREE_CODE (ibase) == POINTER_PLUS_EXPR
440       && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1)))
441     {
442       idelta = int_cst_value (TREE_OPERAND (ibase, 1));
443       ibase = TREE_OPERAND (ibase, 0);
444     }
445   if (cst_and_fits_in_hwi (ibase))
446     {
447       idelta += int_cst_value (ibase);
448       ibase = build_int_cst (TREE_TYPE (ibase), 0);
449     }
450 
451   if (TREE_CODE (base) == ARRAY_REF)
452     {
453       stepsize = array_ref_element_size (base);
454       if (!cst_and_fits_in_hwi (stepsize))
455 	return false;
456       imult = int_cst_value (stepsize);
457       step = fold_build2 (MULT_EXPR, sizetype,
458 			  fold_convert (sizetype, step),
459 			  fold_convert (sizetype, stepsize));
460       idelta *= imult;
461     }
462 
463   if (*ar_data->step == NULL_TREE)
464     *ar_data->step = step;
465   else
466     *ar_data->step = fold_build2 (PLUS_EXPR, sizetype,
467 				  fold_convert (sizetype, *ar_data->step),
468 				  fold_convert (sizetype, step));
469   *ar_data->delta += idelta;
470   *index = ibase;
471 
472   return true;
473 }
474 
475 /* Tries to express REF_P in the form &BASE + STEP * iter + DELTA, where DELTA
476    and STEP are integer constants and iter is the iteration count of LOOP.  The
477    reference occurs in statement STMT.  Strips nonaddressable component
478    references from REF_P.  */
479 
480 static bool
481 analyze_ref (struct loop *loop, tree *ref_p, tree *base,
482 	     tree *step, HOST_WIDE_INT *delta,
483 	     gimple *stmt)
484 {
485   struct ar_data ar_data;
486   tree off;
487   HOST_WIDE_INT bit_offset;
488   tree ref = *ref_p;
489 
490   *step = NULL_TREE;
491   *delta = 0;
492 
493   /* First strip off the component references.  Ignore bitfields.
494      Also strip off the real and imaginary parts of a complex, so that
495      they can have the same base.  */
496   if (TREE_CODE (ref) == REALPART_EXPR
497       || TREE_CODE (ref) == IMAGPART_EXPR
498       || (TREE_CODE (ref) == COMPONENT_REF
499           && DECL_NONADDRESSABLE_P (TREE_OPERAND (ref, 1))))
500     {
501       if (TREE_CODE (ref) == IMAGPART_EXPR)
502         *delta += int_size_in_bytes (TREE_TYPE (ref));
503       ref = TREE_OPERAND (ref, 0);
504     }
505 
506   *ref_p = ref;
507 
508   for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0))
509     {
510       off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
511       bit_offset = TREE_INT_CST_LOW (off);
512       gcc_assert (bit_offset % BITS_PER_UNIT == 0);
513 
514       *delta += bit_offset / BITS_PER_UNIT;
515     }
516 
517   *base = unshare_expr (ref);
518   ar_data.loop = loop;
519   ar_data.stmt = stmt;
520   ar_data.step = step;
521   ar_data.delta = delta;
522   return for_each_index (base, idx_analyze_ref, &ar_data);
523 }
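
/* A sketch of what the analysis above produces (not a verbatim dump): for
   int a[N] and a loop with induction variable i starting at 0 with step 1,
   the reference a[i + 3] decomposes into

     BASE = a[0], STEP = 4, DELTA = 12

   assuming 4-byte int and that the initial value of the index folds to the
   constant 3; the element size multiplies both the induction step and the
   constant offset.  */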
524 
525 /* Record a memory reference REF to the list REFS.  The reference occurs in
526    LOOP in statement STMT and it is write if WRITE_P.  Returns true if the
527    reference was recorded, false otherwise.  */
528 
529 static bool
530 gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
531 			      tree ref, bool write_p, gimple *stmt)
532 {
533   tree base, step;
534   HOST_WIDE_INT delta;
535   struct mem_ref_group *agrp;
536 
537   if (get_base_address (ref) == NULL)
538     return false;
539 
540   if (!analyze_ref (loop, &ref, &base, &step, &delta, stmt))
541     return false;
542   /* If analyze_ref fails the default is a NULL_TREE.  We can stop here.  */
543   if (step == NULL_TREE)
544     return false;
545 
546   /* Stop if the address of BASE could not be taken.  */
547   if (may_be_nonaddressable_p (base))
548     return false;
549 
550   /* Limit non-constant step prefetching only to the innermost loops and
551      only when the step is loop invariant in the entire loop nest. */
552   if (!cst_and_fits_in_hwi (step))
553     {
554       if (loop->inner != NULL)
555         {
556           if (dump_file && (dump_flags & TDF_DETAILS))
557             {
558               fprintf (dump_file, "Memory expression %p\n",(void *) ref );
559               print_generic_expr (dump_file, ref, TDF_TREE);
560               fprintf (dump_file,":");
561               dump_mem_details (dump_file, base, step, delta, write_p);
562               fprintf (dump_file,
563                        "Ignoring %p, non-constant step prefetching is "
564                        "limited to innermost loops\n",
565                        (void *) ref);
566             }
567             return false;
568          }
569       else
570         {
571           if (!expr_invariant_in_loop_p (loop_outermost (loop), step))
572           {
573             if (dump_file && (dump_flags & TDF_DETAILS))
574               {
575                 fprintf (dump_file, "Memory expression %p\n",(void *) ref );
576                 print_generic_expr (dump_file, ref, TDF_TREE);
577                 fprintf (dump_file,":");
578                 dump_mem_details (dump_file, base, step, delta, write_p);
579                 fprintf (dump_file,
580                          "Not prefetching, ignoring %p due to "
581                          "loop variant step\n",
582                          (void *) ref);
583               }
584               return false;
585             }
586         }
587     }
588 
589   /* Now we know that REF = &BASE + STEP * iter + DELTA, where DELTA and STEP
590      are integer constants.  */
591   agrp = find_or_create_group (refs, base, step);
592   record_ref (agrp, stmt, ref, delta, write_p);
593 
594   return true;
595 }
596 
597 /* Record the suitable memory references in LOOP.  NO_OTHER_REFS is set to
598    true if there are no other memory references inside the loop.  */
599 
600 static struct mem_ref_group *
601 gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
602 {
603   basic_block *body = get_loop_body_in_dom_order (loop);
604   basic_block bb;
605   unsigned i;
606   gimple_stmt_iterator bsi;
607   gimple *stmt;
608   tree lhs, rhs;
609   struct mem_ref_group *refs = NULL;
610 
611   *no_other_refs = true;
612   *ref_count = 0;
613 
614   /* Scan the loop body in order, so that earlier references precede
615      later ones.  */
616   for (i = 0; i < loop->num_nodes; i++)
617     {
618       bb = body[i];
619       if (bb->loop_father != loop)
620 	continue;
621 
622       for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
623 	{
624 	  stmt = gsi_stmt (bsi);
625 
626 	  if (gimple_code (stmt) != GIMPLE_ASSIGN)
627 	    {
628 	      if (gimple_vuse (stmt)
629 		  || (is_gimple_call (stmt)
630 		      && !(gimple_call_flags (stmt) & ECF_CONST)))
631 		*no_other_refs = false;
632 	      continue;
633 	    }
634 
635 	  lhs = gimple_assign_lhs (stmt);
636 	  rhs = gimple_assign_rhs1 (stmt);
637 
638 	  if (REFERENCE_CLASS_P (rhs))
639 	    {
640 	    *no_other_refs &= gather_memory_references_ref (loop, &refs,
641 							    rhs, false, stmt);
642 	    *ref_count += 1;
643 	    }
644 	  if (REFERENCE_CLASS_P (lhs))
645 	    {
646 	    *no_other_refs &= gather_memory_references_ref (loop, &refs,
647 							    lhs, true, stmt);
648 	    *ref_count += 1;
649 	    }
650 	}
651     }
652   free (body);
653 
654   return refs;
655 }
656 
657 /* Prune the prefetch candidate REF using the self-reuse.  */
658 
659 static void
660 prune_ref_by_self_reuse (struct mem_ref *ref)
661 {
662   HOST_WIDE_INT step;
663   bool backward;
664 
665   /* If the step size is non-constant, we cannot calculate prefetch_mod.  */
666   if (!cst_and_fits_in_hwi (ref->group->step))
667     return;
668 
669   step = int_cst_value (ref->group->step);
670 
671   backward = step < 0;
672 
673   if (step == 0)
674     {
675       /* Prefetch references to invariant address just once.  */
676       ref->prefetch_before = 1;
677       return;
678     }
679 
680   if (backward)
681     step = -step;
682 
683   if (step > PREFETCH_BLOCK)
684     return;
685 
686   if ((backward && HAVE_BACKWARD_PREFETCH)
687       || (!backward && HAVE_FORWARD_PREFETCH))
688     {
689       ref->prefetch_before = 1;
690       return;
691     }
692 
693   ref->prefetch_mod = PREFETCH_BLOCK / step;
694 }
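
/* For example, with a 64-byte PREFETCH_BLOCK and no hardware sequential
   prefetch: a reference with step 16 gets prefetch_mod = 64 / 16 = 4 (case
   (3) in the comment at the top of the file), a reference with step 0 gets
   prefetch_before = 1, and a reference with step 187 is left with
   prefetch_mod 1, since each iteration then touches a new cache line.  */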
695 
696 /* Divides X by BY, rounding down.  */
697 
698 static HOST_WIDE_INT
699 ddown (HOST_WIDE_INT x, unsigned HOST_WIDE_INT by)
700 {
701   gcc_assert (by > 0);
702 
703   if (x >= 0)
704     return x / by;
705   else
706     return (x + by - 1) / by;
707 }
708 
709 /* Given a CACHE_LINE_SIZE and two inductive memory references
710    with a common STEP greater than CACHE_LINE_SIZE and an address
711    difference DELTA, compute the probability that they will fall
712    in different cache lines.  Return true if the computed miss rate
713    is not greater than the ACCEPTABLE_MISS_RATE.  DISTINCT_ITERS is the
714    number of distinct iterations after which the pattern repeats itself.
715    ALIGN_UNIT is the unit of alignment in bytes.  */
716 
717 static bool
718 is_miss_rate_acceptable (unsigned HOST_WIDE_INT cache_line_size,
719 		   HOST_WIDE_INT step, HOST_WIDE_INT delta,
720 		   unsigned HOST_WIDE_INT distinct_iters,
721 		   int align_unit)
722 {
723   unsigned align, iter;
724   int total_positions, miss_positions, max_allowed_miss_positions;
725   int address1, address2, cache_line1, cache_line2;
726 
727   /* It always misses if delta is greater than or equal to the cache
728      line size.  */
729   if (delta >= (HOST_WIDE_INT) cache_line_size)
730     return false;
731 
732   miss_positions = 0;
733   total_positions = (cache_line_size / align_unit) * distinct_iters;
734   max_allowed_miss_positions = (ACCEPTABLE_MISS_RATE * total_positions) / 1000;
735 
736   /* Iterate through all possible alignments of the first
737      memory reference within its cache line.  */
738   for (align = 0; align < cache_line_size; align += align_unit)
739 
740     /* Iterate through all distinct iterations.  */
741     for (iter = 0; iter < distinct_iters; iter++)
742       {
743 	address1 = align + step * iter;
744 	address2 = address1 + delta;
745 	cache_line1 = address1 / cache_line_size;
746 	cache_line2 = address2 / cache_line_size;
747 	if (cache_line1 != cache_line2)
748 	  {
749 	    miss_positions += 1;
750             if (miss_positions > max_allowed_miss_positions)
751 	      return false;
752           }
753       }
754   return true;
755 }
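
/* A worked example matching cases (4) and (5) from the comment at the top of
   the file: cache_line_size 64, step 187, delta 50.  For roughly 50 of the 64
   possible positions of the first access within its cache line the second
   access falls into the next line, so the computed miss rate is about 50/64,
   i.e. roughly 780 per thousand, far above the default ACCEPTABLE_MISS_RATE
   of 50 per thousand; the function therefore returns false and the caller
   does not set PREFETCH_BEFORE for such a pair.  */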
756 
757 /* Prune the prefetch candidate REF using the reuse with BY.
758    If BY_IS_BEFORE is true, BY is before REF in the loop.  */
759 
760 static void
761 prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by,
762 			  bool by_is_before)
763 {
764   HOST_WIDE_INT step;
765   bool backward;
766   HOST_WIDE_INT delta_r = ref->delta, delta_b = by->delta;
767   HOST_WIDE_INT delta = delta_b - delta_r;
768   HOST_WIDE_INT hit_from;
769   unsigned HOST_WIDE_INT prefetch_before, prefetch_block;
770   HOST_WIDE_INT reduced_step;
771   unsigned HOST_WIDE_INT reduced_prefetch_block;
772   tree ref_type;
773   int align_unit;
774 
775   /* If the step is non-constant, we cannot calculate prefetch_before.  */
776   if (!cst_and_fits_in_hwi (ref->group->step)) {
777     return;
778   }
779 
780   step = int_cst_value (ref->group->step);
781 
782   backward = step < 0;
783 
784 
785   if (delta == 0)
786     {
787       /* If the references have the same address, only prefetch the
788 	 former.  */
789       if (by_is_before)
790 	ref->prefetch_before = 0;
791 
792       return;
793     }
794 
795   if (!step)
796     {
797       /* If the reference addresses are invariant and fall into the
798 	 same cache line, prefetch just the first one.  */
799       if (!by_is_before)
800 	return;
801 
802       if (ddown (ref->delta, PREFETCH_BLOCK)
803 	  != ddown (by->delta, PREFETCH_BLOCK))
804 	return;
805 
806       ref->prefetch_before = 0;
807       return;
808     }
809 
810   /* Only prune the reference that is behind in the array.  */
811   if (backward)
812     {
813       if (delta > 0)
814 	return;
815 
816       /* Transform the data so that we may assume that the accesses
817 	 are forward.  */
818       delta = - delta;
819       step = -step;
820       delta_r = PREFETCH_BLOCK - 1 - delta_r;
821       delta_b = PREFETCH_BLOCK - 1 - delta_b;
822     }
823   else
824     {
825       if (delta < 0)
826 	return;
827     }
828 
829   /* Check whether the two references are likely to hit the same cache
830      line, and how far apart the iterations in which that occurs are from
831      each other.  */
832 
833   if (step <= PREFETCH_BLOCK)
834     {
835       /* The accesses are sure to meet.  Let us check when.  */
836       hit_from = ddown (delta_b, PREFETCH_BLOCK) * PREFETCH_BLOCK;
837       prefetch_before = (hit_from - delta_r + step - 1) / step;
838 
839       /* Do not reduce prefetch_before if we meet beyond cache size.  */
840       if (prefetch_before > absu_hwi (L2_CACHE_SIZE_BYTES / step))
841         prefetch_before = PREFETCH_ALL;
842       if (prefetch_before < ref->prefetch_before)
843 	ref->prefetch_before = prefetch_before;
844 
845       return;
846     }
847 
848   /* A more complicated case with step > prefetch_block.  First reduce
849      the ratio between the step and the cache line size to its simplest
850      terms.  The resulting denominator will then represent the number of
851      distinct iterations after which each address will go back to its
852      initial location within the cache line.  This computation assumes
853      that PREFETCH_BLOCK is a power of two.  */
854   prefetch_block = PREFETCH_BLOCK;
855   reduced_prefetch_block = prefetch_block;
856   reduced_step = step;
857   while ((reduced_step & 1) == 0
858 	 && reduced_prefetch_block > 1)
859     {
860       reduced_step >>= 1;
861       reduced_prefetch_block >>= 1;
862     }
863 
864   prefetch_before = delta / step;
865   delta %= step;
866   ref_type = TREE_TYPE (ref->mem);
867   align_unit = TYPE_ALIGN (ref_type) / 8;
868   if (is_miss_rate_acceptable (prefetch_block, step, delta,
869 			       reduced_prefetch_block, align_unit))
870     {
871       /* Do not reduce prefetch_before if we meet beyond cache size.  */
872       if (prefetch_before > L2_CACHE_SIZE_BYTES / PREFETCH_BLOCK)
873         prefetch_before = PREFETCH_ALL;
874       if (prefetch_before < ref->prefetch_before)
875 	ref->prefetch_before = prefetch_before;
876 
877       return;
878     }
879 
880   /* Try also the following iteration.  */
881   prefetch_before++;
882   delta = step - delta;
883   if (is_miss_rate_acceptable (prefetch_block, step, delta,
884 			       reduced_prefetch_block, align_unit))
885     {
886       if (prefetch_before < ref->prefetch_before)
887 	ref->prefetch_before = prefetch_before;
888 
889       return;
890     }
891 
892   /* REF probably does not reuse BY.  */
893   return;
894 }
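
/* A small worked example for the step <= PREFETCH_BLOCK case, again using the
   loop from the comment at the top of the file (char array, 64-byte
   PREFETCH_BLOCK): pruning a[i] against a[i + 64] gives delta_r = 0,
   delta_b = 64 and step = 1, so hit_from = 64 and
   prefetch_before = (64 - 0 + 1 - 1) / 1 = 64, which is how reference (1)
   obtains its PREFETCH_BEFORE of 64.  */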
895 
896 /* Prune the prefetch candidate REF using the reuses with other references
897    in REFS.  */
898 
899 static void
900 prune_ref_by_reuse (struct mem_ref *ref, struct mem_ref *refs)
901 {
902   struct mem_ref *prune_by;
903   bool before = true;
904 
905   prune_ref_by_self_reuse (ref);
906 
907   for (prune_by = refs; prune_by; prune_by = prune_by->next)
908     {
909       if (prune_by == ref)
910 	{
911 	  before = false;
912 	  continue;
913 	}
914 
915       if (!WRITE_CAN_USE_READ_PREFETCH
916 	  && ref->write_p
917 	  && !prune_by->write_p)
918 	continue;
919       if (!READ_CAN_USE_WRITE_PREFETCH
920 	  && !ref->write_p
921 	  && prune_by->write_p)
922 	continue;
923 
924       prune_ref_by_group_reuse (ref, prune_by, before);
925     }
926 }
927 
928 /* Prune the prefetch candidates in GROUP using the reuse analysis.  */
929 
930 static void
931 prune_group_by_reuse (struct mem_ref_group *group)
932 {
933   struct mem_ref *ref_pruned;
934 
935   for (ref_pruned = group->refs; ref_pruned; ref_pruned = ref_pruned->next)
936     {
937       prune_ref_by_reuse (ref_pruned, group->refs);
938 
939       if (dump_file && (dump_flags & TDF_DETAILS))
940 	{
941 	  fprintf (dump_file, "Reference %p:", (void *) ref_pruned);
942 
943 	  if (ref_pruned->prefetch_before == PREFETCH_ALL
944 	      && ref_pruned->prefetch_mod == 1)
945 	    fprintf (dump_file, " no restrictions");
946 	  else if (ref_pruned->prefetch_before == 0)
947 	    fprintf (dump_file, " do not prefetch");
948 	  else if (ref_pruned->prefetch_before <= ref_pruned->prefetch_mod)
949 	    fprintf (dump_file, " prefetch once");
950 	  else
951 	    {
952 	      if (ref_pruned->prefetch_before != PREFETCH_ALL)
953 		{
954 		  fprintf (dump_file, " prefetch before ");
955 		  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
956 			   ref_pruned->prefetch_before);
957 		}
958 	      if (ref_pruned->prefetch_mod != 1)
959 		{
960 		  fprintf (dump_file, " prefetch mod ");
961 		  fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
962 			   ref_pruned->prefetch_mod);
963 		}
964 	    }
965 	  fprintf (dump_file, "\n");
966 	}
967     }
968 }
969 
970 /* Prune the list of prefetch candidates GROUPS using the reuse analysis.  */
971 
972 static void
973 prune_by_reuse (struct mem_ref_group *groups)
974 {
975   for (; groups; groups = groups->next)
976     prune_group_by_reuse (groups);
977 }
978 
979 /* Returns true if we should issue prefetch for REF.  */
980 
981 static bool
982 should_issue_prefetch_p (struct mem_ref *ref)
983 {
984   /* For now do not issue prefetches for references that need prefetching
985      only in the first few iterations.  */
986   if (ref->prefetch_before != PREFETCH_ALL)
987     {
988       if (dump_file && (dump_flags & TDF_DETAILS))
989         fprintf (dump_file, "Ignoring %p due to prefetch_before\n",
990 		 (void *) ref);
991       return false;
992     }
993 
994   /* Do not prefetch nontemporal stores.  */
995   if (ref->storent_p)
996     {
997       if (dump_file && (dump_flags & TDF_DETAILS))
998         fprintf (dump_file, "Ignoring nontemporal store %p\n", (void *) ref);
999       return false;
1000     }
1001 
1002   return true;
1003 }
1004 
1005 /* Decide which of the prefetch candidates in GROUPS to prefetch.
1006    AHEAD is the number of iterations to prefetch ahead (which corresponds
1007    to the number of simultaneous instances of one prefetch running at a
1008    time).  UNROLL_FACTOR is the factor by which the loop is going to be
1009    unrolled.  Returns true if there is anything to prefetch.  */
1010 
1011 static bool
1012 schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor,
1013 		     unsigned ahead)
1014 {
1015   unsigned remaining_prefetch_slots, n_prefetches, prefetch_slots;
1016   unsigned slots_per_prefetch;
1017   struct mem_ref *ref;
1018   bool any = false;
1019 
1020   /* At most SIMULTANEOUS_PREFETCHES should be running at the same time.  */
1021   remaining_prefetch_slots = SIMULTANEOUS_PREFETCHES;
1022 
1023   /* The prefetch will run for AHEAD iterations of the original loop, i.e.,
1024      AHEAD / UNROLL_FACTOR iterations of the unrolled loop.  In each iteration,
1025      it will need a prefetch slot.  */
1026   slots_per_prefetch = (ahead + unroll_factor / 2) / unroll_factor;
1027   if (dump_file && (dump_flags & TDF_DETAILS))
1028     fprintf (dump_file, "Each prefetch instruction takes %u prefetch slots.\n",
1029 	     slots_per_prefetch);
1030 
1031   /* For now we just take memory references one by one and issue
1032      prefetches for as many as possible.  The groups are sorted
1033      starting with the largest step, since the references with
1034      large step are more likely to cause many cache misses.  */
1035 
1036   for (; groups; groups = groups->next)
1037     for (ref = groups->refs; ref; ref = ref->next)
1038       {
1039 	if (!should_issue_prefetch_p (ref))
1040 	  continue;
1041 
1042         /* The loop is far from being sufficiently unrolled for this
1043            prefetch.  Do not generate the prefetch to avoid many redundant
1044            prefetches.  */
1045         if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO)
1046           continue;
1047 
1048 	/* If we need to prefetch the reference each PREFETCH_MOD iterations,
1049 	   and we unroll the loop UNROLL_FACTOR times, we need to insert
1050 	   ceil (UNROLL_FACTOR / PREFETCH_MOD) instructions in each
1051 	   iteration.  */
1052 	n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1053 			/ ref->prefetch_mod);
1054 	prefetch_slots = n_prefetches * slots_per_prefetch;
1055 
1056 	/* If more than half of the prefetches would be lost anyway, do not
1057 	   issue the prefetch.  */
1058 	if (2 * remaining_prefetch_slots < prefetch_slots)
1059 	  continue;
1060 
1061 	ref->issue_prefetch_p = true;
1062 
1063 	if (remaining_prefetch_slots <= prefetch_slots)
1064 	  return true;
1065 	remaining_prefetch_slots -= prefetch_slots;
1066 	any = true;
1067       }
1068 
1069   return any;
1070 }
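
/* To illustrate the slot accounting with made-up numbers: if the target
   allows 6 simultaneous prefetches, AHEAD is 8 and UNROLL_FACTOR is 4, each
   prefetch instruction occupies (8 + 2) / 4 = 2 slots; a reference with
   prefetch_mod 2 then needs ceil (4 / 2) = 2 prefetch instructions per
   unrolled iteration and thus consumes 4 of the 6 available slots.  */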
1071 
1072 /* Return TRUE if no prefetch is going to be generated in the given
1073    GROUPS.  */
1074 
1075 static bool
1076 nothing_to_prefetch_p (struct mem_ref_group *groups)
1077 {
1078   struct mem_ref *ref;
1079 
1080   for (; groups; groups = groups->next)
1081     for (ref = groups->refs; ref; ref = ref->next)
1082       if (should_issue_prefetch_p (ref))
1083 	return false;
1084 
1085   return true;
1086 }
1087 
1088 /* Estimate the number of prefetches in the given GROUPS.
1089    UNROLL_FACTOR is the factor by which LOOP was unrolled.  */
1090 
1091 static int
1092 estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor)
1093 {
1094   struct mem_ref *ref;
1095   unsigned n_prefetches;
1096   int prefetch_count = 0;
1097 
1098   for (; groups; groups = groups->next)
1099     for (ref = groups->refs; ref; ref = ref->next)
1100       if (should_issue_prefetch_p (ref))
1101 	{
1102 	  n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1103 			  / ref->prefetch_mod);
1104 	  prefetch_count += n_prefetches;
1105 	}
1106 
1107   return prefetch_count;
1108 }
1109 
1110 /* Issue prefetches for the reference REF into the loop as decided before.
1111    AHEAD is the number of iterations to prefetch ahead.  UNROLL_FACTOR
1112    is the factor by which LOOP was unrolled.  */
1113 
1114 static void
1115 issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead)
1116 {
1117   HOST_WIDE_INT delta;
1118   tree addr, addr_base, write_p, local, forward;
1119   gcall *prefetch;
1120   gimple_stmt_iterator bsi;
1121   unsigned n_prefetches, ap;
1122   bool nontemporal = ref->reuse_distance >= L2_CACHE_SIZE_BYTES;
1123 
1124   if (dump_file && (dump_flags & TDF_DETAILS))
1125     fprintf (dump_file, "Issued%s prefetch for %p.\n",
1126 	     nontemporal ? " nontemporal" : "",
1127 	     (void *) ref);
1128 
1129   bsi = gsi_for_stmt (ref->stmt);
1130 
1131   n_prefetches = ((unroll_factor + ref->prefetch_mod - 1)
1132 		  / ref->prefetch_mod);
1133   addr_base = build_fold_addr_expr_with_type (ref->mem, ptr_type_node);
1134   addr_base = force_gimple_operand_gsi (&bsi, unshare_expr (addr_base),
1135 					true, NULL, true, GSI_SAME_STMT);
1136   write_p = ref->write_p ? integer_one_node : integer_zero_node;
1137   local = nontemporal ? integer_zero_node : integer_three_node;
1138 
1139   for (ap = 0; ap < n_prefetches; ap++)
1140     {
1141       if (cst_and_fits_in_hwi (ref->group->step))
1142         {
1143           /* Determine the address to prefetch.  */
1144           delta = (ahead + ap * ref->prefetch_mod) *
1145 		   int_cst_value (ref->group->step);
1146           addr = fold_build_pointer_plus_hwi (addr_base, delta);
1147           addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, NULL,
1148                                            true, GSI_SAME_STMT);
1149         }
1150       else
1151         {
1152           /* The step size is non-constant but loop-invariant.  We use the
1153              heuristic of simply prefetching AHEAD iterations ahead.  */
1154           forward = fold_build2 (MULT_EXPR, sizetype,
1155                                  fold_convert (sizetype, ref->group->step),
1156                                  fold_convert (sizetype, size_int (ahead)));
1157           addr = fold_build_pointer_plus (addr_base, forward);
1158           addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true,
1159 					   NULL, true, GSI_SAME_STMT);
1160       }
1161       /* Create the prefetch instruction.  */
1162       prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
1163 				    3, addr, write_p, local);
1164       gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT);
1165     }
1166 }
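
/* As an illustration with made-up numbers: for a read reference p[i] whose
   group step is 64 bytes and whose prefetch_mod is 1, with AHEAD = 4 and
   UNROLL_FACTOR = 1 the loop above emits a single call roughly equivalent to

     __builtin_prefetch ((char *) &p[i] + 256, 0, 3);

   i.e. a read prefetch (second argument 0) with full temporal locality (third
   argument 3), issued 4 iterations ahead of the access.  */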
1167 
1168 /* Issue prefetches for the references in GROUPS into the loop as decided
1169    before.  AHEAD is the number of iterations to prefetch ahead.
1170    UNROLL_FACTOR is the factor by which LOOP was unrolled.  */
1171 
1172 static void
1173 issue_prefetches (struct mem_ref_group *groups,
1174 		  unsigned unroll_factor, unsigned ahead)
1175 {
1176   struct mem_ref *ref;
1177 
1178   for (; groups; groups = groups->next)
1179     for (ref = groups->refs; ref; ref = ref->next)
1180       if (ref->issue_prefetch_p)
1181 	issue_prefetch_ref (ref, unroll_factor, ahead);
1182 }
1183 
1184 /* Returns true if REF is a memory write for which a nontemporal store insn
1185    can be used.  */
1186 
1187 static bool
1188 nontemporal_store_p (struct mem_ref *ref)
1189 {
1190   machine_mode mode;
1191   enum insn_code code;
1192 
1193   /* REF must be a write that is not reused.  We require it to be independent
1194      of all other memory references in the loop, as the nontemporal stores may
1195      be reordered with respect to other memory references.  */
1196   if (!ref->write_p
1197       || !ref->independent_p
1198       || ref->reuse_distance < L2_CACHE_SIZE_BYTES)
1199     return false;
1200 
1201   /* Check that we have the storent instruction for the mode.  */
1202   mode = TYPE_MODE (TREE_TYPE (ref->mem));
1203   if (mode == BLKmode)
1204     return false;
1205 
1206   code = optab_handler (storent_optab, mode);
1207   return code != CODE_FOR_nothing;
1208 }
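
/* In other words, a store qualifies only if it is provably not reused soon
   (its reuse distance is at least the L2 size) and the target provides a
   nontemporal store pattern for the access's mode, such as the movnt family
   of stores on x86 targets.  */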
1209 
1210 /* If REF is a nontemporal store, we mark the corresponding modify statement
1211    and return true.  Otherwise, we return false.  */
1212 
1213 static bool
1214 mark_nontemporal_store (struct mem_ref *ref)
1215 {
1216   if (!nontemporal_store_p (ref))
1217     return false;
1218 
1219   if (dump_file && (dump_flags & TDF_DETAILS))
1220     fprintf (dump_file, "Marked reference %p as a nontemporal store.\n",
1221 	     (void *) ref);
1222 
1223   gimple_assign_set_nontemporal_move (ref->stmt, true);
1224   ref->storent_p = true;
1225 
1226   return true;
1227 }
1228 
1229 /* Issue a memory fence instruction after LOOP.  */
1230 
1231 static void
1232 emit_mfence_after_loop (struct loop *loop)
1233 {
1234   vec<edge> exits = get_loop_exit_edges (loop);
1235   edge exit;
1236   gcall *call;
1237   gimple_stmt_iterator bsi;
1238   unsigned i;
1239 
1240   FOR_EACH_VEC_ELT (exits, i, exit)
1241     {
1242       call = gimple_build_call (FENCE_FOLLOWING_MOVNT, 0);
1243 
1244       if (!single_pred_p (exit->dest)
1245 	  /* If possible, we prefer not to insert the fence on other paths
1246 	     in the CFG.  */
1247 	  && !(exit->flags & EDGE_ABNORMAL))
1248 	split_loop_exit_edge (exit);
1249       bsi = gsi_after_labels (exit->dest);
1250 
1251       gsi_insert_before (&bsi, call, GSI_NEW_STMT);
1252     }
1253 
1254   exits.release ();
1255   update_ssa (TODO_update_ssa_only_virtuals);
1256 }
1257 
1258 /* Returns true if we can use storent in loop, false otherwise.  */
1259 
1260 static bool
1261 may_use_storent_in_loop_p (struct loop *loop)
1262 {
1263   bool ret = true;
1264 
1265   if (loop->inner != NULL)
1266     return false;
1267 
1268   /* If we must issue a mfence insn after using storent, check that there
1269      is a suitable place for it at each of the loop exits.  */
1270   if (FENCE_FOLLOWING_MOVNT != NULL_TREE)
1271     {
1272       vec<edge> exits = get_loop_exit_edges (loop);
1273       unsigned i;
1274       edge exit;
1275 
1276       FOR_EACH_VEC_ELT (exits, i, exit)
1277 	if ((exit->flags & EDGE_ABNORMAL)
1278 	    && exit->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1279 	  ret = false;
1280 
1281       exits.release ();
1282     }
1283 
1284   return ret;
1285 }
1286 
1287 /* Marks nontemporal stores in LOOP.  GROUPS contains the description of memory
1288    references in the loop.  */
1289 
1290 static void
1291 mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
1292 {
1293   struct mem_ref *ref;
1294   bool any = false;
1295 
1296   if (!may_use_storent_in_loop_p (loop))
1297     return;
1298 
1299   for (; groups; groups = groups->next)
1300     for (ref = groups->refs; ref; ref = ref->next)
1301       any |= mark_nontemporal_store (ref);
1302 
1303   if (any && FENCE_FOLLOWING_MOVNT != NULL_TREE)
1304     emit_mfence_after_loop (loop);
1305 }
1306 
1307 /* Determines whether we can profitably unroll LOOP FACTOR times, and if
1308    this is the case, fill in DESC with the description of the number of
1309    iterations.  */
1310 
1311 static bool
1312 should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
1313 		      unsigned factor)
1314 {
1315   if (!can_unroll_loop_p (loop, factor, desc))
1316     return false;
1317 
1318   /* We only consider loops without control flow for unrolling.  This is not
1319      a hard restriction -- tree_unroll_loop works with arbitrary loops
1320      as well; but the unrolling/prefetching is usually more profitable for
1321      loops consisting of a single basic block, and we want to limit the
1322      code growth.  */
1323   if (loop->num_nodes > 2)
1324     return false;
1325 
1326   return true;
1327 }
1328 
1329 /* Determine the factor by which to unroll LOOP, from the information
1330    contained in the list of memory references REFS.  The description of the
1331    number of iterations of LOOP is stored to DESC.  NINSNS is the number of
1332    insns of the LOOP.  EST_NITER is the estimated number of iterations of
1333    the loop, or -1 if no estimate is available.  */
1334 
1335 static unsigned
1336 determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
1337 			 unsigned ninsns, struct tree_niter_desc *desc,
1338 			 HOST_WIDE_INT est_niter)
1339 {
1340   unsigned upper_bound;
1341   unsigned nfactor, factor, mod_constraint;
1342   struct mem_ref_group *agp;
1343   struct mem_ref *ref;
1344 
1345   /* First check whether the loop is not too large to unroll.  We ignore
1346      PARAM_MAX_UNROLL_TIMES, because for small loops, it prevented us
1347      from unrolling them enough to make exactly one cache line covered by each
1348      iteration.  Also, the goal of PARAM_MAX_UNROLL_TIMES is to prevent
1349      us from unrolling the loops too many times in cases where we only expect
1350      gains from better scheduling and decreasing loop overhead, which is not
1351      the case here.  */
1352   upper_bound = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / ninsns;
1353 
1354   /* If we unrolled the loop more times than it iterates, the unrolled version
1355      of the loop would never be entered.  */
1356   if (est_niter >= 0 && est_niter < (HOST_WIDE_INT) upper_bound)
1357     upper_bound = est_niter;
1358 
1359   if (upper_bound <= 1)
1360     return 1;
1361 
1362   /* Choose the factor so that we may prefetch each cache line just once,
1363      but bound the unrolling by UPPER_BOUND.  */
1364   factor = 1;
1365   for (agp = refs; agp; agp = agp->next)
1366     for (ref = agp->refs; ref; ref = ref->next)
1367       if (should_issue_prefetch_p (ref))
1368 	{
1369 	  mod_constraint = ref->prefetch_mod;
1370 	  nfactor = least_common_multiple (mod_constraint, factor);
1371 	  if (nfactor <= upper_bound)
1372 	    factor = nfactor;
1373 	}
1374 
1375   if (!should_unroll_loop_p (loop, desc, factor))
1376     return 1;
1377 
1378   return factor;
1379 }
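
/* For example (purely illustrative numbers): with PARAM_MAX_UNROLLED_INSNS
   equal to 200 and a loop of 10 insns, upper_bound is 20; if the prefetchable
   references have prefetch_mod 4 and 8, the factor grows 1 -> 4 -> 8, while a
   reference with prefetch_mod 32 leaves it unchanged, since the required
   least common multiple would exceed the upper bound.  */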
1380 
1381 /* Returns the total volume of the memory references REFS, taking into account
1382    reuses in the innermost loop and cache line size.  TODO -- we should also
1383    take into account reuses across the iterations of the loops in the loop
1384    nest.  */
1385 
1386 static unsigned
1387 volume_of_references (struct mem_ref_group *refs)
1388 {
1389   unsigned volume = 0;
1390   struct mem_ref_group *gr;
1391   struct mem_ref *ref;
1392 
1393   for (gr = refs; gr; gr = gr->next)
1394     for (ref = gr->refs; ref; ref = ref->next)
1395       {
1396 	/* Almost always reuses another value?  */
1397 	if (ref->prefetch_before != PREFETCH_ALL)
1398 	  continue;
1399 
1400 	/* If several iterations access the same cache line, use the size of
1401 	   the line divided by this number.  Otherwise, a cache line is
1402 	   accessed in each iteration.  TODO -- in the latter case, we should
1403 	   take the size of the reference into account, rounding it up on cache
1404 	   line size multiple.  */
1405 	volume += L1_CACHE_LINE_SIZE / ref->prefetch_mod;
1406       }
1407   return volume;
1408 }
1409 
1410 /* Returns the volume of memory references accessed across VEC iterations of
1411    loops, whose sizes are described in the LOOP_SIZES array.  N is the number
1412    of the loops in the nest (length of VEC and LOOP_SIZES vectors).  */
1413 
1414 static unsigned
1415 volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
1416 {
1417   unsigned i;
1418 
1419   for (i = 0; i < n; i++)
1420     if (vec[i] != 0)
1421       break;
1422 
1423   if (i == n)
1424     return 0;
1425 
1426   gcc_assert (vec[i] > 0);
1427 
1428   /* We ignore the parts of the distance vector in subloops, since usually
1429      the numbers of iterations are much smaller.  */
1430   return loop_sizes[i] * vec[i];
1431 }
1432 
1433 /* Add the steps of ACCESS_FN multiplied by STRIDE to the array STRIDES
1434    at the position corresponding to the loop of the step.  N is the depth
1435    of the considered loop nest, and LOOP is its innermost loop.  */
1436 
1437 static void
1438 add_subscript_strides (tree access_fn, unsigned stride,
1439 		       HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
1440 {
1441   struct loop *aloop;
1442   tree step;
1443   HOST_WIDE_INT astep;
1444   unsigned min_depth = loop_depth (loop) - n;
1445 
1446   while (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
1447     {
1448       aloop = get_chrec_loop (access_fn);
1449       step = CHREC_RIGHT (access_fn);
1450       access_fn = CHREC_LEFT (access_fn);
1451 
1452       if ((unsigned) loop_depth (aloop) <= min_depth)
1453 	continue;
1454 
1455       if (tree_fits_shwi_p (step))
1456 	astep = tree_to_shwi (step);
1457       else
1458 	astep = L1_CACHE_LINE_SIZE;
1459 
1460       strides[n - 1 - loop_depth (loop) + loop_depth (aloop)] += astep * stride;
1461 
1462     }
1463 }
1464 
1465 /* Returns the volume of memory references accessed between two consecutive
1466    self-reuses of the reference DR.  We consider the subscripts of DR in N
1467    loops, and LOOP_SIZES contains the volumes of accesses in each of the
1468    loops.  LOOP is the innermost loop of the current loop nest.  */
1469 
1470 static unsigned
1471 self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
1472 		     struct loop *loop)
1473 {
1474   tree stride, access_fn;
1475   HOST_WIDE_INT *strides, astride;
1476   vec<tree> access_fns;
1477   tree ref = DR_REF (dr);
1478   unsigned i, ret = ~0u;
1479 
1480   /* In the following example:
1481 
1482      for (i = 0; i < N; i++)
1483        for (j = 0; j < N; j++)
1484          use (a[j][i]);
1485      the same cache line is accessed each N steps (except if the change from
1486      i to i + 1 crosses the boundary of the cache line).  Thus, for self-reuse,
1487      we cannot rely purely on the results of the data dependence analysis.
1488 
1489      Instead, we compute the stride of the reference in each loop, and consider
1490      the innermost loop in which the stride is less than the cache line size.  */
1491 
1492   strides = XCNEWVEC (HOST_WIDE_INT, n);
1493   access_fns = DR_ACCESS_FNS (dr);
1494 
1495   FOR_EACH_VEC_ELT (access_fns, i, access_fn)
1496     {
1497       /* Keep track of the reference corresponding to the subscript, so that we
1498 	 know its stride.  */
1499       while (handled_component_p (ref) && TREE_CODE (ref) != ARRAY_REF)
1500 	ref = TREE_OPERAND (ref, 0);
1501 
1502       if (TREE_CODE (ref) == ARRAY_REF)
1503 	{
1504 	  stride = TYPE_SIZE_UNIT (TREE_TYPE (ref));
1505 	  if (tree_fits_uhwi_p (stride))
1506 	    astride = tree_to_uhwi (stride);
1507 	  else
1508 	    astride = L1_CACHE_LINE_SIZE;
1509 
1510 	  ref = TREE_OPERAND (ref, 0);
1511 	}
1512       else
1513 	astride = 1;
1514 
1515       add_subscript_strides (access_fn, astride, strides, n, loop);
1516     }
1517 
1518   for (i = n; i-- > 0; )
1519     {
1520       unsigned HOST_WIDE_INT s;
1521 
1522       s = strides[i] < 0 ?  -strides[i] : strides[i];
1523 
1524       if (s < (unsigned) L1_CACHE_LINE_SIZE
1525 	  && (loop_sizes[i]
1526 	      > (unsigned) (L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)))
1527 	{
1528 	  ret = loop_sizes[i];
1529 	  break;
1530 	}
1531     }
1532 
1533   free (strides);
1534   return ret;
1535 }
1536 
1537 /* Determines the distance until the first reuse of each reference in REFS
1538    in the loop nest of LOOP.  NO_OTHER_REFS is true if there are no other
1539    memory references in the loop.  Return false if the analysis fails.  */
1540 
1541 static bool
1542 determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
1543 			   bool no_other_refs)
1544 {
1545   struct loop *nest, *aloop;
1546   vec<data_reference_p> datarefs = vNULL;
1547   vec<ddr_p> dependences = vNULL;
1548   struct mem_ref_group *gr;
1549   struct mem_ref *ref, *refb;
1550   vec<loop_p> vloops = vNULL;
1551   unsigned *loop_data_size;
1552   unsigned i, j, n;
1553   unsigned volume, dist, adist;
1554   HOST_WIDE_INT vol;
1555   data_reference_p dr;
1556   ddr_p dep;
1557 
1558   if (loop->inner)
1559     return true;
1560 
1561   /* Find the outermost loop of the loop nest of loop (we require that
1562      there are no sibling loops inside the nest).  */
1563   nest = loop;
1564   while (1)
1565     {
1566       aloop = loop_outer (nest);
1567 
1568       if (aloop == current_loops->tree_root
1569 	  || aloop->inner->next)
1570 	break;
1571 
1572       nest = aloop;
1573     }
1574 
1575   /* For each loop, determine the amount of data accessed in each iteration.
1576      We use this to estimate whether the reference is evicted from the
1577      cache before its reuse.  */
1578   find_loop_nest (nest, &vloops);
1579   n = vloops.length ();
1580   loop_data_size = XNEWVEC (unsigned, n);
1581   volume = volume_of_references (refs);
1582   i = n;
1583   while (i-- != 0)
1584     {
1585       loop_data_size[i] = volume;
1586       /* Bound the volume by the L2 cache size, since above this bound,
1587 	 all dependence distances are equivalent.  */
1588       if (volume > L2_CACHE_SIZE_BYTES)
1589 	continue;
1590 
1591       aloop = vloops[i];
1592       vol = estimated_stmt_executions_int (aloop);
1593       if (vol == -1)
1594 	vol = expected_loop_iterations (aloop);
1595       volume *= vol;
1596     }
1597 
1598   /* Prepare the references in the form suitable for data dependence
1599      analysis.  We ignore unanalyzable data references (the results
1600      are used just as a heuristic to estimate the temporality of the
1601      references, hence we do not need to worry about correctness).  */
1602   for (gr = refs; gr; gr = gr->next)
1603     for (ref = gr->refs; ref; ref = ref->next)
1604       {
1605 	dr = create_data_ref (nest, loop_containing_stmt (ref->stmt),
1606 			      ref->mem, ref->stmt, !ref->write_p);
1607 
1608 	if (dr)
1609 	  {
1610 	    ref->reuse_distance = volume;
1611 	    dr->aux = ref;
1612 	    datarefs.safe_push (dr);
1613 	  }
1614 	else
1615 	  no_other_refs = false;
1616       }
1617 
1618   FOR_EACH_VEC_ELT (datarefs, i, dr)
1619     {
1620       dist = self_reuse_distance (dr, loop_data_size, n, loop);
1621       ref = (struct mem_ref *) dr->aux;
1622       if (ref->reuse_distance > dist)
1623 	ref->reuse_distance = dist;
1624 
1625       if (no_other_refs)
1626 	ref->independent_p = true;
1627     }
1628 
1629   if (!compute_all_dependences (datarefs, &dependences, vloops, true))
1630     return false;
1631 
1632   FOR_EACH_VEC_ELT (dependences, i, dep)
1633     {
1634       if (DDR_ARE_DEPENDENT (dep) == chrec_known)
1635 	continue;
1636 
1637       ref = (struct mem_ref *) DDR_A (dep)->aux;
1638       refb = (struct mem_ref *) DDR_B (dep)->aux;
1639 
1640       if (DDR_ARE_DEPENDENT (dep) == chrec_dont_know
1641 	  || DDR_NUM_DIST_VECTS (dep) == 0)
1642 	{
1643 	  /* If the dependence cannot be analyzed, assume that there might be
1644 	     a reuse.  */
1645 	  dist = 0;
1646 
1647 	  ref->independent_p = false;
1648 	  refb->independent_p = false;
1649 	}
1650       else
1651 	{
1652 	  /* The distance vectors are normalized to be always lexicographically
1653 	     positive, hence we cannot tell just from them whether DDR_A comes
1654 	     before DDR_B or vice versa.  However, this is not important
1655 	     anyway -- if DDR_A is close to DDR_B, then it is either reused in
1656 	     DDR_B (and it is not nontemporal), or it reuses the value of DDR_B
1657 	     in cache (and marking it as nontemporal would not affect
1658 	     anything).  */
1659 
1660 	  dist = volume;
1661 	  for (j = 0; j < DDR_NUM_DIST_VECTS (dep); j++)
1662 	    {
1663 	      adist = volume_of_dist_vector (DDR_DIST_VECT (dep, j),
1664 					     loop_data_size, n);
1665 
1666 	      /* If this is a dependence in the innermost loop (i.e., the
1667 		 distances in all superloops are zero) and it is not
1668 		 the trivial self-dependence with distance zero, record that
1669 		 the references are not completely independent.  */
1670 	      if (lambda_vector_zerop (DDR_DIST_VECT (dep, j), n - 1)
1671 		  && (ref != refb
1672 		      || DDR_DIST_VECT (dep, j)[n-1] != 0))
1673 		{
1674 		  ref->independent_p = false;
1675 		  refb->independent_p = false;
1676 		}
1677 
1678 	      /* Ignore accesses closer than
1679 		 L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION,
1680 		 so that we use nontemporal prefetches e.g. if a single memory
1681 		 location is accessed several times in a single iteration of
1682 		 the loop.  */
1683 	      if (adist < L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION)
1684 		continue;
1685 
1686 	      if (adist < dist)
1687 		dist = adist;
1688 	    }
1689 	}
1690 
1691       if (ref->reuse_distance > dist)
1692 	ref->reuse_distance = dist;
1693       if (refb->reuse_distance > dist)
1694 	refb->reuse_distance = dist;
1695     }
1696 
1697   free_dependence_relations (dependences);
1698   free_data_refs (datarefs);
1699   free (loop_data_size);
1700 
1701   if (dump_file && (dump_flags & TDF_DETAILS))
1702     {
1703       fprintf (dump_file, "Reuse distances:\n");
1704       for (gr = refs; gr; gr = gr->next)
1705 	for (ref = gr->refs; ref; ref = ref->next)
1706 	  fprintf (dump_file, " ref %p distance %u\n",
1707 		   (void *) ref, ref->reuse_distance);
1708     }
1709 
1710   return true;
1711 }
1712 
1713 /* Determine whether or not the trip count to ahead ratio is too small based
1714    on profitability considerations.
1715    AHEAD: the iteration ahead distance,
1716    EST_NITER: the estimated trip count.  */
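/* For example (hypothetical numbers): with a TRIP_COUNT_TO_AHEAD_RATIO of 4
   and an ahead distance of 5 iterations, loops estimated to roll fewer than
   20 times are rejected as not worth prefetching.  */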
1717 
1718 static bool
1719 trip_count_to_ahead_ratio_too_small_p (unsigned ahead, HOST_WIDE_INT est_niter)
1720 {
1721   /* Assume the trip count to ahead ratio is big enough if the trip count
1722      could not be estimated at compile time.  */
1723   if (est_niter < 0)
1724     return false;
1725 
1726   if (est_niter < (HOST_WIDE_INT) (TRIP_COUNT_TO_AHEAD_RATIO * ahead))
1727     {
1728       if (dump_file && (dump_flags & TDF_DETAILS))
1729 	fprintf (dump_file,
1730 		 "Not prefetching -- loop estimated to roll only %d times\n",
1731 		 (int) est_niter);
1732       return true;
1733     }
1734 
1735   return false;
1736 }
1737 
1738 /* Determine whether or not the number of memory references in the loop is
1739    reasonable based on profitability and compilation time considerations.
1740    NINSNS: estimated number of instructions in the loop,
1741    MEM_REF_COUNT: total number of memory references in the loop.  */
1742 
1743 static bool
1744 mem_ref_count_reasonable_p (unsigned ninsns, unsigned mem_ref_count)
1745 {
1746   int insn_to_mem_ratio;
1747 
1748   if (mem_ref_count == 0)
1749     return false;
1750 
1751   /* Miss rate computation (is_miss_rate_acceptable) and dependence analysis
1752      (compute_all_dependences) have costs quadratic in MEM_REF_COUNT.
1753      To avoid huge compilation time, we give up prefetching if mem_ref_count
1754      is too large.  */
1755   if (mem_ref_count > PREFETCH_MAX_MEM_REFS_PER_LOOP)
1756     return false;
1757 
1758   /* Prefetching improves performance by overlapping cache missing
1759      memory accesses with CPU operations.  If the loop does not have
1760      enough CPU operations to overlap with memory operations, prefetching
1761      won't give a significant benefit.  One approximate way of checking
1762      this is to require the ratio of instructions to memory references to
1763      be above a certain limit.  This approximation works well in practice.
1764      TODO: Implement a more precise computation by estimating the time
1765      for each CPU or memory op in the loop. Time estimates for memory ops
1766      should account for cache misses.  */
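  /* For example (hypothetical numbers): a loop body of 40 insns containing
     10 memory references has an insn-to-mem ratio of 4, which passes a
     PREFETCH_MIN_INSN_TO_MEM_RATIO of 3.  */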
1767   insn_to_mem_ratio = ninsns / mem_ref_count;
1768 
1769   if (insn_to_mem_ratio < PREFETCH_MIN_INSN_TO_MEM_RATIO)
1770     {
1771       if (dump_file && (dump_flags & TDF_DETAILS))
1772         fprintf (dump_file,
1773 		 "Not prefetching -- instruction to memory reference ratio (%d) too small\n",
1774 		 insn_to_mem_ratio);
1775       return false;
1776     }
1777 
1778   return true;
1779 }
1780 
1781 /* Determine whether or not the instruction to prefetch ratio in the loop is
1782    too small based on profitability considerations.
1783    NINSNS: estimated number of instructions in the loop,
1784    PREFETCH_COUNT: an estimate of the number of prefetches,
1785    UNROLL_FACTOR:  the factor to unroll the loop if prefetching.  */
1786 
1787 static bool
1788 insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
1789                                      unsigned unroll_factor)
1790 {
1791   int insn_to_prefetch_ratio;
1792 
1793   /* Prefetching most likely causes performance degradation when the instruction
1794      to prefetch ratio is too small.  Too many prefetch instructions in a loop
1795      may reduce the I-cache performance.
1796      (unroll_factor * ninsns) is used to estimate the number of instructions in
1797      the unrolled loop.  This implementation is a bit simplistic -- the number
1798      of issued prefetch instructions is also affected by unrolling.  So,
1799      prefetch_mod and the unroll factor should be taken into account when
1800      determining prefetch_count.  Also, the number of insns of the unrolled
1801      loop will usually be significantly smaller than the number of insns of the
1802      original loop * unroll_factor (at least the induction variable increases
1803      and the exit branches will get eliminated), so it might be better to use
1804      tree_estimate_loop_size + estimated_unrolled_size.  */
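  /* For example (hypothetical numbers): with 30 insns in the loop body, an
     unroll factor of 4 and 12 prefetches, the ratio is (4 * 30) / 12 = 10,
     which passes a MIN_INSN_TO_PREFETCH_RATIO of 9.  */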
1805   insn_to_prefetch_ratio = (unroll_factor * ninsns) / prefetch_count;
1806   if (insn_to_prefetch_ratio < MIN_INSN_TO_PREFETCH_RATIO)
1807     {
1808       if (dump_file && (dump_flags & TDF_DETAILS))
1809         fprintf (dump_file,
1810 		 "Not prefetching -- instruction to prefetch ratio (%d) too small\n",
1811 		 insn_to_prefetch_ratio);
1812       return true;
1813     }
1814 
1815   return false;
1816 }
1817 
1818 
1819 /* Issue prefetch instructions for array references in LOOP.  Returns
1820    true if the LOOP was unrolled.  */
1821 
1822 static bool
1823 loop_prefetch_arrays (struct loop *loop)
1824 {
1825   struct mem_ref_group *refs;
1826   unsigned ahead, ninsns, time, unroll_factor;
1827   HOST_WIDE_INT est_niter;
1828   struct tree_niter_desc desc;
1829   bool unrolled = false, no_other_refs;
1830   unsigned prefetch_count;
1831   unsigned mem_ref_count;
1832 
1833   if (optimize_loop_nest_for_size_p (loop))
1834     {
1835       if (dump_file && (dump_flags & TDF_DETAILS))
1836 	fprintf (dump_file, "  ignored (cold area)\n");
1837       return false;
1838     }
1839 
1840   /* FIXME: the time should be weighted by the probabilities of the blocks in
1841      the loop body.  */
1842   time = tree_num_loop_insns (loop, &eni_time_weights);
1843   if (time == 0)
1844     return false;
1845 
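  /* AHEAD is the number of iterations a prefetch must be issued in advance
     for the data to arrive in time; e.g. (hypothetical numbers) with a
     PREFETCH_LATENCY of 200 and a loop body costing 50 time units,
     ahead = (200 + 50 - 1) / 50 = 4.  */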
1846   ahead = (PREFETCH_LATENCY + time - 1) / time;
1847   est_niter = estimated_stmt_executions_int (loop);
1848   if (est_niter == -1)
1849     est_niter = max_stmt_executions_int (loop);
1850 
1851   /* Prefetching is not likely to be profitable if the trip count to ahead
1852      ratio is too small.  */
1853   if (trip_count_to_ahead_ratio_too_small_p (ahead, est_niter))
1854     return false;
1855 
1856   ninsns = tree_num_loop_insns (loop, &eni_size_weights);
1857 
1858   /* Step 1: gather the memory references.  */
1859   refs = gather_memory_references (loop, &no_other_refs, &mem_ref_count);
1860 
1861   /* Give up prefetching if the number of memory references in the
1862      loop is not reasonable based on profitability and compilation time
1863      considerations.  */
1864   if (!mem_ref_count_reasonable_p (ninsns, mem_ref_count))
1865     goto fail;
1866 
1867   /* Step 2: estimate the reuse effects.  */
1868   prune_by_reuse (refs);
1869 
1870   if (nothing_to_prefetch_p (refs))
1871     goto fail;
1872 
1873   if (!determine_loop_nest_reuse (loop, refs, no_other_refs))
1874     goto fail;
1875 
1876   /* Step 3: determine unroll factor.  */
1877   unroll_factor = determine_unroll_factor (loop, refs, ninsns, &desc,
1878 					   est_niter);
1879 
1880   /* Estimate prefetch count for the unrolled loop.  */
1881   prefetch_count = estimate_prefetch_count (refs, unroll_factor);
1882   if (prefetch_count == 0)
1883     goto fail;
1884 
1885   if (dump_file && (dump_flags & TDF_DETAILS))
1886     fprintf (dump_file, "Ahead %d, unroll factor %d, trip count "
1887 	     HOST_WIDE_INT_PRINT_DEC "\n"
1888 	     "insn count %d, mem ref count %d, prefetch count %d\n",
1889 	     ahead, unroll_factor, est_niter,
1890 	     ninsns, mem_ref_count, prefetch_count);
1891 
1892   /* Prefetching is not likely to be profitable if the instruction to prefetch
1893      ratio is too small.  */
1894   if (insn_to_prefetch_ratio_too_small_p (ninsns, prefetch_count,
1895 					  unroll_factor))
1896     goto fail;
1897 
1898   mark_nontemporal_stores (loop, refs);
1899 
1900   /* Step 4: what to prefetch?  */
1901   if (!schedule_prefetches (refs, unroll_factor, ahead))
1902     goto fail;
1903 
1904   /* Step 5: unroll the loop.  TODO -- peeling of first and last few
1905      iterations so that we do not issue superfluous prefetches.  */
1906   if (unroll_factor != 1)
1907     {
1908       tree_unroll_loop (loop, unroll_factor,
1909 			single_dom_exit (loop), &desc);
1910       unrolled = true;
1911     }
1912 
1913   /* Step 6: issue the prefetches.  */
1914   issue_prefetches (refs, unroll_factor, ahead);
1915 
1916 fail:
1917   release_mem_refs (refs);
1918   return unrolled;
1919 }
1920 
1921 /* Issue prefetch instructions for array references in loops.  */
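/* Entry point of the prefetching pass; it is normally enabled with
   -fprefetch-loop-arrays (see the gate of pass_loop_prefetch below).  */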
1922 
1923 unsigned int
1924 tree_ssa_prefetch_arrays (void)
1925 {
1926   struct loop *loop;
1927   bool unrolled = false;
1928   int todo_flags = 0;
1929 
1930   if (!targetm.have_prefetch ()
1931       /* It is possible to ask compiler for say -mtune=i486 -march=pentium4.
1932 	 -mtune=i486 causes us to have PREFETCH_BLOCK 0, since this is part
1933 	 of processor costs and i486 does not have prefetch, but
1934 	 -march=pentium4 causes targetm.have_prefetch to be true.  Ugh.  */
1935       || PREFETCH_BLOCK == 0)
1936     return 0;
1937 
1938   if (dump_file && (dump_flags & TDF_DETAILS))
1939     {
1940       fprintf (dump_file, "Prefetching parameters:\n");
1941       fprintf (dump_file, "    simultaneous prefetches: %d\n",
1942 	       SIMULTANEOUS_PREFETCHES);
1943       fprintf (dump_file, "    prefetch latency: %d\n", PREFETCH_LATENCY);
1944       fprintf (dump_file, "    prefetch block size: %d\n", PREFETCH_BLOCK);
1945       fprintf (dump_file, "    L1 cache size: %d lines, %d kB\n",
1946 	       L1_CACHE_SIZE_BYTES / L1_CACHE_LINE_SIZE, L1_CACHE_SIZE);
1947       fprintf (dump_file, "    L1 cache line size: %d\n", L1_CACHE_LINE_SIZE);
1948       fprintf (dump_file, "    L2 cache size: %d kB\n", L2_CACHE_SIZE);
1949       fprintf (dump_file, "    min insn-to-prefetch ratio: %d\n",
1950 	       MIN_INSN_TO_PREFETCH_RATIO);
1951       fprintf (dump_file, "    min insn-to-mem ratio: %d\n",
1952 	       PREFETCH_MIN_INSN_TO_MEM_RATIO);
1953       fprintf (dump_file, "\n");
1954     }
1955 
1956   initialize_original_copy_tables ();
1957 
1958   if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
1959     {
1960       tree type = build_function_type_list (void_type_node,
1961 					    const_ptr_type_node, NULL_TREE);
1962       tree decl = add_builtin_function ("__builtin_prefetch", type,
1963 					BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
1964 					NULL, NULL_TREE);
1965       DECL_IS_NOVOPS (decl) = true;
1966       set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
1967     }
1968 
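  /* Process the loops from innermost to outermost.  The prefetches issued
     for them are calls to the builtin declared above, roughly of the form
     __builtin_prefetch (addr, write_p, locality); the exact arguments are
     built when the prefetches are issued.  */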
1969   FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1970     {
1971       if (dump_file && (dump_flags & TDF_DETAILS))
1972 	fprintf (dump_file, "Processing loop %d:\n", loop->num);
1973 
1974       unrolled |= loop_prefetch_arrays (loop);
1975 
1976       if (dump_file && (dump_flags & TDF_DETAILS))
1977 	fprintf (dump_file, "\n\n");
1978     }
1979 
1980   if (unrolled)
1981     {
1982       scev_reset ();
1983       todo_flags |= TODO_cleanup_cfg;
1984     }
1985 
1986   free_original_copy_tables ();
1987   return todo_flags;
1988 }
1989 
1990 /* Prefetching.  */
1991 
1992 namespace {
1993 
1994 const pass_data pass_data_loop_prefetch =
1995 {
1996   GIMPLE_PASS, /* type */
1997   "aprefetch", /* name */
1998   OPTGROUP_LOOP, /* optinfo_flags */
1999   TV_TREE_PREFETCH, /* tv_id */
2000   ( PROP_cfg | PROP_ssa ), /* properties_required */
2001   0, /* properties_provided */
2002   0, /* properties_destroyed */
2003   0, /* todo_flags_start */
2004   0, /* todo_flags_finish */
2005 };
2006 
2007 class pass_loop_prefetch : public gimple_opt_pass
2008 {
2009 public:
2010   pass_loop_prefetch (gcc::context *ctxt)
2011     : gimple_opt_pass (pass_data_loop_prefetch, ctxt)
2012   {}
2013 
2014   /* opt_pass methods: */
2015   virtual bool gate (function *) { return flag_prefetch_loop_arrays > 0; }
2016   virtual unsigned int execute (function *);
2017 
2018 }; // class pass_loop_prefetch
2019 
2020 unsigned int
2021 pass_loop_prefetch::execute (function *fun)
2022 {
2023   if (number_of_loops (fun) <= 1)
2024     return 0;
2025 
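  /* X & (X - 1) clears the lowest set bit of X, so the test below triggers
     exactly when PREFETCH_BLOCK has more than one bit set, i.e. is not a
     power of two.  */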
2026   if ((PREFETCH_BLOCK & (PREFETCH_BLOCK - 1)) != 0)
2027     {
2028       static bool warned = false;
2029 
2030       if (!warned)
2031 	{
2032 	  warning (OPT_Wdisabled_optimization,
2033 		   "%<l1-cache-size%> parameter is not a power of two %d",
2034 		   PREFETCH_BLOCK);
2035 	  warned = true;
2036 	}
2037       return 0;
2038     }
2039 
2040   return tree_ssa_prefetch_arrays ();
2041 }
2042 
2043 } // anon namespace
2044 
2045 gimple_opt_pass *
2046 make_pass_loop_prefetch (gcc::context *ctxt)
2047 {
2048   return new pass_loop_prefetch (ctxt);
2049 }
2050 
2051 
2052