/* Subroutines for the gcc driver.
   Copyright (C) 2006-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
19
20 #define IN_TARGET_CODE 1
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26
27 const char *host_detect_local_cpu (int argc, const char **argv);
28
29 #if defined(__GNUC__) && (__GNUC__ >= 5 || !defined(__PIC__))
30 #include "cpuid.h"
31 #include "common/config/i386/cpuinfo.h"
32 #include "common/config/i386/i386-isas.h"
33
34 struct cache_desc
35 {
36 unsigned sizekb;
37 unsigned assoc;
38 unsigned line;
39 };
40
41 /* Returns command line parameters that describe size and
42 cache line size of the processor caches. */
43
44 static char *
describe_cache(struct cache_desc level1,struct cache_desc level2)45 describe_cache (struct cache_desc level1, struct cache_desc level2)
46 {
47 char size[100], line[100], size2[100];
48
49 /* At the moment, gcc does not use the information
50 about the associativity of the cache. */
51
52 snprintf (size, sizeof (size),
53 "--param l1-cache-size=%u ", level1.sizekb);
54 snprintf (line, sizeof (line),
55 "--param l1-cache-line-size=%u ", level1.line);
56
57 snprintf (size2, sizeof (size2),
58 "--param l2-cache-size=%u ", level2.sizekb);
59
60 return concat (size, line, size2, NULL);
61 }
62
63 /* Detect L2 cache parameters using CPUID extended function 0x80000006. */
64
65 static void
detect_l2_cache(struct cache_desc * level2)66 detect_l2_cache (struct cache_desc *level2)
67 {
68 unsigned eax, ebx, ecx, edx;
69 unsigned assoc;
70
71 __cpuid (0x80000006, eax, ebx, ecx, edx);
72
73 level2->sizekb = (ecx >> 16) & 0xffff;
74 level2->line = ecx & 0xff;
75
76 assoc = (ecx >> 12) & 0xf;
77 if (assoc == 6)
78 assoc = 8;
79 else if (assoc == 8)
80 assoc = 16;
81 else if (assoc >= 0xa && assoc <= 0xc)
82 assoc = 32 + (assoc - 0xa) * 16;
83 else if (assoc >= 0xd && assoc <= 0xe)
84 assoc = 96 + (assoc - 0xd) * 32;
85
86 level2->assoc = assoc;
87 }
88
89 /* Returns the description of caches for an AMD processor. */
90
91 static const char *
detect_caches_amd(unsigned max_ext_level)92 detect_caches_amd (unsigned max_ext_level)
93 {
94 unsigned eax, ebx, ecx, edx;
95
96 struct cache_desc level1, level2 = {0, 0, 0};
97
98 if (max_ext_level < 0x80000005)
99 return "";
100
101 __cpuid (0x80000005, eax, ebx, ecx, edx);
102
103 level1.sizekb = (ecx >> 24) & 0xff;
104 level1.assoc = (ecx >> 16) & 0xff;
105 level1.line = ecx & 0xff;
106
107 if (max_ext_level >= 0x80000006)
108 detect_l2_cache (&level2);
109
110 return describe_cache (level1, level2);
111 }
112
113 /* Decodes the size, the associativity and the cache line size of
114 L1/L2 caches of an Intel processor. Values are based on
115 "Intel Processor Identification and the CPUID Instruction"
116 [Application Note 485], revision -032, December 2007. */
117
118 static void
decode_caches_intel(unsigned reg,bool xeon_mp,struct cache_desc * level1,struct cache_desc * level2)119 decode_caches_intel (unsigned reg, bool xeon_mp,
120 struct cache_desc *level1, struct cache_desc *level2)
121 {
122 int i;
123
124 for (i = 24; i >= 0; i -= 8)
125 switch ((reg >> i) & 0xff)
126 {
127 case 0x0a:
128 level1->sizekb = 8; level1->assoc = 2; level1->line = 32;
129 break;
130 case 0x0c:
131 level1->sizekb = 16; level1->assoc = 4; level1->line = 32;
132 break;
133 case 0x0d:
134 level1->sizekb = 16; level1->assoc = 4; level1->line = 64;
135 break;
136 case 0x0e:
137 level1->sizekb = 24; level1->assoc = 6; level1->line = 64;
138 break;
139 case 0x21:
140 level2->sizekb = 256; level2->assoc = 8; level2->line = 64;
141 break;
142 case 0x24:
143 level2->sizekb = 1024; level2->assoc = 16; level2->line = 64;
144 break;
145 case 0x2c:
146 level1->sizekb = 32; level1->assoc = 8; level1->line = 64;
147 break;
148 case 0x39:
149 level2->sizekb = 128; level2->assoc = 4; level2->line = 64;
150 break;
151 case 0x3a:
152 level2->sizekb = 192; level2->assoc = 6; level2->line = 64;
153 break;
154 case 0x3b:
155 level2->sizekb = 128; level2->assoc = 2; level2->line = 64;
156 break;
157 case 0x3c:
158 level2->sizekb = 256; level2->assoc = 4; level2->line = 64;
159 break;
160 case 0x3d:
161 level2->sizekb = 384; level2->assoc = 6; level2->line = 64;
162 break;
163 case 0x3e:
164 level2->sizekb = 512; level2->assoc = 4; level2->line = 64;
165 break;
166 case 0x41:
167 level2->sizekb = 128; level2->assoc = 4; level2->line = 32;
168 break;
169 case 0x42:
170 level2->sizekb = 256; level2->assoc = 4; level2->line = 32;
171 break;
172 case 0x43:
173 level2->sizekb = 512; level2->assoc = 4; level2->line = 32;
174 break;
175 case 0x44:
176 level2->sizekb = 1024; level2->assoc = 4; level2->line = 32;
177 break;
178 case 0x45:
179 level2->sizekb = 2048; level2->assoc = 4; level2->line = 32;
180 break;
181 case 0x48:
182 level2->sizekb = 3072; level2->assoc = 12; level2->line = 64;
183 break;
184 case 0x49:
185 if (xeon_mp)
186 break;
187 level2->sizekb = 4096; level2->assoc = 16; level2->line = 64;
188 break;
189 case 0x4e:
190 level2->sizekb = 6144; level2->assoc = 24; level2->line = 64;
191 break;
192 case 0x60:
193 level1->sizekb = 16; level1->assoc = 8; level1->line = 64;
194 break;
195 case 0x66:
196 level1->sizekb = 8; level1->assoc = 4; level1->line = 64;
197 break;
198 case 0x67:
199 level1->sizekb = 16; level1->assoc = 4; level1->line = 64;
200 break;
201 case 0x68:
202 level1->sizekb = 32; level1->assoc = 4; level1->line = 64;
203 break;
204 case 0x78:
205 level2->sizekb = 1024; level2->assoc = 4; level2->line = 64;
206 break;
207 case 0x79:
208 level2->sizekb = 128; level2->assoc = 8; level2->line = 64;
209 break;
210 case 0x7a:
211 level2->sizekb = 256; level2->assoc = 8; level2->line = 64;
212 break;
213 case 0x7b:
214 level2->sizekb = 512; level2->assoc = 8; level2->line = 64;
215 break;
216 case 0x7c:
217 level2->sizekb = 1024; level2->assoc = 8; level2->line = 64;
218 break;
219 case 0x7d:
220 level2->sizekb = 2048; level2->assoc = 8; level2->line = 64;
221 break;
222 case 0x7f:
223 level2->sizekb = 512; level2->assoc = 2; level2->line = 64;
224 break;
225 case 0x80:
226 level2->sizekb = 512; level2->assoc = 8; level2->line = 64;
227 break;
228 case 0x82:
229 level2->sizekb = 256; level2->assoc = 8; level2->line = 32;
230 break;
231 case 0x83:
232 level2->sizekb = 512; level2->assoc = 8; level2->line = 32;
233 break;
234 case 0x84:
235 level2->sizekb = 1024; level2->assoc = 8; level2->line = 32;
236 break;
237 case 0x85:
238 level2->sizekb = 2048; level2->assoc = 8; level2->line = 32;
239 break;
240 case 0x86:
241 level2->sizekb = 512; level2->assoc = 4; level2->line = 64;
242 break;
243 case 0x87:
244 level2->sizekb = 1024; level2->assoc = 8; level2->line = 64;
245
246 default:
247 break;
248 }
249 }
250
251 /* Detect cache parameters using CPUID function 2. */
252
253 static void
detect_caches_cpuid2(bool xeon_mp,struct cache_desc * level1,struct cache_desc * level2)254 detect_caches_cpuid2 (bool xeon_mp,
255 struct cache_desc *level1, struct cache_desc *level2)
256 {
257 unsigned regs[4];
258 int nreps, i;
259
260 __cpuid (2, regs[0], regs[1], regs[2], regs[3]);
261
262 nreps = regs[0] & 0x0f;
263 regs[0] &= ~0x0f;
264
265 while (--nreps >= 0)
266 {
267 for (i = 0; i < 4; i++)
268 if (regs[i] && !((regs[i] >> 31) & 1))
269 decode_caches_intel (regs[i], xeon_mp, level1, level2);
270
271 if (nreps)
272 __cpuid (2, regs[0], regs[1], regs[2], regs[3]);
273 }
274 }
275
276 /* Detect cache parameters using CPUID function 4. This
277 method doesn't require hardcoded tables. */
278
279 enum cache_type
280 {
281 CACHE_END = 0,
282 CACHE_DATA = 1,
283 CACHE_INST = 2,
284 CACHE_UNIFIED = 3
285 };
286
287 static void
detect_caches_cpuid4(struct cache_desc * level1,struct cache_desc * level2,struct cache_desc * level3)288 detect_caches_cpuid4 (struct cache_desc *level1, struct cache_desc *level2,
289 struct cache_desc *level3)
290 {
291 struct cache_desc *cache;
292
293 unsigned eax, ebx, ecx, edx;
294 int count;
295
296 for (count = 0;; count++)
297 {
298 __cpuid_count(4, count, eax, ebx, ecx, edx);
299 switch (eax & 0x1f)
300 {
301 case CACHE_END:
302 return;
303 case CACHE_DATA:
304 case CACHE_UNIFIED:
305 {
306 switch ((eax >> 5) & 0x07)
307 {
308 case 1:
309 cache = level1;
310 break;
311 case 2:
312 cache = level2;
313 break;
314 case 3:
315 cache = level3;
316 break;
317 default:
318 cache = NULL;
319 }
320
321 if (cache)
322 {
323 unsigned sets = ecx + 1;
324 unsigned part = ((ebx >> 12) & 0x03ff) + 1;
325
326 cache->assoc = ((ebx >> 22) & 0x03ff) + 1;
327 cache->line = (ebx & 0x0fff) + 1;
328
329 cache->sizekb = (cache->assoc * part
330 * cache->line * sets) / 1024;
331 }
332 }
333 default:
334 break;
335 }
336 }
337 }
338
339 /* Returns the description of caches for an Intel processor. */
340
341 static const char *
detect_caches_intel(bool xeon_mp,unsigned max_level,unsigned max_ext_level,unsigned * l2sizekb)342 detect_caches_intel (bool xeon_mp, unsigned max_level,
343 unsigned max_ext_level, unsigned *l2sizekb)
344 {
345 struct cache_desc level1 = {0, 0, 0}, level2 = {0, 0, 0}, level3 = {0, 0, 0};
346
347 if (max_level >= 4)
348 detect_caches_cpuid4 (&level1, &level2, &level3);
349 else if (max_level >= 2)
350 detect_caches_cpuid2 (xeon_mp, &level1, &level2);
351 else
352 return "";
353
354 if (level1.sizekb == 0)
355 return "";
356
357 /* Let the L3 replace the L2. This assumes inclusive caches
358 and single threaded program for now. */
359 if (level3.sizekb)
360 level2 = level3;
361
362 /* Intel CPUs are equipped with AMD style L2 cache info. Try this
363 method if other methods fail to provide L2 cache parameters. */
364 if (level2.sizekb == 0 && max_ext_level >= 0x80000006)
365 detect_l2_cache (&level2);
366
367 *l2sizekb = level2.sizekb;
368
369 return describe_cache (level1, level2);
370 }
371
372 /* This will be called by the spec parser in gcc.cc when it sees
373 a %:local_cpu_detect(args) construct. Currently it will be
374 called with either "arch [32|64]" or "tune [32|64]" as argument
375 depending on if -march=native or -mtune=native is to be substituted.
376
377 It returns a string containing new command line parameters to be
378 put at the place of the above two options, depending on what CPU
379 this is executed. E.g. "-march=k8" on an AMD64 machine
380 for -march=native.
381
382 ARGC and ARGV are set depending on the actual arguments given
383 in the spec. */
384
host_detect_local_cpu(int argc,const char ** argv)385 const char *host_detect_local_cpu (int argc, const char **argv)
386 {
387 enum processor_type processor = PROCESSOR_I386;
388 const char *cpu = "i386";
389
390 const char *cache = "";
391 const char *options = "";
392
393 unsigned int ebx, ecx, edx;
394
395 unsigned int max_level, ext_level;
396
397 unsigned int vendor;
398 unsigned int model, family;
399
400 bool arch;
401
402 unsigned int l2sizekb = 0;
403
404 if (argc < 2)
405 return NULL;
406
407 arch = !strcmp (argv[0], "arch");
408
409 if (!arch && strcmp (argv[0], "tune"))
410 return NULL;
411
412 bool codegen_x86_64;
413
414 if (!strcmp (argv[1], "32"))
415 codegen_x86_64 = false;
416 else if (!strcmp (argv[1], "64"))
417 codegen_x86_64 = true;
418 else
419 return NULL;
420
421 struct __processor_model cpu_model = { };
422 struct __processor_model2 cpu_model2 = { };
423 unsigned int cpu_features2[SIZE_OF_CPU_FEATURES] = { };
424
425 if (cpu_indicator_init (&cpu_model, &cpu_model2, cpu_features2) != 0)
426 goto done;
427
428 vendor = cpu_model.__cpu_vendor;
429 family = cpu_model2.__cpu_family;
430 model = cpu_model2.__cpu_model;
431 max_level = cpu_model2.__cpu_max_level;
432 ext_level = cpu_model2.__cpu_ext_level;
433
434 if (!arch)
435 {
436 if (vendor == VENDOR_AMD
437 || vendor == VENDOR_CENTAUR
438 || vendor == VENDOR_CYRIX
439 || vendor == VENDOR_NSC)
440 cache = detect_caches_amd (ext_level);
441 else if (vendor == VENDOR_INTEL)
442 {
443 bool xeon_mp = (family == 15 && model == 6);
444 cache = detect_caches_intel (xeon_mp, max_level,
445 ext_level, &l2sizekb);
446 }
447 }
448
449 /* Extended features */
450 #define has_feature(f) \
451 has_cpu_feature (&cpu_model, cpu_features2, f)
452
453 if (vendor == VENDOR_AMD)
454 {
455 unsigned int name;
456
457 /* Detect geode processor by its processor signature. */
458 if (ext_level >= 0x80000002)
459 __cpuid (0x80000002, name, ebx, ecx, edx);
460 else
461 name = 0;
462
463 if (name == signature_NSC_ebx)
464 processor = PROCESSOR_GEODE;
465 else if (has_feature (FEATURE_MOVBE) && family == 22)
466 processor = PROCESSOR_BTVER2;
467 else if (has_feature (FEATURE_AVX512F))
468 processor = PROCESSOR_ZNVER4;
469 else if (has_feature (FEATURE_VAES))
470 processor = PROCESSOR_ZNVER3;
471 else if (has_feature (FEATURE_CLWB))
472 processor = PROCESSOR_ZNVER2;
473 else if (has_feature (FEATURE_CLZERO))
474 processor = PROCESSOR_ZNVER1;
475 else if (has_feature (FEATURE_AVX2))
476 processor = PROCESSOR_BDVER4;
477 else if (has_feature (FEATURE_XSAVEOPT))
478 processor = PROCESSOR_BDVER3;
479 else if (has_feature (FEATURE_BMI))
480 processor = PROCESSOR_BDVER2;
481 else if (has_feature (FEATURE_XOP))
482 processor = PROCESSOR_BDVER1;
483 else if (has_feature (FEATURE_SSE4_A)
484 && has_feature (FEATURE_SSSE3))
485 processor = PROCESSOR_BTVER1;
486 else if (has_feature (FEATURE_SSE4_A))
487 processor = PROCESSOR_AMDFAM10;
488 else if (has_feature (FEATURE_SSE2)
489 || has_feature (FEATURE_LM))
490 processor = PROCESSOR_K8;
491 else if (has_feature (FEATURE_3DNOWP) && family == 6)
492 processor = PROCESSOR_ATHLON;
493 else if (has_feature (FEATURE_MMX))
494 processor = PROCESSOR_K6;
495 else
496 processor = PROCESSOR_PENTIUM;
497 }
498 else if (vendor == VENDOR_CENTAUR)
499 {
500 processor = PROCESSOR_GENERIC;
501
502 switch (family)
503 {
504 default:
505 /* We have no idea. */
506 break;
507
508 case 5:
509 if (has_feature (FEATURE_3DNOW)
510 || has_feature (FEATURE_MMX))
511 processor = PROCESSOR_I486;
512 break;
513
514 case 6:
515 if (has_feature (FEATURE_LM))
516 processor = PROCESSOR_K8;
517 else if (model >= 9)
518 processor = PROCESSOR_PENTIUMPRO;
519 else if (model >= 6)
520 processor = PROCESSOR_I486;
521 }
522 }
523 else
524 {
525 switch (family)
526 {
527 case 4:
528 processor = PROCESSOR_I486;
529 break;
530 case 5:
531 processor = PROCESSOR_PENTIUM;
532 break;
533 case 6:
534 processor = PROCESSOR_PENTIUMPRO;
535 break;
536 case 15:
537 processor = PROCESSOR_PENTIUM4;
538 break;
539 default:
540 /* We have no idea. */
541 processor = PROCESSOR_GENERIC;
542 }
543 }
544
545 switch (processor)
546 {
547 case PROCESSOR_I386:
548 /* Default. */
549 break;
550 case PROCESSOR_I486:
551 if (arch && vendor == VENDOR_CENTAUR)
552 {
553 if (model >= 6)
554 cpu = "c3";
555 else if (has_feature (FEATURE_3DNOW))
556 cpu = "winchip2";
557 else
558 /* Assume WinChip C6. */
559 cpu = "winchip-c6";
560 }
561 else
562 cpu = "i486";
563 break;
564 case PROCESSOR_PENTIUM:
565 if (arch && has_feature (FEATURE_MMX))
566 cpu = "pentium-mmx";
567 else
568 cpu = "pentium";
569 break;
570 case PROCESSOR_PENTIUMPRO:
571 cpu = get_intel_cpu (&cpu_model, &cpu_model2, cpu_features2);
572 if (cpu == NULL)
573 {
574 if (arch)
575 {
576 /* This is unknown family 0x6 CPU. */
577 if (has_feature (FEATURE_AVX))
578 {
579 /* Assume Tiger Lake */
580 if (has_feature (FEATURE_AVX512VP2INTERSECT))
581 cpu = "tigerlake";
582 /* Assume Sapphire Rapids. */
583 else if (has_feature (FEATURE_TSXLDTRK))
584 cpu = "sapphirerapids";
585 /* Assume Cooper Lake */
586 else if (has_feature (FEATURE_AVX512BF16))
587 cpu = "cooperlake";
588 /* Assume Ice Lake Server. */
589 else if (has_feature (FEATURE_WBNOINVD))
590 cpu = "icelake-server";
591 /* Assume Ice Lake. */
592 else if (has_feature (FEATURE_AVX512BITALG))
593 cpu = "icelake-client";
594 /* Assume Cannon Lake. */
595 else if (has_feature (FEATURE_AVX512VBMI))
596 cpu = "cannonlake";
597 /* Assume Knights Mill. */
598 else if (has_feature (FEATURE_AVX5124VNNIW))
599 cpu = "knm";
600 /* Assume Knights Landing. */
601 else if (has_feature (FEATURE_AVX512ER))
602 cpu = "knl";
603 /* Assume Skylake with AVX-512. */
604 else if (has_feature (FEATURE_AVX512F))
605 cpu = "skylake-avx512";
606 /* Assume Alder Lake */
607 else if (has_feature (FEATURE_SERIALIZE))
608 cpu = "alderlake";
609 /* Assume Skylake. */
610 else if (has_feature (FEATURE_CLFLUSHOPT))
611 cpu = "skylake";
612 /* Assume Broadwell. */
613 else if (has_feature (FEATURE_ADX))
614 cpu = "broadwell";
615 else if (has_feature (FEATURE_AVX2))
616 /* Assume Haswell. */
617 cpu = "haswell";
618 else
619 /* Assume Sandy Bridge. */
620 cpu = "sandybridge";
621 }
622 else if (has_feature (FEATURE_SSE4_2))
623 {
624 if (has_feature (FEATURE_GFNI))
625 /* Assume Tremont. */
626 cpu = "tremont";
627 else if (has_feature (FEATURE_SGX))
628 /* Assume Goldmont Plus. */
629 cpu = "goldmont-plus";
630 else if (has_feature (FEATURE_XSAVE))
631 /* Assume Goldmont. */
632 cpu = "goldmont";
633 else if (has_feature (FEATURE_MOVBE))
634 /* Assume Silvermont. */
635 cpu = "silvermont";
636 else
637 /* Assume Nehalem. */
638 cpu = "nehalem";
639 }
640 else if (has_feature (FEATURE_SSSE3))
641 {
642 if (has_feature (FEATURE_MOVBE))
643 /* Assume Bonnell. */
644 cpu = "bonnell";
645 else
646 /* Assume Core 2. */
647 cpu = "core2";
648 }
649 else if (has_feature (FEATURE_LM))
650 /* Perhaps some emulator? Assume x86-64, otherwise gcc
651 -march=native would be unusable for 64-bit compilations,
652 as all the CPUs below are 32-bit only. */
653 cpu = "x86-64";
654 else if (has_feature (FEATURE_SSE3))
655 {
656 if (vendor == VENDOR_CENTAUR)
657 /* C7 / Eden "Esther" */
658 cpu = "c7";
659 else
660 /* It is Core Duo. */
661 cpu = "pentium-m";
662 }
663 else if (has_feature (FEATURE_SSE2))
664 /* It is Pentium M. */
665 cpu = "pentium-m";
666 else if (has_feature (FEATURE_SSE))
667 {
668 if (vendor == VENDOR_CENTAUR)
669 {
670 if (model >= 9)
671 /* Eden "Nehemiah" */
672 cpu = "nehemiah";
673 else
674 cpu = "c3-2";
675 }
676 else
677 /* It is Pentium III. */
678 cpu = "pentium3";
679 }
680 else if (has_feature (FEATURE_MMX))
681 /* It is Pentium II. */
682 cpu = "pentium2";
683 else
684 /* Default to Pentium Pro. */
685 cpu = "pentiumpro";
686 }
687 else
688 /* For -mtune, we default to -mtune=generic. */
689 cpu = "generic";
690 }
691 break;
692 case PROCESSOR_PENTIUM4:
693 if (has_feature (FEATURE_SSE3))
694 {
695 if (has_feature (FEATURE_LM))
696 cpu = "nocona";
697 else
698 cpu = "prescott";
699 }
700 else
701 cpu = "pentium4";
702 break;
703 case PROCESSOR_GEODE:
704 cpu = "geode";
705 break;
706 case PROCESSOR_K6:
707 if (arch && has_feature (FEATURE_3DNOW))
708 cpu = "k6-3";
709 else
710 cpu = "k6";
711 break;
712 case PROCESSOR_ATHLON:
713 if (arch && has_feature (FEATURE_SSE))
714 cpu = "athlon-4";
715 else
716 cpu = "athlon";
717 break;
718 case PROCESSOR_K8:
719 if (arch)
720 {
721 if (vendor == VENDOR_CENTAUR)
722 {
723 if (has_feature (FEATURE_SSE4_1))
724 /* Nano 3000 | Nano dual / quad core | Eden X4 */
725 cpu = "nano-3000";
726 else if (has_feature (FEATURE_SSSE3))
727 /* Nano 1000 | Nano 2000 */
728 cpu = "nano";
729 else if (has_feature (FEATURE_SSE3))
730 /* Eden X2 */
731 cpu = "eden-x2";
732 else
733 /* Default to k8 */
734 cpu = "k8";
735 }
736 else if (has_feature (FEATURE_SSE3))
737 cpu = "k8-sse3";
738 else
739 cpu = "k8";
740 }
741 else
742 /* For -mtune, we default to -mtune=k8 */
743 cpu = "k8";
744 break;
745 case PROCESSOR_AMDFAM10:
746 cpu = "amdfam10";
747 break;
748 case PROCESSOR_BDVER1:
749 cpu = "bdver1";
750 break;
751 case PROCESSOR_BDVER2:
752 cpu = "bdver2";
753 break;
754 case PROCESSOR_BDVER3:
755 cpu = "bdver3";
756 break;
757 case PROCESSOR_BDVER4:
758 cpu = "bdver4";
759 break;
760 case PROCESSOR_ZNVER1:
761 cpu = "znver1";
762 break;
763 case PROCESSOR_ZNVER2:
764 cpu = "znver2";
765 break;
766 case PROCESSOR_ZNVER3:
767 cpu = "znver3";
768 break;
769 case PROCESSOR_ZNVER4:
770 cpu = "znver4";
771 break;
772 case PROCESSOR_BTVER1:
773 cpu = "btver1";
774 break;
775 case PROCESSOR_BTVER2:
776 cpu = "btver2";
777 break;
778
779 default:
780 /* Use something reasonable. */
781 if (arch)
782 {
783 if (has_feature (FEATURE_SSSE3))
784 cpu = "core2";
785 else if (has_feature (FEATURE_SSE3))
786 {
787 if (has_feature (FEATURE_LM))
788 cpu = "nocona";
789 else
790 cpu = "prescott";
791 }
792 else if (has_feature (FEATURE_LM))
793 /* Perhaps some emulator? Assume x86-64, otherwise gcc
794 -march=native would be unusable for 64-bit compilations,
795 as all the CPUs below are 32-bit only. */
796 cpu = "x86-64";
797 else if (has_feature (FEATURE_SSE2))
798 cpu = "pentium4";
799 else if (has_feature (FEATURE_CMOV))
800 cpu = "pentiumpro";
801 else if (has_feature (FEATURE_MMX))
802 cpu = "pentium-mmx";
803 else if (has_feature (FEATURE_CMPXCHG8B))
804 cpu = "pentium";
805 }
806 else
807 cpu = "generic";
808 }
809
810 if (arch)
811 {
812 unsigned int i;
813 const char *const neg_option = " -mno-";
814 for (i = 0; i < ARRAY_SIZE (isa_names_table); i++)
815 if (isa_names_table[i].option)
816 {
817 if (has_feature (isa_names_table[i].feature))
818 {
819 if (codegen_x86_64
820 || isa_names_table[i].feature != FEATURE_UINTR)
821 options = concat (options, " ",
822 isa_names_table[i].option, NULL);
823 }
824 else
825 options = concat (options, neg_option,
826 isa_names_table[i].option + 2, NULL);
827 }
828 }
829
830 done:
831 return concat (cache, "-m", argv[0], "=", cpu, options, NULL);
832 }
833 #else
834
835 /* If we are compiling with GCC where %EBX register is fixed, then the
836 driver will just ignore -march and -mtune "native" target and will leave
837 to the newly built compiler to generate code for its default target. */
838
host_detect_local_cpu(int,const char **)839 const char *host_detect_local_cpu (int, const char **)
840 {
841 return NULL;
842 }
843 #endif /* __GNUC__ */
844