/* Subroutines used for code generation for eBPF.
   Copyright (C) 2019-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "recog.h"
#include "output.h"
#include "alias.h"
#include "tree.h"
#include "stringpool.h"
#include "attribs.h"
#include "varasm.h"
#include "stor-layout.h"
#include "calls.h"
#include "function.h"
#include "explow.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "basic-block.h"
#include "expr.h"
#include "optabs.h"
#include "bitmap.h"
#include "df.h"
#include "c-family/c-common.h"
#include "diagnostic.h"
#include "builtins.h"
#include "predict.h"
#include "langhooks.h"
#include "flags.h"

#include "cfg.h" /* needed for struct control_flow_graph used in BB macros */
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-pass.h"
#include "tree-iterator.h"

#include "context.h"
#include "pass_manager.h"

#include "gimplify.h"
#include "gimplify-me.h"

#include "ctfc.h"
#include "btf.h"

#include "coreout.h"

/* Per-function machine data.  */
struct GTY(()) machine_function
{
  /* Number of bytes saved on the stack for local variables.  */
  int local_vars_size;

  /* Number of bytes saved on the stack for callee-saved
     registers.  */
  int callee_saved_reg_size;
};

/* Handle an attribute requiring a FUNCTION_DECL;
   arguments as in struct attribute_spec.handler.  */

static tree
bpf_handle_fndecl_attribute (tree *node, tree name,
                             tree args,
                             int flags ATTRIBUTE_UNUSED,
                             bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  if (is_attribute_p ("kernel_helper", name))
    {
      if (args)
        {
          tree cst = TREE_VALUE (args);
          if (TREE_CODE (cst) != INTEGER_CST)
            {
              warning (OPT_Wattributes, "%qE attribute requires an integer argument",
                       name);
              *no_add_attrs = true;
            }
        }
      else
        {
          warning (OPT_Wattributes, "%qE requires an argument", name);
          *no_add_attrs = true;
        }
    }

  return NULL_TREE;
}

/* Handle preserve_access_index attribute, which can be applied to structs,
   unions and classes.  Actually adding the attribute to the TYPE_DECL is
   taken care of for us, so just warn for types that aren't supported.  */

static tree
bpf_handle_preserve_access_index_attribute (tree *node, tree name,
                                            tree args ATTRIBUTE_UNUSED,
                                            int flags ATTRIBUTE_UNUSED,
                                            bool *no_add_attrs)
{
  if (TREE_CODE (*node) != RECORD_TYPE && TREE_CODE (*node) != UNION_TYPE)
    {
      warning (OPT_Wattributes,
               "%qE attribute only applies to structure, union and class types",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

/* Target-specific attributes.  */

static const struct attribute_spec bpf_attribute_table[] =
{
  /* Syntax: { name, min_len, max_len, decl_required, type_required,
               function_type_required, affects_type_identity, handler,
               exclude } */

  /* Attribute to mark function prototypes as kernel helpers.  */
  { "kernel_helper", 1, 1, true, false, false, false,
    bpf_handle_fndecl_attribute, NULL },

  /* CO-RE support: attribute to mark that all accesses to the declared
     struct/union/array should be recorded.  */
  { "preserve_access_index", 0, -1, false, true, false, true,
    bpf_handle_preserve_access_index_attribute, NULL },

  /* The last attribute spec is set to be NULL.  */
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE bpf_attribute_table
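
/* As an illustration (not part of the compiler proper), the two
   attributes above are meant to be used in BPF programs along these
   lines; the helper name and number here are hypothetical:

     void *map_lookup (void *map, const void *key)
       __attribute__ ((kernel_helper (1)));

     struct event
     {
       int kind;
       long payload;
     } __attribute__ ((preserve_access_index));

   Calls to map_lookup are then emitted as 'call 1' (see
   bpf_output_call below), while accesses to the fields of struct
   event get CO-RE relocations recorded for them (see
   pass_bpf_core_attr at the end of this file).  */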

/* Data structures for the eBPF specific built-ins.  */

/* Maximum number of arguments taken by a builtin function, plus
   one.  */
#define BPF_BUILTIN_MAX_ARGS 5

enum bpf_builtins
{
  BPF_BUILTIN_UNUSED = 0,
  /* Built-ins for non-generic loads and stores.  */
  BPF_BUILTIN_LOAD_BYTE,
  BPF_BUILTIN_LOAD_HALF,
  BPF_BUILTIN_LOAD_WORD,

  /* Compile Once - Run Everywhere (CO-RE) support.  */
  BPF_BUILTIN_PRESERVE_ACCESS_INDEX,

  BPF_BUILTIN_MAX,
};

static GTY (()) tree bpf_builtins[(int) BPF_BUILTIN_MAX];


void bpf_register_coreattr_pass (void);

/* Initialize the per-function machine status.  */

static struct machine_function *
bpf_init_machine_status (void)
{
  /* Note this initializes all fields to 0, which is just OK for
     us.  */
  return ggc_cleared_alloc<machine_function> ();
}

/* Override options and do some other initialization.  */

static void
bpf_option_override (void)
{
  /* Set the initializer for the per-function status structure.  */
  init_machine_status = bpf_init_machine_status;

  /* BPF CO-RE support requires BTF debug info generation.  */
  if (TARGET_BPF_CORE && !btf_debuginfo_p ())
    error ("BPF CO-RE requires BTF debugging information, use %<-gbtf%>");

  /* To support the portability needs of BPF CO-RE approach, BTF debug
     information includes the BPF CO-RE relocations.  */
  if (TARGET_BPF_CORE)
    write_symbols |= BTF_WITH_CORE_DEBUG;

  /* Unlike much of the other BTF debug information, the information necessary
     for CO-RE relocations is added to the CTF container by the BPF backend.
     Enabling LTO adds some complications in the generation of the BPF CO-RE
     relocations because if LTO is in effect, the relocations need to be
     generated late in the LTO link phase.  This poses a new challenge for the
     compiler to now provide means to combine the early BTF and late BTF CO-RE
     debug info, similar to DWARF debug info.  BTF/CO-RE debug info is not
     amenable to such a split generation and a later merging.

     In any case, in absence of linker support for BTF sections at this time,
     it is acceptable to simply disallow LTO for BPF CO-RE compilations.  */

  if (flag_lto && TARGET_BPF_CORE)
    sorry ("BPF CO-RE does not support LTO");
  /* -gbtf implies -mco-re when using the BPF backend, unless -mno-co-re
     is specified.  */
  if (btf_debuginfo_p () && !(target_flags_explicit & MASK_BPF_CORE))
    {
      target_flags |= MASK_BPF_CORE;
      write_symbols |= BTF_WITH_CORE_DEBUG;
    }

  /* Determine available features from ISA setting (-mcpu=).  */
  if (bpf_has_jmpext == -1)
    bpf_has_jmpext = (bpf_isa >= ISA_V2);

  if (bpf_has_alu32 == -1)
    bpf_has_alu32 = (bpf_isa >= ISA_V3);

  if (bpf_has_jmp32 == -1)
    bpf_has_jmp32 = (bpf_isa >= ISA_V3);
}

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE bpf_option_override
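
/* To illustrate the combined effect of the checks above (the target
   triplet is an assumption, and the diagnostics are abbreviated):

     bpf-unknown-none-gcc -gbtf p.c             BTF; -mco-re implied
     bpf-unknown-none-gcc -gbtf -mno-co-re p.c  BTF without CO-RE
     bpf-unknown-none-gcc -mco-re p.c           error: requires -gbtf
     bpf-unknown-none-gcc -gbtf -flto p.c       sorry: no LTO with CO-RE  */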

/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
bpf_asm_init_sections (void)
{
  if (TARGET_BPF_CORE)
    btf_ext_init ();
}

#undef TARGET_ASM_INIT_SECTIONS
#define TARGET_ASM_INIT_SECTIONS bpf_asm_init_sections

/* Implement TARGET_ASM_FILE_END.  */

static void
bpf_file_end (void)
{
  if (TARGET_BPF_CORE)
    btf_ext_output ();
}

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END bpf_file_end

/* Define target-specific CPP macros.  This function is used in the
   definition of TARGET_CPU_CPP_BUILTINS in bpf.h.  */

#define builtin_define(TXT) cpp_define (pfile, TXT)

void
bpf_target_macros (cpp_reader *pfile)
{
  builtin_define ("__BPF__");

  if (TARGET_BIG_ENDIAN)
    builtin_define ("__BPF_BIG_ENDIAN__");
  else
    builtin_define ("__BPF_LITTLE_ENDIAN__");

  /* Define __BPF_KERNEL_VERSION_CODE__.  */
  {
    const char *version_code;
    char *kernel_version_code;

    switch (bpf_kernel)
      {
      case LINUX_V4_0: version_code = "0x40000"; break;
      case LINUX_V4_1: version_code = "0x40100"; break;
      case LINUX_V4_2: version_code = "0x40200"; break;
      case LINUX_V4_3: version_code = "0x40300"; break;
      case LINUX_V4_4: version_code = "0x40400"; break;
      case LINUX_V4_5: version_code = "0x40500"; break;
      case LINUX_V4_6: version_code = "0x40600"; break;
      case LINUX_V4_7: version_code = "0x40700"; break;
      case LINUX_V4_8: version_code = "0x40800"; break;
      case LINUX_V4_9: version_code = "0x40900"; break;
      case LINUX_V4_10: version_code = "0x40a00"; break;
      case LINUX_V4_11: version_code = "0x40b00"; break;
      case LINUX_V4_12: version_code = "0x40c00"; break;
      case LINUX_V4_13: version_code = "0x40d00"; break;
      case LINUX_V4_14: version_code = "0x40e00"; break;
      case LINUX_V4_15: version_code = "0x40f00"; break;
      case LINUX_V4_16: version_code = "0x41000"; break;
      case LINUX_V4_17: version_code = "0x41100"; break;
      case LINUX_V4_18: version_code = "0x41200"; break;
      case LINUX_V4_19: version_code = "0x41300"; break;
      case LINUX_V4_20: version_code = "0x41400"; break;
      case LINUX_V5_0: version_code = "0x50000"; break;
      case LINUX_V5_1: version_code = "0x50100"; break;
      case LINUX_V5_2: version_code = "0x50200"; break;
      default:
        gcc_unreachable ();
      }

    kernel_version_code = ACONCAT (("__BPF_KERNEL_VERSION_CODE__=",
                                    version_code, NULL));
    builtin_define (kernel_version_code);
  }
}
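
/* Thus, for instance, compiling with -mkernel=4.14 makes the
   preprocessor see __BPF_KERNEL_VERSION_CODE__=0x40e00, and a
   program can conditionalize on it (illustrative sketch):

     #if __BPF_KERNEL_VERSION_CODE__ >= 0x40e00
        ... code relying on a kernel >= 4.14 ...
     #endif  */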

/* Return an RTX representing the place where a function returns or
   receives a value of data type RET_TYPE, a tree node representing a
   data type.  */

static rtx
bpf_function_value (const_tree ret_type,
                    const_tree fntype_or_decl,
                    bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  int unsignedp;

  mode = TYPE_MODE (ret_type);
  if (INTEGRAL_TYPE_P (ret_type))
    mode = promote_function_mode (ret_type, mode, &unsignedp,
                                  fntype_or_decl, 1);

  return gen_rtx_REG (mode, BPF_R0);
}

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE bpf_function_value

/* Return true if REGNO is the number of a hard register in which the
   value of a called function may come back.  */

static bool
bpf_function_value_regno_p (const unsigned int regno)
{
  return (regno == BPF_R0);
}

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P bpf_function_value_regno_p

/* Compute the size of the function's stack frame, including the local
   area and the register-save area.  */

static void
bpf_compute_frame_layout (void)
{
  int stack_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
  int padding_locals, regno;

  /* Set the space used in the stack by local variables.  This is
     rounded up to respect the minimum stack alignment.  */
  cfun->machine->local_vars_size = get_frame_size ();

  padding_locals = cfun->machine->local_vars_size % stack_alignment;
  if (padding_locals)
    padding_locals = stack_alignment - padding_locals;

  cfun->machine->local_vars_size += padding_locals;

  if (TARGET_XBPF)
    {
      /* Set the space used in the stack by the callee-saved registers
         used in the current function.  There is no need to round up,
         since the registers are all 8 bytes wide.  */
      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
        if ((df_regs_ever_live_p (regno)
             && !call_used_or_fixed_reg_p (regno))
            || (cfun->calls_alloca
                && regno == STACK_POINTER_REGNUM))
          cfun->machine->callee_saved_reg_size += 8;
    }

  /* Check that the total size of the frame doesn't exceed the limit
     imposed by eBPF.  */
  if ((cfun->machine->local_vars_size
       + cfun->machine->callee_saved_reg_size) > bpf_frame_limit)
    {
      static int stack_limit_exceeded = 0;

      if (!stack_limit_exceeded)
        error ("eBPF stack limit exceeded");
      stack_limit_exceeded = 1;
    }
}

#undef TARGET_COMPUTE_FRAME_LAYOUT
#define TARGET_COMPUTE_FRAME_LAYOUT bpf_compute_frame_layout
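
/* As a worked example of the rounding above: with a stack alignment
   of 8 bytes and 13 bytes of local variables,

     padding_locals  = 8 - (13 % 8) = 3
     local_vars_size = 13 + 3 = 16

   so the locals area always ends on an alignment boundary, and the
   register-save area (if any) starts aligned too.  */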

/* Expand to the instructions in a function prologue.  This function
   is called when expanding the 'prologue' pattern in bpf.md.  */

void
bpf_expand_prologue (void)
{
  rtx insn;
  HOST_WIDE_INT size;

  size = (cfun->machine->local_vars_size
          + cfun->machine->callee_saved_reg_size);

  /* The BPF "hardware" provides a fresh new set of registers for each
     called function, some of which are initialized to the values of
     the arguments passed in the first five registers.  In doing so,
     it saves the values of the registers of the caller, and restores
     them upon returning.  Therefore, there is no need to save the
     callee-saved registers here.  What is worse, the kernel
     implementation refuses to run programs in which registers are
     referred to before being initialized.  */
  if (TARGET_XBPF)
    {
      int regno;
      int fp_offset = -cfun->machine->local_vars_size;

      /* Save callee-saved hard registers.  The register-save-area
         starts right after the local variables.  */
      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
        {
          if ((df_regs_ever_live_p (regno)
               && !call_used_or_fixed_reg_p (regno))
              || (cfun->calls_alloca
                  && regno == STACK_POINTER_REGNUM))
            {
              rtx mem;

              if (!IN_RANGE (fp_offset, -1 - 0x7fff, 0x7fff))
                /* This has been already reported as an error in
                   bpf_compute_frame_layout.  */
                break;
              else
                {
                  mem = gen_frame_mem (DImode,
                                       plus_constant (DImode,
                                                      hard_frame_pointer_rtx,
                                                      fp_offset - 8));
                  insn = emit_move_insn (mem, gen_rtx_REG (DImode, regno));
                  fp_offset -= 8;
                }
            }
        }
    }

  /* Set the stack pointer, if the function allocates space
     dynamically.  Note that the value of %sp should be directly
     derived from %fp, for the kernel verifier to track it as a stack
     accessor.  */
  if (cfun->calls_alloca)
    {
      insn = emit_move_insn (stack_pointer_rtx,
                             hard_frame_pointer_rtx);

      if (size > 0)
        {
          insn = emit_insn (gen_rtx_SET (stack_pointer_rtx,
                                         gen_rtx_PLUS (Pmode,
                                                       stack_pointer_rtx,
                                                       GEN_INT (-size))));
        }
    }
}

/* Expand to the instructions in a function epilogue.  This function
   is called when expanding the 'epilogue' pattern in bpf.md.  */

void
bpf_expand_epilogue (void)
{
  /* See note in bpf_expand_prologue for an explanation on why we are
     not restoring callee-saved registers in BPF.  */
  if (TARGET_XBPF)
    {
      rtx insn;
      int regno;
      int fp_offset = -cfun->machine->local_vars_size;

      /* Restore callee-saved hard registers from the stack.  */
      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
        {
          if ((df_regs_ever_live_p (regno)
               && !call_used_or_fixed_reg_p (regno))
              || (cfun->calls_alloca
                  && regno == STACK_POINTER_REGNUM))
            {
              rtx mem;

              if (!IN_RANGE (fp_offset, -1 - 0x7fff, 0x7fff))
                /* This has been already reported as an error in
                   bpf_compute_frame_layout.  */
                break;
              else
                {
                  mem = gen_frame_mem (DImode,
                                       plus_constant (DImode,
                                                      hard_frame_pointer_rtx,
                                                      fp_offset - 8));
                  insn = emit_move_insn (gen_rtx_REG (DImode, regno), mem);
                  fp_offset -= 8;
                }
            }
        }
    }

  emit_jump_insn (gen_exit ());
}

/* Expand to the instructions for a conditional branch.  This function
   is called when expanding the 'cbranch<mode>4' pattern in bpf.md.  */

void
bpf_expand_cbranch (machine_mode mode, rtx *operands)
{
  /* If all jump instructions are available, nothing special to do here.  */
  if (bpf_has_jmpext)
    return;

  enum rtx_code code = GET_CODE (operands[0]);

  /* Without the conditional branch instructions jslt, jsle, jlt, jle, we need
     to convert conditional branches that would use them to an available
     operation instead, by swapping the operands of the comparison.  */
  if ((code == LT || code == LE || code == LTU || code == LEU))
    {
      /* Swap the comparison code to match the swapped operands.  */
      PUT_CODE (operands[0], swap_condition (code));

      /* Swap the operands, and ensure that the first is a register.  */
      if (!register_operand (operands[2], mode))
        operands[2] = force_reg (mode, operands[2]);

      rtx tmp = operands[1];
      operands[1] = operands[2];
      operands[2] = tmp;
    }
}
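
/* For example, when targeting an ISA level without jslt, a branch

     if (a < b) goto L;    i.e. (lt a b)

   is rewritten by the function above into the equivalent

     if (b > a) goto L;    i.e. (gt b a)

   which can be output using the always-available jsgt instruction.  */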

/* Return the initial difference between the specified pair of
   registers.  The registers that can figure in FROM and TO are
   specified by ELIMINABLE_REGS in bpf.h.

   This function is used in the definition of
   INITIAL_ELIMINATION_OFFSET in bpf.h.  */

HOST_WIDE_INT
bpf_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT ret;

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    ret = (cfun->machine->local_vars_size
           + cfun->machine->callee_saved_reg_size);
  else if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    ret = 0;
  else
    gcc_unreachable ();

  return ret;
}

/* Return the number of consecutive hard registers, starting at
   register number REGNO, required to hold a value of mode MODE.  */

static unsigned int
bpf_hard_regno_nregs (unsigned int regno ATTRIBUTE_UNUSED,
                      enum machine_mode mode)
{
  return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
}

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS bpf_hard_regno_nregs

/* Return true if it is permissible to store a value of mode MODE in
   hard register number REGNO, or in several registers starting with
   that one.  */

static bool
bpf_hard_regno_mode_ok (unsigned int regno ATTRIBUTE_UNUSED,
                        enum machine_mode mode)
{
  switch (mode)
    {
    case E_SImode:
    case E_DImode:
    case E_HImode:
    case E_QImode:
    case E_TImode:
    case E_SFmode:
    case E_DFmode:
      return true;
    default:
      return false;
    }
}

#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK bpf_hard_regno_mode_ok

/* Return true if a function must have and use a frame pointer.  */

static bool
bpf_frame_pointer_required (void)
{
  /* We do not have a stack pointer, so we absolutely depend on the
     frame-pointer in order to access the stack... and fishes walk and
     pigs fly glglgl */
  return true;
}

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED bpf_frame_pointer_required

/* Return `true' if the given RTX X is a valid base for an indirect
   memory access.  STRICT has the same meaning as in
   bpf_legitimate_address_p.  */

static inline bool
bpf_address_base_p (rtx x, bool strict)
{
  return (GET_CODE (x) == REG
          && (REGNO (x) < 11
              || (!strict && REGNO (x) >= FIRST_PSEUDO_REGISTER)));
}

/* Return true if X (an RTX) is a legitimate memory address on the
   target machine for a memory operand of mode MODE.  */

static bool
bpf_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
                          rtx x,
                          bool strict)
{
  switch (GET_CODE (x))
    {
    case REG:
      return bpf_address_base_p (x, strict);

    case PLUS:
      {
        /* Accept (PLUS ADDR_BASE CONST_INT), provided CONST_INT fits
           in a signed 16-bit.

           Note that LABEL_REF and SYMBOL_REF are not allowed in
           REG+IMM addresses, because it is almost certain they will
           overflow the offset field.  */

        rtx x0 = XEXP (x, 0);
        rtx x1 = XEXP (x, 1);

        if (bpf_address_base_p (x0, strict) && GET_CODE (x1) == CONST_INT)
          return IN_RANGE (INTVAL (x1), -1 - 0x7fff, 0x7fff);

        break;
      }
    default:
      break;
    }

  return false;
}

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P bpf_legitimate_address_p
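
/* For instance, the predicate above accepts RTL addresses like

     (reg:DI 1)
     (plus:DI (reg:DI 1) (const_int 8))

   and rejects, among others,

     (plus:DI (reg:DI 1) (const_int 40000))      offset out of range
     (plus:DI (reg:DI 1) (symbol_ref:DI ("x")))  would overflow offset  */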

/* Describe the relative costs of RTL expressions.  Return true when
   all subexpressions of X have been processed, and false when
   `rtx_cost' should recurse.  */

static bool
bpf_rtx_costs (rtx x ATTRIBUTE_UNUSED,
               enum machine_mode mode ATTRIBUTE_UNUSED,
               int outer_code ATTRIBUTE_UNUSED,
               int opno ATTRIBUTE_UNUSED,
               int *total ATTRIBUTE_UNUSED,
               bool speed ATTRIBUTE_UNUSED)
{
  /* To be written.  */
  return false;
}

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS bpf_rtx_costs

/* Return true if an argument at the position indicated by CUM should
   be passed by reference.  If the hook returns true, a copy of that
   argument is made in memory and a pointer to the argument is passed
   instead of the argument itself.  */

static bool
bpf_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
                       const function_arg_info &arg)
{
  unsigned num_bytes = arg.type_size_in_bytes ();

  /* Pass aggregates and values bigger than 5 words by reference.
     Everything else is passed by copy.  */
  return (arg.aggregate_type_p () || (num_bytes > 8*5));
}

#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE bpf_pass_by_reference
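
/* For instance, given (an illustrative sketch)

     struct stats { long rx, tx; };
     void f (struct stats s, unsigned long long x);

   's' is passed by reference because it is an aggregate, even though
   it would fit in two words, while 'x' is passed by copy in a
   register.  */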

/* Return an RTX indicating whether a function argument is passed in a
   register and if so, which register.  */

static rtx
bpf_function_arg (cumulative_args_t ca, const function_arg_info &arg)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);

  if (*cum < 5)
    return gen_rtx_REG (arg.mode, *cum + 1);
  else
    /* An error will be emitted for this in
       bpf_function_arg_advance.  */
    return NULL_RTX;
}

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG bpf_function_arg

/* Update the summarizer variable pointed to by CA to advance past an
   argument in the argument list.  */

static void
bpf_function_arg_advance (cumulative_args_t ca,
                          const function_arg_info &arg)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
  unsigned num_bytes = arg.type_size_in_bytes ();
  unsigned num_words = CEIL (num_bytes, UNITS_PER_WORD);

  if (*cum <= 5 && *cum + num_words > 5)
    error ("too many function arguments for eBPF");

  *cum += num_words;
}

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE bpf_function_arg_advance

/* Output the assembly code for a constructor.  Since eBPF doesn't
   support indirect calls, constructors are not supported.  */

static void
bpf_output_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  tree decl = SYMBOL_REF_DECL (symbol);

  if (decl)
    sorry_at (DECL_SOURCE_LOCATION (decl),
              "no constructors");
  else
    sorry ("no constructors");
}

#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR bpf_output_constructor

/* Output the assembly code for a destructor.  Since eBPF doesn't
   support indirect calls, destructors are not supported.  */

static void
bpf_output_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  tree decl = SYMBOL_REF_DECL (symbol);

  if (decl)
    sorry_at (DECL_SOURCE_LOCATION (decl),
              "no destructors");
  else
    sorry ("no destructors");
}

#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR bpf_output_destructor

/* Return the appropriate instruction to CALL to a function.  TARGET
   is an RTX denoting the address of the called function.

   The main purposes of this function are:
   - To reject indirect CALL instructions, which are not supported by
     eBPF.
   - To recognize calls to kernel helper functions and emit the
     corresponding CALL N instruction.

   This function is called from the expansion of the 'call' pattern in
   bpf.md.  */

const char *
bpf_output_call (rtx target)
{
  rtx xops[1];

  switch (GET_CODE (target))
    {
    case CONST_INT:
      output_asm_insn ("call\t%0", &target);
      break;
    case SYMBOL_REF:
      {
        tree decl = SYMBOL_REF_DECL (target);
        tree attr;

        if (decl
            && (attr = lookup_attribute ("kernel_helper",
                                         DECL_ATTRIBUTES (decl))))
          {
            tree attr_args = TREE_VALUE (attr);

            xops[0] = GEN_INT (TREE_INT_CST_LOW (TREE_VALUE (attr_args)));
            output_asm_insn ("call\t%0", xops);
          }
        else
          output_asm_insn ("call\t%0", &target);

        break;
      }
    default:
      if (TARGET_XBPF)
        output_asm_insn ("call\t%0", &target);
      else
        {
          error ("indirect call in function, which is not supported by eBPF");
          output_asm_insn ("call 0", NULL);
        }
      break;
    }

  return "";
}
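
/* For example, with the hypothetical declaration used in the
   attribute-table comment above,

     void *map_lookup (void *map, const void *key)
       __attribute__ ((kernel_helper (1)));

   a call to map_lookup is assembled by the function above as

     call	1

   whereas a call to an ordinary function foo is assembled as
   'call foo'.  */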

/* Print an instruction operand.  This function is called in the macro
   PRINT_OPERAND defined in bpf.h.  */

void
bpf_print_operand (FILE *file, rtx op, int code ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case REG:
      fprintf (file, "%s", reg_names[REGNO (op)]);
      break;
    case MEM:
      output_address (GET_MODE (op), XEXP (op, 0));
      break;
    case CONST_DOUBLE:
      if (CONST_DOUBLE_HIGH (op))
        fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
                 CONST_DOUBLE_HIGH (op), CONST_DOUBLE_LOW (op));
      else if (CONST_DOUBLE_LOW (op) < 0)
        fprintf (file, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_LOW (op));
      else
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (op));
      break;
    default:
      output_addr_const (file, op);
    }
}

/* Print an operand which is an address.  This function should handle
   any legitimate address, as accepted by bpf_legitimate_address_p,
   and also addresses that are valid in CALL instructions.

   This function is called in the PRINT_OPERAND_ADDRESS macro defined
   in bpf.h.  */

void
bpf_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case REG:
      fprintf (file, "[%s+0]", reg_names[REGNO (addr)]);
      break;
    case PLUS:
      {
        rtx op0 = XEXP (addr, 0);
        rtx op1 = XEXP (addr, 1);

        if (GET_CODE (op0) == REG && GET_CODE (op1) == CONST_INT)
          {
            fprintf (file, "[%s+", reg_names[REGNO (op0)]);
            output_addr_const (file, op1);
            fputs ("]", file);
          }
        else
          fatal_insn ("invalid address in operand", addr);
        break;
      }
    case MEM:
      /* Fall through.  */
    case LABEL_REF:
      fatal_insn ("unsupported operand", addr);
      break;
    default:
      output_addr_const (file, addr);
      break;
    }
}

/* Add a BPF builtin function with NAME, CODE and TYPE.  Return
   the function decl or NULL_TREE if the builtin was not added.  */

static tree
def_builtin (const char *name, enum bpf_builtins code, tree type)
{
  tree t
    = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);

  bpf_builtins[code] = t;
  return t;
}

/* Define machine-specific built-in functions.  */

static void
bpf_init_builtins (void)
{
  tree ullt = long_long_unsigned_type_node;

  /* Built-ins for BPF_LD_ABS and BPF_LD_IND instructions.  */

  def_builtin ("__builtin_bpf_load_byte", BPF_BUILTIN_LOAD_BYTE,
               build_function_type_list (ullt, ullt, 0));
  def_builtin ("__builtin_bpf_load_half", BPF_BUILTIN_LOAD_HALF,
               build_function_type_list (ullt, ullt, 0));
  def_builtin ("__builtin_bpf_load_word", BPF_BUILTIN_LOAD_WORD,
               build_function_type_list (ullt, ullt, 0));
  def_builtin ("__builtin_preserve_access_index",
               BPF_BUILTIN_PRESERVE_ACCESS_INDEX,
               build_function_type_list (ptr_type_node, ptr_type_node, 0));
}

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS bpf_init_builtins
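
/* In a BPF program, the load built-ins declared above are used along
   these lines (a minimal sketch; the offset is arbitrary):

     unsigned long long
     read_second_half (void)
     {
       return __builtin_bpf_load_half (2);
     }

   The loaded value is returned in %r0, as reflected in
   bpf_expand_builtin below.  */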

static tree bpf_core_compute (tree, vec<unsigned int> *);
static int bpf_core_get_index (const tree);
static bool is_attr_preserve_access (tree);

/* Expand a call to a BPF-specific built-in function that was set up
   with bpf_init_builtins.  */

static rtx
bpf_expand_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
                    rtx subtarget ATTRIBUTE_UNUSED,
                    machine_mode mode ATTRIBUTE_UNUSED,
                    int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  int code = DECL_MD_FUNCTION_CODE (fndecl);

  if (code == BPF_BUILTIN_LOAD_BYTE
      || code == BPF_BUILTIN_LOAD_HALF
      || code == BPF_BUILTIN_LOAD_WORD)
    {
      /* Expand an indirect load from the sk_buff in the context.
         There is just one argument to the builtin, which is the
         offset.

         We try first to expand a ldabs* instruction.  In case this
         fails, we try a ldind* instruction.  */

      enum insn_code abs_icode
        = (code == BPF_BUILTIN_LOAD_BYTE ? CODE_FOR_ldabsb
           : code == BPF_BUILTIN_LOAD_HALF ? CODE_FOR_ldabsh
           : CODE_FOR_ldabsw);

      enum insn_code ind_icode
        = (code == BPF_BUILTIN_LOAD_BYTE ? CODE_FOR_ldindb
           : code == BPF_BUILTIN_LOAD_HALF ? CODE_FOR_ldindh
           : CODE_FOR_ldindw);

      tree offset_arg = CALL_EXPR_ARG (exp, 0);
      struct expand_operand ops[2];

      create_input_operand (&ops[0], expand_normal (offset_arg),
                            TYPE_MODE (TREE_TYPE (offset_arg)));
      create_input_operand (&ops[1], const0_rtx, SImode);

      if (!maybe_expand_insn (abs_icode, 2, ops)
          && !maybe_expand_insn (ind_icode, 2, ops))
        {
          error ("invalid argument to built-in function");
          return gen_rtx_REG (ops[0].mode, BPF_R0);
        }

      /* The result of the load is in R0.  */
      return gen_rtx_REG (ops[0].mode, BPF_R0);
    }
  else if (code == -1)
    {
      /* A resolved overloaded builtin, e.g. one of the __builtin_pai_*
         functions synthesized by bpf_core_newdecl below.  */
      tree arg = CALL_EXPR_ARG (exp, 0);

      if (arg == NULL_TREE)
        return NULL_RTX;

      auto_vec<unsigned int, 16> accessors;
      tree container;

      if (TREE_CODE (arg) == SSA_NAME)
        {
          gimple *def_stmt = SSA_NAME_DEF_STMT (arg);

          if (is_gimple_assign (def_stmt))
            arg = gimple_assign_rhs1 (def_stmt);
          else
            return expand_normal (arg);
        }

      /* Avoid double-recording information if the argument is an access to
         a struct/union marked __attribute__((preserve_access_index)).  This
         will be handled by the attribute handling pass.  */
      if (is_attr_preserve_access (arg))
        return expand_normal (arg);

      container = bpf_core_compute (arg, &accessors);

      /* Any valid use of the builtin must have at least one access.  Otherwise,
         there is nothing to record and nothing to do.  This is primarily a
         guard against optimizations leading to unexpected expressions in the
         argument of the builtin.  For example, if the builtin is used to read
         a field of a structure which can be statically determined to hold a
         constant value, the argument to the builtin will be optimized to that
         constant.  This is OK, and means the builtin call is superfluous.
         e.g.
           struct S foo;
           foo.a = 5;
           int x = __preserve_access_index (foo.a);
           ... do stuff with x
         'foo.a' in the builtin argument will be optimized to '5' with -O1 and
         higher.  This sequence does not warrant recording a CO-RE
         relocation.  */

      if (accessors.length () < 1)
        return expand_normal (arg);

      accessors.reverse ();

      container = TREE_TYPE (container);

      rtx_code_label *label = gen_label_rtx ();
      LABEL_PRESERVE_P (label) = 1;
      emit_label (label);

      /* Determine what output section this relocation will apply to.
         If this function is associated with a section, use that.  Otherwise,
         fall back on '.text'.  */
      const char * section_name;
      if (current_function_decl && DECL_SECTION_NAME (current_function_decl))
        section_name = DECL_SECTION_NAME (current_function_decl);
      else
        section_name = ".text";

      /* Add the CO-RE relocation information to the BTF container.  */
      bpf_core_reloc_add (container, section_name, &accessors, label);

      return expand_normal (arg);
    }
  gcc_unreachable ();
}

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN bpf_expand_builtin

/* Initialize target-specific function library calls.  This is mainly
   used to call library-provided soft-fp operations, since eBPF
   doesn't support floating-point in "hardware".  */

static void
bpf_init_libfuncs (void)
{
  set_conv_libfunc (sext_optab, DFmode, SFmode,
                    "__bpf_extendsfdf2");
  set_conv_libfunc (trunc_optab, SFmode, DFmode,
                    "__bpf_truncdfsf2");
  set_conv_libfunc (sfix_optab, SImode, DFmode,
                    "__bpf_fix_truncdfsi");
  set_conv_libfunc (sfloat_optab, DFmode, SImode,
                    "__bpf_floatsidf");
  set_conv_libfunc (ufloat_optab, DFmode, SImode,
                    "__bpf_floatunsidf");
}

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS bpf_init_libfuncs

/* Define the mechanism that will be used for describing frame unwind
   information to the debugger.  In eBPF it is not possible to unwind
   frames.  */

static enum unwind_info_type
bpf_debug_unwind_info ()
{
  return UI_NONE;
}

#undef TARGET_DEBUG_UNWIND_INFO
#define TARGET_DEBUG_UNWIND_INFO bpf_debug_unwind_info

/* Output assembly directives to assemble data of various sizes and
   alignments.  */

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\t.byte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"


/* BPF Compile Once - Run Everywhere (CO-RE) support routines.

   BPF CO-RE is supported in two forms:
   - A target builtin, __builtin_preserve_access_index

     This builtin accepts a single argument.  Any access to an aggregate data
     structure (struct, union or array) within the argument will be recorded by
     the CO-RE machinery, resulting in a relocation record being placed in the
     .BTF.ext section of the output.

     It is implemented in bpf_resolve_overloaded_builtin () and
     bpf_expand_builtin (), using the supporting routines below.

   - An attribute, __attribute__((preserve_access_index))

     This attribute can be applied to struct and union types.  Any access to a
     type with this attribute will be recorded by the CO-RE machinery.

     The pass pass_bpf_core_attr, below, implements support for
     this attribute.  */

/* Traverse the subtree under NODE, which is expected to be some form of
   aggregate access the CO-RE machinery cares about (like a read of a member of
   a struct or union), collecting access indices for the components and storing
   them in the vector referenced by ACCESSORS.

   Return the ultimate (top-level) container of the aggregate access.  In
   general, this will be a VAR_DECL or some kind of REF.

   Note that the accessors are computed *in reverse order* of how the BPF
   CO-RE machinery defines them.  The vector needs to be reversed (or simply
   output in reverse order) for the .BTF.ext relocation information.  */

static tree
bpf_core_compute (tree node, vec<unsigned int> *accessors)
{
  if (TREE_CODE (node) == ADDR_EXPR)
    node = TREE_OPERAND (node, 0);

  else if (TREE_CODE (node) == INDIRECT_REF
           || TREE_CODE (node) == POINTER_PLUS_EXPR)
    {
      accessors->safe_push (0);
      return TREE_OPERAND (node, 0);
    }

  while (1)
    {
      switch (TREE_CODE (node))
        {
        case COMPONENT_REF:
          accessors->safe_push (bpf_core_get_index (TREE_OPERAND (node, 1)));
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
          accessors->safe_push (bpf_core_get_index (node));
          break;

        case MEM_REF:
          accessors->safe_push (bpf_core_get_index (node));
          if (TREE_CODE (TREE_OPERAND (node, 0)) == ADDR_EXPR)
            node = TREE_OPERAND (TREE_OPERAND (node, 0), 0);
          goto done;

        default:
          goto done;
        }
      node = TREE_OPERAND (node, 0);
    }
 done:
  return node;
}

/* Compute the index of the NODE in its immediate container.
   NODE should be a FIELD_DECL (i.e. of struct or union), or an ARRAY_REF.  */

static int
bpf_core_get_index (const tree node)
{
  enum tree_code code = TREE_CODE (node);

  if (code == FIELD_DECL)
    {
      /* Lookup the index from the BTF information.  Some struct/union members
         may not be emitted in BTF; only the BTF container has enough
         information to compute the correct index.  */
      int idx = bpf_core_get_sou_member_index (ctf_get_tu_ctfc (), node);
      if (idx >= 0)
        return idx;
    }

  else if (code == ARRAY_REF || code == ARRAY_RANGE_REF || code == MEM_REF)
    {
      /* For array accesses, the index is operand 1.  */
      tree index = TREE_OPERAND (node, 1);

      /* If the indexing operand is a constant, extracting is trivial.  */
      if (TREE_CODE (index) == INTEGER_CST && tree_fits_shwi_p (index))
        return tree_to_shwi (index);
    }

  return -1;
}
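
/* As an example of the two routines above, given an access like

     s.u[2].b

   where 'u' has index 3 in the type of 's' and 'b' has index 1 in
   the type of the array elements (both indices hypothetical),
   bpf_core_compute pushes the accessors in reverse order, { 1, 2, 3 },
   and returns the VAR_DECL for 's' as the container.  Callers then
   reverse the vector before emitting the .BTF.ext information.  */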

/* Synthesize a new builtin function declaration with signature TYPE.
   Used by bpf_resolve_overloaded_builtin to resolve calls to
   __builtin_preserve_access_index.  */

static tree
bpf_core_newdecl (tree type)
{
  tree rettype = build_function_type_list (type, type, NULL);
  char name[80];
  int len = snprintf (name, sizeof (name), "%s", "__builtin_pai_");

  static unsigned long cnt = 0;
  len = snprintf (name + len, sizeof (name) - len, "%lu", cnt++);

  return add_builtin_function_ext_scope (name, rettype, -1, BUILT_IN_MD, NULL,
                                         NULL_TREE);
}

/* Return whether EXPR could access some aggregate data structure that
   BPF CO-RE support needs to know about.  */

static int
bpf_core_is_maybe_aggregate_access (tree expr)
{
  enum tree_code code = TREE_CODE (expr);
  if (code == COMPONENT_REF || code == ARRAY_REF)
    return 1;

  if (code == ADDR_EXPR)
    return bpf_core_is_maybe_aggregate_access (TREE_OPERAND (expr, 0));

  return 0;
}

/* Callback function used with walk_tree from bpf_resolve_overloaded_builtin.  */

static tree
bpf_core_walk (tree *tp, int *walk_subtrees, void *data)
{
  location_t loc = *((location_t *) data);

  /* If this is a type, don't do anything.  */
  if (TYPE_P (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  if (bpf_core_is_maybe_aggregate_access (*tp))
    {
      tree newdecl = bpf_core_newdecl (TREE_TYPE (*tp));
      tree newcall = build_call_expr_loc (loc, newdecl, 1, *tp);
      *tp = newcall;
      *walk_subtrees = 0;
    }

  return NULL_TREE;
}


/* Implement TARGET_RESOLVE_OVERLOADED_BUILTIN (see gccint manual section
   Target Macros::Misc.).
   We use this for the __builtin_preserve_access_index builtin for CO-RE
   support.

   FNDECL is the declaration of the builtin, and ARGLIST is the list of
   arguments passed to it, and is really a vec<tree,_> *.

   In this case, the 'operation' implemented by the builtin is a no-op;
   the builtin is just a marker.  So, the result is simply the argument.  */

static tree
bpf_resolve_overloaded_builtin (location_t loc, tree fndecl, void *arglist)
{
  if (DECL_MD_FUNCTION_CODE (fndecl) != BPF_BUILTIN_PRESERVE_ACCESS_INDEX)
    return NULL_TREE;

  /* We only expect one argument, but it may be an arbitrarily-complicated
     statement-expression.  */
  vec<tree, va_gc> *params = static_cast<vec<tree, va_gc> *> (arglist);
  unsigned n_params = params ? params->length () : 0;

  if (n_params != 1)
    {
      error_at (loc, "expected exactly 1 argument");
      return NULL_TREE;
    }

  tree param = (*params)[0];

  /* If not generating BPF_CORE information, the builtin does nothing.  */
  if (!TARGET_BPF_CORE)
    return param;

  /* Do remove_c_maybe_const_expr for the arg.
     TODO: WHY do we have to do this here?  Why doesn't c-typeck take care
     of it before or after this hook?  */
  if (TREE_CODE (param) == C_MAYBE_CONST_EXPR)
    param = C_MAYBE_CONST_EXPR_EXPR (param);

  /* Construct a new function declaration with the correct type, and return
     a call to it.

     Calls with statement-expressions, for example:
       _(({ foo->a = 1; foo->u[2].b = 2; }))
     require special handling.

     We rearrange this into a new block scope in which each statement
     becomes a unique builtin call:
       {
         _ ({ foo->a = 1;});
         _ ({ foo->u[2].b = 2;});
       }

     This ensures that all the relevant information remains within the
     expression trees the builtin finally gets.  */

  walk_tree (&param, bpf_core_walk, (void *) &loc, NULL);

  return param;
}

#undef TARGET_RESOLVE_OVERLOADED_BUILTIN
#define TARGET_RESOLVE_OVERLOADED_BUILTIN bpf_resolve_overloaded_builtin


/* Handling for __attribute__((preserve_access_index)) for BPF CO-RE support.

   This attribute marks a structure/union/array type as "preserve", so that
   every access to that type should be recorded and replayed by the BPF loader;
   this is just the same functionality as __builtin_preserve_access_index,
   but in the form of an attribute for an entire aggregate type.

   Note also that nested structs behave as though they all have the attribute.
   For example:
     struct X { int a; };
     struct Y { struct X bar; } __attribute__((preserve_access_index));
     struct Y foo;
     foo.bar.a;
   will record the access all the way to 'a', even though struct X does not
   have the preserve_access_index attribute.

   This is to follow LLVM behavior.

   This pass finds all accesses to objects of types marked with the attribute,
   and wraps them in the same "low-level" builtins used by the builtin version.
   All logic afterwards is therefore identical to the builtin version of
   preserve_access_index.  */

/* True iff tree T accesses any member of a struct/union/class which is marked
   with the PRESERVE_ACCESS_INDEX attribute.  */

static bool
is_attr_preserve_access (tree t)
{
  if (t == NULL_TREE)
    return false;

  poly_int64 bitsize, bitpos;
  tree var_off;
  machine_mode mode;
  int sign, reverse, vol;

  tree base = get_inner_reference (t, &bitsize, &bitpos, &var_off, &mode,
                                   &sign, &reverse, &vol);

  if (TREE_CODE (base) == MEM_REF)
    {
      return lookup_attribute ("preserve_access_index",
                               TYPE_ATTRIBUTES (TREE_TYPE (base)));
    }

  if (TREE_CODE (t) == COMPONENT_REF)
    {
      /* preserve_access_index propagates into nested structures,
         so check whether this is a component of another component
         which in turn is part of such a struct.  */

      const tree op = TREE_OPERAND (t, 0);

      if (TREE_CODE (op) == COMPONENT_REF)
        return is_attr_preserve_access (op);

      const tree container = DECL_CONTEXT (TREE_OPERAND (t, 1));

      return lookup_attribute ("preserve_access_index",
                               TYPE_ATTRIBUTES (container));
    }

  else if (TREE_CODE (t) == ADDR_EXPR)
    return is_attr_preserve_access (TREE_OPERAND (t, 0));

  return false;
}

/* The body of pass_bpf_core_attr.  Scan RTL for accesses to structs/unions
   marked with __attribute__((preserve_access_index)) and generate a CO-RE
   relocation for any such access.  */

static void
handle_attr_preserve (function *fn)
{
  basic_block bb;
  rtx_insn *insn;
  rtx_code_label *label;
  FOR_EACH_BB_FN (bb, fn)
    {
      FOR_BB_INSNS (bb, insn)
        {
          if (!NONJUMP_INSN_P (insn))
            continue;
          rtx pat = PATTERN (insn);
          if (GET_CODE (pat) != SET)
            continue;

          start_sequence ();

          for (int i = 0; i < 2; i++)
            {
              rtx mem = XEXP (pat, i);
              if (MEM_P (mem))
                {
                  tree expr = MEM_EXPR (mem);
                  if (!expr)
                    continue;

                  if (TREE_CODE (expr) == MEM_REF
                      && TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME)
                    {
                      gimple *def_stmt
                        = SSA_NAME_DEF_STMT (TREE_OPERAND (expr, 0));
                      if (def_stmt && is_gimple_assign (def_stmt))
                        expr = gimple_assign_rhs1 (def_stmt);
                    }

                  if (is_attr_preserve_access (expr))
                    {
                      auto_vec<unsigned int, 16> accessors;
                      tree container = bpf_core_compute (expr, &accessors);
                      if (accessors.length () < 1)
                        continue;
                      accessors.reverse ();

                      container = TREE_TYPE (container);
                      const char * section_name;
                      if (DECL_SECTION_NAME (fn->decl))
                        section_name = DECL_SECTION_NAME (fn->decl);
                      else
                        section_name = ".text";

                      label = gen_label_rtx ();
                      LABEL_PRESERVE_P (label) = 1;
                      emit_label (label);

                      /* Add the CO-RE relocation information to the BTF
                         container.  */
                      bpf_core_reloc_add (container, section_name,
                                          &accessors, label);
                    }
                }
            }
          rtx_insn *seq = get_insns ();
          end_sequence ();
          emit_insn_before (seq, insn);
        }
    }
}


/* This pass finds accesses to structures marked with the BPF target attribute
   __attribute__((preserve_access_index)).  For every such access, a CO-RE
   relocation record is generated, to be output in the .BTF.ext section.  */

namespace {

const pass_data pass_data_bpf_core_attr =
{
  RTL_PASS, /* type */
  "bpf_core_attr", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_bpf_core_attr : public rtl_opt_pass
{
public:
  pass_bpf_core_attr (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_bpf_core_attr, ctxt)
  {}

  virtual bool gate (function *) { return TARGET_BPF_CORE; }
  virtual unsigned int execute (function *);
};

unsigned int
pass_bpf_core_attr::execute (function *fn)
{
  handle_attr_preserve (fn);
  return 0;
}

} /* Anonymous namespace.  */

rtl_opt_pass *
make_pass_bpf_core_attr (gcc::context *ctxt)
{
  return new pass_bpf_core_attr (ctxt);
}

/* Finally, build the GCC target.  */

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-bpf.h"