/* GNU/Linux/PowerPC specific low level interface, for the in-process
   agent library for GDB.

   Copyright (C) 2016-2024 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <sys/mman.h>
#include "tracepoint.h"
#include "arch/ppc-linux-tdesc.h"
#include "linux-ppc-tdesc-init.h"
#include <elf.h>
#ifdef HAVE_GETAUXVAL
#include <sys/auxv.h>
#endif

/* These macros define the position of registers in the buffer collected
   by the fast tracepoint jump pad.  */
#define FT_CR_R0 0
#define FT_CR_CR 32
#define FT_CR_XER 33
#define FT_CR_LR 34
#define FT_CR_CTR 35
#define FT_CR_PC 36
#define FT_CR_GPR(n) (FT_CR_R0 + (n))
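/* Each collected register occupies one long-sized slot in that buffer;
   e.g. on powerpc64, where sizeof (long) is 8, CTR is found at byte
   offset FT_CR_CTR * sizeof (long) == 35 * 8 == 280.  */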
static const int ppc_ft_collect_regmap[] = {
  /* GPRs */
  FT_CR_GPR (0), FT_CR_GPR (1), FT_CR_GPR (2),
  FT_CR_GPR (3), FT_CR_GPR (4), FT_CR_GPR (5),
  FT_CR_GPR (6), FT_CR_GPR (7), FT_CR_GPR (8),
  FT_CR_GPR (9), FT_CR_GPR (10), FT_CR_GPR (11),
  FT_CR_GPR (12), FT_CR_GPR (13), FT_CR_GPR (14),
  FT_CR_GPR (15), FT_CR_GPR (16), FT_CR_GPR (17),
  FT_CR_GPR (18), FT_CR_GPR (19), FT_CR_GPR (20),
  FT_CR_GPR (21), FT_CR_GPR (22), FT_CR_GPR (23),
  FT_CR_GPR (24), FT_CR_GPR (25), FT_CR_GPR (26),
  FT_CR_GPR (27), FT_CR_GPR (28), FT_CR_GPR (29),
  FT_CR_GPR (30), FT_CR_GPR (31),
  /* FPRs - not collected.  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  FT_CR_PC, /* PC */
  -1, /* MSR */
  FT_CR_CR, /* CR */
  FT_CR_LR, /* LR */
  FT_CR_CTR, /* CTR */
  FT_CR_XER, /* XER */
  -1, /* FPSCR */
};

#define PPC_NUM_FT_COLLECT_GREGS \
  (sizeof (ppc_ft_collect_regmap) / sizeof (ppc_ft_collect_regmap[0]))

/* Supply registers collected by the fast tracepoint jump pad.
   BUF is the second argument we pass to gdb_collect in the jump pad.  */

void
supply_fast_tracepoint_registers (struct regcache *regcache,
                                  const unsigned char *buf)
{
  int i;

  for (i = 0; i < PPC_NUM_FT_COLLECT_GREGS; i++)
    {
      if (ppc_ft_collect_regmap[i] == -1)
        continue;
      supply_register (regcache, i,
                       ((char *) buf)
                       + ppc_ft_collect_regmap[i] * sizeof (long));
    }
}

/* Return the value of register REGNUM.  RAW_REGS is the buffer collected
   by the jump pad.  This function is called by emit_reg.  */

ULONGEST
get_raw_reg (const unsigned char *raw_regs, int regnum)
{
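  /* Registers outside the map, or not collected by the jump pad
     (mapped to -1), read as zero.  */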
  if (regnum >= PPC_NUM_FT_COLLECT_GREGS)
    return 0;
  if (ppc_ft_collect_regmap[regnum] == -1)
    return 0;

  return *(unsigned long *) (raw_regs
                             + ppc_ft_collect_regmap[regnum] * sizeof (long));
}

/* Allocate a buffer for the jump pads.  The branch instruction has a reach
   of +/- 32 MiB, and the executable is loaded at 0x10000000 (256 MiB).

   64-bit: To maximize the area of the executable that can use tracepoints,
   try allocating at 0x10000000 - size initially, decreasing until we hit
   a free area.

   32-bit: ld.so loads dynamic libraries right below the executable, so
   we cannot depend on that area (dynamic libraries can be quite large).
   Instead, aim right after the executable - at sbrk (0).  This will
   cause future brk calls to fail, and malloc will fall back to mmap.  */

void *
alloc_jump_pad_buffer (size_t size)
{
#ifdef __powerpc64__
  uintptr_t addr;
  uintptr_t exec_base = getauxval (AT_PHDR);
  int pagesize;
  void *res;

  if (exec_base == 0)
    exec_base = 0x10000000;

  pagesize = sysconf (_SC_PAGE_SIZE);
  if (pagesize == -1)
    perror_with_name ("sysconf");

  addr = exec_base - size;

  /* SIZE should already be page-aligned, but this can't hurt.  */
  addr &= ~(pagesize - 1);

  /* Search for a free area.  If we hit 0, we're out of luck.  */
  for (; addr; addr -= pagesize)
    {
      /* No MAP_FIXED - we don't want to zap someone's mapping.  */
      res = mmap ((void *) addr, size,
                  PROT_READ | PROT_WRITE | PROT_EXEC,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

      /* If we got what we wanted, return.  */
      if ((uintptr_t) res == addr)
        return res;

      /* If we got a mapping, but at a wrong address, undo it.  */
      if (res != MAP_FAILED)
        munmap (res, size);
    }

  return NULL;
#else
  void *target = sbrk (0);
  void *res = mmap (target, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (res == target)
    return res;

  if (res != MAP_FAILED)
    munmap (res, size);

  return NULL;
#endif
}

/* Return the target_desc to use for the IPA, given the tdesc index passed
   by gdbserver.  */

const struct target_desc *
get_ipa_tdesc (int idx)
{
  switch (idx)
    {
#ifdef __powerpc64__
    case PPC_TDESC_BASE:
      return tdesc_powerpc_64l;
    case PPC_TDESC_ALTIVEC:
      return tdesc_powerpc_altivec64l;
    case PPC_TDESC_VSX:
      return tdesc_powerpc_vsx64l;
    case PPC_TDESC_ISA205:
      return tdesc_powerpc_isa205_64l;
    case PPC_TDESC_ISA205_ALTIVEC:
      return tdesc_powerpc_isa205_altivec64l;
    case PPC_TDESC_ISA205_VSX:
      return tdesc_powerpc_isa205_vsx64l;
    case PPC_TDESC_ISA205_PPR_DSCR_VSX:
      return tdesc_powerpc_isa205_ppr_dscr_vsx64l;
    case PPC_TDESC_ISA207_VSX:
      return tdesc_powerpc_isa207_vsx64l;
    case PPC_TDESC_ISA207_HTM_VSX:
      return tdesc_powerpc_isa207_htm_vsx64l;
#else
    case PPC_TDESC_BASE:
      return tdesc_powerpc_32l;
    case PPC_TDESC_ALTIVEC:
      return tdesc_powerpc_altivec32l;
    case PPC_TDESC_VSX:
      return tdesc_powerpc_vsx32l;
    case PPC_TDESC_ISA205:
      return tdesc_powerpc_isa205_32l;
    case PPC_TDESC_ISA205_ALTIVEC:
      return tdesc_powerpc_isa205_altivec32l;
    case PPC_TDESC_ISA205_VSX:
      return tdesc_powerpc_isa205_vsx32l;
    case PPC_TDESC_ISA205_PPR_DSCR_VSX:
      return tdesc_powerpc_isa205_ppr_dscr_vsx32l;
    case PPC_TDESC_ISA207_VSX:
      return tdesc_powerpc_isa207_vsx32l;
    case PPC_TDESC_ISA207_HTM_VSX:
      return tdesc_powerpc_isa207_htm_vsx32l;
    case PPC_TDESC_E500:
      return tdesc_powerpc_e500l;
#endif
    default:
      internal_error ("unknown ipa tdesc index: %d", idx);
#ifdef __powerpc64__
      return tdesc_powerpc_64l;
#else
      return tdesc_powerpc_32l;
#endif
    }
}

/* Initialize ipa_tdesc and others.  */

void
initialize_low_tracepoint (void)
{
#ifdef __powerpc64__
  init_registers_powerpc_64l ();
  init_registers_powerpc_altivec64l ();
  init_registers_powerpc_vsx64l ();
  init_registers_powerpc_isa205_64l ();
  init_registers_powerpc_isa205_altivec64l ();
  init_registers_powerpc_isa205_vsx64l ();
  init_registers_powerpc_isa205_ppr_dscr_vsx64l ();
  init_registers_powerpc_isa207_vsx64l ();
  init_registers_powerpc_isa207_htm_vsx64l ();
#else
  init_registers_powerpc_32l ();
  init_registers_powerpc_altivec32l ();
  init_registers_powerpc_vsx32l ();
  init_registers_powerpc_isa205_32l ();
  init_registers_powerpc_isa205_altivec32l ();
  init_registers_powerpc_isa205_vsx32l ();
  init_registers_powerpc_isa205_ppr_dscr_vsx32l ();
  init_registers_powerpc_isa207_vsx32l ();
  init_registers_powerpc_isa207_htm_vsx32l ();
  init_registers_powerpc_e500l ();
#endif
}