/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/* Portions Copyright 2013 Justin Hibbits */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/fasttrap_isa.h>
#include <sys/fasttrap_impl.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <cddl/dev/dtrace/dtrace_cddl.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/ptrace.h>
#include <sys/rmlock.h>
#include <sys/sysent.h>

#define OP(x)	((x) >> 26)
#define OPX(x)	(((x) >> 2) & 0x3FF)
#define OP_BO(x)	(((x) & 0x03E00000) >> 21)
#define OP_BI(x)	(((x) & 0x001F0000) >> 16)
#define OP_RS(x)	(((x) & 0x03E00000) >> 21)
#define OP_RA(x)	(((x) & 0x001F0000) >> 16)
#define OP_RB(x)	(((x) & 0x0000F800) >> 11)
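/*
 * A PowerPC instruction is a single 32-bit word.  The macros above pick
 * out the fields used below: OP is the 6-bit primary opcode (bits 26-31
 * of the word), OPX the 10-bit extended opcode (bits 1-10), and BO/RS,
 * BI/RA and RB the successive 5-bit operand fields at bits 21-25, 16-20
 * and 11-15.
 */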

int
fasttrap_tracepoint_install(proc_t *p, fasttrap_tracepoint_t *tp)
{
	fasttrap_instr_t instr = FASTTRAP_INSTR;

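	/*
	 * All PowerPC instructions are a fixed four bytes wide, so the
	 * breakpoint is installed with a single aligned word write into
	 * the traced process.
	 */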
	if (uwrite(p, &instr, 4, tp->ftt_pc) != 0)
		return (-1);

	return (0);
}

int
fasttrap_tracepoint_remove(proc_t *p, fasttrap_tracepoint_t *tp)
{
	uint32_t instr;

	/*
	 * Distinguish between read or write failures and a changed
	 * instruction.
	 */
	if (uread(p, &instr, 4, tp->ftt_pc) != 0)
		return (0);
	if (instr != FASTTRAP_INSTR)
		return (0);
	if (uwrite(p, &tp->ftt_instr, 4, tp->ftt_pc) != 0)
		return (-1);

	return (0);
}

int
fasttrap_tracepoint_init(proc_t *p, fasttrap_tracepoint_t *tp, uintptr_t pc,
    fasttrap_probe_type_t type)
{
	uint32_t instr;
	//int32_t disp;

	/*
	 * Read the instruction at the given address out of the process's
	 * address space. We don't have to worry about a debugger
	 * changing this instruction before we overwrite it with our trap
	 * instruction since P_PR_LOCK is set.
	 */
	if (uread(p, &instr, 4, pc) != 0)
		return (-1);

	/*
	 * Decode the instruction to fill in the probe flags. We can have
	 * the process execute most instructions on its own using a pc/npc
	 * trick, but pc-relative control transfers present a problem since
	 * we're relocating the instruction. We emulate those instructions
	 * in the kernel. We assume a default type and overwrite that as
	 * needed.
	 *
	 * pc-relative instructions must be emulated for correctness;
	 * other instructions (which represent a large set of commonly traced
	 * instructions) are emulated or otherwise optimized for performance.
	 */
	tp->ftt_type = FASTTRAP_T_COMMON;
	tp->ftt_instr = instr;

	switch (OP(instr)) {
	/* The following are invalid for trapping (invalid opcodes, tw/twi). */
	case 0:
	case 1:
	case 2:
	case 4:
	case 5:
	case 6:
	case 30:
	case 39:
	case 58:
	case 62:
	case 3:	/* twi */
		return (-1);
	case 31:	/* tw */
		if (OPX(instr) == 4)
			return (-1);
		else if (OPX(instr) == 444 && OP_RS(instr) == OP_RA(instr) &&
		    OP_RS(instr) == OP_RB(instr))
			/* or rX,rX,rX (mr rX,rX) has no effect; treat as a nop. */
			tp->ftt_type = FASTTRAP_T_NOP;
		break;
	case 16:	/* bc */
		tp->ftt_type = FASTTRAP_T_BC;
		tp->ftt_dest = instr & 0x0000FFFC; /* Extract target address */
		if (instr & 0x00008000)
			tp->ftt_dest |= 0xFFFF0000;
		/* Use as offset if not absolute address. */
		if (!(instr & 0x02))
			tp->ftt_dest += pc;
		tp->ftt_bo = OP_BO(instr);
		tp->ftt_bi = OP_BI(instr);
		break;
	case 18:	/* b */
		tp->ftt_type = FASTTRAP_T_B;
		tp->ftt_dest = instr & 0x03FFFFFC; /* Extract target address */
		if (instr & 0x02000000)
			tp->ftt_dest |= 0xFC000000;
		/* Use as offset if not absolute address. */
		if (!(instr & 0x02))
			tp->ftt_dest += pc;
		break;
	case 19:
		switch (OPX(instr)) {
		case 528:	/* bcctr */
			tp->ftt_type = FASTTRAP_T_BCTR;
			tp->ftt_bo = OP_BO(instr);
			tp->ftt_bi = OP_BI(instr);
			break;
		case 16:	/* bclr */
			tp->ftt_type = FASTTRAP_T_BLR;
			tp->ftt_bo = OP_BO(instr);
			tp->ftt_bi = OP_BI(instr);
			break;
		}
		break;
	case 24:
		/* ori rX,rX,0 (including the canonical nop) has no effect. */
		if (OP_RS(instr) == OP_RA(instr) &&
		    (instr & 0x0000FFFF) == 0)
			tp->ftt_type = FASTTRAP_T_NOP;
		break;
	}

	/*
	 * We don't know how this tracepoint is going to be used, but in case
	 * it's used as part of a function return probe, we need to indicate
	 * whether it's always a return site or only potentially a return
	 * site. If it's part of a return probe, it's always going to be a
	 * return from that function if it's a restore instruction or if
	 * the previous instruction was a return. If we could reliably
	 * distinguish jump tables from return sites, this wouldn't be
	 * necessary.
	 */
#if 0
	if (tp->ftt_type != FASTTRAP_T_RESTORE &&
	    (uread(p, &instr, 4, pc - sizeof (instr)) != 0 ||
	    !(OP(instr) == 2 && OP3(instr) == OP3_RETURN)))
		tp->ftt_flags |= FASTTRAP_F_RETMAYBE;
#endif

	return (0);
}

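/*
 * Fetch probe argument 'argno' for the current thread: the first eight
 * integer arguments live in registers, anything beyond that is read from
 * the caller's stack frame.
 */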
static uint64_t
fasttrap_anarg(struct reg *rp, int argno)
{
	uint64_t value;
	proc_t *p = curproc;

	/* The first 8 arguments are passed in registers r3-r10. */
	if (argno < 8)
		return (rp->fixreg[argno + 3]);

	/*
	 * Arguments passed on the stack: for 32-bit processes they start
	 * 8 bytes above the stack pointer (past the back chain and saved
	 * LR); for 64-bit processes they start 48 bytes above it (past
	 * the ELFv1 stack frame header).
	 */
	if (SV_PROC_FLAG(p, SV_ILP32)) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		value = dtrace_fuword32((void *)(rp->fixreg[1] + 8 +
		    ((argno - 8) * sizeof(uint32_t))));
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR);
	} else {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		value = dtrace_fuword64((void *)(rp->fixreg[1] + 48 +
		    ((argno - 8) * sizeof(uint64_t))));
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR);
	}
	return (value);
}

uint64_t
fasttrap_pid_getarg(void *arg, dtrace_id_t id, void *parg, int argno,
    int aframes)
{
	struct reg r;

	fill_regs(curthread, &r);

	return (fasttrap_anarg(&r, argno));
}

uint64_t
fasttrap_usdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno,
    int aframes)
{
	struct reg r;

	fill_regs(curthread, &r);

	return (fasttrap_anarg(&r, argno));
}

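/*
 * Build the argument vector for a USDT probe by translating each probe
 * argument through the provider's argument map back to the native
 * argument it came from.
 */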
static void
fasttrap_usdt_args(fasttrap_probe_t *probe, struct reg *rp, int argc,
    uintptr_t *argv)
{
	int i, x, cap = MIN(argc, probe->ftp_nargs);

	for (i = 0; i < cap; i++) {
		x = probe->ftp_argmap[i];

		/* Native arguments 0-7 are passed in registers r3-r10. */
		if (x < 8)
			argv[i] = rp->fixreg[x + 3];
		else if (SV_PROC_FLAG(curproc, SV_ILP32))
			argv[i] = fuword32((void *)(rp->fixreg[1] + 8 +
			    (x * sizeof(uint32_t))));
		else
			argv[i] = fuword64((void *)(rp->fixreg[1] + 48 +
			    (x * sizeof(uint64_t))));
	}

	for (; i < argc; i++) {
		argv[i] = 0;
	}
}

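/*
 * Fire any return probes registered on the tracepoint at 'pc' now that the
 * traced instruction has transferred control to 'new_pc'.
 */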
static void
fasttrap_return_common(struct reg *rp, uintptr_t pc, pid_t pid,
    uintptr_t new_pc)
{
	struct rm_priotracker tracker;
	fasttrap_tracepoint_t *tp;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;

	rm_rlock(&fasttrap_tp_lock, &tracker);
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
		    tp->ftt_proc->ftpc_acount != 0)
			break;
	}

	/*
	 * Don't sweat it if we can't find the tracepoint again; unlike
	 * when we're in fasttrap_pid_probe(), finding the tracepoint here
	 * is not essential to the correct execution of the process.
	 */
	if (tp == NULL) {
		rm_runlock(&fasttrap_tp_lock, &tracker);
		return;
	}

	for (id = tp->ftt_retids; id != NULL; id = id->fti_next) {
		/*
		 * A branch can only act as a return site if its target
		 * lies outside the function, so skip function-local
		 * branches (jump tables, loops) without firing the probe.
		 */
		if ((new_pc - id->fti_probe->ftp_faddr) <
		    id->fti_probe->ftp_fsize)
			continue;

		dtrace_probe(id->fti_probe->ftp_id,
		    pc - id->fti_probe->ftp_faddr,
		    rp->fixreg[3], rp->fixreg[4], 0, 0);
	}
	rm_runlock(&fasttrap_tp_lock, &tracker);
}


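/*
 * Decide whether a conditional branch with the given BO/BI fields would be
 * taken, updating CTR as a real execution would.  In the BO field, bit 4
 * (0x10) means "ignore the CR bit", bit 3 (0x08) gives the CR value to
 * branch on, bit 2 (0x04) means "do not decrement CTR", and bit 1 (0x02)
 * selects branching on CTR == 0 rather than CTR != 0.  BI names the
 * condition-register bit to test.
 */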
static int
fasttrap_branch_taken(int bo, int bi, struct reg *regs)
{
	int crzero = 0;

	/* Branch always? */
	if ((bo & 0x14) == 0x14)
		return (1);

	/* Handle decrementing ctr */
	if (!(bo & 0x04)) {
		--regs->ctr;
		crzero = (regs->ctr == 0);
		if (bo & 0x10) {
			return (!(crzero ^ ((bo >> 1) & 1)));
		}
	}

	return (crzero |
	    ((((regs->cr >> (31 - bi)) & 1) ^ ((bo >> 3) & 1)) ^ 1));
}


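/*
 * A user thread has hit a fasttrap tracepoint.  Fire the probes attached to
 * it, emulate the displaced instruction, and arrange for the thread to
 * resume at the correct address.
 */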
int
fasttrap_pid_probe(struct reg *rp)
{
	struct rm_priotracker tracker;
	proc_t *p = curproc;
	uintptr_t pc = rp->pc;
	uintptr_t new_pc = 0;
	fasttrap_bucket_t *bucket;
	fasttrap_tracepoint_t *tp, tp_local;
	pid_t pid;
	dtrace_icookie_t cookie;
	uint_t is_enabled = 0;

	/*
	 * It's possible that a user (in a veritable orgy of bad planning)
	 * could redirect this thread's flow of control before it reached the
	 * return probe fasttrap. In this case we need to kill the process
	 * since it's in an unrecoverable state.
	 */
	if (curthread->t_dtrace_step) {
		ASSERT(curthread->t_dtrace_on);
		fasttrap_sigtrap(p, curthread, pc);
		return (0);
	}

	/*
	 * Clear all user tracing flags.
	 */
	curthread->t_dtrace_ft = 0;
	curthread->t_dtrace_pc = 0;
	curthread->t_dtrace_npc = 0;
	curthread->t_dtrace_scrpc = 0;
	curthread->t_dtrace_astpc = 0;

	rm_rlock(&fasttrap_tp_lock, &tracker);
	pid = p->p_pid;
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * Look up the tracepoint that the process just hit.
	 */
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
		    tp->ftt_proc->ftpc_acount != 0)
			break;
	}

	/*
	 * If we couldn't find a matching tracepoint, either a tracepoint has
	 * been inserted without using the pid<pid> ioctl interface (see
	 * fasttrap_ioctl), or somehow we have mislaid this tracepoint.
	 */
	if (tp == NULL) {
		rm_runlock(&fasttrap_tp_lock, &tracker);
		return (-1);
	}

	if (tp->ftt_ids != NULL) {
		fasttrap_id_t *id;

		for (id = tp->ftt_ids; id != NULL; id = id->fti_next) {
			fasttrap_probe_t *probe = id->fti_probe;

			if (id->fti_ptype == DTFTP_ENTRY) {
				/*
				 * We note that this was an entry
				 * probe to help ustack() find the
				 * first caller.
				 */
				cookie = dtrace_interrupt_disable();
				DTRACE_CPUFLAG_SET(CPU_DTRACE_ENTRY);
				dtrace_probe(probe->ftp_id, rp->fixreg[3],
				    rp->fixreg[4], rp->fixreg[5], rp->fixreg[6],
				    rp->fixreg[7]);
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_ENTRY);
				dtrace_interrupt_enable(cookie);
			} else if (id->fti_ptype == DTFTP_IS_ENABLED) {
				/*
				 * Note that in this case, we don't
				 * call dtrace_probe() since it's only
				 * an artificial probe meant to change
				 * the flow of control so that it
				 * encounters the true probe.
				 */
				is_enabled = 1;
			} else if (probe->ftp_argmap == NULL) {
				dtrace_probe(probe->ftp_id, rp->fixreg[3],
				    rp->fixreg[4], rp->fixreg[5], rp->fixreg[6],
				    rp->fixreg[7]);
			} else {
				uintptr_t t[5];

				fasttrap_usdt_args(probe, rp,
				    sizeof (t) / sizeof (t[0]), t);

				dtrace_probe(probe->ftp_id, t[0], t[1],
				    t[2], t[3], t[4]);
			}
		}
	}

	/*
	 * We're about to do a bunch of work so we cache a local copy of
	 * the tracepoint to emulate the instruction, and then find the
	 * tracepoint again later if we need to light up any return probes.
	 */
	tp_local = *tp;
	rm_runlock(&fasttrap_tp_lock, &tracker);
	tp = &tp_local;

	/*
	 * If there's an is-enabled probe connected to this tracepoint it
	 * means that there was a 'xor r3, r3, r3' instruction that was
	 * placed there by DTrace when the binary was linked. As this probe
	 * is, in fact, enabled, we need to stuff 1 into R3. Accordingly,
	 * we can bypass all the instruction emulation logic since we know
	 * the inevitable result. It's possible that a user could construct
	 * a scenario where the 'is-enabled' probe was on some other
	 * instruction, but that would be a rather exotic way to shoot
	 * oneself in the foot.
	 */
	if (is_enabled) {
		rp->fixreg[3] = 1;
		new_pc = rp->pc + 4;
		goto done;
	}

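	/*
	 * Emulate the displaced instruction.  new_pc is where the thread
	 * will resume: the branch target for taken branches, otherwise the
	 * instruction following the tracepoint.
	 */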
	switch (tp->ftt_type) {
	case FASTTRAP_T_NOP:
		new_pc = rp->pc + 4;
		break;
	case FASTTRAP_T_BC:
		if (!fasttrap_branch_taken(tp->ftt_bo, tp->ftt_bi, rp)) {
			/* Branch not taken; resume at the next instruction. */
			new_pc = rp->pc + 4;
			break;
		}
		/* FALLTHROUGH */
	case FASTTRAP_T_B:
		if (tp->ftt_instr & 0x01)
			rp->lr = rp->pc + 4;
		new_pc = tp->ftt_dest;
		break;
	case FASTTRAP_T_BLR:
	case FASTTRAP_T_BCTR:
		if (!fasttrap_branch_taken(tp->ftt_bo, tp->ftt_bi, rp)) {
			/* Branch not taken; resume at the next instruction. */
			new_pc = rp->pc + 4;
			break;
		}
		if (tp->ftt_type == FASTTRAP_T_BCTR)
			new_pc = rp->ctr;
		else
			new_pc = rp->lr;
		if (tp->ftt_instr & 0x01)
			rp->lr = rp->pc + 4;
		break;
	case FASTTRAP_T_COMMON:
		break;
	}
done:
	/*
	 * If there were no return probes when we first found the tracepoint,
	 * we should feel no obligation to honor any return probes that were
	 * subsequently enabled -- they'll just have to wait until the next
	 * time around.
	 */
	if (tp->ftt_retids != NULL) {
		/*
		 * We need to wait until the results of the instruction are
		 * apparent before invoking any return probes. If this
		 * instruction was emulated we can just call
		 * fasttrap_return_common(); if it needs to be executed, we
		 * need to wait until the user thread returns to the kernel.
		 */
		if (tp->ftt_type != FASTTRAP_T_COMMON) {
			fasttrap_return_common(rp, pc, pid, new_pc);
		} else {
			ASSERT(curthread->t_dtrace_ret != 0);
			ASSERT(curthread->t_dtrace_pc == pc);
			ASSERT(curthread->t_dtrace_scrpc != 0);
			ASSERT(new_pc == curthread->t_dtrace_astpc);
		}
	}

	rp->pc = new_pc;
	set_regs(curthread, rp);

	return (0);
}

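/*
 * Called from dtrace_return_probe() after the traced thread has executed
 * the original instruction and returned to the kernel; fire any pending
 * return probes for the tracepoint it came from.
 */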
int
fasttrap_return_probe(struct reg *rp)
{
	proc_t *p = curproc;
	uintptr_t pc = curthread->t_dtrace_pc;
	uintptr_t npc = curthread->t_dtrace_npc;

	curthread->t_dtrace_pc = 0;
	curthread->t_dtrace_npc = 0;
	curthread->t_dtrace_scrpc = 0;
	curthread->t_dtrace_astpc = 0;

	/*
	 * We set rp->pc to the address of the traced instruction so
	 * that it appears to dtrace_probe() that we're on the original
	 * instruction, and so that the user can't easily detect our
	 * complex web of lies. dtrace_return_probe() (our caller)
	 * will correctly set %pc after we return.
	 */
	rp->pc = pc;

	fasttrap_return_common(rp, pc, p->p_pid, npc);

	return (0);
}