1*404b540aSrobert /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2*404b540aSrobert Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
3*404b540aSrobert Contributed by Frank Ch. Eigler <fche@redhat.com>
4*404b540aSrobert and Graydon Hoare <graydon@redhat.com>
5*404b540aSrobert Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>,
6*404b540aSrobert adapted from libiberty.
7*404b540aSrobert
8*404b540aSrobert This file is part of GCC.
9*404b540aSrobert
10*404b540aSrobert GCC is free software; you can redistribute it and/or modify it under
11*404b540aSrobert the terms of the GNU General Public License as published by the Free
12*404b540aSrobert Software Foundation; either version 2, or (at your option) any later
13*404b540aSrobert version.
14*404b540aSrobert
15*404b540aSrobert In addition to the permissions in the GNU General Public License, the
16*404b540aSrobert Free Software Foundation gives you unlimited permission to link the
17*404b540aSrobert compiled version of this file into combinations with other programs,
18*404b540aSrobert and to distribute those combinations without any restriction coming
19*404b540aSrobert from the use of this file. (The General Public License restrictions
20*404b540aSrobert do apply in other respects; for example, they cover modification of
21*404b540aSrobert the file, and distribution when not linked into a combine
22*404b540aSrobert executable.)
23*404b540aSrobert
24*404b540aSrobert GCC is distributed in the hope that it will be useful, but WITHOUT ANY
25*404b540aSrobert WARRANTY; without even the implied warranty of MERCHANTABILITY or
26*404b540aSrobert FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27*404b540aSrobert for more details.
28*404b540aSrobert
29*404b540aSrobert You should have received a copy of the GNU General Public License
30*404b540aSrobert along with GCC; see the file COPYING. If not, write to the Free
31*404b540aSrobert Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
32*404b540aSrobert 02110-1301, USA. */
33*404b540aSrobert
34*404b540aSrobert #include "config.h"
35*404b540aSrobert
36*404b540aSrobert /* These attempt to coax various unix flavours to declare all our
37*404b540aSrobert needed tidbits in the system headers. */
38*404b540aSrobert #if !defined(__FreeBSD__) && !defined(__APPLE__)
39*404b540aSrobert #define _POSIX_SOURCE
40*404b540aSrobert #endif /* Some BSDs break <sys/socket.h> if this is defined. */
41*404b540aSrobert #define _GNU_SOURCE
42*404b540aSrobert #define _XOPEN_SOURCE
43*404b540aSrobert #define _BSD_TYPES
44*404b540aSrobert #define __EXTENSIONS__
45*404b540aSrobert #define _ALL_SOURCE
46*404b540aSrobert #define _LARGE_FILE_API
47*404b540aSrobert #define _XOPEN_SOURCE_EXTENDED 1
48*404b540aSrobert
49*404b540aSrobert #include <stdio.h>
50*404b540aSrobert #include <stdlib.h>
51*404b540aSrobert #include <sys/types.h>
52*404b540aSrobert #include <sys/time.h>
53*404b540aSrobert #include <time.h>
54*404b540aSrobert #include <unistd.h>
55*404b540aSrobert #ifdef HAVE_EXECINFO_H
56*404b540aSrobert #include <execinfo.h>
57*404b540aSrobert #endif
58*404b540aSrobert #ifdef HAVE_SIGNAL_H
59*404b540aSrobert #include <signal.h>
60*404b540aSrobert #endif
61*404b540aSrobert #include <assert.h>
62*404b540aSrobert
63*404b540aSrobert #include <string.h>
64*404b540aSrobert #include <limits.h>
65*404b540aSrobert #include <sys/types.h>
66*404b540aSrobert #include <signal.h>
67*404b540aSrobert #include <errno.h>
68*404b540aSrobert #include <ctype.h>
69*404b540aSrobert
70*404b540aSrobert #include "mf-runtime.h"
71*404b540aSrobert #include "mf-impl.h"
72*404b540aSrobert
73*404b540aSrobert
74*404b540aSrobert /* ------------------------------------------------------------------------ */
75*404b540aSrobert /* Splay-tree implementation. */
76*404b540aSrobert
77*404b540aSrobert typedef uintptr_t mfsplay_tree_key;
78*404b540aSrobert typedef void *mfsplay_tree_value;
79*404b540aSrobert
80*404b540aSrobert /* Forward declaration for a node in the tree. */
81*404b540aSrobert typedef struct mfsplay_tree_node_s *mfsplay_tree_node;
82*404b540aSrobert
83*404b540aSrobert /* The type of a function used to iterate over the tree. */
84*404b540aSrobert typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);
85*404b540aSrobert
86*404b540aSrobert /* The nodes in the splay tree. */
/* The nodes in the splay tree.  Keys are uintptr_t values (see
   mfsplay_tree_key above); per the comment near the object-tree
   declarations, live-object trees are ordered on the object's low
   address used as the key. */
struct mfsplay_tree_node_s
{
  /* Data. */
  mfsplay_tree_key key;
  mfsplay_tree_value value;
  /* Children. */
  mfsplay_tree_node left;
  mfsplay_tree_node right;
  /* XXX: The addition of a parent pointer may eliminate some recursion. */
};
97*404b540aSrobert
98*404b540aSrobert /* The splay tree itself. */
/* The splay tree itself. */
struct mfsplay_tree_s
{
  /* The root of the tree. */
  mfsplay_tree_node root;

  /* The last key value for which the tree has been splayed, but not
     since modified.  last_splayed_key_p indicates whether
     last_splayed_key currently holds a valid key.  (Presumably a
     cache to skip redundant splays -- confirm against the splay
     routine later in this file.) */
  mfsplay_tree_key last_splayed_key;
  int last_splayed_key_p;

  /* Statistics. */
  unsigned num_keys;

  /* Traversal recursion control flags.  NOTE(review): depth/max_depth
     appear to bound recursion during traversal, with rebalance_p set
     when a rebalance is wanted afterwards -- verify against the
     traversal/rebalance code below. */
  unsigned max_depth;
  unsigned depth;
  unsigned rebalance_p;
};
typedef struct mfsplay_tree_s *mfsplay_tree;
118*404b540aSrobert
119*404b540aSrobert static mfsplay_tree mfsplay_tree_new (void);
120*404b540aSrobert static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
121*404b540aSrobert static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
122*404b540aSrobert static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
123*404b540aSrobert static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
124*404b540aSrobert static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
125*404b540aSrobert static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
126*404b540aSrobert static void mfsplay_tree_rebalance (mfsplay_tree sp);
127*404b540aSrobert
128*404b540aSrobert /* ------------------------------------------------------------------------ */
129*404b540aSrobert /* Utility macros */
130*404b540aSrobert
131*404b540aSrobert #define CTOR __attribute__ ((constructor))
132*404b540aSrobert #define DTOR __attribute__ ((destructor))
133*404b540aSrobert
134*404b540aSrobert
135*404b540aSrobert /* Codes to describe the context in which a violation occurs. */
136*404b540aSrobert #define __MF_VIOL_UNKNOWN 0
137*404b540aSrobert #define __MF_VIOL_READ 1
138*404b540aSrobert #define __MF_VIOL_WRITE 2
139*404b540aSrobert #define __MF_VIOL_REGISTER 3
140*404b540aSrobert #define __MF_VIOL_UNREGISTER 4
141*404b540aSrobert #define __MF_VIOL_WATCH 5
142*404b540aSrobert
143*404b540aSrobert /* Protect against recursive calls. */
144*404b540aSrobert
145*404b540aSrobert static void
begin_recursion_protect1(const char * pf)146*404b540aSrobert begin_recursion_protect1 (const char *pf)
147*404b540aSrobert {
148*404b540aSrobert if (__mf_get_state () == reentrant)
149*404b540aSrobert {
150*404b540aSrobert write (2, "mf: erroneous reentrancy detected in `", 38);
151*404b540aSrobert write (2, pf, strlen(pf));
152*404b540aSrobert write (2, "'\n", 2); \
153*404b540aSrobert abort ();
154*404b540aSrobert }
155*404b540aSrobert __mf_set_state (reentrant);
156*404b540aSrobert }
157*404b540aSrobert
158*404b540aSrobert #define BEGIN_RECURSION_PROTECT() \
159*404b540aSrobert begin_recursion_protect1 (__PRETTY_FUNCTION__)
160*404b540aSrobert
161*404b540aSrobert #define END_RECURSION_PROTECT() \
162*404b540aSrobert __mf_set_state (active)
163*404b540aSrobert
164*404b540aSrobert /* ------------------------------------------------------------------------ */
165*404b540aSrobert /* Required globals. */
166*404b540aSrobert
167*404b540aSrobert #define LOOKUP_CACHE_MASK_DFL 1023
168*404b540aSrobert #define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
169*404b540aSrobert #define LOOKUP_CACHE_SHIFT_DFL 2
170*404b540aSrobert
171*404b540aSrobert struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
172*404b540aSrobert uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
173*404b540aSrobert unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
174*404b540aSrobert #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)
175*404b540aSrobert
176*404b540aSrobert struct __mf_options __mf_opts;
177*404b540aSrobert int __mf_starting_p = 1;
178*404b540aSrobert
179*404b540aSrobert #ifdef LIBMUDFLAPTH
180*404b540aSrobert #ifdef HAVE_TLS
181*404b540aSrobert __thread enum __mf_state_enum __mf_state_1 = reentrant;
182*404b540aSrobert #endif
183*404b540aSrobert #else
184*404b540aSrobert enum __mf_state_enum __mf_state_1 = reentrant;
185*404b540aSrobert #endif
186*404b540aSrobert
187*404b540aSrobert #ifdef LIBMUDFLAPTH
188*404b540aSrobert pthread_mutex_t __mf_biglock =
189*404b540aSrobert #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
190*404b540aSrobert PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
191*404b540aSrobert #else
192*404b540aSrobert PTHREAD_MUTEX_INITIALIZER;
193*404b540aSrobert #endif
194*404b540aSrobert #endif
195*404b540aSrobert
196*404b540aSrobert /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
197*404b540aSrobert the libmudflap.la (no threading support) can diagnose whether
198*404b540aSrobert the application is linked with -lpthread. See __mf_usage() below. */
199*404b540aSrobert #if HAVE_PTHREAD_H
200*404b540aSrobert #ifdef _POSIX_THREADS
201*404b540aSrobert #pragma weak pthread_join
202*404b540aSrobert #else
203*404b540aSrobert #define pthread_join NULL
204*404b540aSrobert #endif
205*404b540aSrobert #endif
206*404b540aSrobert
207*404b540aSrobert
208*404b540aSrobert /* ------------------------------------------------------------------------ */
209*404b540aSrobert /* stats-related globals. */
210*404b540aSrobert
211*404b540aSrobert static unsigned long __mf_count_check;
212*404b540aSrobert static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
213*404b540aSrobert static unsigned long __mf_count_register;
214*404b540aSrobert static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
215*404b540aSrobert static unsigned long __mf_count_unregister;
216*404b540aSrobert static unsigned long __mf_total_unregister_size;
217*404b540aSrobert static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
218*404b540aSrobert static unsigned long __mf_sigusr1_received;
219*404b540aSrobert static unsigned long __mf_sigusr1_handled;
220*404b540aSrobert /* not static */ unsigned long __mf_reentrancy;
221*404b540aSrobert #ifdef LIBMUDFLAPTH
222*404b540aSrobert /* not static */ unsigned long __mf_lock_contention;
223*404b540aSrobert #endif
224*404b540aSrobert
225*404b540aSrobert
226*404b540aSrobert /* ------------------------------------------------------------------------ */
227*404b540aSrobert /* mode-check-related globals. */
228*404b540aSrobert
/* Bookkeeping record for one registered memory object (a region
   [low, high] announced via __mf_register).  Stored in the per-type
   splay trees; moved to the cemetery arrays after unregistration. */
typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object. */
  unsigned write_count; /* Likewise for __mf_check/write. */
  unsigned liveness; /* A measure of recent checking activity. */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */

  /* Allocation context: PC, wall-clock time, and (optionally) a
     captured backtrace recorded when the object was registered. */
  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;       /* alloc_backtrace_size entries. */
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  /* Deallocation context; meaningful only when deallocated_p is set. */
  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;     /* dealloc_backtrace_size entries. */
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;
257*404b540aSrobert
258*404b540aSrobert /* Live objects: splay trees, separated by type, ordered on .low (base address). */
259*404b540aSrobert /* Actually stored as static vars within lookup function below. */
260*404b540aSrobert
261*404b540aSrobert /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
262*404b540aSrobert static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
263*404b540aSrobert static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];
264*404b540aSrobert
265*404b540aSrobert
266*404b540aSrobert /* ------------------------------------------------------------------------ */
267*404b540aSrobert /* Forward function declarations */
268*404b540aSrobert
269*404b540aSrobert void __mf_init () CTOR;
270*404b540aSrobert static void __mf_sigusr1_respond ();
271*404b540aSrobert static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
272*404b540aSrobert __mf_object_t **objs, unsigned max_objs);
273*404b540aSrobert static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
274*404b540aSrobert __mf_object_t **objs, unsigned max_objs, int type);
275*404b540aSrobert static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
276*404b540aSrobert __mf_object_t **objs, unsigned max_objs);
277*404b540aSrobert static void __mf_adapt_cache ();
278*404b540aSrobert static void __mf_describe_object (__mf_object_t *obj);
279*404b540aSrobert static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
280*404b540aSrobert static mfsplay_tree __mf_object_tree (int type);
281*404b540aSrobert static void __mf_link_object (__mf_object_t *node);
282*404b540aSrobert static void __mf_unlink_object (__mf_object_t *node);
283*404b540aSrobert
284*404b540aSrobert
285*404b540aSrobert /* ------------------------------------------------------------------------ */
286*404b540aSrobert /* Configuration engine */
287*404b540aSrobert
288*404b540aSrobert static void
__mf_set_default_options()289*404b540aSrobert __mf_set_default_options ()
290*404b540aSrobert {
291*404b540aSrobert memset (& __mf_opts, 0, sizeof (__mf_opts));
292*404b540aSrobert
293*404b540aSrobert __mf_opts.adapt_cache = 1000003;
294*404b540aSrobert __mf_opts.abbreviate = 1;
295*404b540aSrobert __mf_opts.verbose_violations = 1;
296*404b540aSrobert __mf_opts.free_queue_length = 4;
297*404b540aSrobert __mf_opts.persistent_count = 100;
298*404b540aSrobert __mf_opts.crumple_zone = 32;
299*404b540aSrobert __mf_opts.backtrace = 4;
300*404b540aSrobert __mf_opts.timestamps = 1;
301*404b540aSrobert __mf_opts.mudflap_mode = mode_check;
302*404b540aSrobert __mf_opts.violation_mode = viol_nop;
303*404b540aSrobert __mf_opts.heur_std_data = 1;
304*404b540aSrobert #ifdef LIBMUDFLAPTH
305*404b540aSrobert __mf_opts.thread_stack = 0;
306*404b540aSrobert #endif
307*404b540aSrobert }
308*404b540aSrobert
/* Table of recognized MUDFLAP_OPTIONS words, consumed by
   __mfu_set_options and printed by __mf_usage.  A set_option entry
   stores `value' into *target on `-name' (0 on `-no-name'); a
   read_integer_option entry parses `-name=N' and stores N.  The table
   is terminated by a null name.  */
static struct option
{
  char *name;           /* Option word, without the leading dash.  */
  char *description;    /* One-line help text for __mf_usage.  */
  enum
  {
    set_option,
    read_integer_option,
  } type;
  unsigned value;       /* Value stored for set_option entries.  */
  unsigned *target;     /* Destination field, usually in __mf_opts.  */
}
options [] =
  {
    /* Mutually-exclusive overall operating modes.  */
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},

    /* Mutually-exclusive violation responses.  */
    {"viol-nop",
     "violations do not change program execution",
     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},

    /* Boolean toggles.  */
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
#ifdef SIGUSR1
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
#endif
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},

    /* Integer-valued tunables.  */
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}   /* Terminator.  */
  };
431*404b540aSrobert
432*404b540aSrobert static void
__mf_usage()433*404b540aSrobert __mf_usage ()
434*404b540aSrobert {
435*404b540aSrobert struct option *opt;
436*404b540aSrobert
437*404b540aSrobert fprintf (stderr,
438*404b540aSrobert "This is a %s%sGCC \"mudflap\" memory-checked binary.\n"
439*404b540aSrobert "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n"
440*404b540aSrobert "\n"
441*404b540aSrobert "The mudflap code can be controlled by an environment variable:\n"
442*404b540aSrobert "\n"
443*404b540aSrobert "$ export MUDFLAP_OPTIONS='<options>'\n"
444*404b540aSrobert "$ <mudflapped_program>\n"
445*404b540aSrobert "\n"
446*404b540aSrobert "where <options> is a space-separated list of \n"
447*404b540aSrobert "any of the following options. Use `-no-OPTION' to disable options.\n"
448*404b540aSrobert "\n",
449*404b540aSrobert #if HAVE_PTHREAD_H
450*404b540aSrobert (pthread_join ? "multi-threaded " : "single-threaded "),
451*404b540aSrobert #else
452*404b540aSrobert "",
453*404b540aSrobert #endif
454*404b540aSrobert #if LIBMUDFLAPTH
455*404b540aSrobert "thread-aware "
456*404b540aSrobert #else
457*404b540aSrobert "thread-unaware "
458*404b540aSrobert #endif
459*404b540aSrobert );
460*404b540aSrobert /* XXX: The multi-threaded thread-unaware combination is bad. */
461*404b540aSrobert
462*404b540aSrobert for (opt = options; opt->name; opt++)
463*404b540aSrobert {
464*404b540aSrobert int default_p = (opt->value == * opt->target);
465*404b540aSrobert
466*404b540aSrobert switch (opt->type)
467*404b540aSrobert {
468*404b540aSrobert char buf[128];
469*404b540aSrobert case set_option:
470*404b540aSrobert fprintf (stderr, "-%-23.23s %s", opt->name, opt->description);
471*404b540aSrobert if (default_p)
472*404b540aSrobert fprintf (stderr, " [active]\n");
473*404b540aSrobert else
474*404b540aSrobert fprintf (stderr, "\n");
475*404b540aSrobert break;
476*404b540aSrobert case read_integer_option:
477*404b540aSrobert strncpy (buf, opt->name, 128);
478*404b540aSrobert strncpy (buf + strlen (opt->name), "=N", 2);
479*404b540aSrobert fprintf (stderr, "-%-23.23s %s", buf, opt->description);
480*404b540aSrobert fprintf (stderr, " [%d]\n", * opt->target);
481*404b540aSrobert break;
482*404b540aSrobert default: abort();
483*404b540aSrobert }
484*404b540aSrobert }
485*404b540aSrobert
486*404b540aSrobert fprintf (stderr, "\n");
487*404b540aSrobert }
488*404b540aSrobert
489*404b540aSrobert
/* Public, locked entry point for changing runtime options from a
   string.  Takes the big lock and the reentrancy guard, then
   delegates parsing to __mfu_set_options.  Returns that function's
   result (0 on success, -1 if the caller should print usage).  */
int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
504*404b540aSrobert
505*404b540aSrobert
506*404b540aSrobert int
__mfu_set_options(const char * optstr)507*404b540aSrobert __mfu_set_options (const char *optstr)
508*404b540aSrobert {
509*404b540aSrobert struct option *opts = 0;
510*404b540aSrobert char *nxt = 0;
511*404b540aSrobert long tmp = 0;
512*404b540aSrobert int rc = 0;
513*404b540aSrobert const char *saved_optstr = optstr;
514*404b540aSrobert
515*404b540aSrobert /* XXX: bounds-check for optstr! */
516*404b540aSrobert
517*404b540aSrobert while (*optstr)
518*404b540aSrobert {
519*404b540aSrobert switch (*optstr) {
520*404b540aSrobert case ' ':
521*404b540aSrobert case '\t':
522*404b540aSrobert case '\n':
523*404b540aSrobert optstr++;
524*404b540aSrobert break;
525*404b540aSrobert
526*404b540aSrobert case '-':
527*404b540aSrobert if (*optstr+1)
528*404b540aSrobert {
529*404b540aSrobert int negate = 0;
530*404b540aSrobert optstr++;
531*404b540aSrobert
532*404b540aSrobert if (*optstr == '?' ||
533*404b540aSrobert strncmp (optstr, "help", 4) == 0)
534*404b540aSrobert {
535*404b540aSrobert /* Caller will print help and exit. */
536*404b540aSrobert return -1;
537*404b540aSrobert }
538*404b540aSrobert
539*404b540aSrobert if (strncmp (optstr, "no-", 3) == 0)
540*404b540aSrobert {
541*404b540aSrobert negate = 1;
542*404b540aSrobert optstr = & optstr[3];
543*404b540aSrobert }
544*404b540aSrobert
545*404b540aSrobert for (opts = options; opts->name; opts++)
546*404b540aSrobert {
547*404b540aSrobert if (strncmp (optstr, opts->name, strlen (opts->name)) == 0)
548*404b540aSrobert {
549*404b540aSrobert optstr += strlen (opts->name);
550*404b540aSrobert assert (opts->target);
551*404b540aSrobert switch (opts->type)
552*404b540aSrobert {
553*404b540aSrobert case set_option:
554*404b540aSrobert if (negate)
555*404b540aSrobert *(opts->target) = 0;
556*404b540aSrobert else
557*404b540aSrobert *(opts->target) = opts->value;
558*404b540aSrobert break;
559*404b540aSrobert case read_integer_option:
560*404b540aSrobert if (! negate && (*optstr == '=' && *(optstr+1)))
561*404b540aSrobert {
562*404b540aSrobert optstr++;
563*404b540aSrobert tmp = strtol (optstr, &nxt, 10);
564*404b540aSrobert if ((optstr != nxt) && (tmp != LONG_MAX))
565*404b540aSrobert {
566*404b540aSrobert optstr = nxt;
567*404b540aSrobert *(opts->target) = (int)tmp;
568*404b540aSrobert }
569*404b540aSrobert }
570*404b540aSrobert else if (negate)
571*404b540aSrobert * opts->target = 0;
572*404b540aSrobert break;
573*404b540aSrobert }
574*404b540aSrobert }
575*404b540aSrobert }
576*404b540aSrobert }
577*404b540aSrobert break;
578*404b540aSrobert
579*404b540aSrobert default:
580*404b540aSrobert fprintf (stderr,
581*404b540aSrobert "warning: unrecognized string '%s' in mudflap options\n",
582*404b540aSrobert optstr);
583*404b540aSrobert optstr += strlen (optstr);
584*404b540aSrobert rc = -1;
585*404b540aSrobert break;
586*404b540aSrobert }
587*404b540aSrobert }
588*404b540aSrobert
589*404b540aSrobert /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */
590*404b540aSrobert __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
591*404b540aSrobert __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1);
592*404b540aSrobert
593*404b540aSrobert /* Clear the lookup cache, in case the parameters got changed. */
594*404b540aSrobert /* XXX: race */
595*404b540aSrobert memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
596*404b540aSrobert /* void slot 0 */
597*404b540aSrobert __mf_lookup_cache[0].low = MAXPTR;
598*404b540aSrobert
599*404b540aSrobert TRACE ("set options from `%s'\n", saved_optstr);
600*404b540aSrobert
601*404b540aSrobert /* Call this unconditionally, in case -sigusr1-report was toggled. */
602*404b540aSrobert __mf_sigusr1_respond ();
603*404b540aSrobert
604*404b540aSrobert return rc;
605*404b540aSrobert }
606*404b540aSrobert
607*404b540aSrobert
608*404b540aSrobert #ifdef PIC
609*404b540aSrobert
610*404b540aSrobert void
__mf_resolve_single_dynamic(struct __mf_dynamic_entry * e)611*404b540aSrobert __mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
612*404b540aSrobert {
613*404b540aSrobert char *err;
614*404b540aSrobert
615*404b540aSrobert assert (e);
616*404b540aSrobert if (e->pointer) return;
617*404b540aSrobert
618*404b540aSrobert #if HAVE_DLVSYM
619*404b540aSrobert if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
620*404b540aSrobert e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
621*404b540aSrobert else
622*404b540aSrobert #endif
623*404b540aSrobert e->pointer = dlsym (RTLD_NEXT, e->name);
624*404b540aSrobert
625*404b540aSrobert err = dlerror ();
626*404b540aSrobert
627*404b540aSrobert if (err)
628*404b540aSrobert {
629*404b540aSrobert fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
630*404b540aSrobert e->name, err);
631*404b540aSrobert abort ();
632*404b540aSrobert }
633*404b540aSrobert if (! e->pointer)
634*404b540aSrobert {
635*404b540aSrobert fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
636*404b540aSrobert abort ();
637*404b540aSrobert }
638*404b540aSrobert }
639*404b540aSrobert
640*404b540aSrobert
641*404b540aSrobert static void
__mf_resolve_dynamics()642*404b540aSrobert __mf_resolve_dynamics ()
643*404b540aSrobert {
644*404b540aSrobert int i;
645*404b540aSrobert for (i = 0; i < dyn_INITRESOLVE; i++)
646*404b540aSrobert __mf_resolve_single_dynamic (& __mf_dynamic[i]);
647*404b540aSrobert }
648*404b540aSrobert
649*404b540aSrobert
650*404b540aSrobert /* NB: order must match enums in mf-impl.h */
/* Table of libc entry points the runtime resolves at startup via
   __mf_resolve_single_dynamic: {resolved pointer, symbol name,
   optional symbol version}.  Entries before the "DUMMY" marker are
   resolved eagerly by __mf_resolve_dynamics.
   NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
  {
    {NULL, "calloc", NULL},
    {NULL, "free", NULL},
    {NULL, "malloc", NULL},
    {NULL, "mmap", NULL},
    {NULL, "munmap", NULL},
    {NULL, "realloc", NULL},
    {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
    {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
    {NULL, "pthread_join", NULL},
    {NULL, "pthread_exit", NULL}
#endif
  };
666*404b540aSrobert
667*404b540aSrobert #endif /* PIC */
668*404b540aSrobert
669*404b540aSrobert
670*404b540aSrobert
671*404b540aSrobert /* ------------------------------------------------------------------------ */
672*404b540aSrobert
673*404b540aSrobert /* Lookup & manage automatic initialization of the five or so splay trees. */
674*404b540aSrobert static mfsplay_tree
__mf_object_tree(int type)675*404b540aSrobert __mf_object_tree (int type)
676*404b540aSrobert {
677*404b540aSrobert static mfsplay_tree trees [__MF_TYPE_MAX+1];
678*404b540aSrobert assert (type >= 0 && type <= __MF_TYPE_MAX);
679*404b540aSrobert if (UNLIKELY (trees[type] == NULL))
680*404b540aSrobert trees[type] = mfsplay_tree_new ();
681*404b540aSrobert return trees[type];
682*404b540aSrobert }
683*404b540aSrobert
684*404b540aSrobert
685*404b540aSrobert /* not static */void
__mf_init()686*404b540aSrobert __mf_init ()
687*404b540aSrobert {
688*404b540aSrobert char *ov = 0;
689*404b540aSrobert
690*404b540aSrobert /* Return if initialization has already been done. */
691*404b540aSrobert if (LIKELY (__mf_starting_p == 0))
692*404b540aSrobert return;
693*404b540aSrobert
694*404b540aSrobert /* This initial bootstrap phase requires that __mf_starting_p = 1. */
695*404b540aSrobert #ifdef PIC
696*404b540aSrobert __mf_resolve_dynamics ();
697*404b540aSrobert #endif
698*404b540aSrobert __mf_starting_p = 0;
699*404b540aSrobert
700*404b540aSrobert __mf_set_state (active);
701*404b540aSrobert
702*404b540aSrobert __mf_set_default_options ();
703*404b540aSrobert
704*404b540aSrobert ov = getenv ("MUDFLAP_OPTIONS");
705*404b540aSrobert if (ov)
706*404b540aSrobert {
707*404b540aSrobert int rc = __mfu_set_options (ov);
708*404b540aSrobert if (rc < 0)
709*404b540aSrobert {
710*404b540aSrobert __mf_usage ();
711*404b540aSrobert exit (1);
712*404b540aSrobert }
713*404b540aSrobert }
714*404b540aSrobert
715*404b540aSrobert /* Initialize to a non-zero description epoch. */
716*404b540aSrobert __mf_describe_object (NULL);
717*404b540aSrobert
718*404b540aSrobert #define REG_RESERVED(obj) \
719*404b540aSrobert __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)
720*404b540aSrobert
721*404b540aSrobert REG_RESERVED (__mf_lookup_cache);
722*404b540aSrobert REG_RESERVED (__mf_lc_mask);
723*404b540aSrobert REG_RESERVED (__mf_lc_shift);
724*404b540aSrobert /* XXX: others of our statics? */
725*404b540aSrobert
726*404b540aSrobert /* Prevent access to *NULL. */
727*404b540aSrobert __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
728*404b540aSrobert __mf_lookup_cache[0].low = (uintptr_t) -1;
729*404b540aSrobert }
730*404b540aSrobert
731*404b540aSrobert
732*404b540aSrobert
733*404b540aSrobert int
__wrap_main(int argc,char * argv[])734*404b540aSrobert __wrap_main (int argc, char* argv[])
735*404b540aSrobert {
736*404b540aSrobert extern char **environ;
737*404b540aSrobert extern int main ();
738*404b540aSrobert extern int __real_main ();
739*404b540aSrobert static int been_here = 0;
740*404b540aSrobert
741*404b540aSrobert if (__mf_opts.heur_std_data && ! been_here)
742*404b540aSrobert {
743*404b540aSrobert unsigned i;
744*404b540aSrobert
745*404b540aSrobert been_here = 1;
746*404b540aSrobert __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
747*404b540aSrobert for (i=0; i<argc; i++)
748*404b540aSrobert {
749*404b540aSrobert unsigned j = strlen (argv[i]);
750*404b540aSrobert __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
751*404b540aSrobert }
752*404b540aSrobert
753*404b540aSrobert for (i=0; ; i++)
754*404b540aSrobert {
755*404b540aSrobert char *e = environ[i];
756*404b540aSrobert unsigned j;
757*404b540aSrobert if (e == NULL) break;
758*404b540aSrobert j = strlen (environ[i]);
759*404b540aSrobert __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
760*404b540aSrobert }
761*404b540aSrobert __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");
762*404b540aSrobert
763*404b540aSrobert __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");
764*404b540aSrobert
765*404b540aSrobert __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
766*404b540aSrobert __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
767*404b540aSrobert __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");
768*404b540aSrobert
769*404b540aSrobert /* Make some effort to register ctype.h static arrays. */
770*404b540aSrobert /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
771*404b540aSrobert /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
772*404b540aSrobert with in mf-hooks2.c. */
773*404b540aSrobert }
774*404b540aSrobert
775*404b540aSrobert #ifdef PIC
776*404b540aSrobert return main (argc, argv, environ);
777*404b540aSrobert #else
778*404b540aSrobert return __real_main (argc, argv, environ);
779*404b540aSrobert #endif
780*404b540aSrobert }
781*404b540aSrobert
782*404b540aSrobert
783*404b540aSrobert
784*404b540aSrobert extern void __mf_fini () DTOR;
__mf_fini()785*404b540aSrobert void __mf_fini ()
786*404b540aSrobert {
787*404b540aSrobert TRACE ("__mf_fini\n");
788*404b540aSrobert __mfu_report ();
789*404b540aSrobert
790*404b540aSrobert #ifndef PIC
791*404b540aSrobert /* Since we didn't populate the tree for allocations in constructors
792*404b540aSrobert before __mf_init, we cannot check destructors after __mf_fini. */
793*404b540aSrobert __mf_opts.mudflap_mode = mode_nop;
794*404b540aSrobert #endif
795*404b540aSrobert }
796*404b540aSrobert
797*404b540aSrobert
798*404b540aSrobert
799*404b540aSrobert /* ------------------------------------------------------------------------ */
800*404b540aSrobert /* __mf_check */
801*404b540aSrobert
__mf_check(void * ptr,size_t sz,int type,const char * location)802*404b540aSrobert void __mf_check (void *ptr, size_t sz, int type, const char *location)
803*404b540aSrobert {
804*404b540aSrobert LOCKTH ();
805*404b540aSrobert BEGIN_RECURSION_PROTECT ();
806*404b540aSrobert __mfu_check (ptr, sz, type, location);
807*404b540aSrobert END_RECURSION_PROTECT ();
808*404b540aSrobert UNLOCKTH ();
809*404b540aSrobert }
810*404b540aSrobert
811*404b540aSrobert
__mfu_check(void * ptr,size_t sz,int type,const char * location)812*404b540aSrobert void __mfu_check (void *ptr, size_t sz, int type, const char *location)
813*404b540aSrobert {
814*404b540aSrobert unsigned entry_idx = __MF_CACHE_INDEX (ptr);
815*404b540aSrobert struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
816*404b540aSrobert int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
817*404b540aSrobert uintptr_t ptr_low = (uintptr_t) ptr;
818*404b540aSrobert uintptr_t ptr_high = CLAMPSZ (ptr, sz);
819*404b540aSrobert struct __mf_cache old_entry = *entry;
820*404b540aSrobert
821*404b540aSrobert if (UNLIKELY (__mf_opts.sigusr1_report))
822*404b540aSrobert __mf_sigusr1_respond ();
823*404b540aSrobert if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
824*404b540aSrobert return;
825*404b540aSrobert
826*404b540aSrobert TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
827*404b540aSrobert ptr, entry_idx, (unsigned long)sz,
828*404b540aSrobert (type == 0 ? "read" : "write"), location);
829*404b540aSrobert
830*404b540aSrobert switch (__mf_opts.mudflap_mode)
831*404b540aSrobert {
832*404b540aSrobert case mode_nop:
833*404b540aSrobert /* It is tempting to poison the cache here similarly to
834*404b540aSrobert mode_populate. However that eliminates a valuable
835*404b540aSrobert distinction between these two modes. mode_nop is useful to
836*404b540aSrobert let a user count & trace every single check / registration
837*404b540aSrobert call. mode_populate is useful to let a program run fast
838*404b540aSrobert while unchecked.
839*404b540aSrobert */
840*404b540aSrobert judgement = 1;
841*404b540aSrobert break;
842*404b540aSrobert
843*404b540aSrobert case mode_populate:
844*404b540aSrobert entry->low = ptr_low;
845*404b540aSrobert entry->high = ptr_high;
846*404b540aSrobert judgement = 1;
847*404b540aSrobert break;
848*404b540aSrobert
849*404b540aSrobert case mode_check:
850*404b540aSrobert {
851*404b540aSrobert unsigned heuristics = 0;
852*404b540aSrobert
853*404b540aSrobert /* Advance aging/adaptation counters. */
854*404b540aSrobert static unsigned adapt_count;
855*404b540aSrobert adapt_count ++;
856*404b540aSrobert if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
857*404b540aSrobert adapt_count > __mf_opts.adapt_cache))
858*404b540aSrobert {
859*404b540aSrobert adapt_count = 0;
860*404b540aSrobert __mf_adapt_cache ();
861*404b540aSrobert }
862*404b540aSrobert
863*404b540aSrobert /* Looping only occurs if heuristics were triggered. */
864*404b540aSrobert while (judgement == 0)
865*404b540aSrobert {
866*404b540aSrobert DECLARE (void, free, void *p);
867*404b540aSrobert __mf_object_t* ovr_obj[1];
868*404b540aSrobert unsigned obj_count;
869*404b540aSrobert __mf_object_t** all_ovr_obj = NULL;
870*404b540aSrobert __mf_object_t** dealloc_me = NULL;
871*404b540aSrobert unsigned i;
872*404b540aSrobert
873*404b540aSrobert /* Find all overlapping objects. Be optimistic that there is just one. */
874*404b540aSrobert obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
875*404b540aSrobert if (UNLIKELY (obj_count > 1))
876*404b540aSrobert {
877*404b540aSrobert /* Allocate a real buffer and do the search again. */
878*404b540aSrobert DECLARE (void *, malloc, size_t c);
879*404b540aSrobert unsigned n;
880*404b540aSrobert all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
881*404b540aSrobert obj_count));
882*404b540aSrobert if (all_ovr_obj == NULL) abort ();
883*404b540aSrobert n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
884*404b540aSrobert assert (n == obj_count);
885*404b540aSrobert dealloc_me = all_ovr_obj;
886*404b540aSrobert }
887*404b540aSrobert else
888*404b540aSrobert {
889*404b540aSrobert all_ovr_obj = ovr_obj;
890*404b540aSrobert dealloc_me = NULL;
891*404b540aSrobert }
892*404b540aSrobert
893*404b540aSrobert /* Update object statistics. */
894*404b540aSrobert for (i = 0; i < obj_count; i++)
895*404b540aSrobert {
896*404b540aSrobert __mf_object_t *obj = all_ovr_obj[i];
897*404b540aSrobert assert (obj != NULL);
898*404b540aSrobert if (type == __MF_CHECK_READ)
899*404b540aSrobert obj->read_count ++;
900*404b540aSrobert else
901*404b540aSrobert obj->write_count ++;
902*404b540aSrobert obj->liveness ++;
903*404b540aSrobert }
904*404b540aSrobert
905*404b540aSrobert /* Iterate over the various objects. There are a number of special cases. */
906*404b540aSrobert for (i = 0; i < obj_count; i++)
907*404b540aSrobert {
908*404b540aSrobert __mf_object_t *obj = all_ovr_obj[i];
909*404b540aSrobert
910*404b540aSrobert /* Any __MF_TYPE_NOACCESS hit is bad. */
911*404b540aSrobert if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
912*404b540aSrobert judgement = -1;
913*404b540aSrobert
914*404b540aSrobert /* Any object with a watch flag is bad. */
915*404b540aSrobert if (UNLIKELY (obj->watching_p))
916*404b540aSrobert judgement = -2; /* trigger VIOL_WATCH */
917*404b540aSrobert
918*404b540aSrobert /* A read from an uninitialized object is bad. */
919*404b540aSrobert if (UNLIKELY (__mf_opts.check_initialization
920*404b540aSrobert /* reading */
921*404b540aSrobert && type == __MF_CHECK_READ
922*404b540aSrobert /* not written */
923*404b540aSrobert && obj->write_count == 0
924*404b540aSrobert /* uninitialized (heap) */
925*404b540aSrobert && obj->type == __MF_TYPE_HEAP))
926*404b540aSrobert judgement = -1;
927*404b540aSrobert }
928*404b540aSrobert
929*404b540aSrobert /* We now know that the access spans no invalid objects. */
930*404b540aSrobert if (LIKELY (judgement >= 0))
931*404b540aSrobert for (i = 0; i < obj_count; i++)
932*404b540aSrobert {
933*404b540aSrobert __mf_object_t *obj = all_ovr_obj[i];
934*404b540aSrobert
935*404b540aSrobert /* Is this access entirely contained within this object? */
936*404b540aSrobert if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
937*404b540aSrobert {
938*404b540aSrobert /* Valid access. */
939*404b540aSrobert entry->low = obj->low;
940*404b540aSrobert entry->high = obj->high;
941*404b540aSrobert judgement = 1;
942*404b540aSrobert }
943*404b540aSrobert }
944*404b540aSrobert
945*404b540aSrobert /* This access runs off the end of one valid object. That
946*404b540aSrobert could be okay, if other valid objects fill in all the
947*404b540aSrobert holes. We allow this only for HEAP and GUESS type
948*404b540aSrobert objects. Accesses to STATIC and STACK variables
949*404b540aSrobert should not be allowed to span. */
950*404b540aSrobert if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
951*404b540aSrobert {
952*404b540aSrobert unsigned uncovered = 0;
953*404b540aSrobert for (i = 0; i < obj_count; i++)
954*404b540aSrobert {
955*404b540aSrobert __mf_object_t *obj = all_ovr_obj[i];
956*404b540aSrobert int j, uncovered_low_p, uncovered_high_p;
957*404b540aSrobert uintptr_t ptr_lower, ptr_higher;
958*404b540aSrobert
959*404b540aSrobert uncovered_low_p = ptr_low < obj->low;
960*404b540aSrobert ptr_lower = CLAMPSUB (obj->low, 1);
961*404b540aSrobert uncovered_high_p = ptr_high > obj->high;
962*404b540aSrobert ptr_higher = CLAMPADD (obj->high, 1);
963*404b540aSrobert
964*404b540aSrobert for (j = 0; j < obj_count; j++)
965*404b540aSrobert {
966*404b540aSrobert __mf_object_t *obj2 = all_ovr_obj[j];
967*404b540aSrobert
968*404b540aSrobert if (i == j) continue;
969*404b540aSrobert
970*404b540aSrobert /* Filter out objects that cannot be spanned across. */
971*404b540aSrobert if (obj2->type == __MF_TYPE_STACK
972*404b540aSrobert || obj2->type == __MF_TYPE_STATIC)
973*404b540aSrobert continue;
974*404b540aSrobert
975*404b540aSrobert /* Consider a side "covered" if obj2 includes
976*404b540aSrobert the next byte on that side. */
977*404b540aSrobert if (uncovered_low_p
978*404b540aSrobert && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
979*404b540aSrobert uncovered_low_p = 0;
980*404b540aSrobert if (uncovered_high_p
981*404b540aSrobert && (ptr_high >= obj2->low && ptr_higher <= obj2->high))
982*404b540aSrobert uncovered_high_p = 0;
983*404b540aSrobert }
984*404b540aSrobert
985*404b540aSrobert if (uncovered_low_p || uncovered_high_p)
986*404b540aSrobert uncovered ++;
987*404b540aSrobert }
988*404b540aSrobert
989*404b540aSrobert /* Success if no overlapping objects are uncovered. */
990*404b540aSrobert if (uncovered == 0)
991*404b540aSrobert judgement = 1;
992*404b540aSrobert }
993*404b540aSrobert
994*404b540aSrobert
995*404b540aSrobert if (dealloc_me != NULL)
996*404b540aSrobert CALL_REAL (free, dealloc_me);
997*404b540aSrobert
998*404b540aSrobert /* If the judgment is still unknown at this stage, loop
999*404b540aSrobert around at most one more time. */
1000*404b540aSrobert if (judgement == 0)
1001*404b540aSrobert {
1002*404b540aSrobert if (heuristics++ < 2) /* XXX parametrize this number? */
1003*404b540aSrobert judgement = __mf_heuristic_check (ptr_low, ptr_high);
1004*404b540aSrobert else
1005*404b540aSrobert judgement = -1;
1006*404b540aSrobert }
1007*404b540aSrobert }
1008*404b540aSrobert
1009*404b540aSrobert }
1010*404b540aSrobert break;
1011*404b540aSrobert
1012*404b540aSrobert case mode_violate:
1013*404b540aSrobert judgement = -1;
1014*404b540aSrobert break;
1015*404b540aSrobert }
1016*404b540aSrobert
1017*404b540aSrobert if (__mf_opts.collect_stats)
1018*404b540aSrobert {
1019*404b540aSrobert __mf_count_check ++;
1020*404b540aSrobert
1021*404b540aSrobert if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
1022*404b540aSrobert /* && (old_entry.low != 0) && (old_entry.high != 0)) */
1023*404b540aSrobert __mf_lookup_cache_reusecount [entry_idx] ++;
1024*404b540aSrobert }
1025*404b540aSrobert
1026*404b540aSrobert if (UNLIKELY (judgement < 0))
1027*404b540aSrobert __mf_violation (ptr, sz,
1028*404b540aSrobert (uintptr_t) __builtin_return_address (0), location,
1029*404b540aSrobert ((judgement == -1) ?
1030*404b540aSrobert (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
1031*404b540aSrobert __MF_VIOL_WATCH));
1032*404b540aSrobert }
1033*404b540aSrobert
1034*404b540aSrobert
1035*404b540aSrobert static __mf_object_t *
__mf_insert_new_object(uintptr_t low,uintptr_t high,int type,const char * name,uintptr_t pc)1036*404b540aSrobert __mf_insert_new_object (uintptr_t low, uintptr_t high, int type,
1037*404b540aSrobert const char *name, uintptr_t pc)
1038*404b540aSrobert {
1039*404b540aSrobert DECLARE (void *, calloc, size_t c, size_t n);
1040*404b540aSrobert
1041*404b540aSrobert __mf_object_t *new_obj;
1042*404b540aSrobert new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t));
1043*404b540aSrobert new_obj->low = low;
1044*404b540aSrobert new_obj->high = high;
1045*404b540aSrobert new_obj->type = type;
1046*404b540aSrobert new_obj->name = name;
1047*404b540aSrobert new_obj->alloc_pc = pc;
1048*404b540aSrobert #if HAVE_GETTIMEOFDAY
1049*404b540aSrobert if (__mf_opts.timestamps)
1050*404b540aSrobert gettimeofday (& new_obj->alloc_time, NULL);
1051*404b540aSrobert #endif
1052*404b540aSrobert #if LIBMUDFLAPTH
1053*404b540aSrobert new_obj->alloc_thread = pthread_self ();
1054*404b540aSrobert #endif
1055*404b540aSrobert
1056*404b540aSrobert if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I))
1057*404b540aSrobert new_obj->alloc_backtrace_size =
1058*404b540aSrobert __mf_backtrace (& new_obj->alloc_backtrace,
1059*404b540aSrobert (void *) pc, 2);
1060*404b540aSrobert
1061*404b540aSrobert __mf_link_object (new_obj);
1062*404b540aSrobert return new_obj;
1063*404b540aSrobert }
1064*404b540aSrobert
1065*404b540aSrobert
1066*404b540aSrobert static void
__mf_uncache_object(__mf_object_t * old_obj)1067*404b540aSrobert __mf_uncache_object (__mf_object_t *old_obj)
1068*404b540aSrobert {
1069*404b540aSrobert /* Remove any low/high pointers for this object from the lookup cache. */
1070*404b540aSrobert
1071*404b540aSrobert /* Can it possibly exist in the cache? */
1072*404b540aSrobert if (LIKELY (old_obj->read_count + old_obj->write_count))
1073*404b540aSrobert {
1074*404b540aSrobert /* As reported by Herman ten Brugge, we need to scan the entire
1075*404b540aSrobert cache for entries that may hit this object. */
1076*404b540aSrobert uintptr_t low = old_obj->low;
1077*404b540aSrobert uintptr_t high = old_obj->high;
1078*404b540aSrobert struct __mf_cache *entry = & __mf_lookup_cache [0];
1079*404b540aSrobert unsigned i;
1080*404b540aSrobert for (i = 0; i <= __mf_lc_mask; i++, entry++)
1081*404b540aSrobert {
1082*404b540aSrobert /* NB: the "||" in the following test permits this code to
1083*404b540aSrobert tolerate the situation introduced by __mf_check over
1084*404b540aSrobert contiguous objects, where a cache entry spans several
1085*404b540aSrobert objects. */
1086*404b540aSrobert if (entry->low == low || entry->high == high)
1087*404b540aSrobert {
1088*404b540aSrobert entry->low = MAXPTR;
1089*404b540aSrobert entry->high = MINPTR;
1090*404b540aSrobert }
1091*404b540aSrobert }
1092*404b540aSrobert }
1093*404b540aSrobert }
1094*404b540aSrobert
1095*404b540aSrobert
1096*404b540aSrobert void
__mf_register(void * ptr,size_t sz,int type,const char * name)1097*404b540aSrobert __mf_register (void *ptr, size_t sz, int type, const char *name)
1098*404b540aSrobert {
1099*404b540aSrobert LOCKTH ();
1100*404b540aSrobert BEGIN_RECURSION_PROTECT ();
1101*404b540aSrobert __mfu_register (ptr, sz, type, name);
1102*404b540aSrobert END_RECURSION_PROTECT ();
1103*404b540aSrobert UNLOCKTH ();
1104*404b540aSrobert }
1105*404b540aSrobert
1106*404b540aSrobert
1107*404b540aSrobert void
__mfu_register(void * ptr,size_t sz,int type,const char * name)1108*404b540aSrobert __mfu_register (void *ptr, size_t sz, int type, const char *name)
1109*404b540aSrobert {
1110*404b540aSrobert TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
1111*404b540aSrobert ptr, (unsigned long) sz, type, name ? name : "");
1112*404b540aSrobert
1113*404b540aSrobert if (__mf_opts.collect_stats)
1114*404b540aSrobert {
1115*404b540aSrobert __mf_count_register ++;
1116*404b540aSrobert __mf_total_register_size [(type < 0) ? 0 :
1117*404b540aSrobert (type > __MF_TYPE_MAX) ? 0 :
1118*404b540aSrobert type] += sz;
1119*404b540aSrobert }
1120*404b540aSrobert
1121*404b540aSrobert if (UNLIKELY (__mf_opts.sigusr1_report))
1122*404b540aSrobert __mf_sigusr1_respond ();
1123*404b540aSrobert
1124*404b540aSrobert switch (__mf_opts.mudflap_mode)
1125*404b540aSrobert {
1126*404b540aSrobert case mode_nop:
1127*404b540aSrobert break;
1128*404b540aSrobert
1129*404b540aSrobert case mode_violate:
1130*404b540aSrobert __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
1131*404b540aSrobert __MF_VIOL_REGISTER);
1132*404b540aSrobert break;
1133*404b540aSrobert
1134*404b540aSrobert case mode_populate:
1135*404b540aSrobert /* Clear the cache. */
1136*404b540aSrobert /* XXX: why the entire cache? */
1137*404b540aSrobert /* XXX: race */
1138*404b540aSrobert memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1139*404b540aSrobert /* void slot 0 */
1140*404b540aSrobert __mf_lookup_cache[0].low = MAXPTR;
1141*404b540aSrobert break;
1142*404b540aSrobert
1143*404b540aSrobert case mode_check:
1144*404b540aSrobert {
1145*404b540aSrobert __mf_object_t *ovr_objs [1];
1146*404b540aSrobert unsigned num_overlapping_objs;
1147*404b540aSrobert uintptr_t low = (uintptr_t) ptr;
1148*404b540aSrobert uintptr_t high = CLAMPSZ (ptr, sz);
1149*404b540aSrobert uintptr_t pc = (uintptr_t) __builtin_return_address (0);
1150*404b540aSrobert
1151*404b540aSrobert /* Treat unknown size indication as 1. */
1152*404b540aSrobert if (UNLIKELY (sz == 0)) sz = 1;
1153*404b540aSrobert
1154*404b540aSrobert /* Look for objects only of the same type. This will e.g. permit a registration
1155*404b540aSrobert of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS. At
1156*404b540aSrobert __mf_check time however harmful overlaps will be detected. */
1157*404b540aSrobert num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);
1158*404b540aSrobert
1159*404b540aSrobert /* Handle overlaps. */
1160*404b540aSrobert if (UNLIKELY (num_overlapping_objs > 0))
1161*404b540aSrobert {
1162*404b540aSrobert __mf_object_t *ovr_obj = ovr_objs[0];
1163*404b540aSrobert
1164*404b540aSrobert /* Accept certain specific duplication pairs. */
1165*404b540aSrobert if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
1166*404b540aSrobert && ovr_obj->low == low
1167*404b540aSrobert && ovr_obj->high == high
1168*404b540aSrobert && ovr_obj->type == type)
1169*404b540aSrobert {
1170*404b540aSrobert /* Duplicate registration for static objects may come
1171*404b540aSrobert from distinct compilation units. */
1172*404b540aSrobert VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
1173*404b540aSrobert (void *) low, (void *) high,
1174*404b540aSrobert (ovr_obj->name ? ovr_obj->name : ""));
1175*404b540aSrobert break;
1176*404b540aSrobert }
1177*404b540aSrobert
1178*404b540aSrobert /* Alas, a genuine violation. */
1179*404b540aSrobert else
1180*404b540aSrobert {
1181*404b540aSrobert /* Two or more *real* mappings here. */
1182*404b540aSrobert __mf_violation ((void *) ptr, sz,
1183*404b540aSrobert (uintptr_t) __builtin_return_address (0), NULL,
1184*404b540aSrobert __MF_VIOL_REGISTER);
1185*404b540aSrobert }
1186*404b540aSrobert }
1187*404b540aSrobert else /* No overlapping objects: AOK. */
1188*404b540aSrobert __mf_insert_new_object (low, high, type, name, pc);
1189*404b540aSrobert
1190*404b540aSrobert /* We could conceivably call __mf_check() here to prime the cache,
1191*404b540aSrobert but then the read_count/write_count field is not reliable. */
1192*404b540aSrobert break;
1193*404b540aSrobert }
1194*404b540aSrobert } /* end switch (__mf_opts.mudflap_mode) */
1195*404b540aSrobert }
1196*404b540aSrobert
1197*404b540aSrobert
1198*404b540aSrobert void
__mf_unregister(void * ptr,size_t sz,int type)1199*404b540aSrobert __mf_unregister (void *ptr, size_t sz, int type)
1200*404b540aSrobert {
1201*404b540aSrobert LOCKTH ();
1202*404b540aSrobert BEGIN_RECURSION_PROTECT ();
1203*404b540aSrobert __mfu_unregister (ptr, sz, type);
1204*404b540aSrobert END_RECURSION_PROTECT ();
1205*404b540aSrobert UNLOCKTH ();
1206*404b540aSrobert }
1207*404b540aSrobert
1208*404b540aSrobert
1209*404b540aSrobert void
__mfu_unregister(void * ptr,size_t sz,int type)1210*404b540aSrobert __mfu_unregister (void *ptr, size_t sz, int type)
1211*404b540aSrobert {
1212*404b540aSrobert DECLARE (void, free, void *ptr);
1213*404b540aSrobert
1214*404b540aSrobert if (UNLIKELY (__mf_opts.sigusr1_report))
1215*404b540aSrobert __mf_sigusr1_respond ();
1216*404b540aSrobert
1217*404b540aSrobert TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);
1218*404b540aSrobert
1219*404b540aSrobert switch (__mf_opts.mudflap_mode)
1220*404b540aSrobert {
1221*404b540aSrobert case mode_nop:
1222*404b540aSrobert break;
1223*404b540aSrobert
1224*404b540aSrobert case mode_violate:
1225*404b540aSrobert __mf_violation (ptr, sz,
1226*404b540aSrobert (uintptr_t) __builtin_return_address (0), NULL,
1227*404b540aSrobert __MF_VIOL_UNREGISTER);
1228*404b540aSrobert break;
1229*404b540aSrobert
1230*404b540aSrobert case mode_populate:
1231*404b540aSrobert /* Clear the cache. */
1232*404b540aSrobert /* XXX: race */
1233*404b540aSrobert memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1234*404b540aSrobert /* void slot 0 */
1235*404b540aSrobert __mf_lookup_cache[0].low = MAXPTR;
1236*404b540aSrobert break;
1237*404b540aSrobert
1238*404b540aSrobert case mode_check:
1239*404b540aSrobert {
1240*404b540aSrobert __mf_object_t *old_obj = NULL;
1241*404b540aSrobert __mf_object_t *del_obj = NULL; /* Object to actually delete. */
1242*404b540aSrobert __mf_object_t *objs[1] = {NULL};
1243*404b540aSrobert unsigned num_overlapping_objs;
1244*404b540aSrobert
1245*404b540aSrobert num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1246*404b540aSrobert CLAMPSZ (ptr, sz), objs, 1, type);
1247*404b540aSrobert
1248*404b540aSrobert /* Special case for HEAP_I - see free & realloc hook. They don't
1249*404b540aSrobert know whether the input region was HEAP or HEAP_I before
1250*404b540aSrobert unmapping it. Here we give HEAP a try in case HEAP_I
1251*404b540aSrobert failed. */
1252*404b540aSrobert if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
1253*404b540aSrobert {
1254*404b540aSrobert num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
1255*404b540aSrobert CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
1256*404b540aSrobert }
1257*404b540aSrobert
1258*404b540aSrobert old_obj = objs[0];
1259*404b540aSrobert if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
1260*404b540aSrobert || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
1261*404b540aSrobert || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
1262*404b540aSrobert {
1263*404b540aSrobert __mf_violation (ptr, sz,
1264*404b540aSrobert (uintptr_t) __builtin_return_address (0), NULL,
1265*404b540aSrobert __MF_VIOL_UNREGISTER);
1266*404b540aSrobert break;
1267*404b540aSrobert }
1268*404b540aSrobert
1269*404b540aSrobert __mf_unlink_object (old_obj);
1270*404b540aSrobert __mf_uncache_object (old_obj);
1271*404b540aSrobert
1272*404b540aSrobert /* Wipe buffer contents if desired. */
1273*404b540aSrobert if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
1274*404b540aSrobert || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
1275*404b540aSrobert || old_obj->type == __MF_TYPE_HEAP_I)))
1276*404b540aSrobert {
1277*404b540aSrobert memset ((void *) old_obj->low,
1278*404b540aSrobert 0,
1279*404b540aSrobert (size_t) (old_obj->high - old_obj->low + 1));
1280*404b540aSrobert }
1281*404b540aSrobert
1282*404b540aSrobert /* Manage the object cemetary. */
1283*404b540aSrobert if (__mf_opts.persistent_count > 0
1284*404b540aSrobert && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
1285*404b540aSrobert {
1286*404b540aSrobert old_obj->deallocated_p = 1;
1287*404b540aSrobert old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
1288*404b540aSrobert #if HAVE_GETTIMEOFDAY
1289*404b540aSrobert if (__mf_opts.timestamps)
1290*404b540aSrobert gettimeofday (& old_obj->dealloc_time, NULL);
1291*404b540aSrobert #endif
1292*404b540aSrobert #ifdef LIBMUDFLAPTH
1293*404b540aSrobert old_obj->dealloc_thread = pthread_self ();
1294*404b540aSrobert #endif
1295*404b540aSrobert
1296*404b540aSrobert if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
1297*404b540aSrobert old_obj->dealloc_backtrace_size =
1298*404b540aSrobert __mf_backtrace (& old_obj->dealloc_backtrace,
1299*404b540aSrobert NULL, 2);
1300*404b540aSrobert
1301*404b540aSrobert /* Encourage this object to be displayed again in current epoch. */
1302*404b540aSrobert old_obj->description_epoch --;
1303*404b540aSrobert
1304*404b540aSrobert /* Put this object into the cemetary. This may require this plot to
1305*404b540aSrobert be recycled, and the previous resident to be designated del_obj. */
1306*404b540aSrobert {
1307*404b540aSrobert unsigned row = old_obj->type;
1308*404b540aSrobert unsigned plot = __mf_object_dead_head [row];
1309*404b540aSrobert
1310*404b540aSrobert del_obj = __mf_object_cemetary [row][plot];
1311*404b540aSrobert __mf_object_cemetary [row][plot] = old_obj;
1312*404b540aSrobert plot ++;
1313*404b540aSrobert if (plot == __mf_opts.persistent_count) plot = 0;
1314*404b540aSrobert __mf_object_dead_head [row] = plot;
1315*404b540aSrobert }
1316*404b540aSrobert }
1317*404b540aSrobert else
1318*404b540aSrobert del_obj = old_obj;
1319*404b540aSrobert
1320*404b540aSrobert if (__mf_opts.print_leaks)
1321*404b540aSrobert {
1322*404b540aSrobert if ((old_obj->read_count + old_obj->write_count) == 0 &&
1323*404b540aSrobert (old_obj->type == __MF_TYPE_HEAP
1324*404b540aSrobert || old_obj->type == __MF_TYPE_HEAP_I))
1325*404b540aSrobert {
1326*404b540aSrobert fprintf (stderr,
1327*404b540aSrobert "*******\n"
1328*404b540aSrobert "mudflap warning: unaccessed registered object:\n");
1329*404b540aSrobert __mf_describe_object (old_obj);
1330*404b540aSrobert }
1331*404b540aSrobert }
1332*404b540aSrobert
1333*404b540aSrobert if (del_obj != NULL) /* May or may not equal old_obj. */
1334*404b540aSrobert {
1335*404b540aSrobert if (__mf_opts.backtrace > 0)
1336*404b540aSrobert {
1337*404b540aSrobert CALL_REAL(free, del_obj->alloc_backtrace);
1338*404b540aSrobert if (__mf_opts.persistent_count > 0)
1339*404b540aSrobert {
1340*404b540aSrobert CALL_REAL(free, del_obj->dealloc_backtrace);
1341*404b540aSrobert }
1342*404b540aSrobert }
1343*404b540aSrobert CALL_REAL(free, del_obj);
1344*404b540aSrobert }
1345*404b540aSrobert
1346*404b540aSrobert break;
1347*404b540aSrobert }
1348*404b540aSrobert } /* end switch (__mf_opts.mudflap_mode) */
1349*404b540aSrobert
1350*404b540aSrobert
1351*404b540aSrobert if (__mf_opts.collect_stats)
1352*404b540aSrobert {
1353*404b540aSrobert __mf_count_unregister ++;
1354*404b540aSrobert __mf_total_unregister_size += sz;
1355*404b540aSrobert }
1356*404b540aSrobert }
1357*404b540aSrobert
1358*404b540aSrobert
1359*404b540aSrobert
1360*404b540aSrobert struct tree_stats
1361*404b540aSrobert {
1362*404b540aSrobert unsigned obj_count;
1363*404b540aSrobert unsigned long total_size;
1364*404b540aSrobert unsigned live_obj_count;
1365*404b540aSrobert double total_weight;
1366*404b540aSrobert double weighted_size;
1367*404b540aSrobert unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
1368*404b540aSrobert };
1369*404b540aSrobert
1370*404b540aSrobert
1371*404b540aSrobert
1372*404b540aSrobert static int
__mf_adapt_cache_fn(mfsplay_tree_node n,void * param)1373*404b540aSrobert __mf_adapt_cache_fn (mfsplay_tree_node n, void *param)
1374*404b540aSrobert {
1375*404b540aSrobert __mf_object_t *obj = (__mf_object_t *) n->value;
1376*404b540aSrobert struct tree_stats *s = (struct tree_stats *) param;
1377*404b540aSrobert
1378*404b540aSrobert assert (obj != NULL && s != NULL);
1379*404b540aSrobert
1380*404b540aSrobert /* Exclude never-accessed objects. */
1381*404b540aSrobert if (obj->read_count + obj->write_count)
1382*404b540aSrobert {
1383*404b540aSrobert s->obj_count ++;
1384*404b540aSrobert s->total_size += (obj->high - obj->low + 1);
1385*404b540aSrobert
1386*404b540aSrobert if (obj->liveness)
1387*404b540aSrobert {
1388*404b540aSrobert unsigned i;
1389*404b540aSrobert uintptr_t addr;
1390*404b540aSrobert
1391*404b540aSrobert /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n",
1392*404b540aSrobert (void *) obj->low, obj->liveness, obj->name); */
1393*404b540aSrobert
1394*404b540aSrobert s->live_obj_count ++;
1395*404b540aSrobert s->total_weight += (double) obj->liveness;
1396*404b540aSrobert s->weighted_size +=
1397*404b540aSrobert (double) (obj->high - obj->low + 1) *
1398*404b540aSrobert (double) obj->liveness;
1399*404b540aSrobert
1400*404b540aSrobert addr = obj->low;
1401*404b540aSrobert for (i=0; i<sizeof(uintptr_t) * 8; i++)
1402*404b540aSrobert {
1403*404b540aSrobert unsigned bit = addr & 1;
1404*404b540aSrobert s->weighted_address_bits[i][bit] += obj->liveness;
1405*404b540aSrobert addr = addr >> 1;
1406*404b540aSrobert }
1407*404b540aSrobert
1408*404b540aSrobert /* Age the liveness value. */
1409*404b540aSrobert obj->liveness >>= 1;
1410*404b540aSrobert }
1411*404b540aSrobert }
1412*404b540aSrobert
1413*404b540aSrobert return 0;
1414*404b540aSrobert }
1415*404b540aSrobert
1416*404b540aSrobert
1417*404b540aSrobert static void
__mf_adapt_cache()1418*404b540aSrobert __mf_adapt_cache ()
1419*404b540aSrobert {
1420*404b540aSrobert struct tree_stats s;
1421*404b540aSrobert uintptr_t new_mask = 0;
1422*404b540aSrobert unsigned char new_shift;
1423*404b540aSrobert float cache_utilization;
1424*404b540aSrobert float max_value;
1425*404b540aSrobert static float smoothed_new_shift = -1.0;
1426*404b540aSrobert unsigned i;
1427*404b540aSrobert
1428*404b540aSrobert memset (&s, 0, sizeof (s));
1429*404b540aSrobert
1430*404b540aSrobert mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s);
1431*404b540aSrobert mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s);
1432*404b540aSrobert mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s);
1433*404b540aSrobert mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s);
1434*404b540aSrobert mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s);
1435*404b540aSrobert
1436*404b540aSrobert /* Maybe we're dealing with funny aging/adaptation parameters, or an
1437*404b540aSrobert empty tree. Just leave the cache alone in such cases, rather
1438*404b540aSrobert than risk dying by division-by-zero. */
1439*404b540aSrobert if (! (s.obj_count > 0) && (s.live_obj_count > 0) && (s.total_weight > 0.0))
1440*404b540aSrobert return;
1441*404b540aSrobert
1442*404b540aSrobert /* Guess a good value for the shift parameter by finding an address bit that is a
1443*404b540aSrobert good discriminant of lively objects. */
1444*404b540aSrobert max_value = 0.0;
1445*404b540aSrobert for (i=0; i<sizeof (uintptr_t)*8; i++)
1446*404b540aSrobert {
1447*404b540aSrobert float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1448*404b540aSrobert if (max_value < value) max_value = value;
1449*404b540aSrobert }
1450*404b540aSrobert for (i=0; i<sizeof (uintptr_t)*8; i++)
1451*404b540aSrobert {
1452*404b540aSrobert float shoulder_factor = 0.7; /* Include slightly less popular bits too. */
1453*404b540aSrobert float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1];
1454*404b540aSrobert if (value >= max_value * shoulder_factor)
1455*404b540aSrobert break;
1456*404b540aSrobert }
1457*404b540aSrobert if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift;
1458*404b540aSrobert /* Converge toward this slowly to reduce flapping. */
1459*404b540aSrobert smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i;
1460*404b540aSrobert new_shift = (unsigned) (smoothed_new_shift + 0.5);
1461*404b540aSrobert assert (new_shift < sizeof (uintptr_t)*8);
1462*404b540aSrobert
1463*404b540aSrobert /* Count number of used buckets. */
1464*404b540aSrobert cache_utilization = 0.0;
1465*404b540aSrobert for (i = 0; i < (1 + __mf_lc_mask); i++)
1466*404b540aSrobert if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0)
1467*404b540aSrobert cache_utilization += 1.0;
1468*404b540aSrobert cache_utilization /= (1 + __mf_lc_mask);
1469*404b540aSrobert
1470*404b540aSrobert new_mask |= 0xffff; /* XXX: force a large cache. */
1471*404b540aSrobert new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1);
1472*404b540aSrobert
1473*404b540aSrobert VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => "
1474*404b540aSrobert "util=%u%% m=%p s=%u\n",
1475*404b540aSrobert s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size,
1476*404b540aSrobert (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift);
1477*404b540aSrobert
1478*404b540aSrobert /* We should reinitialize cache if its parameters have changed. */
1479*404b540aSrobert if (new_mask != __mf_lc_mask ||
1480*404b540aSrobert new_shift != __mf_lc_shift)
1481*404b540aSrobert {
1482*404b540aSrobert __mf_lc_mask = new_mask;
1483*404b540aSrobert __mf_lc_shift = new_shift;
1484*404b540aSrobert /* XXX: race */
1485*404b540aSrobert memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
1486*404b540aSrobert /* void slot 0 */
1487*404b540aSrobert __mf_lookup_cache[0].low = MAXPTR;
1488*404b540aSrobert }
1489*404b540aSrobert }
1490*404b540aSrobert
1491*404b540aSrobert
1492*404b540aSrobert
1493*404b540aSrobert /* __mf_find_object[s] */
1494*404b540aSrobert
/* Find overlapping live objects between [low,high].  Return up to
1496*404b540aSrobert max_objs of their pointers in objs[]. Return total count of
1497*404b540aSrobert overlaps (may exceed max_objs). */
1498*404b540aSrobert
1499*404b540aSrobert unsigned
__mf_find_objects2(uintptr_t ptr_low,uintptr_t ptr_high,__mf_object_t ** objs,unsigned max_objs,int type)1500*404b540aSrobert __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
1501*404b540aSrobert __mf_object_t **objs, unsigned max_objs, int type)
1502*404b540aSrobert {
1503*404b540aSrobert unsigned count = 0;
1504*404b540aSrobert mfsplay_tree t = __mf_object_tree (type);
1505*404b540aSrobert mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
1506*404b540aSrobert int direction;
1507*404b540aSrobert
1508*404b540aSrobert mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
1509*404b540aSrobert /* An exact match for base address implies a hit. */
1510*404b540aSrobert if (n != NULL)
1511*404b540aSrobert {
1512*404b540aSrobert if (count < max_objs)
1513*404b540aSrobert objs[count] = (__mf_object_t *) n->value;
1514*404b540aSrobert count ++;
1515*404b540aSrobert }
1516*404b540aSrobert
1517*404b540aSrobert /* Iterate left then right near this key value to find all overlapping objects. */
1518*404b540aSrobert for (direction = 0; direction < 2; direction ++)
1519*404b540aSrobert {
1520*404b540aSrobert /* Reset search origin. */
1521*404b540aSrobert k = (mfsplay_tree_key) ptr_low;
1522*404b540aSrobert
1523*404b540aSrobert while (1)
1524*404b540aSrobert {
1525*404b540aSrobert __mf_object_t *obj;
1526*404b540aSrobert
1527*404b540aSrobert n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
1528*404b540aSrobert if (n == NULL) break;
1529*404b540aSrobert obj = (__mf_object_t *) n->value;
1530*404b540aSrobert
1531*404b540aSrobert if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
1532*404b540aSrobert break;
1533*404b540aSrobert
1534*404b540aSrobert if (count < max_objs)
1535*404b540aSrobert objs[count] = (__mf_object_t *) n->value;
1536*404b540aSrobert count ++;
1537*404b540aSrobert
1538*404b540aSrobert k = (mfsplay_tree_key) obj->low;
1539*404b540aSrobert }
1540*404b540aSrobert }
1541*404b540aSrobert
1542*404b540aSrobert return count;
1543*404b540aSrobert }
1544*404b540aSrobert
1545*404b540aSrobert
1546*404b540aSrobert unsigned
__mf_find_objects(uintptr_t ptr_low,uintptr_t ptr_high,__mf_object_t ** objs,unsigned max_objs)1547*404b540aSrobert __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
1548*404b540aSrobert __mf_object_t **objs, unsigned max_objs)
1549*404b540aSrobert {
1550*404b540aSrobert int type;
1551*404b540aSrobert unsigned count = 0;
1552*404b540aSrobert
1553*404b540aSrobert /* Search each splay tree for overlaps. */
1554*404b540aSrobert for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++)
1555*404b540aSrobert {
1556*404b540aSrobert unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type);
1557*404b540aSrobert if (c > max_objs)
1558*404b540aSrobert {
1559*404b540aSrobert max_objs = 0;
1560*404b540aSrobert objs = NULL;
1561*404b540aSrobert }
1562*404b540aSrobert else /* NB: C may equal 0 */
1563*404b540aSrobert {
1564*404b540aSrobert max_objs -= c;
1565*404b540aSrobert objs += c;
1566*404b540aSrobert }
1567*404b540aSrobert count += c;
1568*404b540aSrobert }
1569*404b540aSrobert
1570*404b540aSrobert return count;
1571*404b540aSrobert }
1572*404b540aSrobert
1573*404b540aSrobert
1574*404b540aSrobert
1575*404b540aSrobert /* __mf_link_object */
1576*404b540aSrobert
1577*404b540aSrobert static void
__mf_link_object(__mf_object_t * node)1578*404b540aSrobert __mf_link_object (__mf_object_t *node)
1579*404b540aSrobert {
1580*404b540aSrobert mfsplay_tree t = __mf_object_tree (node->type);
1581*404b540aSrobert mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node);
1582*404b540aSrobert }
1583*404b540aSrobert
1584*404b540aSrobert /* __mf_unlink_object */
1585*404b540aSrobert
1586*404b540aSrobert static void
__mf_unlink_object(__mf_object_t * node)1587*404b540aSrobert __mf_unlink_object (__mf_object_t *node)
1588*404b540aSrobert {
1589*404b540aSrobert mfsplay_tree t = __mf_object_tree (node->type);
1590*404b540aSrobert mfsplay_tree_remove (t, (mfsplay_tree_key) node->low);
1591*404b540aSrobert }
1592*404b540aSrobert
1593*404b540aSrobert /* __mf_find_dead_objects */
1594*404b540aSrobert
/* Find overlapping dead objects between [low,high].  Return up to
1596*404b540aSrobert max_objs of their pointers in objs[]. Return total count of
1597*404b540aSrobert overlaps (may exceed max_objs). */
1598*404b540aSrobert
1599*404b540aSrobert static unsigned
__mf_find_dead_objects(uintptr_t low,uintptr_t high,__mf_object_t ** objs,unsigned max_objs)1600*404b540aSrobert __mf_find_dead_objects (uintptr_t low, uintptr_t high,
1601*404b540aSrobert __mf_object_t **objs, unsigned max_objs)
1602*404b540aSrobert {
1603*404b540aSrobert if (__mf_opts.persistent_count > 0)
1604*404b540aSrobert {
1605*404b540aSrobert unsigned count = 0;
1606*404b540aSrobert unsigned recollection = 0;
1607*404b540aSrobert unsigned row = 0;
1608*404b540aSrobert
1609*404b540aSrobert assert (low <= high);
1610*404b540aSrobert assert (max_objs == 0 || objs != NULL);
1611*404b540aSrobert
1612*404b540aSrobert /* Widen the search from the most recent plots in each row, looking
1613*404b540aSrobert backward in time. */
1614*404b540aSrobert recollection = 0;
1615*404b540aSrobert while (recollection < __mf_opts.persistent_count)
1616*404b540aSrobert {
1617*404b540aSrobert count = 0;
1618*404b540aSrobert
1619*404b540aSrobert for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
1620*404b540aSrobert {
1621*404b540aSrobert unsigned plot;
1622*404b540aSrobert unsigned i;
1623*404b540aSrobert
1624*404b540aSrobert plot = __mf_object_dead_head [row];
1625*404b540aSrobert for (i = 0; i <= recollection; i ++)
1626*404b540aSrobert {
1627*404b540aSrobert __mf_object_t *obj;
1628*404b540aSrobert
1629*404b540aSrobert /* Look backward through row: it's a circular buffer. */
1630*404b540aSrobert if (plot > 0) plot --;
1631*404b540aSrobert else plot = __mf_opts.persistent_count - 1;
1632*404b540aSrobert
1633*404b540aSrobert obj = __mf_object_cemetary [row][plot];
1634*404b540aSrobert if (obj && obj->low <= high && obj->high >= low)
1635*404b540aSrobert {
1636*404b540aSrobert /* Found an overlapping dead object! */
1637*404b540aSrobert if (count < max_objs)
1638*404b540aSrobert objs [count] = obj;
1639*404b540aSrobert count ++;
1640*404b540aSrobert }
1641*404b540aSrobert }
1642*404b540aSrobert }
1643*404b540aSrobert
1644*404b540aSrobert if (count)
1645*404b540aSrobert break;
1646*404b540aSrobert
1647*404b540aSrobert /* Look farther back in time. */
1648*404b540aSrobert recollection = (recollection * 2) + 1;
1649*404b540aSrobert }
1650*404b540aSrobert
1651*404b540aSrobert return count;
1652*404b540aSrobert } else {
1653*404b540aSrobert return 0;
1654*404b540aSrobert }
1655*404b540aSrobert }
1656*404b540aSrobert
1657*404b540aSrobert /* __mf_describe_object */
1658*404b540aSrobert
/* Print a human-readable description of OBJ to stderr: identity,
   bounds, area type, access counters, and allocation (plus, when
   object persistence is on and the object is dead, deallocation)
   time/pc/backtrace.  Calling with OBJ == NULL prints nothing; it
   advances the description epoch, so that in abbreviated mode each
   object is fully described at most once per epoch.  */
static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  /* In abbreviated mode, an object already described during this
     epoch gets only a one-line summary.  */
  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           /* Map the object type code to its display name.  */
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
    }

  /* Deallocation details exist only when objects persist past free.  */
  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );


          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}
1741*404b540aSrobert
1742*404b540aSrobert
1743*404b540aSrobert static int
__mf_report_leaks_fn(mfsplay_tree_node n,void * param)1744*404b540aSrobert __mf_report_leaks_fn (mfsplay_tree_node n, void *param)
1745*404b540aSrobert {
1746*404b540aSrobert __mf_object_t *node = (__mf_object_t *) n->value;
1747*404b540aSrobert unsigned *count = (unsigned *) param;
1748*404b540aSrobert
1749*404b540aSrobert if (count != NULL)
1750*404b540aSrobert (*count) ++;
1751*404b540aSrobert
1752*404b540aSrobert fprintf (stderr, "Leaked object %u:\n", (*count));
1753*404b540aSrobert __mf_describe_object (node);
1754*404b540aSrobert
1755*404b540aSrobert return 0;
1756*404b540aSrobert }
1757*404b540aSrobert
1758*404b540aSrobert
1759*404b540aSrobert static unsigned
__mf_report_leaks()1760*404b540aSrobert __mf_report_leaks ()
1761*404b540aSrobert {
1762*404b540aSrobert unsigned count = 0;
1763*404b540aSrobert
1764*404b540aSrobert (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP),
1765*404b540aSrobert __mf_report_leaks_fn, & count);
1766*404b540aSrobert (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I),
1767*404b540aSrobert __mf_report_leaks_fn, & count);
1768*404b540aSrobert
1769*404b540aSrobert return count;
1770*404b540aSrobert }
1771*404b540aSrobert
1772*404b540aSrobert /* ------------------------------------------------------------------------ */
1773*404b540aSrobert /* __mf_report */
1774*404b540aSrobert
/* Public entry point: emit the mudflap statistics/leak report.
   Thread-locked, recursion-protected wrapper around __mfu_report.  */
void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
1784*404b540aSrobert
/* Unlocked worker for __mf_report: when statistics collection is on,
   print the call counters, lookup-cache utilization, and live/zombie
   object counts to stderr; when leak printing is on (and mode is
   "check"), also print a description of every leaked heap object.  */
void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               " __mf_unregister: %lu [%luB]\n"
               " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               /* Register-size totals are indexed by object type.  */
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               /* Violation counts are indexed by __MF_VIOL_* kind.  */
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               " lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        /* Count objects currently registered in any tree, over the
           whole address space.  */
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          /* Count remembered-dead ("zombie") objects in the cemetery.  */
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, " zombie objects: %u\n", dead_count);
        }
    }
  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
      __mf_describe_object (NULL); /* Reset description epoch.  */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}
1863*404b540aSrobert
1864*404b540aSrobert /* __mf_backtrace */
1865*404b540aSrobert
1866*404b540aSrobert size_t
__mf_backtrace(char *** symbols,void * guess_pc,unsigned guess_omit_levels)1867*404b540aSrobert __mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
1868*404b540aSrobert {
1869*404b540aSrobert void ** pc_array;
1870*404b540aSrobert unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
1871*404b540aSrobert unsigned remaining_size;
1872*404b540aSrobert unsigned omitted_size = 0;
1873*404b540aSrobert unsigned i;
1874*404b540aSrobert DECLARE (void, free, void *ptr);
1875*404b540aSrobert DECLARE (void *, calloc, size_t c, size_t n);
1876*404b540aSrobert DECLARE (void *, malloc, size_t n);
1877*404b540aSrobert
1878*404b540aSrobert pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
1879*404b540aSrobert #ifdef HAVE_BACKTRACE
1880*404b540aSrobert pc_array_size = backtrace (pc_array, pc_array_size);
1881*404b540aSrobert #else
1882*404b540aSrobert #define FETCH(n) do { if (pc_array_size >= n) { \
1883*404b540aSrobert pc_array[n] = __builtin_return_address(n); \
1884*404b540aSrobert if (pc_array[n] == 0) pc_array_size = n; } } while (0)
1885*404b540aSrobert
1886*404b540aSrobert /* Unroll some calls __builtin_return_address because this function
1887*404b540aSrobert only takes a literal integer parameter. */
1888*404b540aSrobert FETCH (0);
1889*404b540aSrobert #if 0
1890*404b540aSrobert /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
1891*404b540aSrobert rather than simply returning 0. :-( */
1892*404b540aSrobert FETCH (1);
1893*404b540aSrobert FETCH (2);
1894*404b540aSrobert FETCH (3);
1895*404b540aSrobert FETCH (4);
1896*404b540aSrobert FETCH (5);
1897*404b540aSrobert FETCH (6);
1898*404b540aSrobert FETCH (7);
1899*404b540aSrobert FETCH (8);
1900*404b540aSrobert if (pc_array_size > 8) pc_array_size = 9;
1901*404b540aSrobert #else
1902*404b540aSrobert if (pc_array_size > 0) pc_array_size = 1;
1903*404b540aSrobert #endif
1904*404b540aSrobert
1905*404b540aSrobert #undef FETCH
1906*404b540aSrobert #endif
1907*404b540aSrobert
1908*404b540aSrobert /* We want to trim the first few levels of the stack traceback,
1909*404b540aSrobert since they contain libmudflap wrappers and junk. If pc_array[]
1910*404b540aSrobert ends up containing a non-NULL guess_pc, then trim everything
1911*404b540aSrobert before that. Otherwise, omit the first guess_omit_levels
1912*404b540aSrobert entries. */
1913*404b540aSrobert
1914*404b540aSrobert if (guess_pc != NULL)
1915*404b540aSrobert for (i=0; i<pc_array_size; i++)
1916*404b540aSrobert if (pc_array [i] == guess_pc)
1917*404b540aSrobert omitted_size = i;
1918*404b540aSrobert
1919*404b540aSrobert if (omitted_size == 0) /* No match? */
1920*404b540aSrobert if (pc_array_size > guess_omit_levels)
1921*404b540aSrobert omitted_size = guess_omit_levels;
1922*404b540aSrobert
1923*404b540aSrobert remaining_size = pc_array_size - omitted_size;
1924*404b540aSrobert
1925*404b540aSrobert #ifdef HAVE_BACKTRACE_SYMBOLS
1926*404b540aSrobert *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
1927*404b540aSrobert #else
1928*404b540aSrobert {
1929*404b540aSrobert /* Let's construct a buffer by hand. It will have <remaining_size>
1930*404b540aSrobert char*'s at the front, pointing at individual strings immediately
1931*404b540aSrobert afterwards. */
1932*404b540aSrobert void *buffer;
1933*404b540aSrobert char *chars;
1934*404b540aSrobert char **pointers;
1935*404b540aSrobert enum { perline = 30 };
1936*404b540aSrobert buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
1937*404b540aSrobert pointers = (char **) buffer;
1938*404b540aSrobert chars = (char *)buffer + (remaining_size * sizeof (char *));
1939*404b540aSrobert for (i = 0; i < remaining_size; i++)
1940*404b540aSrobert {
1941*404b540aSrobert pointers[i] = chars;
1942*404b540aSrobert sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
1943*404b540aSrobert chars = chars + perline;
1944*404b540aSrobert }
1945*404b540aSrobert *symbols = pointers;
1946*404b540aSrobert }
1947*404b540aSrobert #endif
1948*404b540aSrobert CALL_REAL (free, pc_array);
1949*404b540aSrobert
1950*404b540aSrobert return remaining_size;
1951*404b540aSrobert }
1952*404b540aSrobert
1953*404b540aSrobert /* ------------------------------------------------------------------------ */
1954*404b540aSrobert /* __mf_violation */
1955*404b540aSrobert
/* Handle a violation of kind TYPE (__MF_VIOL_*) for the access
   [PTR, PTR+SZ) at program counter PC, attributed to source LOCATION
   (may be NULL).  When verbose, prints a report with an optional
   backtrace and descriptions of nearby live and dead objects, then
   reacts according to __mf_opts.violation_mode: do nothing, raise
   SIGSEGV, abort, or attach gdb to this process.  */
void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  if (__mf_opts.collect_stats)
    /* Out-of-range TYPE values are counted in bucket 0.  */
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message. */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now = { 0, 0 };
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "*******\n"
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          /* Skip 2 levels of libmudflap frames if PC isn't found.  */
          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database. */

          for (i=0; i<num; i++)
            fprintf (stderr, " %s\n", symbols[i]);

          /* Calling free() here would trigger a violation. */
          CALL_REAL(free, symbols);
        }


      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking. */

      if (sz == 0) sz = 1;  /* Treat a zero-sized access as one byte.  */

      /* Pass 0 searches live objects; pass 1 searches dead ones.  */
      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very big (when array index
                 checking) numbers. */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              /* For each endpoint of the checked region: distance
                 before the object, distance after it, or offset into
                 it — exactly one of the three is nonzero (or all are 0
                 when the endpoint coincides with obj->low).  */
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }

  /* How to finally handle this violation? */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:

      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.) */
      break;
    }
}
2106*404b540aSrobert
2107*404b540aSrobert /* ------------------------------------------------------------------------ */
2108*404b540aSrobert
2109*404b540aSrobert
/* Begin watching the memory region [PTR, PTR+SZ).  Returns the number
   of overlapping tracked objects whose watch state was changed.  */
unsigned __mf_watch (void *ptr, size_t sz)
{
  unsigned result;

  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  result = __mf_watch_or_not (ptr, sz, 1);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();

  return result;
}
2120*404b540aSrobert
/* Stop watching the memory region [PTR, PTR+SZ).  Returns the number
   of overlapping tracked objects whose watch state was changed.

   Fix: unlike __mf_watch, this function did not wrap the call in
   BEGIN/END_RECURSION_PROTECT even though __mf_watch_or_not invokes
   the real malloc/free via CALL_REAL and must not re-enter the
   mudflap wrappers.  Add the same protection for consistency.  */
unsigned __mf_unwatch (void *ptr, size_t sz)
{
  unsigned rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mf_watch_or_not (ptr, sz, 0);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
2129*404b540aSrobert
2130*404b540aSrobert
2131*404b540aSrobert static unsigned
__mf_watch_or_not(void * ptr,size_t sz,char flag)2132*404b540aSrobert __mf_watch_or_not (void *ptr, size_t sz, char flag)
2133*404b540aSrobert {
2134*404b540aSrobert uintptr_t ptr_high = CLAMPSZ (ptr, sz);
2135*404b540aSrobert uintptr_t ptr_low = (uintptr_t) ptr;
2136*404b540aSrobert unsigned count = 0;
2137*404b540aSrobert
2138*404b540aSrobert TRACE ("%s ptr=%p size=%lu\n",
2139*404b540aSrobert (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz);
2140*404b540aSrobert
2141*404b540aSrobert switch (__mf_opts.mudflap_mode)
2142*404b540aSrobert {
2143*404b540aSrobert case mode_nop:
2144*404b540aSrobert case mode_populate:
2145*404b540aSrobert case mode_violate:
2146*404b540aSrobert count = 0;
2147*404b540aSrobert break;
2148*404b540aSrobert
2149*404b540aSrobert case mode_check:
2150*404b540aSrobert {
2151*404b540aSrobert __mf_object_t **all_ovr_objs;
2152*404b540aSrobert unsigned obj_count;
2153*404b540aSrobert unsigned n;
2154*404b540aSrobert DECLARE (void *, malloc, size_t c);
2155*404b540aSrobert DECLARE (void, free, void *p);
2156*404b540aSrobert
2157*404b540aSrobert obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0);
2158*404b540aSrobert VERBOSE_TRACE (" %u:", obj_count);
2159*404b540aSrobert
2160*404b540aSrobert all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count));
2161*404b540aSrobert if (all_ovr_objs == NULL) abort ();
2162*404b540aSrobert n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count);
2163*404b540aSrobert assert (n == obj_count);
2164*404b540aSrobert
2165*404b540aSrobert for (n = 0; n < obj_count; n ++)
2166*404b540aSrobert {
2167*404b540aSrobert __mf_object_t *obj = all_ovr_objs[n];
2168*404b540aSrobert
2169*404b540aSrobert VERBOSE_TRACE (" [%p]", (void *) obj);
2170*404b540aSrobert if (obj->watching_p != flag)
2171*404b540aSrobert {
2172*404b540aSrobert obj->watching_p = flag;
2173*404b540aSrobert count ++;
2174*404b540aSrobert
2175*404b540aSrobert /* Remove object from cache, to ensure next access
2176*404b540aSrobert goes through __mf_check(). */
2177*404b540aSrobert if (flag)
2178*404b540aSrobert __mf_uncache_object (obj);
2179*404b540aSrobert }
2180*404b540aSrobert }
2181*404b540aSrobert CALL_REAL (free, all_ovr_objs);
2182*404b540aSrobert }
2183*404b540aSrobert break;
2184*404b540aSrobert }
2185*404b540aSrobert
2186*404b540aSrobert return count;
2187*404b540aSrobert }
2188*404b540aSrobert
2189*404b540aSrobert
2190*404b540aSrobert void
__mf_sigusr1_handler(int num)2191*404b540aSrobert __mf_sigusr1_handler (int num)
2192*404b540aSrobert {
2193*404b540aSrobert __mf_sigusr1_received ++;
2194*404b540aSrobert }
2195*404b540aSrobert
2196*404b540aSrobert /* Install or remove SIGUSR1 handler as necessary.
2197*404b540aSrobert Also, respond to a received pending SIGUSR1. */
void
__mf_sigusr1_respond ()
{
  /* Nonzero while we believe our SIGUSR1 handler is installed;
     prevents redundant signal() calls on every invocation.  */
  static int handler_installed;

#ifdef SIGUSR1
  /* Manage handler: install or remove it as the sigusr1_report
     option is turned on or off at runtime.  */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if(! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }
#endif

  /* Manage enqueued signals: consume one pending SIGUSR1 (received
     counter ahead of handled counter) and emit the report now.  */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_get_state () == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
    }
}
2226*404b540aSrobert
2227*404b540aSrobert
2228*404b540aSrobert /* XXX: provide an alternative __assert_fail function that cannot
2229*404b540aSrobert fail due to libmudflap infinite recursion. */
2230*404b540aSrobert #ifndef NDEBUG
2231*404b540aSrobert
/* Emit the decimal representation of N on file descriptor FD using
   only write(2) -- no stdio, no allocation -- so it is safe to call
   from the assertion-failure path below.  */
static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char digits [bufsize];
  unsigned pos;

  /* Fill the buffer from the rear, least-significant digit first;
     the terminating NUL occupies the final slot.  */
  digits[bufsize-1] = '\0';
  for (pos = 0; pos < bufsize-1; pos++)
    {
      digits[bufsize-2-pos] = (char) ('0' + (n % 10));
      n /= 10;
      if (n == 0)
        {
          char *start = & digits [bufsize-2-pos];
          write (fd, start, strlen (start));
          break;
        }
    }
}
2253*404b540aSrobert
2254*404b540aSrobert
/* Write string S to stderr via the raw write(2) syscall.  */
static void
mf_assert_write (const char *s)
{
  write (2, s, strlen (s));
}

/* Replacement assertion-failure hook: build the report piecewise with
   raw writes (stdio or malloc could recurse back into libmudflap from
   this failure path), then abort.  */
void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
  mf_assert_write ("mf");
#ifdef LIBMUDFLAPTH
  mf_assert_write ("(");
  write_itoa (2, (unsigned) pthread_self ());
  mf_assert_write (")");
#endif
  mf_assert_write (": assertion failure: `");
  mf_assert_write (msg);
  mf_assert_write ("' in ");
  mf_assert_write (func);
  mf_assert_write (" at ");
  mf_assert_write (file);
  mf_assert_write (":");
  write_itoa (2, line);
  mf_assert_write ("\n");
  abort ();
}
2277*404b540aSrobert
2278*404b540aSrobert
2279*404b540aSrobert #endif
2280*404b540aSrobert
2281*404b540aSrobert
2282*404b540aSrobert
2283*404b540aSrobert /* Adapted splay tree code, originally from libiberty. It has been
2284*404b540aSrobert specialized for libmudflap as requested by RMS. */
2285*404b540aSrobert
/* Release P through the real (unwrapped) free, so splay-tree
   bookkeeping never loops back into the mudflap allocation hooks.  */
static void
mfsplay_tree_free (void *p)
{
  DECLARE (void, free, void *p);
  CALL_REAL (free, p);
}
2292*404b540aSrobert
/* Allocate S bytes through the real (unwrapped) malloc.  Note that
   despite the traditional "x" prefix the result may be NULL; callers
   are responsible for checking it.  */
static void *
mfsplay_tree_xmalloc (size_t s)
{
  DECLARE (void *, malloc, size_t s);
  return CALL_REAL (malloc, s);
}
2299*404b540aSrobert
2300*404b540aSrobert
/* Forward declarations for the splay routines defined below.  */
static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree,
                                                    mfsplay_tree_key,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *,
                                                    mfsplay_tree_node *);
2307*404b540aSrobert
2308*404b540aSrobert
2309*404b540aSrobert /* Help splay SP around KEY. PARENT and GRANDPARENT are the parent
2310*404b540aSrobert and grandparent, respectively, of NODE. */
2311*404b540aSrobert
static mfsplay_tree_node
mfsplay_tree_splay_helper (mfsplay_tree sp,
                           mfsplay_tree_key key,
                           mfsplay_tree_node * node,
                           mfsplay_tree_node * parent,
                           mfsplay_tree_node * grandparent)
{
  mfsplay_tree_node *next;
  mfsplay_tree_node n;
  int comparison;

  n = *node;

  /* Empty subtree: the search bottomed out; splay the parent instead.  */
  if (!n)
    return *parent;

  comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));

  if (comparison == 0)
    /* We've found the target. */
    next = 0;
  else if (comparison < 0)
    /* The target is to the left. */
    next = &n->left;
  else
    /* The target is to the right. */
    next = &n->right;

  if (next)
    {
      /* Check whether our recursion depth is too high. Abort this search,
         and signal that a rebalance is required to continue. */
      if (sp->depth > sp->max_depth)
        {
          sp->rebalance_p = 1;
          return n;
        }

      /* Continue down the tree. */
      sp->depth ++;
      n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
      sp->depth --;

      /* The recursive call will change the place to which NODE
         points. */
      if (*node != n || sp->rebalance_p)
        return n;
    }

  if (!parent)
    /* NODE is the root. We are done. */
    return n;

  /* First, handle the case where there is no grandparent (i.e.,
     *PARENT is the root of the tree.) */
  if (!grandparent)
    {
      /* Single rotation ("zig"): N moves up one level into the root
         slot, and the old parent becomes its child.  */
      if (n == (*parent)->left)
        {
          *node = n->right;
          n->right = *parent;
        }
      else
        {
          *node = n->left;
          n->left = *parent;
        }
      *parent = n;
      return n;
    }

  /* Next handle the cases where both N and *PARENT are left children,
     or where both are right children. */
  if (n == (*parent)->left && *parent == (*grandparent)->left)
    {
      /* "Zig-zig" (left-left): double rotation in the same direction;
         N ends up above both its parent and grandparent.  */
      mfsplay_tree_node p = *parent;

      (*grandparent)->left = p->right;
      p->right = *grandparent;
      p->left = n->right;
      n->right = p;
      *grandparent = n;
      return n;
    }
  else if (n == (*parent)->right && *parent == (*grandparent)->right)
    {
      /* "Zig-zig" (right-right): mirror image of the case above.  */
      mfsplay_tree_node p = *parent;

      (*grandparent)->right = p->left;
      p->left = *grandparent;
      p->right = n->left;
      n->left = p;
      *grandparent = n;
      return n;
    }

  /* Finally, deal with the case where N is a left child, but *PARENT
     is a right child, or vice versa.  ("Zig-zag": N's subtrees are
     split between the old parent and grandparent.)  */
  if (n == (*parent)->left)
    {
      (*parent)->left = n->right;
      n->right = *parent;
      (*grandparent)->right = n->left;
      n->left = *grandparent;
      *grandparent = n;
      return n;
    }
  else
    {
      (*parent)->right = n->left;
      n->left = *parent;
      (*grandparent)->left = n->right;
      n->right = *grandparent;
      *grandparent = n;
      return n;
    }
}
2429*404b540aSrobert
2430*404b540aSrobert
2431*404b540aSrobert
2432*404b540aSrobert static int
mfsplay_tree_rebalance_helper1(mfsplay_tree_node n,void * array_ptr)2433*404b540aSrobert mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr)
2434*404b540aSrobert {
2435*404b540aSrobert mfsplay_tree_node **p = array_ptr;
2436*404b540aSrobert *(*p) = n;
2437*404b540aSrobert (*p)++;
2438*404b540aSrobert return 0;
2439*404b540aSrobert }
2440*404b540aSrobert
2441*404b540aSrobert
2442*404b540aSrobert static mfsplay_tree_node
mfsplay_tree_rebalance_helper2(mfsplay_tree_node * array,unsigned low,unsigned high)2443*404b540aSrobert mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
2444*404b540aSrobert unsigned high)
2445*404b540aSrobert {
2446*404b540aSrobert unsigned middle = low + (high - low) / 2;
2447*404b540aSrobert mfsplay_tree_node n = array[middle];
2448*404b540aSrobert
2449*404b540aSrobert /* Note that since we're producing a balanced binary tree, it is not a problem
2450*404b540aSrobert that this function is recursive. */
2451*404b540aSrobert if (low + 1 <= middle)
2452*404b540aSrobert n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
2453*404b540aSrobert else
2454*404b540aSrobert n->left = NULL;
2455*404b540aSrobert
2456*404b540aSrobert if (middle + 1 <= high)
2457*404b540aSrobert n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
2458*404b540aSrobert else
2459*404b540aSrobert n->right = NULL;
2460*404b540aSrobert
2461*404b540aSrobert return n;
2462*404b540aSrobert }
2463*404b540aSrobert
2464*404b540aSrobert
2465*404b540aSrobert /* Rebalance the entire tree. Do this by copying all the node
2466*404b540aSrobert pointers into an array, then cleverly re-linking them. */
2467*404b540aSrobert static void
mfsplay_tree_rebalance(mfsplay_tree sp)2468*404b540aSrobert mfsplay_tree_rebalance (mfsplay_tree sp)
2469*404b540aSrobert {
2470*404b540aSrobert mfsplay_tree_node *all_nodes, *all_nodes_1;
2471*404b540aSrobert
2472*404b540aSrobert if (sp->num_keys <= 2)
2473*404b540aSrobert return;
2474*404b540aSrobert
2475*404b540aSrobert all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);
2476*404b540aSrobert
2477*404b540aSrobert /* Traverse all nodes to copy their addresses into this array. */
2478*404b540aSrobert all_nodes_1 = all_nodes;
2479*404b540aSrobert mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
2480*404b540aSrobert (void *) &all_nodes_1);
2481*404b540aSrobert
2482*404b540aSrobert /* Relink all the nodes. */
2483*404b540aSrobert sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);
2484*404b540aSrobert
2485*404b540aSrobert mfsplay_tree_free (all_nodes);
2486*404b540aSrobert }
2487*404b540aSrobert
2488*404b540aSrobert
2489*404b540aSrobert /* Splay SP around KEY. */
static void
mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
{
  if (sp->root == 0)
    return;

  /* If we just splayed the tree with the same key, do nothing. */
  if (sp->last_splayed_key_p &&
      (sp->last_splayed_key == key))
    return;

  /* Compute a maximum recursion depth for a splay tree with NUM nodes.
     The idea is to limit excessive stack usage if we're facing
     degenerate access patterns. Unfortunately such patterns can occur
     e.g. during static initialization, where many static objects might
     be registered in increasing address sequence, or during a case where
     large tree-like heap data structures are allocated quickly.

     On x86, this corresponds to roughly 200K of stack usage.
     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack. */
  sp->max_depth = 2500;
  sp->rebalance_p = sp->depth = 0;

  /* The helper gives up (setting sp->rebalance_p) if the tree is
     deeper than max_depth; in that case flatten it to near-log depth
     and retry exactly once.  */
  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
  if (sp->rebalance_p)
    {
      mfsplay_tree_rebalance (sp);

      sp->rebalance_p = sp->depth = 0;
      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);

      /* A second depth overflow right after rebalancing should be
         impossible; treat it as internal corruption.  */
      if (sp->rebalance_p)
        abort ();
    }


  /* Cache this splay key. */
  sp->last_splayed_key = key;
  sp->last_splayed_key_p = 1;
}
2530*404b540aSrobert
2531*404b540aSrobert
2532*404b540aSrobert
2533*404b540aSrobert /* Allocate a new splay tree. */
2534*404b540aSrobert static mfsplay_tree
mfsplay_tree_new()2535*404b540aSrobert mfsplay_tree_new ()
2536*404b540aSrobert {
2537*404b540aSrobert mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s));
2538*404b540aSrobert sp->root = NULL;
2539*404b540aSrobert sp->last_splayed_key_p = 0;
2540*404b540aSrobert sp->num_keys = 0;
2541*404b540aSrobert
2542*404b540aSrobert return sp;
2543*404b540aSrobert }
2544*404b540aSrobert
2545*404b540aSrobert
2546*404b540aSrobert
2547*404b540aSrobert /* Insert a new node (associating KEY with DATA) into SP. If a
2548*404b540aSrobert previous node with the indicated KEY exists, its data is replaced
2549*404b540aSrobert with the new value. Returns the new node. */
2550*404b540aSrobert static mfsplay_tree_node
mfsplay_tree_insert(mfsplay_tree sp,mfsplay_tree_key key,mfsplay_tree_value value)2551*404b540aSrobert mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
2552*404b540aSrobert {
2553*404b540aSrobert int comparison = 0;
2554*404b540aSrobert
2555*404b540aSrobert mfsplay_tree_splay (sp, key);
2556*404b540aSrobert
2557*404b540aSrobert if (sp->root)
2558*404b540aSrobert comparison = ((sp->root->key > key) ? 1 :
2559*404b540aSrobert ((sp->root->key < key) ? -1 : 0));
2560*404b540aSrobert
2561*404b540aSrobert if (sp->root && comparison == 0)
2562*404b540aSrobert {
2563*404b540aSrobert /* If the root of the tree already has the indicated KEY, just
2564*404b540aSrobert replace the value with VALUE. */
2565*404b540aSrobert sp->root->value = value;
2566*404b540aSrobert }
2567*404b540aSrobert else
2568*404b540aSrobert {
2569*404b540aSrobert /* Create a new node, and insert it at the root. */
2570*404b540aSrobert mfsplay_tree_node node;
2571*404b540aSrobert
2572*404b540aSrobert node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
2573*404b540aSrobert node->key = key;
2574*404b540aSrobert node->value = value;
2575*404b540aSrobert sp->num_keys++;
2576*404b540aSrobert if (!sp->root)
2577*404b540aSrobert node->left = node->right = 0;
2578*404b540aSrobert else if (comparison < 0)
2579*404b540aSrobert {
2580*404b540aSrobert node->left = sp->root;
2581*404b540aSrobert node->right = node->left->right;
2582*404b540aSrobert node->left->right = 0;
2583*404b540aSrobert }
2584*404b540aSrobert else
2585*404b540aSrobert {
2586*404b540aSrobert node->right = sp->root;
2587*404b540aSrobert node->left = node->right->left;
2588*404b540aSrobert node->right->left = 0;
2589*404b540aSrobert }
2590*404b540aSrobert
2591*404b540aSrobert sp->root = node;
2592*404b540aSrobert sp->last_splayed_key_p = 0;
2593*404b540aSrobert }
2594*404b540aSrobert
2595*404b540aSrobert return sp->root;
2596*404b540aSrobert }
2597*404b540aSrobert
2598*404b540aSrobert /* Remove KEY from SP. It is not an error if it did not exist. */
2599*404b540aSrobert
2600*404b540aSrobert static void
mfsplay_tree_remove(mfsplay_tree sp,mfsplay_tree_key key)2601*404b540aSrobert mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
2602*404b540aSrobert {
2603*404b540aSrobert mfsplay_tree_splay (sp, key);
2604*404b540aSrobert sp->last_splayed_key_p = 0;
2605*404b540aSrobert if (sp->root && (sp->root->key == key))
2606*404b540aSrobert {
2607*404b540aSrobert mfsplay_tree_node left, right;
2608*404b540aSrobert left = sp->root->left;
2609*404b540aSrobert right = sp->root->right;
2610*404b540aSrobert /* Delete the root node itself. */
2611*404b540aSrobert mfsplay_tree_free (sp->root);
2612*404b540aSrobert sp->num_keys--;
2613*404b540aSrobert /* One of the children is now the root. Doesn't matter much
2614*404b540aSrobert which, so long as we preserve the properties of the tree. */
2615*404b540aSrobert if (left)
2616*404b540aSrobert {
2617*404b540aSrobert sp->root = left;
2618*404b540aSrobert /* If there was a right child as well, hang it off the
2619*404b540aSrobert right-most leaf of the left child. */
2620*404b540aSrobert if (right)
2621*404b540aSrobert {
2622*404b540aSrobert while (left->right)
2623*404b540aSrobert left = left->right;
2624*404b540aSrobert left->right = right;
2625*404b540aSrobert }
2626*404b540aSrobert }
2627*404b540aSrobert else
2628*404b540aSrobert sp->root = right;
2629*404b540aSrobert }
2630*404b540aSrobert }
2631*404b540aSrobert
2632*404b540aSrobert /* Lookup KEY in SP, returning VALUE if present, and NULL
2633*404b540aSrobert otherwise. */
2634*404b540aSrobert
2635*404b540aSrobert static mfsplay_tree_node
mfsplay_tree_lookup(mfsplay_tree sp,mfsplay_tree_key key)2636*404b540aSrobert mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key)
2637*404b540aSrobert {
2638*404b540aSrobert mfsplay_tree_splay (sp, key);
2639*404b540aSrobert if (sp->root && (sp->root->key == key))
2640*404b540aSrobert return sp->root;
2641*404b540aSrobert else
2642*404b540aSrobert return 0;
2643*404b540aSrobert }
2644*404b540aSrobert
2645*404b540aSrobert
2646*404b540aSrobert /* Return the immediate predecessor KEY, or NULL if there is no
2647*404b540aSrobert predecessor. KEY need not be present in the tree. */
2648*404b540aSrobert
2649*404b540aSrobert static mfsplay_tree_node
mfsplay_tree_predecessor(mfsplay_tree sp,mfsplay_tree_key key)2650*404b540aSrobert mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key)
2651*404b540aSrobert {
2652*404b540aSrobert int comparison;
2653*404b540aSrobert mfsplay_tree_node node;
2654*404b540aSrobert /* If the tree is empty, there is certainly no predecessor. */
2655*404b540aSrobert if (!sp->root)
2656*404b540aSrobert return NULL;
2657*404b540aSrobert /* Splay the tree around KEY. That will leave either the KEY
2658*404b540aSrobert itself, its predecessor, or its successor at the root. */
2659*404b540aSrobert mfsplay_tree_splay (sp, key);
2660*404b540aSrobert comparison = ((sp->root->key > key) ? 1 :
2661*404b540aSrobert ((sp->root->key < key) ? -1 : 0));
2662*404b540aSrobert
2663*404b540aSrobert /* If the predecessor is at the root, just return it. */
2664*404b540aSrobert if (comparison < 0)
2665*404b540aSrobert return sp->root;
2666*404b540aSrobert /* Otherwise, find the rightmost element of the left subtree. */
2667*404b540aSrobert node = sp->root->left;
2668*404b540aSrobert if (node)
2669*404b540aSrobert while (node->right)
2670*404b540aSrobert node = node->right;
2671*404b540aSrobert return node;
2672*404b540aSrobert }
2673*404b540aSrobert
2674*404b540aSrobert /* Return the immediate successor KEY, or NULL if there is no
2675*404b540aSrobert successor. KEY need not be present in the tree. */
2676*404b540aSrobert
2677*404b540aSrobert static mfsplay_tree_node
mfsplay_tree_successor(mfsplay_tree sp,mfsplay_tree_key key)2678*404b540aSrobert mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key)
2679*404b540aSrobert {
2680*404b540aSrobert int comparison;
2681*404b540aSrobert mfsplay_tree_node node;
2682*404b540aSrobert /* If the tree is empty, there is certainly no successor. */
2683*404b540aSrobert if (!sp->root)
2684*404b540aSrobert return NULL;
2685*404b540aSrobert /* Splay the tree around KEY. That will leave either the KEY
2686*404b540aSrobert itself, its predecessor, or its successor at the root. */
2687*404b540aSrobert mfsplay_tree_splay (sp, key);
2688*404b540aSrobert comparison = ((sp->root->key > key) ? 1 :
2689*404b540aSrobert ((sp->root->key < key) ? -1 : 0));
2690*404b540aSrobert /* If the successor is at the root, just return it. */
2691*404b540aSrobert if (comparison > 0)
2692*404b540aSrobert return sp->root;
2693*404b540aSrobert /* Otherwise, find the leftmost element of the right subtree. */
2694*404b540aSrobert node = sp->root->right;
2695*404b540aSrobert if (node)
2696*404b540aSrobert while (node->left)
2697*404b540aSrobert node = node->left;
2698*404b540aSrobert return node;
2699*404b540aSrobert }
2700*404b540aSrobert
2701*404b540aSrobert /* Call FN, passing it the DATA, for every node in SP, following an
2702*404b540aSrobert in-order traversal. If FN every returns a non-zero value, the
2703*404b540aSrobert iteration ceases immediately, and the value is returned.
2704*404b540aSrobert Otherwise, this function returns 0.
2705*404b540aSrobert
2706*404b540aSrobert This function simulates recursion using dynamically allocated
2707*404b540aSrobert arrays, since it may be called from mfsplay_tree_rebalance(), which
2708*404b540aSrobert in turn means that the tree is already uncomfortably deep for stack
2709*404b540aSrobert space limits. */
static int
mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data)
{
  /* Explicit traversal stacks: stack1 holds the node at each level,
     stack2 the per-node traversal state.  A root-to-leaf path visits
     at most num_keys nodes, so num_keys entries suffice.
     NOTE(review): both allocations are unchecked; on OOM the stores
     below would dereference NULL -- consider aborting instead.  */
  mfsplay_tree_node *stack1;
  char *stack2;
  unsigned sp;
  int val = 0;
  enum s { s_left, s_here, s_right, s_up };

  if (st->root == NULL) /* => num_keys == 0 */
    return 0;

  stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys);
  stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys);

  /* Start at the root, about to descend leftward.  */
  sp = 0;
  stack1 [sp] = st->root;
  stack2 [sp] = s_left;

  while (1)
    {
      mfsplay_tree_node n;
      enum s s;

      n = stack1 [sp];
      s = stack2 [sp];

      /* Handle each of the four possible states separately. */

      /* 1: We're here to traverse the left subtree (if any). */
      if (s == s_left)
        {
          stack2 [sp] = s_here;
          if (n->left != NULL)
            {
              sp ++;
              stack1 [sp] = n->left;
              stack2 [sp] = s_left;
            }
        }

      /* 2: We're here to traverse this node. */
      else if (s == s_here)
        {
          stack2 [sp] = s_right;
          /* A nonzero callback result stops the whole traversal and
             becomes this function's return value.  */
          val = (*fn) (n, data);
          if (val) break;
        }

      /* 3: We're here to traverse the right subtree (if any). */
      else if (s == s_right)
        {
          stack2 [sp] = s_up;
          if (n->right != NULL)
            {
              sp ++;
              stack1 [sp] = n->right;
              stack2 [sp] = s_left;
            }
        }

      /* 4: We're here after both subtrees (if any) have been traversed. */
      else if (s == s_up)
        {
          /* Pop the stack. */
          if (sp == 0) break; /* Popping off the root note: we're finished! */
          sp --;
        }

      else
        abort ();
    }

  mfsplay_tree_free (stack1);
  mfsplay_tree_free (stack2);
  return val;
}
2787