//===-- sanitizer_coverage_fuchsia.cc -------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage Controller for Trace PC Guard, Fuchsia-specific version.
//
// This Fuchsia-specific implementation uses the same basic scheme and the
// same simple '.sancov' file format as the generic implementation. The
// difference is that we just produce a single blob of output for the whole
// program, not a separate one per DSO. We do not sort the PC table and do
// not prune the zeros, so the resulting file is always as large as it
// would be to report 100% coverage. Implicit tracing information about
// the address ranges of DSOs allows offline tools to split the one big
// blob into separate files that the 'sancov' tool can understand.
//
// Unlike the traditional implementation that uses an atexit hook to write
// out data files at the end, the results on Fuchsia do not go into a file
// per se. The 'coverage_dir' option is ignored. Instead, they are stored
// directly into a shared memory object (a Zircon VMO). At exit, that VMO
// is handed over to a system service that's responsible for getting the
// data out to somewhere that it can be fed into the sancov tool (where and
// how is not our problem).
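//
// For orientation, a sketch of the blob laid out in the VMO (and thus
// handed to the data sink), in 64-bit words:
//
//   word 0      Magic64 (0xC0BFFFFFFFFFFF64)
//   word 1..N   one slot per guard index, either zero (site never hit) or
//               the PC recorded by TracePcGuard for that site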

#include "sanitizer_platform.h"
#if SANITIZER_FUCHSIA
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"

#include <zircon/process.h>
#include <zircon/sanitizer.h>
#include <zircon/syscalls.h>

using namespace __sanitizer;  // NOLINT

namespace __sancov {
namespace {

// TODO(mcgrathr): Move the constant into a header shared with other impls.
constexpr u64 Magic64 = 0xC0BFFFFFFFFFFF64ULL;
static_assert(SANITIZER_WORDSIZE == 64, "Fuchsia is always LP64");

constexpr const char kSancovSinkName[] = "sancov";

// Collects trace-pc guard coverage.
// This class relies on zero-initialization.
class TracePcGuardController final {
 public:
  // For each PC location being tracked, there is a u32 reserved in global
  // data called the "guard". At startup, we assign each guard slot a
  // unique index into the big results array. Later during runtime, the
  // first call to TracePcGuard (below) will store the corresponding PC at
  // that index in the array. (Each later call with the same guard slot is
  // presumed to be from the same PC.) Then it clears the guard slot back
  // to zero, which tells the compiler not to bother calling in again. At
  // the end of the run, we have a big array where each element is either
  // zero or is a tracked PC location that was hit in the trace.
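  //
  // A concrete walk-through (hypothetical numbers): if the first
  // translation unit to register has three guard slots, Setup hands out
  // index 1 and the slots are filled with 1, 2, 3; a second unit with two
  // slots gets 4 and 5. When the guard holding 4 fires, TracePcGuard
  // stores that call site's PC at array_[4] and resets the guard to zero,
  // so later hits on the same site return immediately.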

  // This is called from global constructors. Each translation unit has a
  // contiguous array of guard slots, and a constructor that calls here
  // with the bounds of its array. Those constructors are allowed to call
  // here more than once for the same array. Usually all of these
  // constructors run in the initial thread, but it's possible that a
  // dlopen call on a secondary thread will run constructors that get here.
  void InitTracePcGuard(u32 *start, u32 *end) {
    if (end > start && *start == 0 && common_flags()->coverage) {
      // Complete the setup before filling in any guards with indices.
      // This avoids the possibility of code called from Setup reentering
      // TracePcGuard.
      u32 idx = Setup(end - start);
      for (u32 *p = start; p < end; ++p) {
        *p = idx++;
      }
    }
  }

  void TracePcGuard(u32 *guard, uptr pc) {
    atomic_uint32_t *guard_ptr = reinterpret_cast<atomic_uint32_t *>(guard);
    u32 idx = atomic_exchange(guard_ptr, 0, memory_order_relaxed);
    if (idx > 0) array_[idx] = pc;
  }

  void Dump() {
    BlockingMutexLock locked(&setup_lock_);
    if (array_) {
      CHECK_NE(vmo_, ZX_HANDLE_INVALID);

      // Publish the VMO to the system, where it can be collected and
      // analyzed after this process exits. This always consumes the VMO
      // handle. Any failure is just logged and not indicated to us.
      __sanitizer_publish_data(kSancovSinkName, vmo_);
      vmo_ = ZX_HANDLE_INVALID;

      // This will route to __sanitizer_log_write, which will ensure that
      // information about shared libraries is written out. This message
      // uses the `dumpfile` symbolizer markup element to highlight the
      // dump. See the explanation for this in:
      // https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md
      Printf("SanitizerCoverage: {{{dumpfile:%s:%s}}} with up to %u PCs\n",
             kSancovSinkName, vmo_name_, next_index_ - 1);
    }
  }

 private:
  // We map in the largest possible view into the VMO: one word
  // for every possible 32-bit index value. This avoids the need
  // to change the mapping when increasing the size of the VMO.
  // We can always spare the 32G of address space.
  static constexpr size_t MappingSize = sizeof(uptr) << 32;

  BlockingMutex setup_lock_ = BlockingMutex(LINKER_INITIALIZED);
  uptr *array_ = nullptr;
  u32 next_index_ = 0;
  zx_handle_t vmo_ = {};
  char vmo_name_[ZX_MAX_NAME_LEN] = {};

  size_t DataSize() const { return next_index_ * sizeof(uintptr_t); }

  u32 Setup(u32 num_guards) {
    BlockingMutexLock locked(&setup_lock_);
    DCHECK(common_flags()->coverage);

    if (next_index_ == 0) {
      CHECK_EQ(vmo_, ZX_HANDLE_INVALID);
      CHECK_EQ(array_, nullptr);

      // The first sample goes at [1] to reserve [0] for the magic number.
      next_index_ = 1 + num_guards;

      zx_status_t status = _zx_vmo_create(DataSize(), 0, &vmo_);
      CHECK_EQ(status, ZX_OK);

      // Give the VMO a name including our process KOID so it's easy to spot.
      internal_snprintf(vmo_name_, sizeof(vmo_name_), "%s.%zu", kSancovSinkName,
                        internal_getpid());
      _zx_object_set_property(vmo_, ZX_PROP_NAME, vmo_name_,
                              internal_strlen(vmo_name_));

      // Map the largest possible view we might need into the VMO. Later
      // we might need to increase the VMO's size before we can use larger
      // indices, but we'll never move the mapping address so we don't have
      // any multi-thread synchronization issues with that.
      uintptr_t mapping;
      status =
          _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                       0, vmo_, 0, MappingSize, &mapping);
      CHECK_EQ(status, ZX_OK);

      // Hereafter other threads are free to start storing into
      // elements [1, next_index_) of the big array.
      array_ = reinterpret_cast<uptr *>(mapping);

      // Store the magic number.
      // Hereafter, the VMO serves as the contents of the '.sancov' file.
      array_[0] = Magic64;

      return 1;
    } else {
      // The VMO is already mapped in, but it's not big enough to use the
      // new indices. So increase the size to cover the new maximum index.

      CHECK_NE(vmo_, ZX_HANDLE_INVALID);
      CHECK_NE(array_, nullptr);

      uint32_t first_index = next_index_;
      next_index_ += num_guards;

      zx_status_t status = _zx_vmo_set_size(vmo_, DataSize());
      CHECK_EQ(status, ZX_OK);

      return first_index;
    }
  }
};

static TracePcGuardController pc_guard_controller;

}  // namespace
}  // namespace __sancov

namespace __sanitizer {
void InitializeCoverage(bool enabled, const char *dir) {
  CHECK_EQ(enabled, common_flags()->coverage);
  CHECK_EQ(dir, common_flags()->coverage_dir);

  static bool coverage_enabled = false;
  if (!coverage_enabled) {
    coverage_enabled = enabled;
    Atexit(__sanitizer_cov_dump);
    AddDieCallback(__sanitizer_cov_dump);
  }
}
}  // namespace __sanitizer

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(  // NOLINT
    const uptr *pcs, uptr len) {
  UNIMPLEMENTED();
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32 *guard) {
  if (!*guard) return;
  // Subtract one so the stored PC lies within the call instruction rather
  // than at the return address, attributing the hit to the call site.
  __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init,
                             u32 *start, u32 *end) {
  if (start == end || *start) return;
  __sancov::pc_guard_controller.InitTracePcGuard(start, end);
}

SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage() {
  __sancov::pc_guard_controller.Dump();
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
  __sanitizer_dump_trace_pc_guard_coverage();
}
// Default empty implementations (weak). Users should redefine them.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp4, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp8, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp1, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp2, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp4, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp8, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_switch, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA