1#!/usr/bin/env bash 2# We don't want to tell kernel to include %e or %E since these 3# can include whitespaces or other funny characters, and working 4# with those on the cmdline would be a nightmare. Use procfs for 5# the remaining pieces we want to gather: 6# |$rootdir/scripts/core-collector.sh %P %s %t %c $output_dir 7 8core_meta() { 9 jq . <<- CORE 10 { 11 "$exe_comm": { 12 "ts": "$core_time", 13 "size": "$core_size bytes", 14 "PID": $core_pid, 15 "signal": "$core_sig ($core_sig_name)", 16 "path": "$exe_path", 17 "statm": "$statm" 18 } 19 } 20 CORE 21} 22 23bt() { hash gdb && gdb -batch -ex "thread apply all bt full" "$1" "$2" 2>&1; } 24 25stderr() { 26 exec 2> "$core.stderr.txt" 27 set -x 28} 29 30args+=(core_pid) 31args+=(core_sig) 32args+=(core_ts) 33args+=(rlimit) 34args+=(output_dir) 35 36read -r "${args[@]}" <<< "$*" 37 38exe_path=$(readlink -f "/proc/$core_pid/exe") 39exe_comm=$(< "/proc/$core_pid/comm") 40statm=$(< "/proc/$core_pid/statm") 41core_time=$(date -d@"$core_ts") 42core_sig_name=$(kill -l "$core_sig") 43 44core=$output_dir/${exe_path##*/}_$core_pid.core 45stderr 46 47# RLIMIT_CORE is not enforced when core is piped to us. To make 48# sure we won't attempt to overload underlying storage, copy 49# only the reasonable amount of bytes (systemd defaults to 2G 50# so let's follow that). But first, check limits of terminating 51# process to see if we need to make any adjustments. 52max_core=$((1024 * 1024 * 1024 * 2)) 53 54if ((rlimit == 0xffffffffffffffff || rlimit > max_core)); then 55 rlimit=$max_core 56fi 57 58# Clear path for lz 59rm -f "$core"{,.{bin,bt,gz,json}} 60 61# Slurp the core 62head -c "$rlimit" <&0 > "$core" 63core_size=$(wc -c < "$core") 64 65# Compress it 66gzip -c "$core" > "$core.gz" 67 68# Save the binary 69cp "$exe_path" "$core.bin" 70 71# Save the backtrace 72bt "$exe_path" "$core" > "$core.bt.txt" 73 74# Save the metadata of the core 75core_meta > "$core.json" 76 77# Nuke the original core 78rm "$core" 79