Collecting data for diagnostic purposes
We can distinguish three kinds of containers in this example:
- Agent: collects data and exposes a web endpoint so the data can be read conveniently
- Data store: in our example Prometheus; it collects the data and manages retention
- Data consumers: all applications that consume our diagnostic data
Here are a few kinds of agent applications:
- node-exporter: collects data about the physical host
- smartmon: collects S.M.A.R.T. data about the disks and passes it on to node-exporter
- cadvisor: collects data about other containers
- prometheus-pve-exporter: collects data about virtual machines in Proxmox
Collecting the data
Collecting data about the physical machine
version: '3.8'
services:
  node-exporter:
    image: prom/node-exporter:latest
    container_name: node-exporter
    restart: unless-stopped
    hostname: portainer-pc
    ports: [9100:9100]
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/TZ:ro
      - /global_config/docker_data/node-exporter/textfile:/etc/node-exporter/textfile
    command:
      - '--path.procfs=/host/proc'
      - '--path.rootfs=/rootfs'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
      - '--collector.textfile.directory=/etc/node-exporter/textfile'

  smartmon-exporter:
    privileged: true
    image: ghcr.io/galexrt/node-exporter-textfiles:main
    container_name: smartmon
    restart: unless-stopped
    volumes:
      - /global_config/docker_data/node-exporter/textfile:/var/lib/node_exporter
      - /global_config/docker_data/node-exporter/scripts/smart.sh:/scripts/smart.sh:ro
    environment:
      - SCRIPT=smart.sh
      - OUTPUT_FILENAME=smartmon_data
      - INTERVAL=60
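Once the stack is up, a quick sanity check of the node-exporter endpoint (host and port taken from the compose file above, adjust to your setup):
curl -s http://portainer-pc.lan:9100/metrics | grep '^node_load'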
After starting smartmon, it may be necessary to update the drive database inside the container.
docker exec -it smartmon /bin/bash
update-smart-drivedb --no-verify
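To confirm that the collector actually produces data, the script can be run by hand and the shared textfile directory inspected (paths as in the compose file above):
docker exec smartmon /usr/sbin/smartctl --scan-open
docker exec smartmon bash /scripts/smart.sh | head
ls /global_config/docker_data/node-exporter/textfile/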
smart.sh, with minimal modifications compared to the original:
#!/bin/bash
parse_smartctl_attributes_awk="$(
cat <<'SMARTCTLAWK'
$1 ~ /^ *[0-9]+$/ && $2 ~ /^[a-zA-Z0-9_-]+$/ {
  gsub(/-/, "_");
  printf "%s_value{%s,smart_id=\"%s\"} %d\n", $2, labels, $1, $4
  printf "%s_worst{%s,smart_id=\"%s\"} %d\n", $2, labels, $1, $5
  printf "%s_threshold{%s,smart_id=\"%s\"} %d\n", $2, labels, $1, $6
  printf "%s_raw_value{%s,smart_id=\"%s\"} %e\n", $2, labels, $1, $10
}
SMARTCTLAWK
)"
smartmon_attrs="$(
cat <<'SMARTMONATTRS'
airflow_temperature_cel
command_timeout
current_pending_sector
end_to_end_error
erase_fail_count
g_sense_error_rate
hardware_ecc_recovered
host_reads_32mib
host_reads_mib
host_writes_32mib
host_writes_mib
load_cycle_count
media_wearout_indicator
nand_writes_1gib
offline_uncorrectable
power_cycle_count
power_on_hours
ssd_life_left
program_fail_cnt_total
program_fail_count
raw_read_error_rate
reallocated_event_count
reallocated_sector_ct
reported_uncorrect
runtime_bad_block
sata_downshift_count
seek_error_rate
spin_retry_count
spin_up_time
start_stop_count
temperature_case
temperature_celsius
temperature_internal
total_lbas_read
total_lbas_written
udma_crc_error_count
unsafe_shutdown_count
unused_rsvd_blk_cnt_tot
wear_leveling_count
workld_host_reads_perc
workld_media_wear_indic
workload_minutes
SMARTMONATTRS
)"
smartmon_attrs="$(echo "${smartmon_attrs}" | xargs | tr ' ' '|')"
parse_smartctl_attributes() {
  local disk="$1"
  local disk_type="$2"
  local labels="disk=\"${disk}\",type=\"${disk_type}\""
  sed 's/^ \+//g' |
    awk -v labels="${labels}" "${parse_smartctl_attributes_awk}" 2>/dev/null |
    tr '[:upper:]' '[:lower:]' |
    grep -E "(${smartmon_attrs})"
}
parse_smartctl_scsi_attributes() {
  local disk="$1"
  local disk_type="$2"
  local labels="disk=\"${disk}\",type=\"${disk_type}\""
  while read -r line; do
    attr_type="$(echo "${line}" | tr '=' ':' | cut -f1 -d: | sed 's/^ \+//g' | tr ' ' '_')"
    attr_value="$(echo "${line}" | tr '=' ':' | cut -f2 -d: | sed 's/^ \+//g')"
    case "${attr_type}" in
      number_of_hours_powered_up_) power_on="$(echo "${attr_value}" | awk '{ printf "%e\n", $1 }')" ;;
      Current_Drive_Temperature) temp_cel="$(echo "${attr_value}" | cut -f1 -d' ' | awk '{ printf "%e\n", $1 }')" ;;
      Blocks_sent_to_initiator_) lbas_read="$(echo "${attr_value}" | awk '{ printf "%e\n", $1 }')" ;;
      Blocks_received_from_initiator_) lbas_written="$(echo "${attr_value}" | awk '{ printf "%e\n", $1 }')" ;;
      Accumulated_start-stop_cycles) power_cycle="$(echo "${attr_value}" | awk '{ printf "%e\n", $1 }')" ;;
      Elements_in_grown_defect_list) grown_defects="$(echo "${attr_value}" | awk '{ printf "%e\n", $1 }')" ;;
    esac
  done
  [ -n "$power_on" ] && echo "power_on_hours_raw_value{${labels},smart_id=\"9\"} ${power_on}"
  [ -n "$temp_cel" ] && echo "temperature_celsius_raw_value{${labels},smart_id=\"194\"} ${temp_cel}"
  [ -n "$lbas_read" ] && echo "total_lbas_read_raw_value{${labels},smart_id=\"242\"} ${lbas_read}"
  [ -n "$lbas_written" ] && echo "total_lbas_written_raw_value{${labels},smart_id=\"242\"} ${lbas_written}"
  [ -n "$power_cycle" ] && echo "power_cycle_count_raw_value{${labels},smart_id=\"12\"} ${power_cycle}"
  [ -n "$grown_defects" ] && echo "grown_defects_count_raw_value{${labels},smart_id=\"12\"} ${grown_defects}"
}
parse_smartctl_info() {
  local -i smart_available=0 smart_enabled=0 smart_healthy=
  local disk="$1" disk_type="$2"
  local model_family='' device_model='' serial_number='' fw_version='' vendor='' product='' revision='' lun_id=''
  while read -r line; do
    info_type="$(echo "${line}" | cut -f1 -d: | tr ' ' '_')"
    info_value="$(echo "${line}" | cut -f2- -d: | sed 's/^ \+//g' | sed 's/"/\\"/')"
    case "${info_type}" in
      Model_Family) model_family="${info_value}" ;;
      Device_Model) device_model="${info_value}" ;;
      Serial_Number) serial_number="${info_value}" ;;
      Firmware_Version) fw_version="${info_value}" ;;
      Vendor) vendor="${info_value}" ;;
      Product) product="${info_value}" ;;
      Revision) revision="${info_value}" ;;
      Logical_Unit_id) lun_id="${info_value}" ;;
    esac
    if [[ "${info_type}" == 'SMART_support_is' ]]; then
      case "${info_value:0:7}" in
        Enabled) smart_available=1; smart_enabled=1 ;;
        Availab) smart_available=1; smart_enabled=0 ;;
        Unavail) smart_available=0; smart_enabled=0 ;;
      esac
    fi
    if [[ "${info_type}" == 'SMART_overall-health_self-assessment_test_result' ]]; then
      case "${info_value:0:6}" in
        PASSED) smart_healthy=1 ;;
        *) smart_healthy=0 ;;
      esac
    elif [[ "${info_type}" == 'SMART_Health_Status' ]]; then
      case "${info_value:0:2}" in
        OK) smart_healthy=1 ;;
        *) smart_healthy=0 ;;
      esac
    fi
  done
  echo "device_info{disk=\"${disk}\",type=\"${disk_type}\",vendor=\"${vendor}\",product=\"${product}\",revision=\"${revision}\",lun_id=\"${lun_id}\",model_family=\"${model_family}\",device_model=\"${device_model}\",serial_number=\"${serial_number}\",firmware_version=\"${fw_version}\"} 1"
  echo "device_smart_available{disk=\"${disk}\",type=\"${disk_type}\"} ${smart_available}"
  echo "device_smart_enabled{disk=\"${disk}\",type=\"${disk_type}\"} ${smart_enabled}"
  [[ "${smart_healthy}" != "" ]] && echo "device_smart_healthy{disk=\"${disk}\",type=\"${disk_type}\"} ${smart_healthy}"
}
output_format_awk="$(
cat <<'OUTPUTAWK'
BEGIN { v = "" }
v != $1 {
print "";
print "# HELP smartmon_" $1 " SMART metric " $1;
print "# TYPE smartmon_" $1 " gauge";
v = $1
}
{print "smartmon_" $0}
OUTPUTAWK
)"
format_output() {
  sort |
    awk -F'{' "${output_format_awk}"
}
smartctl_version="$(/usr/sbin/smartctl -V | head -n1 | awk '$1 == "smartctl" {print $2}')"
echo "smartctl_version{version=\"${smartctl_version}\"} 1" | format_output
if [[ "$(expr "${smartctl_version}" : '\([0-9]*\)\..*')" -lt 6 ]]; then
exit
fi
device_list="$(/usr/sbin/smartctl --scan-open | awk '/^\/dev/{print $1 "|" $3}')"
for device in ${device_list}; do
  disk="$(echo "${device}" | cut -f1 -d'|')"
  type="$(echo "${device}" | cut -f2 -d'|')"
  active=1
  echo "smartctl_run{disk=\"${disk}\",type=\"${type}\"}" "$(TZ=UTC date '+%s')"
  # Check if the device is in a low-power mode
  /usr/sbin/smartctl -n standby -d "${type}" "${disk}" > /dev/null || active=0
  echo "device_active{disk=\"${disk}\",type=\"${type}\"}" "${active}"
  # Skip further metrics to prevent the disk from spinning up
  test ${active} -eq 0 && continue
  # Get the SMART information and health
  /usr/sbin/smartctl -i -H -d "${type}" "${disk}" | parse_smartctl_info "${disk}" "${type}"
  # Get the SMART attributes
  case ${type} in
    sat) /usr/sbin/smartctl -A -d "${type}" "${disk}" | parse_smartctl_attributes "${disk}" "${type}" ;;
    sat+megaraid*) /usr/sbin/smartctl -A -d "${type}" "${disk}" | parse_smartctl_attributes "${disk}" "${type}" ;;
    scsi) /usr/sbin/smartctl -A -d "${type}" "${disk}" | parse_smartctl_scsi_attributes "${disk}" "${type}" ;;
    megaraid*) /usr/sbin/smartctl -A -d "${type}" "${disk}" | parse_smartctl_scsi_attributes "${disk}" "${type}" ;;
    nvme*) /usr/sbin/smartctl -A -d "${type}" "${disk}" | parse_smartctl_scsi_attributes "${disk}" "${type}" ;;
    *)
      (>&2 echo "disk type is not sat, scsi, nvme or megaraid but ${type}")
      exit
      ;;
  esac
done | format_output
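The script only writes metrics to stdout; it is the node-exporter-textfiles wrapper that, every INTERVAL seconds, redirects this output into the shared directory mounted at /var/lib/node_exporter, where node-exporter's textfile collector picks it up. Roughly speaking (a sketch of what the wrapper does, not its exact code; the final file name may differ):
# approximately what the wrapper container runs every INTERVAL seconds
# (assumes the wrapper appends .prom, which the textfile collector requires)
bash /scripts/smart.sh > /var/lib/node_exporter/smartmon_data.prom.tmp \
  && mv /var/lib/node_exporter/smartmon_data.prom.tmp /var/lib/node_exporter/smartmon_data.prom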
Node exporter can also be installed without Docker.
sudo apt install prometheus-node-exporter
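With the packaged exporter, the textfile collector can point at the same shared directory; on Debian/Ubuntu the flags are usually kept in /etc/default/prometheus-node-exporter (the path below is the one used in the compose file, adjust as needed):
# /etc/default/prometheus-node-exporter
ARGS="--collector.textfile.directory=/global_config/docker_data/node-exporter/textfile"
sudo systemctl restart prometheus-node-exporter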
Collecting data about other containers
version: '3.8'
services:
  cadvisor:
    image: google/cadvisor:latest
    container_name: cadvisor
    restart: unless-stopped
    ports: [19998:8080]
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:ro
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
cadvisor for ARM, e.g. when running on a Raspberry Pi, needs a different image:
image: braingamer/cadvisor-arm:latest
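cadvisor also exposes a small web UI on the same port; to check the metrics endpoint that Prometheus will scrape (host and port taken from the compose file above):
curl -s http://portainer-pc.lan:19998/metrics | grep '^container_cpu_usage_seconds_total' | head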
Monitoring Proxmox from a container
  proxmox_monitor:
    image: prompve/prometheus-pve-exporter
    container_name: proxmox_monitor
    restart: unless-stopped
    ports: [9221:9221]
    volumes:
      - /global_config/docker_data/proxmox_exporter/pve.yml:/etc/pve.yml:ro
pve.yml (mounted into the container as /etc/pve.yml):
default:
  user: root@pam
  token_name: "token_monitor"
  token_value: "333333-aaaa-bbbb-cccc-111111111"
  verify_ssl: false
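The token referenced in pve.yml has to exist on the Proxmox side. A sketch of creating it with pveum on the Proxmox host (--privsep 0 makes the token inherit the user's privileges) and of querying the exporter the same way Prometheus will:
pveum user token add root@pam token_monitor --privsep 0
curl -s 'http://portainer-pc.lan:9221/pve?target=192.168.30.70&module=default' | head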
Data store
Prometheus docker compose
version: "3.8"
#http://portainer-pc.lan:9090
services:
prometheus:
user: 0:0
network_mode: host
image: prom/prometheus
stop_signal: SIGKILL
container_name: prometheus
restart: unless-stopped
command:
- '--config.file=/config/config.yml'
- '--storage.tsdb.path=/prometheus'
- '--storage.tsdb.retention.time=30d'
- '--storage.tsdb.retention.size=1GB'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--web.enable-lifecycle'
volumes:
- /global_config/docker_data/prometheus/config:/config
- /global_config/docker_data/prometheus/data:/prometheus
labels:
- "traefik.enable=true"
- "traefik.http.routers.prometheus.rule=Host(`prometheus.home.robertolechowski.com`)"
- "traefik.http.routers.prometheus.tls.certresolver=myresolver"
- "traefik.http.routers.prometheus.service=prometheus_service"
- "traefik.http.services.prometheus_service.loadbalancer.server.port=9090"
Prometheus config
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: "prometheus"
    static_configs:
      - targets: ["localhost:9090"]

  - job_name: "node"
    static_configs:
      - targets: ["portainer-pc.lan:9100", "pi-hole.lan:9100"]
    relabel_configs:
      - source_labels: [__address__]
        target_label: instance
        regex: '([^:]+)(:[0-9]+)?'
        replacement: '${1}'

  - job_name: 'cadvisor'
    metrics_path: '/metrics'
    static_configs:
      - targets: ["portainer-pc.lan:19998"]
    relabel_configs:
      - source_labels: [__address__]
        target_label: instance
        regex: '([^:]+)(:[0-9]+)?'
        replacement: '${1}'
    metric_relabel_configs:
      - regex: '^container_label_traefik_(.*)'
        action: labeldrop
      - regex: '^container_label_com_docker_(.*)'
        action: labeldrop
      - regex: '^container_label_org_(.*)'
        action: labeldrop

  - job_name: 'pve'
    static_configs:
      - targets: ["192.168.30.70"]
    metrics_path: /pve
    params:
      module: [default]
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: portainer-pc.lan:9221

  - job_name: 'openweather-exporter'
    scrape_interval: 60s
    static_configs:
      - targets: ["portainer-pc.lan:3362"]
    metric_relabel_configs:
      - source_labels: [__name__]
        regex: '^go_(.*)'
        action: drop
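The config can be validated with promtool, which ships inside the prom/prometheus image, and thanks to --web.enable-lifecycle a running instance can reload it without a restart:
docker run --rm --entrypoint promtool \
  -v /global_config/docker_data/prometheus/config:/config \
  prom/prometheus check config /config/config.yml
curl -X POST http://portainer-pc.lan:9090/-/reload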
All that remains is to configure a dashboard in Grafana.
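The Prometheus data source can be added in the Grafana UI or through its HTTP API; a minimal sketch, assuming Grafana is reachable at grafana.lan:3000 with default admin credentials:
curl -s -X POST http://admin:admin@grafana.lan:3000/api/datasources \
  -H 'Content-Type: application/json' \
  -d '{"name":"Prometheus","type":"prometheus","url":"http://portainer-pc.lan:9090","access":"proxy","isDefault":true}'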