## Global definitions section
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. (How often Prometheus scrapes targets.)
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. (How often alerting/recording rules are evaluated.)
  # scrape_timeout is set to the global default (10s). (Per-scrape timeout.)

# Alertmanager configuration (where alerts are sent; rules live in rule_files)
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs: # data-collection (scrape) configuration
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus' # name of the monitored job (arbitrary label value)
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs: # static target list: monitored host info; a restart is required after edits
      - targets: ['localhost:9090']
    # file_sd_configs: file-based service discovery — targets are re-read from the
    # file dynamically, so edits take effect without restarting the service