简介:由于资源有限,本实验用了两台机器
https://prometheus.io/download/
# Install Prometheus: program dir /data/prometheus, TSDB data dir /data/database/prometheus
mkdir -p /data/prometheus
tar -zxvf /root/prometheus-2.42.0.linux-amd64.tar.gz -C /data/
cd /data
mv prometheus-2.42.0.linux-amd64/ prometheus
# Create a no-login service account and give it ownership of the data dir
useradd -s /sbin/nologin -M prometheus
mkdir -p /data/database/prometheus
chown -R prometheus:prometheus /data/database/prometheus/
# Create the systemd unit (content follows)
vim /etc/systemd/system/prometheus.service
# /etc/systemd/system/prometheus.service
# (the original was collapsed onto a single line, which is not a valid unit file)
[Unit]
Description=Prometheus
Documentation=https://prometheus.io/
After=network.target

[Service]
Type=simple
User=prometheus
# --web.enable-lifecycle allows hot reload via POST /-/reload (used later)
ExecStart=/data/prometheus/prometheus --web.enable-lifecycle --config.file=/data/prometheus/prometheus.yml --storage.tsdb.path=/data/database/prometheus
Restart=on-failure

[Install]
WantedBy=multi-user.target
# Reload systemd, start Prometheus and enable it at boot
systemctl daemon-reload
systemctl start prometheus
systemctl status prometheus
systemctl enable prometheus
访问 Web 页面:http://<服务器IP>:9090
查看监控数据:http://<服务器IP>:9090/metrics
# Install node_exporter on the machine to be monitored (listens on 9100 by default)
wget https://github.com/prometheus/node_exporter/releases/download/v1.5.0/node_exporter-1.5.0.linux-amd64.tar.gz
tar -zxvf node_exporter-1.5.0.linux-amd64.tar.gz -C /data/
mv /data/node_exporter-1.5.0.linux-amd64/ /data/node_exporter
# Create the systemd unit (content follows)
vim /etc/systemd/system/node_exporter.service
[Unit]
Description=node_exporter
After=network.target

[Service]
# Path must match the install location used above (/data/node_exporter),
# not the stale /data/source.package/node_exporter-1.1.2 path
ExecStart=/data/node_exporter/node_exporter
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=on-failure

[Install]
WantedBy=multi-user.target
# Start node_exporter and enable it at boot
systemctl daemon-reload
systemctl start node_exporter.service
systemctl status node_exporter.service
systemctl enable node_exporter.service
在主配置文件最后加上下面三行
vim /data/prometheus/prometheus.yml
# Append under scrape_configs: (indentation restored — the flat original is invalid YAML)
- job_name: 'agent1'                      # job name identifying the monitored machine
  static_configs:
    - targets: ['192.168.1.1:9100']       # IP of the monitored machine, node_exporter port 9100
[root@VM-16-2-centos prometheus]# ./promtool check config prometheus.yml
Checking prometheus.yml
SUCCESS: prometheus.yml is valid prometheus config file syntax
执行 curl -X POST http://127.0.0.1:9090/-/reload 热加载配置,然后打开 Prometheus 页面输入 up 查看是不是有对应的数据了
回到 Web 管理界面 ——> 点 Status ——> 点 Targets ——> 可以看到多了一台监控目标
# Download and install mysqld_exporter (original URL had a stray trailing "2": .tar.gz2)
wget https://github.com/prometheus/mysqld_exporter/releases/download/v0.14.0/mysqld_exporter-0.14.0.linux-amd64.tar.gz
tar -zxvf mysqld_exporter-0.14.0.linux-amd64.tar.gz -C /data/
mv /data/mysqld_exporter-0.14.0.linux-amd64/ /data/mysqld_exporter
[root@VM-12-2-centos ~]# ls /data/mysqld_exporter/
LICENSE mysqld_exporter NOTICE
# Install and start MariaDB (original command passed -y twice)
yum install -y mariadb-server
systemctl start mariadb
[root@VM-12-2-centos ~]# mysql
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 2
Server version: 5.5.68-MariaDB MariaDB Server
Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]>
MariaDB [(none)]> grant select,replication client,process ON *.* to 'mysql_monito'@'localhost' identified by '123';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]>
MariaDB [(none)]> flush privileges;
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]>
MariaDB [(none)]> exit
Bye
# Paths corrected: the exporter was installed to /data/mysqld_exporter above, not /usr/local.
# NOTE(review): /data/mysqld_exporter/.my.cnf must first be created with the
# credentials granted above (user/password) — its content is not shown in this doc.
nohup /data/mysqld_exporter/mysqld_exporter --config.my-cnf=/data/mysqld_exporter/.my.cnf &
vim /data/prometheus/prometheus.yml
# Append under scrape_configs: (indentation restored — the flat original is invalid YAML)
- job_name: 'mysql'                       # job name identifying the monitored MySQL host
  static_configs:
    - targets: ['192.168.1.1:9104']       # IP of the monitored machine, mysqld_exporter port 9104
systemctl restart prometheus
# Install Grafana. Absolute paths used so the commands do not depend on the current directory.
wget https://dl.grafana.com/enterprise/release/grafana-enterprise-9.3.6.linux-amd64.tar.gz
tar -zxvf grafana-enterprise-9.3.6.linux-amd64.tar.gz -C /data
mv /data/grafana-9.3.6/ /data/grafana
# grafana-server.service below runs as User=grafana and writes to /data/database/grafana,
# so the user and directories must exist (missing from the original steps)
useradd -s /sbin/nologin -M grafana
mkdir -p /data/database/grafana/data /data/database/grafana/log /data/database/grafana/plugins
chown -R grafana:grafana /data/database/grafana /data/grafana
# Back up the config, then edit the paths shown below
cp /data/grafana/conf/defaults.ini /data/grafana/conf/defaults.ini.bak
vim /data/grafana/conf/defaults.ini
data = /data/database/grafana/data
logs = /data/database/grafana/log
plugins = /data/database/grafana/plugins
provisioning = /data/grafana/conf/provisioning/
vim /etc/systemd/system/grafana-server.service
# Grafana systemd unit — requires the 'grafana' user to exist and own the data dirs
[Unit]
Description=Grafana
After=network.target
[Service]
# Type=notify: grafana-server signals readiness to systemd via sd_notify
User=grafana
Group=grafana
Type=notify
ExecStart=/data/grafana/bin/grafana-server -homepath /data/grafana/
Restart=on-failure
[Install]
WantedBy=multi-user.target
# Start Grafana and enable it at boot (web UI on port 3000)
systemctl daemon-reload
systemctl start grafana-server.service
systemctl status grafana-server.service
systemctl enable grafana-server.service
web页面:ip+3000
添加prometheus监控数据及模板,将grafana和prometheus关联起来,也就是在grafana中添加添加数据源
点击:左边栏Dashboards“+”号内import->输入“8919”->load->更改name为“Prometheus Node”->victoriaMetrics选择刚创建的数据源“prometheus”
设置完成后,点击"Dashboards",->"victoriaMetrics"->"Prometheus Node"
https://prometheus.io/download/
# Install Alertmanager. The data dir is created BEFORE chown (the original chown'd first,
# leaving /data/alertmanager/data owned by root while the service runs as prometheus).
tar -zxvf alertmanager-0.25.0.linux-amd64.tar.gz -C /data/
cd /data
mv alertmanager-0.25.0.linux-amd64/ alertmanager
mkdir -p /data/alertmanager/data
chown -R prometheus:prometheus /data/alertmanager
vim /data/alertmanager/alertmanager.yml(最初配置)
# Default Alertmanager configuration (indentation restored — the flat original is invalid YAML)
global:
  resolve_timeout: 5m
route:
  group_by: ['alertname']
  group_wait: 10s
  group_interval: 10s
  repeat_interval: 1h
  receiver: 'web.hook'
receivers:
  - name: 'web.hook'
    webhook_configs:
      - url: 'http://127.0.0.1:5001/'
# Suppress 'warning' alerts when a matching 'critical' alert is already firing
inhibit_rules:
  - source_match:
      severity: 'critical'
    target_match:
      severity: 'warning'
    equal: ['alertname', 'dev', 'instance']
vim /etc/systemd/system/alertmanager.service
# Alertmanager systemd unit — runs as the prometheus user created earlier
[Unit]
Description=Alertmanager
After=network.target
[Service]
Type=simple
User=prometheus
ExecStart=/data/alertmanager/alertmanager --config.file=/data/alertmanager/alertmanager.yml --storage.path=/data/alertmanager/data
Restart=on-failure
[Install]
WantedBy=multi-user.target
# Start Alertmanager and enable it at boot (listens on 9093)
systemctl daemon-reload
systemctl start alertmanager.service
systemctl status alertmanager.service
systemctl enable alertmanager.service
cp /data/prometheus/prometheus.yml /data/prometheus/prometheus.yml.bak
vim /data/prometheus/prometheus.yml (job_name中有几台监控的机器就写几行)
# prometheus.yml — alerting + scrape configuration (indentation restored;
# the flat original is invalid YAML and would not pass promtool)
alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - 192.168.1.1:9093
rule_files:
  - "/data/database/prometheus/rules/*.rules"
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['192.168.1.1:9090']
  # One targets entry per monitored machine
  - job_name: 'node'
    static_configs:
      - targets: ['192.168.1.2:9100']
      - targets: ['192.168.1.3:9100']
      - targets: ['192.168.1.4:9100']
cd /data/prometheus
# NOTE: create the rules directory and node.rules file (next steps) BEFORE running
# this check, otherwise promtool will fail on the missing rule_files glob
./promtool check config prometheus.yml
[root@VM-16-2-centos prometheus]# ./promtool check config prometheus.yml
Checking prometheus.yml
SUCCESS: 1 rule files found
SUCCESS: prometheus.yml is valid prometheus config file syntax
Checking /data/database/prometheus/rules/node.rules
SUCCESS: 21 rules found
# Create the rules directory referenced by rule_files in prometheus.yml, then add the rule file
mkdir /data/database/prometheus/rules
vim /data/database/prometheus/rules/node.rules
groups:
- name: Node-rules
rules:
- alert: Node-Down
  # job label must match the job_name in prometheus.yml ('node'); the original
  # used job="node1", which matches nothing, so the alert could never fire
  expr: up{job="node"} == 0
  for: 1m
  labels:
    severity: 严重警告
    instance: "{{ $labels.instance }}"
  annotations:
    summary: "{{ $labels.instance }} 节点已经宕机 1分钟"
    description: "节点宕机"
- alert: Node-CpuHigh
expr: (1 - avg by (instance) (irate(node_cpu_seconds_total{job="node",mode="idle"}[5m]))) * 100 > 80
for: 1m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} cpu使用率超 80%"
description: "CPU 使用率为 {{ $value }}%"
- alert: Node-CpuIowaitHigh
expr: avg by (instance) (irate(node_cpu_seconds_total{job="node",mode="iowait"}[5m])) * 100 > 80
for: 1m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} CPU iowait 使用率超过 80%"
description: "CPU iowait 使用率为 {{ $value }}%"
- alert: Node-MemoryHigh
expr: (1 - node_memory_MemAvailable_bytes{job="node"} / node_memory_MemTotal_bytes{job="node"}) * 100 > 80
for: 1m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} Memory使用率超过 80%"
description: "Memory 使用率为 {{ $value }}%"
- alert: Node-Load5High
expr: node_load5 > (count by (instance) (node_cpu_seconds_total{job="node",mode='system'})) * 1.2
for: 1m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} Load(5m)过高,超出cpu核数1.2倍"
description: "Load(5m)过高,超出cpu核数 1.2倍"
- alert: Node-DiskRootHigh
expr: (1 - node_filesystem_avail_bytes{job="node",fstype=~"ext.*|xfs",mountpoint ="/"} / node_filesystem_size_bytes{job="node",fstype=~"ext.*|xfs",mountpoint ="/"}) * 100 > 80
for: 10m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} Disk(/ 分区) 使用率超过 80%"
description: "Disk(/ 分区) 使用率为 {{ $value }}%"
- alert: Node-DiskDataHigh
expr: (1 - node_filesystem_avail_bytes{job="node",fstype=~"ext.*|xfs",mountpoint ="/data"} / node_filesystem_size_bytes{job="node",fstype=~"ext.*|xfs",mountpoint ="/data"}) * 100 > 80
for: 10m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} Disk(/data 分区) 使用率超过 80%"
description: "Disk(/data 分区) 使用率为 {{ $value }}%"
- alert: Node-DiskReadHigh
expr: irate(node_disk_read_bytes_total{job="node"}[5m]) > 20 * (1024 ^ 2)
for: 1m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} Disk 读取字节数速率超过 20 MB/s"
description: "Disk 读取字节数速率为 {{ $value }}MB/s"
- alert: Node-DiskWriteHigh
expr: irate(node_disk_written_bytes_total{job="node"}[5m]) > 20 * (1024 ^ 2)
for: 1m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} Disk 写入字节数速率超过 20 MB/s"
description: "Disk 写入字节数速率为 {{ $value }}MB/s"
- alert: Node-DiskReadRateCountHigh
expr: irate(node_disk_reads_completed_total{job="node"}[5m]) > 3000
for: 1m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} Disk iops 每秒读取速率超过 3000 iops"
description: "Disk iops 每秒读取速率为 {{ $value }}"
- alert: Node-DiskWriteRateCountHigh
expr: irate(node_disk_writes_completed_total{job="node"}[5m]) > 3000
for: 1m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} Disk iops 每秒写入速率超过 3000 iops"
description: "Disk iops 每秒写入速率为 {{ $value }}"
- alert: Node-InodeRootUsedPercentHigh
expr: (1 - node_filesystem_files_free{job="node",fstype=~"ext4|xfs",mountpoint="/"} / node_filesystem_files{job="node",fstype=~"ext4|xfs",mountpoint="/"}) * 100 > 80
for: 10m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} Disk (/ 分区) inode 使用率超过 80%"
description: "Disk (/ 分区) inode 使用率为 {{ $value }}%"
- alert: Node-InodeBootUsedPercentHigh
expr: (1 - node_filesystem_files_free{job="node",fstype=~"ext4|xfs",mountpoint="/data"} / node_filesystem_files{job="node",fstype=~"ext4|xfs",mountpoint="/data"}) * 100 > 80
for: 10m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} Disk (/data 分区) inode 使用率超过 80%"
description: "Disk (/data 分区) inode 使用率为 {{ $value }}%"
- alert: Node-FilefdAllocatedPercentHigh
expr: node_filefd_allocated{job="node"} / node_filefd_maximum{job="node"} * 100 > 80
for: 10m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} Filefd 打开百分比超过 80%"
description: "Filefd 打开百分比为 {{ $value }}%"
- alert: Node-NetworkNetinBitRateHigh
expr: avg by (instance) (irate(node_network_receive_bytes_total{device=~"eth0|eth1|ens33|ens37"}[1m]) * 8) > 20 * (1024 ^ 2) * 8
for: 3m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} Network 接收比特数速率超过 20MB/s"
description: "Network 接收比特数速率为 {{ $value }}MB/s"
- alert: Node-NetworkNetoutBitRateHigh
  expr: avg by (instance) (irate(node_network_transmit_bytes_total{device=~"eth0|eth1|ens33|ens37"}[1m]) * 8) > 20 * (1024 ^ 2) * 8
  for: 3m
  labels:
    severity: 警告
    instance: "{{ $labels.instance }}"
  annotations:
    # transmit alert — the original summary wrongly said 接收 (receive)
    summary: "{{ $labels.instance }} Network 发送比特数速率超过 20MB/s"
    description: "Network 发送比特数速率为 {{ $value }}MB/s"
- alert: Node-NetworkNetinPacketErrorRateHigh
expr: avg by (instance) (irate(node_network_receive_errs_total{device=~"eth0|eth1|ens33|ens37"}[1m])) > 15
for: 3m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} Network 接收错误包速率超过 15个/秒"
description: "Network 接收错误包速率为 {{ $value }}个/秒"
- alert: Node-NetworkNetoutPacketErrorRateHigh
  # Must use transmit_errs_total (error packets), mirroring the receive_errs_total
  # rule above; the original used transmit_packets_total, which counts ALL
  # transmitted packets and would fire on any normal traffic > 15 pkt/s
  expr: avg by (instance) (irate(node_network_transmit_errs_total{device=~"eth0|eth1|ens33|ens37"}[1m])) > 15
  for: 3m
  labels:
    severity: 警告
    instance: "{{ $labels.instance }}"
  annotations:
    summary: "{{ $labels.instance }} Network 发送错误包速率超过 15个/秒"
    description: "Network 发送错误包速率为 {{ $value }}个/秒"
- alert: Node-ProcessBlockedHigh
expr: node_procs_blocked{job="node"} > 10
for: 10m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} Process 当前被阻塞的任务的数量超过 10个"
description: "Process 当前被阻塞的任务的数量为 {{ $value }}个"
- alert: Node-TimeOffsetHigh
expr: abs(node_timex_offset_seconds{job="node"}) > 3 * 60
for: 2m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} 节点的时间偏差超过 3m"
description: "节点的时间偏差为 {{ $value }}m"
- alert: Node-TCPconnection
expr: node_sockstat_TCP_tw{job="node"} > 15000
for: 2m
labels:
severity: 警告
instance: "{{ $labels.instance }}"
annotations:
summary: "{{ $labels.instance }} TCP 等待关闭的TCP连接数TIME_WAIT过高大于15000"
description: "TCP 等待关闭的TCP连接数为 {{ $value }}"
8. 配置alertmanager邮件报警
vim /data/alertmanager/alertmanager.yml
# 全局配置项
global:
resolve_timeout: 5m #处理超时时间,默认为5min
smtp_smarthost: 'smtp.qq.com:465' #邮箱smtp服务器代理
smtp_from: '111111112@qq.com' #发送邮箱名称
smtp_auth_username: '111111112@qq.com' #邮箱名称
smtp_auth_password: 'asdklfjwiehrqc' #邮箱授权码
smtp_require_tls: false
smtp_hello: 'qq.com'
vim /data/alertmanager/email.tmpl
{{ define "email.html" }}
{{- /* Firing section: range over .Alerts.Firing only (the original ranged over
       .Alerts, which would also list resolved alerts under the firing header) */ -}}
{{- if gt (len .Alerts.Firing) 0 -}}
{{- range $index, $alert := .Alerts.Firing -}}
<pre>
======== 异常告警 ========
告警类型:{{ $alert.Labels.alertname }}
告警级别:{{ $alert.Labels.severity }}
告警实例:{{ $alert.Labels.instance }}
告警应用: {{ $alert.Labels.name }}
告警信息:{{ $alert.Annotations.summary }}
告警详情:{{ $alert.Annotations.description }}
告警时间:{{ $alert.StartsAt.Local }}
========== END ==========
</pre>
{{- end }}
{{- end }}
{{- /* Resolved section: same fix, plus the recovery-time delimiter was broken
       ("{:{" instead of "{{") which makes the whole template fail to parse */ -}}
{{- if gt (len .Alerts.Resolved) 0 -}}
{{- range $index, $alert := .Alerts.Resolved -}}
<pre>
======== 告警恢复 ========
告警类型:{{ $alert.Labels.alertname }}
告警级别:{{ $alert.Labels.severity }}
告警实例:{{ $alert.Labels.instance }}
告警详情:{{ $alert.Annotations.description }}
告警应用: {{ $alert.Labels.name }}
当前状态: OK
告警时间:{{ $alert.StartsAt.Local }}
恢复时间:{{ $alert.EndsAt.Local }}
========== END ==========
</pre>
{{- end }}
{{- end }}
{{- end }}
systemctl restart prometheus.service
systemctl restart alertmanager.service
我是Google云的新手,我正在尝试对其进行首次部署。我的第一个部署是RubyonRails项目。我基本上是在关注thisguideinthegoogleclouddocumentation.唯一的区别是我使用的是我自己的项目,而不是他们提供的“helloworld”项目。这是我的app.yaml文件runtime:customvm:trueentrypoint:bundleexecrackup-p8080-Eproductionconfig.ruresources:cpu:0.5memory_gb:1.3disk_size_gb:10当我转到我的项目目录并运行gcloudprevie
我可以在Azure网站上部署RubyonRails吗? 最佳答案 还没有。目前仅支持.NET和PHP。 关于ruby-on-rails-RubyonRails可以部署在Azure网站上吗?,我们在StackOverflow上找到一个类似的问题: https://stackoverflow.com/questions/12964010/
前置步骤我们都操作完了,这篇开始介绍jenkins的集成。话不多说,看操作1、登录进入jenkins后会让你选择安装插件,选择第一个默认的就行。安装完成后设置账号密码,重新登录。2、配置JDK和Git都需要执行路径,所以需要先把执行路径找到,先进入服务器的docker容器,2.1JDK的路径root@69eef9ee86cf:/usr/bin#echo$JAVA_HOME/usr/local/openjdk-82.2Git的路径root@69eef9ee86cf:/#whichgit/usr/bin/git3、先配置JDK和Git。点击:ManageJenkins>>GlobalToolCon
深度学习部署:Windows安装pycocotools报错解决方法1.pycocotools库的简介2.pycocotools安装的坑3.解决办法更多Ai资讯:公主号AiCharm本系列是作者在跑一些深度学习实例时,遇到的各种各样的问题及解决办法,希望能够帮助到大家。ERROR:Commanderroredoutwithexitstatus1:'D:\Anaconda3\python.exe'-u-c'importsys,setuptools,tokenize;sys.argv[0]='"'"'C:\\Users\\46653\\AppData\\Local\\Temp\\pip-instal
Ocra无法处理需要“tk”的应用程序require'tk'puts'nope'用奥克拉http://github.com/larsch/ocra不起作用(如链接中的一个问题所述)问题:https://github.com/larsch/ocra/issues/29(Ocra是1.9的"new"rubyscript2exe,本质上它用于将rb脚本部署为可执行文件)唯一的问题似乎是缺少tcl的DLL文件我不认为这是一个问题据我所知,问题是缺少tk的DLL文件如果它们是已知的,则可以在执行ocra时将它们包括在内有没有办法知道tk工作所需的DLL依赖项? 最佳答
我有一个类unzipper.rb,它使用Rubyzip解压文件。在我的本地环境中,我可以成功解压缩文件,而无需使用require'zip'明确包含依赖项但是在Heroku上,我得到一个NameError(uninitializedconstantUnzipper::Zip)我只能通过使用明确的require来解决问题:为什么这在Heroku环境中是必需的,但在本地主机上却不是?我的印象是Rails自动需要所有gem。app/services/unzipper.rbrequire'zip'#OnlyrequiredforHeroku.Workslocallywithout!class
出于某种原因,heroku尝试要求dm-sqlite-adapter,即使它应该在这里使用Postgres。请注意,这发生在我打开任何URL时-而不是在gitpush本身期间。我构建了一个默认的Facebook应用程序。gem文件:source:gemcuttergem"foreman"gem"sinatra"gem"mogli"gem"json"gem"httparty"gem"thin"gem"data_mapper"gem"heroku"group:productiondogem"pg"gem"dm-postgres-adapter"endgroup:development,:t
如何使用Capistrano将Rails应用程序部署到无法访问外部网络或存储库的生产或暂存服务器?我已经设法完成部署的一半,并意识到Capistrano没有在我的本地机器上下载gitrepo,但它首先连接到远程服务器并尝试在那里下载Git存储库。我希望有一个类似Javaee的构建系统,其中创建可交付成果并将该可交付成果发送到服务器。就像您构建.ear文件并将其部署到您想要的任何服务器上一样。显然在RoR中,你被迫(据我所知)在该服务器上构建应用程序,在那里创建一个gem存储库,在那里克隆最新的分支等等。有什么方法可以将准备运行的包发送到远程服务器吗? 最佳答
集成背景我们当前集群使用的是ClouderaCDP,Flink版本为ClouderaVersion1.14,整体Flink安装目录以及配置文件结构与社区版本有较大出入。直接根据Streampark官方文档进行部署,将无法配置FlinkHome,以及后续整体Flink任务提交到集群中,因此需要进行针对化适配集成,在满足使用需求上,尽量提供完整的Streampark使用体验。集成步骤版本匹配问题解决首先解决无法识别Cloudera中的FlinkHome问题,根据报错主要明确到的事情是无法读取到Flink版本、lib下面的jar包名称无法匹配。修改对象:修改源码:(解决无法匹配clouderajar
我正在寻找一种可靠的方式来部署Rack应用程序(在本例中为Sinatra应用程序)。请求将花费一些时间(0.25-0.5秒等待代理HTTP请求)并且可能会有相当大的流量。我应该使用传统的mongrel集群设置吗?使用HAProxy作为负载均衡器?恩金克斯?架子?您使用过哪些解决方案,有哪些优势? 最佳答案 Nginx/unicornFTW!前面的Nginx提供静态文件和unicorn处理Sinatra应用程序。优点:性能、使用unixsocks的良好负载平衡以及无需停机即可部署/升级(您可以在不停机的情况下升级Ruby/Nginx/