分类 Linux 下的文章
Docker部署alertmanager对接prometheus
- 作者: SUNBALCONY
- 时间:
- 分类: Linux,Docker
- 评论
alertmanager配置部分
创建文件夹
mkdir -p /etc/alertmanager/
mkdir -p /etc/alertmanager/template
新建配置文件
vim /etc/alertmanager/alertmanager.yml
global:
  resolve_timeout: 5m
  smtp_from: 'i@valarx.com'                 # sender address
  smtp_smarthost: 'smtp.office365.com:587'  # SMTP server; for smtp.qq.com use port 465 or 587
  smtp_auth_username: 'i@valarx.com'        # SMTP username
  smtp_auth_password: 'xxxx'                # SMTP password / authorization code
  smtp_require_tls: true
  smtp_hello: 'xxxx'

templates:
  - '/etc/alertmanager/template/*.tmpl'

route:
  group_by: ['alertname']  # group alerts by alert name
  group_wait: 5s           # wait 5s so identical alerts arriving together are batched into one group
  group_interval: 5m       # if group content is unchanged, merge into one notification sent after 5m
  repeat_interval: 5m      # resend interval (s/m/h) while the alert stays unresolved
  receiver: 'email'        # default receiver is email
  routes:                  # sub-route: alerts labeled severity=email also go to the email receiver
    - receiver: email
      match_re:
        # NOTE(review): original had the typo "serverity"; the alert rules set the
        # label "severity", so the sub-route could never match — fixed here.
        severity: email

receivers:
  - name: 'email'
    email_configs:
      - to: 'xxx@qq.com'  # separate multiple recipients with ','
        send_resolved: true
        html: '{{ template "email.html" . }}'  # render with the custom template
  - name: 'wechat'
    wechat_configs:
      - corp_id: 'xxxxxxxxxxxxx'                        # enterprise ID
        api_url: 'https://qyapi.weixin.qq.com/cgi-bin/' # WeChat Work API endpoint
        to_party: '2'                                   # notification group ID
        agent_id: '1000002'                             # agent_id of the created app
        api_secret: 'xxxxxxxxxxxxxx'                    # generated secret
        send_resolved: true
创建自定义模板
vim /etc/alertmanager/template/email.tmpl
{{ define "email.html" }}
{{/* Renders one HTML block per alert in .Alerts; inside the range, "." is the
     current alert. This comment emits nothing: "-}}" trims its own newline. */ -}}
{{ range $i, $alert :=.Alerts }}
========监控报警==========<br>
告警状态:{{ .Status }}<br>
告警级别:{{ $alert.Labels.severity }}<br>
告警类型:{{ $alert.Labels.alertname }}<br>
告警应用:{{ $alert.Annotations.summary }}<br>
告警主机:{{ $alert.Labels.instance }}<br>
告警详情:{{ $alert.Annotations.description }}<br>
触发阀值:{{ $alert.Annotations.value }}<br>
告警时间:{{ $alert.StartsAt.Format "2006-01-02 15:04:05" }}<br>
========end=============<br>
{{ end }}
{{ end }}
启动alertmanager
docker run -d --restart=always \
--name=alertmanager \
-p 9093:9093 \
-v /etc/alertmanager:/etc/alertmanager \
-v /etc/localtime:/etc/localtime \
prom/alertmanager:latest
Prometheus部分告警规则
启用alertmanager模块和rules告警规则
global:
  scrape_interval: 5s      # scrape targets every 5 seconds (default is 1 minute)
  evaluation_interval: 5s  # evaluate rules every 5 seconds (default is 1 minute)
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - '10.0.4.10:9093'

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - "/etc/prometheus/rules/*.rules"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
创建rules告警规则
vim /etc/prometheus/rules/alerts.rules
这边是两个规则:
主机CPU利用率>85%
主机MEM利用率>70%
---
groups:
  - name: hostStatsAlert
    rules:
      # Fires when average CPU utilization on an instance exceeds 85% for 1 minute.
      - alert: hostCpuUsageAlert
        expr: (1 - avg(irate(node_cpu_seconds_total{mode="idle"}[5m])) by (instance)) * 100 > 85
        for: 1m
        labels:
          severity: critical
        annotations:
          summary: "Instance {{ $labels.instance }} CPU usage high"
          description: "{{ $labels.instance }} CPU usage above 85% (current value: {{ $value }})"
      # Fires when memory utilization exceeds 70% for 1 minute.
      - alert: hostMemUsageAlert
        expr: (1 - node_memory_MemAvailable_bytes{} / node_memory_MemTotal_bytes{}) * 100 > 70
        for: 1m
        labels:
          severity: critical
        annotations:
          summary: "Instance {{ $labels.instance }} MEM usage high"
          description: "{{ $labels.instance }} MEM usage above 70% (current value: {{ $value }})"
重载规则(重启 Prometheus 容器使新规则生效;若启用了 --web.enable-lifecycle,也可通过 POST /-/reload 热加载)
docker restart prometheus
验证
使用Golang调用OpenAI API
- 作者: SUNBALCONY
- 时间:
- 分类: Linux
- 评论
## v1 使用原生HTTP接口
// Calls the OpenAI /v1/completions endpoint over raw HTTP and prints the
// JSON response body.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
)

// ASAK is the OpenAI API key.
// NOTE(review): avoid committing real keys; prefer reading from an
// environment variable in production code.
const ASAK = "sk-xxxxxxxxxx"

// Payload is the request body for the OpenAI /v1/completions endpoint.
type Payload struct {
	Prompt           string  `json:"prompt"`
	MaxTokens        int64   `json:"max_tokens"`
	Temperature      float64 `json:"temperature"`
	TopP             int64   `json:"top_p"`
	FrequencyPenalty int64   `json:"frequency_penalty"`
	PresencePenalty  int64   `json:"presence_penalty"`
	Model            string  `json:"model"`
}

func main() {
	data := Payload{
		Prompt:           "腾讯是一家怎样的公司",
		MaxTokens:        2048,
		Temperature:      0.5,
		TopP:             0,
		FrequencyPenalty: 0,
		PresencePenalty:  0,
		Model:            "text-davinci-003",
	}

	payloadBytes, err := json.Marshal(data)
	if err != nil {
		// Was an empty "// handle err" branch that fell through with nil data.
		log.Fatalf("marshal payload: %v", err)
	}

	req, err := http.NewRequest(http.MethodPost, "https://api.openai.com/v1/completions", bytes.NewReader(payloadBytes))
	if err != nil {
		log.Fatalf("build request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+ASAK)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		// Falling through here would nil-deref resp below — exit instead.
		log.Fatalf("do request: %v", err)
	}
	defer resp.Body.Close()

	all, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("read response: %v", err)
	}
	// Surface HTTP-level failures (e.g. 401 invalid key) instead of silently
	// printing an error payload as if it were a result.
	if resp.StatusCode != http.StatusOK {
		log.Fatalf("unexpected status %s: %s", resp.Status, all)
	}
	fmt.Println(string(all))
}
---
# v2 使用OpenAI SDK(OpenAI教我的,你说难受不。。。。)
正在思考,请等待......
OpenAI API是OpenAI提供的一组软件API,它可以让开发人员使用自然语言处理(NLP)、机器学习(ML)和机器人技术(Robotics)等技术来开发自然语言处理(NLP)应用程序。
要使用OpenAI API,首先需要在OpenAI官网上注册一个账号,然后登录,获取API key。
然后,可以使用Go语言的第三方库OpenAI SDK来调用OpenAI API。OpenAI SDK提供了一个简单的API,可以用来访问OpenAI API。
使用OpenAI SDK,可以使用Go语言来实现一个简单的调用OpenAI API的示例:
// NOTE(review): this snippet was generated by ChatGPT (as the surrounding
// article admits). The import path "github.com/openai/openai-sdk-go/openai"
// does not appear to be a real published Go module, and the client API below
// is unverified — treat this as illustrative pseudo-code, not working code.
package main
import (
"fmt"
"log"
"os"
"github.com/openai/openai-sdk-go/openai"
)
func main() {
// Read the OpenAI API key from the environment.
apiKey := os.Getenv("OPENAI_API_KEY")
// Create the OpenAI client.
client := openai.NewClient(apiKey)
// Call the completions API — argument meanings (prompt, continuation, max
// tokens?) are presumed from context; verify against a real SDK.
resp, err := client.Completion.Create(
"The quick brown fox jumps over the lazy dog",
"The quick brown fox jumps over the",
10,
)
if err != nil {
log.Fatal(err)
}
// Print the raw response value.
fmt.Println(resp)
}
上面的代码使用OpenAI SDK来调用OpenAI API,并打印出调用结果。
使用Docker部署Mysql数据库
- 作者: SUNBALCONY
- 时间:
- 分类: Linux,Docker
- 评论
docker run -dit --name mysql -p 30060:3306 -e MYSQL_ROOT_PASSWORD=123456789 -e TZ=Asia/Shanghai -v /data/mysql/config:/etc/mysql/conf.d -v /data/mysql/data:/var/lib/mysql mysql:5.7.19 --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
touch /data/mysql/config/my.cnf
[client]
default-character-set=utf8mb4
[mysql]
default-character-set=utf8mb4
[mysqld]
skip-ssl
max_connections = 2000
secure_file_priv=/var/lib/mysql
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION
docker restart mysql
使用Docker部署Prometheus和Blackbox和Grafana和ImageRender
- 作者: SUNBALCONY
- 时间:
- 分类: Linux,Docker
- 评论
#prometheus
docker run --name=prometheus -d -p 9090:9090 -v /etc/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml prom/prometheus
#blackbox
docker run --rm -d -p 9115:9115 --name blackbox -v /etc/timezone:/etc/timezone:ro -v /etc/localtime:/etc/localtime:ro -v /etc/blackbox:/config prom/blackbox-exporter:master --config.file=/config/blackbox.yml --log.level=debug
#grafana
docker run -d --name grafana -p 3000:3000 -v /data/grafana:/var/lib/grafana grafana/grafana:8.2.1
#grafana-render
docker run -d --name=grafana-image-renderer -p 8088:8088 --env GF_RENDERER_PLUGIN_TZ=Asia/Shanghai --env GF_RENDERER_PLUGIN_IGNORE_HTTPS_ERRORS=true grafana/grafana-image-renderer
#prometheus
# my global config
# my global config
global:
  scrape_interval: 5s      # scrape targets every 5 seconds (default is 1 minute)
  evaluation_interval: 5s  # evaluate rules every 5 seconds (default is 1 minute)
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label to any timeseries scraped from this config.
  - job_name: 'prometheus'
    static_configs:
      - targets:
          - '1.1.1.1:9100'
  # Blackbox-exporter HTTP probes: each target URL is rewritten into the
  # "target" query parameter and the scrape is redirected to the exporter.
  - job_name: 'HTTPPUB'
    metrics_path: /probe
    params:
      module: [http_2xx]
    static_configs:
      - targets:
          - https://www.valarx.com
          - https://www.eagleyao.com
          - https://www.ayao.ltd
          - https://www.yeyihan.top
          - https://www.eebbk.top
          - https://www.xiaco.cn
          - https://shi7zuku.live
          - https://www.izoyoi.ink
          - https://www.tencentcloud.love
          - https://www.pixiv.work
          - https://www.googles.ltd
          - https://www.eebbk.com.cn
          - https://www.domaincdn.com.cn
          - https://ap-guangzhou.cos.eagleyao.com
          - https://www.yunpan.host
          - https://www.rehiy.com
          - https://www.github.com
          - https://blog.laoda.de
          - https://www.lomcia.cn
          - https://www.vkxx.com
          - https://www.sccens.net
          - https://www.czmz.top
          - https://www.smalljun.com
          - https://www.52ghs.cn
          - https://www.lspss.com
          - https://github.awk.im
          - https://awk.im
          - https://6b78f738-cdn-share-example1-cdn2.tencentcloud.i0i0.cn
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target  # URL becomes the ?target= parameter
      - source_labels: [__param_target]
        target_label: instance        # keep the URL as the instance label
      - target_label: __address__
        replacement: '10.0.20.4:9115' # actually scrape the blackbox exporter
#blackbox
modules:
  http_2xx:
    prober: http
    timeout: 5s
    http:
      preferred_ip_protocol: ip4
      method: GET
  http_post_2xx:
    prober: http
    http:
      # NOTE(review): module name suggests POST but method is GET — confirm intent.
      method: GET
      preferred_ip_protocol: ip4
      ip_protocol_fallback: false
  tcp_connect:
    prober: tcp
  pop3s_banner:
    prober: tcp
    tcp:
      query_response:
        - expect: "^+OK"
      tls: true
      tls_config:
        insecure_skip_verify: false
  grpc:
    prober: grpc
    grpc:
      tls: true
      preferred_ip_protocol: "ip4"
  grpc_plain:
    prober: grpc
    grpc:
      tls: false
      service: "service1"
  ssh_banner:
    prober: tcp
    tcp:
      query_response:
        - expect: "^SSH-2.0-"
        - send: "SSH-2.0-blackbox-ssh-check"
  irc_banner:
    prober: tcp
    tcp:
      query_response:
        - send: "NICK prober"
        - send: "USER prober prober prober :prober"
        # Reply to the server PING with the captured token, then wait for 001.
        - expect: "PING :([^ ]+)"
          send: "PONG ${1}"
        - expect: "^:[^ ]+ 001"
  icmp:
    prober: icmp
  icmp_ttl5:
    prober: icmp
    timeout: 5s
    icmp:
      ttl: 5