第四章 Prometheus大厂监控体系及实战 -- prometheus server安装(二)
发布时间
阅读量:
阅读量
2.2 prometheus server安装
prometheus可以通过多种方式安装

2.2.1 通过容器启动
https://prometheus.io/docs/prometheus/latest/installation/
https://hub.docker.com/r/prom/prometheus
root@k8s-master1:~# docker pull prom/prometheus:latest
root@k8s-master1:~# docker run \
-p 9090:9090 \
prom/prometheus
AI助手

2.2.2 在线安装
# apt search prometheus
# apt-cache madison prometheus
prometheus | 2.15.2+ds-2 | http://mirrors.cloud.tencent.com/ubuntu focal/universe amd64 Packages
prometheus | 2.15.2+ds-2 | http://mirrors.cloud.tencent.com/ubuntu focal/universe Sources
# apt -y install prometheus
AI助手
2.2.3 Operator部署
该项目的具体位置位于GitHub平台上的https://github.com/prometheus-operator/kube-prometheus/。
第一步可以通过该项目地址定位到与自己Kubernetes版本匹配的Kube Prometheus Stack版本。

git clone -b release-0.9 https://github.com/prometheus-operator/kube-prometheus.git
root@k8s-master1:~# git clone -b release-0.9 https://gitee.com/raymond9/kube-prometheus.git
root@k8s-master1:~# cd kube-prometheus/manifests
root@k8s-master1:~/kube-prometheus/manifests# ls
alertmanager-alertmanager.yaml node-exporter-clusterRole.yaml
alertmanager-podDisruptionBudget.yaml node-exporter-clusterRoleBinding.yaml
alertmanager-prometheusRule.yaml node-exporter-daemonset.yaml
alertmanager-secret.yaml node-exporter-prometheusRule.yaml
alertmanager-service.yaml node-exporter-service.yaml
alertmanager-serviceAccount.yaml node-exporter-serviceAccount.yaml
alertmanager-serviceMonitor.yaml node-exporter-serviceMonitor.yaml
blackbox-exporter-clusterRole.yaml prometheus-adapter-apiService.yaml
blackbox-exporter-clusterRoleBinding.yaml prometheus-adapter-clusterRole.yaml
blackbox-exporter-configuration.yaml prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml
blackbox-exporter-deployment.yaml prometheus-adapter-clusterRoleBinding.yaml
blackbox-exporter-service.yaml prometheus-adapter-clusterRoleBindingDelegator.yaml
blackbox-exporter-serviceAccount.yaml prometheus-adapter-clusterRoleServerResources.yaml
blackbox-exporter-serviceMonitor.yaml prometheus-adapter-configMap.yaml
grafana-dashboardDatasources.yaml prometheus-adapter-deployment.yaml
grafana-dashboardDefinitions.yaml prometheus-adapter-podDisruptionBudget.yaml
grafana-dashboardSources.yaml prometheus-adapter-roleBindingAuthReader.yaml
grafana-deployment.yaml prometheus-adapter-service.yaml
grafana-service.yaml prometheus-adapter-serviceAccount.yaml
grafana-serviceAccount.yaml prometheus-adapter-serviceMonitor.yaml
grafana-serviceMonitor.yaml prometheus-clusterRole.yaml
kube-prometheus-prometheusRule.yaml prometheus-clusterRoleBinding.yaml
kube-state-metrics-clusterRole.yaml prometheus-operator-prometheusRule.yaml
kube-state-metrics-clusterRoleBinding.yaml prometheus-operator-serviceMonitor.yaml
kube-state-metrics-deployment.yaml prometheus-podDisruptionBudget.yaml
kube-state-metrics-prometheusRule.yaml prometheus-prometheus.yaml
kube-state-metrics-service.yaml prometheus-prometheusRule.yaml
kube-state-metrics-serviceAccount.yaml prometheus-roleBindingConfig.yaml
kube-state-metrics-serviceMonitor.yaml prometheus-roleBindingSpecificNamespaces.yaml
kubernetes-prometheusRule.yaml prometheus-roleConfig.yaml
kubernetes-serviceMonitorApiserver.yaml prometheus-roleSpecificNamespaces.yaml
kubernetes-serviceMonitorCoreDNS.yaml prometheus-service.yaml
kubernetes-serviceMonitorKubeControllerManager.yaml prometheus-serviceAccount.yaml
kubernetes-serviceMonitorKubeScheduler.yaml prometheus-serviceMonitor.yaml
kubernetes-serviceMonitorKubelet.yaml setup
root@k8s-master1:~/kube-prometheus/manifests# grep "image:" setup/*.yaml |awk -F" " '{print $NF}' | uniq
image:
quay.io/prometheus-operator/prometheus-operator:v0.49.0
quay.io/brancz/kube-rbac-proxy:v0.11.0
root@k8s-master1:~/kube-prometheus/manifests# docker pull registry.cn-beijing.aliyuncs.com/raymond9/prometheus-operator:v0.49.0
root@k8s-master1:~/kube-prometheus/manifests# docker tag registry.cn-beijing.aliyuncs.com/raymond9/prometheus-operator:v0.49.0 harbor.raymonds.cc/base_images/prometheus-operator:v0.49.0
root@k8s-master1:~/kube-prometheus/manifests# docker push harbor.raymonds.cc/base_images/prometheus-operator:v0.49.0
root@k8s-master1:~/kube-prometheus/manifests# docker pull registry.cn-beijing.aliyuncs.com/raymond9/kube-rbac-proxy:v0.11.0
root@k8s-master1:~/kube-prometheus/manifests# docker tag registry.cn-beijing.aliyuncs.com/raymond9/kube-rbac-proxy:v0.11.0 harbor.raymonds.cc/base_images/kube-rbac-proxy:v0.11.0
root@k8s-master1:~/kube-prometheus/manifests# docker push harbor.raymonds.cc/base_images/kube-rbac-proxy:v0.11.0
root@k8s-master1:~/kube-prometheus/manifests# sed -i "s#quay.io/prometheus-operator/#harbor.raymonds.cc/base_images/#g" setup/*.yaml
root@k8s-master1:~/kube-prometheus/manifests# sed -i "s#quay.io/brancz/#harbor.raymonds.cc/base_images/#g" setup/*.yaml
root@k8s-master1:~/kube-prometheus/manifests# grep "image:" setup/*.yaml |awk -F" " '{print $NF}' | uniq
image:
harbor.raymonds.cc/base_images/prometheus-operator:v0.49.0
harbor.raymonds.cc/base_images/kube-rbac-proxy:v0.11.0
AI助手
安装Prometheus Operator:
root@k8s-master1:~/kube-prometheus/manifests# kubectl apply -f setup/
namespace/monitoring created
customresourcedefinition.apiextensions.k8s.io/alertmanagerconfigs.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/podmonitors.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/probes.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/prometheuses.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/prometheusrules.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/servicemonitors.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/thanosrulers.monitoring.coreos.com created
clusterrole.rbac.authorization.k8s.io/prometheus-operator created
clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created
deployment.apps/prometheus-operator created
service/prometheus-operator created
serviceaccount/prometheus-operator created
AI助手
查看Operator容器的状态:
root@k8s-master1:~/kube-prometheus/manifests# kubectl get pod -n monitoring
NAME READY STATUS RESTARTS AGE
prometheus-operator-6db58766b8-zhr8q 2/2 Running 0 38s
AI助手
Operator容器启动后,安装Prometheus Stack:
root@k8s-master1:~/kube-prometheus/manifests# grep "image:" *.yaml |awk -F" " '{print $NF}' | uniq
quay.io/prometheus/alertmanager:v0.22.2
quay.io/prometheus/blackbox-exporter:v0.19.0
jimmidyson/configmap-reload:v0.5.0
quay.io/brancz/kube-rbac-proxy:v0.11.0
grafana/grafana:8.1.1
k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.1.1
quay.io/prometheus/node-exporter:v1.2.2
k8s.gcr.io/prometheus-adapter/prometheus-adapter:v0.9.0
quay.io/prometheus/prometheus:v2.29.1
root@k8s-master1:~/kube-prometheus/manifests# docker pull registry.cn-beijing.aliyuncs.com/raymond9/alertmanager:v0.22.2
root@k8s-master1:~/kube-prometheus/manifests# docker tag registry.cn-beijing.aliyuncs.com/raymond9/alertmanager:v0.22.2 harbor.raymonds.cc/base_images/alertmanager:v0.22.2
root@k8s-master1:~/kube-prometheus/manifests# docker push harbor.raymonds.cc/base_images/alertmanager:v0.22.2
root@k8s-master1:~/kube-prometheus/manifests# docker pull registry.cn-beijing.aliyuncs.com/raymond9/blackbox-exporter:v0.19.0
root@k8s-master1:~/kube-prometheus/manifests# docker tag registry.cn-beijing.aliyuncs.com/raymond9/blackbox-exporter:v0.19.0 harbor.raymonds.cc/base_images/blackbox-exporter:v0.19.0
root@k8s-master1:~/kube-prometheus/manifests# docker push harbor.raymonds.cc/base_images/blackbox-exporter:v0.19.0
root@k8s-master1:~/kube-prometheus/manifests# docker pull registry.cn-beijing.aliyuncs.com/raymond9/configmap-reload:v0.5.0
root@k8s-master1:~/kube-prometheus/manifests# docker tag registry.cn-beijing.aliyuncs.com/raymond9/configmap-reload:v0.5.0 harbor.raymonds.cc/base_images/configmap-reload:v0.5.0
root@k8s-master1:~/kube-prometheus/manifests# docker push harbor.raymonds.cc/base_images/configmap-reload:v0.5.0
root@k8s-master1:~/kube-prometheus/manifests# docker pull registry.cn-beijing.aliyuncs.com/raymond9/kube-rbac-proxy:v0.11.0
root@k8s-master1:~/kube-prometheus/manifests# docker tag registry.cn-beijing.aliyuncs.com/raymond9/kube-rbac-proxy:v0.11.0 harbor.raymonds.cc/base_images/kube-rbac-proxy:v0.11.0
root@k8s-master1:~/kube-prometheus/manifests# docker push harbor.raymonds.cc/base_images/kube-rbac-proxy:v0.11.0
root@k8s-master1:~/kube-prometheus/manifests# docker pull registry.cn-beijing.aliyuncs.com/raymond9/grafana:8.1.1
root@k8s-master1:~/kube-prometheus/manifests# docker tag registry.cn-beijing.aliyuncs.com/raymond9/grafana:8.1.1 harbor.raymonds.cc/base_images/grafana:8.1.1
root@k8s-master1:~/kube-prometheus/manifests# docker push harbor.raymonds.cc/base_images/grafana:8.1.1
root@k8s-master1:~/kube-prometheus/manifests# docker pull registry.cn-beijing.aliyuncs.com/raymond9/kube-state-metrics:v2.1.1
root@k8s-master1:~/kube-prometheus/manifests# docker tag registry.cn-beijing.aliyuncs.com/raymond9/kube-state-metrics:v2.1.1 harbor.raymonds.cc/base_images/kube-state-metrics:v2.1.1
root@k8s-master1:~/kube-prometheus/manifests# docker push harbor.raymonds.cc/base_images/kube-state-metrics:v2.1.1
root@k8s-master1:~/kube-prometheus/manifests# docker pull registry.cn-beijing.aliyuncs.com/raymond9/node-exporter:v1.2.2
root@k8s-master1:~/kube-prometheus/manifests# docker tag registry.cn-beijing.aliyuncs.com/raymond9/node-exporter:v1.2.2 harbor.raymonds.cc/base_images/node-exporter:v1.2.2
root@k8s-master1:~/kube-prometheus/manifests# docker push harbor.raymonds.cc/base_images/node-exporter:v1.2.2
root@k8s-master1:~/kube-prometheus/manifests# docker pull registry.cn-beijing.aliyuncs.com/raymond9/prometheus-adapter:v0.9.0
root@k8s-master1:~/kube-prometheus/manifests# docker tag registry.cn-beijing.aliyuncs.com/raymond9/prometheus-adapter:v0.9.0 harbor.raymonds.cc/base_images/prometheus-adapter:v0.9.0
root@k8s-master1:~/kube-prometheus/manifests# docker push harbor.raymonds.cc/base_images/prometheus-adapter:v0.9.0
root@k8s-master1:~/kube-prometheus/manifests# docker pull registry.cn-beijing.aliyuncs.com/raymond9/prometheus:v2.29.1
root@k8s-master1:~/kube-prometheus/manifests# docker tag registry.cn-beijing.aliyuncs.com/raymond9/prometheus:v2.29.1 harbor.raymonds.cc/base_images/prometheus:v2.29.1
root@k8s-master1:~/kube-prometheus/manifests# docker push harbor.raymonds.cc/base_images/prometheus:v2.29.1
root@k8s-master1:~/kube-prometheus/manifests# sed -i "s#quay.io/prometheus/#harbor.raymonds.cc/base_images/#g" *.yaml
root@k8s-master1:~/kube-prometheus/manifests# sed -i "s#jimmidyson/#harbor.raymonds.cc/base_images/#g" *.yaml
root@k8s-master1:~/kube-prometheus/manifests# sed -i "s#quay.io/brancz/#harbor.raymonds.cc/base_images/#g" *.yaml
root@k8s-master1:~/kube-prometheus/manifests# sed -i "s#grafana/#harbor.raymonds.cc/base_images/#g" *.yaml
root@k8s-master1:~/kube-prometheus/manifests# sed -i "s#k8s.gcr.io/kube-state-metrics/#harbor.raymonds.cc/base_images/#g" *.yaml
root@k8s-master1:~/kube-prometheus/manifests# sed -i "s#k8s.gcr.io/prometheus-adapter/#harbor.raymonds.cc/base_images/#g" *.yaml
root@k8s-master1:~/kube-prometheus/manifests# grep "image:" *.yaml |awk -F" " '{print $NF}'
harbor.raymonds.cc/base_images/alertmanager:v0.22.2
harbor.raymonds.cc/base_images/blackbox-exporter:v0.19.0
harbor.raymonds.cc/base_images/configmap-reload:v0.5.0
harbor.raymonds.cc/base_images/kube-rbac-proxy:v0.11.0
harbor.raymonds.cc/base_images/grafana:8.1.1
harbor.raymonds.cc/base_images/kube-state-metrics:v2.1.1
harbor.raymonds.cc/base_images/kube-rbac-proxy:v0.11.0
harbor.raymonds.cc/base_images/kube-rbac-proxy:v0.11.0
harbor.raymonds.cc/base_images/node-exporter:v1.2.2
harbor.raymonds.cc/base_images/kube-rbac-proxy:v0.11.0
harbor.raymonds.cc/base_images/prometheus-adapter:v0.9.0
harbor.raymonds.cc/base_images/prometheus:v2.29.1
root@k8s-master1:~/kube-prometheus/manifests# kubectl apply -f .
AI助手
查看Prometheus容器状态:
root@k8s-master1:~/kube-prometheus/manifests# kubectl get pod -n monitoring
NAME READY STATUS RESTARTS AGE
alertmanager-main-0 1/2 ImagePullBackOff 0 2m55s
alertmanager-main-1 1/2 ImagePullBackOff 0 2m55s
alertmanager-main-2 1/2 ImagePullBackOff 0 2m55s
blackbox-exporter-7f7f57cf45-llhnk 3/3 Running 0 2m55s
grafana-7d8fdcf57d-dkpjf 1/1 Running 0 2m54s
kube-state-metrics-7564f95f7d-26gjs 3/3 Running 0 2m53s
node-exporter-bt45t 2/2 Running 0 2m52s
node-exporter-cv59k 2/2 Running 0 2m52s
node-exporter-j5wst 2/2 Running 0 2m52s
node-exporter-tnclt 2/2 Running 0 2m52s
node-exporter-tzlc6 2/2 Running 0 2m52s
node-exporter-vq8jp 2/2 Running 0 2m52s
prometheus-adapter-cbcdb5cc-29ldr 1/1 Running 0 2m51s
prometheus-adapter-cbcdb5cc-8wdcr 1/1 Running 0 2m51s
prometheus-k8s-0 0/2 Init:ImagePullBackOff 0 2m50s
prometheus-k8s-1 0/2 Init:ImagePullBackOff 0 2m50s
prometheus-operator-6db58766b8-zhr8q 2/2 Running 0 78m
root@k8s-master1:~/kube-prometheus/manifests# docker pull registry.cn-beijing.aliyuncs.com/raymond9/prometheus-config-reloader:v0.49.0
root@k8s-master1:~/kube-prometheus/manifests# docker tag registry.cn-beijing.aliyuncs.com/raymond9/prometheus-config-reloader:v0.49.0 harbor.raymonds.cc/base_images/prometheus-config-reloader:v0.49.0
root@k8s-master1:~/kube-prometheus/manifests# docker push harbor.raymonds.cc/base_images/prometheus-config-reloader:v0.49.0
root@k8s-master1:~/kube-prometheus/manifests# kubectl delete pod alertmanager-main-0 alertmanager-main-1 alertmanager-main-2 -n monitoring
pod "alertmanager-main-0" deleted
pod "alertmanager-main-1" deleted
pod "alertmanager-main-2" deleted
root@k8s-master1:~/kube-prometheus/manifests# kubectl get pod -n monitoring
NAME READY STATUS RESTARTS AGE
alertmanager-main-0 2/2 Running 0 15s
alertmanager-main-1 2/2 Running 0 11s
alertmanager-main-2 2/2 Running 0 15s
blackbox-exporter-7f7f57cf45-llhnk 3/3 Running 0 21m
grafana-7d8fdcf57d-dkpjf 1/1 Running 0 21m
kube-state-metrics-7564f95f7d-26gjs 3/3 Running 0 21m
node-exporter-bt45t 2/2 Running 0 21m
node-exporter-cv59k 2/2 Running 0 21m
node-exporter-j5wst 2/2 Running 0 21m
node-exporter-tnclt 2/2 Running 0 21m
node-exporter-tzlc6 2/2 Running 0 21m
node-exporter-vq8jp 2/2 Running 0 21m
prometheus-adapter-cbcdb5cc-29ldr 1/1 Running 0 21m
prometheus-adapter-cbcdb5cc-8wdcr 1/1 Running 0 21m
prometheus-k8s-0 2/2 Running 0 21m
prometheus-k8s-1 2/2 Running 0 21m
prometheus-operator-6db58766b8-zhr8q 2/2 Running 0 97m
AI助手
2.2.3.1 验证prometheus
root@k8s-master1:~/kube-prometheus/manifests# kubectl port-forward --help
Forward one or more local ports to a pod. This command requires the node to have 'socat' installed.
Use resource type/name such as deployment/mydeployment to select a pod. Resource type defaults to 'pod' if omitted.
If there are multiple pods matching the criteria, a pod will be selected automatically. The forwarding session ends
when the selected pod terminates, and rerun of the command is needed to resume forwarding.
Examples:
# Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod
kubectl port-forward pod/mypod 5000 6000
# Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in a pod selected by the
deployment
kubectl port-forward deployment/mydeployment 5000 6000
# Listen on port 8443 locally, forwarding to the targetPort of the service's port named "https" in a pod selected by
the service
kubectl port-forward service/myservice 8443:https
# Listen on port 8888 locally, forwarding to 5000 in the pod
kubectl port-forward pod/mypod 8888:5000
# Listen on port 8888 on all addresses, forwarding to 5000 in the pod
kubectl port-forward --address 0.0.0.0 pod/mypod 8888:5000
# Listen on port 8888 on localhost and selected IP, forwarding to 5000 in the pod
kubectl port-forward --address localhost,10.19.21.23 pod/mypod 8888:5000
# Listen on a random port locally, forwarding to 5000 in the pod
kubectl port-forward pod/mypod :5000
Options:
--address=[localhost]: Addresses to listen on (comma separated). Only accepts IP addresses or localhost as a
value. When localhost is supplied, kubectl will try to bind on both 127.0.0.1 and ::1 and will fail if neither of these
addresses are available to bind.
--pod-running-timeout=1m0s: The length of time (like 5s, 2m, or 3h, higher than zero) to wait until at least one
pod is running
Usage:
kubectl port-forward TYPE/NAME [options] [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N]
Use "kubectl options" for a list of global command-line options (applies to all commands).
root@k8s-master1:~/kube-prometheus/manifests# kubectl get svc -n monitoring
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
alertmanager-main ClusterIP 10.100.223.119 <none> 9093/TCP 2m4s
alertmanager-operated ClusterIP None <none> 9093/TCP,9094/TCP,9094/UDP 2m4s
blackbox-exporter ClusterIP 10.100.44.58 <none> 9115/TCP,19115/TCP 2m4s
grafana ClusterIP 10.100.21.246 <none> 3000/TCP 2m2s
kube-state-metrics ClusterIP None <none> 8443/TCP,9443/TCP 2m2s
node-exporter ClusterIP None <none> 9100/TCP 2m1s
prometheus-adapter ClusterIP 10.100.171.117 <none> 443/TCP 2m
prometheus-k8s ClusterIP 10.100.23.90 <none> 9090/TCP 118s
prometheus-operated ClusterIP None <none> 9090/TCP 118s
prometheus-operator ClusterIP None <none> 8443/TCP 2m14s
root@k8s-master1:~/kube-prometheus/manifests# kubectl port-forward --address 0.0.0.0 svc/prometheus-k8s 9090:9090 -n monitoring
Forwarding from 0.0.0.0:9090 -> 9090
AI助手

2.2.3.2 svc暴露prometheus
root@k8s-master1:~/kube-prometheus/manifests# vim prometheus-service.yaml
...
spec:
type: NodePort #添加这行
ports:
- name: web
port: 9090
nodePort: 39090 #添加这行
targetPort: web
...
root@k8s-master1:~/kube-prometheus/manifests# kubectl apply -f prometheus-service.yaml
service/prometheus-k8s configured
AI助手

2.2.3.3 验证grafana
root@k8s-master1:~/kube-prometheus/manifests# kubectl port-forward --address 0.0.0.0 svc/grafana 3000:3000 -n monitoring
Forwarding from 0.0.0.0:3000 -> 3000
AI助手

2.2.3.4 svc暴露grafana
root@k8s-master1:~/kube-prometheus/manifests# vim grafana-service.yaml
...
spec:
type: NodePort #添加这行
ports:
- name: http
port: 3000
nodePort: 33000 #添加这行
targetPort: http
...
root@k8s-master1:~/kube-prometheus/manifests# kubectl apply -f grafana-service.yaml
service/grafana configured
AI助手
Grafana默认登录的账号密码为admin/admin。

首次登录后会提示修改密码,可以设置新密码,也可以点击Skip跳过。

全部评论 (0)
还没有任何评论哟~
