Kubernetes/Istio/Observability



Prometheus

<syntaxhighlightjs lang=bash>
kubectl create ns prometheus
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
helm install prom prometheus-community/kube-prometheus-stack --version 13.13.1 -n prometheus -f values.yaml

# Dashboards
kubectl -n prometheus port-forward statefulset/prometheus-prom-kube-prometheus-stack-prometheus 9090
kubectl -n prometheus port-forward svc/prom-grafana 3000:80
</syntaxhighlightjs>


Access now:
* Prometheus: http://localhost:9090
* Grafana: http://localhost:3000
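If the UIs are not reachable, first check that the stack's pods are actually running. A quick sanity check; the resource names follow from the <code>prom</code> release name used above:
<syntaxhighlightjs lang=bash>
# All kube-prometheus-stack pods in the namespace should be Running
kubectl -n prometheus get pods
</syntaxhighlightjs>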


The values.yaml file keeps the chart defaults, with only the following components disabled:
<syntaxhighlightjs lang="yaml">
defaultRules: ## Create default rules for monitoring the cluster
  create: false

alertmanager: ## Deploy alertmanager
  enabled: false

kubeApiServer: ## Component scraping the kube api server
  enabled: false

kubelet: ## Component scraping the kubelet and kubelet-hosted cAdvisor
  enabled: false

coreDns: ## Component scraping coreDns. Use either this or kubeDns
  enabled: false

kubeDns: ## Component scraping kubeDns. Use either this or coreDns
  enabled: false

kubeEtcd: ## Component scraping etcd
  enabled: false

kubeScheduler: ## Component scraping kube scheduler
  enabled: false

kubeProxy: ## Component scraping kube proxy
  enabled: false
</syntaxhighlightjs>
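If you later tweak values.yaml (for example to re-enable one of these scrapers), the release can be updated in place. A minimal sketch, assuming the same release name and chart version as above:
<syntaxhighlightjs lang=bash>
# Apply the updated values.yaml to the existing release
helm upgrade prom prometheus-community/kube-prometheus-stack --version 13.13.1 -n prometheus -f values.yaml
</syntaxhighlightjs>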

Grafana Istio dashboards

Get the dashboards from the Istio source repo.

<syntaxhighlightjs lang=bash>
git clone https://github.com/istio/istio
cd istio/manifests/addons

# Create istio-dashboards configMap
kubectl -n prometheus create cm istio-dashboards \
--from-file=pilot-dashboard.json=dashboards/pilot-dashboard.json \
--from-file=istio-workload-dashboard.json=dashboards/istio-workload-dashboard.json \
--from-file=istio-service-dashboard.json=dashboards/istio-service-dashboard.json \
--from-file=istio-performance-dashboard.json=dashboards/istio-performance-dashboard.json \
--from-file=istio-mesh-dashboard.json=dashboards/istio-mesh-dashboard.json \
--from-file=istio-extension-dashboard.json=dashboards/istio-extension-dashboard.json

# Label this 'istio-dashboards' configmap for Grafana to pick it up
kubectl label -n prometheus cm istio-dashboards grafana_dashboard=1
</syntaxhighlightjs>
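Grafana in kube-prometheus-stack loads dashboards through a sidecar that watches ConfigMaps carrying the grafana_dashboard label. A quick way to confirm the label is in place and the sidecar noticed it; the container name grafana-sc-dashboard is an assumption and may differ between chart versions:
<syntaxhighlightjs lang=bash>
# Confirm the ConfigMap carries the label the dashboard sidecar watches for
kubectl -n prometheus get cm istio-dashboards --show-labels

# Optionally check the sidecar logs (container name 'grafana-sc-dashboard' is an assumption)
kubectl -n prometheus logs deploy/prom-grafana -c grafana-sc-dashboard --tail=20
</syntaxhighlightjs>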

A new set of dashboards should appear in the Grafana UI. They will be empty because we haven't yet configured Prometheus to scrape any metrics.

Set up Prometheus to scrape metrics

We will use the Prometheus Operator CRs ServiceMonitor and PodMonitor. These Custom Resources are described in good detail in the design doc on the Prometheus Operator repo.
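Before creating them, it can be worth confirming that the Prometheus Operator CRDs were installed by the chart (these are the standard monitoring.coreos.com CRD names):
<syntaxhighlightjs lang=bash>
# Both CRDs should exist if kube-prometheus-stack installed correctly
kubectl get crd servicemonitors.monitoring.coreos.com podmonitors.monitoring.coreos.com
</syntaxhighlightjs>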

Scrape Istio control-plane:
<syntaxhighlightjs lang=yaml>
kubectl apply -f <(cat <<EOF
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: istio-component-monitor
  namespace: prometheus
  labels:
    monitoring: istio-components
    release: prom
spec:
  jobLabel: istio
  targetLabels: [app]
  selector:
    matchExpressions:
    - {key: istio, operator: In, values: [pilot]}
  namespaceSelector:
    any: true
  endpoints:
  - port: http-monitoring
    interval: 15s
EOF
) --dry-run=server
</syntaxhighlightjs>
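The --dry-run=server flag only validates the manifest against the API server; re-run the same command without it to actually create the resource. A quick check afterwards, assuming the names used above:
<syntaxhighlightjs lang=bash>
# Confirm the ServiceMonitor exists; the istiod target should then show up
# under Status -> Targets in the Prometheus UI (http://localhost:9090)
kubectl -n prometheus get servicemonitor istio-component-monitor
</syntaxhighlightjs>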


Scrape Istio data-plane:
<syntaxhighlightjs lang=yaml>
# <<'EOF' is quoted so the shell does not expand $1:$2 inside the relabeling rules
kubectl apply -f <(cat <<'EOF'
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: envoy-stats-monitor
  namespace: prometheus
  labels:
    monitoring: istio-proxies
    release: prom
spec:
  selector:
    matchExpressions:
    - {key: istio-prometheus-ignore, operator: DoesNotExist}
  namespaceSelector:
    any: true
  jobLabel: envoy-stats
  podMetricsEndpoints:
  - path: /stats/prometheus
    interval: 15s
    relabelings:
    - action: keep
      sourceLabels: [__meta_kubernetes_pod_container_name]
      regex: "istio-proxy"
    - action: keep
      sourceLabels: [__meta_kubernetes_pod_annotationpresent_prometheus_io_scrape]
    - sourceLabels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
      action: replace
      regex: ([^:]+)(?::\d+)?;(\d+)
      replacement: $1:$2
      targetLabel: __address__
    - action: labeldrop
      regex: "__meta_kubernetes_pod_label_(.+)"
    - sourceLabels: [__meta_kubernetes_namespace]
      action: replace
      targetLabel: namespace
    - sourceLabels: [__meta_kubernetes_pod_name]
      action: replace
      targetLabel: pod_name
EOF
) --dry-run=server
</syntaxhighlightjs>
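After re-running without --dry-run=server, you can check that sidecar metrics are arriving. A hedged sketch, assuming the Prometheus port-forward from earlier is still running, jq is installed, and at least one sidecar-injected workload is receiving traffic:
<syntaxhighlightjs lang=bash>
# Confirm the PodMonitor exists
kubectl -n prometheus get podmonitor envoy-stats-monitor

# Query Prometheus for a standard Istio metric exposed by the Envoy sidecars
curl -s 'http://localhost:9090/api/v1/query?query=istio_requests_total' | jq '.data.result | length'
</syntaxhighlightjs>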