Compare commits


No commits in common. "main" and "v0.2.0" have entirely different histories.
main...v0.2.0

9 changed files with 15 additions and 267 deletions

View File

@@ -22,15 +22,6 @@ jobs:
with:
version: v3.10.0
- name: Add Helm repositories
run: |
helm repo add bjw-s https://bjw-s-labs.github.io/helm-charts/ --force-update
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts --force-update
helm repo update
- name: Build Helm chart dependencies
run: helm dependency build ./charts/iperf3-monitor
- name: Helm Lint
run: helm lint ./charts/iperf3-monitor
@@ -63,11 +54,6 @@ jobs:
uses: docker/metadata-action@v4
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
type=semver,pattern={{version}}
# This ensures that for a git tag like "v0.1.0",
# an image tag "0.1.0" is generated.
# It will also generate "latest" for the most recent semver tag.
- name: Build and push Docker image
uses: docker/build-push-action@v4
@@ -100,15 +86,6 @@ jobs:
sudo wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq &&\
sudo chmod +x /usr/bin/yq
- name: Add Helm repositories
run: |
helm repo add bjw-s https://bjw-s-labs.github.io/helm-charts/ --force-update
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts --force-update
helm repo update
- name: Build Helm chart dependencies
run: helm dependency build ./charts/iperf3-monitor
- name: Set Chart Version from Tag
run: |
VERSION=$(echo "${{ github.ref_name }}" | sed 's/^v//')
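The step above strips the "v" prefix so that a git tag like "v0.1.0" yields chart version "0.1.0", matching the image tag produced by docker/metadata-action earlier in the workflow. The same rule, restated as a small Python sketch (illustrative only, not part of the workflow):

def chart_version_from_tag(ref_name: str) -> str:
    # Mirrors sed 's/^v//': "v0.1.0" -> "0.1.0"; names without the prefix pass through unchanged.
    return ref_name[1:] if ref_name.startswith("v") else ref_name

assert chart_version_from_tag("v0.1.0") == "0.1.0"
assert chart_version_from_tag("0.2.0") == "0.2.0"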

.gitignore
View File

@@ -37,7 +37,3 @@ Thumbs.db
# Helm
!charts/iperf3-monitor/.helmignore
charts/iperf3-monitor/charts/
# Rendered Kubernetes manifests (for local testing)
rendered-manifests.yaml
rendered-manifests-updated.yaml

View File

@@ -1,194 +0,0 @@
{
"__inputs": [],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "8.0.0"
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": null,
"graphTooltip": 0,
"id": null,
"links": [],
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"gridPos": {
"h": 9,
"w": 24,
"x": 0,
"y": 0
},
"id": 2,
"targets": [
{
"expr": "avg(iperf_network_bandwidth_mbps) by (source_node, destination_node)",
"format": "heatmap",
"legendFormat": "{{source_node}} -> {{destination_node}}",
"refId": "A"
}
],
"cards": { "cardPadding": null, "cardRound": null },
"color": {
"mode": "spectrum",
"scheme": "red-yellow-green",
"exponent": 0.5,
"reverse": false
},
"dataFormat": "tsbuckets",
"yAxis": { "show": true, "format": "short" },
"xAxis": { "show": true }
},
{
"title": "Bandwidth Over Time (Source: $source_node, Dest: $destination_node)",
"type": "timeseries",
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 9
},
"targets": [
{
"expr": "iperf_network_bandwidth_mbps{source_node=~\"^$source_node$\", destination_node=~\"^$destination_node$\", protocol=~\"^$protocol$\"}",
"legendFormat": "Bandwidth",
"refId": "A"
}
],
"fieldConfig": {
"defaults": {
"unit": "mbps"
}
}
},
{
"title": "Jitter Over Time (Source: $source_node, Dest: $destination_node)",
"type": "timeseries",
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 9
},
"targets": [
{
"expr": "iperf_network_jitter_ms{source_node=~\"^$source_node$\", destination_node=~\"^$destination_node$\", protocol=\"udp\"}",
"legendFormat": "Jitter",
"refId": "A"
}
],
"fieldConfig": {
"defaults": {
"unit": "ms"
}
}
}
],
"refresh": "30s",
"schemaVersion": 36,
"style": "dark",
"tags": ["iperf3", "network", "kubernetes"],
"templating": {
"list": [
{
"current": {},
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"definition": "label_values(iperf_network_bandwidth_mbps, source_node)",
"hide": 0,
"includeAll": false,
"multi": false,
"name": "source_node",
"options": [],
"query": "label_values(iperf_network_bandwidth_mbps, source_node)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"type": "query"
},
{
"current": {},
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"definition": "label_values(iperf_network_bandwidth_mbps{source_node=~\"^$source_node$\"}, destination_node)",
"hide": 0,
"includeAll": false,
"multi": false,
"name": "destination_node",
"options": [],
"query": "label_values(iperf_network_bandwidth_mbps{source_node=~\"^$source_node$\"}, destination_node)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"type": "query"
},
{
"current": { "selected": true, "text": "tcp", "value": "tcp" },
"hide": 0,
"includeAll": false,
"multi": false,
"name": "protocol",
"options": [
{ "selected": true, "text": "tcp", "value": "tcp" },
{ "selected": false, "text": "udp", "value": "udp" }
],
"query": "tcp,udp",
"skipUrlSync": false,
"type": "custom"
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {},
"timezone": "browser",
"title": "Kubernetes iperf3 Network Performance",
"uid": "k8s-iperf3-dashboard",
"version": 1,
"weekStart": ""
}
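The deleted dashboard's heatmap panel aggregated iperf_network_bandwidth_mbps by node pair, and its timeseries panels filtered the same metric by the $source_node, $destination_node, and $protocol variables. Those series remain queryable straight from Prometheus; a hedged Python sketch follows (the Prometheus URL is an assumed in-cluster address, not something this chart defines):

import requests

PROM_URL = "http://prometheus.monitoring.svc:9090"  # assumption: a typical in-cluster address
QUERY = "avg(iperf_network_bandwidth_mbps) by (source_node, destination_node)"

# Instant query against the standard Prometheus HTTP API.
resp = requests.get(f"{PROM_URL}/api/v1/query", params={"query": QUERY}, timeout=10)
resp.raise_for_status()
for sample in resp.json()["data"]["result"]:
    labels = sample["metric"]
    mbps = float(sample["value"][1])
    print(f"{labels.get('source_node')} -> {labels.get('destination_node')}: {mbps:.1f} Mbps")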

View File

@@ -72,23 +72,12 @@ Proceed with modifications only if the exporter controller is defined.
{{- /*
Ensure the container image tag is set, defaulting to Chart.AppVersion if empty,
as the common library validation requires it during 'helm template'.
NOTE: The bjw-s common library typically defaults image.tag to Chart.AppVersion itself
when image.tag is empty or null in values, so the custom defaulting below (which
formerly prepended a "v") may be redundant; it is kept because an unset tag caused
errors previously. The empty-output failure seen earlier during 'helm template' was
resolved by setting the tag explicitly (e.g. via --set or in values.yaml): the common
library's validation needs *a* tag to be present in the values passed to it, even one
derived from AppVersion, and this block guarantees that. If the library's own default
is preferred, the block can be removed and image.tag set to "" or null in values.yaml.
*/}}
{{- $exporterContainerCfg := get $exporterControllerConfig.containers "exporter" -}}
{{- if $exporterContainerCfg -}}
{{- if not $exporterContainerCfg.image.tag -}}
{{- if $chart.AppVersion -}}
{{- $_ := set $exporterContainerCfg.image "tag" (printf "%s" $chart.AppVersion) -}} # Removed "v" prefix
{{- $_ := set $exporterContainerCfg.image "tag" $chart.AppVersion -}}
{{- else -}}
{{- fail (printf "Error: Container image tag is not specified for controller '%s', container '%s', and Chart.AppVersion is also empty." $exporterControllerKey "exporter") -}}
{{- end -}}
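Restated as a short Python sketch (illustrative, not part of the chart), the rule the template enforces is:

def resolve_image_tag(tag: str | None, app_version: str | None) -> str:
    # An explicit tag wins; otherwise fall back to Chart.AppVersion; otherwise fail,
    # matching the template's fail message above.
    if tag:
        return tag
    if app_version:
        return app_version
    raise ValueError("Container image tag is not specified and Chart.AppVersion is also empty")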

View File

@@ -1,13 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-grafana-dashboard
labels:
grafana_dashboard: "1"
app.kubernetes.io/name: {{ include "iperf3-monitor.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
iperf3-dashboard.json: |
{{ .Files.Get "grafana/iperf3-dashboard.json" | nindent 4 }}

View File

@@ -7,10 +7,9 @@ metadata:
{{- include "iperf3-monitor.labels" . | nindent 4 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
kind: ClusterRole
metadata:
name: {{ include "iperf3-monitor.fullname" . }}-role
namespace: {{ .Release.Namespace }}
labels:
{{- include "iperf3-monitor.labels" . | nindent 4 }}
rules:
@@ -19,10 +18,9 @@ rules:
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
kind: ClusterRoleBinding
metadata:
name: {{ include "iperf3-monitor.fullname" . }}-rb
namespace: {{ .Release.Namespace }}
labels:
{{- include "iperf3-monitor.labels" . | nindent 4 }}
subjects:
@@ -30,7 +28,7 @@ subjects:
name: {{ include "iperf3-monitor.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role # Changed from ClusterRole
kind: ClusterRole
name: {{ include "iperf3-monitor.fullname" . }}-role
apiGroup: rbac.authorization.k8s.io
{{- end -}}
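Switching from a namespaced Role/RoleBinding to a ClusterRole/ClusterRoleBinding matches the exporter change further down, where pods are listed across all namespaces instead of a single one. A minimal sketch, assuming the official kubernetes Python client, of checking that the bound ServiceAccount really holds the cluster-wide permission:

from kubernetes import client, config

config.load_incluster_config()  # assumes the exporter pod runs in-cluster

# SelfSubjectAccessReview asks the API server whether the current identity
# may "list pods" with no namespace set, i.e. at cluster scope -- exactly
# the permission a namespaced Role cannot grant.
review = client.V1SelfSubjectAccessReview(
    spec=client.V1SelfSubjectAccessReviewSpec(
        resource_attributes=client.V1ResourceAttributes(verb="list", resource="pods")
    )
)
result = client.AuthorizationV1Api().create_self_subject_access_review(review)
print("cluster-wide pod list allowed:", result.status.allowed)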

View File

@@ -11,7 +11,7 @@ spec:
{{- include "iperf3-monitor.selectorLabels" . | nindent 4 }}
app.kubernetes.io/component: exporter
ports:
- name: metrics # Assuming 'metrics' is the intended name, aligns with values structure
port: {{ .Values.service.main.ports.metrics.port }}
targetPort: {{ .Values.service.main.ports.metrics.targetPort }}
protocol: {{ .Values.service.main.ports.metrics.protocol | default "TCP" }}
- name: metrics
port: {{ .Values.service.port }}
targetPort: {{ .Values.service.targetPort }}
protocol: TCP

View File

@@ -45,8 +45,7 @@ controllers:
# -- Annotations for the exporter pod.
annotations: {}
# -- Labels for the exporter pod.
labels:
app.kubernetes.io/component: exporter # Ensure pods get the component label for service selection
labels: {} # The common library will add its own default labels.
# -- Node selector for scheduling exporter pods.
nodeSelector: {}
# -- Tolerations for scheduling exporter pods.
@@ -87,15 +86,13 @@
# key: mykey
# -- Ports for the exporter container.
# Expected by Kubernetes and bjw-s common library as a list of objects.
ports:
- name: metrics # Name of the port, referenced by Service's targetPort
metrics: # Name of the port, will be used in Service definition
# -- Port number for the metrics endpoint on the container.
containerPort: 9876
port: 9876 # Default, should match service.targetPort
# -- Protocol for the metrics port.
protocol: TCP
# -- Whether this port definition is enabled. Specific to bjw-s common library.
enabled: true
protocol: TCP # Common library defaults to TCP if not specified.
enabled: true # This port is enabled
# -- CPU and memory resource requests and limits for the exporter container.
resources:

View File

@@ -92,18 +92,16 @@ def discover_iperf_servers():
logging.info(f"Discovering iperf3 servers with label '{label_selector}' in namespace '{namespace}'")
# Use list_namespaced_pod to query only the specified namespace
ret = v1.list_namespaced_pod(namespace=namespace, label_selector=label_selector, watch=False)
ret = v1.list_pod_for_all_namespaces(label_selector=label_selector, watch=False)
servers = []
for item in ret.items:
# No need to filter by namespace here as the API call is already namespaced
if item.status.pod_ip and item.status.phase == 'Running':
servers.append({
'ip': item.status.pod_ip,
'node_name': item.spec.node_name # Node where the iperf server pod is running
})
logging.info(f"Discovered {len(servers)} iperf3 server pods in namespace '{namespace}'.")
logging.info(f"Discovered {len(servers)} iperf3 server pods.")
return servers
except config.ConfigException as e:
logging.error(f"Kubernetes config error: {e}. Is the exporter running in a cluster with RBAC permissions?")
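For context, a hedged sketch of how the discovery result might feed the test loop. run_iperf3_test below is hypothetical (the exporter's real test routine sits outside this hunk) and assumes the iperf3 binary is available with JSON output:

import json
import subprocess

def run_iperf3_test(server_ip: str, duration: int = 5) -> float:
    # Run a short TCP iperf3 client test and return bandwidth in Mbps;
    # "end.sum_received.bits_per_second" is part of iperf3's JSON report for TCP.
    out = subprocess.run(
        ["iperf3", "-c", server_ip, "-t", str(duration), "--json"],
        capture_output=True, text=True, check=True,
    )
    report = json.loads(out.stdout)
    return report["end"]["sum_received"]["bits_per_second"] / 1e6

for server in discover_iperf_servers():
    print(f"{server['node_name']}: {run_iperf3_test(server['ip']):.1f} Mbps")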