Compare commits


No commits in common. "main" and "v0.2.1" have entirely different histories.
main ... v0.2.1

9 changed files with 15 additions and 249 deletions


@@ -63,11 +63,6 @@ jobs:
         uses: docker/metadata-action@v4
         with:
           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
-          tags: |
-            type=semver,pattern={{version}}
-          # This ensures that for a git tag like "v0.1.0",
-          # an image tag "0.1.0" is generated.
-          # It will also generate "latest" for the most recent semver tag.
       - name: Build and push Docker image
         uses: docker/build-push-action@v4
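For reference, a minimal sketch of a docker/metadata-action step that derives semver image tags the way the removed block did; the step name, id, and image reference below are illustrative, not taken from this workflow:

      - name: Extract image metadata
        id: meta                                  # illustrative step id
        uses: docker/metadata-action@v4
        with:
          images: ghcr.io/example/iperf3-monitor  # illustrative image reference
          # A git tag "v0.1.0" produces the image tag "0.1.0";
          # "latest" is added for the most recent semver tag.
          tags: |
            type=semver,pattern={{version}}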

.gitignore vendored

@@ -37,7 +37,3 @@ Thumbs.db
 # Helm
 !charts/iperf3-monitor/.helmignore
 charts/iperf3-monitor/charts/
-# Rendered Kubernetes manifests (for local testing)
-rendered-manifests.yaml
-rendered-manifests-updated.yaml


@@ -1,194 +0,0 @@
{
"__inputs": [],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "8.0.0"
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": null,
"graphTooltip": 0,
"id": null,
"links": [],
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"gridPos": {
"h": 9,
"w": 24,
"x": 0,
"y": 0
},
"id": 2,
"targets": [
{
"expr": "avg(iperf_network_bandwidth_mbps) by (source_node, destination_node)",
"format": "heatmap",
"legendFormat": "{{source_node}} -> {{destination_node}}",
"refId": "A"
}
],
"cards": { "cardPadding": null, "cardRound": null },
"color": {
"mode": "spectrum",
"scheme": "red-yellow-green",
"exponent": 0.5,
"reverse": false
},
"dataFormat": "tsbuckets",
"yAxis": { "show": true, "format": "short" },
"xAxis": { "show": true }
},
{
"title": "Bandwidth Over Time (Source: $source_node, Dest: $destination_node)",
"type": "timeseries",
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 9
},
"targets": [
{
"expr": "iperf_network_bandwidth_mbps{source_node=~\"^$source_node$\", destination_node=~\"^$destination_node$\", protocol=~\"^$protocol$\"}",
"legendFormat": "Bandwidth",
"refId": "A"
}
],
"fieldConfig": {
"defaults": {
"unit": "mbps"
}
}
},
{
"title": "Jitter Over Time (Source: $source_node, Dest: $destination_node)",
"type": "timeseries",
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 9
},
"targets": [
{
"expr": "iperf_network_jitter_ms{source_node=~\"^$source_node$\", destination_node=~\"^$destination_node$\", protocol=\"udp\"}",
"legendFormat": "Jitter",
"refId": "A"
}
],
"fieldConfig": {
"defaults": {
"unit": "ms"
}
}
}
],
"refresh": "30s",
"schemaVersion": 36,
"style": "dark",
"tags": ["iperf3", "network", "kubernetes"],
"templating": {
"list": [
{
"current": {},
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"definition": "label_values(iperf_network_bandwidth_mbps, source_node)",
"hide": 0,
"includeAll": false,
"multi": false,
"name": "source_node",
"options": [],
"query": "label_values(iperf_network_bandwidth_mbps, source_node)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"type": "query"
},
{
"current": {},
"datasource": {
"type": "prometheus",
"uid": "prometheus"
},
"definition": "label_values(iperf_network_bandwidth_mbps{source_node=~\"^$source_node$\"}, destination_node)",
"hide": 0,
"includeAll": false,
"multi": false,
"name": "destination_node",
"options": [],
"query": "label_values(iperf_network_bandwidth_mbps{source_node=~\"^$source_node$\"}, destination_node)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"type": "query"
},
{
"current": { "selected": true, "text": "tcp", "value": "tcp" },
"hide": 0,
"includeAll": false,
"multi": false,
"name": "protocol",
"options": [
{ "selected": true, "text": "tcp", "value": "tcp" },
{ "selected": false, "text": "udp", "value": "udp" }
],
"query": "tcp,udp",
"skipUrlSync": false,
"type": "custom"
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {},
"timezone": "browser",
"title": "Kubernetes iperf3 Network Performance",
"uid": "k8s-iperf3-dashboard",
"version": 1,
"weekStart": ""
}


@@ -72,23 +72,12 @@ Proceed with modifications only if the exporter controller is defined.
 {{- /*
 Ensure the container image tag is set, defaulting to Chart.AppVersion if empty,
 as the common library validation requires it during 'helm template'.
-NOTE: BJW-S common library typically handles defaulting image.tag to Chart.appVersion
-if image.tag is empty or null in values. The custom logic below prepending "v"
-is specific to this chart and might be redundant if the common library's default
-is preferred. For now, we keep it as it was the reason for previous errors if tag was not set.
-However, if common library handles it, this block could be removed and image.tag in values.yaml set to "" or null.
-Forcing the tag to be set (even if to chart.AppVersion) ensures the common library doesn't complain.
-The issue encountered during `helm template` earlier (empty output) was resolved by
-explicitly setting the tag (e.g. via --set or by ensuring values.yaml has it).
-The common library's internal validation likely needs *a* tag to be present in the values passed to it,
-even if that tag is derived from AppVersion. This block ensures that.
 */}}
 {{- $exporterContainerCfg := get $exporterControllerConfig.containers "exporter" -}}
 {{- if $exporterContainerCfg -}}
 {{- if not $exporterContainerCfg.image.tag -}}
 {{- if $chart.AppVersion -}}
-{{- $_ := set $exporterContainerCfg.image "tag" (printf "%s" $chart.AppVersion) -}} # Removed "v" prefix
+{{- $_ := set $exporterContainerCfg.image "tag" $chart.AppVersion -}}
 {{- else -}}
 {{- fail (printf "Error: Container image tag is not specified for controller '%s', container '%s', and Chart.AppVersion is also empty." $exporterControllerKey "exporter") -}}
 {{- end -}}
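As an aside, the same defaulting can be written more compactly with the sprig default function; this is a behaviour-equivalent sketch using the variables already in scope in this template, not the chart's actual code:

    {{- /* Sketch: default the tag to Chart.AppVersion, failing only if both are empty */ -}}
    {{- $tag := $exporterContainerCfg.image.tag | default $chart.AppVersion -}}
    {{- if $tag -}}
      {{- $_ := set $exporterContainerCfg.image "tag" $tag -}}
    {{- else -}}
      {{- fail "image tag is empty and Chart.AppVersion is not set" -}}
    {{- end -}}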


@@ -1,13 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-grafana-dashboard
labels:
grafana_dashboard: "1"
app.kubernetes.io/name: {{ include "iperf3-monitor.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
iperf3-dashboard.json: |
{{ .Files.Get "grafana/iperf3-dashboard.json" | nindent 4 }}
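The grafana_dashboard: "1" label on this (now removed) ConfigMap matches the convention used by the Grafana Helm chart's dashboard sidecar. A hedged sketch of the Grafana chart values that would import such a ConfigMap; key names are as commonly documented for the upstream grafana chart and should be verified against the chart version in use:

    sidecar:
      dashboards:
        enabled: true
        label: grafana_dashboard   # ConfigMaps carrying this label are loaded as dashboards
        searchNamespace: ALL       # or restrict to the release namespace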


@@ -7,10 +7,9 @@ metadata:
   {{- include "iperf3-monitor.labels" . | nindent 4 }}
 ---
 apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
+kind: ClusterRole
 metadata:
   name: {{ include "iperf3-monitor.fullname" . }}-role
-  namespace: {{ .Release.Namespace }}
   labels:
     {{- include "iperf3-monitor.labels" . | nindent 4 }}
 rules:
@@ -19,10 +18,9 @@ rules:
     verbs: ["get", "list", "watch"]
 ---
 apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
+kind: ClusterRoleBinding
 metadata:
   name: {{ include "iperf3-monitor.fullname" . }}-rb
-  namespace: {{ .Release.Namespace }}
   labels:
     {{- include "iperf3-monitor.labels" . | nindent 4 }}
 subjects:
@@ -30,7 +28,7 @@ subjects:
     name: {{ include "iperf3-monitor.serviceAccountName" . }}
     namespace: {{ .Release.Namespace }}
 roleRef:
-  kind: Role # Changed from ClusterRole
+  kind: ClusterRole
   name: {{ include "iperf3-monitor.fullname" . }}-role
   apiGroup: rbac.authorization.k8s.io
 {{- end -}}
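For illustration, the cluster-scoped objects on the right-hand side render to roughly the following; the release name my-release, the pods resource in rules, and the default namespace are hypothetical placeholders (the actual rules block sits outside this hunk):

    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      name: my-release-iperf3-monitor-role
    rules:
      - apiGroups: [""]
        resources: ["pods"]            # assumed resource list
        verbs: ["get", "list", "watch"]
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: my-release-iperf3-monitor-rb
    subjects:
      - kind: ServiceAccount
        name: my-release-iperf3-monitor
        namespace: default             # the namespace the release is installed into
    roleRef:
      kind: ClusterRole
      name: my-release-iperf3-monitor-role
      apiGroup: rbac.authorization.k8s.io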


@@ -11,7 +11,7 @@ spec:
     {{- include "iperf3-monitor.selectorLabels" . | nindent 4 }}
     app.kubernetes.io/component: exporter
   ports:
-    - name: metrics # Assuming 'metrics' is the intended name, aligns with values structure
-      port: {{ .Values.service.main.ports.metrics.port }}
-      targetPort: {{ .Values.service.main.ports.metrics.targetPort }}
-      protocol: {{ .Values.service.main.ports.metrics.protocol | default "TCP" }}
+    - name: metrics
+      port: {{ .Values.service.port }}
+      targetPort: {{ .Values.service.targetPort }}
+      protocol: TCP
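A sketch of the flat values block the right-hand template now expects; the keys are inferred from the .Values.service.* references above and the numbers are illustrative:

    service:
      port: 9876        # exposed as .Values.service.port
      targetPort: 9876  # .Values.service.targetPort; should match the container's metrics port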


@@ -45,8 +45,7 @@ controllers:
       # -- Annotations for the exporter pod.
       annotations: {}
       # -- Labels for the exporter pod.
-      labels:
-        app.kubernetes.io/component: exporter # Ensure pods get the component label for service selection
+      labels: {} # The common library will add its own default labels.
       # -- Node selector for scheduling exporter pods.
       nodeSelector: {}
       # -- Tolerations for scheduling exporter pods.
@@ -87,15 +86,13 @@ controllers:
         #   key: mykey
         # -- Ports for the exporter container.
-        # Expected by Kubernetes and bjw-s common library as a list of objects.
         ports:
-          - name: metrics # Name of the port, referenced by Service's targetPort
+          metrics: # Name of the port, will be used in Service definition
             # -- Port number for the metrics endpoint on the container.
-            containerPort: 9876
+            port: 9876 # Default, should match service.targetPort
             # -- Protocol for the metrics port.
-            protocol: TCP
+            protocol: TCP # Common library defaults to TCP if not specified.
-            # -- Whether this port definition is enabled. Specific to bjw-s common library.
-            enabled: true
+            enabled: true # This port is enabled
         # -- CPU and memory resource requests and limits for the exporter container.
         resources:


@@ -92,18 +92,16 @@ def discover_iperf_servers():
         logging.info(f"Discovering iperf3 servers with label '{label_selector}' in namespace '{namespace}'")
-        # Use list_namespaced_pod to query only the specified namespace
-        ret = v1.list_namespaced_pod(namespace=namespace, label_selector=label_selector, watch=False)
+        ret = v1.list_pod_for_all_namespaces(label_selector=label_selector, watch=False)
         servers = []
         for item in ret.items:
-            # No need to filter by namespace here as the API call is already namespaced
             if item.status.pod_ip and item.status.phase == 'Running':
                 servers.append({
                     'ip': item.status.pod_ip,
                     'node_name': item.spec.node_name # Node where the iperf server pod is running
                 })
-        logging.info(f"Discovered {len(servers)} iperf3 server pods in namespace '{namespace}'.")
+        logging.info(f"Discovered {len(servers)} iperf3 server pods.")
         return servers
     except config.ConfigException as e:
         logging.error(f"Kubernetes config error: {e}. Is the exporter running in a cluster with RBAC permissions?")