Compare commits

No commits in common. "587290f1fb90d6081e4495b19eb3149544ebfb1a" and "49fb881f24be7abdc78fdb88cd0d486f4796abfd" have entirely different histories.

12 changed files with 161 additions and 778 deletions

@@ -19,16 +19,7 @@ jobs:
- name: Set up Helm
uses: azure/setup-helm@v3
with:
version: v3.10.0 # Using a specific version, can be updated
- name: Add Helm repositories
run: |
helm repo add bjw-s https://bjw-s-labs.github.io/helm-charts/ --force-update
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts --force-update
helm repo update
- name: Build Helm chart dependencies
run: helm dependency build ./charts/iperf3-monitor
version: v3.10.0
- name: Helm Lint
run: helm lint ./charts/iperf3-monitor
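
For local verification, the removed steps map directly onto the Helm CLI. A sketch reproducing the old workflow's full sequence before linting (same repositories and chart path as above):

helm repo add bjw-s https://bjw-s-labs.github.io/helm-charts/ --force-update
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts --force-update
helm repo update
helm dependency build ./charts/iperf3-monitor
helm lint ./charts/iperf3-monitor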

@@ -22,15 +22,6 @@ jobs:
with:
version: v3.10.0
- name: Add Helm repositories
run: |
helm repo add bjw-s https://bjw-s-labs.github.io/helm-charts/ --force-update
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts --force-update
helm repo update
- name: Build Helm chart dependencies
run: helm dependency build ./charts/iperf3-monitor
- name: Helm Lint
run: helm lint ./charts/iperf3-monitor
@@ -95,15 +86,6 @@ jobs:
sudo wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq &&\
sudo chmod +x /usr/bin/yq
- name: Add Helm repositories
run: |
helm repo add bjw-s https://bjw-s-labs.github.io/helm-charts/ --force-update
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts --force-update
helm repo update
- name: Build Helm chart dependencies
run: helm dependency build ./charts/iperf3-monitor
- name: Set Chart Version from Tag
run: |
VERSION=$(echo "${{ github.ref_name }}" | sed 's/^v//')
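
The sed expression only strips a leading "v" from the pushed tag so the chart version is plain semver. A quick sketch of the transformation:

echo "v1.2.3" | sed 's/^v//'   # prints: 1.2.3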

README.md
@@ -74,123 +74,37 @@ nameOverride: ""
# -- Override the fully qualified app name.
fullnameOverride: ""
# Exporter Configuration (`controllers.exporter`)
# The iperf3 exporter is managed under the `controllers.exporter` section,
# leveraging the `bjw-s/common-library` for robust workload management.
controllers:
exporter:
# -- Enable the exporter controller.
enabled: true
# -- Set the controller type for the exporter.
# Valid options are "deployment" or "daemonset".
# Use "daemonset" for N-to-N node monitoring where an exporter runs on each node (or selected nodes).
# Use "deployment" for a centralized exporter (typically with replicaCount: 1).
# @default -- "deployment"
type: deployment
# -- Number of desired exporter pods. Only used if type is "deployment".
# @default -- 1
replicas: 1
exporter:
# -- Configuration for the exporter container image.
image:
# -- The container image repository for the exporter.
repository: ghcr.io/malarinv/iperf3-monitor
# -- The container image tag for the exporter. If not set, the chart's appVersion is used.
tag: ""
# -- The image pull policy for the exporter container.
pullPolicy: IfNotPresent
# -- Number of exporter pod replicas. Typically 1 is sufficient.
replicaCount: 1
# -- Application-specific configuration for the iperf3 exporter.
# These values are used to populate environment variables for the exporter container.
appConfig:
# -- Interval in seconds between complete test cycles (i.e., testing all server nodes).
testInterval: 300
# -- Log level for the iperf3 exporter (e.g., DEBUG, INFO, WARNING, ERROR, CRITICAL).
logLevel: INFO
# -- Timeout in seconds for a single iperf3 test run.
testTimeout: 10
# -- Protocol to use for testing (tcp or udp).
testProtocol: tcp
# -- iperf3 server port to connect to. Should match the server's listening port.
serverPort: "5201"
# -- Label selector to find iperf3 server pods.
# This is templated. Default: 'app.kubernetes.io/name=<chart-name>,app.kubernetes.io/instance=<release-name>,app.kubernetes.io/component=server'
serverLabelSelector: 'app.kubernetes.io/name={{ include "iperf3-monitor.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=server'
# -- Pod-level configurations for the exporter.
pod:
# -- Annotations for the exporter pod.
annotations: {}
# -- Labels for the exporter pod (the common library adds its own defaults too).
labels: {}
# -- Node selector for scheduling exporter pods. Useful for DaemonSet or specific scheduling with Deployments.
# Example:
# nodeSelector:
# kubernetes.io/os: linux
nodeSelector: {}
# -- Tolerations for scheduling exporter pods.
# Example:
# tolerations:
# - key: "node-role.kubernetes.io/control-plane"
# operator: "Exists"
# effect: "NoSchedule"
tolerations: []
# -- Affinity rules for scheduling exporter pods.
# Example:
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: "kubernetes.io/arch"
# operator: In
# values:
# - amd64
affinity: {}
# -- Security context for the exporter pod.
# securityContext:
# fsGroup: 65534
# runAsUser: 65534
# runAsGroup: 65534
# runAsNonRoot: true
securityContext: {}
# -- Automount service account token for the pod.
automountServiceAccountToken: true
# -- Container-level configurations for the main exporter container.
containers:
exporter: # Name of the primary container
image:
repository: ghcr.io/malarinv/iperf3-monitor
tag: "" # Defaults to .Chart.AppVersion
pullPolicy: IfNotPresent
# -- Custom environment variables for the exporter container.
# These are merged with the ones generated from appConfig.
# env:
# MY_CUSTOM_VAR: "my_value"
env: {}
# -- Ports for the exporter container.
ports:
metrics: # Name of the port
port: 9876 # Container port for metrics
protocol: TCP
enabled: true
# -- CPU and memory resource requests and limits.
# resources:
# -- CPU and memory resource requests and limits for the exporter pod.
# @default -- A small default is provided if commented out.
resources: {}
# requests:
# cpu: "100m"
# memory: "128Mi"
# limits:
# cpu: "500m"
# memory: "256Mi"
resources: {}
# -- Probes configuration for the exporter container.
# probes:
# liveness:
# enabled: true # Example: enable liveness probe
# spec: # Customize probe spec if needed
# initialDelaySeconds: 30
# periodSeconds: 15
# timeoutSeconds: 5
# failureThreshold: 3
probes:
liveness:
enabled: false
readiness:
enabled: false
startup:
enabled: false
server:
# -- Configuration for the iperf3 server container image (DaemonSet).
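
Under the controllers-based layout documented above, switching the exporter from a centralized Deployment to one pod per node is a single value. A hedged usage sketch (the release name is illustrative):

helm install iperf3-monitor ./charts/iperf3-monitor \
  --set controllers.exporter.type=daemonset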

@@ -1,12 +1,9 @@ dependencies:
dependencies:
- name: kube-prometheus-stack
repository: https://prometheus-community.github.io/helm-charts
version: 75.7.0
version: 75.3.6
- name: prometheus-operator
repository: oci://tccr.io/truecharts
version: 11.5.1
- name: common
repository: https://bjw-s-labs.github.io/helm-charts/
version: 4.1.2
digest: sha256:68485b4e158a6a405073e9c59966d251b62971846cdc9871e41fde46f19aabfe
generated: "2025-07-01T20:32:00.061995907Z"
digest: sha256:3000e63445f8ba8df601cb483f4f77d14c5c4662bff2d16ffcf5cf1f7def314b
generated: "2025-06-20T17:25:44.538372209+05:30"

@@ -32,6 +32,3 @@ dependencies:
version: ">=8.11.1"
repository: "oci://tccr.io/truecharts"
condition: "dependencies.install, serviceMonitor.enabled, dependencies.useTrueChartsPrometheusOperator"
- name: common
version: "4.1.2"
repository: "https://bjw-s-labs.github.io/helm-charts/"

@@ -1,129 +0,0 @@
{{- /*
This template is responsible for rendering the 'exporter' controller (Deployment or DaemonSet)
by calling the bjw-s common library.
The primary values for the exporter are expected under .Values.controllers.exporter.
Modifications to environment variables and service account are handled here before
passing the configuration to the common library.
*/}}
{{- /*
Prepare a local, modifiable copy of the .Values. This allows us to adjust the
exporter controller's configuration (like env vars and SA) specifically for this chart's needs
before the common library processes it.
Convert to map[string]interface{} via toYaml/fromYaml to ensure compatibility with 'dig'.
*/}}
{{- $localValues := .Values | toYaml | fromYaml | deepCopy -}}
{{- $chart := .Chart -}}
{{- $release := .Release -}}
{{- $appName := include "iperf3-monitor.name" . -}}
{{- $fullName := include "iperf3-monitor.fullname" . -}}
{{- /*
Define the key for the exporter controller, typically "exporter" as per our values.yaml.
*/}}
{{- $exporterControllerKey := "exporter" -}}
{{- /*
Attempt to get the exporter controller's configuration block.
Proceed with modifications only if the exporter controller is defined.
*/}}
{{- $exporterControllerConfig := get $localValues.controllers $exporterControllerKey -}}
{{- if $exporterControllerConfig -}}
{{- /*
Construct the base set of environment variables required by the iperf3-exporter application.
These are derived from the 'appConfig' section of the exporter's controller configuration.
*/}}
{{- $baseExporterEnv := dict -}}
{{- if $exporterControllerConfig.appConfig -}}
{{- $_ := set $baseExporterEnv "SOURCE_NODE_NAME" (dict "valueFrom" (dict "fieldRef" (dict "fieldPath" "spec.nodeName"))) -}}
{{- $_ := set $baseExporterEnv "IPERF_TEST_INTERVAL" ($exporterControllerConfig.appConfig.testInterval | default "300" | toString) -}}
{{- $_ := set $baseExporterEnv "IPERF_TEST_PROTOCOL" ($exporterControllerConfig.appConfig.testProtocol | default "tcp") -}}
{{- $_ := set $baseExporterEnv "LOG_LEVEL" ($exporterControllerConfig.appConfig.logLevel | default "INFO") -}}
{{- $_ := set $baseExporterEnv "IPERF_SERVER_PORT" ($exporterControllerConfig.appConfig.serverPort | default "5201" | toString) -}}
{{- $_ := set $baseExporterEnv "IPERF_SERVER_NAMESPACE" (dict "valueFrom" (dict "fieldRef" (dict "fieldPath" "metadata.namespace"))) -}}
{{- $_ := set $baseExporterEnv "IPERF_TEST_TIMEOUT" ($exporterControllerConfig.appConfig.testTimeout | default "10" | toString) -}}
{{- $serverLabelSelectorDefault := printf "app.kubernetes.io/name=%s,app.kubernetes.io/instance=%s,app.kubernetes.io/component=server" $appName $release.Name -}}
{{- $serverLabelSelector := tpl ($exporterControllerConfig.appConfig.serverLabelSelector | default $serverLabelSelectorDefault) . -}}
{{- $_ := set $baseExporterEnv "IPERF_SERVER_LABEL_SELECTOR" $serverLabelSelector -}}
{{- end -}}
{{- /*
Merge the base environment variables with any user-defined environment variables.
User-defined variables (from .Values.controllers.exporter.containers.exporter.env)
will take precedence in case of conflicting keys.
*/}}
{{- $userExporterEnv := $exporterControllerConfig.containers.exporter.env | default dict -}}
{{- $finalExporterEnv := mergeOverwrite $baseExporterEnv $userExporterEnv -}}
{{- /*
Ensure the container structure exists and update its 'env' field with the final set.
The common library expects this under controllers.<key>.containers.<containerName>.env
*/}}
{{- if not $exporterControllerConfig.containers -}}
{{- $_ := set $exporterControllerConfig "containers" dict -}}
{{- end -}}
{{- if not $exporterControllerConfig.containers.exporter -}}
{{- $_ := set $exporterControllerConfig.containers "exporter" dict -}}
{{- end -}}
{{- $_ := set $exporterControllerConfig.containers.exporter "env" $finalExporterEnv -}}
{{- /*
Ensure the container image tag is set, defaulting to Chart.AppVersion if empty,
as the common library validation requires it during 'helm template'.
*/}}
{{- $exporterContainerCfg := get $exporterControllerConfig.containers "exporter" -}}
{{- if $exporterContainerCfg -}}
{{- if not $exporterContainerCfg.image.tag -}}
{{- if $chart.AppVersion -}}
{{- $_ := set $exporterContainerCfg.image "tag" (printf "v%s" $chart.AppVersion) -}}
{{- else -}}
{{- fail (printf "Error: Container image tag is not specified for controller '%s', container '%s', and Chart.AppVersion is also empty." $exporterControllerKey "exporter") -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /*
Configure the Service Account for the exporter controller.
This ensures the controller pod uses the ServiceAccount that is intended by this chart's
RBAC configuration (.Values.rbac.create and .Values.serviceAccount.name).
*/}}
{{- $serviceAccountNameFromValues := $localValues.serviceAccount.name | default (printf "%s-exporter" $fullName) -}}
{{- if not $exporterControllerConfig.serviceAccount -}}
{{- $_ := set $exporterControllerConfig "serviceAccount" dict -}}
{{- end -}}
{{- $_ := set $exporterControllerConfig.serviceAccount "name" $serviceAccountNameFromValues -}}
{{- $_ := set $exporterControllerConfig.serviceAccount "create" $localValues.rbac.create -}}
{{- $_ := set $exporterControllerConfig.serviceAccount "automountServiceAccountToken" ($exporterControllerConfig.pod.automountServiceAccountToken | default true) -}}
{{- /*
Replace the original exporter controller config in our $localValues copy
with the modified version (that now includes the correct env and SA settings).
*/}}
{{- $_ := set $localValues.controllers $exporterControllerKey $exporterControllerConfig -}}
{{- end -}}
{{- /*
Ensure .Values.global exists and is a map, as the common library expects it.
*/}}
{{- if not (get $localValues "global") -}}
{{- $_ := set $localValues "global" dict -}}
{{- else if not (kindIs "map" (get $localValues "global")) -}}
{{- $_ := set $localValues "global" dict -}}
{{- end -}}
{{- /*
Ensure defaultPodOptionsStrategy exists, as common lib expects it at the root of Values.
*/}}
{{- if not (get $localValues "defaultPodOptionsStrategy") -}}
{{- $_ := set $localValues "defaultPodOptionsStrategy" "overwrite" -}}
{{- end -}}
{{- /*
Call the common library's main render function for controllers.
This function iterates through all controllers defined under $localValues.controllers
(in our case, just "exporter") and renders them using their specified type and configuration.
The context passed must mirror the global Helm context, including 'Values', 'Chart', 'Release', 'Capabilities', and 'Template'.
*/}}
{{- include "bjw-s.common.render.controllers" (dict "Values" $localValues "Chart" $chart "Release" $release "Capabilities" .Capabilities "Template" .Template) | nindent 0 -}}

@@ -0,0 +1,50 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "iperf3-monitor.fullname" . }}-exporter
  labels:
    {{- include "iperf3-monitor.labels" . | nindent 4 }}
    app.kubernetes.io/component: exporter
spec:
  replicas: {{ .Values.exporter.replicaCount }}
  selector:
    matchLabels:
      {{- include "iperf3-monitor.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/component: exporter
  template:
    metadata:
      labels:
        {{- include "iperf3-monitor.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/component: exporter
    spec:
      serviceAccountName: {{ include "iperf3-monitor.serviceAccountName" . }}
      containers:
      - name: iperf3-exporter
        image: "{{ .Values.exporter.image.repository }}:{{ .Values.exporter.image.tag | default .Chart.AppVersion }}"
        imagePullPolicy: {{ .Values.exporter.image.pullPolicy }}
        ports:
        - containerPort: {{ .Values.service.targetPort }}
          name: metrics
        env:
        - name: SOURCE_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: IPERF_TEST_INTERVAL
          value: "{{ .Values.exporter.testInterval }}"
        - name: IPERF_TEST_PROTOCOL
          value: "{{ .Values.exporter.testProtocol }}"
        - name: LOG_LEVEL
          value: "{{ .Values.exporter.logLevel }}"
        - name: IPERF_SERVER_PORT
          value: "5201" # Hardcoded as per server DaemonSet
        - name: IPERF_SERVER_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: IPERF_SERVER_LABEL_SELECTOR
          value: 'app.kubernetes.io/name={{ include "iperf3-monitor.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=server'
        {{- with .Values.exporter.resources }}
        resources:
          {{- toYaml . | nindent 10 }}
        {{- end }}
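
The hardcoded IPERF_SERVER_LABEL_SELECTOR above only works if the server pods actually carry those labels. A hedged check (namespace and release are placeholders):

kubectl get pods -n <namespace> \
  -l 'app.kubernetes.io/name=iperf3-monitor,app.kubernetes.io/instance=<release>,app.kubernetes.io/component=server'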

@@ -7,10 +7,9 @@ metadata:
{{- include "iperf3-monitor.labels" . | nindent 4 }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
kind: ClusterRole
metadata:
name: {{ include "iperf3-monitor.fullname" . }}-role
namespace: {{ .Release.Namespace }}
labels:
{{- include "iperf3-monitor.labels" . | nindent 4 }}
rules:
@@ -19,10 +18,9 @@ rules:
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
kind: ClusterRoleBinding
metadata:
name: {{ include "iperf3-monitor.fullname" . }}-rb
namespace: {{ .Release.Namespace }}
labels:
{{- include "iperf3-monitor.labels" . | nindent 4 }}
subjects:
@@ -30,7 +28,7 @@ subjects:
name: {{ include "iperf3-monitor.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role # Changed from ClusterRole
kind: ClusterRole
name: {{ include "iperf3-monitor.fullname" . }}-role
apiGroup: rbac.authorization.k8s.io
{{- end -}}
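
The move from Role/RoleBinding to ClusterRole/ClusterRoleBinding matches the exporter's switch to cluster-wide pod discovery. A hedged post-install check (the ServiceAccount name follows values.yaml's default "iperf3-monitor"; the namespace is a placeholder):

kubectl auth can-i list pods --all-namespaces \
  --as=system:serviceaccount:<namespace>:iperf3-monitor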

@@ -11,7 +11,7 @@ spec:
{{- include "iperf3-monitor.selectorLabels" . | nindent 4 }}
app.kubernetes.io/component: exporter
ports:
- name: metrics # Assuming 'metrics' is the intended name, aligns with values structure
port: {{ .Values.service.main.ports.metrics.port }}
targetPort: {{ .Values.service.main.ports.metrics.targetPort }}
protocol: {{ .Values.service.main.ports.metrics.protocol | default "TCP" }}
- name: metrics
port: {{ .Values.service.port }}
targetPort: {{ .Values.service.targetPort }}
protocol: TCP
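
With the flattened values, port and targetPort both resolve to 9876. A smoke-test sketch (the Service name assumes the chart's fullname helper; names are placeholders):

kubectl -n <namespace> port-forward svc/<release>-iperf3-monitor 9876:9876 &
curl -s http://localhost:9876/metrics | head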

@@ -8,95 +8,33 @@ nameOverride: ""
# -- Override the fully qualified app name.
fullnameOverride: ""
controllers:
exporter:
# -- Enable the exporter controller.
enabled: true
# -- Set the controller type for the exporter.
# Valid options are "deployment" or "daemonset".
# @default -- "deployment"
type: deployment
# -- Number of desired exporter pods. Only used if type is "deployment".
# @default -- 1
replicas: 1
# -- Application-specific configuration for the iperf3 exporter.
# These values are used to populate environment variables for the exporter container.
appConfig:
# -- Interval in seconds between complete test cycles (i.e., testing all server nodes).
testInterval: 300
# -- Log level for the iperf3 exporter (e.g., DEBUG, INFO, WARNING, ERROR, CRITICAL).
logLevel: INFO
# -- Timeout in seconds for a single iperf3 test run.
testTimeout: 10
# -- Protocol to use for testing (tcp or udp).
testProtocol: tcp
# -- iperf3 server port to connect to. Should match the server's listening port.
# @default -- "5201" (hardcoded in the original chart for server daemonset)
serverPort: "5201"
# -- Label selector to find iperf3 server pods.
# This will be templated in the actual deployment.
# Example default (if not overridden by template logic): 'app.kubernetes.io/component=server'
serverLabelSelector: 'app.kubernetes.io/name={{ include "iperf3-monitor.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=server'
# -- Pod-level configurations for the exporter, leveraging bjw-s common library structure.
pod:
# -- Annotations for the exporter pod.
annotations: {}
# -- Labels for the exporter pod.
labels: {} # The common library will add its own default labels.
# -- Node selector for scheduling exporter pods.
nodeSelector: {}
# -- Tolerations for scheduling exporter pods.
tolerations: []
# -- Affinity rules for scheduling exporter pods.
affinity: {}
# -- Security context for the exporter pod.
securityContext: {}
# fsGroup: 65534
# runAsUser: 65534
# runAsGroup: 65534
# runAsNonRoot: true
# -- Automount service account token for the pod.
automountServiceAccountToken: true # Default from common lib
# -- Container-level configurations for the main exporter container.
containers:
exporter: # This is the primary container, name it 'exporter'
exporter:
# -- Configuration for the exporter container image.
image:
# -- The container image repository for the exporter.
repository: ghcr.io/malarinv/iperf3-monitor
# -- The container image tag for the exporter. If not set, the chart's appVersion is used.
tag: "" # Defaults to .Chart.AppVersion via common library
tag: ""
# -- The image pull policy for the exporter container.
pullPolicy: IfNotPresent
# -- Environment variables for the exporter container.
# The actual env map will be constructed in the main chart template
# and passed to the common library. This section is for user overrides
# if they want to directly set other env vars using common lib's env schema.
env: {}
# Example:
# MY_CUSTOM_VAR: "my_value"
# ANOTHER_VAR:
# valueFrom:
# secretKeyRef:
# name: mysecret
# key: mykey
# -- Number of exporter pod replicas. Typically 1 is sufficient.
replicaCount: 1
# -- Ports for the exporter container.
# Expected by Kubernetes and bjw-s common library as a list of objects.
ports:
- name: metrics # Name of the port, referenced by Service's targetPort
# -- Port number for the metrics endpoint on the container.
containerPort: 9876
# -- Protocol for the metrics port.
protocol: TCP
# -- Whether this port definition is enabled. Specific to bjw-s common library.
enabled: true
# -- Interval in seconds between complete test cycles (i.e., testing all server nodes).
testInterval: 300
# -- CPU and memory resource requests and limits for the exporter container.
# -- Log level for the iperf3 exporter (e.g., DEBUG, INFO, WARNING, ERROR, CRITICAL).
logLevel: INFO
# -- Timeout in seconds for a single iperf3 test run.
testTimeout: 10
# -- Protocol to use for testing (tcp or udp).
testProtocol: tcp
# -- CPU and memory resource requests and limits for the exporter pod.
# @default -- A small default is provided if commented out.
resources:
{}
# requests:
@@ -106,16 +44,6 @@ controllers:
# cpu: "500m"
# memory: "256Mi"
# -- Probes configuration for the exporter container.
probes:
liveness:
enabled: false
readiness:
enabled: false
startup:
enabled: false
# Server configuration (iperf3 server daemonset)
server:
# -- Configuration for the iperf3 server container image (DaemonSet).
image:
@@ -125,6 +53,8 @@
tag: latest
# -- CPU and memory resource requests and limits for the iperf3 server pods (DaemonSet).
# These should be very low as the server is mostly idle.
# @default -- A small default is provided if commented out.
resources:
{}
# requests:
@@ -135,9 +65,13 @@
# memory: "128Mi"
# -- Node selector for scheduling iperf3 server pods.
# Use this to restrict the DaemonSet to a subset of nodes.
# @default -- {} (schedule on all nodes)
nodeSelector: {}
# -- Tolerations for scheduling iperf3 server pods on tainted nodes.
# -- Tolerations for scheduling iperf3 server pods on tainted nodes (e.g., control-plane nodes).
# This is often necessary to include master nodes in the test mesh.
# @default -- Tolerates control-plane and master taints.
tolerations:
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
@@ -146,62 +80,60 @@
operator: "Exists"
effect: "NoSchedule"
# RBAC and ServiceAccount settings
# These are for the exporter. The exporter deployment (managed by common library)
# will need to use the ServiceAccount specified here or one created by the library.
rbac:
# -- If true, create ServiceAccount, ClusterRole, and ClusterRoleBinding for the exporter.
# Set to false if you manage RBAC externally.
create: true
serviceAccount:
# -- The name of the ServiceAccount to use/create for the exporter pod.
# If rbac.create is true, this SA is created. The exporter pod must use this SA.
# -- The name of the ServiceAccount to use for the exporter pod.
# Only used if rbac.create is false. If not set, it defaults to the chart's fullname.
name: "iperf3-monitor"
# Service Monitor configuration for Prometheus
serviceMonitor:
# -- If true, create a ServiceMonitor resource.
# -- If true, create a ServiceMonitor resource for integration with Prometheus Operator.
# Requires a running Prometheus Operator in the cluster.
enabled: true
# -- Scrape interval for the ServiceMonitor.
# -- Scrape interval for the ServiceMonitor. How often Prometheus scrapes the exporter metrics.
interval: 60s
# -- Scrape timeout for the ServiceMonitor.
# -- Scrape timeout for the ServiceMonitor. How long Prometheus waits for metrics response.
scrapeTimeout: 30s
# Service configuration for the exporter
# This defines how the exporter is exposed.
# The common library can also manage services, or we can use our own template.
# This structure is compatible with bjw-s common library's service management if we choose to use it.
# -- Configuration for the exporter Service.
service:
main: # A key for the service, 'main' is a common convention.
# -- Enable the exporter service.
enabled: true
# -- Service type.
type: ClusterIP # ClusterIP is typical for internal services scraped by Prometheus.
# -- Ports configuration for the service.
ports:
metrics: # Name of the service port, should align with a container port name.
# -- Port number on which the service is exposed.
# -- Service type. ClusterIP is typically sufficient.
type: ClusterIP
# -- Port on which the exporter service is exposed.
port: 9876
# -- Target port on the exporter pod. Can be a number or name.
# Refers to the 'metrics' port defined in controllers.exporter.containers.exporter.ports.
targetPort: metrics
protocol: TCP
# -- Target port on the exporter pod.
targetPort: 9876
# Network Policy (optional)
# -- Optional configuration for a network policy to allow traffic to the iperf3 server DaemonSet.
# This is often necessary if you are using a network policy controller.
networkPolicy:
# -- If true, create a NetworkPolicy resource.
enabled: false
# -- Source selectors for ingress rules.
# -- Specify source selectors if needed (e.g., pods in a specific namespace).
from: []
# -- Namespace selectors for ingress rules.
# -- Specify namespace selectors if needed.
namespaceSelector: {}
# -- Pod selectors for ingress rules.
# -- Specify pod selectors if needed.
podSelector: {}
# Dependency Configuration (for Prometheus Operator)
# -----------------------------------------------------------------------------
# Dependency Configuration
# -----------------------------------------------------------------------------
dependencies:
# -- Set to false by default. Set to true to install a Prometheus operator dependency (used if serviceMonitor.enabled=true).
# -- If false (default), and serviceMonitor.enabled is true, you must have a compatible Prometheus Operator already running in your cluster.
install: false
# -- If true, use TrueCharts Prometheus Operator instead of kube-prometheus-stack (used if dependencies.install is true).
# -- Set to true to install Prometheus operator dependency if serviceMonitor.enabled is also true.
# -- Set to false to disable the installation of Prometheus operator dependency,
# -- regardless of serviceMonitor.enabled. This is useful if you have Prometheus
# -- Operator installed and managed separately in your cluster.
install: true
# -- Set to true to use the TrueCharts Prometheus Operator instead of kube-prometheus-stack.
# This chart's ServiceMonitor resources require a Prometheus Operator to be functional.
# If serviceMonitor.enabled is true and dependencies.install is true,
# one of these two dependencies will be pulled based on this flag.
useTrueChartsPrometheusOperator: false
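
Note that dependencies.install now defaults to true in this version of the file, so a fresh install pulls a Prometheus operator chart unless explicitly disabled. A hedged install sketch:

helm dependency build ./charts/iperf3-monitor
helm install iperf3-monitor ./charts/iperf3-monitor \
  --set dependencies.install=false   # when a Prometheus Operator is already managed separately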

@@ -92,18 +92,16 @@ def discover_iperf_servers():
logging.info(f"Discovering iperf3 servers with label '{label_selector}' in namespace '{namespace}'")
# Use list_namespaced_pod to query only the specified namespace
ret = v1.list_namespaced_pod(namespace=namespace, label_selector=label_selector, watch=False)
ret = v1.list_pod_for_all_namespaces(label_selector=label_selector, watch=False)
servers = []
for item in ret.items:
# No need to filter by namespace here as the API call is already namespaced
if item.status.pod_ip and item.status.phase == 'Running':
servers.append({
'ip': item.status.pod_ip,
'node_name': item.spec.node_name # Node where the iperf server pod is running
})
logging.info(f"Discovered {len(servers)} iperf3 server pods in namespace '{namespace}'.")
logging.info(f"Discovered {len(servers)} iperf3 server pods.")
return servers
except config.ConfigException as e:
logging.error(f"Kubernetes config error: {e}. Is the exporter running in a cluster with RBAC permissions?")

@@ -1,347 +0,0 @@
#!/usr/bin/env bash
# Copyright The Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The install script is based off of the MIT-licensed script from glide,
# the package manager for Go: https://github.com/Masterminds/glide.sh/blob/master/get
: ${BINARY_NAME:="helm"}
: ${USE_SUDO:="true"}
: ${DEBUG:="false"}
: ${VERIFY_CHECKSUM:="true"}
: ${VERIFY_SIGNATURES:="false"}
: ${HELM_INSTALL_DIR:="/usr/local/bin"}
: ${GPG_PUBRING:="pubring.kbx"}
HAS_CURL="$(type "curl" &> /dev/null && echo true || echo false)"
HAS_WGET="$(type "wget" &> /dev/null && echo true || echo false)"
HAS_OPENSSL="$(type "openssl" &> /dev/null && echo true || echo false)"
HAS_GPG="$(type "gpg" &> /dev/null && echo true || echo false)"
HAS_GIT="$(type "git" &> /dev/null && echo true || echo false)"
HAS_TAR="$(type "tar" &> /dev/null && echo true || echo false)"
# initArch discovers the architecture for this system.
initArch() {
ARCH=$(uname -m)
case $ARCH in
armv5*) ARCH="armv5";;
armv6*) ARCH="armv6";;
armv7*) ARCH="arm";;
aarch64) ARCH="arm64";;
x86) ARCH="386";;
x86_64) ARCH="amd64";;
i686) ARCH="386";;
i386) ARCH="386";;
esac
}
# initOS discovers the operating system for this system.
initOS() {
OS=$(echo `uname`|tr '[:upper:]' '[:lower:]')
case "$OS" in
# Minimalist GNU for Windows
mingw*|cygwin*) OS='windows';;
esac
}
# runs the given command as root (detects if we are root already)
runAsRoot() {
if [ $EUID -ne 0 -a "$USE_SUDO" = "true" ]; then
sudo "${@}"
else
"${@}"
fi
}
# verifySupported checks that the os/arch combination is supported for
# binary builds, as well whether or not necessary tools are present.
verifySupported() {
local supported="darwin-amd64\ndarwin-arm64\nlinux-386\nlinux-amd64\nlinux-arm\nlinux-arm64\nlinux-ppc64le\nlinux-s390x\nlinux-riscv64\nwindows-amd64\nwindows-arm64"
if ! echo "${supported}" | grep -q "${OS}-${ARCH}"; then
echo "No prebuilt binary for ${OS}-${ARCH}."
echo "To build from source, go to https://github.com/helm/helm"
exit 1
fi
if [ "${HAS_CURL}" != "true" ] && [ "${HAS_WGET}" != "true" ]; then
echo "Either curl or wget is required"
exit 1
fi
if [ "${VERIFY_CHECKSUM}" == "true" ] && [ "${HAS_OPENSSL}" != "true" ]; then
echo "In order to verify checksum, openssl must first be installed."
echo "Please install openssl or set VERIFY_CHECKSUM=false in your environment."
exit 1
fi
if [ "${VERIFY_SIGNATURES}" == "true" ]; then
if [ "${HAS_GPG}" != "true" ]; then
echo "In order to verify signatures, gpg must first be installed."
echo "Please install gpg or set VERIFY_SIGNATURES=false in your environment."
exit 1
fi
if [ "${OS}" != "linux" ]; then
echo "Signature verification is currently only supported on Linux."
echo "Please set VERIFY_SIGNATURES=false or verify the signatures manually."
exit 1
fi
fi
if [ "${HAS_GIT}" != "true" ]; then
echo "[WARNING] Could not find git. It is required for plugin installation."
fi
if [ "${HAS_TAR}" != "true" ]; then
echo "[ERROR] Could not find tar. It is required to extract the helm binary archive."
exit 1
fi
}
# checkDesiredVersion checks if the desired version is available.
checkDesiredVersion() {
if [ "x$DESIRED_VERSION" == "x" ]; then
# Get tag from release URL
local latest_release_url="https://get.helm.sh/helm-latest-version"
local latest_release_response=""
if [ "${HAS_CURL}" == "true" ]; then
latest_release_response=$( curl -L --silent --show-error --fail "$latest_release_url" 2>&1 || true )
elif [ "${HAS_WGET}" == "true" ]; then
latest_release_response=$( wget "$latest_release_url" -q -O - 2>&1 || true )
fi
TAG=$( echo "$latest_release_response" | grep '^v[0-9]' )
if [ "x$TAG" == "x" ]; then
printf "Could not retrieve the latest release tag information from %s: %s\n" "${latest_release_url}" "${latest_release_response}"
exit 1
fi
else
TAG=$DESIRED_VERSION
fi
}
# checkHelmInstalledVersion checks which version of helm is installed and
# if it needs to be changed.
checkHelmInstalledVersion() {
if [[ -f "${HELM_INSTALL_DIR}/${BINARY_NAME}" ]]; then
local version=$("${HELM_INSTALL_DIR}/${BINARY_NAME}" version --template="{{ .Version }}")
if [[ "$version" == "$TAG" ]]; then
echo "Helm ${version} is already ${DESIRED_VERSION:-latest}"
return 0
else
echo "Helm ${TAG} is available. Changing from version ${version}."
return 1
fi
else
return 1
fi
}
# downloadFile downloads the latest binary package and also the checksum
# for that binary.
downloadFile() {
HELM_DIST="helm-$TAG-$OS-$ARCH.tar.gz"
DOWNLOAD_URL="https://get.helm.sh/$HELM_DIST"
CHECKSUM_URL="$DOWNLOAD_URL.sha256"
HELM_TMP_ROOT="$(mktemp -dt helm-installer-XXXXXX)"
HELM_TMP_FILE="$HELM_TMP_ROOT/$HELM_DIST"
HELM_SUM_FILE="$HELM_TMP_ROOT/$HELM_DIST.sha256"
echo "Downloading $DOWNLOAD_URL"
if [ "${HAS_CURL}" == "true" ]; then
curl -SsL "$CHECKSUM_URL" -o "$HELM_SUM_FILE"
curl -SsL "$DOWNLOAD_URL" -o "$HELM_TMP_FILE"
elif [ "${HAS_WGET}" == "true" ]; then
wget -q -O "$HELM_SUM_FILE" "$CHECKSUM_URL"
wget -q -O "$HELM_TMP_FILE" "$DOWNLOAD_URL"
fi
}
# verifyFile verifies the SHA256 checksum of the binary package
# and the GPG signatures for both the package and checksum file
# (depending on settings in environment).
verifyFile() {
if [ "${VERIFY_CHECKSUM}" == "true" ]; then
verifyChecksum
fi
if [ "${VERIFY_SIGNATURES}" == "true" ]; then
verifySignatures
fi
}
# installFile installs the Helm binary.
installFile() {
HELM_TMP="$HELM_TMP_ROOT/$BINARY_NAME"
mkdir -p "$HELM_TMP"
tar xf "$HELM_TMP_FILE" -C "$HELM_TMP"
HELM_TMP_BIN="$HELM_TMP/$OS-$ARCH/helm"
echo "Preparing to install $BINARY_NAME into ${HELM_INSTALL_DIR}"
runAsRoot cp "$HELM_TMP_BIN" "$HELM_INSTALL_DIR/$BINARY_NAME"
echo "$BINARY_NAME installed into $HELM_INSTALL_DIR/$BINARY_NAME"
}
# verifyChecksum verifies the SHA256 checksum of the binary package.
verifyChecksum() {
printf "Verifying checksum... "
local sum=$(openssl sha1 -sha256 ${HELM_TMP_FILE} | awk '{print $2}')
local expected_sum=$(cat ${HELM_SUM_FILE})
if [ "$sum" != "$expected_sum" ]; then
echo "SHA sum of ${HELM_TMP_FILE} does not match. Aborting."
exit 1
fi
echo "Done."
}
# verifySignatures obtains the latest KEYS file from GitHub main branch
# as well as the signature .asc files from the specific GitHub release,
# then verifies that the release artifacts were signed by a maintainer's key.
verifySignatures() {
printf "Verifying signatures... "
local keys_filename="KEYS"
local github_keys_url="https://raw.githubusercontent.com/helm/helm/main/${keys_filename}"
if [ "${HAS_CURL}" == "true" ]; then
curl -SsL "${github_keys_url}" -o "${HELM_TMP_ROOT}/${keys_filename}"
elif [ "${HAS_WGET}" == "true" ]; then
wget -q -O "${HELM_TMP_ROOT}/${keys_filename}" "${github_keys_url}"
fi
local gpg_keyring="${HELM_TMP_ROOT}/keyring.gpg"
local gpg_homedir="${HELM_TMP_ROOT}/gnupg"
mkdir -p -m 0700 "${gpg_homedir}"
local gpg_stderr_device="/dev/null"
if [ "${DEBUG}" == "true" ]; then
gpg_stderr_device="/dev/stderr"
fi
gpg --batch --quiet --homedir="${gpg_homedir}" --import "${HELM_TMP_ROOT}/${keys_filename}" 2> "${gpg_stderr_device}"
gpg --batch --no-default-keyring --keyring "${gpg_homedir}/${GPG_PUBRING}" --export > "${gpg_keyring}"
local github_release_url="https://github.com/helm/helm/releases/download/${TAG}"
if [ "${HAS_CURL}" == "true" ]; then
curl -SsL "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" -o "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc"
curl -SsL "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" -o "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc"
elif [ "${HAS_WGET}" == "true" ]; then
wget -q -O "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc"
wget -q -O "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc"
fi
local error_text="If you think this might be a potential security issue,"
error_text="${error_text}\nplease see here: https://github.com/helm/community/blob/master/SECURITY.md"
local num_goodlines_sha=$(gpg --verify --keyring="${gpg_keyring}" --status-fd=1 "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" 2> "${gpg_stderr_device}" | grep -c -E '^\[GNUPG:\] (GOODSIG|VALIDSIG)')
if [[ ${num_goodlines_sha} -lt 2 ]]; then
echo "Unable to verify the signature of helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256!"
echo -e "${error_text}"
exit 1
fi
local num_goodlines_tar=$(gpg --verify --keyring="${gpg_keyring}" --status-fd=1 "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" 2> "${gpg_stderr_device}" | grep -c -E '^\[GNUPG:\] (GOODSIG|VALIDSIG)')
if [[ ${num_goodlines_tar} -lt 2 ]]; then
echo "Unable to verify the signature of helm-${TAG}-${OS}-${ARCH}.tar.gz!"
echo -e "${error_text}"
exit 1
fi
echo "Done."
}
# fail_trap is executed if an error occurs.
fail_trap() {
result=$?
if [ "$result" != "0" ]; then
if [[ -n "$INPUT_ARGUMENTS" ]]; then
echo "Failed to install $BINARY_NAME with the arguments provided: $INPUT_ARGUMENTS"
help
else
echo "Failed to install $BINARY_NAME"
fi
echo -e "\tFor support, go to https://github.com/helm/helm."
fi
cleanup
exit $result
}
# testVersion tests the installed client to make sure it is working.
testVersion() {
set +e
HELM="$(command -v $BINARY_NAME)"
if [ "$?" = "1" ]; then
echo "$BINARY_NAME not found. Is $HELM_INSTALL_DIR on your "'$PATH?'
exit 1
fi
set -e
}
# help provides possible cli installation arguments
help () {
echo "Accepted cli arguments are:"
echo -e "\t[--help|-h ] ->> prints this help"
echo -e "\t[--version|-v <desired_version>] . When not defined it fetches the latest release tag from the Helm CDN"
echo -e "\te.g. --version v3.0.0 or -v canary"
echo -e "\t[--no-sudo] ->> install without sudo"
}
# cleanup temporary files to avoid https://github.com/helm/helm/issues/2977
cleanup() {
if [[ -d "${HELM_TMP_ROOT:-}" ]]; then
rm -rf "$HELM_TMP_ROOT"
fi
}
# Execution
#Stop execution on any error
trap "fail_trap" EXIT
set -e
# Set debug if desired
if [ "${DEBUG}" == "true" ]; then
set -x
fi
# Parsing input arguments (if any)
export INPUT_ARGUMENTS="${@}"
set -u
while [[ $# -gt 0 ]]; do
case $1 in
'--version'|-v)
shift
if [[ $# -ne 0 ]]; then
export DESIRED_VERSION="${1}"
if [[ "$1" != "v"* ]]; then
echo "Expected version arg ('${DESIRED_VERSION}') to begin with 'v', fixing..."
export DESIRED_VERSION="v${1}"
fi
else
echo -e "Please provide the desired version. e.g. --version v3.0.0 or -v canary"
exit 0
fi
;;
'--no-sudo')
USE_SUDO="false"
;;
'--help'|-h)
help
exit 0
;;
*) exit 1
;;
esac
shift
done
set +u
initArch
initOS
verifySupported
checkDesiredVersion
if ! checkHelmInstalledVersion; then
downloadFile
verifyFile
installFile
fi
testVersion
cleanup
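
For reference, typical invocations of this (now removed) installer, per its own help text (the script filename is illustrative):

./get-helm-3.sh --version v3.10.0   # pin a specific Helm release
./get-helm-3.sh --no-sudo           # install without sudo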