fix: Correct common lib repo URL, rename exporter template (#19)
* fix: Correct common lib repo URL, rename exporter template
  - Reverted common library repository URL in Chart.yaml to https://bjw-s-labs.github.io/helm-charts/.
  - Ensured helm dependency commands are run after adding repositories.
  - Renamed exporter template from exporter-deployment.yaml to exporter-controller.yaml to better reflect its new role with the common library.
  Note: Full helm lint/template validation with dependencies was not possible in the automated environment due to issues with dependency file persistence in the sandbox.

* fix: Integrate bjw-s/common library for exporter controller
  - Corrected bjw-s/common library repository URL in Chart.yaml to the traditional HTTPS URL and ensured dependencies are fetched.
  - Renamed exporter template to exporter-controller.yaml.
  - Updated exporter-controller.yaml to use `bjw-s.common.render.controllers` for rendering.
  - Refined the context passed to the common library to include Values, Chart, Release, and Capabilities, and initialized expected top-level keys (global, defaultPodOptionsStrategy) in the Values.
  - Ensured image.tag defaults to Chart.AppVersion in the template data to pass common library validations.
  - Helm lint and template commands now pass for both Deployment and DaemonSet configurations of the exporter.

* fix: Set dependencies.install to false by default
  - Changed the default value for `dependencies.install` to `false` in values.yaml.
  - Updated comments to clarify that users should explicitly enable it if they need the chart to install a Prometheus Operator dependency.

* fix: Update CI workflow to add Helm repositories and build dependencies

* hotfix: Pass .Template to common lib for tpl context
  - Updated exporter-controller.yaml to include .Template in the dict passed to the bjw-s.common.render.controllers include.
  - This resolves a 'cannot retrieve Template.Basepath' error raised by the tpl function in older Helm versions (such as v3.10.0 in CI) when the tpl context does not contain the .Template object.

---------

Co-authored-by: google-labs-jules[bot] <161369871+google-labs-jules[bot]@users.noreply.github.com>
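For reference, `tpl` on older Helm releases dereferences `.Template.BasePath` internally, so any hand-built context dict passed to a template (or to an include that calls `tpl`) must carry the `.Template` object through. A minimal sketch of the failing versus working pattern; this is illustrative only and not part of the chart itself:

    {{- /* fails on Helm v3.10.0: tpl cannot retrieve Template.BasePath
           because the hand-built context has no "Template" key */}}
    {{- tpl "{{ .Release.Name }}-server" (dict "Release" .Release "Values" .Values) -}}

    {{- /* works: the current .Template object is passed through with the context */}}
    {{- tpl "{{ .Release.Name }}-server" (dict "Release" .Release "Values" .Values "Template" .Template) -}}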
parent 5fa41a6aad
commit a0ecc5c11a
@@ -19,7 +19,16 @@ jobs:
       - name: Set up Helm
         uses: azure/setup-helm@v3
         with:
-          version: v3.10.0
+          version: v3.10.0 # Using a specific version, can be updated
+
+      - name: Add Helm repositories
+        run: |
+          helm repo add bjw-s https://bjw-s-labs.github.io/helm-charts/ --force-update
+          helm repo add prometheus-community https://prometheus-community.github.io/helm-charts --force-update
+          helm repo update
+
+      - name: Build Helm chart dependencies
+        run: helm dependency build ./charts/iperf3-monitor
 
       - name: Helm Lint
         run: helm lint ./charts/iperf3-monitor
README.md
@@ -74,37 +74,123 @@ nameOverride: ""
 # -- Override the fully qualified app name.
 fullnameOverride: ""
 
-exporter:
-  # -- Configuration for the exporter container image.
-  image:
-    # -- The container image repository for the exporter.
-    repository: ghcr.io/malarinv/iperf3-monitor
-    # -- The container image tag for the exporter. If not set, the chart's appVersion is used.
-    tag: ""
-    # -- The image pull policy for the exporter container.
-    pullPolicy: IfNotPresent
+# Exporter Configuration (`controllers.exporter`)
+# The iperf3 exporter is managed under the `controllers.exporter` section,
+# leveraging the `bjw-s/common-library` for robust workload management.
+controllers:
+  exporter:
+    # -- Enable the exporter controller.
+    enabled: true
+    # -- Set the controller type for the exporter.
+    # Valid options are "deployment" or "daemonset".
+    # Use "daemonset" for N-to-N node monitoring where an exporter runs on each node (or selected nodes).
+    # Use "deployment" for a centralized exporter (typically with replicaCount: 1).
+    # @default -- "deployment"
+    type: deployment
+    # -- Number of desired exporter pods. Only used if type is "deployment".
+    # @default -- 1
+    replicas: 1
 
-  # -- Number of exporter pod replicas. Typically 1 is sufficient.
-  replicaCount: 1
+    # -- Application-specific configuration for the iperf3 exporter.
+    # These values are used to populate environment variables for the exporter container.
+    appConfig:
+      # -- Interval in seconds between complete test cycles (i.e., testing all server nodes).
+      testInterval: 300
+      # -- Log level for the iperf3 exporter (e.g., DEBUG, INFO, WARNING, ERROR, CRITICAL).
+      logLevel: INFO
+      # -- Timeout in seconds for a single iperf3 test run.
+      testTimeout: 10
+      # -- Protocol to use for testing (tcp or udp).
+      testProtocol: tcp
+      # -- iperf3 server port to connect to. Should match the server's listening port.
+      serverPort: "5201"
+      # -- Label selector to find iperf3 server pods.
+      # This is templated. Default: 'app.kubernetes.io/name=<chart-name>,app.kubernetes.io/instance=<release-name>,app.kubernetes.io/component=server'
+      serverLabelSelector: 'app.kubernetes.io/name={{ include "iperf3-monitor.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=server'
 
-  # -- Interval in seconds between complete test cycles (i.e., testing all server nodes).
-  testInterval: 300
+    # -- Pod-level configurations for the exporter.
+    pod:
+      # -- Annotations for the exporter pod.
+      annotations: {}
+      # -- Labels for the exporter pod (the common library adds its own defaults too).
+      labels: {}
+      # -- Node selector for scheduling exporter pods. Useful for DaemonSet or specific scheduling with Deployments.
+      # Example:
+      # nodeSelector:
+      #   kubernetes.io/os: linux
+      nodeSelector: {}
+      # -- Tolerations for scheduling exporter pods.
+      # Example:
+      # tolerations:
+      #   - key: "node-role.kubernetes.io/control-plane"
+      #     operator: "Exists"
+      #     effect: "NoSchedule"
+      tolerations: []
+      # -- Affinity rules for scheduling exporter pods.
+      # Example:
+      # affinity:
+      #   nodeAffinity:
+      #     requiredDuringSchedulingIgnoredDuringExecution:
+      #       nodeSelectorTerms:
+      #         - matchExpressions:
+      #             - key: "kubernetes.io/arch"
+      #               operator: In
+      #               values:
+      #                 - amd64
+      affinity: {}
+      # -- Security context for the exporter pod.
+      # securityContext:
+      #   fsGroup: 65534
+      #   runAsUser: 65534
+      #   runAsGroup: 65534
+      #   runAsNonRoot: true
+      securityContext: {}
+      # -- Automount service account token for the pod.
+      automountServiceAccountToken: true
 
-  # -- Timeout in seconds for a single iperf3 test run.
-  testTimeout: 10
-
-  # -- Protocol to use for testing (tcp or udp).
-  testProtocol: tcp
-
-  # -- CPU and memory resource requests and limits for the exporter pod.
-  # @default -- A small default is provided if commented out.
-  resources: {}
-    # requests:
-    #   cpu: "100m"
-    #   memory: "128Mi"
-    # limits:
-    #   cpu: "500m"
-    #   memory: "256Mi"
+    # -- Container-level configurations for the main exporter container.
+    containers:
+      exporter: # Name of the primary container
+        image:
+          repository: ghcr.io/malarinv/iperf3-monitor
+          tag: "" # Defaults to .Chart.AppVersion
+          pullPolicy: IfNotPresent
+        # -- Custom environment variables for the exporter container.
+        # These are merged with the ones generated from appConfig.
+        # env:
+        #   MY_CUSTOM_VAR: "my_value"
+        env: {}
+        # -- Ports for the exporter container.
+        ports:
+          metrics: # Name of the port
+            port: 9876 # Container port for metrics
+            protocol: TCP
+            enabled: true
+        # -- CPU and memory resource requests and limits.
+        # resources:
+        #   requests:
+        #     cpu: "100m"
+        #     memory: "128Mi"
+        #   limits:
+        #     cpu: "500m"
+        #     memory: "256Mi"
+        resources: {}
+        # -- Probes configuration for the exporter container.
+        # probes:
+        #   liveness:
+        #     enabled: true # Example: enable liveness probe
+        #     spec: # Customize probe spec if needed
+        #       initialDelaySeconds: 30
+        #       periodSeconds: 15
+        #       timeoutSeconds: 5
+        #       failureThreshold: 3
+        probes:
+          liveness:
+            enabled: false
+          readiness:
+            enabled: false
+          startup:
+            enabled: false
 
 server:
   # -- Configuration for the iperf3 server container image (DaemonSet).
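The controller type switch documented above is exercised with a plain values override. A minimal sketch (hypothetical file name my-values.yaml, using only keys documented in this README):

    # my-values.yaml: run one exporter per node, control-plane nodes included
    controllers:
      exporter:
        type: daemonset
        pod:
          tolerations:
            - key: "node-role.kubernetes.io/control-plane"
              operator: "Exists"
              effect: "NoSchedule"

Rendering with `helm template ./charts/iperf3-monitor -f my-values.yaml` should then emit a DaemonSet instead of a Deployment, which is the pair of configurations the commit message says now lint and template cleanly.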
@@ -1,9 +1,12 @@
 dependencies:
 - name: kube-prometheus-stack
   repository: https://prometheus-community.github.io/helm-charts
-  version: 75.3.6
+  version: 75.7.0
 - name: prometheus-operator
   repository: oci://tccr.io/truecharts
   version: 11.5.1
-digest: sha256:3000e63445f8ba8df601cb483f4f77d14c5c4662bff2d16ffcf5cf1f7def314b
-generated: "2025-06-20T17:25:44.538372209+05:30"
+- name: common
+  repository: https://bjw-s-labs.github.io/helm-charts/
+  version: 4.1.2
+digest: sha256:68485b4e158a6a405073e9c59966d251b62971846cdc9871e41fde46f19aabfe
+generated: "2025-07-01T20:32:00.061995907Z"
@@ -32,3 +32,6 @@ dependencies:
     version: ">=8.11.1"
     repository: "oci://tccr.io/truecharts"
     condition: "dependencies.install, serviceMonitor.enabled, dependencies.useTrueChartsPrometheusOperator"
+  - name: common
+    version: "4.1.2"
+    repository: "https://bjw-s-labs.github.io/helm-charts/"
@@ -1,194 +0,0 @@
{
  "__inputs": [],
  "__requires": [
    {
      "type": "grafana",
      "id": "grafana",
      "name": "Grafana",
      "version": "8.0.0"
    },
    {
      "type": "datasource",
      "id": "prometheus",
      "name": "Prometheus",
      "version": "1.0.0"
    }
  ],
  "annotations": {
    "list": [
      {
        "builtIn": 1,
        "datasource": {
          "type": "grafana",
          "uid": "-- Grafana --"
        },
        "enable": true,
        "hide": true,
        "iconColor": "rgba(0, 211, 255, 1)",
        "name": "Annotations & Alerts",
        "type": "dashboard"
      }
    ]
  },
  "editable": true,
  "fiscalYearStartMonth": 0,
  "gnetId": null,
  "graphTooltip": 0,
  "id": null,
  "links": [],
  "panels": [
    {
      "datasource": {
        "type": "prometheus",
        "uid": "prometheus"
      },
      "gridPos": {
        "h": 9,
        "w": 24,
        "x": 0,
        "y": 0
      },
      "id": 2,
      "targets": [
        {
          "expr": "avg(iperf_network_bandwidth_mbps) by (source_node, destination_node)",
          "format": "heatmap",
          "legendFormat": "{{source_node}} -> {{destination_node}}",
          "refId": "A"
        }
      ],
      "cards": { "cardPadding": null, "cardRound": null },
      "color": {
        "mode": "spectrum",
        "scheme": "red-yellow-green",
        "exponent": 0.5,
        "reverse": false
      },
      "dataFormat": "tsbuckets",
      "yAxis": { "show": true, "format": "short" },
      "xAxis": { "show": true }
    },
    {
      "title": "Bandwidth Over Time (Source: $source_node, Dest: $destination_node)",
      "type": "timeseries",
      "datasource": {
        "type": "prometheus",
        "uid": "prometheus"
      },
      "gridPos": {
        "h": 8,
        "w": 12,
        "x": 0,
        "y": 9
      },
      "targets": [
        {
          "expr": "iperf_network_bandwidth_mbps{source_node=~\"^$source_node$\", destination_node=~\"^$destination_node$\", protocol=~\"^$protocol$\"}",
          "legendFormat": "Bandwidth",
          "refId": "A"
        }
      ],
      "fieldConfig": {
        "defaults": {
          "unit": "mbps"
        }
      }
    },
    {
      "title": "Jitter Over Time (Source: $source_node, Dest: $destination_node)",
      "type": "timeseries",
      "datasource": {
        "type": "prometheus",
        "uid": "prometheus"
      },
      "gridPos": {
        "h": 8,
        "w": 12,
        "x": 12,
        "y": 9
      },
      "targets": [
        {
          "expr": "iperf_network_jitter_ms{source_node=~\"^$source_node$\", destination_node=~\"^$destination_node$\", protocol=\"udp\"}",
          "legendFormat": "Jitter",
          "refId": "A"
        }
      ],
      "fieldConfig": {
        "defaults": {
          "unit": "ms"
        }
      }
    }
  ],
  "refresh": "30s",
  "schemaVersion": 36,
  "style": "dark",
  "tags": ["iperf3", "network", "kubernetes"],
  "templating": {
    "list": [
      {
        "current": {},
        "datasource": {
          "type": "prometheus",
          "uid": "prometheus"
        },
        "definition": "label_values(iperf_network_bandwidth_mbps, source_node)",
        "hide": 0,
        "includeAll": false,
        "multi": false,
        "name": "source_node",
        "options": [],
        "query": "label_values(iperf_network_bandwidth_mbps, source_node)",
        "refresh": 1,
        "regex": "",
        "skipUrlSync": false,
        "sort": 1,
        "type": "query"
      },
      {
        "current": {},
        "datasource": {
          "type": "prometheus",
          "uid": "prometheus"
        },
        "definition": "label_values(iperf_network_bandwidth_mbps{source_node=~\"^$source_node$\"}, destination_node)",
        "hide": 0,
        "includeAll": false,
        "multi": false,
        "name": "destination_node",
        "options": [],
        "query": "label_values(iperf_network_bandwidth_mbps{source_node=~\"^$source_node$\"}, destination_node)",
        "refresh": 1,
        "regex": "",
        "skipUrlSync": false,
        "sort": 1,
        "type": "query"
      },
      {
        "current": { "selected": true, "text": "tcp", "value": "tcp" },
        "hide": 0,
        "includeAll": false,
        "multi": false,
        "name": "protocol",
        "options": [
          { "selected": true, "text": "tcp", "value": "tcp" },
          { "selected": false, "text": "udp", "value": "udp" }
        ],
        "query": "tcp,udp",
        "skipUrlSync": false,
        "type": "custom"
      }
    ]
  },
  "time": {
    "from": "now-1h",
    "to": "now"
  },
  "timepicker": {},
  "timezone": "browser",
  "title": "Kubernetes iperf3 Network Performance",
  "uid": "k8s-iperf3-dashboard",
  "version": 1,
  "weekStart": ""
}
@@ -0,0 +1,129 @@
{{- /*
This template is responsible for rendering the 'exporter' controller (Deployment or DaemonSet)
by calling the bjw-s common library.

The primary values for the exporter are expected under .Values.controllers.exporter.
Modifications to environment variables and service account are handled here before
passing the configuration to the common library.
*/}}

{{- /*
Prepare a local, modifiable copy of the .Values. This allows us to adjust the
exporter controller's configuration (like env vars and SA) specifically for this chart's needs
before the common library processes it.
Convert to map[string]interface{} via toYaml/fromYaml to ensure compatibility with 'dig'.
*/}}
{{- $localValues := .Values | toYaml | fromYaml | deepCopy -}}
{{- $chart := .Chart -}}
{{- $release := .Release -}}
{{- $appName := include "iperf3-monitor.name" . -}}
{{- $fullName := include "iperf3-monitor.fullname" . -}}

{{- /*
Define the key for the exporter controller, typically "exporter" as per our values.yaml.
*/}}
{{- $exporterControllerKey := "exporter" -}}

{{- /*
Attempt to get the exporter controller's configuration block.
Proceed with modifications only if the exporter controller is defined.
*/}}
{{- $exporterControllerConfig := get $localValues.controllers $exporterControllerKey -}}
{{- if $exporterControllerConfig -}}

{{- /*
Construct the base set of environment variables required by the iperf3-exporter application.
These are derived from the 'appConfig' section of the exporter's controller configuration.
*/}}
{{- $baseExporterEnv := dict -}}
{{- if $exporterControllerConfig.appConfig -}}
{{- $_ := set $baseExporterEnv "SOURCE_NODE_NAME" (dict "valueFrom" (dict "fieldRef" (dict "fieldPath" "spec.nodeName"))) -}}
{{- $_ := set $baseExporterEnv "IPERF_TEST_INTERVAL" ($exporterControllerConfig.appConfig.testInterval | default "300" | toString) -}}
{{- $_ := set $baseExporterEnv "IPERF_TEST_PROTOCOL" ($exporterControllerConfig.appConfig.testProtocol | default "tcp") -}}
{{- $_ := set $baseExporterEnv "LOG_LEVEL" ($exporterControllerConfig.appConfig.logLevel | default "INFO") -}}
{{- $_ := set $baseExporterEnv "IPERF_SERVER_PORT" ($exporterControllerConfig.appConfig.serverPort | default "5201" | toString) -}}
{{- $_ := set $baseExporterEnv "IPERF_SERVER_NAMESPACE" (dict "valueFrom" (dict "fieldRef" (dict "fieldPath" "metadata.namespace"))) -}}
{{- $_ := set $baseExporterEnv "IPERF_TEST_TIMEOUT" ($exporterControllerConfig.appConfig.testTimeout | default "10" | toString) -}}
{{- $serverLabelSelectorDefault := printf "app.kubernetes.io/name=%s,app.kubernetes.io/instance=%s,app.kubernetes.io/component=server" $appName $release.Name -}}
{{- $serverLabelSelector := tpl ($exporterControllerConfig.appConfig.serverLabelSelector | default $serverLabelSelectorDefault) . -}}
{{- $_ := set $baseExporterEnv "IPERF_SERVER_LABEL_SELECTOR" $serverLabelSelector -}}
{{- end -}}

{{- /*
Merge the base environment variables with any user-defined environment variables.
User-defined variables (from .Values.controllers.exporter.containers.exporter.env)
will take precedence in case of conflicting keys.
*/}}
{{- $userExporterEnv := $exporterControllerConfig.containers.exporter.env | default dict -}}
{{- $finalExporterEnv := mergeOverwrite $baseExporterEnv $userExporterEnv -}}

{{- /*
Ensure the container structure exists and update its 'env' field with the final set.
The common library expects this under controllers.<key>.containers.<containerName>.env
*/}}
{{- if not $exporterControllerConfig.containers -}}
{{- $_ := set $exporterControllerConfig "containers" dict -}}
{{- end -}}
{{- if not $exporterControllerConfig.containers.exporter -}}
{{- $_ := set $exporterControllerConfig.containers "exporter" dict -}}
{{- end -}}
{{- $_ := set $exporterControllerConfig.containers.exporter "env" $finalExporterEnv -}}

{{- /*
Ensure the container image tag is set, defaulting to Chart.AppVersion if empty,
as the common library validation requires it during 'helm template'.
*/}}
{{- $exporterContainerCfg := get $exporterControllerConfig.containers "exporter" -}}
{{- if $exporterContainerCfg -}}
{{- if not $exporterContainerCfg.image.tag -}}
{{- if $chart.AppVersion -}}
{{- $_ := set $exporterContainerCfg.image "tag" $chart.AppVersion -}}
{{- else -}}
{{- fail (printf "Error: Container image tag is not specified for controller '%s', container '%s', and Chart.AppVersion is also empty." $exporterControllerKey "exporter") -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{- /*
Configure the Service Account for the exporter controller.
This ensures the controller pod uses the ServiceAccount that is intended by this chart's
RBAC configuration (.Values.rbac.create and .Values.serviceAccount.name).
*/}}
{{- $serviceAccountNameFromValues := $localValues.serviceAccount.name | default (printf "%s-exporter" $fullName) -}}
{{- if not $exporterControllerConfig.serviceAccount -}}
{{- $_ := set $exporterControllerConfig "serviceAccount" dict -}}
{{- end -}}
{{- $_ := set $exporterControllerConfig.serviceAccount "name" $serviceAccountNameFromValues -}}
{{- $_ := set $exporterControllerConfig.serviceAccount "create" $localValues.rbac.create -}}
{{- $_ := set $exporterControllerConfig.serviceAccount "automountServiceAccountToken" ($exporterControllerConfig.pod.automountServiceAccountToken | default true) -}}

{{- /*
Replace the original exporter controller config in our $localValues copy
with the modified version (that now includes the correct env and SA settings).
*/}}
{{- $_ := set $localValues.controllers $exporterControllerKey $exporterControllerConfig -}}
{{- end -}}

{{- /*
Ensure .Values.global exists and is a map, as the common library expects it.
*/}}
{{- if not (get $localValues "global") -}}
{{- $_ := set $localValues "global" dict -}}
{{- else if not (kindIs "map" (get $localValues "global")) -}}
{{- $_ := set $localValues "global" dict -}}
{{- end -}}

{{- /*
Ensure defaultPodOptionsStrategy exists, as common lib expects it at the root of Values.
*/}}
{{- if not (get $localValues "defaultPodOptionsStrategy") -}}
{{- $_ := set $localValues "defaultPodOptionsStrategy" "overwrite" -}}
{{- end -}}

{{- /*
Call the common library's main render function for controllers.
This function iterates through all controllers defined under $localValues.controllers
(in our case, just "exporter") and renders them using their specified type and configuration.
The context passed must mirror the global Helm context, including 'Values', 'Chart', 'Release', 'Capabilities', and 'Template'.
*/}}
{{- include "bjw-s.common.render.controllers" (dict "Values" $localValues "Chart" $chart "Release" $release "Capabilities" .Capabilities "Template" .Template) | nindent 0 -}}
@@ -1,50 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "iperf3-monitor.fullname" . }}-exporter
  labels:
    {{- include "iperf3-monitor.labels" . | nindent 4 }}
    app.kubernetes.io/component: exporter
spec:
  replicas: {{ .Values.exporter.replicaCount }}
  selector:
    matchLabels:
      {{- include "iperf3-monitor.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/component: exporter
  template:
    metadata:
      labels:
        {{- include "iperf3-monitor.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/component: exporter
    spec:
      serviceAccountName: {{ include "iperf3-monitor.serviceAccountName" . }}
      containers:
        - name: iperf3-exporter
          image: "{{ .Values.exporter.image.repository }}:{{ .Values.exporter.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
          imagePullPolicy: {{ .Values.exporter.image.pullPolicy }}
          ports:
            - containerPort: {{ .Values.service.targetPort }}
              name: metrics
          env:
            - name: SOURCE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: IPERF_TEST_INTERVAL
              value: "{{ .Values.exporter.testInterval }}"
            - name: IPERF_TEST_PROTOCOL
              value: "{{ .Values.exporter.testProtocol }}"
            - name: LOG_LEVEL
              value: "{{ .Values.exporter.logLevel }}"
            - name: IPERF_SERVER_PORT
              value: "5201" # Hardcoded as per server DaemonSet
            - name: IPERF_SERVER_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: IPERF_SERVER_LABEL_SELECTOR
              value: 'app.kubernetes.io/name={{ include "iperf3-monitor.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=server'
          {{- with .Values.exporter.resources }}
          resources:
            {{- toYaml . | nindent 10 }}
          {{- end }}
@@ -1,13 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Release.Name }}-grafana-dashboard
  labels:
    grafana_dashboard: "1"
    app.kubernetes.io/name: {{ include "iperf3-monitor.name" . }}
    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
  iperf3-dashboard.json: |
    {{ .Files.Get "grafana/iperf3-dashboard.json" | nindent 4 }}
@@ -8,42 +8,112 @@ nameOverride: ""
 # -- Override the fully qualified app name.
 fullnameOverride: ""
 
-exporter:
-  # -- Configuration for the exporter container image.
-  image:
-    # -- The container image repository for the exporter.
-    repository: ghcr.io/malarinv/iperf3-monitor
-    # -- The container image tag for the exporter. If not set, the chart's appVersion is used.
-    tag: ""
-    # -- The image pull policy for the exporter container.
-    pullPolicy: IfNotPresent
+controllers:
+  exporter:
+    # -- Enable the exporter controller.
+    enabled: true
+    # -- Set the controller type for the exporter.
+    # Valid options are "deployment" or "daemonset".
+    # @default -- "deployment"
+    type: deployment
+    # -- Number of desired exporter pods. Only used if type is "deployment".
+    # @default -- 1
+    replicas: 1
 
-  # -- Number of exporter pod replicas. Typically 1 is sufficient.
-  replicaCount: 1
+    # -- Application-specific configuration for the iperf3 exporter.
+    # These values are used to populate environment variables for the exporter container.
+    appConfig:
+      # -- Interval in seconds between complete test cycles (i.e., testing all server nodes).
+      testInterval: 300
+      # -- Log level for the iperf3 exporter (e.g., DEBUG, INFO, WARNING, ERROR, CRITICAL).
+      logLevel: INFO
+      # -- Timeout in seconds for a single iperf3 test run.
+      testTimeout: 10
+      # -- Protocol to use for testing (tcp or udp).
+      testProtocol: tcp
+      # -- iperf3 server port to connect to. Should match the server's listening port.
+      # @default -- "5201" (hardcoded in the original chart for server daemonset)
+      serverPort: "5201"
+      # -- Label selector to find iperf3 server pods.
+      # This will be templated in the actual deployment.
+      # Example default (if not overridden by template logic): 'app.kubernetes.io/component=server'
+      serverLabelSelector: 'app.kubernetes.io/name={{ include "iperf3-monitor.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=server'
 
-  # -- Interval in seconds between complete test cycles (i.e., testing all server nodes).
-  testInterval: 300
-
-  # -- Log level for the iperf3 exporter (e.g., DEBUG, INFO, WARNING, ERROR, CRITICAL).
-  logLevel: INFO
+    # -- Pod-level configurations for the exporter, leveraging bjw-s common library structure.
+    pod:
+      # -- Annotations for the exporter pod.
+      annotations: {}
+      # -- Labels for the exporter pod.
+      labels: {} # The common library will add its own default labels.
+      # -- Node selector for scheduling exporter pods.
+      nodeSelector: {}
+      # -- Tolerations for scheduling exporter pods.
+      tolerations: []
+      # -- Affinity rules for scheduling exporter pods.
+      affinity: {}
+      # -- Security context for the exporter pod.
+      securityContext: {}
+      # fsGroup: 65534
+      # runAsUser: 65534
+      # runAsGroup: 65534
+      # runAsNonRoot: true
+      # -- Automount service account token for the pod.
+      automountServiceAccountToken: true # Default from common lib
 
-  # -- Timeout in seconds for a single iperf3 test run.
-  testTimeout: 10
+    # -- Container-level configurations for the main exporter container.
+    containers:
+      exporter: # This is the primary container, name it 'exporter'
+        image:
+          # -- The container image repository for the exporter.
+          repository: ghcr.io/malarinv/iperf3-monitor
+          # -- The container image tag for the exporter. If not set, the chart's appVersion is used.
+          tag: "" # Defaults to .Chart.AppVersion via common library
+          # -- The image pull policy for the exporter container.
+          pullPolicy: IfNotPresent
 
-  # -- Protocol to use for testing (tcp or udp).
-  testProtocol: tcp
+        # -- Environment variables for the exporter container.
+        # The actual env map will be constructed in the main chart template
+        # and passed to the common library. This section is for user overrides
+        # if they want to directly set other env vars using common lib's env schema.
+        env: {}
+        # Example:
+        # MY_CUSTOM_VAR: "my_value"
+        # ANOTHER_VAR:
+        #   valueFrom:
+        #     secretKeyRef:
+        #       name: mysecret
+        #       key: mykey
 
-  # -- CPU and memory resource requests and limits for the exporter pod.
-  # @default -- A small default is provided if commented out.
-  resources:
-    {}
-    # requests:
-    #   cpu: "100m"
-    #   memory: "128Mi"
-    # limits:
-    #   cpu: "500m"
-    #   memory: "256Mi"
+        # -- Ports for the exporter container.
+        ports:
+          metrics: # Name of the port, will be used in Service definition
+            # -- Port number for the metrics endpoint on the container.
+            port: 9876 # Default, should match service.targetPort
+            # -- Protocol for the metrics port.
+            protocol: TCP # Common library defaults to TCP if not specified.
+            enabled: true # This port is enabled
+
+        # -- CPU and memory resource requests and limits for the exporter container.
+        resources:
+          {}
+          # requests:
+          #   cpu: "100m"
+          #   memory: "128Mi"
+          # limits:
+          #   cpu: "500m"
+          #   memory: "256Mi"
+
+        # -- Probes configuration for the exporter container.
+        probes:
+          liveness:
+            enabled: false
+          readiness:
+            enabled: false
+          startup:
+            enabled: false
 
+# Server configuration (iperf3 server daemonset)
 server:
   # -- Configuration for the iperf3 server container image (DaemonSet).
   image:
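Because the exporter-controller template merges the appConfig-derived variables with `controllers.exporter.containers.exporter.env` via `mergeOverwrite` (user keys win on conflict), values alone can both add and override environment variables. A minimal sketch using only keys defined above:

    controllers:
      exporter:
        containers:
          exporter:
            env:
              MY_CUSTOM_VAR: "my_value" # added alongside the generated vars
              LOG_LEVEL: DEBUG          # overrides the appConfig-derived LOG_LEVEL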
@@ -53,8 +123,6 @@ server:
     tag: latest
 
   # -- CPU and memory resource requests and limits for the iperf3 server pods (DaemonSet).
-  # These should be very low as the server is mostly idle.
-  # @default -- A small default is provided if commented out.
  resources:
    {}
    # requests:
@@ -65,13 +133,9 @@ server:
    #   memory: "128Mi"
 
  # -- Node selector for scheduling iperf3 server pods.
-  # Use this to restrict the DaemonSet to a subset of nodes.
-  # @default -- {} (schedule on all nodes)
  nodeSelector: {}
 
-  # -- Tolerations for scheduling iperf3 server pods on tainted nodes (e.g., control-plane nodes).
-  # This is often necessary to include master nodes in the test mesh.
-  # @default -- Tolerates control-plane and master taints.
+  # -- Tolerations for scheduling iperf3 server pods on tainted nodes.
  tolerations:
    - key: "node-role.kubernetes.io/control-plane"
      operator: "Exists"
@@ -80,60 +144,62 @@ server:
      operator: "Exists"
      effect: "NoSchedule"
 
 # RBAC and ServiceAccount settings
+# These are for the exporter. The exporter deployment (managed by common library)
+# will need to use the ServiceAccount specified here or one created by the library.
 rbac:
   # -- If true, create ServiceAccount, ClusterRole, and ClusterRoleBinding for the exporter.
   # Set to false if you manage RBAC externally.
   create: true
 
 serviceAccount:
-  # -- The name of the ServiceAccount to use for the exporter pod.
-  # Only used if rbac.create is false. If not set, it defaults to the chart's fullname.
+  # -- The name of the ServiceAccount to use/create for the exporter pod.
+  # If rbac.create is true, this SA is created. The exporter pod must use this SA.
   name: "iperf3-monitor"
 
 # Service Monitor configuration for Prometheus
 serviceMonitor:
-  # -- If true, create a ServiceMonitor resource for integration with Prometheus Operator.
-  # Requires a running Prometheus Operator in the cluster.
+  # -- If true, create a ServiceMonitor resource.
   enabled: true
 
-  # -- Scrape interval for the ServiceMonitor. How often Prometheus scrapes the exporter metrics.
+  # -- Scrape interval for the ServiceMonitor.
   interval: 60s
 
-  # -- Scrape timeout for the ServiceMonitor. How long Prometheus waits for metrics response.
+  # -- Scrape timeout for the ServiceMonitor.
   scrapeTimeout: 30s
 
-# -- Configuration for the exporter Service.
+# Service configuration for the exporter
+# This defines how the exporter is exposed.
+# The common library can also manage services, or we can use our own template.
+# This structure is compatible with bjw-s common library's service management if we choose to use it.
 service:
-  # -- Service type. ClusterIP is typically sufficient.
-  type: ClusterIP
-  # -- Port on which the exporter service is exposed.
-  port: 9876
-  # -- Target port on the exporter pod.
-  targetPort: 9876
+  main: # A key for the service, 'main' is a common convention.
+    # -- Enable the exporter service.
+    enabled: true
+    # -- Service type.
+    type: ClusterIP # ClusterIP is typical for internal services scraped by Prometheus.
+    # -- Ports configuration for the service.
+    ports:
+      metrics: # Name of the service port, should align with a container port name.
+        # -- Port number on which the service is exposed.
+        port: 9876
+        # -- Target port on the exporter pod. Can be a number or name.
+        # Refers to the 'metrics' port defined in controllers.exporter.containers.exporter.ports.
+        targetPort: metrics
+        protocol: TCP
 
-# -- Optional configuration for a network policy to allow traffic to the iperf3 server DaemonSet.
-# This is often necessary if you are using a network policy controller.
+# Network Policy (optional)
 networkPolicy:
   # -- If true, create a NetworkPolicy resource.
   enabled: false
-  # -- Specify source selectors if needed (e.g., pods in a specific namespace).
+  # -- Source selectors for ingress rules.
   from: []
-  # -- Specify namespace selectors if needed.
+  # -- Namespace selectors for ingress rules.
   namespaceSelector: {}
-  # -- Specify pod selectors if needed.
+  # -- Pod selectors for ingress rules.
   podSelector: {}
 
-# -----------------------------------------------------------------------------
-# Dependency Configuration
-# -----------------------------------------------------------------------------
+# Dependency Configuration (for Prometheus Operator)
 dependencies:
-  # -- Set to true to install Prometheus operator dependency if serviceMonitor.enabled is also true.
-  # -- Set to false to disable the installation of Prometheus operator dependency,
-  # -- regardless of serviceMonitor.enabled. This is useful if you have Prometheus
-  # -- Operator installed and managed separately in your cluster.
-  install: true
-
-  # -- Set to true to use the TrueCharts Prometheus Operator instead of kube-prometheus-stack.
-  # This chart's ServiceMonitor resources require a Prometheus Operator to be functional.
-  # If serviceMonitor.enabled is true and dependencies.install is true,
-  # one of these two dependencies will be pulled based on this flag.
+  # -- Set to false by default. Set to true to install a Prometheus operator dependency (used if serviceMonitor.enabled=true).
+  # -- If false (default), and serviceMonitor.enabled is true, you must have a compatible Prometheus Operator already running in your cluster.
+  install: false
+  # -- If true, use TrueCharts Prometheus Operator instead of kube-prometheus-stack (used if dependencies.install is true).
  useTrueChartsPrometheusOperator: false
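With `install: false` now the default, a cluster that already runs a Prometheus Operator needs no extra values, while a cluster without one must either enable the dependency or turn the ServiceMonitor off. A minimal sketch of the latter case, using the keys defined above:

    # No operator in the cluster and none wanted: keep CRD-dependent resources off
    serviceMonitor:
      enabled: false
    dependencies:
      install: false # default after this change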
@@ -0,0 +1,347 @@
#!/usr/bin/env bash

# Copyright The Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The install script is based off of the MIT-licensed script from glide,
# the package manager for Go: https://github.com/Masterminds/glide.sh/blob/master/get

: ${BINARY_NAME:="helm"}
: ${USE_SUDO:="true"}
: ${DEBUG:="false"}
: ${VERIFY_CHECKSUM:="true"}
: ${VERIFY_SIGNATURES:="false"}
: ${HELM_INSTALL_DIR:="/usr/local/bin"}
: ${GPG_PUBRING:="pubring.kbx"}

HAS_CURL="$(type "curl" &> /dev/null && echo true || echo false)"
HAS_WGET="$(type "wget" &> /dev/null && echo true || echo false)"
HAS_OPENSSL="$(type "openssl" &> /dev/null && echo true || echo false)"
HAS_GPG="$(type "gpg" &> /dev/null && echo true || echo false)"
HAS_GIT="$(type "git" &> /dev/null && echo true || echo false)"
HAS_TAR="$(type "tar" &> /dev/null && echo true || echo false)"

# initArch discovers the architecture for this system.
initArch() {
  ARCH=$(uname -m)
  case $ARCH in
    armv5*) ARCH="armv5";;
    armv6*) ARCH="armv6";;
    armv7*) ARCH="arm";;
    aarch64) ARCH="arm64";;
    x86) ARCH="386";;
    x86_64) ARCH="amd64";;
    i686) ARCH="386";;
    i386) ARCH="386";;
  esac
}

# initOS discovers the operating system for this system.
initOS() {
  OS=$(echo `uname`|tr '[:upper:]' '[:lower:]')

  case "$OS" in
    # Minimalist GNU for Windows
    mingw*|cygwin*) OS='windows';;
  esac
}

# runs the given command as root (detects if we are root already)
runAsRoot() {
  if [ $EUID -ne 0 -a "$USE_SUDO" = "true" ]; then
    sudo "${@}"
  else
    "${@}"
  fi
}

# verifySupported checks that the os/arch combination is supported for
# binary builds, as well whether or not necessary tools are present.
verifySupported() {
  local supported="darwin-amd64\ndarwin-arm64\nlinux-386\nlinux-amd64\nlinux-arm\nlinux-arm64\nlinux-ppc64le\nlinux-s390x\nlinux-riscv64\nwindows-amd64\nwindows-arm64"
  if ! echo "${supported}" | grep -q "${OS}-${ARCH}"; then
    echo "No prebuilt binary for ${OS}-${ARCH}."
    echo "To build from source, go to https://github.com/helm/helm"
    exit 1
  fi

  if [ "${HAS_CURL}" != "true" ] && [ "${HAS_WGET}" != "true" ]; then
    echo "Either curl or wget is required"
    exit 1
  fi

  if [ "${VERIFY_CHECKSUM}" == "true" ] && [ "${HAS_OPENSSL}" != "true" ]; then
    echo "In order to verify checksum, openssl must first be installed."
    echo "Please install openssl or set VERIFY_CHECKSUM=false in your environment."
    exit 1
  fi

  if [ "${VERIFY_SIGNATURES}" == "true" ]; then
    if [ "${HAS_GPG}" != "true" ]; then
      echo "In order to verify signatures, gpg must first be installed."
      echo "Please install gpg or set VERIFY_SIGNATURES=false in your environment."
      exit 1
    fi
    if [ "${OS}" != "linux" ]; then
      echo "Signature verification is currently only supported on Linux."
      echo "Please set VERIFY_SIGNATURES=false or verify the signatures manually."
      exit 1
    fi
  fi

  if [ "${HAS_GIT}" != "true" ]; then
    echo "[WARNING] Could not find git. It is required for plugin installation."
  fi

  if [ "${HAS_TAR}" != "true" ]; then
    echo "[ERROR] Could not find tar. It is required to extract the helm binary archive."
    exit 1
  fi
}

# checkDesiredVersion checks if the desired version is available.
checkDesiredVersion() {
  if [ "x$DESIRED_VERSION" == "x" ]; then
    # Get tag from release URL
    local latest_release_url="https://get.helm.sh/helm-latest-version"
    local latest_release_response=""
    if [ "${HAS_CURL}" == "true" ]; then
      latest_release_response=$( curl -L --silent --show-error --fail "$latest_release_url" 2>&1 || true )
    elif [ "${HAS_WGET}" == "true" ]; then
      latest_release_response=$( wget "$latest_release_url" -q -O - 2>&1 || true )
    fi
    TAG=$( echo "$latest_release_response" | grep '^v[0-9]' )
    if [ "x$TAG" == "x" ]; then
      printf "Could not retrieve the latest release tag information from %s: %s\n" "${latest_release_url}" "${latest_release_response}"
      exit 1
    fi
  else
    TAG=$DESIRED_VERSION
  fi
}

# checkHelmInstalledVersion checks which version of helm is installed and
# if it needs to be changed.
checkHelmInstalledVersion() {
  if [[ -f "${HELM_INSTALL_DIR}/${BINARY_NAME}" ]]; then
    local version=$("${HELM_INSTALL_DIR}/${BINARY_NAME}" version --template="{{ .Version }}")
    if [[ "$version" == "$TAG" ]]; then
      echo "Helm ${version} is already ${DESIRED_VERSION:-latest}"
      return 0
    else
      echo "Helm ${TAG} is available. Changing from version ${version}."
      return 1
    fi
  else
    return 1
  fi
}

# downloadFile downloads the latest binary package and also the checksum
# for that binary.
downloadFile() {
  HELM_DIST="helm-$TAG-$OS-$ARCH.tar.gz"
  DOWNLOAD_URL="https://get.helm.sh/$HELM_DIST"
  CHECKSUM_URL="$DOWNLOAD_URL.sha256"
  HELM_TMP_ROOT="$(mktemp -dt helm-installer-XXXXXX)"
  HELM_TMP_FILE="$HELM_TMP_ROOT/$HELM_DIST"
  HELM_SUM_FILE="$HELM_TMP_ROOT/$HELM_DIST.sha256"
  echo "Downloading $DOWNLOAD_URL"
  if [ "${HAS_CURL}" == "true" ]; then
    curl -SsL "$CHECKSUM_URL" -o "$HELM_SUM_FILE"
    curl -SsL "$DOWNLOAD_URL" -o "$HELM_TMP_FILE"
  elif [ "${HAS_WGET}" == "true" ]; then
    wget -q -O "$HELM_SUM_FILE" "$CHECKSUM_URL"
    wget -q -O "$HELM_TMP_FILE" "$DOWNLOAD_URL"
  fi
}

# verifyFile verifies the SHA256 checksum of the binary package
# and the GPG signatures for both the package and checksum file
# (depending on settings in environment).
verifyFile() {
  if [ "${VERIFY_CHECKSUM}" == "true" ]; then
    verifyChecksum
  fi
  if [ "${VERIFY_SIGNATURES}" == "true" ]; then
    verifySignatures
  fi
}

# installFile installs the Helm binary.
installFile() {
  HELM_TMP="$HELM_TMP_ROOT/$BINARY_NAME"
  mkdir -p "$HELM_TMP"
  tar xf "$HELM_TMP_FILE" -C "$HELM_TMP"
  HELM_TMP_BIN="$HELM_TMP/$OS-$ARCH/helm"
  echo "Preparing to install $BINARY_NAME into ${HELM_INSTALL_DIR}"
  runAsRoot cp "$HELM_TMP_BIN" "$HELM_INSTALL_DIR/$BINARY_NAME"
  echo "$BINARY_NAME installed into $HELM_INSTALL_DIR/$BINARY_NAME"
}

# verifyChecksum verifies the SHA256 checksum of the binary package.
verifyChecksum() {
  printf "Verifying checksum... "
  local sum=$(openssl sha1 -sha256 ${HELM_TMP_FILE} | awk '{print $2}')
  local expected_sum=$(cat ${HELM_SUM_FILE})
  if [ "$sum" != "$expected_sum" ]; then
    echo "SHA sum of ${HELM_TMP_FILE} does not match. Aborting."
    exit 1
  fi
  echo "Done."
}

# verifySignatures obtains the latest KEYS file from GitHub main branch
# as well as the signature .asc files from the specific GitHub release,
# then verifies that the release artifacts were signed by a maintainer's key.
verifySignatures() {
  printf "Verifying signatures... "
  local keys_filename="KEYS"
  local github_keys_url="https://raw.githubusercontent.com/helm/helm/main/${keys_filename}"
  if [ "${HAS_CURL}" == "true" ]; then
    curl -SsL "${github_keys_url}" -o "${HELM_TMP_ROOT}/${keys_filename}"
  elif [ "${HAS_WGET}" == "true" ]; then
    wget -q -O "${HELM_TMP_ROOT}/${keys_filename}" "${github_keys_url}"
  fi
  local gpg_keyring="${HELM_TMP_ROOT}/keyring.gpg"
  local gpg_homedir="${HELM_TMP_ROOT}/gnupg"
  mkdir -p -m 0700 "${gpg_homedir}"
  local gpg_stderr_device="/dev/null"
  if [ "${DEBUG}" == "true" ]; then
    gpg_stderr_device="/dev/stderr"
  fi
  gpg --batch --quiet --homedir="${gpg_homedir}" --import "${HELM_TMP_ROOT}/${keys_filename}" 2> "${gpg_stderr_device}"
  gpg --batch --no-default-keyring --keyring "${gpg_homedir}/${GPG_PUBRING}" --export > "${gpg_keyring}"
  local github_release_url="https://github.com/helm/helm/releases/download/${TAG}"
  if [ "${HAS_CURL}" == "true" ]; then
    curl -SsL "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" -o "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc"
    curl -SsL "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" -o "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc"
  elif [ "${HAS_WGET}" == "true" ]; then
    wget -q -O "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc"
    wget -q -O "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc"
  fi
  local error_text="If you think this might be a potential security issue,"
  error_text="${error_text}\nplease see here: https://github.com/helm/community/blob/master/SECURITY.md"
  local num_goodlines_sha=$(gpg --verify --keyring="${gpg_keyring}" --status-fd=1 "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" 2> "${gpg_stderr_device}" | grep -c -E '^\[GNUPG:\] (GOODSIG|VALIDSIG)')
  if [[ ${num_goodlines_sha} -lt 2 ]]; then
    echo "Unable to verify the signature of helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256!"
    echo -e "${error_text}"
    exit 1
  fi
  local num_goodlines_tar=$(gpg --verify --keyring="${gpg_keyring}" --status-fd=1 "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" 2> "${gpg_stderr_device}" | grep -c -E '^\[GNUPG:\] (GOODSIG|VALIDSIG)')
  if [[ ${num_goodlines_tar} -lt 2 ]]; then
    echo "Unable to verify the signature of helm-${TAG}-${OS}-${ARCH}.tar.gz!"
    echo -e "${error_text}"
    exit 1
  fi
  echo "Done."
}

# fail_trap is executed if an error occurs.
fail_trap() {
  result=$?
  if [ "$result" != "0" ]; then
    if [[ -n "$INPUT_ARGUMENTS" ]]; then
      echo "Failed to install $BINARY_NAME with the arguments provided: $INPUT_ARGUMENTS"
      help
    else
      echo "Failed to install $BINARY_NAME"
    fi
    echo -e "\tFor support, go to https://github.com/helm/helm."
  fi
  cleanup
  exit $result
}

# testVersion tests the installed client to make sure it is working.
testVersion() {
  set +e
  HELM="$(command -v $BINARY_NAME)"
  if [ "$?" = "1" ]; then
    echo "$BINARY_NAME not found. Is $HELM_INSTALL_DIR on your "'$PATH?'
    exit 1
  fi
  set -e
}

# help provides possible cli installation arguments
help () {
  echo "Accepted cli arguments are:"
  echo -e "\t[--help|-h ] ->> prints this help"
  echo -e "\t[--version|-v <desired_version>] . When not defined it fetches the latest release tag from the Helm CDN"
  echo -e "\te.g. --version v3.0.0 or -v canary"
  echo -e "\t[--no-sudo]  ->> install without sudo"
}

# cleanup temporary files to avoid https://github.com/helm/helm/issues/2977
cleanup() {
  if [[ -d "${HELM_TMP_ROOT:-}" ]]; then
    rm -rf "$HELM_TMP_ROOT"
  fi
}

# Execution

# Stop execution on any error
trap "fail_trap" EXIT
set -e

# Set debug if desired
if [ "${DEBUG}" == "true" ]; then
  set -x
fi

# Parsing input arguments (if any)
export INPUT_ARGUMENTS="${@}"
set -u
while [[ $# -gt 0 ]]; do
  case $1 in
    '--version'|-v)
      shift
      if [[ $# -ne 0 ]]; then
        export DESIRED_VERSION="${1}"
        if [[ "$1" != "v"* ]]; then
          echo "Expected version arg ('${DESIRED_VERSION}') to begin with 'v', fixing..."
          export DESIRED_VERSION="v${1}"
        fi
      else
        echo -e "Please provide the desired version. e.g. --version v3.0.0 or -v canary"
        exit 0
      fi
      ;;
    '--no-sudo')
      USE_SUDO="false"
      ;;
    '--help'|-h)
      help
      exit 0
      ;;
    *) exit 1
      ;;
  esac
  shift
done
set +u

initArch
initOS
verifySupported
checkDesiredVersion
if ! checkHelmInstalledVersion; then
  downloadFile
  verifyFile
  installFile
fi
testVersion
cleanup