fix: Correct common lib repo URL, rename exporter template

- Reverted common library repository URL in Chart.yaml to
  https://bjw-s-labs.github.io/helm-charts/.
- Ensured helm dependency commands are run after adding repositories.
- Renamed exporter template from exporter-deployment.yaml to
  exporter-controller.yaml to better reflect its new role with the common library.

Note: Full helm lint/template validation with dependencies could not be run in
the automated environment, because downloaded dependency files did not persist
in the sandbox.
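For reference, the verification sequence the note refers to looks roughly like
this (a sketch; the repository aliases and chart path are assumptions):

```bash
# Add the repositories declared in Chart.yaml before resolving dependencies
helm repo add bjw-s https://bjw-s-labs.github.io/helm-charts/
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts

helm dependency update .          # run from the chart root
helm lint .
helm template test-release . >/dev/null
```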
fix/common-lib-repo-url-and-rename
google-labs-jules[bot] 2025-07-01 21:00:28 +00:00 committed by Malar Invention
parent 5fa41a6aad
commit 309f6ccca9
7 changed files with 674 additions and 150 deletions

README.md

@@ -74,37 +74,123 @@ nameOverride: ""
 # -- Override the fully qualified app name.
 fullnameOverride: ""
 
-exporter:
-  # -- Configuration for the exporter container image.
-  image:
-    # -- The container image repository for the exporter.
-    repository: ghcr.io/malarinv/iperf3-monitor
-    # -- The container image tag for the exporter. If not set, the chart's appVersion is used.
-    tag: ""
-    # -- The image pull policy for the exporter container.
-    pullPolicy: IfNotPresent
-  # -- Number of exporter pod replicas. Typically 1 is sufficient.
-  replicaCount: 1
-  # -- Interval in seconds between complete test cycles (i.e., testing all server nodes).
-  testInterval: 300
-  # -- Timeout in seconds for a single iperf3 test run.
-  testTimeout: 10
-  # -- Protocol to use for testing (tcp or udp).
-  testProtocol: tcp
-  # -- CPU and memory resource requests and limits for the exporter pod.
-  # @default -- A small default is provided if commented out.
-  resources: {}
-    # requests:
-    #   cpu: "100m"
-    #   memory: "128Mi"
-    # limits:
-    #   cpu: "500m"
-    #   memory: "256Mi"
+# Exporter Configuration (`controllers.exporter`)
+# The iperf3 exporter is managed under the `controllers.exporter` section,
+# leveraging the `bjw-s/common-library` for robust workload management.
+controllers:
+  exporter:
+    # -- Enable the exporter controller.
+    enabled: true
+    # -- Set the controller type for the exporter.
+    # Valid options are "deployment" or "daemonset".
+    # Use "daemonset" for N-to-N node monitoring where an exporter runs on each node (or selected nodes).
+    # Use "deployment" for a centralized exporter (typically with replicaCount: 1).
+    # @default -- "deployment"
+    type: deployment
+    # -- Number of desired exporter pods. Only used if type is "deployment".
+    # @default -- 1
+    replicas: 1
+    # -- Application-specific configuration for the iperf3 exporter.
+    # These values are used to populate environment variables for the exporter container.
+    appConfig:
+      # -- Interval in seconds between complete test cycles (i.e., testing all server nodes).
+      testInterval: 300
+      # -- Log level for the iperf3 exporter (e.g., DEBUG, INFO, WARNING, ERROR, CRITICAL).
+      logLevel: INFO
+      # -- Timeout in seconds for a single iperf3 test run.
+      testTimeout: 10
+      # -- Protocol to use for testing (tcp or udp).
+      testProtocol: tcp
+      # -- iperf3 server port to connect to. Should match the server's listening port.
+      serverPort: "5201"
+      # -- Label selector to find iperf3 server pods.
+      # This is templated. Default: 'app.kubernetes.io/name=<chart-name>,app.kubernetes.io/instance=<release-name>,app.kubernetes.io/component=server'
+      serverLabelSelector: 'app.kubernetes.io/name={{ include "iperf3-monitor.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=server'
+    # -- Pod-level configurations for the exporter.
+    pod:
+      # -- Annotations for the exporter pod.
+      annotations: {}
+      # -- Labels for the exporter pod (the common library adds its own defaults too).
+      labels: {}
+      # -- Node selector for scheduling exporter pods. Useful for DaemonSet or specific scheduling with Deployments.
+      # Example:
+      # nodeSelector:
+      #   kubernetes.io/os: linux
+      nodeSelector: {}
+      # -- Tolerations for scheduling exporter pods.
+      # Example:
+      # tolerations:
+      #   - key: "node-role.kubernetes.io/control-plane"
+      #     operator: "Exists"
+      #     effect: "NoSchedule"
+      tolerations: []
+      # -- Affinity rules for scheduling exporter pods.
+      # Example:
+      # affinity:
+      #   nodeAffinity:
+      #     requiredDuringSchedulingIgnoredDuringExecution:
+      #       nodeSelectorTerms:
+      #         - matchExpressions:
+      #             - key: "kubernetes.io/arch"
+      #               operator: In
+      #               values:
+      #                 - amd64
+      affinity: {}
+      # -- Security context for the exporter pod.
+      # securityContext:
+      #   fsGroup: 65534
+      #   runAsUser: 65534
+      #   runAsGroup: 65534
+      #   runAsNonRoot: true
+      securityContext: {}
+      # -- Automount service account token for the pod.
+      automountServiceAccountToken: true
+    # -- Container-level configurations for the main exporter container.
+    containers:
+      exporter: # Name of the primary container
+        image:
+          repository: ghcr.io/malarinv/iperf3-monitor
+          tag: "" # Defaults to .Chart.AppVersion
+          pullPolicy: IfNotPresent
+        # -- Custom environment variables for the exporter container.
+        # These are merged with the ones generated from appConfig.
+        # env:
+        #   MY_CUSTOM_VAR: "my_value"
+        env: {}
+        # -- Ports for the exporter container.
+        ports:
+          metrics: # Name of the port
+            port: 9876 # Container port for metrics
+            protocol: TCP
+            enabled: true
+        # -- CPU and memory resource requests and limits.
+        # resources:
+        #   requests:
+        #     cpu: "100m"
+        #     memory: "128Mi"
+        #   limits:
+        #     cpu: "500m"
+        #     memory: "256Mi"
+        resources: {}
+        # -- Probes configuration for the exporter container.
+        # probes:
+        #   liveness:
+        #     enabled: true # Example: enable liveness probe
+        #     spec: # Customize probe spec if needed
+        #       initialDelaySeconds: 30
+        #       periodSeconds: 15
+        #       timeoutSeconds: 5
+        #       failureThreshold: 3
+        probes:
+          liveness:
+            enabled: false
+          readiness:
+            enabled: false
+          startup:
+            enabled: false
 
 server:
   # -- Configuration for the iperf3 server container image (DaemonSet).
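As a concrete illustration of the N-to-N mode described above, a minimal override file might look like this (file name hypothetical; keys taken from the README section above):

```yaml
# values-daemonset.yaml -- run one exporter on every schedulable node
controllers:
  exporter:
    type: daemonset   # replicas is ignored for daemonsets
    pod:
      tolerations:
        - key: "node-role.kubernetes.io/control-plane"
          operator: "Exists"
          effect: "NoSchedule"
```

Applied with `helm upgrade --install iperf3-monitor . -f values-daemonset.yaml`.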

Chart.lock

@@ -1,9 +1,12 @@
 dependencies:
 - name: kube-prometheus-stack
   repository: https://prometheus-community.github.io/helm-charts
-  version: 75.3.6
+  version: 75.7.0
 - name: prometheus-operator
   repository: oci://tccr.io/truecharts
   version: 11.5.1
-digest: sha256:3000e63445f8ba8df601cb483f4f77d14c5c4662bff2d16ffcf5cf1f7def314b
-generated: "2025-06-20T17:25:44.538372209+05:30"
+- name: common
+  repository: https://bjw-s-labs.github.io/helm-charts/
+  version: 4.1.2
+digest: sha256:68485b4e158a6a405073e9c59966d251b62971846cdc9871e41fde46f19aabfe
+generated: "2025-07-01T20:32:00.061995907Z"

Chart.yaml

@@ -32,3 +32,6 @@ dependencies:
     version: ">=8.11.1"
     repository: "oci://tccr.io/truecharts"
     condition: "dependencies.install, serviceMonitor.enabled, dependencies.useTrueChartsPrometheusOperator"
+  - name: common
+    version: "4.1.2"
+    repository: "https://bjw-s-labs.github.io/helm-charts/"

templates/exporter-controller.yaml

@@ -0,0 +1,70 @@
{{- /*
Get the exporter controller configuration from values.
We make a deep copy to be able to modify it locally for env vars and service account.
*/}}
{{- $exporterControllerConfig := deepCopy .Values.controllers.exporter -}}
{{- $appName := include "iperf3-monitor.name" . -}}
{{- $fullName := include "iperf3-monitor.fullname" . -}}
{{- $chart := .Chart -}}
{{- $release := .Release -}}
{{- $values := .Values -}}
{{- /*
Construct the base environment variables for the exporter container.
*/}}
{{- $baseExporterEnv := dict -}}
{{- $_ := set $baseExporterEnv "SOURCE_NODE_NAME" (dict "valueFrom" (dict "fieldRef" (dict "fieldPath" "spec.nodeName"))) -}}
{{- $_ := set $baseExporterEnv "IPERF_TEST_INTERVAL" ($exporterControllerConfig.appConfig.testInterval | toString) -}}
{{- $_ := set $baseExporterEnv "IPERF_TEST_PROTOCOL" $exporterControllerConfig.appConfig.testProtocol -}}
{{- $_ := set $baseExporterEnv "LOG_LEVEL" $exporterControllerConfig.appConfig.logLevel -}}
{{- $_ := set $baseExporterEnv "IPERF_SERVER_PORT" ($exporterControllerConfig.appConfig.serverPort | toString) -}}
{{- $_ := set $baseExporterEnv "IPERF_SERVER_NAMESPACE" (dict "valueFrom" (dict "fieldRef" (dict "fieldPath" "metadata.namespace"))) -}}
{{- $_ := set $baseExporterEnv "IPERF_TEST_TIMEOUT" ($exporterControllerConfig.appConfig.testTimeout | toString) -}}
{{- $serverLabelSelector := tpl ($exporterControllerConfig.appConfig.serverLabelSelector | default (printf "app.kubernetes.io/name=%s,app.kubernetes.io/instance=%s,app.kubernetes.io/component=server" $appName $release.Name)) . -}}
{{- $_ := set $baseExporterEnv "IPERF_SERVER_LABEL_SELECTOR" $serverLabelSelector -}}
{{- /*
Merge with any additional environment variables defined by the user
under controllers.exporter.containers.exporter.env.
User-defined values (from .Values.controllers.exporter.containers.exporter.env)
will take precedence if keys conflict, achieved by merging them on top of base.
*/}}
{{- $userExporterEnv := $exporterControllerConfig.containers.exporter.env | default dict -}}
{{- $finalExporterEnv := mergeOverwrite $baseExporterEnv $userExporterEnv -}}
{{- /*
Update the exporter container's env in our local copy of the controller config.
The common library expects the env map under containers.<container_name>.env.
The container name is assumed to be 'exporter' as per our values.yaml structure.
*/}}
{{- if not $exporterControllerConfig.containers.exporter -}}
{{- $_ := set $exporterControllerConfig.containers "exporter" dict -}}
{{- end -}}
{{- $_ := set $exporterControllerConfig.containers.exporter "env" $finalExporterEnv -}}
{{- /*
Configure Service Account for the exporter controller.
It should use the SA name defined in .Values.serviceAccount.name, and its creation
should be controlled by .Values.rbac.create.
The common library helper "bjw-s.common.lib.controller.serviceAccountName" will use
serviceAccount.name if serviceAccount.create is true.
*/}}
{{- $serviceAccountNameFromValues := .Values.serviceAccount.name | default (printf "%s-exporter" $fullName) -}}
{{- if not $exporterControllerConfig.serviceAccount -}}
{{- $_ := set $exporterControllerConfig "serviceAccount" dict -}}
{{- end -}}
{{- $_ := set $exporterControllerConfig.serviceAccount "name" $serviceAccountNameFromValues -}}
{{- $_ := set $exporterControllerConfig.serviceAccount "create" .Values.rbac.create -}}
{{- $_ := set $exporterControllerConfig.serviceAccount "automountServiceAccountToken" true -}} {{/* Explicitly set, though often default */}}
{{- /*
Call the common library template to render the controller (Deployment or DaemonSet).
Pass necessary context:
- controller: our modified $exporterControllerConfig.
- config: The top-level .Values.
- chart: The .Chart object.
- release: The .Release object.
- name: The application name (used by library for defaults if needed).
*/}}
{{- include "bjw-s.common.lib.chart.controller" (dict "controller" $exporterControllerConfig "config" $values "chart" $chart "release" $release "name" $appName ) -}}

templates/exporter-deployment.yaml (deleted)

@@ -1,50 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "iperf3-monitor.fullname" . }}-exporter
  labels:
    {{- include "iperf3-monitor.labels" . | nindent 4 }}
    app.kubernetes.io/component: exporter
spec:
  replicas: {{ .Values.exporter.replicaCount }}
  selector:
    matchLabels:
      {{- include "iperf3-monitor.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/component: exporter
  template:
    metadata:
      labels:
        {{- include "iperf3-monitor.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/component: exporter
    spec:
      serviceAccountName: {{ include "iperf3-monitor.serviceAccountName" . }}
      containers:
      - name: iperf3-exporter
        image: "{{ .Values.exporter.image.repository }}:{{ .Values.exporter.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
        imagePullPolicy: {{ .Values.exporter.image.pullPolicy }}
        ports:
        - containerPort: {{ .Values.service.targetPort }}
          name: metrics
        env:
        - name: SOURCE_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: IPERF_TEST_INTERVAL
          value: "{{ .Values.exporter.testInterval }}"
        - name: IPERF_TEST_PROTOCOL
          value: "{{ .Values.exporter.testProtocol }}"
        - name: LOG_LEVEL
          value: "{{ .Values.exporter.logLevel }}"
        - name: IPERF_SERVER_PORT
          value: "5201" # Hardcoded as per server DaemonSet
        - name: IPERF_SERVER_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: IPERF_SERVER_LABEL_SELECTOR
          value: 'app.kubernetes.io/name={{ include "iperf3-monitor.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=server'
        {{- with .Values.exporter.resources }}
        resources:
          {{- toYaml . | nindent 10 }}
        {{- end }}
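For anyone carrying overrides across this rename, the old flat `exporter.*` values map onto the new layout roughly as follows (derived from the two diffs in this commit):

```yaml
# Old value               -> New value
# exporter.image.*        -> controllers.exporter.containers.exporter.image.*
# exporter.replicaCount   -> controllers.exporter.replicas
# exporter.testInterval   -> controllers.exporter.appConfig.testInterval
# exporter.testProtocol   -> controllers.exporter.appConfig.testProtocol
# exporter.logLevel       -> controllers.exporter.appConfig.logLevel
# exporter.testTimeout    -> controllers.exporter.appConfig.testTimeout
# exporter.resources      -> controllers.exporter.containers.exporter.resources
```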

values.yaml

@@ -8,33 +8,93 @@ nameOverride: ""
 # -- Override the fully qualified app name.
 fullnameOverride: ""
 
-exporter:
-  # -- Configuration for the exporter container image.
-  image:
-    # -- The container image repository for the exporter.
-    repository: ghcr.io/malarinv/iperf3-monitor
-    # -- The container image tag for the exporter. If not set, the chart's appVersion is used.
-    tag: ""
-    # -- The image pull policy for the exporter container.
-    pullPolicy: IfNotPresent
-  # -- Number of exporter pod replicas. Typically 1 is sufficient.
-  replicaCount: 1
-  # -- Interval in seconds between complete test cycles (i.e., testing all server nodes).
-  testInterval: 300
-  # -- Log level for the iperf3 exporter (e.g., DEBUG, INFO, WARNING, ERROR, CRITICAL).
-  logLevel: INFO
-  # -- Timeout in seconds for a single iperf3 test run.
-  testTimeout: 10
-  # -- Protocol to use for testing (tcp or udp).
-  testProtocol: tcp
-  # -- CPU and memory resource requests and limits for the exporter pod.
-  # @default -- A small default is provided if commented out.
-  resources:
-    {}
-    # requests:
+controllers:
+  exporter:
+    # -- Enable the exporter controller.
+    enabled: true
+    # -- Set the controller type for the exporter.
+    # Valid options are "deployment" or "daemonset".
+    # @default -- "deployment"
+    type: deployment
+    # -- Number of desired exporter pods. Only used if type is "deployment".
+    # @default -- 1
+    replicas: 1
+    # -- Application-specific configuration for the iperf3 exporter.
+    # These values are used to populate environment variables for the exporter container.
+    appConfig:
+      # -- Interval in seconds between complete test cycles (i.e., testing all server nodes).
+      testInterval: 300
+      # -- Log level for the iperf3 exporter (e.g., DEBUG, INFO, WARNING, ERROR, CRITICAL).
+      logLevel: INFO
+      # -- Timeout in seconds for a single iperf3 test run.
+      testTimeout: 10
+      # -- Protocol to use for testing (tcp or udp).
+      testProtocol: tcp
+      # -- iperf3 server port to connect to. Should match the server's listening port.
+      # @default -- "5201" (hardcoded in the original chart for server daemonset)
+      serverPort: "5201"
+      # -- Label selector to find iperf3 server pods.
+      # This will be templated in the actual deployment.
+      # Example default (if not overridden by template logic): 'app.kubernetes.io/component=server'
+      serverLabelSelector: 'app.kubernetes.io/name={{ include "iperf3-monitor.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=server'
+    # -- Pod-level configurations for the exporter, leveraging bjw-s common library structure.
+    pod:
+      # -- Annotations for the exporter pod.
+      annotations: {}
+      # -- Labels for the exporter pod.
+      labels: {} # The common library will add its own default labels.
+      # -- Node selector for scheduling exporter pods.
+      nodeSelector: {}
+      # -- Tolerations for scheduling exporter pods.
+      tolerations: []
+      # -- Affinity rules for scheduling exporter pods.
+      affinity: {}
+      # -- Security context for the exporter pod.
+      securityContext: {}
+      # fsGroup: 65534
+      # runAsUser: 65534
+      # runAsGroup: 65534
+      # runAsNonRoot: true
+      # -- Automount service account token for the pod.
+      automountServiceAccountToken: true # Default from common lib
+    # -- Container-level configurations for the main exporter container.
+    containers:
+      exporter: # This is the primary container, name it 'exporter'
+        image:
+          # -- The container image repository for the exporter.
+          repository: ghcr.io/malarinv/iperf3-monitor
+          # -- The container image tag for the exporter. If not set, the chart's appVersion is used.
+          tag: "" # Defaults to .Chart.AppVersion via common library
+          # -- The image pull policy for the exporter container.
+          pullPolicy: IfNotPresent
+        # -- Environment variables for the exporter container.
+        # The actual env map will be constructed in the main chart template
+        # and passed to the common library. This section is for user overrides
+        # if they want to directly set other env vars using common lib's env schema.
+        env: {}
+        # Example:
+        # MY_CUSTOM_VAR: "my_value"
+        # ANOTHER_VAR:
+        #   valueFrom:
+        #     secretKeyRef:
+        #       name: mysecret
+        #       key: mykey
+        # -- Ports for the exporter container.
+        ports:
+          metrics: # Name of the port, will be used in Service definition
+            # -- Port number for the metrics endpoint on the container.
+            port: 9876 # Default, should match service.targetPort
+            # -- Protocol for the metrics port.
+            protocol: TCP # Common library defaults to TCP if not specified.
+            enabled: true # This port is enabled
+        # -- CPU and memory resource requests and limits for the exporter container.
+        resources:
+          {}
+          # requests:
@@ -44,6 +104,16 @@ exporter:
   #     cpu: "500m"
   #     memory: "256Mi"
+        # -- Probes configuration for the exporter container.
+        probes:
+          liveness:
+            enabled: false
+          readiness:
+            enabled: false
+          startup:
+            enabled: false
 
+# Server configuration (iperf3 server daemonset)
 server:
   # -- Configuration for the iperf3 server container image (DaemonSet).
   image:
@@ -53,8 +123,6 @@ server:
     tag: latest
 
   # -- CPU and memory resource requests and limits for the iperf3 server pods (DaemonSet).
-  # These should be very low as the server is mostly idle.
-  # @default -- A small default is provided if commented out.
   resources:
     {}
     # requests:
@@ -65,13 +133,9 @@ server:
   #   memory: "128Mi"
 
   # -- Node selector for scheduling iperf3 server pods.
-  # Use this to restrict the DaemonSet to a subset of nodes.
-  # @default -- {} (schedule on all nodes)
   nodeSelector: {}
 
-  # -- Tolerations for scheduling iperf3 server pods on tainted nodes (e.g., control-plane nodes).
-  # This is often necessary to include master nodes in the test mesh.
-  # @default -- Tolerates control-plane and master taints.
+  # -- Tolerations for scheduling iperf3 server pods on tainted nodes.
   tolerations:
     - key: "node-role.kubernetes.io/control-plane"
       operator: "Exists"
@@ -80,60 +144,61 @@ server:
       operator: "Exists"
       effect: "NoSchedule"
 
+# RBAC and ServiceAccount settings
+# These are for the exporter. The exporter deployment (managed by common library)
+# will need to use the ServiceAccount specified here or one created by the library.
 rbac:
   # -- If true, create ServiceAccount, ClusterRole, and ClusterRoleBinding for the exporter.
-  # Set to false if you manage RBAC externally.
   create: true
 
 serviceAccount:
-  # -- The name of the ServiceAccount to use for the exporter pod.
-  # Only used if rbac.create is false. If not set, it defaults to the chart's fullname.
+  # -- The name of the ServiceAccount to use/create for the exporter pod.
+  # If rbac.create is true, this SA is created. The exporter pod must use this SA.
   name: "iperf3-monitor"
 
+# Service Monitor configuration for Prometheus
 serviceMonitor:
-  # -- If true, create a ServiceMonitor resource for integration with Prometheus Operator.
-  # Requires a running Prometheus Operator in the cluster.
+  # -- If true, create a ServiceMonitor resource.
   enabled: true
-  # -- Scrape interval for the ServiceMonitor. How often Prometheus scrapes the exporter metrics.
+  # -- Scrape interval for the ServiceMonitor.
   interval: 60s
-  # -- Scrape timeout for the ServiceMonitor. How long Prometheus waits for metrics response.
+  # -- Scrape timeout for the ServiceMonitor.
   scrapeTimeout: 30s
 
-# -- Configuration for the exporter Service.
-# This defines how the exporter is exposed.
+# Service configuration for the exporter
+# The common library can also manage services, or we can use our own template.
+# This structure is compatible with bjw-s common library's service management if we choose to use it.
 service:
-  # -- Service type. ClusterIP is typically sufficient.
-  type: ClusterIP
-  # -- Port on which the exporter service is exposed.
-  port: 9876
-  # -- Target port on the exporter pod.
-  targetPort: 9876
+  main: # A key for the service, 'main' is a common convention.
+    # -- Enable the exporter service.
+    enabled: true
+    # -- Service type.
+    type: ClusterIP # ClusterIP is typical for internal services scraped by Prometheus.
+    # -- Ports configuration for the service.
+    ports:
+      metrics: # Name of the service port, should align with a container port name.
+        # -- Port number on which the service is exposed.
+        port: 9876
+        # -- Target port on the exporter pod. Can be a number or name.
+        # Refers to the 'metrics' port defined in controllers.exporter.containers.exporter.ports.
+        targetPort: metrics
+        protocol: TCP
 
-# -- Optional configuration for a network policy to allow traffic to the iperf3 server DaemonSet.
-# This is often necessary if you are using a network policy controller.
+# Network Policy (optional)
 networkPolicy:
   # -- If true, create a NetworkPolicy resource.
   enabled: false
-  # -- Specify source selectors if needed (e.g., pods in a specific namespace).
+  # -- Source selectors for ingress rules.
   from: []
-  # -- Specify namespace selectors if needed.
+  # -- Namespace selectors for ingress rules.
   namespaceSelector: {}
-  # -- Specify pod selectors if needed.
+  # -- Pod selectors for ingress rules.
   podSelector: {}
 
-# -----------------------------------------------------------------------------
-# Dependency Configuration
-# -----------------------------------------------------------------------------
+# Dependency Configuration (for Prometheus Operator)
 dependencies:
-  # -- Set to true to install Prometheus operator dependency if serviceMonitor.enabled is also true.
-  # -- Set to false to disable the installation of Prometheus operator dependency,
-  # -- regardless of serviceMonitor.enabled. This is useful if you have Prometheus
-  # -- Operator installed and managed separately in your cluster.
+  # -- If true, install Prometheus operator dependency (used if serviceMonitor.enabled=true).
   install: true
-  # -- Set to true to use the TrueCharts Prometheus Operator instead of kube-prometheus-stack.
-  # This chart's ServiceMonitor resources require a Prometheus Operator to be functional.
-  # If serviceMonitor.enabled is true and dependencies.install is true,
-  # one of these two dependencies will be pulled based on this flag.
+  # -- If true, use TrueCharts Prometheus Operator instead of kube-prometheus-stack.
   useTrueChartsPrometheusOperator: false
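One practical consequence of the new service layout: `targetPort` now references the container port by name, so moving the metrics endpoint only requires changing the container port. A hypothetical override:

```yaml
controllers:
  exporter:
    containers:
      exporter:
        ports:
          metrics:
            port: 9999          # new container port
service:
  main:
    ports:
      metrics:
        port: 9876              # external port can stay the same
        targetPort: metrics     # still resolves to 9999 by name
```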

get_helm.sh Executable file

@ -0,0 +1,347 @@
#!/usr/bin/env bash
# Copyright The Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The install script is based off of the MIT-licensed script from glide,
# the package manager for Go: https://github.com/Masterminds/glide.sh/blob/master/get
: ${BINARY_NAME:="helm"}
: ${USE_SUDO:="true"}
: ${DEBUG:="false"}
: ${VERIFY_CHECKSUM:="true"}
: ${VERIFY_SIGNATURES:="false"}
: ${HELM_INSTALL_DIR:="/usr/local/bin"}
: ${GPG_PUBRING:="pubring.kbx"}
HAS_CURL="$(type "curl" &> /dev/null && echo true || echo false)"
HAS_WGET="$(type "wget" &> /dev/null && echo true || echo false)"
HAS_OPENSSL="$(type "openssl" &> /dev/null && echo true || echo false)"
HAS_GPG="$(type "gpg" &> /dev/null && echo true || echo false)"
HAS_GIT="$(type "git" &> /dev/null && echo true || echo false)"
HAS_TAR="$(type "tar" &> /dev/null && echo true || echo false)"
# initArch discovers the architecture for this system.
initArch() {
ARCH=$(uname -m)
case $ARCH in
armv5*) ARCH="armv5";;
armv6*) ARCH="armv6";;
armv7*) ARCH="arm";;
aarch64) ARCH="arm64";;
x86) ARCH="386";;
x86_64) ARCH="amd64";;
i686) ARCH="386";;
i386) ARCH="386";;
esac
}
# initOS discovers the operating system for this system.
initOS() {
OS=$(echo `uname`|tr '[:upper:]' '[:lower:]')
case "$OS" in
# Minimalist GNU for Windows
mingw*|cygwin*) OS='windows';;
esac
}
# runs the given command as root (detects if we are root already)
runAsRoot() {
if [ $EUID -ne 0 -a "$USE_SUDO" = "true" ]; then
sudo "${@}"
else
"${@}"
fi
}
# verifySupported checks that the os/arch combination is supported for
# binary builds, as well whether or not necessary tools are present.
verifySupported() {
local supported="darwin-amd64\ndarwin-arm64\nlinux-386\nlinux-amd64\nlinux-arm\nlinux-arm64\nlinux-ppc64le\nlinux-s390x\nlinux-riscv64\nwindows-amd64\nwindows-arm64"
if ! echo "${supported}" | grep -q "${OS}-${ARCH}"; then
echo "No prebuilt binary for ${OS}-${ARCH}."
echo "To build from source, go to https://github.com/helm/helm"
exit 1
fi
if [ "${HAS_CURL}" != "true" ] && [ "${HAS_WGET}" != "true" ]; then
echo "Either curl or wget is required"
exit 1
fi
if [ "${VERIFY_CHECKSUM}" == "true" ] && [ "${HAS_OPENSSL}" != "true" ]; then
echo "In order to verify checksum, openssl must first be installed."
echo "Please install openssl or set VERIFY_CHECKSUM=false in your environment."
exit 1
fi
if [ "${VERIFY_SIGNATURES}" == "true" ]; then
if [ "${HAS_GPG}" != "true" ]; then
echo "In order to verify signatures, gpg must first be installed."
echo "Please install gpg or set VERIFY_SIGNATURES=false in your environment."
exit 1
fi
if [ "${OS}" != "linux" ]; then
echo "Signature verification is currently only supported on Linux."
echo "Please set VERIFY_SIGNATURES=false or verify the signatures manually."
exit 1
fi
fi
if [ "${HAS_GIT}" != "true" ]; then
echo "[WARNING] Could not find git. It is required for plugin installation."
fi
if [ "${HAS_TAR}" != "true" ]; then
echo "[ERROR] Could not find tar. It is required to extract the helm binary archive."
exit 1
fi
}
# checkDesiredVersion checks if the desired version is available.
checkDesiredVersion() {
if [ "x$DESIRED_VERSION" == "x" ]; then
# Get tag from release URL
local latest_release_url="https://get.helm.sh/helm-latest-version"
local latest_release_response=""
if [ "${HAS_CURL}" == "true" ]; then
latest_release_response=$( curl -L --silent --show-error --fail "$latest_release_url" 2>&1 || true )
elif [ "${HAS_WGET}" == "true" ]; then
latest_release_response=$( wget "$latest_release_url" -q -O - 2>&1 || true )
fi
TAG=$( echo "$latest_release_response" | grep '^v[0-9]' )
if [ "x$TAG" == "x" ]; then
printf "Could not retrieve the latest release tag information from %s: %s\n" "${latest_release_url}" "${latest_release_response}"
exit 1
fi
else
TAG=$DESIRED_VERSION
fi
}
# checkHelmInstalledVersion checks which version of helm is installed and
# if it needs to be changed.
checkHelmInstalledVersion() {
if [[ -f "${HELM_INSTALL_DIR}/${BINARY_NAME}" ]]; then
local version=$("${HELM_INSTALL_DIR}/${BINARY_NAME}" version --template="{{ .Version }}")
if [[ "$version" == "$TAG" ]]; then
echo "Helm ${version} is already ${DESIRED_VERSION:-latest}"
return 0
else
echo "Helm ${TAG} is available. Changing from version ${version}."
return 1
fi
else
return 1
fi
}
# downloadFile downloads the latest binary package and also the checksum
# for that binary.
downloadFile() {
HELM_DIST="helm-$TAG-$OS-$ARCH.tar.gz"
DOWNLOAD_URL="https://get.helm.sh/$HELM_DIST"
CHECKSUM_URL="$DOWNLOAD_URL.sha256"
HELM_TMP_ROOT="$(mktemp -dt helm-installer-XXXXXX)"
HELM_TMP_FILE="$HELM_TMP_ROOT/$HELM_DIST"
HELM_SUM_FILE="$HELM_TMP_ROOT/$HELM_DIST.sha256"
echo "Downloading $DOWNLOAD_URL"
if [ "${HAS_CURL}" == "true" ]; then
curl -SsL "$CHECKSUM_URL" -o "$HELM_SUM_FILE"
curl -SsL "$DOWNLOAD_URL" -o "$HELM_TMP_FILE"
elif [ "${HAS_WGET}" == "true" ]; then
wget -q -O "$HELM_SUM_FILE" "$CHECKSUM_URL"
wget -q -O "$HELM_TMP_FILE" "$DOWNLOAD_URL"
fi
}
# verifyFile verifies the SHA256 checksum of the binary package
# and the GPG signatures for both the package and checksum file
# (depending on settings in environment).
verifyFile() {
if [ "${VERIFY_CHECKSUM}" == "true" ]; then
verifyChecksum
fi
if [ "${VERIFY_SIGNATURES}" == "true" ]; then
verifySignatures
fi
}
# installFile installs the Helm binary.
installFile() {
HELM_TMP="$HELM_TMP_ROOT/$BINARY_NAME"
mkdir -p "$HELM_TMP"
tar xf "$HELM_TMP_FILE" -C "$HELM_TMP"
HELM_TMP_BIN="$HELM_TMP/$OS-$ARCH/helm"
echo "Preparing to install $BINARY_NAME into ${HELM_INSTALL_DIR}"
runAsRoot cp "$HELM_TMP_BIN" "$HELM_INSTALL_DIR/$BINARY_NAME"
echo "$BINARY_NAME installed into $HELM_INSTALL_DIR/$BINARY_NAME"
}
# verifyChecksum verifies the SHA256 checksum of the binary package.
verifyChecksum() {
printf "Verifying checksum... "
local sum=$(openssl sha1 -sha256 ${HELM_TMP_FILE} | awk '{print $2}')
local expected_sum=$(cat ${HELM_SUM_FILE})
if [ "$sum" != "$expected_sum" ]; then
echo "SHA sum of ${HELM_TMP_FILE} does not match. Aborting."
exit 1
fi
echo "Done."
}
# verifySignatures obtains the latest KEYS file from GitHub main branch
# as well as the signature .asc files from the specific GitHub release,
# then verifies that the release artifacts were signed by a maintainer's key.
verifySignatures() {
printf "Verifying signatures... "
local keys_filename="KEYS"
local github_keys_url="https://raw.githubusercontent.com/helm/helm/main/${keys_filename}"
if [ "${HAS_CURL}" == "true" ]; then
curl -SsL "${github_keys_url}" -o "${HELM_TMP_ROOT}/${keys_filename}"
elif [ "${HAS_WGET}" == "true" ]; then
wget -q -O "${HELM_TMP_ROOT}/${keys_filename}" "${github_keys_url}"
fi
local gpg_keyring="${HELM_TMP_ROOT}/keyring.gpg"
local gpg_homedir="${HELM_TMP_ROOT}/gnupg"
mkdir -p -m 0700 "${gpg_homedir}"
local gpg_stderr_device="/dev/null"
if [ "${DEBUG}" == "true" ]; then
gpg_stderr_device="/dev/stderr"
fi
gpg --batch --quiet --homedir="${gpg_homedir}" --import "${HELM_TMP_ROOT}/${keys_filename}" 2> "${gpg_stderr_device}"
gpg --batch --no-default-keyring --keyring "${gpg_homedir}/${GPG_PUBRING}" --export > "${gpg_keyring}"
local github_release_url="https://github.com/helm/helm/releases/download/${TAG}"
if [ "${HAS_CURL}" == "true" ]; then
curl -SsL "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" -o "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc"
curl -SsL "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" -o "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc"
elif [ "${HAS_WGET}" == "true" ]; then
wget -q -O "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc"
wget -q -O "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc"
fi
local error_text="If you think this might be a potential security issue,"
error_text="${error_text}\nplease see here: https://github.com/helm/community/blob/master/SECURITY.md"
local num_goodlines_sha=$(gpg --verify --keyring="${gpg_keyring}" --status-fd=1 "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" 2> "${gpg_stderr_device}" | grep -c -E '^\[GNUPG:\] (GOODSIG|VALIDSIG)')
if [[ ${num_goodlines_sha} -lt 2 ]]; then
echo "Unable to verify the signature of helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256!"
echo -e "${error_text}"
exit 1
fi
local num_goodlines_tar=$(gpg --verify --keyring="${gpg_keyring}" --status-fd=1 "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" 2> "${gpg_stderr_device}" | grep -c -E '^\[GNUPG:\] (GOODSIG|VALIDSIG)')
if [[ ${num_goodlines_tar} -lt 2 ]]; then
echo "Unable to verify the signature of helm-${TAG}-${OS}-${ARCH}.tar.gz!"
echo -e "${error_text}"
exit 1
fi
echo "Done."
}
# fail_trap is executed if an error occurs.
fail_trap() {
result=$?
if [ "$result" != "0" ]; then
if [[ -n "$INPUT_ARGUMENTS" ]]; then
echo "Failed to install $BINARY_NAME with the arguments provided: $INPUT_ARGUMENTS"
help
else
echo "Failed to install $BINARY_NAME"
fi
echo -e "\tFor support, go to https://github.com/helm/helm."
fi
cleanup
exit $result
}
# testVersion tests the installed client to make sure it is working.
testVersion() {
set +e
HELM="$(command -v $BINARY_NAME)"
if [ "$?" = "1" ]; then
echo "$BINARY_NAME not found. Is $HELM_INSTALL_DIR on your "'$PATH?'
exit 1
fi
set -e
}
# help provides possible cli installation arguments
help () {
echo "Accepted cli arguments are:"
echo -e "\t[--help|-h ] ->> prints this help"
echo -e "\t[--version|-v <desired_version>] . When not defined it fetches the latest release tag from the Helm CDN"
echo -e "\te.g. --version v3.0.0 or -v canary"
echo -e "\t[--no-sudo] ->> install without sudo"
}
# cleanup temporary files to avoid https://github.com/helm/helm/issues/2977
cleanup() {
if [[ -d "${HELM_TMP_ROOT:-}" ]]; then
rm -rf "$HELM_TMP_ROOT"
fi
}
# Execution
#Stop execution on any error
trap "fail_trap" EXIT
set -e
# Set debug if desired
if [ "${DEBUG}" == "true" ]; then
set -x
fi
# Parsing input arguments (if any)
export INPUT_ARGUMENTS="${@}"
set -u
while [[ $# -gt 0 ]]; do
case $1 in
'--version'|-v)
shift
if [[ $# -ne 0 ]]; then
export DESIRED_VERSION="${1}"
if [[ "$1" != "v"* ]]; then
echo "Expected version arg ('${DESIRED_VERSION}') to begin with 'v', fixing..."
export DESIRED_VERSION="v${1}"
fi
else
echo -e "Please provide the desired version. e.g. --version v3.0.0 or -v canary"
exit 0
fi
;;
'--no-sudo')
USE_SUDO="false"
;;
'--help'|-h)
help
exit 0
;;
*) exit 1
;;
esac
shift
done
set +u
initArch
initOS
verifySupported
checkDesiredVersion
if ! checkHelmInstalledVersion; then
downloadFile
verifyFile
installFile
fi
testVersion
cleanup
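Typical invocations of this vendored installer (flags per its own help text; the pinned version is only an example):

```bash
./get_helm.sh                     # install the latest release (uses sudo)
./get_helm.sh --version v3.15.2   # pin a specific version
./get_helm.sh --no-sudo           # requires HELM_INSTALL_DIR to be writable
```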