helm install nginx bitnami/nginx
NAME: nginx
LAST DEPLOYED: Thu Jun 24 11:01:24 2021
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
** Please be patient while the chart is being deployed **
NGINX can be accessed through the following DNS name from within your cluster:
nginx.default.svc.cluster.local (port 80)
To access NGINX from outside the cluster, follow the steps below:
1. Get the NGINX URL by running these commands:
NOTE: It may take a few minutes for the LoadBalancer IP to be available. Watch the status with: 'kubectl get svc --namespace default -w nginx'
export SERVICE_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].port}" services nginx)
export SERVICE_IP=$(kubectl get svc --namespace default nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo "http://${SERVICE_IP}:${SERVICE_PORT}"
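Once the external IP is available, a quick smoke test of the exported URL (using curl from the same machine) is:

curl -I "http://${SERVICE_IP}:${SERVICE_PORT}"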
## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
#   imageRegistry: myRegistryName
#   imagePullSecrets:
#     - myRegistryKeySecretName
## Bitnami NGINX image version
## ref: https://hub.docker.com/r/bitnami/nginx/tags/
##
image:
  registry: docker.io
  repository: bitnami/nginx
  tag: 1.21.0-debian-10-r0
  ## Specify a imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## E.g.:
  ## pullSecrets:
  ##   - myRegistryKeySecretName
  ##
  pullSecrets: []
  ## Set to true if you would like to see extra information on logs
  ##
  debug: false
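As a hypothetical example, any of these image parameters can be overridden at install time with --set flags; the values below are illustrative:

helm install nginx bitnami/nginx \
  --set image.tag=1.21.0-debian-10-r0 \
  --set image.pullPolicy=Always \
  --set image.debug=true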
## String to partially override nginx.fullname template (will maintain the release name)
##
# nameOverride:

## String to fully override nginx.fullname template
##
# fullnameOverride:
## Force target Kubernetes version (using Helm capabilities if not set)
##
kubeVersion:
## Deployment pod host aliases
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
##
hostAliases: []

## Extra objects to deploy (value evaluated as a template)
##
extraDeploy: []

## Add labels to all the deployed resources
##
commonLabels: {}

## Add annotations to all the deployed resources
##
commonAnnotations: {}
## Command and args for running the container (set to default if not set). Use array form
##
# command:
# args:

## Additional environment variables to set
## E.g:
## extraEnvVars:
##   - name: FOO
##     value: BAR
##
extraEnvVars: []

## ConfigMap with extra environment variables
##
# extraEnvVarsCM:

## Secret with extra environment variables
##
# extraEnvVarsSecret:
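Both references point at objects that must already exist in the release namespace. A sketch of how they could be created and wired up, using the hypothetical names nginx-env-cm and nginx-env-secret:

kubectl create configmap nginx-env-cm --from-literal=FOO=bar
kubectl create secret generic nginx-env-secret --from-literal=API_KEY=changeme

and then in the values file:

extraEnvVarsCM: nginx-env-cm
extraEnvVarsSecret: nginx-env-secret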
## Get the server static content from a git repository
## NOTE: This will override staticSiteConfigmap and staticSitePVC
##
cloneStaticSiteFromGit:
  enabled: false
  ## Bitnami Git image version
  ## ref: https://hub.docker.com/r/bitnami/git/tags/
  ##
  image:
    registry: docker.io
    repository: bitnami/git
    tag: 2.31.1-debian-10-r63
    ## Specify a imagePullPolicy
    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
    ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
    ##
    pullPolicy: IfNotPresent
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ##
    # pullSecrets:
    #   - myRegistryKeySecretName
  ## Repository to clone static content from
  ##
  # repository:
  ## Branch inside the git repository
  ##
  # branch:
  ## Interval for sidecar container pull from the repository
  ##
  interval: 60
  ## Additional configuration for git-clone-repository initContainer
  ##
  gitClone:
    ## Override container command
    ##
    command: []
    ## Override container args
    ##
    args: []
  ## Additional configuration for the git-repo-syncer container
  ##
  gitSync:
    ## Override container command
    ##
    command: []
    ## Override container args
    ##
    args: []
  ## Additional environment variables to set in the containers that clone the static site from git
  ## E.g:
  ## extraEnvVars:
  ##   - name: FOO
  ##     value: BAR
  ##
  extraEnvVars: []
  ## Add extra volume mounts for the GIT containers
  ## Useful to mount keys to connect through ssh. (normally used with extraVolumes)
  ## E.g:
  ## extraVolumeMounts:
  ##   - name: ssh-dir
  ##     mountPath: /root/.ssh/
  ##
  extraVolumeMounts: []
## Custom server block to be added to NGINX configuration
## PHP-FPM example server block:
## serverBlock: |-
##   server {
##     listen 0.0.0.0:8080;
##     root /app;
##     location / {
##       index index.html index.php;
##     }
##     location ~ \.php$ {
##       fastcgi_pass phpfpm-server:9000;
##       fastcgi_index index.php;
##       include fastcgi.conf;
##     }
##   }
##
# serverBlock:

## ConfigMap with custom server block to be added to NGINX configuration
## NOTE: This will override serverBlock
##
# existingServerBlockConfigmap:

## Name of existing ConfigMap with the server static site content
##
# staticSiteConfigmap

## Name of existing PVC with the server static site content
## NOTE: This will override staticSiteConfigmap
##
# staticSitePVC
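As a sketch, a ConfigMap built from a local index.html could supply the static content; the name my-static-site is hypothetical:

kubectl create configmap my-static-site --from-file=index.html

and then in the values file:

staticSiteConfigmap: my-static-site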
## Number of replicas to deploy
##
replicaCount: 1

## Pod extra labels
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}

## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}

## Pod affinity preset
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
## Allowed values: soft, hard
##
podAffinityPreset: ""

## Pod anti-affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
## Allowed values: soft, hard
##
podAntiAffinityPreset: soft
## Node affinity preset
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
## Allowed values: soft, hard
##
nodeAffinityPreset:
  ## Node affinity type
  ## Allowed values: soft, hard
  ##
  type: ""
  ## Node label key to match
  ## E.g.
  ## key: "kubernetes.io/e2e-az-name"
  ##
  key: ""
  ## Node label values to match
  ## E.g.
  ## values:
  ##   - e2e-az1
  ##   - e2e-az2
  ##
  values: []
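For example, a hard requirement to schedule only onto nodes in particular zones (the label key and zone names below are illustrative) would look like:

nodeAffinityPreset:
  type: hard
  key: topology.kubernetes.io/zone
  values:
    - us-east-1a
    - us-east-1b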
## Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
affinity: {}

## Node labels for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}

## Tolerations for pod assignment. Evaluated as a template.
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: {}

## Priority class name
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
##
priorityClassName: ""

## Configures the ports NGINX listens on
##
containerPorts:
  http: 8080
  # https: 8443
## NGINX containers' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  limits: {}
  #   cpu: 100m
  #   memory: 128Mi
  requests: {}
  #   cpu: 100m
  #   memory: 128Mi

## Array to add extra volumes (evaluated as a template)
##
extraVolumes: []

## Array to add extra mounts (normally used with extraVolumes, evaluated as a template)
##
extraVolumeMounts: []
## Pods Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
  ## Specifies whether a ServiceAccount should be created
  ##
  create: false
  ## The name of the ServiceAccount to use.
  ## If not set and create is true, a name is generated using the `common.names.fullname` template
  ##
  # name:
  ## Annotations for service account. Evaluated as a template.
  ## Only used if `create` is `true`.
  ##
  annotations: {}
## NGINX Service properties
##
service:
  ## Service type
  ##
  type: LoadBalancer
  ## HTTP Port
  ##
  port: 80
  ## HTTPS Port
  ##
  httpsPort: 443
  ## Specify the nodePort(s) value(s) for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  nodePorts:
    http: ""
    https: ""
  ## Target port reference value for the Loadbalancer service types can be specified explicitly.
  ## Listeners for the Loadbalancer can be custom mapped to the http or https service.
  ## Example: Mapping the https listener to targetPort http [http: https]
  ##
  targetPort:
    http: http
    https: https
  ## Set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  # loadBalancerIP:
  ## Provide any additional annotations which may be required. This can be used to
  ## set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  annotations: {}
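As one concrete override, a config.yaml that switches the service to NodePort with fixed ports (the port numbers are illustrative) might contain:

service:
  type: NodePort
  nodePorts:
    http: "30080"
    https: "30443"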
## LDAP Auth Daemon Configuration
##
## These different properties define the form of requests performed
## against the given LDAP server
##
## BEWARE THAT THESE VALUES WILL BE IGNORED IF A CUSTOM LDAP SERVER BLOCK
## ALREADY SPECIFIES THEM.
##
ldapConfig:
  ## LDAP URI where to query the server
  ## Must follow the pattern -> ldap[s]://<hostname>:<port>
  ##
  uri: ""
  ## LDAP search base DN
  ##
  baseDN: ""
  ## LDAP bind DN
  ##
  bindDN: ""
  ## LDAP bind Password
  ##
  bindPassword: ""
  ## LDAP search filter
  ##
  filter: ""
  ## LDAP auth realm
  ##
  httpRealm: ""
  ## LDAP cookie name
  ##
  httpCookieName: ""
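A filled-in ldapConfig might look like the sketch below; every value is a placeholder and assumes an LDAP server reachable inside the cluster:

ldapConfig:
  uri: "ldap://openldap.default.svc.cluster.local:389"
  baseDN: "dc=example,dc=org"
  bindDN: "cn=admin,dc=example,dc=org"
  bindPassword: "adminpassword"
  filter: "(objectClass=person)"
  httpRealm: "Restricted"
  httpCookieName: "nginxauth"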
## NGINX Configuration File containing the directives (that define
## how LDAP requests are performed) and telling NGINX to use the LDAP Daemon
## as proxy. It also defines the routes that will require LDAP auth
## in order to be accessed.
##
## If LDAP directives are provided, they will take precedence over
## the ones specified in ldapConfig.
##
## This will be evaluated as a template.
##
nginxServerBlock: |-
  server {
    listen 0.0.0.0:{{ .Values.containerPorts.http }};

    # You can provide a special subPath or the root
    location = / {
      auth_request /auth-proxy;
    }

    ###############################################################
    # YOU SHOULD CHANGE THE FOLLOWING TO YOUR LDAP CONFIGURATION  #
    ###############################################################

    # URL and port for connecting to the LDAP server
    # proxy_set_header X-Ldap-URL "ldap://YOUR_LDAP_SERVER_IP:YOUR_LDAP_SERVER_PORT";

    # Base DN
    # proxy_set_header X-Ldap-BaseDN "dc=example,dc=org";
## Use an existing Secret holding an NGINX Configuration file that
## configures LDAP requests. (will be evaluated as a template)
##
## If provided, both nginxServerBlock and ldapConfig properties are ignored.
##
existingNginxServerBlockSecret:
## Configure the ingress resource that allows you to access the
## Nginx installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
  ## Set to true to enable ingress record generation
  ##
  enabled: false
  ## Set this to true in order to add the corresponding annotations for cert-manager
  ##
  certManager: false
  ## Ingress Path type
  ##
  pathType: ImplementationSpecific
  ## Override API Version (automatically detected if not set)
  ##
  apiVersion:
  ## When the ingress is enabled, a host pointing to this will be created
  ##
  hostname: nginx.local
  ## The Path to Nginx. You may need to set this to '/*' in order to use this
  ## with ALB ingress controllers.
  ##
  path: /
  ## Ingress annotations done as key:value pairs
  ## For a full list of possible ingress annotations, please see
  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
  ##
  ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
  ##
  annotations: {}
  ## Enable TLS configuration for the hostname defined at ingress.hostname parameter
  ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }}
  ## You can use the ingress.secrets parameter to create this TLS secret or rely on cert-manager to create it
  ##
  tls: false
  ## The list of additional hostnames to be covered with this ingress record.
  ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
  ## extraHosts:
  ##   - name: nginx.local
  ##     path: /
  ##
  ## Any additional arbitrary paths that may need to be added to the ingress under the main host.
  ## For example: The ALB ingress controller requires a special rule for handling SSL redirection.
  ## extraPaths:
  ##   - path: /*
  ##     backend:
  ##       serviceName: ssl-redirect
  ##       servicePort: use-annotation
  ##
  ## The tls configuration for additional hostnames to be covered with this ingress record.
  ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
  ## extraTls:
  ##   - hosts:
  ##       - nginx.local
  ##     secretName: nginx.local-tls
  ##
  ## If you're providing your own certificates, please use this to add the certificates as secrets
  ## key and certificate should start with -----BEGIN CERTIFICATE----- or
  ## -----BEGIN RSA PRIVATE KEY-----
  ##
  ## name should line up with a tlsSecret set further up
  ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
  ##
  ## It is also possible to create and manage the certificates outside of this helm chart
  ## Please see README.md for more information
  ##
  secrets: []
  ## - name: nginx.local-tls
  ##   key:
  ##   certificate:
  ##
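Tying those parameters together, a minimal override that exposes the chart through an ingress with cert-manager-issued TLS (the hostname and ingress class are assumptions about your cluster) could be:

ingress:
  enabled: true
  hostname: www.example.com
  certManager: true
  tls: true
  annotations:
    kubernetes.io/ingress.class: nginx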
## Health Ingress parameters
##
healthIngress:
  ## Set to true to enable health ingress record generation
  ##
  enabled: false
  ## Set this to true in order to add the corresponding annotations for cert-manager
  ##
  certManager: false
  ## Ingress Path type
  ##
  pathType: ImplementationSpecific
  ## When the health ingress is enabled, a host pointing to this will be created
  ##
  hostname: example.local
  ## Health Ingress annotations done as key:value pairs
  ## For a full list of possible ingress annotations, please see
  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
  ##
  ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
  ##
  annotations: {}
  ## Enable TLS configuration for the hostname defined at healthIngress.hostname parameter
  ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.healthIngress.hostname }}
  ## You can use the healthIngress.secrets parameter to create this TLS secret, rely on cert-manager to create it, or
  ## let the chart create self-signed certificates for you
  ##
  tls: false
  ## The list of additional hostnames to be covered with this health ingress record.
  ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
  ## E.g.
  ## extraHosts:
  ##   - name: example.local
  ##     path: /
  ##
  extraHosts: []
  ## The tls configuration for additional hostnames to be covered with this health ingress record.
  ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
  ## E.g.
  ## extraTls:
  ##   - hosts:
  ##       - example.local
  ##     secretName: example.local-tls
  ##
  extraTls: []
  ## If you're providing your own certificates, please use this to add the certificates as secrets
  ## key and certificate should start with -----BEGIN CERTIFICATE----- or -----BEGIN RSA PRIVATE KEY-----
  ## name should line up with a secretName set further up
  ## If it is not set and you're using cert-manager, this is unneeded, as it will create the secret for you
  ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created
  ## It is also possible to create and manage the certificates outside of this helm chart
  ## Please see README.md for more information
  ##
  ## E.g.
  ## secrets:
  ##   - name: example.local-tls
  ##     key:
  ##     certificate:
  ##
  secrets: []
metrics:
  ## Prometheus exporter service parameters
  ##
  service:
    ## NGINX Prometheus exporter port
    ##
    port: 9113
    ## Annotations for the Prometheus exporter service
    ##
    annotations:
      prometheus.io/scrape: "true"
      prometheus.io/port: "{{ .Values.metrics.service.port }}"
  ## NGINX Prometheus exporter resource requests and limits
  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
  ##
  resources:
    # We usually recommend not to specify default resources and to leave this as a conscious
    # choice for the user. This also increases chances charts run on environments with little
    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    limits: {}
    #   cpu: 100m
    #   memory: 128Mi
    requests: {}
    #   cpu: 100m
    #   memory: 128Mi
  ## Prometheus Operator ServiceMonitor configuration
  ##
  serviceMonitor:
    enabled: false
    ## Namespace in which Prometheus is running
    ##
    # namespace: monitoring
    ## Interval at which metrics should be scraped.
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    # interval: 10s
    ## Timeout after which the scrape is ended
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    # scrapeTimeout: 10s
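For example, enabling the ServiceMonitor for a Prometheus Operator that runs in a monitoring namespace (a sketch; the namespace and scrape interval are assumptions about your cluster) could look like:

metrics:
  serviceMonitor:
    enabled: true
    namespace: monitoring
    interval: 10s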
## Pod Disruption Budget configuration
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
##
pdb:
  create: false
  ## Min number of pods that must still be available after the eviction
  ##
  minAvailable: 1
  ## Max number of pods that can be unavailable after the eviction
  ##
  # maxUnavailable: 1
helm upgrade -f config.yaml nginx bitnami/nginx
Release "nginx" has been upgraded. Happy Helming!
NAME: nginx
LAST DEPLOYED: Thu Jun 24 11:08:00 2021
NAMESPACE: default
STATUS: deployed
REVISION: 2
TEST SUITE: None
NOTES:
** Please be patient while the chart is being deployed **
NGINX can be accessed through the following DNS name from within your cluster:
nginx.default.svc.cluster.local (port 80)
To access NGINX from outside the cluster, follow the steps below:
1. Get the NGINX URL by running these commands:
NOTE: It may take a few minutes for the LoadBalancer IP to be available. Watch the status with: 'kubectl get svc --namespace default -w nginx'
export SERVICE_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].port}" services nginx)
export SERVICE_IP=$(kubectl get svc --namespace default nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo "http://${SERVICE_IP}:${SERVICE_PORT}"

WARNING: Rolling tag detected (bitnami/nginx:latest), please note that it is strongly recommended to avoid using rolling tags in a production environment.
+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
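The rolling-tag warning indicates that this upgrade switched the deployment to the bitnami/nginx:latest image. A config.yaml that would produce it (an assumption about the file's actual contents, which are not shown here) is simply:

image:
  tag: latest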
kubectl get pods
NAME                     READY   STATUS    RESTARTS   AGE
nginx-84f6956df4-8pg9k   1/1     Running   0          37s