This is the complete output of `helm inspect values suse/kubecf` for
the current SUSE Cloud Application Platform 2.1.1 release.
# REQUIRED: the domain under which the deployment will be visible to users.
# system_domain: example.com
# List of stacks to install; the first one will be used as the default.
# A stack is a prebuilt root file system that supports a specific
# operating system with a corresponding set of buildpacks.
install_stacks: [sle15, cflinuxfs3]
# Set or override job properties. The first level of the map is the instance group name. The second
# level of the map is the job name. E.g.:
# properties:
#   adapter:
#     adapter:
#       scalablesyslog:
#         adapter:
#           logs:
#             addr: kubecf-log-api:8082
#
properties: {}
# Override credentials to not be auto-generated. The credentials can either be
# specified as a nested mapping, or with a dot-separated key. For example:
# credentials:
#   cf_admin_password: changeme
#   credhub_tls.ca: credhub-real-ca
#   credhub_tls:
#     certificate: the-cert
credentials: {}
# Override variable values to not be auto-generated. The variables are a simple
# mapping with keys/values. Note that the `system_domain` domain is handled
# differently and must be set via the top-level key (which is required).
# For example:
# variables:
#   key: value
variables: {}
kube:
  # The storage class to be used for the instance groups that need it (e.g. bits, database and
  # singleton-blobstore). If it's not set, the default storage class will be used.
  storage_class: ~
  # The psp key contains the configuration related to Pod Security Policies. By default, a PSP will
  # be generated with the necessary permissions for running KubeCF. To pass an existing PSP and
  # prevent KubeCF from creating a new one, set kube.psp.default to the PSP name.
  psp:
    default: ~
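  # For example, to use a pre-existing PSP (the name "my-psp" is a placeholder):
  # psp:
  #   default: my-psp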
# The global list of image pull secret names. The secrets themselves will have to be created by
# the user before installing kubecf.
image_pull_secrets: []
# Set to true to enable support for multiple availability zones.
multi_az: false
# Set to true to enable high availability mode, where pods are replicated in
# order to prevent downtime when a node is temporarily unavailable.
high_availability: false
# Instance sizing takes precedence over the high_availability property. I.e. setting the
# instance count for an instance group to a value greater than 1 will make it highly available.
#
# It is also possible to specify custom affinity rules for each instance group. If no rule
# is provided, then each group has anti-affinity to itself, to try to spread the pods between
# different nodes. In addition, diego-cell and router also have anti-affinity to each other.
#
# The default rules look like this:
#
# sizing:
#   sample_group:
#     affinity:
#       podAntiAffinity:
#         preferredDuringSchedulingIgnoredDuringExecution:
#         - weight: 100
#           podAffinityTerm:
#             labelSelector:
#               matchExpressions:
#               - key: quarks.cloudfoundry.org/quarks-statefulset-name
#                 operator: In
#                 values:
#                 - sample_group
#             topologyKey: kubernetes.io/hostname
#
# Any affinity rules specified here will *overwrite* the default rule and not merge with it.
sizing:
  adapter:
    instances: ~
  api:
    instances: ~
  apps_dns:
    instances: ~
  asactors:
    instances: ~
  asapi:
    instances: ~
  asmetrics:
    instances: ~
  asnozzle:
    instances: ~
  auctioneer:
    instances: ~
  bits:
    instances: ~
  cc_worker:
    instances: ~
  credhub:
    instances: ~
  database:
    persistence:
      size: 20Gi
  diego_api:
    instances: ~
  diego_cell:
    ephemeral_disk:
      # Size of the ephemeral disk used to store applications, in MB.
      size: 40960
      # IMPORTANT! Only set this if you understand the consequences of using a PVC as ephemeral
      # storage for diego cells. The storage class should be high performance, and not based on NFS.
      # Do not set this value in production environments unless you've tested your storage class with
      # diego cells and have found no problems.
      # The name of the storage class used for the ephemeral disk PVC.
      storage_class: ~
    instances: ~
  doppler:
    instances: ~
  eirini:
    instances: ~
  log_api:
    instances: ~
  nats:
    instances: ~
  router:
    instances: ~
  routing_api:
    instances: ~
  scheduler:
    instances: ~
  uaa:
    instances: ~
  tcp_router:
    instances: ~
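  # For example, to force three router instances regardless of the
  # high_availability setting (the count is illustrative):
  # router:
  #   instances: 3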
# External endpoints are created for the instance groups only if features.ingress.enabled is false.
services:
  router:
    annotations: {}
    type: LoadBalancer
    externalIPs: []
    clusterIP: ~
    loadBalancerIP: ~
  ssh-proxy:
    annotations: {}
    type: LoadBalancer
    externalIPs: []
    clusterIP: ~
    loadBalancerIP: ~
  tcp-router:
    annotations: {}
    type: LoadBalancer
    externalIPs: []
    clusterIP: ~
    loadBalancerIP: ~
    port_range:
      start: 20000
      end: 20008
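# For example, to expose the router on a fixed external IP instead of relying
# on a cloud load balancer (the address is a placeholder):
# services:
#   router:
#     externalIPs: [192.0.2.10]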
# CPU and memory resources can be configured via the `resources` tree when features.cpu_limits.enabled
# or features.memory_limits.enabled are set respectively. Each setting covers both limit and request
# settings for its resource type.
#
# The helm chart includes default memory limits for all processes, and some explicit requests. When no
# request size is specified, a default is calculated as a percentage of the limit, subject to a
# minimum threshold, but never more than the limit itself. See the features.memory_limits setting to
# fine-tune this algorithm.
#
# All values are integers; cpu values are in millicpus (m) and memory is in megabytes (Mi).
#
# More information about the `resources` structure can be found in the config/resources.yaml file
# inside this helm chart.
resources:
  diego-cell:
    garden:
      garden: {memory: {limit: 524288, request: 16}}
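  # For example, to raise a single process's memory limit, follow the same
  # instance-group/job/process nesting (the value is illustrative only):
  # diego-cell:
  #   garden:
  #     garden: {memory: {limit: 1048576}}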
settings:
  router:
    # tls sets up the public TLS for the router. The tls keys:
    # crt: the certificate in the PEM format. Required.
    # key: the private key in the PEM format. Required.
    tls: {}
    # crt: |
    #   -----BEGIN CERTIFICATE-----
    #   ...
    #   -----END CERTIFICATE-----
    # key: |
    #   -----BEGIN PRIVATE KEY-----
    #   ...
    #   -----END PRIVATE KEY-----
features:
  # Set default memory limits and requests for all containers.
  memory_limits:
    enabled: true
    # The memory request size default is calculated as a percentage of the limit.
    # The default is always at least a minimum value, but never larger than the limit itself.
    default_request_minimum: 32
    default_request_in_percent: 25
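    # With these defaults, a process with a 512 Mi limit gets a request of
    # 512 * 25% = 128 Mi, while one with a 64 Mi limit gets the 32 Mi minimum
    # (64 * 25% = 16 Mi, which is below the minimum).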
  eirini:
    enabled: false
  # To support multiple clusters with diego-cell deployed separately, set
  # control_plane.enabled to false and cell_segment.enabled to true on the
  # cluster that runs only the cells.
  multiple_cluster_mode:
    control_plane:
      enabled: false
    cell_segment:
      enabled: false
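    # For example, a cell-only cluster would instead set (illustrative):
    # control_plane:
    #   enabled: false
    # cell_segment:
    #   enabled: true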
    # To support multiple clusters: addresses of the services needed by a
    # separately deployed diego-cell.
    control_plane_workers:
      uaa:
        name: uaa
        addresses:
        - ip: ~
      diego_api:
        name: diego-api
        addresses:
        - ip: ~
      api:
        name: api
        addresses:
        - ip: ~
      singleton_blobstore:
        name: singleton-blobstore
        addresses:
        - ip: ~
    # To support multiple clusters: provider link secrets for a separately
    # deployed diego-cell.
    provider_link_service:
      nats:
        secret_name: minion-link-nats
        service_name: minion-service-nats
        addresses:
        - ip: ~
        # To support multiple clusters, fill in the provider link secret contents for nats, for example:
        # link: |
        #   ---
        #   nats.user: "nats"
        #   nats.password: "xxxxxx"
        #   nats.hostname: "nats"
        #   nats.port: 4222
        link: ~
      nats_tls:
        secret_name: minion-link-nats-tls
        service_name: minion-service-nats-tls
        addresses:
        - ip: ~
        # To support multiple clusters, fill in the provider link secret contents for nats_tls, for example:
        # link: |
        #   ---
        #   nats.user: "nats"
        #   nats.password: "xxxxxx"
        #   nats.hostname: "nats"
        #   nats.port: 4223
        #   nats.external.tls.ca: ""
        link: ~
      routing_api:
        secret_name: minion-link-routing-api
        service_name: minion-service-routing-api
        addresses:
        - ip: ~
        # To support multiple clusters, fill in the provider link secret contents for routing-api, for example:
        # link: |
        #   routing_api.clients: ~
        #   routing_api.system_domain: "xxx.xxx.xxx"
        #   routing_api.port: 3000
        #   routing_api.mtls_port: 3001
        #   routing_api.mtls_ca: |
        #     -----BEGIN CERTIFICATE-----
        #     xxxxxx
        #     -----END CERTIFICATE-----
        #   ......
        link: ~
      doppler:
        secret_name: minion-link-doppler
        service_name: minion-service-doppler
        addresses:
        - ip: ~
        # To support multiple clusters, fill in the provider link secret contents for doppler, for example:
        # link: |
        #   doppler.grpc_port: 8082
        link: ~
      loggregator:
        secret_name: minion-link-loggregator
        service_name: minion-service-loggregator
        addresses:
        - ip: ~
        # To support multiple clusters, fill in the provider link secret contents for loggregator, for example:
        # link: |
        #   loggregator.tls.ca_cert: |
        #     -----BEGIN CERTIFICATE-----
        #     xxxxxx
        #     -----END CERTIFICATE-----
        #   ......
        link: ~
      cloud_controller:
        secret_name: minion-link-cloud-controller
        service_name: minion-service-cloud-controller
        addresses:
        - ip: ~
        # To support multiple clusters, fill in the provider link secret contents for cloud-controller, for example:
        # link: |
        #   system_domain: "{{ .Values.system_domain }}"
        #   app_domains: []
        link: ~
      cloud_controller_container_networking_info:
        secret_name: minion-link-cloud-controller-container-networking-info
        service_name: minion-service-cloud-controller-container-networking-info
        addresses:
        - ip: ~
        # link: |
        #   cc.internal_route_vip_range: "127.128.0.0/9"
        link: ~
      cf_network:
        secret_name: minion-link-cf-network
        service_name: minion-service-cf-network
        addresses:
        - ip: ~
        # link: |
        #   network: "10.255.0.0/16"
        #   subnet_prefix_length: 24
        link: ~
    # CA certificates from the control plane, used to generate the certificates
    # required by the diego cells.
    control_plane_ca:
      service_cf_internal_ca:
        name: service-cf-internal-ca
        certificate: ~
        private_key: ~
      application_ca:
        name: application-ca
        certificate: ~
        private_key: ~
      loggregator_ca:
        name: loggregator-ca
        certificate: ~
        private_key: ~
      metric_scraper_ca:
        name: metric-scraper-ca
        certificate: ~
        private_key: ~
      silk_ca:
        name: silk-ca
        certificate: ~
        private_key: ~
      network_policy_ca:
        name: network-policy-ca
        certificate: ~
        private_key: ~
      cf_app_sd_ca:
        name: cf-app-sd-ca
        certificate: ~
        private_key: ~
      nats_ca:
        name: nats-ca
        certificate: ~
        private_key: ~
  ingress:
    enabled: false
    tls:
      # TLS certificate for the ingress controller. This should be a wildcard certificate for the
      # system domain (*.example.com, where api.example.com is the API endpoint). It should also
      # include the full certificate chain (that is, include the intermediate certificates).
      crt: ~
      # TLS certificate private key for the ingress controller, matching features.ingress.tls.crt.
      key: ~
    annotations: {}
    labels: {}
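  # For example, to terminate TLS at a pre-installed nginx ingress controller
  # (the annotation is illustrative; use whatever your controller expects):
  # ingress:
  #   enabled: true
  #   annotations:
  #     kubernetes.io/ingress.class: nginx
  #   tls:
  #     crt: |
  #       -----BEGIN CERTIFICATE-----
  #       ...
  #       -----END CERTIFICATE-----
  #     key: |
  #       -----BEGIN PRIVATE KEY-----
  #       ...
  #       -----END PRIVATE KEY-----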
  autoscaler:
    # Enable the application autoscaler. The autoscaler service must be manually registered; see
    # https://github.com/cloudfoundry/app-autoscaler-release#register-service for details.
    enabled: false
    mysql:
      enabled: false
  credhub:
    # Enable credhub; this is only used as a service broker for applications, and is not used for
    # authentication with the Cloud Foundry deployment.
    enabled: true
  routing_api:
    # Enable the routing API. Disabling this will also disable TCP routing, which is used for TCP
    # port forwarding.
    # Enabled by default, except under Eirini, where the routing-api is not (yet) supported.
    enabled: ~
  embedded_database:
    # Enable the embedded database. If this is disabled, then features.external_database should be
    # configured to use an external database.
    enabled: true
    # Number of seconds to wait for the database to be ready, per iteration of the waiter loop.
    connect_timeout: 3
  blobstore:
    # Possible values for provider: fog or singleton.
    provider: singleton
    # fog:
    #   app_package_directory_key: YOUR-APP-PACKAGE-BUCKET
    #   buildpack_directory_key: YOUR-BUILDPACK-BUCKET
    #   droplet_directory_key: YOUR-DROPLET-BUCKET
    #   resource_directory_key: YOUR-RESOURCE-BUCKET
    #
    # Example config for S3
    # ---------------------
    #   connection:
    #     provider: AWS
    #     aws_access_key_id: S3-ACCESS-KEY
    #     aws_secret_access_key: S3-SECRET-ACCESS-KEY
    #     region: ""
    #
    # Additional settings for e.g. MinIO
    # ----------------------------------
    #   aws_signature_version: '2'
    #   endpoint: https://custom-s3-endpoint.example.com
    #   # path_style is only supported by Diego, but not by Eirini (bits-service).
    #   # MinIO can be configured to use vhost addressing using MINIO_DOMAIN and a wildcard cert.
    #   path_style: true
    #
    # Example config for Google Cloud Storage
    # ---------------------------------------
    #   connection:
    #     provider: Google
    #     google_storage_access_key_id: GCS-ACCESS-KEY
    #     google_storage_secret_access_key: GCS-SECRET-ACCESS-KEY
    #
    # Example config for Azure Cloud Storage
    # --------------------------------------
    #   connection:
    #     provider: AzureRM
    #     environment: AzureCloud
    #     azure_storage_account_name: YOUR-AZURE-STORAGE-ACCOUNT-NAME
    #     azure_storage_access_key: YOUR-AZURE-STORAGE-ACCESS-KEY
  # Configuration for the external database; see also features.embedded_database. Please refer to
  # https://kubecf.io/docs/deployment/advanced-topics/#external-database for details.
  external_database:
    enabled: false
    require_ssl: false
    ca_cert: ~
    # The external database type; it can be either 'mysql' or 'postgres'.
    type: ~
    host: ~
    port: ~
    # Number of seconds to wait for the database to be ready, per iteration of the waiter loop.
    connect_timeout: 3
    # If seed is set to true, we will initialize the database using the provided
    # root password (see `.variables.pxc-root-password`); in that case it is not
    # necessary to provide the configuration for the individual databases.
    seed: false
    databases:
      uaa:
        name: uaa
        password: ~
        username: ~
      cc:
        name: cloud_controller
        password: ~
        username: ~
      bbs:
        name: diego
        password: ~
        username: ~
      routing_api:
        name: routing-api
        password: ~
        username: ~
      policy_server:
        name: network_policy
        password: ~
        username: ~
      silk_controller:
        name: network_connectivity
        password: ~
        username: ~
      locket:
        name: locket
        password: ~
        username: ~
      credhub:
        name: credhub
        password: ~
        username: ~
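  # For example, to use an external MySQL server and let KubeCF seed the
  # individual databases (host and port are placeholders; remember to also
  # disable features.embedded_database):
  # external_database:
  #   enabled: true
  #   type: mysql
  #   host: mysql.example.com
  #   port: 3306
  #   seed: true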
# Enable or disable instance groups for the different test suites.
# Only smoke tests should be run in production environments.
testing:
  # __ATTENTION__: The brain tests do things with the cluster which
  # require them to have `cluster-admin` permissions (i.e. root).
  # Enabling them is thus potentially insecure. They should only be
  # activated for isolated testing.
  brain_tests:
    enabled: false
    # Delete the testing pod after completion (default: false)
    delete_pod: false
  cf_acceptance_tests:
    enabled: false
    # Delete the testing pod after completion (default: false)
    delete_pod: false
  smoke_tests:
    enabled: true
    # Delete the testing pod after completion (default: false)
    delete_pod: false
  sync_integration_tests:
    enabled: false
    # Delete the testing pod after completion (default: false)
    delete_pod: false
ccdb:
  encryption:
    # Configure CCDB key rotation. Please see
    # https://kubecf.io/docs/tasks/secrets/#rotating-the-ccdb-encryption-keys for details.
    rotation:
      # Key labels must be <= 240 characters long.
      key_labels:
      - encryption_key_0
      current_key_label: encryption_key_0
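      # For example, to rotate to a new key, add a second label and make it
      # current (see the documentation linked above for the full procedure):
      # key_labels:
      # - encryption_key_0
      # - encryption_key_1
      # current_key_label: encryption_key_1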
operations:
  # A list of ConfigMap names with operations that should be applied to the BOSH manifest.
  custom: []
  # Inline operations; these are written into generated ConfigMaps. E.g. adding a password variable:
  # operations:
  #   inline:
  #   - type: replace
  #     path: /variables/-
  #     value:
  #       name: my_password
  #       type: password
  inline: []
hooks:
  # Image that contains kubectl, to be used in helm upgrade and delete hook scripts.
  image: registry.suse.com/cap/kubecf-kubectl:v1.19.2
eirinix:
  persi-broker:
    # Service plans for Eirini persistent storage support.
    service-plans:
    - id: default
      name: "default"
      description: "Existing default storage class"
      kube_storage_class: ~
      free: true
      default_size: "1Gi"
    description: Eirini persistence broker
    long_description: Eirini persistence broker to provide Kubernetes storage classes
    provider_display_name: Eirini broker
    documentation_url: https://github.com/SUSE/eirini-persi-broker
    support_url: https://github.com/SUSE/eirini-persi-broker/issues
    display_name: Eirini broker
    icon_image: Eirini broker
    secrets:
      auth-password: ~ # Password is randomly generated if not given
This is the complete output of `helm inspect values suse/cf-operator` for
the current SUSE Cloud Application Platform 2.1.1 release.
## Default values for Quarks Operator Helm Chart.
## This is a YAML-formatted file.
## Declare variables to be passed into your templates.
# applyCRD is a boolean to control the installation of CRDs.
applyCRD: true
cluster:
  # domain is the Kubernetes cluster domain
  domain: "cluster.local"
# fullnameOverride overrides the release name
fullnameOverride: ""
# image is the docker image of the quarks operator.
image:
  # repository that provides the operator docker image.
  repository: quarks-operator
  # org that provides the operator docker image.
  org: registry.suse.com/cap
  # tag of the operator docker image
  tag: v7.2.1-0.gaeb6ef3
# Creates a service account for coredns-quarks; the name must be unique, as it
# is also used for the cluster role.
corednsServiceAccount:
  create: true
  name: coredns-quarks
# logrotateInterval is the time between logrotate calls for instance groups in minutes
logrotateInterval: 1440
# logLevel defines from which level the logs should be printed (trace,debug,info,warn).
logLevel: debug
# nameOverride overrides the chart name part of the release name
nameOverride: ""
# workers sets the maximum number of workers for the respective controller.
workers:
  boshdeployment: 1
operator:
  webhook:
    # host under which the webhook server can be reached from the cluster
    host: ~
    # port the webhook server listens on
    port: "2999"
  # boshDNSDockerImage is the docker image used for emulating bosh DNS (a CoreDNS image).
  boshDNSDockerImage: "registry.suse.com/cap/coredns:0.1.0-1.6.7-bp152.1.19"
  hookDockerImage: "registry.suse.com/cap/kubecf-kubectl:v1.20.2"
# serviceAccount contains the configuration
# values of the service account used by quarks-operator.
serviceAccount:
  # create is a boolean to control the creation of the service account.
  create: true
  # name of the service account.
  name:
global:
  # Context timeout for each Kubernetes API request, in seconds.
  contextTimeout: 300
  # MeltdownDuration is the duration (in seconds) of the meltdown period, in which we
  # postpone further reconciles for the same resource.
  meltdownDuration: 60
  # MeltdownRequeueAfter is the duration (in seconds) for which we delay the requeuing of the reconcile.
  meltdownRequeueAfter: 30
  image:
    # pullPolicy defines the policy used for pulling docker images.
    pullPolicy: IfNotPresent
    # credentials is used for pulling docker images.
    credentials: ~
    # username:
    # password:
    # servername:
  # monitoredID is a string that has to match the content of the 'monitored' label in each monitored namespace.
  monitoredID: cfo
  operator:
    webhook:
      # useServiceReference is a boolean to control the use of the
      # service reference in the webhook spec instead of a url.
      useServiceReference: true
  rbac:
    # create is a boolean to control the installation of rbac resources.
    create: true
  singleNamespace:
    # create is a boolean to control the creation of resources for a simplified setup.
    create: true
    # name is the name of the single namespace being watched for BOSH deployments.
    name: kubecf
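  # For example, to watch a different namespace for BOSH deployments
  # (the name is a placeholder):
  # singleNamespace:
  #   name: my-cf-namespace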
quarks-job:
  logLevel: info
  serviceAccount:
    # create is a boolean to control the creation of the service account.
    create: true
    # name of the service account.
    name:
  persistOutputClusterRole:
    # create is a boolean to control the creation of the persist output cluster role.
    create: true
    # name of the cluster role.
    name: qjob-persist-output
  singleNamespace:
    createNamespace: false
quarks-secret:
  logLevel: info
quarks-statefulset:
  logLevel: info