diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/.image_descriptor_template.json b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/.image_descriptor_template.json new file mode 100644 index 0000000000..8a99a95664 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/.image_descriptor_template.json @@ -0,0 +1 @@ +{"server":{"deployment":{"image_tag":"{{.Tag}}","image":"{{.Name}}"}},"pipelineName": "{{.PipelineName}}","releaseVersion":"{{.ReleaseVersion}}","deploymentType": "{{.DeploymentType}}", "app": "{{.App}}", "env": "{{.Env}}", "appMetrics": {{.AppMetrics}}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/Chart.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/Chart.yaml new file mode 100644 index 0000000000..12593984ab --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: cronjob-chart_1-6-0 +version: 1.6.0 diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/app-values.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/app-values.yaml new file mode 100644 index 0000000000..8af5840d9e --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/app-values.yaml @@ -0,0 +1,177 @@ +# Mandatory configs +kind: Job + +jobConfigs: + backoffLimit: 5 + activeDeadlineSeconds: 100 + parallelism: 1 + completions: 2 + suspend: false + # ttlSecondsAfterFinished: 100 + +cronjobConfigs: + schedule: "* * * * *" + startingDeadlineSeconds: 100 + concurrencyPolicy: Allow + suspend: false + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 1 + restartPolicy: OnFailure + +kedaAutoscaling: + envSourceContainerName: "" + minReplicaCount: 1 + maxReplicaCount: 2 + pollingInterval: 30 + successfulJobsHistoryLimit: 5 + failedJobsHistoryLimit: 5 + rolloutStrategy: default + scalingStrategy: + strategy: "custom" + customScalingQueueLengthDeduction: 1 + customScalingRunningJobPercentage: "0.5" + pendingPodConditions: + - "Ready" + - "PodScheduled" + - "AnyOtherCustomPodCondition" + multipleScalersCalculation : "max" + triggers: + - type: rabbitmq + metadata: + queueName: hello + host: RabbitMqHost + queueLength : '5' + authenticationRef: {} + triggerAuthentication: + enabled: false + name: "" + spec: {} + +MinReadySeconds: 60 +GracePeriod: 30 +image: + pullPolicy: IfNotPresent +service: + type: ClusterIP + enabled: false + #name: "service-1234567890" + annotations: {} + # test1: test2 + # test3: test4 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyPort: 8799 + useHTTP2: true + supportStreaming: true + idleTimeout: 1800s +# servicemonitor: +# enabled: true +# path: /abc +# scheme: 'http' +# interval: 30s +# scrapeTimeout: 20s +# metricRelabelings: +# - sourceLabels: [namespace] +# regex: '(.*)' +# replacement: myapp +# targetLabel: target_namespace +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 0.10 + memory: 100Mi + +# Optional configs + +command: + enabled: false + value: [] + +args: + enabled: false + value: + - /bin/sh + - -c + - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600 + +#For adding custom labels to pods + +podLabels: {} +# customKey: customValue +podAnnotations: {} +# customKey: customValue + +rawYaml: [] + +initContainers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage. + #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + +containers: [] + ## Additional containers to run along with application pods. + ## for example, be used to run a sidecar that chown Logs storage . + #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + +tolerations: [] + +Spec: + Affinity: + Key: + # Key: kops.k8s.io/instancegroup + Values: + +prometheus: + release: monitoring + +server: + deployment: + image_tag: 1-95af053 + image: "" + +servicemonitor: + additionalLabels: {} + +imagePullSecrets: [] + # - test1 + # - test2 + +containerSecurityContext: + allowPrivilegeEscalation: false + +podSecurityContext: {} + # runAsUser: 1000 + # runAsGroup: 3000 + # fsGroup: 2000 + +shareProcessNamespace: false +setHostnameAsFQDN: false +readinessGates: [] +ephemeralContainers: [] +topologySpreadConstraints: [] diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/env-values.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/env-values.yaml new file mode 100644 index 0000000000..a0fcb7e26b --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/env-values.yaml @@ -0,0 +1,33 @@ +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 + +Spec: + Affinity: + key: "" + Values: nodes + +secret: + enabled: false + data: {} +# my_own_secret: S3ViZXJuZXRlcyBXb3Jrcw== + +EnvVariables: [] +# - name: FLASK_ENV +# value: qa + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: "0.05" + memory: 50Mi + requests: + cpu: "0.01" + memory: 10Mi + + diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/pipeline-values.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/pipeline-values.yaml new file mode 100644 index 0000000000..40a5ec633d --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/pipeline-values.yaml @@ -0,0 +1,24 @@ +deployment: + strategy: + blueGreen: + autoPromotionSeconds: 30 + scaleDownDelaySeconds: 30 + previewReplicaCount: 1 + autoPromotionEnabled: false + rolling: + maxSurge: "25%" + maxUnavailable: 1 + canary: + maxSurge: "25%" + maxUnavailable: 1 + steps: + - setWeight: 25 + - pause: + duration: 15 # 1 min + - setWeight: 50 + - pause: + duration: 15 # 1 min + - setWeight: 75 + - pause: + duration: 15 # 1 min + recreate: {} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/release-values.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/release-values.yaml new file mode 100644 index 0000000000..48eb3f482c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/release-values.yaml @@ -0,0 +1,14 @@ +server: + deployment: + image_tag: IMAGE_TAG + image: IMAGE_REPO + enabled: false +dbMigrationConfig: + enabled: false + +pauseForSecondsBeforeSwitchActive: 0 +waitForSecondsBeforeScalingDown: 0 +autoPromotionSeconds: 30 + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/schema.json b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/schema.json new file mode 100644 index 0000000000..5f6a66b16d --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/schema.json @@ -0,0 +1,760 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "containerExtraSpecs": { + "type": "object", + "title": "containerExtraSpecs", + "description": "Define container extra specs here" + }, + "ContainerPort": { + "type": "array", + "description": "defines ports on which application services will be exposed to other services", + "title": "Container Port", + "items": { + "type": "object", + "properties": { + "envoyPort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "envoy port for the container", + "title": "Envoy Port" + }, + "idleTimeout": { + "type": "string", + "description": "duration of time for which a connection is idle before the connection is terminated", + "title": "Idle Timeout" + }, + "name": { + "type": "string", + "description": "name of the port", + "title": "Name" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Port", + "title": "port for the container" + }, + "servicePort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port of the corresponding kubernetes service", + "title": "Service Port" + }, + "nodePort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "nodeport of the corresponding kubernetes service", + "title": "Node Port" + }, + "supportStreaming": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "field to enable/disable timeout for high performance protocols like grpc", + "title": "Support Streaming" + }, + "useHTTP2": { + "type": [ + 
"boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": " field for setting if envoy container can accept(or not) HTTP2 requests", + "title": "Use HTTP2" + } + } + } + }, + "EnvVariables": { + "type": "array", + "items": {}, + "description": "contains environment variables needed by the containers", + "title": "Environment Variables" + }, + "EnvVariablesFromFieldPath": { + "type": "array", + "description": "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs", + "title": "EnvVariablesFromFieldPath", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be" + }, + "fieldPath": { + "type": "string", + "title": "fieldPath", + "description": "Path of the field to select in the specified API version" + } + } + } + ] + }, + "EnvVariablesFromSecretKeys": { + "type": "array", + "description": "Selects a field of the deployment: It is use to get the name of Environment Variable name, Secret name and the Key name from which we are using the value in that corresponding Environment Variable.", + "title": "EnvVariablesFromSecretKeys", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be used." + }, + "secretName": { + "type": "string", + "title": "secretName", + "description": "Name of Secret from which we are taking the value." + }, + "keyName": { + "type": "string", + "title": "keyName", + "description": "Name of The Key Where the value is mapped with." + } + } + } + ] + }, + "EnvVariablesFromConfigMapKeys": { + "type": "array", + "description": "Selects a field of the deployment: It is use to get the name of Environment Variable name, Config Map name and the Key name from which we are using the value in that corresponding Environment Variable.", + "title": "EnvVariablesFromConfigMapKeys", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be used." + }, + "configMapName": { + "type": "string", + "title": "configMapName", + "description": "Name of configMap from which we are taking the value." + }, + "keyName": { + "type": "string", + "title": "keyName", + "description": "Name of The Key Where the value is mapped with." 
+ } + } + } + ] + }, + "GracePeriod": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "time for which Kubernetes waits before terminating the pods", + "title": "Grace Period" + }, + "MaxSurge": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "maximum number of pods that can be created over the desired number of pods", + "title": "Maximum Surge" + }, + "MaxUnavailable": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "maximum number of pods that can be unavailable during the update process", + "title": "Maximum Unavailable" + }, + "MinReadySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", + "title": "Minimum Ready Seconds" + }, + "Spec": { + "type": "object", + "description": "used to define the desired state of the given container", + "title": "Spec", + "properties": { + "Affinity": { + "type": "object", + "description": "Node/Inter-pod Affinity allows you to constrain which nodes your pod is eligible to schedule on, based on labels of the node/pods", + "title": "Affinity", + "properties": { + "Key": { + "anyOf": [ + { + "type": "null" + }, + { + "type": "string", + "description": "Key part of the label for node/pod selection", + "title": "Key" + } + ] + }, + "Values": { + "type": "string", + "description": "Value part of the label for node/pod selection", + "title": "Values" + }, + "key": { + "type": "string" + } + } + } + } + }, + "args": { + "type": "object", + "description": "used to pass arguments to the command", + "title": "Arguments", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling arguments", + "title": "Enabled" + }, + "value": { + "type": "array", + "description": "values of the arguments", + "title": "Value", + "items": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ] + } + } + }, + "command": { + "type": "object", + "description": "contains the commands for the server", + "title": "Command", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling commands" + }, + "value": { + "type": "array", + "items": {}, + "description": "contains the commands", + "title": "Value" + }, + "workingDir": { + "type": "object", + "items": {}, + "description": "contains the working directory", + "title": "Working directory" + } + } + }, + "cronjobConfigs": { + "type": "object", + "description": "used to provide configs to schedule the CronJob", + "title": "Cronjob Configs", + "properties": { + "concurrencyPolicy": { + "type": "string", + "description": "Specifies how to treat concurrent executions of a Job.", + "title": "Concurrency Policy", + "enum": [ "Allow", "Forbid","Replace"] + }, + "failedJobsHistoryLimit": { + "type": "integer", + "description": "The number of failed finished jobs to retain. Value must be a non-negative integer.
Defaults to 1.", + "title": "failedJobsHistoryLimit" + }, + "restartPolicy": { + "type": "string", + "description": "It restarts the docker container based on defined conditions.", + "title": "Restart Policy", + "enum": [ + "Always", + "OnFailure", + "Never" + ] + }, + "schedule": { + "type": "string", + "description": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", + "title": "schedule" + }, + "startingDeadlineSeconds":{ + "type": "integer", + "description": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason.", + "title": "startingDeadlineSeconds" + }, + "successfulJobsHistoryLimit": { + "type": "integer", + "description": "The number of successful finished jobs to retain. Value must be non-negative integer. Defaults to 3.", + "title": "Successful Jobs History Limit" + }, + "suspend":{ + "type":"boolean", + "description": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults is false." + } + } + }, + "jobConfigs":{ + "type":"object", + "description": "used to give configs to schdule job", + "title": "Job Config", + "properties": { + "activeDeadlineSeconds":{ + "type":"integer", + "description": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer.", + "title": "Active Deadline Seconds" + }, + "backoffLimit":{ + "type":"integer", + "description": "Specifies the number of retries before marking this job failed. Defaults is 5" + }, + "completions":{ + "type":"integer", + "description": "Specifies the desired number of successfully finished pods the job should be run with. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/" + }, + "parallelism":{ + "type":"integer", + "description": "Specifies the maximum desired number of pods the job should run at any given time. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/" + }, + "suspend":{ + "type":"boolean", + "description": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults is false." + } + } + }, + "kind":{ + "type": "string", + "description": "Kind is a string value representing the object type.", + "enum": [ + "Job", + "CronJob", + "ScaledJob" + ], + "title": "Kind" + + }, + + "kedaAutoscaling": { + "type": "object", + "description": "Kubernetes-based event driven autoscaler. With KEDA, one can drive the scaling of any container in Kubernetes based on the no. of events needing to be processed", + "title": "KEDA Autoscaling", + "properties": { + "envSourceContainerName":{ + "type": "string", + "description": "Is an optional property that specifies the name of container in the target resource, from which KEDA should try to get environment properties holding secrets etc. 
", + "title": "Env Source Container Name" + }, + "failedJobsHistoryLimit": { + "type": "integer", + "description": "specifies how many failed jobs to keep", + "title": "Failed Jobs History Limit" + }, + "maxReplicaCount":{ + "type":"integer", + "description": "maxReplicaCount in KEDA specifies the maximum number of replicas the target resource can be scaled to.", + "title": "Max Replica Count" + }, + "minReplicaCount":{ + "type":"integer", + "description": "minReplicaCount in KEDA specifies the minimum number of replicas a resource will be scaled down to.", + "title": "Max Replica Count" + + }, + "pollingInterval":{ + "type": "integer", + "description": "This is the interval to check each trigger on. By default, KEDA will check each trigger source on every ScaledObject every 30 seconds.", + "title": "Polling Interval" + }, + "rolloutStrategy": { + "type": "string", + "description": "rollout.strategy specifies the rollout strategy KEDA will use while updating an existing ScaledJob", + "enum":[ + "gradual","default" + ] + }, + "scalingStrategy": { + "type": "object", + "properties": + { + "customScalingQueueLengthDeduction":{ + "type": "integer", + "description": "Optional. A parameter to optimize custom ScalingStrategy.", + "title": "Custom Scaling Queue Length Deduction" + }, + "customScalingRunningJobPercentage":{ + "type": "string", + "description": "Optional. A parameter to optimize custom ScalingStrategy.", + "title": "Custom Scaling QueueLengthDeduction" + }, + "multipleScalersCalculation": + { + "type": "string", + "description": "Select a behavior if you have multiple triggers", + "title": "Multiple Scalers Calculation", + "enum":[ + "max", "min", "avg", "sum" + ] + + }, + "pendingPodConditions": { + "type": "array", + "description": "Optional. A parameter to calculate pending job count per the specified pod conditions", + "title": "Pending Pod Conditions" + }, + "strategy":{ + "type": "string", + "description": "Optional. Default: default. Which Scaling Strategy to use.", + "title":"Strategy", + "enum": ["default", "custom", "accurate"] + } + } + + }, + "successfulJobsHistoryLimit": { + "type": "integer", + "description": " The number of successful finished jobs to retain. Value must be non-negative integer. Defaults to 3.", + "title": "Successful Jobs History Limit" + }, + "triggerAuthentication":{ + "type": "object", + "title": "Trigger Authentication", + "description": "TriggerAuthentication allows you to describe authentication parameters separate from the ScaledObject and the deployment containers.", + "properties": + { + "enabled": { + "type": "boolean", + "description": "enabling TriggerAuthentication" + }, + "name": { + "type": "string" + }, + "spec":{ + "type": "object" + } + } + }, + "triggers":{ + "type":"array", + "description": "list of triggers to activate scaling of the target resource" + } + + } + + }, + "podAnnotations":{ + "type": "object", + "description": "adding extra annotations to pod" + }, + "ephemeralContainers": + { + "type": "array", + "description": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging." + }, + "initContainers":{ + "type":"array", + "description":"List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started." 
+ }, + "imagePullSecrets": + { + "type":"array", + "description":" ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod" + } + , + "containerSecurityContext": { + "type": "object", + "description": " defines privilege and access control settings for a Container", + "title": "Container Security Context" + }, + "containers": { + "type": "array", + "items": {}, + "description": " used to run side-car containers along with the main container within same pod" + }, + "dbMigrationConfig": { + "type": "object", + "description": "used to configure database migration", + "title": "Db Migration Config", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling the config", + "title": "Enabled" + } + } + }, + "image": { + "type": "object", + "description": "used to access images in kubernetes", + "title": "Image", + "properties": { + "pullPolicy": { + "type": "string", + "description": "used to define the instances calling the image", + "title": "Pull Policy", + "enum": [ + "IfNotPresent", + "Always" + ] + } + } + }, + "podExtraSpecs": { + "type": "object", + "description": "ExtraSpec for the pods to be configured", + "title": "podExtraSpecs" + }, + "podLabels": { + "type": "object", + "description": "key/value pairs that are attached to pods, are intended to be used to specify identifying attributes of objects that are meaningful and relevant to users, but do not directly imply semantics to the core system", + "title": "Pod Labels" + }, + "podSecurityContext": { + "type": "object", + "description": "defines privilege and access control settings for a Pod or Container", + "title": "Pod Security Context" + }, + "prometheus": { + "type": "object", + "description": "a kubernetes monitoring tool", + "title": "Prometheus", + "properties": { + "release": { + "type": "string", + "description": "name of the file to be monitored, describes the state of prometheus" + } + } + }, + "rawYaml": { + "type": "array", + "items": {}, + "description": "Accepts an array of Kubernetes objects. 
One can specify any kubernetes yaml here & it will be applied when a app gets deployed.", + "title": "Raw YAML" + }, + "resources": { + "type": "object", + "description": "minimum and maximum RAM and CPU available to the application", + "title": "Resources", + "properties": { + "limits": { + "type": "object", + "description": "the maximum values a container can reach", + "title": "Limits", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "limit of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "limit of memory", + "title": "Memory" + } + } + }, + "requests": { + "type": "object", + "description": "request is what the container is guaranteed to get", + "title": "Requests", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "request value of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "request value of memory", + "title": "Memory" + } + } + } + } + }, + "secret": { + "type": "object", + "properties": { + "data": { + "type": "object" + }, + "enabled": { + "type": "boolean" + } + } + }, + "server": { + "type": "object", + "description": "used for providing server configurations.", + "title": "Server", + "properties": { + "deployment": { + "type": "object", + "description": "gives the details for deployment", + "title": "Deployment", + "properties": { + "image": { + "type": "string", + "description": "URL of the image", + "title": "Image" + }, + "image_tag": { + "type": "string", + "description": "tag of the image", + "title": "Image Tag" + } + } + } + } + }, + "service": { + "type": "object", + "description": "defines annotations and the type of service", + "title": "Service", + "properties": { + "annotations": { + "type": "object", + "title": "Annotations", + "description": "annotations of service" + }, + "type": { + "type": "string", + "description": "type of service", + "title": "Type", + "enum": [ + "ClusterIP", + "LoadBalancer", + "NodePort", + "ExternalName" + ] + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable service", + "title": "Enabled" + } + } + }, + "serviceAccount": { + "type": "object", + "description": "defines service account for pods", + "title": "Service Account", + "properties": { + "annotations": { + "type": "object", + "title": "Annotations", + "description": "annotations of service account" + }, + "name": { + "type": "string", + "description": "name of service account", + "title": "Name" + }, + "create": { + "type": "boolean", + "description": "If set to true, a service account will be created, ensuring that no roles or role bindings are created in the process." 
+ + } + } + }, + "servicemonitor": { + "type": "object", + "description": "gives the set of targets to be monitored", + "title": "Service Monitor", + "properties": { + "additionalLabels": { + "type": "object" + } + } + }, + "tolerations": { + "type": "array", + "items": {}, + "description": "a mechanism which work together with Taints which ensures that pods are not placed on inappropriate nodes", + "title": "Tolerations" + }, + "topologySpreadConstraints": { + "type": "array", + "items": {}, + "description": "used to control how Pods are spread across a cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains", + "title": "Topology Spread Constraints" + }, + "volumeMounts": { + "type": "array", + "items": {}, + "description": "used to provide mounts to the volume", + "title": "Volume Mounts" + }, + "volumes": { + "type": "array", + "items": {}, + "description": "required when some values need to be read from or written to an external disk", + "title": "Volumes" + }, + "waitForSecondsBeforeScalingDown": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Wait for given period of time before scaling down the container", + "title": "Wait For Seconds Before Scaling Down" + } + } +} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/secrets-test-values.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/secrets-test-values.yaml new file mode 100644 index 0000000000..4a20404db8 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/secrets-test-values.yaml @@ -0,0 +1 @@ +{"ConfigSecrets":{"enabled":true,"secrets":[{"data":{"standard_key":"c3RhbmRhcmQtdmFsdWU="},"external":false,"externalType":"","mountPath":"/test","name":"normal-secret","type":"volume"},{"data":{"secret_key":"U0VDUkVUIERBVEE="},"external":true,"externalType":"AWSSecretsManager","mountPath":"","name":"external-secret-3","type":"environment"}]}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/NOTES.txt b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/NOTES.txt new file mode 100644 index 0000000000..c6ccbb8211 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/NOTES.txt @@ -0,0 +1,13 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include ".Chart.Name .fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ include ".Chart.Name .fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include ".Chart.Name .fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include ".Chart.Name .name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/_helpers.tpl b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/_helpers.tpl new file mode 100644 index 0000000000..03fabbc338 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/_helpers.tpl @@ -0,0 +1,159 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define ".Chart.Name .name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create service name +*/}} +{{- define ".servicename" -}} +{{- if .Values.service.name -}} +{{- .Values.service.name | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 55 | trimSuffix "-" -}}-service +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 55 | trimSuffix "-" -}}-service +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 55 | trimSuffix "-" -}}-service +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create preview service name +*/}} +{{- define ".previewservicename" -}} +{{- if .Values.service.name -}} +{{- .Values.service.name | trunc 55 | trimSuffix "-" -}}-preview +{{- else if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 47 | trimSuffix "-" -}}-preview-service +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 47 | trimSuffix "-" -}}-preview-service +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 47 | trimSuffix "-" -}}-preview-service +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define ".Chart.Name .fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define ".Chart.Name .chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define ".Chart.Name .color" -}} +{{- $active0 := (index .Values.server.deployment 0).enabled -}} +{{/* +{{- $active1 := (index .Values.server.deployment 1).enabled -}} +*/}} +{{- $active1 := include "safeenabledcheck" . 
-}} +{{- $active := and $active0 $active1 -}} +{{- $active -}} +{{- end -}} + +{{- define "safeenabledcheck" -}} +{{- if (eq (len .Values.server.deployment) 2) -}} + {{- if (index .Values.server.deployment 1).enabled -}} + {{- $active := true -}} + {{- $active -}} + {{- else -}} + {{- $active := false -}} + {{- $active -}} + {{- end -}} +{{- else -}} + {{- $active := false -}} + {{- $active -}} +{{- end -}} +{{- end -}} + + +{{- define "isCMVolumeExists" -}} + {{- $isCMVolumeExists := false -}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $isCMVolumeExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{- $isCMVolumeExists -}} +{{- end -}} + +{{- define "isSecretVolumeExists" -}} + {{- $isSecretVolumeExists := false -}} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $isSecretVolumeExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{- $isSecretVolumeExists -}} +{{- end -}} + + + + +{{- define "serviceMonitorEnabled" -}} + {{- $SMenabled := false -}} + {{- range .Values.ContainerPort }} + {{- if .servicemonitor }} + {{- if and .servicemonitor.enabled }} + {{- $SMenabled = true -}} + {{- end }} + {{- end }} + {{- end }} + {{- $SMenabled -}} +{{- end -}} + + {{- $hasCMEnvExists := false -}} + {{- $hasCMVolumeExists := false -}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $hasCMVolumeExists = true}} + {{- end }} + {{- if eq .type "environment"}} + {{- $hasCMEnvExists = true}} + {{- end }} + {{- end }} + {{- end }} + + {{- $hasSecretEnvExists := false -}} + {{- $hasSecretVolumeExists := false -}} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $hasSecretVolumeExists = true}} + {{- end }} + {{- if eq .type "environment"}} + {{- $hasSecretEnvExists = true}} + {{- end }} + {{- end }} + {{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/_job_template_spec.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/_job_template_spec.yaml new file mode 100644 index 0000000000..0ff1ddc64f --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/_job_template_spec.yaml @@ -0,0 +1,42 @@ +{{- define "job-template-spec" }} +{{- if $.Values.jobConfigs }} +{{- if $.Values.jobConfigs.backoffLimit }} +backoffLimit: {{ $.Values.jobConfigs.backoffLimit }} +{{- end }} +{{- if $.Values.jobConfigs.activeDeadlineSeconds }} +activeDeadlineSeconds: {{ $.Values.jobConfigs.activeDeadlineSeconds }} +{{- end }} +{{- if $.Values.jobConfigs.parallelism }} +parallelism: {{ $.Values.jobConfigs.parallelism }} +{{- end }} +{{- if $.Values.jobConfigs.completions }} +completions: {{ $.Values.jobConfigs.completions }} +{{- end }} +{{- if semverCompare ">1.20" .Capabilities.KubeVersion.GitVersion }} +{{- if $.Values.jobConfigs.suspend }} +suspend: {{ $.Values.jobConfigs.suspend }} +{{- end }} +{{- end }} +{{- if $.Values.jobConfigs.ttlSecondsAfterFinished }} +ttlSecondsAfterFinished: {{ $.Values.jobConfigs.ttlSecondsAfterFinished }} +{{- end }} +{{- end }} +template: + metadata: + {{- if $.Values.podAnnotations }} + annotations: + {{- range $key, $value := $.Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | 
quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} + {{- if $.Values.podLabels }} +{{ toYaml $.Values.podLabels | indent 6 }} + {{- end }} + spec: + {{- include "pod-template-spec" . | indent 4 }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/_pod_template_spec.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/_pod_template_spec.yaml new file mode 100644 index 0000000000..127c89b108 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/_pod_template_spec.yaml @@ -0,0 +1,313 @@ +{{ define "pod-template-spec" }} +{{- $hasCMEnvExists := false -}} +{{- $hasCMVolumeExists := false -}} +{{- if .Values.ConfigMaps.enabled }} +{{- range .Values.ConfigMaps.maps }} +{{- if eq .type "volume"}} +{{- $hasCMVolumeExists = true}} +{{- end }} +{{- if eq .type "environment"}} +{{- $hasCMEnvExists = true}} +{{- end }} +{{- end }} +{{- end }} + +{{- $hasSecretEnvExists := false -}} +{{- $hasSecretVolumeExists := false -}} +{{- if .Values.ConfigSecrets.enabled }} +{{- range .Values.ConfigSecrets.secrets }} +{{- if eq .type "volume"}} +{{- $hasSecretVolumeExists = true}} +{{- end }} +{{- if eq .type "environment"}} +{{- $hasSecretEnvExists = true}} +{{- end }} +{{- end }} +{{- end }} +{{- if $.Values.shareProcessNamespace }} +shareProcessNamespace: {{ $.Values.shareProcessNamespace }} +{{- end }} +{{- if $.Values.GracePeriod }} +terminationGracePeriodSeconds: {{ $.Values.GracePeriod }} +{{- end }} +{{- if $.Values.topologySpreadConstraints }} +topologySpreadConstraints: +{{- range $.Values.topologySpreadConstraints }} +- maxSkew: {{ .maxSkew }} + topologyKey: {{ .topologyKey }} + whenUnsatisfiable: {{ .whenUnsatisfiable }} + labelSelector: + matchLabels: + {{- if and .autoLabelSelector .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 6 }} + {{- else if .autoLabelSelector }} + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} + {{- else if .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 6 }} + {{- end }} +{{- end }} +{{- end }} +{{- if $.Values.podSpec.subdomain }} +subdomain: {{ $.Values.podSpec.subdomain }} +{{- end }} +{{- if $.Values.podSpec.setHostnameAsFQDN }} +setHostnameAsFQDN: {{ $.Values.podSpec.setHostnameAsFQDN }} +{{- end }} +{{- if $.Values.podSpec.schedulerName }} +schedulerName: {{ $.Values.podSpec.schedulerName }} +{{- end }} +{{- if $.Values.podSpec.readinessGates }} +readinessGates: + {{ toYaml $.podSpec.readinessGates }} +{{- end }} +{{- if $.Values.podSpec.dnsPolicy }} +dnsPolicy: {{ $.Values.podSpec.dnsPolicy }} +{{- end }} +{{- if $.Values.podSpec.enableServiceLinks }} +dnsPolicy: {{ $.Values.podSpec.enableServiceLinks }} +{{- end }} +{{- with $.Values.ephemeralContainers }} +ephemeralContainers: +{{- toYaml $.Values.ephemeralContainers }} +{{- end }} +{{- with $.Values.dnsConfig }} +dnsConfig: +{{- toYaml $.Values.dnsConfig }} +{{- end }} +restartPolicy: {{ $.Values.restartPolicy }} +{{- if and $.Values.Spec.Affinity.Key $.Values.Spec.Affinity.Values }} +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ $.Values.Spec.Affinity.Key }} + operator: In + values: + - {{ $.Values.Spec.Affinity.Values | default "nodes" }} +{{- end }} +{{- if $.Values.serviceAccountName }} +serviceAccountName: {{ $.Values.serviceAccountName }} +{{- 
end }} +{{- if .Values.tolerations }} +tolerations: +{{ toYaml .Values.tolerations | indent 2 }} +{{- end }} +{{- if $.Values.podSecurityContext }} +securityContext: +{{ toYaml .Values.podSecurityContext | indent 2 }} +{{- end }} +{{- if $.Values.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- end}} +{{- if $.Values.initContainers }} +initContainers: +{{- range $i, $c := .Values.initContainers }} +{{- if .reuseContainerImage }} + - name: {{ $.Chart.Name }}-init-{{ add1 $i }} + image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if .command }} + command: +{{ toYaml .command | indent 6 -}} +{{- end }} +{{- if .resources }} + resources: +{{ toYaml .resources | indent 6 }} +{{- end }} +{{- if .volumeMounts }} + volumeMounts: +{{ toYaml .volumeMounts | indent 6 -}} +{{- end }} +{{- else }} + - +{{ toYaml $c | indent 4 -}} +{{- end }} +{{- end }} +{{- end }} +containers: +{{- if $.Values.containers }} +{{ toYaml $.Values.containers | indent 2 -}} +{{- end }} + - name: {{ $.Chart.Name }} + image: "{{ .Values.server.deployment.image }}:{{ .Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if $.Values.privileged }} + securityContext: + privileged: true +{{- end }} +{{- if $.Values.containerSecurityContext }} + securityContext: +{{ toYaml .Values.containerSecurityContext | indent 6 }} +{{- end }} +{{- if and $.Values.containerSecurityContext $.Values.privileged }} + securityContext: + privileged: true +{{ toYaml .Values.containerSecurityContext | indent 6 }} +{{- end }} + ports: + {{- range $.Values.ContainerPort }} + - name: {{ .name }} + containerPort: {{ .port }} + protocol: TCP + {{- end }} +{{- if and $.Values.command.value $.Values.command.enabled }} + command: +{{ toYaml $.Values.command.value | indent 6 -}} +{{- end }} +{{- if and $.Values.args.value $.Values.args.enabled }} + args: +{{ toYaml $.Values.args.value | indent 6 -}} +{{- end }} + env: + - name: CONFIG_HASH + value: {{ include (print $.Chart.Name "/templates/configmap.yaml") . | sha256sum }} + - name: SECRET_HASH + value: {{ include (print $.Chart.Name "/templates/secret.yaml") . 
| sha256sum }} + - name: DEVTRON_APP_NAME + value: {{ template ".Chart.Name .name" $ }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- range $.Values.EnvVariablesFromFieldPath }} + - name: {{ .name }} + valueFrom: + fieldRef: + fieldPath: {{ .fieldPath }} + {{- end }} + {{- range $.Values.EnvVariables }} + - name: {{ .name}} + value: {{ .value | quote }} + {{- end }} + {{- if or (and ($hasCMEnvExists) (.Values.ConfigMaps.enabled)) (and ($hasSecretEnvExists) (.Values.ConfigSecrets.enabled)) }} + envFrom: + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "environment" }} + - configMapRef: + {{- if eq .external true }} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "environment" }} + - secretRef: + {{if eq .external true}} + name: {{ .name }} + {{else if eq .external false}} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + resources: +{{ toYaml $.Values.resources | trim | indent 6 }} + volumeMounts: +{{- with .Values.volumeMounts }} +{{ toYaml . | trim | indent 6 }} +{{- end }} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + {{- else }} + {{- range $k, $v := .data }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume" }} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + {{- else if and (eq (.subPath) true) (eq (.externalType) "KubernetesSecret") }} + {{- else if and (eq (.subPath) true) (eq (.external) true) }} + {{- range .secretData }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ .name }} + subPath: {{ .name }} + {{- end }} + {{- else }} + {{- range $k, $v := .data }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) }} []{{- end }} + {{- if and (eq (len .Values.volumeMounts) 0) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) }} []{{- end }} +volumes: +{{- if $.Values.appMetrics }} + - name: envoy-config-volume + configMap: + name: sidecar-config-{{ template ".Chart.Name .name" $ }} +{{- end }} +{{- with .Values.volumes }} +{{ toYaml . | trim | indent 2 }} +{{- end }} +{{- if .Values.ConfigMaps.enabled }} +{{- range .Values.ConfigMaps.maps }} +{{- if eq .type "volume"}} + - name: {{ .name | replace "." 
"-"}}-vol + configMap: + {{- if eq .external true }} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{- if .Values.ConfigSecrets.enabled }} +{{- range .Values.ConfigSecrets.secrets }} +{{- if eq .type "volume"}} + - name: {{ .name | replace "." "-"}}-vol + secret: + {{- if eq .external true }} + secretName: {{ .name }} + {{- else if eq .external false }} + secretName: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- if and (eq (len .Values.volumes) 0) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) (eq (.Values.appMetrics) false) }} []{{- end }} +{{- if and (eq (len .Values.volumes) 0) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) (eq (.Values.appMetrics) false) }} []{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/configmap.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/configmap.yaml new file mode 100644 index 0000000000..ac7f15fafb --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{if eq .external false}} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: 2019-08-12T18:38:34Z + name: {{ .name}}-{{ $.Values.app }} +data: +{{ toYaml .data | trim | indent 2 }} + {{- end}} + {{- end}} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/cronjob.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/cronjob.yaml new file mode 100644 index 0000000000..aebc009bdd --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/cronjob.yaml @@ -0,0 +1,41 @@ +{{- if eq .Values.kind "CronJob" }} +{{- if semverCompare "<1.21" .Capabilities.KubeVersion.GitVersion }} +apiVersion: batch/v1beta1 +{{- else }} +apiVersion: batch/v1 +{{- end }} +kind: CronJob +metadata: + name: {{ include ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} +spec: + {{- if $.Values.cronjobConfigs }} + {{- if $.Values.cronjobConfigs.schedule }} + schedule: {{ $.Values.cronjobConfigs.schedule | quote }} + {{- end }} + {{- if $.Values.cronjobConfigs.startingDeadlineSeconds }} + startingDeadlineSeconds: {{ $.Values.cronjobConfigs.startingDeadlineSeconds }} + {{- end }} + {{- if $.Values.cronjobConfigs.concurrencyPolicy }} + concurrencyPolicy: {{ $.Values.cronjobConfigs.concurrencyPolicy }} + {{- end }} + {{- if semverCompare ">1.20" .Capabilities.KubeVersion.GitVersion }} + {{- if $.Values.cronjobConfigs.suspend }} + suspend: {{ $.Values.cronjobConfigs.suspend }} + {{- end }} + {{- end }} + {{- if $.Values.cronjobConfigs.successfulJobsHistoryLimit }} + successfulJobsHistoryLimit: {{ $.Values.cronjobConfigs.successfulJobsHistoryLimit }} + {{- end }} + {{- if $.Values.cronjobConfigs.failedJobsHistoryLimit }} + failedJobsHistoryLimit: {{ 
$.Values.cronjobConfigs.failedJobsHistoryLimit }} + {{- end }} + {{- end }} + jobTemplate: + spec: + {{- include "job-template-spec" . | indent 6 }} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/externalsecrets.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/externalsecrets.yaml new file mode 100644 index 0000000000..f1bd80229b --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/externalsecrets.yaml @@ -0,0 +1,54 @@ +{{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{if eq .external true }} + {{if (or (eq .externalType "ESO_GoogleSecretsManager") (eq .externalType "ESO_AWSSecretsManager") (eq .externalType "ESO_HashiCorpVault") (eq .externalType "ESO_AzureSecretsManager"))}} +--- +apiVersion: external-secrets.io/v1beta1 +kind: SecretStore +metadata: + name: {{ .name }} +{{- if $.Values.appLabels }} + labels: +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + provider: + {{- toYaml .esoSecretData.secretStore | nindent 4 }} + +--- +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: {{ .name }} +{{- if $.Values.appLabels }} + labels: +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .refreshInterval }} + refreshInterval: {{ .refreshInterval }} + {{- else }} + refreshInterval: 1h + {{- end }} + {{- if and .esoSecretData.secretStoreRef (not .esoSecretData.secretStore) }} + secretStoreRef: +{{ toYaml .esoSecretData.secretStoreRef | indent 4 }} + {{- else }} + secretStoreRef: + name: {{ .name}} + kind: SecretStore + {{- end }} + target: + name: {{ .name }} + creationPolicy: Owner + data: + {{- range .esoSecretData.esoData }} + - secretKey: {{ .secretKey }} + remoteRef: + key: {{ .key }} + property: {{ .property }} + {{- end }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/generic.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/generic.yaml new file mode 100644 index 0000000000..db95e84267 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/generic.yaml @@ -0,0 +1,4 @@ +{{- range .Values.rawYaml }} +--- +{{ toYaml . }} + {{- end -}} diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/job.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/job.yaml new file mode 100644 index 0000000000..e2762026e8 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/job.yaml @@ -0,0 +1,14 @@ +{{ if eq .Values.kind "Job" }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include ".Chart.Name .fullname" $ }}-{{ $.Values.releaseVersion }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ .Values.pipelineName }} +spec: + {{- include "job-template-spec" . 
| indent 2 }} +{{ end }} diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/keda-autoscaling.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/keda-autoscaling.yaml new file mode 100644 index 0000000000..7601b17910 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/keda-autoscaling.yaml @@ -0,0 +1,45 @@ +{{- if eq .Values.kind "ScaledJob" }} +apiVersion: keda.sh/v1alpha1 +kind: ScaledJob +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-keda +spec: + {{- if $.Values.kedaAutoscaling.maxReplicaCount }} + maxReplicaCount: {{ $.Values.kedaAutoscaling.maxReplicaCount | default 1 }} + {{- end }} + {{- if $.Values.kedaAutoscaling.minReplicaCount }} + minReplicaCount: {{ $.Values.kedaAutoscaling.minReplicaCount | default 0 }} + {{- end }} + {{- if $.Values.kedaAutoscaling.pollingInterval }} + pollingInterval: {{ $.Values.kedaAutoscaling.pollingInterval | default 30 }} + {{- end }} + {{- if $.Values.kedaAutoscaling.scalingStrategy }} + scalingStrategy: +{{ toYaml $.Values.kedaAutoscaling.scalingStrategy | indent 4 }} + {{- end }} + {{- if $.Values.kedaAutoscaling.successfulJobsHistoryLimit }} + successfulJobsHistoryLimit: {{ $.Values.kedaAutoscaling.successfulJobsHistoryLimit | default 100 }} + {{- end }} + {{- if $.Values.kedaAutoscaling.rolloutStrategy }} + rolloutStrategy: {{ $.Values.kedaAutoscaling.rolloutStrategy }} + {{- end }} + {{- if $.Values.kedaAutoscaling.failedJobsHistoryLimit }} + failedJobsHistoryLimit: {{ $.Values.kedaAutoscaling.failedJobsHistoryLimit | default 100 }} + {{- end }} + {{- if $.Values.kedaAutoscaling.envSourceContainerName }} + envSourceContainerName: {{ $.Values.kedaAutoscaling.envSourceContainerName }} + {{- end }} + triggers: +{{ toYaml $.Values.kedaAutoscaling.triggers | indent 4 }} + jobTargetRef: +{{- include "job-template-spec" . | indent 4 }} +--- +{{- if $.Values.kedaAutoscaling.triggerAuthentication.enabled }} +apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{ $.Values.kedaAutoscaling.triggerAuthentication.name }} +spec: +{{ toYaml $.Values.kedaAutoscaling.triggerAuthentication.spec | indent 2 }} +{{- end }} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/metrics-service-monitor.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/metrics-service-monitor.yaml new file mode 100644 index 0000000000..9130dc2f80 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/metrics-service-monitor.yaml @@ -0,0 +1,30 @@ +{{- if $.Values.appMetrics -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . 
}} + release: {{ .Values.prometheus.release }} +spec: + jobLabel: {{ template ".Chart.Name .name" $ }} + endpoints: + - port: envoy-admin + interval: 30s + path: /stats/prometheus + selector: + matchLabels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + namespaceSelector: + matchNames: + - {{.Release.Namespace}} + podTargetLabels: + - appId + - envId + - rollouts-pod-template-hash +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/prometheusrules.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/prometheusrules.yaml new file mode 100644 index 0000000000..90f398bff4 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/prometheusrules.yaml @@ -0,0 +1,22 @@ +{{- if .Values.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template ".Chart.Name .fullname" . }} + {{- if .Values.prometheusRule.namespace }} + namespace: {{ .Values.prometheusRule.namespace }} + {{- end }} + labels: + kind: Prometheus + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Values.prometheus.release }} + {{- if .Values.prometheusRule.additionalLabels }} +{{ toYaml .Values.prometheusRule.additionalLabels | indent 4 }} + {{- end }} +spec: + {{- with .Values.prometheusRule.rules }} + groups: + - name: {{ template ".Chart.Name .fullname" $ }} + rules: {{- toYaml . | nindent 6 }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/secret.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/secret.yaml new file mode 100644 index 0000000000..9a8ab67837 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/secret.yaml @@ -0,0 +1,57 @@ +{{- if $.Values.secret.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: app-secret +type: Opaque +data: +{{ toYaml $.Values.secret.data | indent 2 }} +{{- end }} + + +{{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{if eq .external false}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name}}-{{ $.Values.app }} +type: Opaque +data: +{{ toYaml .data | trim | indent 2 }} +{{- end}} + {{if eq .external true }} + {{if (or (eq .externalType "AWSSecretsManager") (eq .externalType "AWSSystemManager") (eq .externalType "HashiCorpVault"))}} +--- +apiVersion: kubernetes-client.io/v1 +kind: ExternalSecret +metadata: + name: {{ .name}} +spec: + {{- if .roleARN }} + roleArn: .roleARN + {{- end}} + {{- if eq .externalType "AWSSecretsManager"}} + backendType: secretsManager + {{- end}} + {{- if eq .externalType "AWSSystemManager"}} + backendType: systemManager + {{- end}} + {{- if eq .externalType "HashiCorpVault"}} + backendType: vault + {{- end}} + data: + {{- range .secretData }} + - key: {{.key}} + name: {{.name}} + {{- if .property }} + property: {{.property}} + {{- end}} + isBinary: {{.isBinary}} + {{- end}} + {{- end}} + {{- end}} + {{- end}} + {{- end}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/service.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/service.yaml new file mode 100644 index 0000000000..99ef805c7c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/service.yaml @@ -0,0 +1,67 @@ +{{- if .Values.service.enabled }} +apiVersion: v1 +kind: Service 
+metadata: + name: {{ template ".servicename" . }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end}} +spec: + type: {{ .Values.service.type | default "ClusterIP" }} + ports: + {{- range .Values.ContainerPort }} + {{- if .servicePort }} + - port: {{ .servicePort }} + {{- else }} + - port: {{ .port }} + {{- end }} + targetPort: {{ .name }} + protocol: TCP + name: {{ .name }} + {{- end }} + {{- if $.Values.appMetrics }} + - port: 9901 + name: envoy-admin + {{- end }} + selector: + app: {{ template ".Chart.Name .name" . }} +{{- if eq .Values.deploymentType "BLUE-GREEN" }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template ".previewservicename" . }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +spec: + type: ClusterIP + ports: + {{- range .Values.ContainerPort }} + {{- if .servicePort }} + - port: {{ .servicePort }} + {{- else }} + - port: {{ .port }} + {{- end }} + targetPort: {{ .name }} + protocol: TCP + name: {{ .name }} + {{- end }} + {{- if $.Values.appMetrics }} + - port: 9901 + name: envoy-admin + {{- end }} + selector: + app: {{ template ".Chart.Name .name" . }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/servicemonitor.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/servicemonitor.yaml new file mode 100644 index 0000000000..1f90c722cb --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{ $serviceMonitorEnabled := include "serviceMonitorEnabled" . }} +{{- if eq "true" $serviceMonitorEnabled -}} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template ".Chart.Name .fullname" . }}-sm + labels: + kind: Prometheus + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . 
}} + release: {{ .Values.prometheus.release }} + {{- if .Values.servicemonitor.additionalLabels }} +{{ toYaml .Values.servicemonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + {{- range .Values.ContainerPort }} + {{- if .servicemonitor }} + {{- if .servicemonitor.enabled}} + {{- if .servicePort }} + - port: {{ .name }} + {{- if .servicemonitor.path }} + path: {{ .servicemonitor.path}} + {{- end }} + {{- if .servicemonitor.scheme }} + scheme: {{ .servicemonitor.scheme}} + {{- end }} + {{- if .servicemonitor.interval }} + interval: {{ .servicemonitor.interval}} + {{- end }} + {{- if .servicemonitor.scrapeTimeout }} + scrapeTimeout: {{ .servicemonitor.scrapeTimeout}} + {{- end }} + {{- if .servicemonitor.metricRelabelings}} + metricRelabelings: +{{toYaml .servicemonitor.metricRelabelings | indent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + selector: + matchLabels: + app: {{ template ".Chart.Name .name" $ }} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/sidecar-configmap.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/sidecar-configmap.yaml new file mode 100644 index 0000000000..30dc74cbda --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/templates/sidecar-configmap.yaml @@ -0,0 +1,166 @@ +{{- if .Values.appMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: 2019-08-12T18:38:34Z + name: sidecar-config-{{ template ".Chart.Name .name" $ }} +data: + envoy-config.json: | + { + "stats_config": { + "use_all_default_tags": false, + "stats_tags": [ + { + "tag_name": "cluster_name", + "regex": "^cluster\\.((.+?(\\..+?\\.svc\\.cluster\\.local)?)\\.)" + }, + { + "tag_name": "tcp_prefix", + "regex": "^tcp\\.((.*?)\\.)\\w+?$" + }, + { + "tag_name": "response_code", + "regex": "_rq(_(\\d{3}))$" + }, + { + "tag_name": "response_code_class", + "regex": ".*_rq(_(\\dxx))$" + }, + { + "tag_name": "http_conn_manager_listener_prefix", + "regex": "^listener(?=\\.).*?\\.http\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "http_conn_manager_prefix", + "regex": "^http\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "listener_address", + "regex": "^listener\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "mongo_prefix", + "regex": "^mongo\\.(.+?)\\.(collection|cmd|cx_|op_|delays_|decoding_)(.*?)$" + } + ], + "stats_matcher": { + "inclusion_list": { + "patterns": [ + { + "regex": ".*_rq_\\dxx$" + }, + { + "regex": ".*_rq_time$" + }, + { + "regex": "cluster.*" + }, + ] + } + } + }, + "admin": { + "access_log_path": "/dev/null", + "address": { + "socket_address": { + "address": "0.0.0.0", + "port_value": 9901 + } + } + }, + "static_resources": { + "clusters": [ + {{- range $index, $element := .Values.ContainerPort }} + { + "name": "{{ $.Values.app }}-{{ $index }}", + "type": "STATIC", + "connect_timeout": "0.250s", + "lb_policy": "ROUND_ROBIN", +{{- if $element.idleTimeout }} + "common_http_protocol_options": { + "idle_timeout": {{ $element.idleTimeout | quote }} + }, +{{- end }} +{{- if or $element.useHTTP2 $element.useGRPC }} + "http2_protocol_options": {}, +{{- end }} +{{- if and (not $element.useGRPC) (not $element.supportStreaming) }} + "max_requests_per_connection": "1", +{{- end }} + "load_assignment": { + "cluster_name": "9", + "endpoints": { + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "protocol": 
"TCP", + "address": "127.0.0.1", + "port_value": {{ $element.port }} + } + } + } + } + ] + } + } + }, + {{- end }} + ], + "listeners":[ + {{- range $index, $element := .Values.ContainerPort }} + { + "address": { + "socket_address": { + "protocol": "TCP", + "address": "0.0.0.0", + "port_value": {{ $element.envoyPort | default (add 8790 $index) }} + } + }, + "filter_chains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "config": { + "codec_type": "AUTO", + "stat_prefix": "stats", + "route_config": { + "virtual_hosts": [ + { + "name": "backend", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "prefix": "/" + }, + "route": { +{{- if $element.supportStreaming }} + "timeout": "0s", +{{- end }} + "cluster": "{{ $.Values.app }}-{{ $index }}" + } + } + ] + } + ] + }, + "http_filters": { + "name": "envoy.filters.http.router" + } + } + } + ] + } + ] + }, + {{- end }} + ] + } + } +--- +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/test_values.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/test_values.yaml new file mode 100644 index 0000000000..af35e15627 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/test_values.yaml @@ -0,0 +1,404 @@ +# Default values for myapp. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +kind: ScaledJob + +jobConfigs: + backoffLimit: 5 + activeDeadlineSeconds: 100 + parallelism: 1 + completions: 2 + suspend: false + ttlSecondsAfterFinished: 100 + +cronjobConfigs: + schedule: "* * * * *" + startingDeadlineSeconds: 100 + concurrencyPolicy: Allow + suspend: false + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 1 + restartPolicy: OnFailure + +imagePullSecrets: + - test1 + - test2 +MinReadySeconds: 5 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyPort: 8799 + useHTTP2: true + supportStreaming: true + idleTimeout: 1800s + servicemonitor: + enabled: true + path: /abc + scheme: 'http' + interval: 30s + scrapeTimeout: 20s + metricRelabelings: + - sourceLabels: [namespace] + regex: '(.*)' + replacement: myapp + targetLabel: target_namespace + - name: app1 + port: 8090 + servicePort: 8080 + useGRPC: true + servicemonitor: + enabled: true + - name: app2 + port: 8091 + servicePort: 8081 + useGRPC: true + +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 +autoPromotionSeconds: 30 + +Spec: + Affinity: + Key: + # Key: kops.k8s.io/instancegroup + Values: + + +image: + pullPolicy: IfNotPresent + +secret: + enabled: false + +service: + enabled: false + type: ClusterIP + # name: "1234567890123456789012345678901234567890123456789012345678901234567890" + annotations: {} + # test1: test2 + # test3: test4 + +server: + deployment: + image_tag: 1-95af053 + image: "" +deploymentType: "RECREATE" + + +EnvVariables: + - name: FLASK_ENV + value: qa + +prometheus: + release: monitoring + +servicemonitor: + additionalLabels: {} + +kedaAutoscaling: + envSourceContainerName: "" + minReplicaCount: 1 + maxReplicaCount: 2 + pollingInterval: 30 + successfulJobsHistoryLimit: 5 + failedJobsHistoryLimit: 5 + rolloutStrategy: default + scalingStrategy: + strategy: "custom" + customScalingQueueLengthDeduction: 1 + customScalingRunningJobPercentage: "0.5" + pendingPodConditions: + - "Ready" + - "PodScheduled" + - "AnyOtherCustomPodCondition" + multipleScalersCalculation : "max" + triggers: + - type: rabbitmq + metadata: + queueName: hello + 
host: RabbitMqHost + queueLength : '5' + authenticationRef: {} + triggerAuthentication: + enabled: true + name: "" + spec: {} + +prometheusRule: + enabled: true + additionalLabels: {} + namespace: "" + rules: + # These are just examples rules, please adapt them to your needs + - alert: TooMany500s + expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + for: 1m + labels: + severity: critical + annotations: + description: Too many 5XXs + summary: More than 5% of the all requests did return 5XX, this require your attention + - alert: TooMany400s + expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + for: 1m + labels: + severity: critical + annotations: + description: Too many 4XXs + summary: More than 5% of the all requests did return 4XX, this require your attention + +command: + enabled: true + value: + - /bin/sh + +args: + enabled: false + value: [] + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 0.10 + memory: 100Mi +volumeMounts: + - name: log-volume + mountPath: /var/log + +volumes: + - name: log-volume + emptyDir: {} + + +nodeSelector: {} + + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 + +ConfigMaps: + enabled: false + # maps: + # - name: config-map-1 + # type: environment + # external: false + # data: + # key1: key1value-1 + # key2: key2value-1 + # key3: key3value-1 + # - name: config-map-2 + # type: volume + # external: false + # mountPath: /etc/config/2 + # subPath: false + # filePermission: "777" + # data: + # key1: | + # club : manchester utd + # nation : england + # key2: abc-2 + # key3: abc-2 + # - name: config-map-3 + # type: environment + # external: true + # mountPath: /etc/config/3 + # data: [] + # - name: config-map-4 + # type: volume + # external: true + # mountPath: /etc/config/4 + # data: [] + + +ConfigSecrets: + enabled: false + secrets: + - name: config-secret-1 + type: environment + external: true + externalType: ESO_AWSSecretsManager + esoSecretData: + secretStore: + aws: + service: SecretsManager + region: us-east-1 + auth: + secretRef: + accessKeyIDSecretRef: + name: awssm-secret + key: access-key + secretAccessKeySecretRef: + name: awssm-secret + key: secret-access-key + esoData: + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + data: + key1: key1value-1 + key2: key2value-1 + key3: key3value-1 + # - name: config-secret-1 + # type: environment + # external: false + # data: + # key1: key1value-1 + # key2: key2value-1 + # key3: key3value-1 + # - name: config-secret-2x + # type: volume + # external: false + # mountPath: /etc/config/2 + # subPath: false + # filePermission: "777" + # data: + # key1: | + # club : manchester utd + # nation : england + # key2: abc-2 + # key3: abc-2 + + +initContainers: + ## Additional init 
containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage. + ## If reuse container image is set as true, you cannot specify an addition init container along with the image. + # - reuseContainerImage: true + # volumeMounts: + # - mountPath: /etc/ls-oms + # name: ls-oms-cm-vol + # command: + # - flyway + # - -configFiles=/etc/ls-oms/flyway.conf + # - migrate + + # - name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + # resources: + # limits: + # cpu: 50m + # memory: 100Mi + # requests: + # cpu: 10m + # memory: 50Mi + - name: volume-mount-hack2 + image: busybox + command: ["sh", "-c", "chown -R 1000:1000 logs"] + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + resources: + limits: + cpu: 50m + memory: 100Mi + requests: + cpu: 10m + memory: 50Mi + +containers: + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + # - name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + + +rawYaml: [] +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP + +podDisruptionBudget: {} + # minAvailable: 1 + # maxUnavailable: 1 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +## + +tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +appMetrics: false + +podAnnotations: + fluentbit.io/exclude: true + +podLabels: + severity: critical + +containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - SYS_PTRACE + + +podSecurityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + +shareProcessNamespace: true diff --git a/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/values.yaml b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/values.yaml new file mode 100644 index 0000000000..5d31f6d83a --- /dev/null +++ b/scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/values.yaml @@ -0,0 +1,320 @@ +# Default values for myapp. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
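+#
+# Note: the workload kind (Job, CronJob or ScaledJob) and its jobConfigs /
+# cronjobConfigs blocks are not defaulted in this file; they are expected to be
+# supplied through app-level values (test_values.yaml above shows a complete
+# example). A minimal CronJob override might look like the following
+# (illustrative sketch only; field names are taken from test_values.yaml):
+#
+# kind: CronJob
+# cronjobConfigs:
+#   schedule: "0 * * * *"
+#   concurrencyPolicy: Allow
+#   successfulJobsHistoryLimit: 3
+#   failedJobsHistoryLimit: 1
+#   restartPolicy: OnFailure
+#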
+ +MinReadySeconds: 5 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyPort: 8799 + useHTTP2: true + supportStreaming: true + idleTimeout: 1800s +# servicemonitor: +# enabled: true +# path: /abc +# scheme: 'http' +# interval: 30s +# scrapeTimeout: 20s +# metricRelabelings: +# - sourceLabels: [namespace] +# regex: '(.*)' +# replacement: myapp +# targetLabel: target_namespace + + # - name: app1 + # port: 8090 + # servicePort: 8080 + # useGRPC: true + +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 +autoPromotionSeconds: 30 +shareProcessNamespace: false +setHostnameAsFQDN: false +readinessGates: [] +ephemeralContainers: [] +topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: zone + # whenUnsatisfiable: DoNotSchedule + # autoLabelSelector: true + # customLabelSelector: + # foo: bar + +podSpec: {} + # subdomain: "" + # setHostnameAsFQDN: "" + # schedulerName: "" + # readinessGates: + # - conditionType: "www.example.com/feature-1" + # dnsPolicy: "" + # enableServiceLinks: false + # dnsConfig: {} + +Spec: + Affinity: + Key: +# Key: kops.k8s.io/instancegroup + Values: + +restartPolicy: OnFailure + +image: + pullPolicy: IfNotPresent + +secret: + enabled: false + +service: + type: ClusterIP + enabled: false +# name: "1234567890123456789012345678901234567890123456789012345678901234567890" + annotations: {} + # test1: test2 + # test3: test4 + +server: + deployment: + image_tag: 1-95af053 + image: "" + + +EnvVariables: [] + # - name: FLASK_ENV + # value: qa + +EnvVariablesFromFieldPath: [] +# - name: POD_NAME +# fieldPath: metadata.name + +prometheus: + release: monitoring + +servicemonitor: + additionalLabels: {} + + +prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" +# rules: +# # These are just examples rules, please adapt them to your needs +# - alert: TooMany500s +# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 +# for: 1m +# labels: +# severity: critical +# annotations: +# description: Too many 5XXs +# summary: More than 5% of the all requests did return 5XX, this require your attention +# - alert: TooMany400s +# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 +# for: 1m +# labels: +# severity: critical +# annotations: +# description: Too many 4XXs +# summary: More than 5% of the all requests did return 4XX, this require your attention +# + +command: + enabled: false + value: [] + +args: + enabled: false + value: [] + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 0.10 + memory: 100Mi + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + + +nodeSelector: {} + + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 + +ConfigMaps: + enabled: false + maps: [] +# - name: config-map-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-map-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 +# key3: abc-2 +# - name: config-map-3 +# type: environment +# external: true +# mountPath: /etc/config/3 +# data: [] +# - name: config-map-4 +# type: volume +# external: true +# mountPath: /etc/config/4 +# data: [] + + +ConfigSecrets: + enabled: false + secrets: [] +# - name: config-secret-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-secret-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 + + +initContainers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + ## Uncomment below line ONLY IF you want to reuse the container image. + ## This will assign your application's docker image to init container. + # reuseContainerImage: true + +containers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . 
+ #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + +kedaAutoscaling: {} + # envSourceContainerName: "" + # minReplicaCount: 1 + # maxReplicaCount: 2 + # pollingInterval: 30 + # successfulJobsHistoryLimit: 5 + # failedJobsHistoryLimit: 5 + # rolloutStrategy: default + # scalingStrategy: + # strategy: "custom" + # customScalingQueueLengthDeduction: 1 + # customScalingRunningJobPercentage: "0.5" + # pendingPodConditions: + # - "Ready" + # - "PodScheduled" + # - "AnyOtherCustomPodCondition" + # multipleScalersCalculation : "max" + # triggers: + # - type: rabbitmq + # metadata: + # queueName: hello + # host: RabbitMqHost + # queueLength : '5' + # authenticationRef: {} + # triggerAuthentication: + # enabled: false + # name: "" + # spec: {} + +rawYaml: [] +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP + +podDisruptionBudget: {} +# minAvailable: 1 +# maxUnavailable: 1 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + +tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +imagePullSecrets: [] + # - test1 + # - test2 \ No newline at end of file
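A quick local render of these templates can be sketched as follows (assuming Helm 3 and a checkout of this repository; `app` and `env` are normally injected by Devtron at deploy time, so placeholder values are passed with --set here):

  helm template cronjob-test scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0 \
    -f scripts/devtron-reference-helm-charts/cronjob-chart_1-6-0/test_values.yaml \
    --set app=1 --set env=1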