提问者:小点点

如何使用HELM图表将kubernetes部署作业转换为kubernetes cron作业


我正在使用Helm图表(chart)在Kubernetes上运行我的Spring Boot应用程序的Docker映像。

以下是相同的详细信息

templates/deployment.yaml

# templates/deployment.yaml — renders the Deployment for the Spring Boot image.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "xyz.fullname" . }}
  labels:
    {{- include "xyz.labels" . | nindent 4 }}
spec:
  # A fixed replica count is only set when autoscaling is disabled;
  # otherwise the HorizontalPodAutoscaler owns the count.
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "xyz.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "xyz.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "xyz.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          # Tag falls back to Chart.appVersion when .Values.image.tag is "".
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          # DB credentials/URL and token come from the pre-existing Secret
          # "customsecret" (not created by this chart).
          env:
          - name: DB_USER_NAME
            valueFrom:
              secretKeyRef:
                name: customsecret
                key: DB_USER_NAME
          - name: DB_PASSWORD
            valueFrom:
              secretKeyRef:
                name: customsecret
                key: DB_PASSWORD
          - name: DB_URL
            valueFrom:
              secretKeyRef:
                name: customsecret
                key: DB_URL
          - name: TOKEN
            valueFrom:
              secretKeyRef:
                name: customsecret
                key: TOKEN
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          livenessProbe:
            httpGet:
              path: {{ .Values.service.liveness }}
              port: http
            initialDelaySeconds: 60
            periodSeconds: 60
          # NOTE(review): readiness targets the numeric port while liveness uses
          # the named port "http" — both resolve to the same port, but using the
          # name in both places would be consistent.
          readinessProbe:
            httpGet:
              path: {{ .Values.service.readiness }}
              port: {{ .Values.service.port }}
            initialDelaySeconds: 60
            periodSeconds: 30
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}


Chart.yaml

apiVersion: v2
name: xyz
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
# Quoted (as the comment above recommends): an injected version such as 1.20
# would otherwise be retyped by YAML as the float 1.2.
appVersion: "<APP_VERSION_PLACEHOLDER>"


values.yaml

# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

### - If we want 3 instances then we mention 3 - then 3 pods will be created on the server
### - For the staging env we usually keep 1
replicaCount: 1

image:
### ---> We can also provide local image details here
### ---> We can create an image in a Docker repository and use that image URL here
  repository: gcr.io/mgcp-109-xyz-operations/projectname
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: "xyz"

podAnnotations: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

# Cron expression consumed by templates/cronjob.yaml (every 5 minutes).
# Kept quoted: a leading '*' in unquoted YAML would be parsed as an alias.
schedule: "*/5 * * * *"

### SMS2-40 - There are 2 ways to serve our applications --> 1st: LoadBalancer or 2nd: NodePort
service:
  type: NodePort
  port: 8087
  liveness: /actuator/health/liveness
  readiness: /actuator/health/readiness
### service:
###   type: ClusterIP
###   port: 80

ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

nodeSelector: {}

tolerations: []

affinity: {}
#application:
#  configoveride: "config/application.properties"

templates/cronjob.yaml

# templates/cronjob.yaml (as posted — see review notes below).
#
# NOTE(review): this file ends abruptly at the dangling
# "{{- with .Values.nodeSelector }}" on the last line — the nodeSelector body
# and the closing "{{- end }}" are missing, so Helm fails to render the
# template and the CronJob resource is never created; this is why
# `kubectl get cronjob` finds nothing.
# NOTE(review): `schedule:` renders its value unquoted; a cron expression
# starts with '*', which YAML parses as an alias — it should be written
# schedule: "{{ .Values.schedule }}".
# NOTE(review): the `nindent 12` on securityContext/resources was copied from
# deployment.yaml, but those keys sit two levels deeper here — verify the
# rendered indentation with `helm template`.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: {{ include "xyz.fullname" . }}
spec:
  schedule: {{ .Values.schedule }}
  jobTemplate:
    spec:
      backoffLimit: 5
      template:
        spec:
          containers:
            - name: {{ .Chart.Name }}
              securityContext:
                {{- toYaml .Values.securityContext | nindent 12 }}
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
              imagePullPolicy: {{ .Values.image.pullPolicy }}
              env:
              - name: DB_USER_NAME
                valueFrom:
                  secretKeyRef:
                    name: customsecret
                    key: DB_USER_NAME
              - name: DB_PASSWORD
                valueFrom:
                  secretKeyRef:
                    name: customsecret
                    key: DB_PASSWORD
              - name: DB_URL
                valueFrom:
                  secretKeyRef:
                    name: customsecret
                    key: DB_URL
              - name: TOKEN
                valueFrom:
                  secretKeyRef:
                    name: customsecret
                    key: TOKEN
              ports:
                - name: http
                  containerPort: {{ .Values.service.port }}
                  protocol: TCP
              livenessProbe:
                httpGet:
                  path: {{ .Values.service.liveness }}
                  port: http
                initialDelaySeconds: 60
                periodSeconds: 60
              readinessProbe:
                httpGet:
                  path: {{ .Values.service.readiness }}
                  port: {{ .Values.service.port }}
                initialDelaySeconds: 60
                periodSeconds: 30
              resources:
                {{- toYaml .Values.resources | nindent 12 }}
          {{- with .Values.nodeSelector }}

templates/service.yaml


# templates/service.yaml — exposes the application pods (NodePort by default,
# per values.yaml) on .Values.service.port.
# NOTE(review): once deployment.yaml is removed in favour of a CronJob, this
# Service only matches the short-lived Job pods — confirm it is still needed.
apiVersion: v1
kind: Service
metadata:
  name: {{ include "xyz.fullname" . }}
  labels:
    {{- include "xyz.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      # "http" is the named containerPort declared in the pod template.
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "xyz.selectorLabels" . | nindent 4 }}


我最初是在没有cronjob.yaml的情况下运行我的应用程序的。

一旦我的应用程序在Kubernetes上运行起来,我就试图把它转换为Kubernetes CronJob,因此我删除了templates/deployment.yaml,改为添加了templates/cronjob.yaml。

在我部署应用程序之后它运行了,但当我执行kubectl get cronjob时,输出显示在默认命名空间(default namespace)中找不到该资源。

我在这里做错了什么?一直无法弄清楚。我使用下面的命令安装我的Helm图表:helm upgrade --install chartname


共2个答案

匿名用户

不确定你的文件是否只贴出了一半,但它没有正确地结束;在渲染/测试图表时,很可能就是在那里出现EOF错误。

cronjob.yaml的结尾部分应为:

{{- with .Values.nodeSelector }}
          nodeSelector:
            {{- toYaml . | nindent 12 }}
          {{- end }} 

完整的文件应该是这样的

# Complete, renderable templates/cronjob.yaml (answer 1).
apiVersion: batch/v1
kind: CronJob
metadata:
  name: test
spec:
  # Quoted: the rendered cron expression ("*/5 * * * *") starts with '*',
  # which unquoted YAML would try to parse as an alias and fail on.
  schedule: "{{ .Values.schedule }}"
  jobTemplate:
    spec:
      backoffLimit: 5
      template:
        spec:
          containers:
            - name: {{ .Chart.Name }}
              securityContext:
                # nindent 16 (not 12 as in deployment.yaml): the key sits two
                # levels deeper here, so the block must land at column 16.
                {{- toYaml .Values.securityContext | nindent 16 }}
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
              imagePullPolicy: {{ .Values.image.pullPolicy }}
              ports:
                - name: http
                  containerPort: {{ .Values.service.port }}
                  protocol: TCP
              # NOTE(review): liveness/readiness probes are unusual on CronJob
              # pods — keep them only if the job runs a long-lived HTTP server.
              livenessProbe:
                httpGet:
                  path: {{ .Values.service.liveness }}
                  port: http
                initialDelaySeconds: 60
                periodSeconds: 60
              readinessProbe:
                httpGet:
                  path: {{ .Values.service.readiness }}
                  port: {{ .Values.service.port }}
                initialDelaySeconds: 60
                periodSeconds: 30
              resources:
                {{- toYaml .Values.resources | nindent 16 }}
          {{- with .Values.nodeSelector }}
          nodeSelector:
            {{- toYaml . | nindent 12 }}
          {{- end }}

我刚刚测试过,上面的配置可以正常工作。

测试Helm图表模板渲染的命令:

helm template <chart name> . --output-dir ./yaml

匿名用户

我之前还同时部署了deployment.yaml,这是一个错误,所以我删除了deployment.yaml文件,只保留了cronjob.yaml文件,其内容如下:

# templates/cronjob.yaml (answer 2) — cleaned up so it renders valid YAML.
apiVersion: batch/v1
kind: CronJob
metadata:
  name: {{ include "xyz.fullname" . }}
  labels:
    {{- include "xyz.labels" . | nindent 4 }}
spec:
  # Quoted: a cron expression starts with '*', which unquoted YAML parses as an alias.
  schedule: "{{ .Values.schedule }}"
  concurrencyPolicy: Forbid
  failedJobsHistoryLimit: 2
  jobTemplate:
    spec:
      backoffLimit: 0
      template:
        # Pod-template metadata. In the original post this block dangled after
        # jobTemplate.spec with broken indentation, and `labels:` sat inside
        # the podAnnotations `with` block — so the selector labels disappeared
        # whenever podAnnotations was empty.
        metadata:
          {{- with .Values.podAnnotations }}
          annotations:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          labels:
            {{- include "xyz.selectorLabels" . | nindent 12 }}
        spec:
          restartPolicy: Never
          containers:
            - name: {{ .Chart.Name }}
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
              imagePullPolicy: {{ .Values.image.pullPolicy }}
              env:
              - name: DB_USER_NAME
                valueFrom:
                  secretKeyRef:
                    name: customsecret
                    key: DB_USER_NAME
              - name: DB_PASSWORD
                valueFrom:
                  secretKeyRef:
                    name: customsecret
                    key: DB_PASSWORD
              - name: DB_URL
                valueFrom:
                  secretKeyRef:
                    name: customsecret
                    key: DB_URL
              - name: TOKEN
                valueFrom:
                  secretKeyRef:
                    name: customsecret
                    key: TOKEN
              - name: POD_NAME
                valueFrom:
                  fieldRef:
                    fieldPath: metadata.name
              - name: DD_AGENT_HOST
                valueFrom:
                  fieldRef:
                    fieldPath: status.hostIP
              # Templated string values are quoted so an empty or
              # boolean-looking expansion cannot change the YAML type.
              # NOTE(review): `.Values.datadog.env` is not in the posted
              # values.yaml — confirm it is defined in your environment values.
              - name: DD_ENV
                value: "{{ .Values.datadog.env }}"
              - name: DD_SERVICE
                value: "{{ include "xyz.name" . }}"
              # NOTE(review): "xyz.AppVersion" is not a stock helper — confirm
              # it exists in _helpers.tpl, or use .Chart.AppVersion directly.
              - name: DD_VERSION
                value: "{{ include "xyz.AppVersion" . }}"
              - name: DD_LOGS_INJECTION
                value: "true"
              - name: DD_RUNTIME_METRICS_ENABLED
                value: "true"
              volumeMounts:
                - mountPath: /app/config
                  name: logback
              ports:
                - name: http
                  containerPort: {{ .Values.service.port }}
                  protocol: TCP
          volumes:
            - configMap:
                name: {{ include "xyz.name" . }}
              name: logback