diff --git a/config/config.sample.toml b/config/config.sample.toml index 818423a..b21014d 100644 --- a/config/config.sample.toml +++ b/config/config.sample.toml @@ -44,6 +44,9 @@ image_registry = "mattermost" # a per job type basis. Example: #[jobs.kubernetes] #jobs_resource_requirements = '{"transcribing":{"limits":{"cpu":"4000m"},"requests":{"cpu":"2000m"}},"recording":{"limits":{"cpu":"2000m"},"requests":{"cpu":"1000m"}}}' +# +# The Persistent Volume Claim name to use to store data produced by jobs (e.g. recording files). +#persistent_volume_claim_name = "my-pvc" [logger] # A boolean controlling whether to log to the console. @@ -62,4 +65,3 @@ file_level = "DEBUG" file_location = "calls-offloader.log" # A boolean controlling whether to display colors when logging to the console. enable_color = true - diff --git a/service/kubernetes/service.go b/service/kubernetes/service.go index c1777da..8629999 100644 --- a/service/kubernetes/service.go +++ b/service/kubernetes/service.go @@ -51,10 +51,11 @@ func (r *JobsResourceRequirements) UnmarshalTOML(data interface{}) error { } type JobServiceConfig struct { - MaxConcurrentJobs int - FailedJobsRetentionTime time.Duration - ImageRegistry string - JobsResourceRequirements JobsResourceRequirements `toml:"jobs_resource_requirements"` + MaxConcurrentJobs int + FailedJobsRetentionTime time.Duration + ImageRegistry string + JobsResourceRequirements JobsResourceRequirements `toml:"jobs_resource_requirements"` + PersistentVolumeClaimName string `toml:"persistent_volume_claim_name"` } func (c JobServiceConfig) IsValid() error { @@ -210,6 +211,21 @@ func (s *JobService) CreateJob(cfg job.Config, onStopCb job.StopCb) (job.Job, er ttlSecondsAfterFinished = newInt32(int32(s.cfg.FailedJobsRetentionTime.Seconds())) } + volumes := []corev1.Volume{ + { + Name: jobID, + }, + } + + if s.cfg.PersistentVolumeClaimName != "" { + s.log.Debug("using persistent volume claim", mlog.String("name", s.cfg.PersistentVolumeClaimName)) + volumes[0].VolumeSource = corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: s.cfg.PersistentVolumeClaimName, + }, + } + } + spec := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobID, @@ -254,11 +270,7 @@ func (s *JobService) CreateJob(cfg job.Config, onStopCb job.StopCb) (job.Job, er Resources: s.cfg.JobsResourceRequirements[cfg.Type], }, }, - Volumes: []corev1.Volume{ - { - Name: jobID, - }, - }, + Volumes: volumes, Tolerations: tolerations, // We don't want to ever restart pods as any failure needs to be // surfaced to the user who should hit record again.