-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathvalues.yaml
205 lines (187 loc) · 4.69 KB
/
values.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
# Default values for clickhouse.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# Number of ClickHouse server replicas in the StatefulSet.
replicaCount: 1

image:
  repository: yandex/clickhouse-server
  tag: "19.11.3.11"
  # Kubernetes pull policies are case-sensitive: must be "IfNotPresent",
  # not "ifNotPresent" (the original value would be rejected by the API server).
  pullPolicy: IfNotPresent

podManagementPolicy: Parallel
terminationGracePeriodSeconds: 30
# SHA256-hashed password for the ClickHouse config. Generate with:
#   echo -n "PASSWORD" | sha256sum | tr -d '-' | tr -d ' '
# (note the -n: without it, echo appends a newline and the hash is wrong)
# Example:
#   command: echo -n "password" | sha256sum | tr -d '-' | tr -d ' '
#   result:  5e884898da28047151d0e56f8dc6292773603d0d6aabbdd62a11ef721d1542d8
#
config:
  log_level: "error"
  aggregator:
    enabled: false
    nodeport: false
  # NOTE(review): nesting reconstructed from a flattened source — confirm
  # cluster_name belongs at config level (remote_servers cluster id), not
  # under aggregator.
  cluster_name: ch
  replication:
    user: replication
    pass: default
    # Quoted deliberately: this is rendered into ClickHouse XML config,
    # which expects the string "true", not a YAML boolean.
    internal: "true"
  shard:
    size: 2
    count: 1
  # users / profiles / quotas mirror the three sibling sections of
  # ClickHouse users.xml.
  users:
    default:
      password: password
      profile: default
      quota: default
  # Explicit empty map — a bare "quotas:" parses as null, not {}.
  quotas: {}
  profiles:
    default:
      use_uncompressed_cache: 0
      load_balancing: random
  options:
    builtin_dictionaries_reload_interval: 600
    max_session_timeout: 3600
    default_session_timeout: 600
    max_connections: 8192
    keep_alive_timeout: 3
    max_open_files: 262144
    max_concurrent_queries: 1000
    # NOTE(review): ClickHouse server config expects sizes in bytes; the
    # Gi/Mi suffixes here presumably get converted by the chart templates —
    # verify before changing.
    mark_cache_size: 1Gi
    timezone: UTC
    max_memory_usage: 2Gi
    max_memory_usage_for_all_queries: 2Gi
    max_bytes_before_external_sort: 512Mi
    max_bytes_before_external_group_by: 512Mi
    dictionaries_lazy_load: false
# ZooKeeper ensemble used for replication coordination.
zookeeper:
  nodes: 1
  name: zookeeper
  port: 2181
# Extra environment variables for the ClickHouse container.
env:
  # Values rendered into a Secret and injected as env vars, e.g.:
  # secret:
  #   MYSQL_USER: root
  #   MYSQL_PASS: root
  #   MYSQL_HOST: mysql
  #   MYSQL_PORT: "3306"   # quote numeric env values — consumers expect strings
  #   MYSQL_DATABASE: test
  secret: {}
  # Plain (non-secret) env vars.
  raw: {}

# updateStrategy:
#   type: RollingUpdate
#   rollingUpdate:
#     maxUnavailable: 1
service:
  type: ClusterIP
  ports:
    # HTTP interface.
    http:
      containerPort: 8123
      nodePort: 30123
      protocol: TCP
    # Native TCP protocol.
    native:
      containerPort: 9000
      nodePort: 30900
      protocol: TCP
    # Inter-server replication port (cluster-internal only — no nodePort).
    interserver:
      containerPort: 9009
      protocol: TCP
# External dictionaries configuration (rendered into dictionaries config).
dictionaries: {}
# SQL/scripts executed on first start.
initdb: {}

persistence:
  enabled: false
  # reclaimPolicy: Retain
  # existingClaim: ""
  # storageClass: "local-storage"
  accessMode: ReadWriteOnce
  size: 10Gi
  # annotations: {}
  # finalizers: []   # PVC finalizers are a list, not a map

podAnnotations: {}
podLabels: {}
# Prometheus exporter sidecar.
metrics:
  enabled: true
  port: 9116
  image:
    repository: tkroman/clickhouse_exporter_fresh
    tag: "18.12.14"
    # Case-sensitive Kubernetes value — "IfNotPresent", not "ifNotPresent".
    pullPolicy: IfNotPresent
# Scheduled backups via alexakulov/clickhouse-backup run as a CronJob.
backup:
  enabled: false
  image:
    repository: alexakulov/clickhouse-backup
    # NOTE(review): "latest" is not reproducible — pin a specific tag.
    tag: latest
    pullPolicy: Always
  cron:
    schedule: "0 4 * * *"
    successfulJobsHistoryLimit: 3
    failedJobsHistoryLimit: 3
    startingDeadlineSeconds: 60
    restartPolicy: OnFailure
  # Container command for the backup job. Names the backup with a UTC
  # timestamp, then creates and uploads it.
  # NOTE(review): nesting reconstructed from a flattened source — confirm
  # whether the chart reads this as backup.command or backup.cron.command.
  command:
    - sh
    - -c
    - |
      #!/bin/sh
      BACKUP_NAME=$(date -u +%Y-%m-%dT%H-%M-%S)
      clickhouse-backup list
      clickhouse-backup create $BACKUP_NAME
      clickhouse-backup upload $BACKUP_NAME
  # Rendered into clickhouse-backup's config.yml.
  config:
    clickhouse:
      username: default
      port: 9000
      data_path: ""
      skip_tables:
        - system.*
    s3:
      # access_key: ""
      # secret_key: ""
      # bucket: ""
      # endpoint: ""
      # region: us-east-1
      acl: private
      force_path_style: true
      disable_ssl: true
      disable_progress_bar: false
      # Behavior for re-uploading files that already exist with the same
      # size. Must be "skip", "etag" or "always":
      #   "skip" - the fastest but can make backups inconsistent
      #   "etag" - calculate etag for local files; use if your network is very slow
      overwrite_strategy: always
      part_size: 5242880
      delete_extra_files: true
      strategy: archive
      # NOTE(review): in old clickhouse-backup versions these retention and
      # compression keys live under the s3 section — confirm against the
      # pinned image version.
      backups_to_keep_local: 0
      backups_to_keep_s3: 0
      compression_level: 1
      # supported: 'tar', 'lz4', 'bzip2', 'gzip', 'sz', 'xz'
      compression_format: lz4
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, adjust the
# values below or set back to `resources: {}`.
#
# The source declared "resources" TWICE ({} first, then the values below);
# duplicate keys are invalid YAML and most parsers silently keep the last
# value — merged here into a single key with the effective (last-wins) values.
resources:
  limits:
    cpu: 2
    # NOTE(review): 3096Mi is an odd size — 3072Mi (3Gi) was probably
    # intended; confirm before changing.
    memory: 3096Mi
  requests:
    cpu: 100m
    memory: 128Mi
# PodSpec tolerations is a LIST — empty list is [], not {} (an empty map
# would render invalid tolerations if passed through toYaml).
tolerations: []

## Affinity settings
# Passed through the template engine, hence the string/block-scalar form.
affinity: ""
# affinity: |
#   podAntiAffinity:
#     preferredDuringSchedulingIgnoredDuringExecution:
#       - weight: 20
#         podAffinityTerm:
#           topologyKey: kubernetes.io/hostname
#           labelSelector:
#             matchExpressions:
#               - key: release
#                 operator: In
#                 values:
#                   - "{{ .Release.Name }}"