# values-large.yaml
---
global:
# This values.yaml file is an example of a large Pega deployment.
# For more information about each configuration option, see the
# project readme.
# Enter your Kubernetes provider.
provider: "YOUR_KUBERNETES_PROVIDER"
deployment:
# The name specified will be used to prefix all of the Pega pods (replacing "pega" with something like "app1-dev").
name: "pega"
# Deploy Pega nodes
actions:
execute: "deploy"
# Add custom certificates to be mounted to the container
# to support custom certificates as plain text (less secure), pass them directly using the certificates parameter;
# to support multiple custom certificates as external secrets, specify each of your external secrets
# as an array of comma-separated strings using the certificatesSecrets parameter.
certificatesSecrets: []
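# For illustration only - the secret names below are assumptions, not chart defaults.
# You might reference pre-created Kubernetes secrets that hold your certificates like this:
# certificatesSecrets: ["my-kafka-cert-secret", "my-ldap-cert-secret"]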
certificates:
# Add krb5.conf file content here.
# This feature is used by Decisioning data flows to fetch data from Kafka or HBase streams.
kerberos:
# If a storage class is to be passed to the VolumeClaimTemplates in the search and stream pods, specify it here:
storageClassName: ""
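# For example (the class name is provider-specific and an assumption here), on AKS you might use:
# storageClassName: "managed-premium"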
# Provide JDBC connection information to the Pega relational database
# If you are installing or upgrading on IBM DB2, update the udb.conf file in the /charts/pega/charts/installer/config/udb directory with any additional connection properties.
jdbc:
# url Valid values are:
#
# Oracle jdbc:oracle:thin:@//localhost:1521/dbName
# IBM DB/2 z / OS jdbc:db2://localhost:50000/dbName
# IBM DB/2 jdbc:db2://localhost:50000/dbName:fullyMaterializeLobData=true;fullyMaterializeInputStreams=true;
# progressiveStreaming=2;useJDBC4ColumnNameAndLabelSemantics=2;
# SQL Server jdbc:sqlserver://localhost:1433;databaseName=dbName;selectMethod=cursor;sendStringParametersAsUnicode=false
# PostgreSQL jdbc:postgresql://localhost:5432/dbName
url: "YOUR_JDBC_URL"
# driverClass -- jdbc class. Valid values are:
#
# Oracle oracle.jdbc.OracleDriver
# IBM DB/2 com.ibm.db2.jcc.DB2Driver
# SQL Server com.microsoft.sqlserver.jdbc.SQLServerDriver
# PostgreSQL org.postgresql.Driver
driverClass: "YOUR_JDBC_DRIVER_CLASS"
# pega.database.type Valid values are: mssql, oracledate, udb, db2zos, postgres
dbType: "YOUR_DATABASE_TYPE"
# For databases that use multiple JDBC driver files (such as DB2), specify comma separated values for 'driverUri'
driverUri: "YOUR_JDBC_DRIVER_URI"
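# An illustrative sketch (hypothetical URLs) for a DB2 deployment that needs both the driver and its license jar:
# driverUri: "https://repo.example.com/drivers/db2jcc4.jar,https://repo.example.com/drivers/db2jcc_license_cu.jar"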
username: "YOUR_JDBC_USERNAME"
password: "YOUR_JDBC_PASSWORD"
# To avoid exposing the username and password, leave the jdbc.username and jdbc.password parameters empty (no quotes),
# configure the JDBC username and password in your External Secrets Manager, and enter the external secret name for the credentials below;
# make sure the keys in the secret are DB_USERNAME and DB_PASSWORD, respectively.
external_secret_name: ""
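# A minimal sketch of the external secret this expects, assuming a secret named "pega-db-credentials"
# (the name is an assumption; the DB_USERNAME/DB_PASSWORD keys come from the comment above):
#   apiVersion: v1
#   kind: Secret
#   metadata:
#     name: pega-db-credentials
#   type: Opaque
#   stringData:
#     DB_USERNAME: "jdbc-user"
#     DB_PASSWORD: "jdbc-password"
# With that secret in place you would set: external_secret_name: "pega-db-credentials"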
# CUSTOM CONNECTION PROPERTIES
# Add a list of ;-delimited connection properties. The list must end with ;
# For example: connectionProperties=user=usr;password=pwd;
connectionProperties: ""
rulesSchema: "YOUR_RULES_SCHEMA"
dataSchema: "YOUR_DATA_SCHEMA"
customerDataSchema: ""
customArtifactory:
# If you use a secured custom artifactory to manage your JDBC driver,
# provide the authentication details below by filling in the appropriate authentication section,
# either basic or apiKey.
authentication:
# Provide the basic authentication credentials or the API key authentication details to satisfy your custom artifactory authentication mechanism.
basic:
username: ""
password: ""
apiKey:
headerName: ""
value: ""
# To avoid exposing the basic.username, basic.password, apiKey.headerName, and apiKey.value parameters, configure them
# in your External Secrets Manager and enter the external secret name for the credentials below;
# make sure the keys in the secret are CUSTOM_ARTIFACTORY_USERNAME, CUSTOM_ARTIFACTORY_PASSWORD, CUSTOM_ARTIFACTORY_APIKEY_HEADER, and CUSTOM_ARTIFACTORY_APIKEY.
external_secret_name: ""
# Leave customArtifactory.enableSSLVerification enabled to ensure secure access to your custom artifactory;
# when customArtifactory.enableSSLVerification is false, SSL verification is skipped and an insecure connection is established.
enableSSLVerification: true
# Provide a required domain certificate for your custom artifactory; if none is required, leave this field blank.
certificate:
docker:
# If using a custom Docker registry, supply the credentials here to pull Docker images.
registry:
url: "YOUR_DOCKER_REGISTRY"
username: "YOUR_DOCKER_REGISTRY_USERNAME"
password: "YOUR_DOCKER_REGISTRY_PASSWORD"
# To avoid exposing Docker registry details, create secrets to manage your Docker registry credentials.
# Specify secret names as an array of comma-separated strings in double quotation marks using the imagePullSecretNames parameter. For example: ["secret1", "secret2"]
imagePullSecretNames: []
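# A registry pull secret can be created up front with kubectl, for example (the secret name is an assumption):
#   kubectl create secret docker-registry my-registry-secret \
#     --docker-server=YOUR_DOCKER_REGISTRY \
#     --docker-username=YOUR_DOCKER_REGISTRY_USERNAME \
#     --docker-password=YOUR_DOCKER_REGISTRY_PASSWORD
# and then referenced here as: imagePullSecretNames: ["my-registry-secret"]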
# Docker image information for the Pega docker image, containing the application server.
pega:
image: "pegasystems/pega"
utilityImages:
busybox:
image: busybox:1.31.0
imagePullPolicy: IfNotPresent
k8s_wait_for:
image: pegasystems/k8s-wait-for
imagePullPolicy: "IfNotPresent"
# waitTimeSeconds: 2
# maxRetries: 1
# Upgrade specific properties
upgrade:
# Configure only for AKS/PKS.
# Run the "kubectl cluster-info" command to get the service host and HTTPS service port of the Kubernetes API server.
# Example - Kubernetes master is running at https://<service_host>:<https_service_port>
kube-apiserver:
serviceHost: "API_SERVICE_ADDRESS"
httpsServicePort: "SERVICE_PORT_HTTPS"
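# For example, if "kubectl cluster-info" reports
#   Kubernetes master is running at https://203.0.113.10:443
# then (illustrative address) you would set serviceHost: "203.0.113.10" and httpsServicePort: "443".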
# Specify the Pega tiers to deploy
tier:
- name: "web"
# Create an interactive tier for web users. This tier uses
# the WebUser node type and will be exposed via a service to
# the load balancer.
nodeType: "WebUser"
# Pega requestor specific properties
requestor:
# Inactivity time after which requestor is passivated
passivationTimeSec: 900
service:
# For help configuring the service block, see the Helm chart documentation
# https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#service
httpEnabled: true
port: 80
targetPort: 8080
# To configure TLS between the ingress/load balancer and the backend, set the following:
tls:
enabled: false
# To avoid entering the certificate values in plain text, configure the keystore, keystorepassword, and cacertificate parameter
# values in your External Secrets Manager, and enter the external secret name below;
# make sure the keys in the secret are TOMCAT_KEYSTORE_CONTENT, TOMCAT_KEYSTORE_PASSWORD, and ca.crt respectively.
external_secret_name: ""
keystore:
keystorepassword:
port: 443
targetPort: 8443
# Set the value of the CA certificate here for bare-metal/OpenShift deployments - the CA certificate should be in base64 format.
# Pass the certificateChainFile if you are using certificateFile and certificateKeyFile.
cacertificate:
# provide the SSL certificate and private key as a PEM format
certificateFile:
certificateKeyFile:
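# A hedged illustration of two common ways to supply the backend TLS material
# (the file and secret names below are assumptions, not chart defaults):
#   keystore: "pega-web-keystore.jks"
#   keystorepassword: "changeit"
# or, keeping the values out of this file, reference an external secret whose keys are
# TOMCAT_KEYSTORE_CONTENT, TOMCAT_KEYSTORE_PASSWORD and ca.crt:
#   external_secret_name: "pega-web-tls-secret"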
# If you will deploy the traefik addon chart and enable traefik, set enabled=true; otherwise leave the default setting.
traefik:
enabled: false
# the SAN of the certificate present inside the container
serverName: ""
# set insecureSkipVerify=true if certificate verification has to be skipped
insecureSkipVerify: false
ingress:
# For help configuring the ingress block, see the Helm chart documentation
# https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md#ingress
# Enter the domain name to access web nodes via a load balancer.
# e.g. web.mypega.example.com
domain: "YOUR_WEB_NODE_DOMAIN"
# Configure custom path for given host along with pathType. Default pathType is ImplementationSpecific.
# path:
# pathType:
tls:
# Enable TLS encryption
enabled: true
secretName:
# For GKE Managed Certificate, set to true if a Google Managed Certificate has to be created and the annotation specified
useManagedCertificate: false
# Provide appropriate certificate annotations for EKS or GKE
# For EKS, use alb.ingress.kubernetes.io/certificate-arn: <certificate-arn>
# For GKE Pre-shared Certificate, use ingress.gcp.kubernetes.io/pre-shared-cert: <pre-shared-certificate-name>
# For GKE to use static IP for load balancer, use kubernetes.io/ingress.global-static-ip-name: <global-static-ip-name>
ssl_annotation:
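# For example, on EKS (the ARN below is illustrative only):
# ssl_annotation:
#   alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:123456789012:certificate/00000000-0000-0000-0000-000000000000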
# For Openshift, Pega deployments enable TLS to secure the connection
# from the browser to the router by creating the route using reencrypt termination policy.
# Add your certificate, the corresponding key using the appropriate .pem or .crt format and
# specify a CA certificate to validate the endpoint certificate.
certificate:
key:
cacertificate:
replicas: 1
javaOpts: ""
# Check the 'JVM Arguments' section in https://github.com/pegasystems/pega-helm-charts/blob/master/charts/pega/README.md
catalinaOpts: "-XX:InitialCodeCacheSize=256M -XX:ReservedCodeCacheSize=512M -XX:MetaspaceSize=512M"
pegaDiagnosticUser: ""
pegaDiagnosticPassword: ""
deploymentStrategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
livenessProbe:
port: 8081
hpa:
enabled: true
# Set enabled to true to include a Pod Disruption Budget for this tier.
# To enable this budget, specify either a pdb.minAvailable or pdb.maxUnavailable
# value and comment out the other parameter.
pdb:
enabled: false
minAvailable: 1
# maxUnavailable: "50%"
- name: "batch"
# Create a background tier for batch processing. This tier uses
# a collection of background node types and will not be exposed to
# the load balancer.
nodeType: "BackgroundProcessing,Search,Batch,Custom1,Custom2,Custom3,Custom4,Custom5"
replicas: 1
javaOpts: ""
pegaDiagnosticUser: ""
pegaDiagnosticPassword: ""
deploymentStrategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
livenessProbe:
port: 8081
hpa:
enabled: true
# Set enabled to true to include a Pod Disruption Budget for this tier.
# To enable this budget, specify either a pdb.minAvailable or pdb.maxUnavailable
# value and comment out the other parameter.
pdb:
enabled: false
minAvailable: 1
# maxUnavailable: "50%"
- name: "stream"
# Create a stream tier for queue processing. This tier deploys
# as a stateful set to ensure durability of queued data. It may
# be optionally exposed to the load balancer.
# Note: The stream tier is deprecated; please enable the externalized Kafka service configuration under External Services.
# When your Pega Platform deployment uses an externalized Kafka configuration, your deployment no longer uses the "Stream" node type.
nodeType: "Stream"
# Pega requestor specific properties
requestor:
# Inactivity time after which requestor is passivated
passivationTimeSec: 900
service:
port: 7003
targetPort: 7003
ingress:
# Enter the domain name to access stream nodes via a load balancer.
# e.g. stream.mypega.example.com
domain: "YOUR_STREAM_NODE_DOMAIN"
tls:
enabled: true
# Give the name of the secret that contains certificate information - works for GKE, AKS and K8S
secretName:
# For GKE Managed Certificate, set to true if a Google Managed Certificate has to be created and the annotation specified
useManagedCertificate: false
# Provide appropriate certificate annotations for EKS or GKE
# For EKS, use alb.ingress.kubernetes.io/certificate-arn: <certificate-arn>
# For GKE Pre-shared Certificate, use ingress.gcp.kubernetes.io/pre-shared-cert: <pre-shared-certificate-name>
# For GKE to use static IP for load balancer, use kubernetes.io/ingress.global-static-ip-name: <global-static-ip-name>
ssl_annotation:
replicas: 2
javaOpts: ""
livenessProbe:
port: 8081
volumeClaimTemplate:
resources:
requests:
storage: 5Gi
# Set enabled to true to include a Pod Disruption Budget for this tier.
# To enable this budget, specify either a pdb.minAvailable or pdb.maxUnavailable
# value and comment out the other parameter.
pdb:
enabled: false
minAvailable: 1
# maxUnavailable: "50%"
- name: "bix"
# Create a background tier for BIX processing. This tier uses
# the BIX node type and will not be exposed to the load balancer.
nodeType: "BIX"
replicas: 1
javaOpts: ""
pegaDiagnosticUser: ""
pegaDiagnosticPassword: ""
deploymentStrategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
livenessProbe:
port: 8081
hpa:
enabled: true
# Set enabled to true to include a Pod Disruption Budget for this tier.
# To enable this budget, specify either a pdb.minAvailable or pdb.maxUnavailable
# value and comment out the other parameter.
pdb:
enabled: false
minAvailable: 1
# maxUnavailable: "50%"
- name: "realtime"
# Create a dedicated tier for real-time data grid processing.
nodeType: "RealTime"
replicas: 1
javaOpts: ""
pegaDiagnosticUser: ""
pegaDiagnosticPassword: ""
deploymentStrategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
livenessProbe:
port: 8081
hpa:
enabled: true
# Set enabled to true to include a Pod Disruption Budget for this tier.
# To enable this budget, specify either a pdb.minAvailable or pdb.maxUnavailable
# value and comment out the other parameter.
pdb:
enabled: false
minAvailable: 1
# maxUnavailable: "50%"
# External services
# Cassandra automatic deployment settings.
cassandra:
enabled: true
persistence:
enabled: true
resources:
requests:
memory: "4Gi"
cpu: 2
limits:
memory: "8Gi"
cpu: 4
# DDS (external Cassandra) connection settings.
# These settings should only be modified if you are using a custom Cassandra deployment.
dds:
# A comma separated list of hosts in the Cassandra cluster.
externalNodes: ""
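# For example (illustrative hostnames), pointing at an existing three-node cluster:
# externalNodes: "cassandra-0.example.internal,cassandra-1.example.internal,cassandra-2.example.internal"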
# TCP Port to connect to cassandra.
port: "9042"
# The username for authentication with the Cassandra cluster.
username: "dnode_ext"
# The password for authentication with the Cassandra cluster.
password: "dnode_ext"
# To avoid exposing the username, password, trustStorePassword, and keyStorePassword parameters, configure them
# in your External Secrets Manager and enter the external secret name for the credentials below;
# make sure the keys in the secret are CASSANDRA_USERNAME, CASSANDRA_PASSWORD, CASSANDRA_TRUSTSTORE_PASSWORD, and CASSANDRA_KEYSTORE_PASSWORD.
external_secret_name: ""
# Elasticsearch deployment settings.
# Note: This Elasticsearch deployment is used for Pega search, and is not the same Elasticsearch deployment used by the EFK stack.
# These search nodes will be deployed regardless of the Elasticsearch configuration above.
# Refer to the README document to configure `Search and Reporting Service` as a search functionality provider under this section.
pegasearch:
image: "pegasystems/search"
memLimit: "3Gi"
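# A hedged sketch of switching search to the Search and Reporting Service, as described in the README
# (verify the parameter names against your chart version; the URL below is an assumption):
# externalSearchService: true
# externalURL: "http://srs-service.mynamespace.svc.cluster.local"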
# Pega Installer settings
installer:
image: "YOUR_INSTALLER_IMAGE:TAG"
adminPassword: "ADMIN_PASSWORD"
# Upgrade specific properties
upgrade:
# Type of upgrade
# Valid upgradeType values are 'in-place', 'zero-downtime', 'custom', 'out-of-place-rules', and 'out-of-place-data'.
upgradeType: "in-place"
# Specify a name for a target rules schema that the upgrade process creates for patches and upgrades.
targetRulesSchema: ""
# Specify a name for a target data schema that the upgrade process creates for patches and upgrades.
# Applies to PostgreSQL databases upgraded from Pega Infinity version 8.4.0 and later,
# and to Oracle databases upgraded from Pega Infinity version 8.4.3 and later.
targetDataSchema: ""
# Specify the username and password to access the pre-upgrade Pega Platform to perform pre- and post- actions during zero-downtime upgrades.
pegaRESTUsername: ""
pegaRESTPassword: ""
# Hazelcast settings (applicable from Pega 8.6)
hazelcast:
# Hazelcast docker image for platform version 8.6 through 8.7.x
image: "YOUR_HAZELCAST_IMAGE:TAG"
# Hazelcast docker image for platform version 8.8 and later
clusteringServiceImage: "YOUR_CLUSTERING_SERVICE_IMAGE:TAG"
# Setting this to true deploys Pega Infinity in the client-server Hazelcast model.
# Note: Make sure to set this value to "false" for platform versions below "8.6"; otherwise the installation fails.
enabled: true
# Setting for Pega 8.8 and above fresh installs and for Hazelcast (HZ) upgrades
clusteringServiceEnabled: false
# Setting related to Hazelcast migration.
migration:
# Set to `true` to initiate the migration job.
initiateMigration: false
# Reference the `platform/clustering-service-kubectl` Docker image to create the migration job.
migrationJobImage: "YOUR_MIGRATION_JOB_IMAGE:TAG"
# Set to `true` when migrating from embedded Hazelcast.
embeddedToCSMigration: false
# No. of initial members to join
replicas: 3
# Username for authentication in the client-server Hazelcast model. This setting is exposed and not secure.
username: ""
# Password for authentication in the client-server Hazelcast model. This setting is exposed and not secure.
password: ""
# To avoid exposing username and password parameters, leave these parameters empty and configure
# these cluster settings using an External Secrets Manager. Use the following keys in the secret:
# HZ_CS_AUTH_USERNAME for username and HZ_CS_AUTH_PASSWORD for password.
# Enter the external secret for these credentials below.
external_secret_name: ""
# Stream (externalized Kafka service) settings.
stream:
# Beginning with Pega Platform '23, enabled by default; when disabled, your deployment does not use a "Kafka stream service" configuration.
enabled: true
# Provide externalized Kafka service broker urls.
bootstrapServer: ""
# Provide Security Protocol used to communicate with kafka brokers. Supported values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
securityProtocol: PLAINTEXT
# If required, provide the trustStore certificate file name.
# When using a trustStore certificate, you must also include a Kubernetes secret name that contains the trustStore certificate
# in the global.certificatesSecrets parameter.
# Pega deployments only support trustStores using the Java Key Store (.jks) format.
trustStore: ""
# If required, provide the trustStorePassword value in plain text.
trustStorePassword: ""
# If required, provide the keyStore certificate file name.
# When using a keyStore certificate, you must also include a Kubernetes secret name that contains the keyStore certificate
# in the global.certificatesSecrets parameter.
# Pega deployments only support keyStores using the Java Key Store (.jks) format.
keyStore: ""
# If required, provide the keyStorePassword value in plain text.
keyStorePassword: ""
# If required, provide jaasConfig value in plain text.
jaasConfig: ""
# If required, provide a SASL mechanism. Supported values are: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512.
saslMechanism: PLAIN
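# An illustrative combination for a SASL_SSL broker (the credentials and file names below are assumptions;
# org.apache.kafka.common.security.plain.PlainLoginModule is the standard Kafka login module for the PLAIN mechanism):
# securityProtocol: SASL_SSL
# saslMechanism: PLAIN
# trustStore: "kafka-truststore.jks"
# trustStorePassword: "changeit"
# jaasConfig: "org.apache.kafka.common.security.plain.PlainLoginModule required username='kafka-user' password='kafka-secret';"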
# By default, topics originating from Pega Platform have the pega- prefix,
# so that it is easy to distinguish them from topics created by other applications.
# Pega supports customizing the name pattern for your Externalized Kafka configuration for each deployment.
streamNamePattern: "pega-{stream.name}"
# Your replicationFactor value cannot exceed the number of Kafka brokers, and cannot exceed 3.
replicationFactor: "1"
# To avoid exposing trustStorePassword, keyStorePassword, and jaasConfig parameters, leave the values empty and
# configure them using an External Secrets Manager, making sure you configure the keys in the secret in the order:
# STREAM_TRUSTSTORE_PASSWORD, STREAM_KEYSTORE_PASSWORD and STREAM_JAAS_CONFIG.
# Enter the external secret name below.
external_secret_name: ""