# TCP address that the gRPC API server should listen on.
grpc_addr: 0.0.0.0:19091

# TCP address that the RESTful API server should listen on.
tcp_addr: 0.0.0.0:19092

# Unix domain socket address that the RESTful API server should listen on.
# Listening on a unix domain socket is disabled by default.
# unix_addr: "/var/run/kafka-pixy.sock"

# A map of cluster names to respective proxy configurations. The first proxy
# in the map is considered to be `default`. It is used in API calls that do not
# specify cluster name explicitly.
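#
# As a sketch only (the cluster names and broker addresses below are
# illustrative, not defaults), a two-cluster setup could look like:
#
#   proxies:
#     default:
#       kafka:
#         seed_peers:
#           - kafka-prod-1.example.com:9092
#     staging:
#       kafka:
#         seed_peers:
#           - kafka-staging-1.example.com:9092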
proxies:

  # Name of a Kafka+ZooKeeper cluster. The only requirement for the name is
  # that it be unique within this config file. The value of this parameter is
  # the configuration of the proxy used to access the cluster.
  default:

    # Unique ID that identifies a Kafka-Pixy instance in both ZooKeeper and
    # Kafka. It is automatically generated by default and it is recommended to
    # leave it that way.
    # client_id: AUTOGENERATED
    # Kafka parameters section.
    kafka:

      # List of seed Kafka peers that Kafka-Pixy should access to resolve the
      # Kafka cluster topology.
      seed_peers:
        - localhost:9092
      # Version of the Kafka cluster. Supported versions are 0.10.2.1 - 2.0.0.
      version: 0.10.2.1
      # Enable TLS when connecting to the Kafka cluster.
      tls: false
      # The filepath to the CA root certificate.
      # ca_certificate_file:
      # The filepath to the client certificate.
      # client_certificate_file:
      # The filepath to the client certificate key.
      # client_key_file:
      # Disable hostname verification.
      # insecure: false
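      #
      # For illustration only (the file paths are hypothetical, not defaults),
      # a TLS-enabled Kafka connection might be configured as:
      #
      #   tls: true
      #   ca_certificate_file: /etc/kafka-pixy/ca.crt
      #   client_certificate_file: /etc/kafka-pixy/client.crt
      #   client_key_file: /etc/kafka-pixy/client.key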
    # Networking parameters section. These all pass through to sarama's
    # `config.Net` field.
    net:

      # How long to wait for the initial connection.
      dial_timeout: 30s
      # How long to wait for a response.
      read_timeout: 30s
      # How long to wait for a transmit.
      write_timeout: 30s
    # ZooKeeper parameters section.
    zoo_keeper:

      # List of seed ZooKeeper peers that Kafka-Pixy should access to resolve
      # the ZooKeeper cluster topology.
      seed_peers:
        - localhost:2181
      # A root directory in ZooKeeper to store consumer data.
      # chroot: ""

      # The ZooKeeper session timeout has to be a minimum of 2 times the
      # tickTime (as set in the server configuration) and a maximum of 20
      # times the tickTime. The default ZooKeeper tickTime is 2 seconds.
      #
      # See http://zookeeper.apache.org/doc/trunk/zookeeperProgrammers.html#ch_zkSessions
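      #
      # For example, with the default tickTime of 2 seconds, valid session
      # timeouts range from 4s to 40s, so the 15s configured below falls
      # within bounds.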
      session_timeout: 15s
    # Producer parameters section.
    producer:

      # Size of all buffered channels created by the producer module.
      channel_buffer_size: 4096
      # The maximum permitted size of a message (defaults to 1000000). Should
      # be set equal to or smaller than the broker's `message.max.bytes`.
      max_message_bytes: 1000000
      # The type of compression to use on messages. Allowed values are:
      # none, gzip, snappy, and lz4.
      compression: snappy
      # The best-effort number of bytes needed to trigger a flush.
      flush_bytes: 1048576
      # The best-effort frequency of flushes.
      flush_frequency: 500ms
      # How long to wait for the cluster to settle between retries.
      retry_backoff: 10s
      # The total number of times to retry sending a message before giving up.
      retry_max: 6
      # The level of acknowledgement reliability needed from the broker.
      # Allowed values are:
      #  * no_response:    the broker doesn't send any response; the TCP ACK
      #                    is all you get.
      #  * wait_for_local: the broker responds as soon as it commits to the
      #                    local disk.
      #  * wait_for_all:   the broker waits for all in-sync replicas to commit
      #                    before responding.
      required_acks: wait_for_all
      # Period of time that Kafka-Pixy should keep trying to submit buffered
      # messages to Kafka. It is recommended to make it large enough to
      # survive a ZooKeeper leader election in your setup.
      shutdown_timeout: 30s
      # How to assign incoming messages to a Kafka partition. Defaults to
      # using a hash of the specified message key, or a random partition if
      # the key is unspecified. Allowed values are:
      #  * hash:       for messages with a key, take the FNV-1a hash of the
      #                key bytes, modulo the number of partitions; otherwise
      #                use a random partition.
      #  * random:     all messages are published to a random partition.
      #  * roundrobin: iterate over partitions sequentially.
      partitioner: hash
      # The timeout to specify on individual produce requests to the broker.
      # The broker will wait for replication to complete up to this duration
      # before returning an error.
      timeout: 10s
    # Consumer parameters section.
    consumer:

      # If set, Kafka-Pixy will not configure a consumer, and any attempts to
      # call the consumer APIs will return an error.
      disabled: false
      # Period of time that Kafka-Pixy should wait for an acknowledgement
      # before retrying.
      ack_timeout: 5m
      # Size of all buffered channels created by the consumer module.
      channel_buffer_size: 64
      # The number of bytes of messages to attempt to fetch for each
      # topic-partition in each fetch request. These bytes will be read into
      # memory for each partition, so this helps control the memory used by
      # the consumer. The fetch request size must be at least as large as
      # the maximum message size the server allows, or else it is possible
      # for the producer to send messages larger than the consumer can fetch.
      fetch_max_bytes: 1048576
      # The maximum amount of time the server will block before answering
      # the fetch request if there isn't data immediately available.
      fetch_max_wait: 250ms
      # A consume request will wait at most this long for a message from a
      # topic to become available before expiring.
      long_polling_timeout: 3s
      # The maximum number of unacknowledged messages allowed for a particular
      # group-topic-partition at a time. When this number is reached,
      # subsequent consume requests will return long polling timeout errors
      # until some of the pending messages are acknowledged.
      max_pending_messages: 300
      # The maximum number of retries Kafka-Pixy will make to offer an
      # unacknowledged message. Messages that exceed the number of retries are
      # discarded by Kafka-Pixy and acknowledged in Kafka. Zero retries
      # means that messages will be offered just once.
      #
      # If you want Kafka-Pixy to retry indefinitely, then set this
      # parameter to -1.
      max_retries: -1
      # How frequently to commit offsets to Kafka.
      offsets_commit_interval: 500ms
      # If a request fails for any reason, then Kafka-Pixy should wait this
      # long before retrying.
      retry_backoff: 500ms
      # Period of time that Kafka-Pixy should keep a subscription for a topic
      # by a group in the absence of requests from the consumer group.
      subscription_timeout: 15s
# Configuration for securely accessing the gRPC and web servers.
tls:
  # Path to the server certificate file.
  # Required if using gRPC SSL/TLS or HTTPS.
  # certificate_path: /usr/local/etc/server.crt
  # Path to the server certificate key file.
  # Required if using gRPC SSL/TLS or HTTPS.
  # key_path: /usr/local/etc/server.key
# A list of loggers. Multiple loggers are allowed; each log line will be sent
# to every logger defined.
logging:
  # Logs to stdout in a human-readable format.
  - name: console
    severity: info
  # # Logs to stdout in a JSON format
  # - name: json
  #   severity: info
  # # Logs to a Kafka topic in JSON format
  # - name: udplog
  #   severity: error
  # # Logs to syslog
  # - name: syslog
  #   severity: debug
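#
# As a sketch of running more than one logger at once (the severities chosen
# here are arbitrary, not defaults), the list could look like:
#
#   logging:
#     - name: console
#       severity: info
#     - name: syslog
#       severity: error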