mirror of https://github.com/infosecn1nja/HELK.git
kafka optimization, better support group consumption, and more detailed documentation of some of the settings
parent
ae3275e6f1
commit
114144ac3f
|
@ -1,6 +1,6 @@
|
||||||
# HELK Kafka input conf file
|
# HELK Kafka input conf file
|
||||||
# HELK build Stage: Alpha
|
# HELK build Stage: Alpha
|
||||||
# Author: Roberto Rodriguez (@Cyb3rWard0g)
|
# Author: Roberto Rodriguez (@Cyb3rWard0g), Nate Guagenti (@neu5ron)
|
||||||
# License: GPL-3.0
|
# License: GPL-3.0
|
||||||
|
|
||||||
input {
|
input {
|
||||||
|
@ -9,22 +9,25 @@ input {
|
||||||
topics => ["winlogbeat","winevent","SYSMON_JOIN","filebeat"]
|
topics => ["winlogbeat","winevent","SYSMON_JOIN","filebeat"]
|
||||||
decorate_events => true
|
decorate_events => true
|
||||||
codec => "json"
|
codec => "json"
|
||||||
auto_offset_reset => "latest"
|
############################# HELK Kafka Group Consumption #############################
|
||||||
############################# HELK Optimizing Throughput & Latency #############################
|
# Enable logstash to not continuously restart consumption of docs/logs it already has. However, if you need it to, then change the 'group_id' value to something else (ex: could be a simple value like '100_helk_logstash')
|
||||||
fetch_min_bytes => "1"
|
enable_auto_commit => "true"
|
||||||
request_timeout_ms => "40000"
|
# During group_id or client_id changes, the Kafka client will consume from the earliest document so as not to lose data
|
||||||
############################# HELK Optimizing Durability #############################
|
auto_offset_reset => "earliest"
|
||||||
enable_auto_commit => "false"
|
|
||||||
############################# HELK Optimizing Availability #############################
|
|
||||||
connections_max_idle_ms => "540000"
|
|
||||||
session_timeout_ms => "30000"
|
|
||||||
max_poll_interval_ms => "300000"
|
|
||||||
#############################
|
|
||||||
max_poll_records => "500"
|
|
||||||
# If you have multiple logstash instances, this is your ID so that each instance consumes a slice of the Kafka pie.
|
# If you have multiple logstash instances, this is your ID so that each instance consumes a slice of the Kafka pie.
|
||||||
# No need to change this unless you know what you're doing and for some reason have the need
|
# No need to change this unless you know what you're doing and for some reason have the need
|
||||||
group_id => "helk_logstash"
|
group_id => "helk_logstash"
|
||||||
# Change to the number of Kafka partitions; only change/set if scaling in a large environment & you have customized your Kafka partitions
|
# Change to the number of Kafka partitions; only change/set if scaling in a large environment & you have customized your Kafka partitions
|
||||||
#consumer_threads => 1
|
# Default value is 1, read documentation for more info: https://www.elastic.co/guide/en/logstash/current/plugins-inputs-kafka.html#plugins-inputs-kafka-consumer_threads
|
||||||
|
consumer_threads => 1
|
||||||
|
############################# HELK Optimizing Throughput #############################
|
||||||
|
#fetch_min_bytes => "1024"
|
||||||
|
#request_timeout_ms => "40000"
|
||||||
|
############################# HELK Optimizing Availability #############################
|
||||||
|
#connections_max_idle_ms => "540000"
|
||||||
|
#session_timeout_ms => "30000"
|
||||||
|
#max_poll_interval_ms => "300000"
|
||||||
|
#############################
|
||||||
|
#max_poll_records => "500"
|
||||||
}
|
}
|
||||||
}
|
}
|
Loading…
Reference in New Issue