kafka optimization, better support for group consumption, and more detailed documentation of some of the settings

updates_os_and_scripts
neu5ron 2020-01-22 12:07:41 -05:00
parent ae3275e6f1
commit 114144ac3f
1 changed file with 17 additions and 14 deletions

@@ -1,6 +1,6 @@
# HELK Kafka input conf file
# HELK build Stage: Alpha
# Author: Roberto Rodriguez (@Cyb3rWard0g)
# Author: Roberto Rodriguez (@Cyb3rWard0g), Nate Guagenti (@neu5ron)
# License: GPL-3.0
input {
@@ -9,22 +9,25 @@ input {
topics => ["winlogbeat","winevent","SYSMON_JOIN","filebeat"]
decorate_events => true
codec => "json"
auto_offset_reset => "latest"
############################# HELK Optimizing Throughput & Latency #############################
fetch_min_bytes => "1"
request_timeout_ms => "40000"
############################# HELK Optimizing Durability #############################
enable_auto_commit => "false"
############################# HELK Optimizing Availability #############################
connections_max_idle_ms => "540000"
session_timeout_ms => "30000"
max_poll_interval_ms => "300000"
#############################
max_poll_records => "500"
############################# HELK Kafka Group Consumption #############################
# Enable Logstash to not continuously restart consumption of docs/logs it has already consumed. However, if you need it to re-consume, change the 'group_id' value to something else (ex: a simple value like '100_helk_logstash')
enable_auto_commit => "true"
# During group_id or client_id changes, the Kafka client will consume from the earliest document so as not to lose data
auto_offset_reset => "earliest"
# If you have multiple logstash instances, this is your ID so that each instance consumes a slice of the Kafka pie.
# No need to change this unless you know what you're doing and have a specific reason to
group_id => "helk_logstash"
# Change to the number of Kafka partitions; only change/set this if scaling in a large environment & you have customized your Kafka partitions
#consumer_threads => 1
# Default value is 1, read documentation for more info: https://www.elastic.co/guide/en/logstash/current/plugins-inputs-kafka.html#plugins-inputs-kafka-consumer_threads
consumer_threads => 1
############################# HELK Optimizing Throughput #############################
#fetch_min_bytes => "1024"
#request_timeout_ms => "40000"
############################# HELK Optimizing Availability #############################
#connections_max_idle_ms => "540000"
#session_timeout_ms => "30000"
#max_poll_interval_ms => "300000"
#############################
#max_poll_records => "500"
}
}
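
For context, a minimal sketch of how the new group-consumption settings fit together (the topic name and partition count below are assumed examples, not part of this commit): if the Kafka topic was created with 3 partitions and a single Logstash instance is consuming, consumer_threads can be raised to match the partition count so each thread owns one partition, while the shared group_id lets multiple instances split the partitions between them.

input {
  kafka {
    # assumed example values; the real HELK config lists multiple topics
    topics => ["winlogbeat"]
    group_id => "helk_logstash"        # shared consumer group so instances each take a slice of the partitions
    enable_auto_commit => "true"       # commit offsets so already-consumed logs are not re-read on restart
    auto_offset_reset => "earliest"    # when the group_id changes, start from the earliest document to avoid losing data
    consumer_threads => 3              # assumed: topic has 3 partitions
    codec => "json"
  }
}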