# Version 9.2.2.20240415
#
############################################################################
# OVERVIEW
############################################################################
# DO NOT EDIT THIS FILE!
# Changes to default files will be lost on update and are difficult to
# manage and support.
#
# Please make any changes to system defaults by overriding them in
# apps or $SPLUNK_HOME/etc/system/local
# (See "Configuration file precedence" in the web documentation).
#
# To override a specific setting, copy the name of the stanza and
# setting to the file where you wish to override it.
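#
# For example, to override 'maxresultrows' from the [searchresults] stanza
# below, a local override file such as
# $SPLUNK_HOME/etc/system/local/limits.conf might contain (hypothetical
# value):
#
#   [searchresults]
#   maxresultrows = 100000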
#
# This file configures various limits for Splunk search commands.
# CAUTION: Do not alter the settings in limits.conf unless you know what
# you are doing.
#
# Improperly configured limits may result in splunkd crashes and/or
# memory overuse.
#
############################################################################
# GLOBAL SETTINGS
############################################################################

[default]
max_mem_usage_mb = 200

[searchresults]
maxresultrows = 50000
# Maximum number of times to try in the atomic write operation
# (1 = no retries)
tocsv_maxretry = 5
# Retry period is 1/2 second (500 milliseconds)
tocsv_retryperiod_ms = 500
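# For example, with the defaults above, a failed atomic write is
# attempted up to 5 times in total, waiting 500 milliseconds between
# attempts, for at most 2 seconds spent retrying.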

compression_level = 1

[search_info]
# These settings control logging of error messages to info.csv.
# All messages will be logged to search.log regardless of these settings.
# Maximum number of error messages to log in info.csv.
# Set to 0 to remove the limit; this may affect search performance.
max_infocsv_messages = 20
# log level = DEBUG | INFO | WARN | ERROR
infocsv_log_level = INFO
# Log warnings if search returns no results because user has no
# permissions to search on queried indexes.
show_warn_on_filtered_indexes = false
# Log level of messages when search returns no results because user has
# no permissions to search on queried indexes.
filteredindexes_log_level = DEBUG


[subsearch]
# Maximum number of results to return from a subsearch.
maxout = 10000
# Maximum number of seconds to run a subsearch before finalizing.
maxtime = 60
# Time to cache a given subsearch's results.
ttl = 300

############################################################################
# SEARCH COMMAND
############################################################################
# This section contains the settings for the search command.
# The settings are organized in subsections by type of setting.

[search]

############################################################################
# Batch search
############################################################################
# This section contains settings for batch search.

# Allow batch mode which searches in non-time order for certain classes
# of searches.
allow_batch_mode = true

# When batch mode attempts to retry the search on a peer that failed, wait
# at least this many seconds.
batch_retry_min_interval = 5

# When batch mode attempts to retry the search on a peer that failed, wait
# at most this many seconds.
batch_retry_max_interval = 300

# After a retry attempt fails, increase the time to wait before trying
# again by this scaling factor.
batch_retry_scaling = 1.5

# When in batch mode, the maximum number of index values to read in
# at one time.
batch_search_max_index_values = 10000000

# Number of search pipelines created per batch search
batch_search_max_pipeline = 1

# Default size of the aggregator queue to which all the search pipelines
# dump the search results on the indexer.
batch_search_max_results_aggregator_queue_size = 100000000

# Default size of the serialized results queue where all the serialized
# results are kept before transmission.
batch_search_max_serialized_results_queue_size = 100000000


############################################################################
# Bundles
############################################################################
# This section contains settings for bundles and bundle replication.

# Avoid loading remote bundles in splunkd.
load_remote_bundles = false

# Bundle replication file ttl.
replication_file_ttl = 600

# The minimum bundle replication period.
replication_period_sec = 60

# Whether bundle replication is synchronous (and thus blocking searches).
sync_bundle_replication = auto

# Bundle status expiry time
bundle_status_expiry_time = 1hr

############################################################################
# Concurrency
############################################################################
# This section contains settings for search concurrency limits.
# If 'total_search_concurrency_limit = auto', the total limit of concurrent
# historical searches is
# max_hist_searches = max_searches_per_cpu x number_of_cpus + base_max_searches.
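# For example, with the defaults below (max_searches_per_cpu = 1,
# base_max_searches = 6) on a hypothetical 16-CPU search head:
# max_hist_searches = 1 x 16 + 6 = 22.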

# The maximum number of concurrent historical searches in the search head.
total_search_concurrency_limit = auto

# The base number of concurrent historical searches.
base_max_searches = 6

# Max real-time searches = max_rt_search_multiplier x max historical searches.
max_rt_search_multiplier = 1
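# Continuing the hypothetical 16-CPU example above, this default allows
# 1 x 22 = 22 concurrent real-time searches.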

# The maximum number of concurrent historical searches per CPU.
max_searches_per_cpu = 1

# Whether the maximum number of concurrent searches is enforced
# cluster-wide for admission of ad hoc searches.
shc_adhoc_quota_enforcement = off

############################################################################
# Distributed search
############################################################################
# This section contains settings for distributed search connection
# information.

# Limit on the skew permitted when adding a search peer.
# Peers with a skew larger than this will be rejected.
addpeer_skew_limit = 600

# Defaults to download all remote logs other than saved search logs and
# oneshot search logs.
fetch_remote_search_log = disabledSavedSearches

# Maximum size of the chunk queue.
max_chunk_queue_size = 10000000

# Search results combiner maximum in-memory buffer size (in events).
max_combiner_memevents = 50000

# Absolute value of largest time skew we will tolerate between the search
# head and the peer (in seconds).
max_tolerable_skew = 60

# Maximum number of worker threads in Round Robin policy.
max_workers_searchparser = 5

# The minimum number of results blobs to keep for consumption by the
# search head.
results_queue_min_size = 10

# Corresponds to the size of the results queue in the dispatch fetch level
result_queue_max_size = 100000000

# If all currently active peers have finished with the search, wait this
# many seconds before giving up on peers we are attempting to reconnect
# to for a retry.
results_queue_read_timeout_sec = 900

# By default, throttling is not enabled.
remote_search_requests_throttling_type = disabled

# By default, send user capabilities to the search peers.
remote_search_requests_send_capabilities_list = true

# By default, allow remote search execution even if the capability list is missing.
remote_search_requests_reject_if_capabilities_list_absent = false

############################################################################
# Field stats
############################################################################
# This section contains settings for field statistics.

# How often to update the field summary statistics, as a ratio to the
# elapsed run time so far.
fieldstats_update_freq = 0

# Maximum period for updating field summary statistics in seconds.
fieldstats_update_maxperiod = 60

# The minimum frequency of a field displayed in the /summary endpoint.
min_freq = 0.01


############################################################################
# History
############################################################################
# This section contains settings for search history.

# Enable search history?
enable_history = true

# Max number of searches to store in history
# for each user/app, if search_history_storage_mode is csv;
# for each user, if search_history_storage_mode is kvstore.
max_history_length = 500

# Maximum time to retain search history records (for each user).
max_history_storage_retention_time = 90d

# History storage
search_history_storage_mode = csv

############################################################################
# Memory tracker
############################################################################
# This section contains settings for the memory tracker.

# If memory tracker is disabled, search won't be terminated even if it
# exceeds the memory limit.
# By default memory tracking is disabled.
enable_memory_tracker = false

# Default value for percentage memory usage for the splunk search
# process is set to 25%.
search_process_memory_usage_percentage_threshold = 25

# Default value for memory usage for the Splunk search process is set to 4GB.
search_process_memory_usage_threshold = 4000


############################################################################
# Meta search
############################################################################
# This section contains settings for meta search.

# Allow inexact metasearch?
allow_inexact_metasearch = false


############################################################################
# Misc
############################################################################
# This section contains miscellaneous search settings.


# Determines if the saved searches handler uses a removable cache
use_removable_search_cache = true

# Specifies after how long a paused search should be auto canceled,
# in seconds.
# 0 means do not auto cancel the paused search.
auto_cancel_after_pause = 0

dispatch_dir_warning_size = 5000

# Enable concatenation of successively occurring evals into a single
# comma separated eval during generation of data model searches.
enable_datamodel_meval = true

# Determines whether or not scoped conditional expansion of knowledge
# objects occurs during search string expansion. This only applies on
# the search head.
# NOTE: Do not change unless instructed to do so by Splunk Support.
enable_conditional_expansion = true

# If true, always dispatch saved searches as the requesting user.
# The default for dispatchAs in the savedsearches.conf.spec.in file
# is 'owner'.
force_saved_search_dispatch_as_user = false

# Max length of custom job id when spawning a new job.
max_id_length = 150

# Specifies the maximum length of a generated or custom search job ID before
# the Splunk software shortens the directory name. The search job ID itself
# remains the same.
max_id_length_before_hash = 230

# Specifies whether the Splunk software reruns all or parts of a currently
# running search process when there are indexer failures in an indexer
# clustering environment.
search_retry = false


# Sets how long, in seconds, 'search_retry' waits to get updated
# indexer information.
search_retry_waiting_time = 70

# Maximum number of attempts made to retry a historical search before failing
# Only applied when search_retry is set to true
search_retry_max_historical = 15

# Stack size of the search executing thread.
stack_size = 4194304

# Use precomputed summaries if possible?
summary_mode = all

# Track indextime range of searches (shown in job inspector).
track_indextime_range = true

# By default, use bloom filters.
use_bloomfilter = true

# By default use metadata elimination.
use_metadata_elimination = true

# Serialization format and compression algorithm used for search results
results_serial_format = srs
results_compression_algorithm = zstd

# Record search telemetry in search_telemetry.json in the dispatch dir
# The setting in telemetry.conf controls whether the data is sent back,
# this setting controls whether we generate search telemetry data in the
# dispatch dir. Search telemetry data is also put into _introspection.
record_search_telemetry = true


# Number of files to use as a threshold at which to stop adding more
# files to var/run/splunk/search_telemetry for indexing search telemetry
# data. This setting applies only to telemetry on the search head.
search_telemetry_file_limit = 500


# Limit, in bytes, for each of the constituent components of the search
# telemetry json representation
search_telemetry_component_limit = 10000

# Track the number of events of each sourcetype that match a search.
track_matching_sourcetypes = true

# timeout to launch a search job
search_launch_timeout_seconds = 180

# timeout to initialize startup configuration, in milliseconds
search_startup_config_timeout_ms = 3000

# maximum number of tracked search result sourcetypes to add to audit.log
max_audit_sourcetypes = 100

use_search_evaluator_v2 = true

# The maximum number of field metadata displayed in the /jobs/fieldmeta endpoint.
max_fieldmeta_cnt_ui = 1000

############################################################################
# Parsing
############################################################################
# This section contains settings related to parsing searches.

# Max recursion depth for macros.
# Considered a search exception if macro expansion does not stop after
# this many levels.
max_macro_depth = 100

# Max recursion depth for subsearch.
# Considered a search exception if subsearch does not stop after
# this many levels.
max_subsearch_depth = 8

# The minimum length of a prefix before a * to ask the index about.
min_prefix_len = 1

# When true, always search the lexicon for both field::val and val for
# field=val searches, unless INDEXED=true is set for the field in
# fields.conf (in which case only field::val is searched)
always_include_indexedfield_lispy = true

# When set to true, we will scope every indexed field=val statement
# with sourcetype and convert it to the indexed form (field::val)
indexed_fields_expansion = true

############################################################################
# Preview
############################################################################
# This section contains settings for previews.

# The maximum time to spend generating previews, as a fraction of total
# search time.
preview_duty_cycle = 0.25


############################################################################
# Quota or queued searches
############################################################################
# This section contains settings for quota or queued searches.

# Default setting for allowing async jobs to be queued on quota violation.
default_allow_queue = true

# The maximum number of times to retry to dispatch a search when the
# quota has been reached.
dispatch_quota_retry = 4

# Milliseconds between retrying to dispatch a search if a quota has been
# reached. We retry the given number of times, with each successive wait
# 2x longer than the previous.
dispatch_quota_sleep_ms = 100
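# For example, with the defaults above (dispatch_quota_retry = 4), the
# successive waits are 100ms, 200ms, 400ms, and 800ms.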

# Enforce cumulative role based quotas
enable_cumulative_quota = false

# how often to retry queued jobs (in seconds)
queued_job_check_freq = 1


############################################################################
# Reading chunk controls
############################################################################
# This section contains settings for reading chunk controls.

# max_results_perchunk, min_results_perchunk, and target_time_perchunk
# are multiplied by this for a long running search.
chunk_multiplier = 5

# Time in seconds until a search is considered "long running".
long_search_threshold = 2

# Maximum raw size of results for each call to search (in dispatch).
# 0 = no limit, not affected by chunk_multiplier.
max_rawsize_perchunk = 100000000

# Maximum results per call to search (in dispatch).
# Must be <= maxresultrows.
max_results_perchunk = 2500

# Minimum results per call to search (in dispatch).
# Must be <= max_results_perchunk.
min_results_perchunk = 100

# Target duration of a particular call to fetch search results in ms.
target_time_perchunk = 2000
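# For example, with the defaults above, once a search runs past the
# 2-second long_search_threshold, the effective limits become
# max_results_perchunk = 2500 x 5 = 12500 and
# target_time_perchunk = 2000 x 5 = 10000 ms.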


############################################################################
# Real-time
############################################################################
# This section contains settings for real-time searches.

# For real-time searches in the UI, maximum number of events stored
# (as a FIFO buffer).
realtime_buffer = 10000


############################################################################
# Remote storage
############################################################################
# This section contains settings for remote storage.

# Maximum number of remote buckets to localize as a look-ahead on searches.
bucket_localize_max_lookahead = 5

# Specifies which bucket prediction algorithm to use.
bucket_predictor = consec_not_needed


############################################################################
# Results storage
############################################################################
# This section contains settings for storing final search results.

# The maximum number of end results to store globally
# (when status_buckets=0).
max_count = 500000

# By default, no timeline information is retained. The UI will supply
# the status_buckets value as needed.
status_buckets = 0

# Truncate report output to max_count?
truncate_report = false

# Do we write multi-file results to results_dir?
write_multifile_results_out = true

# For event searches, should we read final results from the timeliner?
read_final_results_from_timeliner = true

# Field filters are turned off by default in the preview release.
field_filters = false

############################################################################
# Search process
############################################################################
# This section contains settings for search process configurations.

# Even if the search process has not been idle for the above time, check its
# internal caches for stale data after this many searches.
idle_process_cache_search_count = 8

# If a search process is idle for this many seconds, take the opportunity
# to scan its internal caches for stale data.
idle_process_cache_timeout = 0.5

# Periodically we'll check if we have too many idle search processes.
# This controls how often that happens (in seconds).
idle_process_reaper_period = auto

# Inside a search process, keep up to this many compiled regex artifacts
# before checking for stale ones. Normally the above idle_process_cache_*
# settings will check for stale entries before this limit is hit.
idle_process_regex_cache_hiwater = 2500

# When running a search, scan at most this many idle processes before
# launching a new one.
launcher_max_idle_checks = auto

# Number of server threads dedicated to managing communication with
# search processes.
# Negative number means automatically pick a sensible value
launcher_threads = -1

# Maximum number of preforked search processes that are idle
# and wait for next search execution
max_idle_process_count = auto

# Memory (RSS) limit of a search process that can be idle and reusable, in KB
# Number 0 is set to use the system default, 1024*1024 KB (1GB)
# Negative number means no limit
max_idle_process_memory = auto

# The number of search processes constructing a pool to run searches, which is
# dependent on system resources (CPU and memory) available to Splunk server
# Number 0 is set to automatically pick a sensible value
# Negative number means no limit
max_search_process_pool = 2048

# When reaping idle search processes, allow one to be reaped if it is
# not using the most recent configuration bundle, and its bundle has not
# been used in at least this many seconds.
max_old_bundle_idle_time = auto

# On UNIX we can run more than one search per process.
# Set this to a number greater than one to enable.
max_searches_per_process = 500

# When running more than one search per process, limit the number of new
# searches that can be started before allowing time to service the ones
# that are already running
max_searches_started_per_cycle = 30

# When running more than one search per process, don't allow a process to
# accumulate more than this number of seconds running searches. Note that a
# search can run longer than this without being terminated; it only prevents
# the process from being used for another search.
max_time_per_process = auto

# When running more than one search per process, do not reuse a process
# if it is older than this number of seconds. This is different than
# max_time_per_process because it includes time the process spent idle.
process_max_age = 7200.0

# Don't reuse a process that last served a different user unless it has
# been idle this long (in seconds).
process_min_age_before_user_change = auto

search_process_mode = auto

# Whether to increase oom_score of search processes to make Splunk more stable.
search_process_configure_oom_score_adj = true

# The value added to the search process oom_score, ranging between 0 and 1000.
# Only applies when 'search_process_configure_oom_score_adj' is set to true.
search_process_set_oom_score_adj = 700

############################################################################
# search_messages.log
############################################################################

# Specifies whether splunkd promotes user-facing search messages
# from $SPLUNK_HOME/var/run/splunk/dispatch/<sid>/info.csv to
# $SPLUNK_HOME/var/log/splunk/search_messages.log.
log_search_messages = true

# When 'log_search_messages = true', this setting specifies the lowest
# severity of message that splunkd logs to search_messages.log.
# Splunkd ignores all messages with a lower severity.
# Possible values in ascending order: DEBUG, INFO, WARN, ERROR
search_messages_severity = WARN

############################################################################
# Search reuse
############################################################################
# This section contains settings for search reuse.


############################################################################
# Splunk Analytics for Hadoop
############################################################################
# This section contains settings for use with Splunk Analytics for Hadoop.

# The maximum time to spend doing reduce, as a fraction of total search time.
reduce_duty_cycle = 0.25

# The frequency with which to try to reduce intermediate data when there is
# a non-streaming and non-stateful streaming command. (0 = never)
reduce_freq = 10


############################################################################
# Status
############################################################################
# This section contains settings for search status.

# The number of search job metadata entries to cache in RAM.
status_cache_size = 10000


############################################################################
# Timelines
############################################################################
# This section contains settings for timelines.

# Size of thread pool for remote event download framework.
remote_event_download_initialize_pool = 5
remote_event_download_finalize_pool = 5
remote_event_download_local_pool = 5

# Allow timeline to be map/reduced?
remote_timeline = true

# Whether to fetch all events accessible through the timeline from the
# remote peers before the job is considered done.
remote_timeline_fetchall = 1

# Minimum number of peers required to utilize remote timelining.
remote_timeline_min_peers = 1

# How often (in seconds) to touch remote artifacts to keep them from
# being reaped while the search has not finished.
remote_timeline_touchperiod = 300

# Timeouts for fetching remote timeline events.
remote_timeline_connection_timeout = 5
remote_timeline_send_timeout = 10
remote_timeline_receive_timeout = 10

# In ms
search_keepalive_frequency = 30000

# Maximum number of uninterrupted keepalives before the connection is closed.
search_keepalive_max = 100

# Enable timeline preview
timeline_events_preview = false

############################################################################
# TTL
############################################################################
# This section contains time to live (ttl) settings.

# The length of time to persist search cache entries (in seconds).
cache_ttl = 300

# How long jobs are saved for by default.
default_save_ttl = 604800

# How long searches should be stored on disk once failed.
failed_job_ttl = 86400

# How long artifacts from searches run for a search head should live
# on the indexers.
remote_ttl = 600

# How long searches should be stored on disk once completed.
ttl = 600


# Timeout value for checking search marker files like hotbucketmarker or backfill
# marker.
check_search_marker_done_interval = 60

# Time interval of sleeping between subsequent search marker files checks.
check_search_marker_sleep_interval = 1

# How long srtemp sub-directories should be kept before they are deleted
srtemp_dir_ttl = 86400

############################################################################
# Distributed search throttling
############################################################################
# This section contains settings for distributed search throttling (peers
# side) information.
[search_throttling::per_cpu]
max_concurrent = 12

[search_throttling::physical_ram]
min_memory_per_search = 134217728

############################################################################
# OTHER COMMAND SETTINGS
############################################################################
# This section contains the stanzas for the SPL commands, except for the
# search command, which is in a separate section.


[anomalousvalue]
maxresultrows = 50000

# Maximum number of distinct values for a field.
maxvalues = 0
# Maximum size in bytes of any single value
# (truncated to this size if larger).
maxvaluesize = 0


[associate]
maxfields = 10000
maxvalues = 0
maxvaluesize = 0


[autoregress]
maxp = 10000
maxrange = 1000

[collect]
# Setting for multivalue field representations in collect command.
format_multivalue_collect = false
# Setting for collect command to add quotation marks based on major breakers.
collect_ignore_minor_breakers = false

[concurrency]
# Maximum concurrency level to keep record of.
max_count = 10000000


[correlate]
maxfields = 1000


[ctable]
# This stanza contains settings for the contingency/ctable/counttable
# command.
maxvalues = 1000

[dbinspect]
maxresultrows = 50000

[discretize]
# This stanza contains settings for the bin/bucket/discretize command.
maxbins = 50000
# if maxbins not specified or = 0, defaults to searchresults::maxresultrows


[findkeywords]
maxevents = 50000


[geostats]
# At the lowest level of the tree, i.e. ZL=0 (when we are zoomed out to
# the world level), what is the size of each gridcell in terms of latitude
# and longitude (degrees)?
# Valid values for zl_0_gridcell_latspan are from 0 to 180.0, and
# for zl_0_gridcell_longspan are from 0 to 360.0.
# The rest of the zoom level gridcell sizes are auto-tuned, i.e. they
# reduce by a factor of 2 at each additional level.
zl_0_gridcell_latspan = 22.5
zl_0_gridcell_longspan = 45.0
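# For example, with the defaults above, gridcells at zoom level 1 span
# 11.25 x 22.5 degrees (lat x long), at zoom level 2 they span
# 5.625 x 11.25, and so on.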
# Configures the filtering/search strategy for events on the map.
# Currently experimental.
filterstrategy = 2
# How many levels of clustering will be done in geostats.
maxzoomlevel = 9


[inputcsv]
# Maximum number of retries for creating a tmp directory (with random
# name in SPLUNK_HOME/var/run/splunk).
mkdir_max_retries = 100


[join]
# The join command subsearch is restricted by two settings, 'subsearch_maxout'
# and the 'maxresultrows' setting in the [searchresults] stanza.
subsearch_maxout = 50000
subsearch_maxtime = 60

[kmeans]
maxdatapoints = 100000000
maxkvalue = 1000
maxkrange = 100


[lookup]
# Maximum size of static lookup file to use an in-memory index for.
max_memtable_bytes = 26214400
# Maximum size of static lookup file to use when using a lookup()
# eval function in the ingest context
# Defaults to 10MB
ingest_max_memtable_bytes = 10485760
# Period of time after which we should refresh in-memory lookup
# tables being used with lookup() eval function at ingest time.
ingest_lookup_refresh_period_secs = 60
# Maximum reverse lookup matches (for search expansion).
max_reverse_matches = 50
# Default setting for if non-memory file lookups (for large files)
# should batch queries.
# Can be overridden using a lookup table's stanza in transforms.conf.
batch_index_query = true
# When doing a batch request, what is the maximum number of matches to
# retrieve? If more than this limit of matches would otherwise be
# retrieved, we will fall back to non-batch mode matching.
batch_response_limit = 5000000
# Maximum number of lookup error messages that should be logged.
max_lookup_messages = 20
# time to live for an indexed csv
indexed_csv_ttl = 300
# keep alive token file period
indexed_csv_keep_alive_timeout = 30
# max time for the CSV indexing
indexed_csv_inprogress_max_timeout = 300
# whether we want to error on invalid lookups or let them proceed.
input_errors_fatal = false
# Should KV Store lookups be indexed at time of bundle replication?
enable_splunkd_kv_lookup_indexing = true


[metadata]
maxresultrows = 10000
# The most metadata results to fetch from each indexer.
maxcount = 100000
bucket_localize_max_lookahead = 10

[metric_alerts]
condition_evaluation_interval = 1
search_delay = 15s+
search_ttl = 2p
honor_action = false

[msearch]
chunk_size = 1000
target_per_timeseries = 5

[mvcombine]
max_mem_usage_mb = 500


[mvexpand]
max_mem_usage_mb = 500


[outputlookup]
# Specifies if the outputlookup command should check if the user
# has write permission on the lookup file.
# The permission is set in the .meta file.
outputlookup_check_permission = false
# Specifies the context where the lookup file will be created for the first time.
create_context = app

[rare]
maxresultrows = 50000
# Maximum distinct value vectors to keep track of.
maxvalues = 0
maxvaluesize = 0

[rest]
allow_reload = false

[set]
maxresultrows = 50000


[sort]
# maximum number of concurrent files to open
maxfiles = 64


[spath]
# Number of characters to read from an XML or JSON event when
# auto extracting.
extraction_cutoff = 5000
extract_all = true


[stats]
max_keymap_rows = 1000000
maxresultrows = 50000
maxvalues = 0
maxvaluesize = 0
# For streamstats's maximum window size.
max_stream_window = 10000
# For rdigest, used to approximate order statistics (median, percentiles).
rdigest_k = 100
rdigest_maxnodes = 1
tdigest_k = 50
tdigest_max_buffer_size = 1000
tmpfile_compression = lz4
tmpfile_compression_level = 0
perc_digest_type = tdigest
list_maxsize = 100
min_chunk_size_kb = 64
max_chunk_size_kb = 4096
chunk_size_double_every = 100
# Determines whether to return results for searches with time-sensitive
# aggregations and missing or invalid timestamps in input events.
check_for_invalid_time = false

[sistats]
max_keymap_rows = 1000000
maxvalues = 0
maxvaluesize = 0
rdigest_k = 100
rdigest_maxnodes = 1
tdigest_k = 50
tdigest_max_buffer_size = 1000
perc_digest_type = tdigest
max_valuemap_bytes = 100000

[top]
maxresultrows = 50000
# Maximum distinct value vectors to keep track of.
maxvalues = 0
maxvaluesize = 0


[transactions]
# Maximum number of open transactions, or events in open transactions,
# before transaction eviction happens.
maxopentxn = 5000
maxopenevents = 100000


[tscollect]
# Default value of 'squashcase' arg if not specified by the command.
squashcase = false
# Default value of 'keepresults' arg if not specified by the command.
keepresults = false
# The max allowed size of tsidx files to create in megabytes.
# '0' implies no limit
optimize_max_size_mb = 256


[tstats]
# Whether we apply role-based search filters when users run tstats
# on normal index data (never applied on data from tscollect or
# data model acceleration).
apply_search_filter = true
# Default value of 'summariesonly' arg if not specified by the command.
summariesonly = false
# Default value of 'allow_old_summaries' arg if not specified
# by the command.
allow_old_summaries = false
# By default we retrieve up to ten million events at once from a
# TSIDX file when answering queries.
chunk_size = 10000000
# By default, do not include non-numeric values when applying
# searches that filter on numeric values.
include_events_omitted_when_filtering_numeric_values = false
# Number of search pipelines created per batch search
batch_search_max_pipeline = 1
# Should tstats use bloomfilters to eliminate buckets
use_bloomfilter = true
update_datamodel_usage_stats = true

[mstats]
time_bin_limit = 1000000
# Should mstats use bloomfilters to eliminate buckets
use_bloomfilter = true

[typeahead]
maxcount = 1000
max_servers = 2
fetch_multiplier = 50
use_cache = true
cache_ttl_sec = 300
min_prefix_length = 1
max_concurrent_per_user = 3
banned_segments =


[typer]
# In eventtyping, pay attention to the first N characters of any
# attribute (e.g., _raw), including individual tokens. Can be
# overridden by supplying the typer operator with the argument
# maxlen (e.g. "|typer maxlen=300").
maxlen = 10000


[xyseries]
max_mem_usage_mb = 200


############################################################################
# GENERAL
############################################################################
# This section contains the stanzas for a variety of general settings.

[auto_summarizer]
cache_timeout = 600
maintenance_period = 1800
return_actions_with_normalized_ids = fromcontext
normalized_summaries = true
detailed_dashboard = true
shc_accurate_access_counts = false
disable_transparent_mode_federation = false


[http_input]
# The max number of tokens reported by logging input metrics.
max_number_of_tokens = 10000
# The interval (in seconds) of logging input metrics report.
metrics_report_interval = 60
# The max request content length (800MB, to match HTTP server).
max_content_length = 838860800
# The max number of ACK channels.
max_number_of_ack_channel = 1000000
# The max number of acked requests pending query.
max_number_of_acked_requests_pending_query = 10000000
# The max number of acked requests pending query per ACK channel.
max_number_of_acked_requests_pending_query_per_ack_channel = 1000000


[indexpreview]
# Maximum number of bytes to read from each file during preview.
max_preview_bytes = 2000000
# Maximum number of results to emit per call to preview data generator.
max_results_perchunk = 2500
# Loosely-applied maximum on number of preview data objects held in memory.
soft_preview_queue_size = 100


[inputproc]
# Threshold size (in mb) to trigger fishbucket rolling to a new db.
file_tracking_db_threshold_mb = 500
# Approximate ceiling on source types & fingerprints in learned app.
learned_sourcetypes_limit = 1000

# Maximum size (in mb) of heap allowed to be created by Splunk modular
# input MonitorNoHandle.
monitornohandle_max_heap_mb = 0


[kv]
# When non-zero, the point at which kv should stop creating new columns.
maxcols = 512
# Maximum number of keys auto kv can generate.
# Set this value to 0 to impose no limit on auto kv or indexed kv extraction.
limit = 100
# Maximum number of key-value pairs that can be extracted at index time.
# Set this value to 0 to impose no limit on indexed kv extraction.
indexed_kv_limit = 200
# Truncate _raw to this size and then do auto KV.
maxchars = 10240

max_extractor_time = 1000
avg_extractor_time = 500


[kvstore]
# The max number of accelerations that can be assigned to a single collection.
# Valid values range from 0 to 50
max_accelerations_per_collection = 10
# The max number of fields that can be part of an acceleration.
# Valid values range from 0 to 30
max_fields_per_acceleration = 10
# The max number of rows that will be returned per query.
max_rows_per_query = 50000
# The max number of queries that can be run as part of the same batch.
max_queries_per_batch = 1000
# The max size of a query result in MB.
max_size_per_result_mb = 50
# The max size of a batch save operation in MB.
max_size_per_batch_save_mb = 50
# The max number of documents of a batch save operation.
max_documents_per_batch_save = 1000
# The max size of a batched query result in MB.
max_size_per_batch_result_mb = 100
# The max number of rows in memory before flushing them to CSV projection.
max_rows_in_memory_per_dump = 200
# The max number of threads to use for outputlookup.
max_threads_per_outputlookup = 1

[kvstore_migration]
# The interval in seconds at which the status of KV Store migration or
# KV Store upgrade is polled for search head cluster members.
periodic_timer_interval = 10
# The maximum number of intervals that a search head cluster member's
# failed status can remain unchanged during KV Store migration or upgrade.
max_failed_status_unchanged_count = 30

[input_channels]
max_inactive = auto
lowater_inactive = auto
inactive_eligibility_age_seconds = 330

[ldap]
# Maximum number of users we will attempt to precache from LDAP after
# reloading auth.
max_users_to_precache = 1000
# Controls whether we allow login when we find multiple entries with the
# same value for the username attribute.
allow_multiple_matching_users = true


[metrics]
# The number of series to include in the per_x_thruput reports in
# metrics.log.
maxseries = 10

# 30 seconds metrics logging interval
[tcpin_connections]
interval = 30
[thruput:thruput]
interval = 30
[thruput:index_thruput]
interval = 30
[queue]
interval = 30
[dutycycle]
interval = 30
[search_concurrency]
interval = 30
[searchscheduler]
interval = 30
[executor]
interval = 30
[jobs]
interval = 30
[search_pool]
interval = 30
[smartbus]
interval = 30
[pipeline_lb_cpu-stashparsing]
interval = 120
[pipeline_agg_cpu-stashparsing]
interval = 120
[pipeline_lb_cpu-parsing]
interval = 120
[pipeline_mp_cpu-parsing]
interval = 120
[pipeline_agg_cpu-merging]
interval = 120
[pipeline_msp_cpu-typing]
interval = 120
[pipeline:regexextractionprocessor-stashparsing]
interval = 120
[pipeline:regexextractionprocessor-typing]
interval = 120
[pipeline:regexextractionprocessor-ruleset]
interval = 120

[metrics:tcpin_connections]
# Keep metrics for each connection.
aggregate_metrics = false

# Keep _tcp_Bps, _tcp_KBps, _tcp_avg_thruput, _tcp_Kprocessed that can
# be derived from kb.
suppress_derived_info = false


[pdf]
# The max number of rows that the pdfgen rendering engine (not PDF Report
# Server app) will render for any individual table or event listing.
max_rows_per_table = 1000

# The number of seconds after which the pdfgen render endpoint will timeout
# if it has not yet finished rendering the PDF output.
render_endpoint_timeout = 3600

# The number of seconds after which the Chromium engine will timeout if the
# engine still needs to render the dashboard output.
# This setting does not impact the render_chromium_screenshot_delay.
render_chromium_timeout = 30

# The number of seconds after which the Chromium engine takes a screenshot
# of a dashboard to render before exporting the dashboard.
# This setting does not impact the render_chromium_timeout setting.
render_chromium_screenshot_delay = 0


[restapi]
# Maximum result rows to be returned by /events or /results getters from
# the REST API.
maxresultrows = 50000

# Regex constraint on time_format and output_time_format for search
# endpoints.
time_format_reject = [<>!]

# Truncate the properties over this length in the contents dictionary
# of a job entry from the jobs endpoint.
# 0 means don't truncate.
jobscontentmaxcount = 0

# Determines whether we want to hard error for REST command searches
restprocessor_errors_fatal = false

# The maximum number of persistent processes that EAI custom REST handlers
# can create to serve REST API calls in persistent mode.
max_persistent_connections = 3000


[realtime]
# Default options for indexer support of real-time searches.
# These can all be overridden for a single search via REST API arguments.

# Size of queue for each real-time search.
queue_size = 10000

# Should indexer block if a queue is full?
blocking = false

# Maximum time to block if the queue is full.
# Meaningless if blocking = false.
max_blocking_secs = 60

# Should the indexer prefilter events for efficiency?
indexfilter = true

# Should real-time windowed searches backfill with historical data by default?
default_backfill = true

# Should real-time windowed searches sort events to be in descending time order?
enforce_time_order = true

# Should we use indexedRealtime by default?
indexed_realtime_use_by_default = false

# Number of seconds to wait for disk flushes to finish with
# indexed/continuous/pseudo realtime search.
indexed_realtime_disk_sync_delay = 60

# Minimum seconds to wait between component index searches during an
# indexed realtime search.
indexed_realtime_default_span = 1

# Max number of seconds allowed to fall behind realtime before we drop data
# and reset back to the default span from realtime.
indexed_realtime_maximum_span = 0

# Frequency to fetch updated bucket list.
indexed_realtime_update_interval = 30

# This limits the frequency that we will trigger alerts during a
# realtime search.
alerting_period_ms = 0

[rex]
match_limit = 100000
depth_limit = 1000

[reversedns]
# Max percent of time allowed for reverse dns lookups for incoming
# forwarder connections before WARN is logged in splunkd.log.
# Sanity check diagnostic for slow lookups.
rdnsMaxDutyCycle = 10


[scheduler]
# Uses a separate thread to fetch scheduled and auto summarize saved searches
# asynchronously.
async_saved_search_fetch = true
# The interval at which scheduled and auto summarize saved searches
# will be fetched asynchronously.
async_saved_search_interval = 30

# The interval at which scheduled saved searches will be evaluated for
# admission rules asynchronously.
async_admission_eval_interval = 600

saved_searches_disabled = false


# The maximum number of searches the scheduler can run, as a percentage
# of the maximum number of concurrent searches.
max_searches_perc = 50

# Fraction of concurrent scheduler searches to use for auto summarization.
auto_summary_perc = 50
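# Continuing the hypothetical 22-search example from the Concurrency
# section, the scheduler may run up to 22 x 50% = 11 searches, of which
# up to 11 x 50% = 5 (rounded down) can be auto summarization searches.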

# Every search should run as soon as possible after its next scheduled time.
# However, each is penalized by its average runtime, thus allowing
# shorter-running searches to run sooner and not potentially starve.
#
# However, since many searches run in fractions of a second and the
# priority type is integral and based on seconds, adding a raw runtime is
# too small to alter the result. Therefore, we scale the runtime.
priority_runtime_factor = 10
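# For example (hypothetical numbers), a search averaging 0.5 seconds of
# runtime would contribute roughly 0.5 x 10 = 5 to its priority score,
# while a 30-second search would contribute roughly 300.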

# A potential issue with the priority_runtime_factor is that now
# longer-running searches may get starved. To balance this out, make a
# search's priority lower (better) the more times it has been skipped.
#
# The adjustment should be normalized by the search's period, i.e., an
# infrequent search that has been skipped should get a lower (better) score
# than a frequent search that has been skipped the same number of times.
#
# Eventually, this adjustment will outweigh any worse priority due to a long
# runtime. The priority_skipped_factor controls how quickly this happens.
priority_skipped_factor = 1

# The amount of time, in seconds, to delay retrying a scheduled search that
# failed to dispatch (usually due to hitting concurrency limits).
dispatch_retry_delay = 0

# The maximum number of minutes to defer running continuous scheduled searches
# while waiting for the KV Store to come up in order to load historical data.
# This is used to prevent gaps in continuous scheduled searches when splunkd
# was down.
#
# Use [<int>]<unit> to specify a duration; a missing <int> defaults to 1.
# Relevant units are: s, sec, second, secs, seconds, m, min, minute, mins,
# minutes.
# For example: "60s" = 60 seconds, "5m" = 5 minutes.
search_history_load_timeout = 2m

# The number of runtimes kept for each search that are used to calculate the
# historical average runtime during search prioritization.
search_history_max_runtimes = 10

# The maximum amount of time to run missed continuous scheduled searches for
# once Splunk comes back up in the event it was down.
#
# Use [<int>]<unit> to specify a duration; a missing <int> defaults to 1.
# Relevant units are: min, minute, mins, minutes, h, hr, hour, hrs, hours, d,
# day, days, w, week, weeks, mon, month, months.
# For example: "5m" = 5 minutes, "1h" = 1 hour.
#
# A value of 0 means no lookback.
max_continuous_scheduled_search_lookback = 24h

# The amount of time to "look back" when reporting introspection statistics.
# For example: what is the number of dispatched searches in the last 60 minutes?
#
# Use [<int>]<unit> to specify a duration; a missing <int> defaults to 1.
# Relevant units are: m, min, minute, mins, minutes, h, hr, hour, hrs, hours,
# d, day, days, w, week, weeks.
# For example: "5m" = 5 minutes, "1h" = 1 hour.
introspection_lookback = 1h

# Maximum number of results to load when triggering an action.
max_action_results = 50000

action_execution_threads = 10

actions_queue_size = 500

actions_queue_timeout = 30

alerts_max_count = 50000

alerts_max_history = 7d

alerts_expire_period = 120

persistence_period = 30

# Maximum number of lock files to keep around for each scheduled search.
# Effective only if search head pooling is enabled; the most recent files
# are kept.
max_lock_files = 5

# The lock file reaper should clean lock files that are this old (in seconds).
max_lock_file_ttl = 86400

max_per_result_alerts = 500

scheduled_view_timeout = 60m

# Scheduler timeout for printing a throttled warning message
# if we're hitting scheduler concurrency limits.
concurrency_message_throttle_time = 10m

# By default the scheduler should not run jobs on itself in search head
# pooling mode. It should dispatch to pool members.
shp_dispatch_to_member = true
# In 6.3 and beyond, Search Head Clustering has implemented role quota
# enforcement. Set this to true to enable this feature.
shc_role_quota_enforcement = false
shc_syswide_quota_enforcement = false


[search_metrics]
# Add more detail to the per-search metrics.
debug_metrics = false


[show_source]
# Maximum events retrievable by show source.
max_count = 10000
max_timebefore = 1day
max_timeafter = 1day
distributed = true
# Maximum events we will request in the distributed show source.
# Likely not all of these will be used.
distributed_search_limit = 30000


[slc]
# Maximum number of clusters to create.
maxclusters = 10000


[slow_peer_disconnect]
# Settings for the heuristic that will detect and disconnect slow peers
# towards the end of a search that has returned a large volume of data.

disabled = true
# Whether this feature is disabled.
# Defaults to true

batch_search_activation_fraction = 0.9
# The fraction of peers that must have completed before we start disconnecting.
# This is only applicable to batch search because the slow peers will not hold
# back the fast peers.
# Defaults to 0.9

packets_per_data_point = 500
# Rate statistics will be sampled once every packets_per_data_point packets.

sensitivity = 0.3
# Sensitivity of the heuristic to newer values. For larger values of
# sensitivity, the heuristic will give more weight to newer statistics.

grace_period_before_disconnect = 0.10
# If the heuristic consistently claims that the peer is slow for at least
# <grace_period_before_disconnect>*life_time_of_collector seconds, only
# then will we disconnect the peer.
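# For example, with the default of 0.10 and a collector that has been
# alive for a hypothetical 600 seconds, the peer must look slow for at
# least 0.10 * 600 = 60 seconds before being disconnected.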

threshold_data_volume = 100
# The volume of uncompressed data that must have accumulated in KB from
# a peer before we consider them in the heuristic.

threshold_connection_life_time = 5
# All peers will be given an initial grace period of at least this many
# seconds before we consider them in the heuristic.

bound_on_disconnect_threshold_as_fraction_of_mean = 0.2
# If the network is too homogeneous, resulting in very low standard
# deviations, this value may be tweaked to ensure that the thresholds we
# set are not too close to the mean. If the threshold is an upper bound,
# threshold >= mean*(1+bound_on_threshold), and if the threshold is a
# lower bound, threshold <= mean*(1-bound_on_threshold).
# The actual threshold is computed during the search based on the mean
# and std. deviations of network statistics.
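# For example, with the default of 0.2 and a hypothetical mean of
# 100 KB/s, an upper-bound threshold is at least 120 KB/s and a
# lower-bound threshold is at most 80 KB/s.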


[summarize]
poll_buckets_until_maxtime = false
bucket_refresh_interval = 30
bucket_refresh_interval_cluster = 120
auto_finalize_secs_after_maxtime = 300


[system_checks]
insufficient_search_capabilities = enabled
orphan_searches = enabled
installed_files_integrity = enabled
installed_files_integrity_interval = 12h
# See limits.conf.spec file for details.


[thruput]
# Throughput limiting at index time.
maxKBps = 0


[viewstates]
# Is the viewstate reaper enabled?
enable_reaper = true
# How often does the reaper run?
reaper_freq = 86400
# How many viewstates does the reaper consider "acceptable"?
reaper_soft_warn_level = 1000
# Reaper eligibility age.
ttl = 86400

[scheduled_views]
enable_reaper = true
reaper_freq = 86400

############################################################################
# OPTIMIZATION
############################################################################
# This section contains global and specific optimization settings


[search_optimization]
enabled = true

[search_optimization::search_expansion]
enabled = true

[search_optimization::replace_append_with_union]
enabled = true

[search_optimization::merge_union]
enabled = true

[search_optimization::insert_redistribute_command]
enabled = true

[search_optimization::predicate_split]
enabled = true

[search_optimization::predicate_push]
enabled = true

[search_optimization::predicate_merge]
enabled = true
inputlookup_merge = true
merge_to_base_search = true

[search_optimization::projection_elimination]
enabled = true

[search_optimization::required_field_values]
enabled = true
fields = eventtype, tag

[search_optimization::search_flip_normalization]
enabled = true

[search_optimization::reverse_calculated_fields]
enabled = true

[search_optimization::search_sort_normalization]
enabled = true

[search_optimization::eval_merge]
enabled = true

[search_optimization::replace_table_with_fields]
enabled = true

[search_optimization::replace_stats_cmds_with_tstats]
enabled = true
detect_search_time_field_collisions = true

[search_optimization::replace_datamodel_stats_cmds_with_tstats]
enabled = true

[search_optimization::replace_chart_cmds_with_tstats]
enabled = true
detect_search_time_field_collisions = true

[search_optimization::set_required_fields]
stats = false

[directives]
required_tags = enabled
required_eventtypes = enabled
read_summary = enabled

[parallelreduce]
# The maximum number of valid indexers that can be used as intermediate
# reducers in the reducing phase of a parallel reduce operation.
maxReducersPerPhase = 20
# The percentage of valid indexers that can be selected from the search peers
# as intermediate reducers for a parallel reduce search operation.
winningRate = 50
# Timeout value setting to ensure that adequate time is provided for
# indexers and intermediate indexers to get paired.
rdinPairingTimeout = 30
# The percentage of search queries to run as prjob in total traffic.
autoAppliedPercentage = 0
# Use parallel reduce processing to improve the performance of qualifying
# ad-hoc searches.
autoAppliedToAdhocSearches = false
# The maximum amount of preview cache memory usage allowed for parallel reduce
# search, in MB.
maxPreviewMemUsageMb = 100
# Feature flag: preview for parallel reduce
enablePreview = true
# The list of commands that will not utilize parallel reduce
disabledCommandList = addinfo

[rollup]
# Threshold on lowest rollup interval allowed
minSpanAllowed = 300

[mcollect]
# Sets the output format from mcollect to use single value format always.
always_use_single_value_output = true

[auth]
enable_install_apps = false

[segmenter]
use_segmenter_v2 = true



[watchdog]
stack_files_ttl = 7d
stack_files_removal_period = 1h


[ingest_actions]
rfs.provider.rawdata_limit_mb = 1024
rfs.provider.max_workers = 4
rfsS3DestinationOff = false


[spl2]
origin = all