# Version 9.2.2.20240415
# DO NOT EDIT THIS FILE!
# Changes to default files will be lost on update and are difficult to
# manage and support.
#
# Make changes to system defaults by overriding them in
# apps or $SPLUNK_HOME/etc/system/local
# (See "Configuration file precedence" in the web documentation).
#
# To override a specific setting, copy the name of the stanza and
# setting to the file where you wish to override it.
#
# This file contains an example messages.conf of attribute/value pairs for
# configuring externalized strings.
#
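# A minimal sketch of such an override, for illustration only: the stanza
# name below is taken from this file, and the changed severity value is
# hypothetical. Place the override in $SPLUNK_HOME/etc/system/local/messages.conf
# or in an app's local directory.
#
#   [ADDINFO:NO_SUMMARY_INDEX_RESULTS]
#   severity = info
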
[UIAPPS]
|
|
name = UI Apps Handler
|
|
|
|
[UIAPPS:AUTO_UPLOAD_OF_MODULES_FAILED]
|
|
message = The automatic upload of modules failed during the app installation.
|
|
severity = error
|
|
capabilities = edit_local_apps,install_apps
|
|
|
|
|
|
[ABSTRACT]
|
|
name = Abstract Operator
|
|
|
|
[ABSTRACT:SRANGE_ERROR__S_LD_D]
|
|
message = The value of %s is invalid: %ld. The valid range is 1-%d.
|
|
severity = error
|
|
|
|
|
|
[ADDINFO]
|
|
name = AddInfo Processor
|
|
|
|
[ADDINFO:ERROR_DELETING_FILE__S]
|
|
message = Error deleting temporary file '%s', after copying to sinkhole.
|
|
severity = error
|
|
|
|
[ADDINFO:ERROR_MOVING_FILE__S_S]
|
|
message = Error moving file '%s' to '%s'.
|
|
severity = error
|
|
|
|
[ADDINFO:FAILED_TO_WRITE_TEMP_FILE__S]
|
|
message = Could not open the following temporary file for writing: '%s'.
|
|
severity = error
|
|
|
|
[ADDINFO:INVALID_FILENAME__S]
|
|
message = File option '%s' is invalid. Filename contains one of the following prohibited characters: period (.), forward slash (/), or backslash (\).
|
|
severity = error
|
|
|
|
[ADDINFO:INVALID_FORMAT__S]
|
|
message = Format option '%s' is invalid.
|
|
severity = error
|
|
|
|
[ADDINFO:INVALID_OUTPUT_FORMAT__S]
|
|
message = output_format '%s' is invalid. Options are "raw" or "hec".
|
|
severity = error
|
|
|
|
[ADDINFO:OPTION_NOT_ALLOWED_HEC__S]
|
|
message = The '%s' option is not allowed with output_format=hec.
|
|
severity = error
|
|
|
|
[ADDINFO:INVALID_TIME_FORMAT__S]
|
|
message = Time format option '%s' is invalid.
|
|
severity = error
|
|
|
|
[ADDINFO:METRIC_DIMS_REQUIRED]
|
|
message = Must specify at least one dimension field in split mode.
|
|
severity = error
|
|
|
|
[ADDINFO:METRIC_INDEX_REQUIRED]
|
|
message = Must specify a valid metric index.
|
|
severity = error
|
|
|
|
[ADDINFO:METRIC_INCORRECT_SPLIT_OPTION]
|
|
message = The selected 'split' option is invalid. Usage: split=true, false or allnums.
|
|
severity = error
|
|
|
|
[ADDINFO:METRIC_UNSUPPORTED_OPTION__S]
|
|
message = Option '%s' is not supported in Splunk 8.0.0 and earlier.
|
|
severity = error
|
|
|
|
[ADDINFO:MISSING_ARGS]
|
|
message = Missing arguments. Usage: setfields field1=\"value1\", field2=\"value2\"...
|
|
severity = error
|
|
|
|
[ADDINFO:MISSING_BREAKERS]
|
|
message = The segmenters.conf file is missing either major or minor breakers in field values.
|
|
action = Ensure that MAJOR and MINOR breakers are specified in the segmenters.conf file when the 'collect_ignore_minor_breakers' setting is set to 'true'.
|
|
severity = error
|
|
|
|
[ADDINFO:MISSING_DELIMITER__S]
|
|
message = Missing the '=' key-value delimiter. Ignoring token '%s'.
|
|
severity = warn
|
|
|
|
[ADDINFO:MISSING_FIELD_LIST]
|
|
message = Missing a valid field list. Usage: setfields field1=\"value1\", field2=\"value2\"...
|
|
severity = error
|
|
|
|
[ADDINFO:NO_SUMMARY_INDEX_RESULTS]
|
|
message = No results to summary index.
|
|
severity = warn
|
|
|
|
[ADDINFO:NO_PERMISSION]
|
|
message = You have insufficient privileges to run this command.
|
|
severity = error
|
|
|
|
[ADDINFO:PATH_OPTION_DEPRECATED]
|
|
message = The 'path' option has been deprecated. Set option 'spool' to false to write output to $SPLUNK_HOME/var/run/splunk/<file>.
|
|
severity = warn
|
|
|
|
[ADDINFO:RESERVED_FILENAME]
|
|
message = The filename provided is reserved and cannot be used with the 'collect' command.
|
|
severity = error
|
|
|
|
[ADDINFO:SUCCESSFULLY_WROTE_FILE__S]
|
|
message = Successfully wrote file to '%s'.
|
|
severity = info
|
|
|
|
|
|
[ANALYSIS_PROCESSOR]
|
|
name = Analysis Processor
|
|
|
|
[ANALYSIS_PROCESSOR:ARG_IS_REQUIRED__S]
|
|
message = '%s' argument is required.
|
|
severity = error
|
|
|
|
|
|
[WALKLEX]
|
|
name = Walklex Processor
|
|
|
|
[WALKLEX:ERROR_SRCHFILTER_PRESENT]
|
|
message = If you have a role with a search filter, to run 'walklex' you must also have a role with one of the following capabilities: 'admin_all_objects', 'run_walklex'.
|
|
severity = error
|
|
|
|
[WALKLEX:FLEX_INDEX_NOT_SUPPORTED]
|
|
message = The walklex operation is not supported on flex indexes.
|
|
severity = error
|
|
|
|
|
|
[ANOMALIES]
|
|
name = Anomalies
|
|
|
|
[ANOMALIES:BAD_DENYLIST_THRESHOLD]
|
|
message = The denylist threshold is invalid. It must be between 0 and 1.0.
|
|
severity = error
|
|
|
|
[ANOMALIES:BAD_COMPRESSION]
|
|
message = Compression failed. Aborting search.
|
|
severity = error
|
|
|
|
[ANOMALIES:BAD_THRESHOLD]
|
|
message = The threshold is invalid. It must be between 0 and 1.0.
|
|
severity = error
|
|
|
|
[ANOMALIES:BAD_MAXVALUES]
|
|
message = The maxvalues setting is invalid. Maxvalues must be between 10 and 10000.
|
|
severity = error
|
|
|
|
[ANOMALIES:SILOING_FIELD_NOT_FOUND]
|
|
message = A separating field was not found. Carrying on without it.
|
|
severity = info
|
|
|
|
[ANOMALIES:DOTS_IN_DENYLIST_FILE_NAME]
|
|
message = Denylist file name cannot contain '..'.
|
|
severity = error
|
|
|
|
[ANOMALIES:DEPRECATION_INFO__S_S]
|
|
message = The '%s' parameter is deprecated. Use '%s' instead.
|
|
severity = info
|
|
|
|
|
|
[ANOMALY]
|
|
name = Anomaly Processor
|
|
|
|
[ANOMALY:INVALID_ACTION]
|
|
message = The 'action' option value is invalid for this method. It must be 'filter' (default), 'annotate', or 'summary'.
|
|
severity = error
|
|
|
|
[ANOMALY:INVALID_IQR_ACTION]
|
|
message = The 'action' option value is invalid for this method. It must be 'tf' (default), 'transform', 'rm', or 'remove'.
|
|
severity = error
|
|
|
|
[ANOMALY:INVALID_MAXANOFREQ]
|
|
message = The 'maxanofreq' option value is invalid. It must be > 0 and <= 1.0.
|
|
severity = error
|
|
|
|
[ANOMALY:INVALID_METHOD]
|
|
message = The 'method' option value is invalid. It must be 'histogram', 'zscore', or 'iqr'.
|
|
severity = error
|
|
|
|
[ANOMALY:INVALID_MINNORMFREQ]
|
|
message = The 'minnormfreq' option value is invalid. It must be >= 0 and <= 1.0.
|
|
severity = error
|
|
|
|
[ANOMALY:INVALID_MINSUPFREQ]
|
|
message = The 'minsupfreq' option value is invalid. It must be >= 0 and <= 1.0.
|
|
severity = error
|
|
|
|
[ANOMALY:INVALID_PARAM]
|
|
message = The 'param' option value is invalid. It must be >= 0.0.
|
|
severity = error
|
|
|
|
[ANOMALY:INVALID_PTHRESH]
|
|
message = The 'pthresh' option value is invalid. It must be >= 0.0 and <= 1.0.
|
|
severity = error
|
|
|
|
|
|
[APPSERVER]
|
|
name = App Server
|
|
|
|
[APPSERVER:APPSERVER_PORT_ZERO]
|
|
message = Running Splunk Web in legacy mode by setting appServerPorts=0 has been removed.
|
|
action = Set appServerPorts to a valid port or list of ports in web.conf under [settings].
|
|
severity = error
|
|
capabilities = edit_web_settings
|
|
help = message.legacy.splunkweb.deprecated
|
|
|
|
[APPSERVER:ROOT_PY_NOT_EXIST]
|
|
message = App server script does not exist.
|
|
action = Reinstall Splunk software.
|
|
severity = error
|
|
|
|
[APPSERVER:APPSERVER_NOT_RESPONDING__S]
|
|
message = Appserver at %s never started up.
|
|
action = Set `appServerProcessLogStderr` to "true" under [settings] in web.conf. Restart, try the operation again, and review splunkd.log for any messages that contain "UiAppServer - From appserver".
|
|
severity = error
|
|
|
|
|
|
[APPEND]
|
|
name = Append Processor
|
|
|
|
[APPEND:SUBSEARCH_LAST_ARG]
|
|
message = The last argument must be a subsearch.
|
|
severity = error
|
|
|
|
[APPEND:CAN_ONLY_BE_CALLED_AFTER_REPORTING__S]
|
|
message = You can only use %s after a reporting command (such as stats, chart, or timechart).
|
|
severity = error
|
|
|
|
|
|
[ARULE]
|
|
name = Arule Processor
|
|
|
|
[ARULE:BAD_CONF_OPT_VAL__F]
|
|
message = The 'conf' option value must be > 0.0 and <= 1.0, got '%f'.
|
|
severity = error
|
|
|
|
[ARULE:BAD_SUP_OPT_VAL__D]
|
|
message = The 'sup' option value must be >= 0, got '%d'.
|
|
severity = error
|
|
|
|
[ARULE:NO_FIELDS_SPECIFIED]
|
|
message = No fields specified.
|
|
severity = error
|
|
|
|
[ARULE:TABLE_DATA_BAD_GIVEN_OR_IMPLIED_FIELD]
|
|
message = Table data has a missing or invalid given or implied field.
|
|
severity = error
|
|
|
|
[ARULE:TABLE_DATA_MISSING_GIVEN_OR_IMPLIED_FIELDS]
|
|
message = Table data is missing given or implied fields.
|
|
severity = error
|
|
|
|
|
|
[ASSOCIATION_PROCESSOR]
|
|
name = Association Processor
|
|
|
|
[ASSOCIATION_PROCESSOR:BAD_FIELD_SIZE]
|
|
message = You cannot restrict the analysis to a single field.
|
|
severity = error
|
|
|
|
[ASSOCIATION_PROCESSOR:BAD_SUPPORT_COUNT]
|
|
message = The minimum support count (supcnt) must be > 0.
|
|
severity = error
|
|
|
|
[ASSOCIATION_PROCESSOR:COUNT_IS_ZERO]
|
|
message = APKeyInfo::isCandidateKey() was called with total_count of 0.
|
|
severity = error
|
|
|
|
[ASSOCIATION_PROCESSOR:NO_ASSOC_FOUND_1]
|
|
message = Found no associations. Consider decreasing the minimum support.
|
|
severity = info
|
|
|
|
[ASSOCIATION_PROCESSOR:NO_ASSOC_FOUND_2]
|
|
message = Found no associations. Consider decreasing the minimum support or the minimum entropy improvement.
|
|
severity = info
|
|
|
|
[ASSOCIATION_PROCESSOR:NO_EVENTS_OR_FIELDS]
|
|
message = Found no events or fields to analyze.
|
|
severity = debug
|
|
|
|
[ASSOCIATION_PROCESSOR:NO_FIELDS]
|
|
message = Found no events containing the specified fields.
|
|
severity = debug
|
|
|
|
|
|
[AST]
|
|
name = AST Optimizer
|
|
|
|
[AST:DATASET_EXPECT_JSON_OBJECT]
|
|
message = Expected a JSON object.
|
|
severity = error
|
|
|
|
[AST:DATASET_INVALID_ATTRIBUTE__S_S]
|
|
message = Dataset '%s' has invalid '%s' attribute.
|
|
severity = error
|
|
|
|
[AST:DATASET_MISSING_ATTRIBUTE__S_S]
|
|
message = Dataset '%s' has no '%s' attribute.
|
|
severity = error
|
|
|
|
[AST:EXPECT_ARRAY__S]
|
|
message = Expected an array of '%s' attribute.
|
|
severity = error
|
|
|
|
[AST:INVALID_JSON_DOCUMENT]
|
|
message = Expected a valid JSON document.
|
|
severity = error
|
|
|
|
[AST:MISSING_ATTRIBUTE__S]
|
|
message = Object has no '%s' attribute.
|
|
severity = error
|
|
|
|
[AST:MISSING_ATTRIBUTE_FOR__S_S]
|
|
message = %s has no '%s' attribute.
|
|
severity = error
|
|
|
|
[AST:INVALID_ATTRIBUTE__S_S]
|
|
message = Object has invalid '%s' attribute: %s
|
|
severity = error
|
|
|
|
[AST:INVALID_ATTRIBUTE_FOR__S_S_S]
|
|
message = %s has invalid '%s' attribute: %s
|
|
severity = error
|
|
|
|
[AST:NOT_PARSE_ONLY_MODE]
|
|
message = Expect parse_only mode.
|
|
severity = error
|
|
|
|
[AST:INVALID_NUM_SUBSEARCHES__D]
|
|
message = There is an invalid number of subsearches in this search command: %d
|
|
severity = error
|
|
|
|
|
|
[AUDIT]
|
|
name = Audit Event Generator
|
|
|
|
[AUDIT:START_OF_EVENT_DROPS]
|
|
message = Now skipping indexing of internal audit events, because the downstream queue is not accepting data. Will keep dropping events until data flow resumes.
|
|
action = Review system health: ensure downstream indexing and/or forwarding are operating correctly.
|
|
severity = warn
|
|
capabilities = admin_all_objects
|
|
|
|
|
|
[AUTH]
|
|
name = Authentication manager
|
|
|
|
[AUTH:GENERAL_LOGIN_FAILURE_MSG]
|
|
message = Login failed
|
|
severity = error
|
|
|
|
[AUTH:ACTION_FORBIDDEN]
|
|
message = Action forbidden.
|
|
severity = error
|
|
|
|
|
|
[AUTO_REGRESSION_PROCESSOR]
|
|
name = Auto Regression Processor
|
|
|
|
[AUTO_REGRESSION_PROCESSOR:FIELD_NAME_EXPECTED]
|
|
message = A field name was expected.
|
|
severity = error
|
|
|
|
[AUTO_REGRESSION_PROCESSOR:FIELD_NAME_EXPECTED_AFTER_KEYWORD__S]
|
|
message = A field name is expected after '%s'.
|
|
severity = error
|
|
|
|
[AUTO_REGRESSION_PROCESSOR:INVALID_FIELD_NAME__S]
|
|
message = The '%s' field name is invalid.
|
|
severity = error
|
|
|
|
[AUTO_REGRESSION_PROCESSOR:INVALID_P_END__S]
|
|
message = The '%s' value for the 'p' range end is invalid.
|
|
severity = error
|
|
|
|
[AUTO_REGRESSION_PROCESSOR:INVALID_P_RANGE_END__LU]
|
|
message = The range of 'p' values cannot exceed %lu.
|
|
severity = error
|
|
|
|
[AUTO_REGRESSION_PROCESSOR:INVALID_P_START__S]
|
|
message = The '%s' value for the 'p' range start is invalid.
|
|
severity = error
|
|
|
|
[AUTO_REGRESSION_PROCESSOR:INVALID_P_VALUE__S]
|
|
message = The value '%s' for 'p' is invalid.
|
|
severity = error
|
|
|
|
[AUTO_REGRESSION_PROCESSOR:NEW_FIELD_NAME_NOT_ALLOWED_FOR_RANGE]
|
|
message = You cannot specify a new field name when you specify a range for 'p'.
|
|
severity = error
|
|
|
|
|
|
[BATCH_SEARCH]
|
|
name = Batch Search
|
|
|
|
[BATCH_SEARCH:TOO_MANY_EVENTS_IN_A_SECOND__D_D]
|
|
message = The search failed. More than %d events found at time %d.
|
|
severity = error
|
|
|
|
[BATCH_SEARCH:UNABLE_TO_CREATE_DIR__S]
|
|
message = The search failed. Unable to create directory %s.
|
|
severity = error
|
|
|
|
[BATCH_SEARCH:UNABLE_TO_READ_TEMP]
|
|
message = The search failed. Unable to read temp files on disk.
|
|
severity = error
|
|
|
|
[BATCH_SEARCH:UNABLE_TO_WRITE_TEMP]
|
|
message = The search failed. Unable to write temp files to disk.
|
|
severity = error
|
|
|
|
|
|
[BRANCHED_PROCESSOR]
|
|
name = Branched Processor
|
|
|
|
[BRANCHED_PROCESSOR:INVALID_POSTSEARCH__S_S]
|
|
message = Post-process searches cannot contain a generating command. The post-process search '%s' currently generates events.
|
|
action = Rewrite the post-process search so that it does not include generating commands in base search '%s'.
|
|
severity = error
|
|
capabilities = search
|
|
|
|
[BRANCHED_PROCESSOR:UNABLE_TO_CREATE_DIR]
|
|
message = Could not create branch subdirectory.
|
|
severity = error
|
|
|
|
[BRANCHED_PROCESSOR:BRANCH_EXCEPTION__S]
|
|
message = Caught an exception in branch: %s.
|
|
severity = error
|
|
|
|
|
|
[BUCKET_CACHE]
|
|
name = Bucket Cache
|
|
|
|
[BUCKET_CACHE:INDEXED_KV_LIMIT_REACHED__ZU]
|
|
message = The search you ran exceeded the indexed field extraction limit='%zu' while it was extracting fields. If the search references a field that is not part of the set of indexed fields that the search annotates to the results that it generates, those results might be incomplete or incorrect.
|
|
severity = info
|
|
action = To ensure that all fields are extracted for search, set limits.conf: [kv] / indexed_kv_limit to a number that is higher than the number of fields contained in the files that you index.
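# For illustration only: the action above refers to the indexed_kv_limit
# setting under the [kv] stanza in limits.conf. A minimal sketch of such an
# override (the value shown is hypothetical) in
# $SPLUNK_HOME/etc/system/local/limits.conf:
#
#   [kv]
#   indexed_kv_limit = 500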
|
|
|
|
|
|
[BUNDLE_REPLICATION]
|
|
name = Bundle Replication
|
|
|
|
[BUNDLE_REPLICATION:RFS_UPLOAD_PROBLEM__S]
|
|
message = Problem replicating config (bundle) to Remote File Storage (RFS) location %s.
|
|
capabilities = edit_search_server
|
|
|
|
[BUNDLE_REPLICATION:UPLOAD_BUNDLE_DATA_PROBLEM__S_S]
|
|
message = Problem replicating config (bundle) to search peer '%s', %s.
|
|
capabilities = edit_search_server
|
|
|
|
[BUNDLE_REPLICATION:REPLICATION_ABORTED__S]
|
|
message = Bundle Replication aborted for search peer %s.
|
|
severity = error
|
|
action = Please check for relevant errors in splunkd.log in $SPLUNK_HOME/var/log/splunk.
|
|
capabilities = search
|
|
|
|
[BUNDLE_REPLICATION:REPLICATION_FAILED__S]
|
|
message = Bundle Replication failed for search peer %s.
|
|
severity = error
|
|
action = Please check for relevant errors in splunkd.log in $SPLUNK_HOME/var/log/splunk.
|
|
capabilities = search
|
|
|
|
[BUNDLE_REPLICATION:EXCEEDS_MAX_CONTENT_LENGTH__LD_S_LD]
|
|
message = Knowledge bundle upload size=%ld for searchhead=%s exceeds server.conf [httpServer] max_content_length=%ld. This may affect the performance of distributed search or lead to excessive disk usage on search peers. Consider removing large files from the knowledge bundle, especially any large lookup table files that change frequently.
|
|
severity = warn
|
|
capabilities = admin_all_objects
|
|
help = message.knowledgebundle.size
|
|
target = log
|
|
|
|
[BUNDLE_REPLICATION:BUNDLE_SIZE_EXCEEDS_WARN__LLD_LLD_LLD]
|
|
message = Knowledge bundle size=%lldMB exceeds %lld percent of max limit=%lldMB. If knowledge bundle size keeps going up and hits the max limit, distributed searches will be running against an outdated knowledge bundle.
|
|
severity = warn
|
|
action = Please remove/disable files from knowledge bundle or increase maxBundleSize in distsearch.conf.
|
|
capabilities = edit_search_server
|
|
help = message.knowledgebundle.size
|
|
|
|
[BUNDLE_REPLICATION:BUNDLE_SIZE_EXCEEDS_LIMIT__LLD_LLD]
|
|
message = Knowledge bundle size=%lldMB exceeds max limit=%lldMB. Distributed searches are running against an outdated knowledge bundle.
|
|
severity = error
|
|
action = Please remove/disable files from knowledge bundle or increase maxBundleSize in distsearch.conf.
|
|
capabilities = edit_search_server
|
|
help = message.knowledgebundle.size
|
|
|
|
[BUNDLE_REPLICATION:BUNDLE_SIZE_EXCEEDS_HARD_LIMIT__LLD_LLD]
|
|
message = Knowledge bundle size=%lldMB exceeds hard limit=%lldMB.
|
|
severity = error
|
|
action = Please remove/disable files from knowledge bundle.
|
|
capabilities = edit_search_server
|
|
help = message.knowledgebundle.size
|
|
|
|
|
|
[CHUNK_PROCESSOR]
|
|
name = Chunk Processor
|
|
|
|
[CHUNK_PROCESSOR:INVALID_FIELD]
|
|
message = Invalid field.
|
|
severity = error
|
|
|
|
[CHUNK_PROCESSOR:USAGE]
|
|
message = Last argument must be a subsearch.
|
|
severity = error
|
|
|
|
|
|
[CHUNKED]
|
|
name = Chunked Extern Processor
|
|
|
|
[CHUNKED:INVALID_MAXCHUNKSIZE]
|
|
message = Invalid maxchunksize value in commands.conf: must be a non-negative integer.
|
|
severity = warn
|
|
|
|
[CHUNKED:INVALID_MAXWAIT]
|
|
message = Invalid maxwait value in commands.conf: must be a non-negative integer.
|
|
severity = warn
|
|
|
|
[CHUNKED:INVALID_PYTHON_VERSION__S_S]
|
|
message = The Python version '%s' for command '%s' in commands.conf is invalid.
|
|
severity = warn
|
|
|
|
[CHUNKED:INVALID_RESULT]
|
|
message = Invalid message received from external search command during search. See search.log.
|
|
severity = error
|
|
|
|
[CHUNKED:MERGE_ERROR__S]
|
|
message = Invalid '%s' from external search process: out of range or out of order.
|
|
severity = error
|
|
|
|
[CHUNKED:NON_ZERO_EXIT__D]
|
|
message = External search command exited unexpectedly with non-zero error code %d.
|
|
severity = error
|
|
|
|
[CHUNKED:SCRIPT_NOT_FOUND__S]
|
|
message = Could not find file: '%s'.
|
|
severity = error
|
|
|
|
[CHUNKED:SETUP_ERROR]
|
|
message = Invalid message received from external search command during setup. See search.log.
|
|
severity = error
|
|
|
|
[CHUNKED:UNEXPECTED_EXIT]
|
|
message = External search command exited unexpectedly.
|
|
severity = error
|
|
|
|
[CHUNKED:WRITE_ERROR]
|
|
message = Failed to send message to external search command. See search.log.
|
|
severity = error
|
|
|
|
|
|
[CLUSTER]
|
|
name = Event-Clustering Search Processor
|
|
|
|
[CLUSTER:BAD_DISTANCE_TYPE__S]
|
|
message = The distance type '%s' is invalid.
|
|
severity = error
|
|
|
|
[CLUSTER:BAD_FIELD]
|
|
message = Cluster number field name cannot be an empty string.
|
|
severity = error
|
|
|
|
[CLUSTER:BAD_NUM_CLUSTERS__S]
|
|
message = The number of clusters (%s) is invalid.
|
|
severity = error
|
|
|
|
[CLUSTER:BAD_REPS_OR_MAXITER]
|
|
message = The reps or maxIter value is invalid.
|
|
severity = error
|
|
|
|
[CLUSTER:BAD_SEED]
|
|
message = Random seed must be > 0.
|
|
severity = error
|
|
|
|
[CLUSTER:BAD_THRESHOLD__F]
|
|
message = The threshold '%f' is invalid.
|
|
severity = error
|
|
|
|
[CLUSTER:KMEANS_FAIL]
|
|
message = K-means clustering failed.
|
|
severity = error
|
|
|
|
[CLUSTER:KMEANS_FAIL_FOR__U]
|
|
message = K-means clustering failed for k = %u.
|
|
severity = error
|
|
|
|
[CLUSTER:MAX_DATA_POINTS_REACHED__LU]
|
|
message = Reached maximum data points limit (%lu). Some events will be ignored. Edit limits.conf to change limits.
|
|
severity = warn
|
|
|
|
[CLUSTER:NO_ALL_NUMERICAL_FIELD_FOUND]
|
|
message = Found no completely numerical fields.
|
|
severity = warn
|
|
|
|
[CLUSTER:NO_RESULTS_WITH_ALL_NUMERICAL_FIELDS]
|
|
message = Found no results with all numerical values for the specified fields.
|
|
severity = warn
|
|
|
|
[CLUSTER:NOT_ENOUGH_RESULTS_FOR_CLUSTER_COUNT__U_U]
|
|
message = There aren't enough qualifying results (%u) for the specified number of clusters (%u).
|
|
severity = warn
|
|
|
|
|
|
[CO_FILTER]
|
|
name = CoFilter Processor
|
|
|
|
[CO_FILTER:INCORRECT_NUMBER_FIELDS]
|
|
message = Incorrect number of fields, expected two.
|
|
severity = error
|
|
|
|
[CO_FILTER:OUT_OF_MEMORY]
|
|
message = Out of memory.
|
|
severity = error
|
|
|
|
[CO_FILTER:SOME_FIELDS_ARE_MISSING]
|
|
message = Some expected fields are missing in input.
|
|
severity = error
|
|
|
|
|
|
[COMMON]
|
|
name = Common Value Processor
|
|
|
|
[COMMON:BAD_FIELD_NAMES]
|
|
message = The output count field cannot have the same name as the output percent field.
|
|
severity = error
|
|
|
|
[COMMON:BAD_OUT_MISSING_FIELDS]
|
|
message = Corrupt output from pretop or prerare (missing fields).
|
|
severity = error
|
|
|
|
[COMMON:BAD_OUT_MISSING_SPLIT]
|
|
message = Corrupt output from pretop or prerare (missing split by fields).
|
|
severity = error
|
|
|
|
[COMMON:BAD_OUT_ZERO_COUNT]
|
|
message = The pretop or prerare output is corrupt (zero count).
|
|
severity = error
|
|
|
|
[COMMON:CNT_FIELD_CONFLICT__S]
|
|
message = The output count field conflicts with the input field '%s'. Use the 'countfield' option to specify a different name.
|
|
severity = error
|
|
|
|
[COMMON:DUPLICATE_FIELD__S]
|
|
message = The '%s' field is specified multiple times.
|
|
severity = error
|
|
|
|
[COMMON:DUPLICATE_SPLIT_FIELD__S]
|
|
message = The split by field '%s' cannot be repeated.
|
|
severity = error
|
|
|
|
[COMMON:EMPTY_SPLITFIELDS]
|
|
message = Field(s) to split on are expected after the 'by' keyword.
|
|
severity = error
|
|
|
|
[COMMON:FAILED_TO_RENAME_FILE__S_S]
|
|
message = Failed to rename file '%s' to '%s'.
|
|
severity = warn
|
|
|
|
[COMMON:HIGH_LIMIT__LU]
|
|
message = The limit must be <= %lu.
|
|
severity = error
|
|
|
|
[COMMON:INTERMEDIATE_STORAGE_LIMIT_REACHED]
|
|
message = Reached the intermediate storage limit. Output might not be completely accurate. If necessary, edit limits.conf.
|
|
severity = warn
|
|
|
|
[COMMON:NO_FIELDS]
|
|
message = No fields were specified.
|
|
severity = error
|
|
|
|
[COMMON:PCT_FIELD_CONFLICT__S]
|
|
message = The output percent field conflicts with the input field '%s'. Use the 'percentfield' option to specify a different name.
|
|
severity = error
|
|
|
|
[COMMON:RESERVED_FIELD__S]
|
|
message = The '%s' field name is reserved for internal use.
|
|
severity = error
|
|
|
|
|
|
[CONCURRENCY]
|
|
name = Concurrency Processor
|
|
|
|
[CONCURRENCY:EVENTS_MISSING_FIELDS__LU]
|
|
message = %lu events were ignored due to missing or invalid start or duration fields.
|
|
severity = warn
|
|
|
|
[CONCURRENCY:INVALID_OUTPUT_FIELD]
|
|
message = The specified output field is invalid.
|
|
severity = error
|
|
|
|
[CONCURRENCY:INVALID_START_FIELD]
|
|
message = The specified start field is invalid.
|
|
severity = error
|
|
|
|
[CONCURRENCY:LIMIT_REACHED__LU]
|
|
message = Concurrency limit reached (%lu) for some events.
|
|
severity = warn
|
|
|
|
[CONCURRENCY:NO_VALID_DURATION_FIELD]
|
|
message = A valid duration field is required.
|
|
severity = error
|
|
|
|
|
|
[CONFIG_MULTI_KV]
|
|
name = Conf Multi KV
|
|
|
|
[CONFIG_MULTI_KV:MISSING_CONF_FILE]
|
|
message = Failed to find the multikv.conf configuration file.
|
|
severity = error
|
|
|
|
[CONFIG_MULTI_KV:MISSING_VALID_CONFIG]
|
|
message = Failed to find a valid configuration for multikv stanza =
|
|
severity = error
|
|
|
|
|
|
[CONTINGENCY]
|
|
name = Contingency Processor
|
|
|
|
[CONTINGENCY:BAD_OPT_VAL__S_LU]
|
|
message = The value of option '%s' must be <= %lu.
|
|
severity = error
|
|
|
|
[CONTINGENCY:BAD_ROW_COL_FIELDS]
|
|
message = You must specify valid and distinct row and column fields.
|
|
severity = error
|
|
|
|
[CONTINGENCY:BAD_ROW_COL_MIN_VALS]
|
|
message = The min row and col covers must be > 0.0 and <= 1.0.
|
|
severity = error
|
|
|
|
[CONTINGENCY:CORRUPT_TABLE_DATA__S]
|
|
message = Corrupt data from pre-ctable. The '%s' field is not numerical.
|
|
severity = error
|
|
|
|
[CONTINGENCY:INVALID_TOTALSTR__S]
|
|
message = The value of the 'totalstr' option must be a valid field name ('%s' is invalid).
|
|
severity = error
|
|
|
|
[CONTINGENCY:ROW_COL_NOT_FOUND__S_S]
|
|
message = Unable to find row '%s' and/or col '%s' fields in the results.
|
|
severity = warn
|
|
|
|
|
|
[CONTINUITY]
|
|
name = Continuity Processor
|
|
|
|
[CONTINUITY:APPENDING_EXTRANEOUS_EVENTS__LU]
|
|
message = Appending %lu extraneous events to the end.
|
|
severity = warn
|
|
|
|
[CONTINUITY:APPENDING_EXTRANEOUS_EVENTS_MAYBE_MISUSE__LU]
|
|
message = Appending %lu extraneous events to the end. Likely a result of makecontinuous command misuse.
|
|
severity = warn
|
|
|
|
[CONTINUITY:AVOIDGAPS_IGNORED_WITH_SPAN]
|
|
message = When you specify 'span', Splunk ignores 'avoidgaps'.
|
|
severity = info
|
|
|
|
[CONTINUITY:DUP_VALS_DETECTED__S]
|
|
message = Unexpected duplicate values in field '%s' have been detected.
|
|
severity = error
|
|
|
|
[CONTINUITY:INVALID_OPT_VAL__S_S]
|
|
message = The value for option %s is invalid: '%s'.
|
|
severity = error
|
|
|
|
[CONTINUITY:MULTI_FILE_UNSUPPORTED]
|
|
message = Multifile input is not supported. Operating on in-memory results only.
|
|
severity = error
|
|
|
|
[CONTINUITY:NUMERICAL_INSTABILITY]
|
|
message = There is a potential numerical stability issue with the given value range.
|
|
severity = error
|
|
|
|
[CONTINUITY:TOO_MANY_ROWS__LU]
|
|
message = The specified span would result in too many (>%lu) rows.
|
|
severity = error
|
|
|
|
[CONTINUITY:VAL_GT_VAL__S_S_S_S]
|
|
message = The %s value '%s' must be > the %s value '%s'.
|
|
severity = error
|
|
|
|
|
|
[CONVERSION]
|
|
name = Conversion Processor
|
|
|
|
[CONVERSION:FILL_PATTERN_FAIL__S_S_S]
|
|
message = Could not fill the rename pattern '%s' using the field '%s' (matched wildcard '%s').
|
|
severity = error
|
|
|
|
[CONVERSION:INVALID_ARGUMENT__S]
|
|
message = The argument '%s' is invalid.
|
|
severity = error
|
|
|
|
[CONVERSION:INVALID_CONVERSION_SPECIFIER]
|
|
message = The conversion specifier is invalid. It must be convert_type(key).
|
|
severity = error
|
|
|
|
[CONVERSION:INVALID_CONVERSION_TYPE__S]
|
|
message = The conversion type '%s' is invalid.
|
|
severity = error
|
|
|
|
[CONVERSION:INVALID_FIELD_NAME__S]
|
|
message = The field name '%s' is invalid.
|
|
severity = error
|
|
|
|
[CONVERSION:INVALID_TIME_FORMAT__S]
|
|
message = The time format '%s' is invalid.
|
|
severity = error
|
|
|
|
[CONVERSION:INVALID_WILDCARD__S]
|
|
message = The wildcard specifier '%s' is invalid. It contains consecutive '*' chars.
|
|
severity = error
|
|
|
|
[CONVERSION:MULTIPLE_FIELD_SPEC__S]
|
|
message = The field '%s' is specified multiple times. It should be specified no more than once.
|
|
severity = error
|
|
|
|
[CONVERSION:WILDCARD_MISMATCH]
|
|
message = There is a wildcard mismatch between the key specifier and the rename specifier.
|
|
severity = error
|
|
|
|
|
|
[COPY_RESULTS_PROCESSOR]
|
|
name = Copy Results Processor
|
|
|
|
[COPY_RESULTS_PROCESSOR:APP_PERMISSION_DENIED__S]
|
|
message = Permission denied. You do not have sufficient privileges to write to the '%s' application.
|
|
severity = error
|
|
|
|
[COPY_RESULTS_PROCESSOR:CANNOT_FIND_JOB__S]
|
|
message = Cannot find a job with the search_id '%s'.
|
|
severity = error
|
|
|
|
[COPY_RESULTS_PROCESSOR:CANNOT_FIND_RESULTS__S]
|
|
message = Cannot find results for search_id '%s'.
|
|
severity = error
|
|
|
|
[COPY_RESULTS_PROCESSOR:CANNOT_RENAME_TEMP_FILE__S_S]
|
|
message = Could not rename temporary file '%s' to '%s'.
|
|
severity = error
|
|
|
|
[COPY_RESULTS_PROCESSOR:CANNOT_RENAME_TEMP_FILE_WIN__S_S_S]
|
|
message = Could not rename temporary file '%s' to '%s'. Error: '%s'.
|
|
severity = error
|
|
|
|
[COPY_RESULTS_PROCESSOR:COPY_FAILED__S_S]
|
|
message = Failed to copy results of search_id '%s' to path '%s'.
|
|
severity = error
|
|
|
|
[COPY_RESULTS_PROCESSOR:EMPTY_SEARCH_ID]
|
|
message = You must provide a search id.
|
|
severity = error
|
|
|
|
[COPY_RESULTS_PROCESSOR:INVALID_APPLICATION__S]
|
|
message = The destination application '%s' does not exist.
|
|
severity = error
|
|
|
|
[COPY_RESULTS_PROCESSOR:NO_DESTINATION_FILE]
|
|
message = No destination file is provided.
|
|
severity = error
|
|
|
|
[COPY_RESULTS_PROCESSOR:PERMISSION_DENIED__S]
|
|
message = Permission denied. Cannot access contents of job with search_id '%s'.
|
|
severity = error
|
|
|
|
[COPY_RESULTS_PROCESSOR:SID_INVALID_CHAR__S]
|
|
message = Job_id '%s' is invalid. Valid IDs are not reserved filenames, do not start with '.', and contain only letters, numbers, or the following characters: '_ .!#$%%&'()+,-;=@[]^`{}~'.
|
|
severity = error
|
|
|
|
|
|
[CORRELATION]
|
|
name = Correlation Processor
|
|
|
|
[CORRELATION:INVALID_CORR_TYPE__S]
|
|
message = The correlation type '%s' is invalid.
|
|
severity = error
|
|
|
|
[CORRELATION:MULTIPLE_FIELD_SPEC__S]
|
|
message = The field '%s' is specified multiple times. It should only be specified once.
|
|
severity = error
|
|
|
|
[CORRELATION:NO_QUALIFYING_FIELDS]
|
|
message = No qualifying fields exist in the data.
|
|
severity = warn
|
|
|
|
[CORRELATION:SINGLE_FIELD]
|
|
message = Splunk cannot create a correlation based on a single field. You must provide at least one other field.
|
|
severity = error
|
|
|
|
|
|
[CURSOREDSEARCH]
|
|
name = Cursored Search
|
|
|
|
[CURSOREDSEARCH:SUBSECOND_ORDER]
|
|
message = Events might not be returned in sub-second order due to search memory limits. See search.log for more information.
|
|
action = Increase the value of the following limits.conf setting:[search]:max_rawsize_perchunk.
|
|
severity = error
|
|
capabilities = search
|
|
|
|
|
|
[ADAPTIVESEARCH]
|
|
name = Adaptive Search
|
|
|
|
[ADAPTIVESEARCH:AST_TRANSLATION_ERROR]
|
|
message = Unable to convert the original search to ast-doc.
|
|
|
|
[ADAPTIVESEARCH:AST_OPTIMIZE_ERROR]
|
|
message = Unable to optimize the ast search.
|
|
|
|
[ADAPTIVESEARCH:FALLBACK_TO_LEGACY_SEARCH__S]
|
|
message = Fallback to legacy Splunk search. Reason='%s'
|
|
|
|
[DATA_FEDERATION]
|
|
name = Data Federation
|
|
|
|
[DATA_FEDERATION:PROVIDER_TYPE_INVALID__S]
|
|
message = Federated provider '%s' type is invalid.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PROVIDER_STANDARD_MISCONFIGURED_ERROR__S]
|
|
message = Federated provider '%s' is misconfigured. 'useFSHKnowledgeObjects' must be set to '0' or 'false' for a standard mode provider. Automatically setting 'useFSHKnowledgeObjects' to 'false'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PROVIDER_TRANSPARENT_MISCONFIGURED_ERROR__S]
|
|
message = Federated provider '%s' is misconfigured. 'useFSHKnowledgeObjects' must be set to '1' or 'true' for a transparent mode provider. Automatically setting 'useFSHKnowledgeObjects' to 'true'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PROVIDER_STANDARD_MISCONFIGURED_EXCEPTION__S]
|
|
message = Federated provider '%s' is misconfigured. 'useFSHKnowledgeObjects' must be set to '0' or 'false' for a standard mode provider.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PROVIDER_TRANSPARENT_MISCONFIGURED_EXCEPTION__S]
|
|
message = Federated provider '%s' is misconfigured. 'useFSHKnowledgeObjects' must be set to '1' or 'true' for a transparent mode provider.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PROVIDER_MISSING_FIELD__S_S]
|
|
message = Required field '%s' is missing in the federated provider '%s'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PROVIDER_NOT_DEFINED__S]
|
|
message = The federated provider '%s' is either disabled or not defined.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PROVIDER_NOT_DEFINED_INDEX__S_S]
|
|
message = The federated provider '%s' is incorrectly defined for federated index '%s'.
|
|
action = Ask your admin to review the federated index configuration in server.conf and correct any errors they find.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PROVIDER_HOST_INVALID__S_S]
|
|
message = Host '%s' in federated provider '%s' is invalid.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PROVIDER_PORT_INVALID__S_S]
|
|
message = Port '%s' in federated provider '%s' is invalid.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:MISSING_ARG__S]
|
|
message = This federated provider requires the '%s' argument.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FAILED_INDEX_EXTRACTION]
|
|
message = Failed to extract federated indexes.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:EXCEED_ALLOWED_INDEX_NUM]
|
|
message = Search command can only accept one federated index.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:VERBOSE_MODE_DEACTIVATED_STANDARD]
|
|
message = The search is cancelled because verbose mode is deactivated for federated search.
|
|
action = Run the federated search in fast mode.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:VERBOSE_MODE_DEACTIVATED_TRANSPARENT]
|
|
message = The search was not sent to the federated provider because verbose mode is deactivated for federated search. Only the local portion of the search has been processed.
|
|
action = Run the federated search in fast mode to process both its remote and local sides.
|
|
severity = warn
|
|
|
|
[DATA_FEDERATION:FAILED_EXTRACT_DEPLOYMENT__S]
|
|
message = Failed to extract deployment information for federated index '%s'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:MSTATS_FROM_CLAUSE]
|
|
message = Unexpected syntax. The mstats command does not support the FROM clause.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FAILED_EXTRACT_REMOTE_DEPLOYMENT__S]
|
|
message = Failed to extract remote information for data model dataset '%s'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FAILED_EXTRACT_TSTATS_DEPLOYMENT__S]
|
|
message = Failed to extract tstats deployment information for federated index '%s'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FAILED_REMOTE_INDEX_EXTRACTION__S]
|
|
message = Failed to extract the remote federated index '%s'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:DM_SS_NOT_SUPPORTED__S]
|
|
message = Invalid dataset type specified for federated index '%s'. In federated searches, the 'search' command supports only the index dataset type.
|
|
action = For federated searches, use the 'from' command to reference saved search datasets.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FROM_INVALID_DATASET__S]
|
|
message = Invalid dataset type specified for federated index '%s'. In federated searches, the 'from' command supports only the saved search dataset type.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:DM_SS_FAILED_REWRITE__S]
|
|
message = During the federated search process, the system failed to convert the 'from' dataset for federated index '%s'.
|
|
action = When you use the 'from' command for a federated search, ensure that it is referencing a valid dataset type.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:UNKNOWN_DATASET_TYPE]
|
|
message = Unknown federated dataset type.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:UNSUPPORTED_FEDERATED_SEARCH__S]
|
|
message = This federated search is not currently supported. search='%s'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_FEDERATED_INDEX_NAME__S]
|
|
message = Invalid federated index name. name='%s'
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_FEDERATED_PROVIDER__S]
|
|
message = Invalid federated provider. provider='%s'
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:SHOULD_NOT_CHANGE_PROVIDER]
|
|
message = You cannot edit the name of a federated provider.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_DATA_SET__S]
|
|
message = Invalid dataset. dataset='%s'
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:SHOULD_NOT_CHANGE_DATASET]
|
|
message = You cannot edit the 'federated.dataset' value for a federated index after the federated index has been created.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:SDS_FEATURE_FLAG_DISABLED]
|
|
message = Federated Search for Amazon S3 is disabled.
|
|
action = To create a federated index with a federated provider of the 'aws_s3' type, enable Federated Search for Amazon S3 by setting 'enabled=true' within the 'structured_data_service' stanza in limits.conf.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:SDS_FEATURE_FLAG_DISABLED_PROVIDER]
|
|
message = Federated Search for Amazon S3 is disabled.
|
|
action = Enable Federated Search for Amazon S3 by setting 'enabled=true' within the 'structured_data_service' stanza in limits.conf.
|
|
severity = error
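# For illustration only: the actions above describe enabling Federated Search
# for Amazon S3 in limits.conf. A minimal sketch, assuming the stanza and
# setting names given in those messages:
#
#   [structured_data_service]
#   enabled = true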
|
|
|
|
[DATA_FEDERATION:INVALID_AWS_S3_DATA_SET_TYPE__S]
|
|
message = Invalid dataset type for dataset='%s'.
|
|
action = Provide a dataset type of 'aws_glue_table'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:SHOULD_NOT_CHOOSE_AWS_GLUE_TABLE_DATA_SET_TYPE]
|
|
message = You cannot select 'aws_glue_table' as the federated index dataset type without also setting the 'database' and 'aws_glue_tables_allowlist' for the related federated provider.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_AWS_GLUE_TABLE_DATA_SET_TABLE_NAME__S]
|
|
message = Invalid AWS Glue table='%s'. The table must be listed on the 'aws_glue_tables_allowlist' for this federated provider.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:UNSUPPORTED_CHAR_IN_AWS_GLUE_TABLE__S]
|
|
message = The AWS Glue table '%s' has an unsupported character in its name. Valid AWS Glue table names contain only lowercase letters, numbers, hyphens, underscores, and wildcards.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_VALUE_IN_ACS_API_ENV__S]
|
|
message = The server.acs_api.environment value '%s' in server.conf is invalid. Valid values include 'prod' or 'staging'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:UNSUPPORTED_CHAR_IN_ACS_API_VERS__S]
|
|
message = The server.acs_api.version value '%s' in server.conf has an unsupported character. Valid values contain only uppercase letters, lowercase letters and numbers.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_PATH_PREFIX__S]
|
|
message = Invalid path: "%s". AWS S3 location path URIs must start with "s3://".
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_KMS_KEY_ARN_PREFIX__S]
|
|
message = Invalid AWS KMS key ARN: "%s". The format of an AWS KMS key ARN is as follows: "arn:<aws_partition>:kms:<region>:<account-id>:key/<key-id>".
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:EMPTY_PATH]
|
|
message = Invalid path. AWS S3 location path URIs cannot be empty.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:WILDCARDS_IN_MIDDLE_OF_THE_PATH__S]
|
|
message = Invalid path: "%s". The AWS S3 location path can contain wildcards only at the end of the path.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:MISSING_TIMEFIELD]
|
|
message = A time field is required for this federated index.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_TIME_FIELD_EMPTY]
|
|
message = The 'Time Field' cannot be empty.
|
|
action = Give the 'Time field' a value. If you do not want to provide a 'Time field', select 'Time Settings not required'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_UNIX_TIME_FIELD_EMPTY]
|
|
message = The UNIX time field cannot be empty.
|
|
action = When a time field is given, the UNIX time field must not be empty and must differ from the time field.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:MISSING_TIMEFORMAT]
|
|
message = A time format string is required for the time field associated with this federated index.
|
|
action = Provide a valid time format string, in strptime() format.
|
|
help = sds.time.format
|
|
|
|
[DATA_FEDERATION:INVALID_TIMEFORMAT]
|
|
message = Time format string is invalid.
|
|
action = Provide a valid time format string, in strptime() format.
|
|
help = sds.time.format
|
|
|
|
[DATA_FEDERATION:MISSING_PARTITION_TIME_ARG__S]
|
|
message = Partition time setting '%s' is missing.
|
|
action = Specify a time field name, time format, and field type for each partition time level you define.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:UNIX_TIME_FIELD_SAME_AS_TIME_FIELD__S]
|
|
message = The 'Unix time field' '%s' is the same as the 'Time field' value.
|
|
action = Change the value of the 'Unix time field' so it does not match the value of the 'Time field'. If the 'Time field' is set to '_time', the 'Unix time field' must be a unique field name that is not shared by any other fields in the dataset to which the federated index maps.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PARTITION_SETTINGS_LENGTH]
|
|
message = Each partition time setting level must contain a time field, time format, and time field type.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_PARTITION_TIME_FIELD_EMPTY]
|
|
message = Partition time field cannot be empty.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_PARTITION_TIME_FORMAT_EMPTY]
|
|
message = Partition time format cannot be empty.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_PARTITION_TIME_TYPE_EMPTY]
|
|
message = Partition time type cannot be empty.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_PARTITION_TIME_FORMAT__S]
|
|
message = Partition time format '%s' is invalid.
|
|
action = Provide a valid time format string, using Splunk-supported strptime() time format variables.
|
|
help = sds.time.format
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_PARTITION_TIME_TYPE__S]
|
|
message = Partition time field type '%s' is not allowed.
|
|
action = Set the type to either string, integer, or date.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:UNSUPPORTED_ARGUMENT__S_S]
|
|
message = The argument '%s' is invalid for a federated provider with a type of '%s'. Provide a valid argument.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:UNSUPPORTED_ARGUMENT_FOR_DATASET__S_S]
|
|
message = The argument '%s' is invalid for dataset '%s'.
|
|
action = To support this argument, change the dataset type to 'aws_s3_path'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:UNSUPPORTED_ARGUMENT_FOR_AWS_S3_PATH_DATASET__S]
|
|
message = The argument '%s' is invalid for 'aws_s3_path' type dataset.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:S3_PATH_NOT_ALLOWED__S]
|
|
message = The aws_s3_path '%s' is not allowed.
|
|
action = Provide an Amazon S3 path that is listed on the federated provider's 'aws_s3_paths_allowlist' or is a subpath of one of the 'aws_s3_paths_allowlist' paths.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:RESOURCE_SHARE_MISSING_EXTERNALID__S]
|
|
message = Invalid resource share name: "%s". The AWS provided resource share name must be suffixed with the provided external ID.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_RESOURCE_SHARE_ARN__S]
|
|
message = Invalid resource share ARN: "%s". The format of an AWS resource share ARN is as follows: "arn:<aws_partition>:ram:<region>:<account-id>:resource-share/<resource-share-id>".
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_RESOURCE_SHARE_REGION__S]
|
|
message = Invalid region found in resource share ARN: "%s". The resource share must be located in the same region as your Splunk instance.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:ACCOUNT_REGION_IN_USE__S_S]
|
|
message = A federated provider with account ID '%s' in region '%s' already exists.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:EDIT_RESOURCE_SHARE_MATCH]
|
|
message = To update resources in a Lake Provider, you must use the same resource share information that was originally provided.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:WILDCARDS_EXPANSION_FAILED]
|
|
message = Wildcard failed to expand a federated index.
|
|
severity = error
|
|
action = Consult the Job Inspector for this search and review the information at the search.job link for possible solutions to this issue. For further assistance resolving the failure, contact Customer Support, or, if you have a support contract, file a new case using the Splunk Support Portal.
|
|
|
|
[DATA_FEDERATION:WILDCARDS_UNEXPECTED_INDEX__S]
|
|
message = Found unexpected index='%s' while matching federated index names to a wildcard.
|
|
severity = error
|
|
action = Rewrite your search so that the wildcard returns only names of federated indexes. If the problem persists, contact Customer Support, or, if you have a support contract, file a new case using the Splunk Support Portal.
|
|
|
|
[DATA_FEDERATION:PROVIDER_DETAILS_MISSING__S]
|
|
message = Unable to obtain details for federated provider='%s'.
|
|
severity = error
|
|
action = Review the federated indexes you are using in this search and verify that their federated providers exist.
|
|
|
|
[DATA_FEDERATION:INVALID_FILE_TYPE__S]
|
|
message = File type '%s' is invalid. Valid file types are 'CSV', 'JSON' and 'Parquet'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_COMPRESSION_TYPE__S]
|
|
message = Compression type '%s' is invalid. Valid compression types are 'GZIP', 'BZIP2' and 'NONE'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_COMPRESSION_TYPE_FOR_PARQUET__S]
|
|
message = Compression type '%s' is invalid. Valid compression type for 'Parquet' file type is 'NONE'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_FEDERATED_TYPE__S]
|
|
message = Invalid value for federated provider type. provider='%s'
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:NOT_ALLOWED_SPECIAL_CHARACTER_IN_SEARCH__S]
|
|
message = The federated index name '%s' contains invalid special characters.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:NOT_ALLOWED_SPECIAL_CHARACTER_IN_TSTATS__S]
|
|
message = The federated index name '%s' used in the tstats command contains invalid special characters.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FEDERATED_DEPLOYMENT_DOES_NOT_HAVE_FSH_BUNDLE__S_S]
|
|
message = Federated provider '%s' does not yet have the knowledge object for '%s'.
|
|
action = Try your search again in a moment. If the error persists contact Customer Support, or, if you have a support contract, file a new case using the Splunk Support Portal.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FEDERATED_DEPLOYMENT_DOES_NOT_SUPPORT_ASYNC_MODE]
|
|
message = Federated providers support only synchronous REST calls.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PROVIDER_IN_USE__S_D]
|
|
message = Federated provider '%s' is already referenced by %d index(es).
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PROVIDER_IN_USE_MODE_CHANGE__S]
|
|
message = The federated provider '%s' cannot be set to transparent mode because it is currently referenced by a federated index.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PROVIDER_HOST_PORT_INVALID__S_S]
|
|
message = Host/Port '%s' in federated provider '%s' is invalid.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PROVIDER_HOST_IP_DISALLOWED__S_S]
|
|
message = Host IP '%s' in federated provider '%s' is not allowed.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PROVIDER_INVALID_BOOL__S_S]
|
|
message = Field '%s' in federated provider '%s' is invalid.
|
|
action = Correct values can be 0|1 or false|true
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FAILURE_TO_RETRIEVE_STACK_REGION]
|
|
message = Unable to retrieve the AWS region in which your Splunk Cloud Platform environment is running.
|
|
action = If you know the AWS region of your Splunk Cloud Platform deployment you can enter it manually. Otherwise, contact Customer Support, or, if you have a support contract, file a new case using the Splunk Support Portal.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FAILURE_TO_RETRIEVE_STACK_DETAILS]
|
|
message = Unable to retrieve the AWS stack details for your Splunk Cloud Platform environment.
|
|
action = Confirm that your Splunk Cloud Platform deployment is on AWS. Then, contact Customer Support, or, if you have a support contract, file a new case using the Splunk Support Portal.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FAILURE_TO_DETERMINE_REGION_FROM_DATACATALOG_FOR_PROVIDER__S]
|
|
message = Unable to retrieve region from data catalog for provider: '%s'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FAILURE_TO_DETERMINE_ACCOUNT_ID_FROM_DATACATALOG_FOR_PROVIDER__S]
|
|
message = Unable to retrieve the account ID from the data catalog for provider: '%s'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:EXTERNAL_ID_FS_EMPTY]
|
|
message = The external ID for federated indexes is currently not set.
|
|
action = To ensure the External ID is set properly, contact Splunk support.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:EXTERNAL_ID_INGEST_EMPTY]
|
|
message = The external ID for quick indexes is currently not set.
|
|
action = To ensure the External ID is set properly, contact Splunk support.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:UNEXPECTED_STACK_ARN_FORMAT__S]
|
|
message = Unexpected format for AWS stack ARN:'%s'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FAILURE_TO_SET_POLICY]
|
|
message = Unable to apply updated deployment permissions to your Splunk Cloud Platform environment.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FAILURE_TO_GET_POLICY]
|
|
message = Unable to retrieve the deployment permissions applied to your Splunk Cloud Platform environment.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:TOKEN_AUTH_DISABLED]
|
|
message = You must enable token authentication so Splunk software can apply updated deployment permissions to your Splunk Cloud Platform environment.
|
|
action = Go to Settings > Tokens and enable token authentication. Then go to Settings > Federated search and select Update Permissions.
|
|
severity = error
|
|
help = sds.authtoken.enable
|
|
|
|
[DATA_FEDERATION:FAILURE_IN_PASSWORD_ENCRYPTION__S]
|
|
message = Failed to save password for remote provider: '%s'.
|
|
action = Try again and if the problem continues contact Splunk support.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FAILURE_IN_PASSWORD_EXTRACTION__S]
|
|
message = Unable to obtain password information for federated provider: '%s'.
|
|
action = Select 'Test Connection' to validate the password for the federated provider.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FAILURE_TO_CREATE_AST_FROM_SEARCH__S]
|
|
message = This federated search '%s' contains SPL that is currently unsupported.
|
|
action = Review the documentation on writing and running federated searches.
|
|
severity = error
|
|
help = learnmore.run.federated.search
|
|
|
|
[DATA_FEDERATION:PROVIDER_EMPTY_STRING__S_S]
|
|
message = Field '%s' in the federated provider '%s' requires a value.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:REMOTE_QUERY_GENERATION_ERROR__S]
|
|
message = Failed to create remote query for the federated search. Reason='%s'
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:PROVIDERS_EXTRACTED_INFO__LLU]
|
|
message = Number of involved federated deployments is %llu.
|
|
severity = info
|
|
|
|
[DATA_FEDERATION:FED_INDEX_NOT_AUTHORIZED__S_S]
|
|
message = User '%s' does not have access to index '%s'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:INVALID_PROVIDER_MODE__S_S]
|
|
message = The 'mode' value of '%s' for federated provider '%s' is invalid.
|
|
severity = error
|
|
action = Give 'mode' a value of either 'standard' or 'transparent'.
|
|
|
|
[DATA_FEDERATION:IGNORE_FED_REMOTE_DATASET__S]
|
|
message = The federated index '%s' is mapped to a federated provider that is in 'transparent' mode. The remote dataset that this federated index maps to will be ignored.
|
|
severity = warn
|
|
|
|
[DATA_FEDERATION:REMOTE_SERVICE_ACC_CAP_NOT_SET]
|
|
message = The 'FSH_MANAGE' capability is not set on the federated provider service account. This capability is required when 'Local Knowledge Objects' is enabled for the provider.
|
|
severity = error
|
|
action = Enable the 'FSH_MANAGE' capability on the service account of the federated provider.
|
|
|
|
[DATA_FEDERATION:FED_INDEX_NOT_FOUND__S]
|
|
message = Federated index '%s' was not found.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FED_INDEX_ALREADY_EXISTS__S]
|
|
message = Federated index '%s' already exists.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FEDERATED_PROVIDER_IGNORED_HB_FAILED__S]
|
|
message = Not sending search to federated provider '%s' because the provider cannot be reached.
|
|
action = Verify the validity of the federated provider's ip:hostname.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:TRANSPARENT_PROVIDERS_USE_SAME_PROVIDER__S_S_S_S]
|
|
message = Transparent mode federated providers '%s' and '%s' point to the same host or cluster '%s'. For this search, using only '%s'.
|
|
action = Delete one of these two providers.
|
|
severity = warn
|
|
|
|
[DATA_FEDERATION:FEDERATED_PROVIDER_HB_INTERVAL_ERROR__S_S]
|
|
message = heartbeatInterval value '%s' should be greater than '%s'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:CONTROL_COMMANDS_MAX_THREADS_ERROR__S_S]
|
|
message = The maximum number of search process threads for a federated search action (such as a search pause or search cancellation) on federated provider '%s' should be greater than '%s'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:CONTROL_COMMANDS_MAX_TIME_THRESHOLD_ERROR__S_S]
|
|
message = The supplied maximum timeout threshold '%s' for a federated search action (such as a search pause or cancellation) must be equal to or greater than the default of '%s' seconds.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:TRANSPARENT_PROVIDER_CANT_SET_SPECIAL_APP__S_S]
|
|
message = Transparent remote provider '%s' cannot have a dedicated 'appContext' '%s'.
|
|
action = Do not pass an appContext argument for a transparent remote provider through the REST API or federated.conf.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:CONNECTIVITY_HB_FAILURES_THRESHOLD__S_LU]
|
|
message = connectivityFailuresThreshold value '%s' should be greater than '%lu'.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:REMOTE_PROVIDER_CAN_NOT_LOOP_BACK_TO_ITSELF__S]
|
|
message = Remote provider '%s' points to the federated search head.
|
|
action = Change the ip:host of this remote provider.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FEDERATED_SEARCH_STANZA_MISSING__%s]
|
|
message = Cannot locate a 'federated_search' stanza in server.conf. Cannot change '%s' setting.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FEDERATED_SEARCH_VERBOSE_STANZA_MISSING__%s]
|
|
message = Cannot locate a 'general' stanza in federated.conf. Cannot change '%s' setting.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:TRANSPARENT_MODE_SETTING_VALUE_FORMAT__S]
|
|
message = Transparent mode setting '%s' is invalid.
|
|
severity = error
|
|
action = Give 'transparent_mode' a value of either 'true' or 'false'.
|
|
|
|
[DATA_FEDERATION:FEDERATED_GENERAL_SETTINGS_MISSING]
|
|
message = Cannot find federated general settings from configuration.
|
|
severity = error
|
|
action = Try again and if the problem continues contact Splunk support.
|
|
|
|
[DATA_FEDERATION:LOOKUP_COMMAND_BEFORE_TSTATS_GENERATING_COMMAND]
|
|
message = A 'lookup' command has been discovered before a 'tstats' command.
|
|
severity = warn
|
|
action = Review the search and ensure the lookup is placed after the last generating command.
|
|
|
|
[DATA_FEDERATION:SEARCH_FORCED_TO_RUN_LOCALLY__S]
|
|
message = The search '%s' contains commands that force it to be processed only on your local Splunk deployment. This search is not processed on remote federated providers.
|
|
severity = warn
|
|
|
|
[DATA_FEDERATION:FEDERATED_SEARCH_UNSUPPORTED_CMD_STANDARD_MODE__S]
|
|
message = The '%s' command is not supported by standard mode federated searches.
|
|
action = Rewrite your search so that it does not use this command, or remove federated index references from the search so that the search is not federated.
|
|
severity = error
|
|
|
|
[DATA_FEDERATION:FEDERATED_SEARCH_UNSUPPORTED_CMD_TRANSPARENT_MODE__S]
|
|
message = The '%s' command is not supported by transparent mode federated searches and cannot run on any of the remote providers.
|
|
severity = warn
|
|
|
|
[DATA_FEDERATION:FEDERATED_SEARCH_UNSUPPORTED_CMD_OLD_VERSION_TRANSPARENT_MODE__S_S_S]
|
|
message = The '%s' command is not supported by transparent mode federated searches on these remote providers with lower versions: '%s'.
|
|
action = Upgrade the federated providers to later versions that fully support this command. %s
|
|
severity = warn
|
|
|
|
|
|
[DATA_MODEL_HANDLER]
|
|
name = Data Model Handler
|
|
|
|
[DATA_MODEL_HANDLER:BAD_EVAL_OUTPUT_FIELD]
|
|
message = Cannot have multiple output fields for Eval.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:BASE_SEARCH_EMPTY]
|
|
message = Cannot have empty base search for root search dataset.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:CANNOT_ACCELERATE__S]
|
|
message = Cannot accelerate data model '%s'. Only root event or streaming-based root search can be accelerated.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:CYCLE_DETECTED__S_S]
|
|
message = Detected a cyclic dependency while creating datamodel=%s in objectName=%s.
|
|
action = Locate and remove the cyclic dependency, then reattempt the operation.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:DUPLICATE_FIELD__S_S]
|
|
message = Cannot add field '%s' because it already exists in dataset '%s'.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:DUPLICATE_OBJECT_NAME__S]
|
|
message = Cannot add dataset '%s' because a dataset with that ID already exists.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:EMPTY_EXPRESSION__S]
|
|
message = Cannot have empty expression for %s.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:EMPTY_FIELD_NAME]
|
|
message = Invalid field name.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:EMPTY_INPUT_FIELD__S]
|
|
message = Cannot have empty input field for %s.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:EMPTY_LOOKUP_NAME]
|
|
message = Cannot have empty lookup name.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:EMPTY_OUTPUT_FIELD__S]
|
|
message = Cannot have empty output field for %s.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:FAILED_TO_LOAD__S]
|
|
message = Error loading data model '%s'
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:FIELD_INFO_ALREADY_EXISTS]
|
|
message = The fields and field_coverage arguments cannot be used together.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:FIELD_NOT_FOUND__S_S]
|
|
message = The dataset '%s' has no field '%s'.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:INVALID_ACCELERATION_PARAMETER__S_S]
|
|
message = Acceleration setting for data model '%s.%s' is invalid.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:INVALID_FIELD_COVERAGE]
|
|
message = Invalid field_coverage value.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:INVALID_FIELD_TYPE]
|
|
message = Invalid field type.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:INVALID_GEOIP_FIELD__S]
|
|
message = Invalid GeoIP output field '%s'.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:INVALID_JSON__S]
|
|
message = JSON for data model '%s' is invalid.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:MODEL_ALREADY_EXISTS__S]
|
|
message = A data model with the name '%s' already exists.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:MISSING_KEY__S]
|
|
message = Key '%s' was missing from JSON document.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:MISSING_PARENT__S]
|
|
message = Could not load parent dataset '%s'.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:NO_OBJECTS__S]
|
|
message = Cannot accelerate data model with no datasets: '%s'.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:NOT_FOUND__S]
|
|
message = Data model '%s' was not found.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:NULL_CHILD__S]
|
|
message = Child dataset of parent '%s' is null.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:PIPE_IN_CONSTRAINT]
|
|
message = Dataset constraints cannot contain pipes.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:SPACE_IN_OUTPUT_FIELD__S]
|
|
message = Output field names in %s cannot contain spaces.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:TRANSACTION_INVALID_GROUPBY_FIELD]
|
|
message = This root transaction dataset includes one or more invalid Group by fields.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:TRANSACTION_MISSING_GROUPBY_OBJECT__S]
|
|
message = Could not load dataset '%s' in group.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:TRANSACTION_MISSING_GROUPBY_SPAN_PAUSE]
|
|
message = Root transaction datasets require at least one Group by field, Max Pause value, or Max Span value.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:TRANSACTION_MISSING_OBJECTS_TO_GROUP]
|
|
message = Root transaction datasets require at least one dataset to group.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:TRANSACTION_MULTIPLE_OBJECTS_TO_GROUP_NOT_BASE_EVENT]
|
|
message = Transacting over multiple datasets requires all datasets to group be of type BaseEvent.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:RESERVED_OBJECT_NAME__S]
|
|
message = Dataset name '%s' is reserved. Choose another.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:SUBSEARCH_IN_CONSTRAINT]
|
|
message = Dataset constraints cannot contain subsearches.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:UNDEFINED_OBJECT_TYPE__S_S]
|
|
message = JSON for data model '%s' had bad 'parentName' value '%s'.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:EXPLICIT_INDEX_SCOPE]
|
|
message = Dataset constraints must specify at least one index.
|
|
severity = error
|
|
|
|
[DATA_MODEL_HANDLER:ACCELERATION_STORE_VALUE_INVALID__S_S_S]
|
|
message = The summary type '%s' is invalid. Valid summary type values are '%s' or '%s'.
|
|
severity = error
|
|
|
|
[SEARCHPROC:FOUND_CYCLIC_DEPENDENCY_WHEN_EXPANDING__S]
|
|
message = Detected a cyclic dependency between datasets when attempting to run knowledge object '%s'.
|
|
action = Locate and remove the cyclic dependency, then reattempt usage of the knowledge object.
|
|
severity = error
|
|
|
|
|
|
[DEDUP]
|
|
name = Dedup Operator
|
|
|
|
[DEDUP:EMPTY_SORTBY_FIELD]
|
|
message = You must provide a sortby field name.
|
|
severity = error
|
|
|
|
[DEDUP:INVALID_LIMIT]
|
|
message = The limit option is invalid. It must be >= 1.
|
|
severity = error
|
|
|
|
[DEDUP:MISSING_FIELDS]
|
|
message = At least one field must be given as an argument.
|
|
severity = error
|
|
|
|
[DEDUP:MULTIPLE_FIELD_SPEC__S]
|
|
message = The field '%s' is specified multiple times. It should only be specified once.
|
|
severity = error
|
|
|
|
[DEDUP:SORTBY_NOT_FOR_PREDEDUP]
|
|
message = The sortby clause is not for prededup.
|
|
severity = error
|
|
|
|
|
|
[DEPLOYMENT_SERVER]
|
|
name = Deployment Server
|
|
|
|
[DEPLOYMENT_SERVER:CANNOT_ACQUIRE_LOCK_FILE__S]
|
|
message = Failed to reload. This deployment server cannot acquire the lock file '%s'. Another deployment server is likely also currently processing a reload.
|
|
action = Wait for the other deployment server to complete its reload, and try again. However, if the process holding the lock has been killed, you must manually remove the lock file to rectify the situation.
|
|
severity = error
|
|
|
|
[DEPLOYMENT_SERVER:RELOAD_CONFLICT_FOUND]
|
|
message = The local deployment server found a conflict that prevented it from performing a reload.
|
|
severity = error
|
|
|
|
[DEPLOYMENT_SERVER:RELOAD_CONFLICT_YET_TO_SYNC]
|
|
message = Another deployment server has committed some changes to serverclass.conf, but those changes have not yet been synced to the local serverclass.conf file on this deployment server.
|
|
action = Wait for the local deployment server to complete its next reload check cycle.
|
|
severity = error
|
|
|
|
[DEPLOYMENT_SERVER:RELOAD_CONFLICT_MANUALLY_MODIFIED]
|
|
message = The following serverclass.conf files have been edited directly in the shared directory. Avoid directly editing files in the shared directory.
|
|
action = Review the changes to the shared files and manually migrate those changes to the corresponding serverclass.conf file on the local deployment server. Then remove the serverclass.conf file in the shared directory and run `splunk reload deploy-server`. You can add `-ignore-conflict true` to the command to force the reload and overwrite the file in the shared directory.
|
|
severity = error
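# The commands below restate the reload described in the action above; they are
# taken directly from that action text, with no additional flags assumed.
#
#   splunk reload deploy-server
#   splunk reload deploy-server -ignore-conflict true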
|
|
|
|
|
|
[DISCRETIZATION]
|
|
name = Discretization Processor
|
|
|
|
[DISCRETIZATION:ALIGN_TIMESPAN_ONLY]
|
|
message = The timealign option is only valid for time based discretization.
|
|
severity = error
|
|
|
|
[DISCRETIZATION:EXPECTED_FIELD__S]
|
|
message = Expected a field name after '%s'.
|
|
severity = error
|
|
|
|
[DISCRETIZATION:FAILED_VAL_FIELD__S_S]
|
|
message = Failed to discretize value '%s' of field '%s'.
|
|
severity = error
|
|
|
|
[DISCRETIZATION:INVALID_ALIGNTIME]
|
|
message = Invalid value for aligntime. Must be 'earliest', 'latest', or a valid time (relative times e.g. '-1d@d' are acceptable).
|
|
severity = error
|
|
|
|
[DISCRETIZATION:INVALID_LOG_SPAN__S_S]
|
|
message = The log span '%s' is invalid. %s.
|
|
severity = error
|
|
|
|
[DISCRETIZATION:INVALID_NUM_BINS__LU]
|
|
message = The number of bins must be >= 2 and <= %lu.
|
|
severity = error
|
|
|
|
[DISCRETIZATION:INVALID_NUM_SPAN__S]
|
|
message = The numerical span '%s' is invalid. It must be > 0.
|
|
severity = error
|
|
|
|
[DISCRETIZATION:INVALID_OPT_VAL__S_S]
|
|
message = The value for option %s (%s) is invalid.
|
|
severity = error
|
|
|
|
[DISCRETIZATION:INVALID_OPT_VAL_MOD_NOT_ZERO__S_S]
|
|
message = The value for option %s (%s) is invalid. When span is expressed using a sub-second unit (ds, cs, ms, us), the span value needs to be < 1 second, and 1 second must be evenly divisible by the span value.
|
|
severity = error
|
|
|
|
[DISCRETIZATION:MISSING_FIELD]
|
|
message = You must specify a field to discretize.
|
|
severity = error
|
|
|
|
[DISCRETIZATION:MISSING_FIELD__S]
|
|
message = Field '%s' does not exist in the data.
|
|
severity = error
|
|
|
|
[DISCRETIZATION:NOT_NUMERICAL_FIELD__S]
|
|
message = Field '%s' should have numerical values.
|
|
severity = error
|
|
|
|
[DISCRETIZATION:TOO_MANY_BINS__S_LU]
|
|
message = Span value '%s' results in too many (> %lu) bins. Edit limits.conf to change limits.
|
|
severity = error
|
|
|
|
[DISCRETIZATION:VAL_GTE_VAL__S_S]
|
|
message = The %s value must be >= %s value.
|
|
severity = error
|
|
|
|
|
|
[DISK_MON]
|
|
name = Disk Monitor
|
|
|
|
[DISK_MON:INSUFFICIENT_DISK_SPACE_ERROR__S_LLU_LLU_S_S]
|
|
message = The index processor has paused data flow. Current free disk space on partition '%s' has fallen to %lluMB, below the minimum of %lluMB. Data writes to index path '%s' cannot safely proceed.
|
|
action = Increase free disk space on partition '%s' by removing or relocating data.
|
|
severity = warn
|
|
help = message.stall.indexer.diskspace
|
|
capabilities = indexes_edit
|
|
|
|
|
|
[DISPATCHCOMM]
|
|
name = Dispatch Command Processor
|
|
|
|
[DISPATCHCOMM:CANNOT_DISPATCH_SEARCH_ON_DED_FORWARDER]
|
|
message = Cannot dispatch search on a Universal Forwarder.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:CANNOT_FIND_JOB_INFO_FILE__S]
|
|
message = Could not find the expected job info file '%s'.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:CANNOT_MATCH_ROLE_WITH_USERNAME__S]
|
|
message = Dispatch Manager could not associate a role with username '%s'.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:CHANGING_PRIORITY_WHILE_RUNNING_NOT_ALLOWED__S]
|
|
message = Changing priority '%s' while running as a splunkd thread is not allowed. Ignoring.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:CLUSTER_PEER_FAILURE_RETRY__S]
|
|
message = A portion of the search ended prematurely due to failure for indexer: %s. Attempting to recover the search.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:CLUSTER_RETRY_SUCCESS]
|
|
message = Cluster has recovered from premature search termination on peer(s).
|
|
severity = info
|
|
|
|
[DISPATCHCOMM:COMMAND_ORDER_ERROR_S]
|
|
message = This command cannot be invoked after the command '%s' because it expects events in descending time order.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:CORRUPT_FILE__S]
|
|
message = The file '%s' is corrupt.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:ERROR_WHILE_CHANGING_JOB_PRIORITY__M_S]
|
|
message = Received the following while changing the job priority (%m): '%s'.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:EXCLUDED_QUARANTINED_PEERS]
|
|
message = One or more peers has been excluded from the search because they have been quarantined. Use "splunk_server=*" to search these peers. This might affect search performance.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:QUARANTINED_PEERS_CAUSED_SEARCH_FAILURE]
|
|
message = The search job has failed because some peers have been quarantined and allow_partial_results is false.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:FAILED_CONN_HSE__S_S]
|
|
message = Failed to connect with url '%s' because of %s.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:FAILED_CONN_STATUS__S_S_D]
|
|
message = Failed to connect with url '%s' because of %s. status code = %d.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:FAILED_CREATE_DIR__S]
|
|
message = Failed to create a directory at %s.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:FAILED_DOWNLOAD_DIR__S_S_S]
|
|
message = Failed to download dispatch directory for search sid=%s ddir=%s err="%s"
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:FAILED_FIND_INFO__S]
|
|
message = Failed to find the info file to %s. Search process not started.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:FAILED_SETUP_GEN__S]
|
|
message = Failed generation setup, reason: %s.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:FAILED_TO_REAP_BUNDLE_DIRECTORY__S_M]
|
|
message = Failed to reap bundle_directory '%s' because of %m.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:FAILED_TO_START_PROCESS__S]
|
|
message = Failed to start the search process for sid=%s.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:FAILED_WRITE_INFO__S]
|
|
message = Failed to write the info file to %s.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:FILE_NO_LONGER_EXISTS__S]
|
|
message = File '%s' no longer exists.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:FIRST_COMMAND]
|
|
message = This command must be the first command of a search.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:INPUT_RESULTS_IGNORED]
|
|
message = Input results into the dispatch command are being ignored.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:INSUFFICIENT_PRIVILEGES__S]
|
|
message = The user '%s' does not have sufficient search privileges.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:INVALID_FIRST_COMMAND__S]
|
|
message = The '%s' command cannot be the first command in a search.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:INVALID_ID__S]
|
|
message = The ID value is invalid. ..%s is not allowed.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:INVALID_MAXRESULTS]
|
|
message = The 'maxresults' option must have value > 0.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:INVALID_SEARCH_TARGET_INDEX]
|
|
message = One or more indexes specified in search does not exist on any of the queried peer(s).
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:INVALID_SEARCH_TARGET_OPTIMIZATION]
|
|
message = Search filters specified using splunk_server/splunk_server_group do not match any search peer.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:INVALID_TTL]
|
|
message = The 'ttl' option must have value > 0.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:INVALID_VALUE_FOR_AUTO_CANCEL__S]
|
|
message = The auto_cancel value '%s' is invalid.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:INVALID_VALUE_FOR_AUTO_PAUSE__S]
|
|
message = The auto_pause value '%s' is invalid.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:LICENSE_STATUS__D]
|
|
message = Local Search Feature disabled by licenser (status=%d).
|
|
severity = info
|
|
|
|
[DISPATCHCOMM:LOW_SEARCH_DISK_SPACE__LLU_S_S]
|
|
message = The minimum free disk space (%lluMB) reached for %s. %s
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:MAX_CONCURRENT_CUM_ROLE_BASED_SEARCHES__S_S_LU_LU]
|
|
message = The maximum number of concurrent %s searches for the role=%s has been reached. quota=%lu usage=%lu.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:MAX_CONCURRENT_PER_INSTANCE_SEARCHES__S]
|
|
message = The maximum number of concurrent %s searches on this instance has been reached.
|
|
severity = info
|
|
|
|
[DISPATCHCOMM:MAX_CONCURRENT_PER_INSTANCE_ADHOC_SEARCHES__S]
|
|
message = The maximum number of concurrent %s ad hoc searches on this instance has been reached.
|
|
severity = info
|
|
|
|
[DISPATCHCOMM:MAX_CONCURRENT_SEARCHES__S_LU_LU]
|
|
message = This search could not be dispatched because the role-based concurrency limit of historical searches for user "%s" has been reached (usage=%lu, quota=%lu).
|
|
action = Wait for some of your running historical searches to complete or ask your Splunk administrator to increase the search concurrency limit of historical searches for your role in authorize.conf.
|
|
severity = error
|
|
capabilities = search
|
|
help = learnmore.security.searchfilter.rolelimits
|
|
message_alternate = The maximum number of concurrent historical searches for this user based on their role quota has been reached.
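# A hedged sketch of the authorize.conf change suggested by the action above. The
# role name 'power' and the value are illustrative; 'srchJobsQuota' is assumed to
# be the per-role concurrency setting for historical searches in authorize.conf.
#
#   # $SPLUNK_HOME/etc/system/local/authorize.conf
#   [role_power]
#   srchJobsQuota = 10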
|
|
|
|
[DISPATCHCOMM:MAX_CONCURRENT_SEARCHES_ALL_ROLES__U_LU_LU_S_S]
|
|
message = The maximum number of concurrent searches has been reached for all roles (roles.count=%u) usage=%lu quota=%lu user=%s while trying to start search. sid=%s
|
|
severity = error
|
|
capabilities = search
|
|
help = learnmore.concurrent.search.limit
|
|
|
|
[DISPATCHCOMM:MAX_CONCURRENT_SEARCHES_CLUSTER_WIDE__S_LU_LU]
|
|
message = This search could not be dispatched because the role-based concurrency limit of historical searches for user "%s" on this cluster has been reached (usage=%lu, quota=%lu).
|
|
action = Wait for some of your running historical searches to complete or ask your Splunk administrator to increase the search concurrency limit of historical searches for your role in authorize.conf.
|
|
severity = error
|
|
capabilities = search
|
|
help = learnmore.security.searchfilter.rolelimits
|
|
message_alternate = The maximum number of concurrent historical searches for this user on this cluster based on their role quota has been reached.
|
|
|
|
[DISPATCHCOMM:MAX_CONCURRENT_SYSWIDE_SEARCHES__S]
|
|
message = The maximum number of concurrent %s searches on this cluster has been reached.
|
|
severity = info
|
|
|
|
[DISPATCHCOMM:MAX_CONCURRENT_SYSWIDE_ADHOC_SEARCHES__S]
|
|
message = The maximum number of concurrent %s ad hoc searches on this cluster has been reached.
|
|
severity = info
|
|
|
|
[DISPATCHCOMM:MEMORY_PERCENTAGE_EXCEEDED__S_F_F]
|
|
message = The search process with sid=%s was forcefully terminated because its relative physical memory usage (%f percent) has exceeded the 'search_process_memory_usage_percentage_threshold' (%f percent) setting in limits.conf.
|
|
severity = error
|
|
capabilities = search
|
|
help = message.search.threshhold.reached
|
|
|
|
[DISPATCHCOMM:MEMORY_USAGE_EXCEEDED__S_F_F]
|
|
message = The search process with sid=%s was forcefully terminated because its physical memory usage (%f MB) has exceeded the 'search_process_memory_usage_threshold' (%f MB) setting in limits.conf.
|
|
severity = error
|
|
capabilities = search
|
|
help = message.search.threshhold.reached
|
|
|
|
[DISPATCHCOMM:MEMORY_USAGE_PERCENTAGE_EXCEEDED__S_F_F_F_F]
|
|
message = The search process with sid=%s was forcefully terminated because both its physical memory usage (%f MB) and its relative physical memory usage (%f percent) have exceeded the 'search_process_memory_usage_threshold' (%f MB) and 'search_process_memory_usage_percentage_threshold' (%f percent) settings in limits.conf.
|
|
severity = error
|
|
capabilities = search
|
|
help = message.search.threshhold.reached
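# For reference, a hedged example of the two limits.conf settings named in the
# three memory messages above. Placement under the [search] stanza and the values
# shown are assumptions, not defaults.
#
#   # $SPLUNK_HOME/etc/system/local/limits.conf
#   [search]
#   search_process_memory_usage_threshold = 4000
#   search_process_memory_usage_percentage_threshold = 25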
|
|
|
|
[DISPATCHCOMM:MORE_THAN_ONE_RENAME_TAG_DETECTED_FOR_SOURCE_TYPE__S]
|
|
message = More than one rename tag is detected for sourcetype '%s'.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:NO_CONCURRENT_CUM_SEARCHES_LIMIT_APPLIED__S]
|
|
message = The user '%s' has one or more roles with cumulativeSrchJobsQuota set to 0. Cumulative concurrent historical search limits will not be applied to this user.
|
|
action = To apply this limit, ensure that this user does not have any roles with cumulativeSrchJobsQuota set to 0 in authorize.conf. You can also set cumulativeSrchJobsQuota in the UI with the Standard search limit field in the Role search job limit section.
|
|
severity = debug
|
|
capabilities = search
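# Illustrative only: for the cumulative limit above to apply, no role held by the
# user may set cumulativeSrchJobsQuota to 0 in authorize.conf. The role name and
# value are placeholders.
#
#   # $SPLUNK_HOME/etc/system/local/authorize.conf
#   [role_analyst]
#   cumulativeSrchJobsQuota = 50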
|
|
|
|
[DISPATCHCOMM:NO_RT_CONCURRENT_CUM_SEARCHES_LIMIT_APPLIED__S]
|
|
message = The user '%s' has one or more roles with cumulativeRTSrchJobsQuota set to 0. Cumulative concurrent real-time search limits will not be applied to this user.
|
|
action = To apply this limit, ensure that this user does not have any roles with cumulativeRTSrchJobsQuota set to 0 in authorize.conf. You can also set cumulativeRTSrchJobsQuota in the UI with the Real-time search limit field in the Role search job limit section.
|
|
severity = debug
|
|
capabilities = search
|
|
|
|
[DISPATCHCOMM:NO_USER_CONTEXT]
|
|
message = No user context has been set. You cannot run the search.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:INVALID_USER_CONTEXT__S]
|
|
message = User='%s' is invalid. You cannot run the search as this user.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:CAPABILITIES_NOT_FOUND__S]
|
|
message = Capabilities for user='%s' not found. You can't run the search as this user.
|
|
action = Contact your administrator to get the correct capability added to your role.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:NOT_LOGGED_IN]
|
|
message = The user is not logged in.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:NUM_PIPELINE_EXCEEDED__U_U]
|
|
message = Failed to launch the search because the number of pipelines requested=%u exceeds the maximum number of search pipelines=%u.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:OVER_DISK_QUOTA__S_LU_LU]
|
|
message = This search could not be dispatched because the role-based disk usage quota of search artifacts for user "%s" has been reached (usage=%luMB, quota=%luMB).
|
|
action = Use the [[/app/search/job_manager|Job Manager]] to delete some of your search artifacts, or ask your Splunk administrator to increase the disk quota of search artifacts for your role in authorize.conf.
|
|
severity = error
|
|
capabilities = search
|
|
message_alternate = The maximum disk usage quota for this user has been reached.
|
|
help = learnmore.security.searchfilter.rolelimits
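# A hedged sketch of the authorize.conf disk-quota change suggested above. The
# setting 'srchDiskQuota' (in MB) is assumed to control the per-role disk quota
# for search artifacts; the role name and value are illustrative.
#
#   # $SPLUNK_HOME/etc/system/local/authorize.conf
#   [role_power]
#   srchDiskQuota = 500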
|
|
|
|
[DISPATCHCOMM:OVER_DISK_QUOTA_CLUSTER_WIDE__S_LU_LU]
|
|
message = This search could not be dispatched because the role-based disk usage quota of search artifacts for user "%s" on this cluster has been reached (usage=%luMB, quota=%luMB).
|
|
action = Use the [[/app/search/job_manager|Job Manager]] to delete some of your search artifacts, or ask your Splunk administrator to increase the disk quota of search artifacts for your role in authorize.conf.
|
|
severity = error
|
|
capabilities = search
|
|
help = learnmore.security.searchfilter.rolelimits
|
|
message_alternate = The maximum disk usage quota for this user on this cluster has been reached.
|
|
|
|
[DISPATCHCOMM:OVER_RT_SEARCH_QUOTA__S_LU_LU]
|
|
message = This search could not be dispatched because the role-based concurrency limit of real-time searches for user "%s" has been reached (usage=%lu, quota=%lu).
|
|
action = Use the [[/app/search/job_manager|Job Manager]] to cancel some of your running real-time searches or ask your Splunk administrator to increase the search concurrency limit of real-time searches for your role in authorize.conf.
|
|
severity = error
|
|
capabilities = search
|
|
help = learnmore.security.searchfilter.rolelimits
|
|
message_alternate = The maximum number of concurrent real-time searches for this user based on their role quota has been reached.
|
|
|
|
[DISPATCHCOMM:OVER_RT_SEARCH_QUOTA_CLUSTER_WIDE__S_LU_LU]
|
|
message = This search could not be dispatched because the role-based concurrency limit of real-time searches for user "%s" on this cluster has been reached (usage=%lu, quota=%lu).
|
|
action = Use the [[/app/search/job_manager|Job Manager]] to cancel some of your running real-time searches or ask your Splunk administrator to increase the search concurrency limit of real-time searches for your role in authorize.conf.
|
|
severity = error
|
|
capabilities = search
|
|
help = learnmore.security.searchfilter.rolelimits
|
|
message_alternate = The maximum number of concurrent real-time searches for this user on this cluster based on their role quota has been reached.
|
|
|
|
[DISPATCHCOMM:PEER_ERROR_DELETED_PEER__S]
|
|
message = Search job failed because peer '%s' is down
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:PEER_ERROR_EOF]
|
|
message = Search results might be incomplete: the search process on a peer's search ended prematurely.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:PEER_ERROR_EOF__S]
|
|
message = Reading error while waiting for external result provider %s. Search results might be incomplete.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:PEER_ERROR_EOF__S_S_S]
|
|
message = Search results might be incomplete: the search process on peer %s ended prematurely. This can be caused by a variety of reasons.
|
|
action = Consult the [[%s|search.log]] for the remote search and check for a possible crash log in the $SPLUNK_HOME/var/log/splunkd directory for %s.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:PEER_ERROR_ERP_READ_ERROR__S]
|
|
message = Reading error while waiting for external result provider %s. Search results might be incomplete.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:PEER_ERROR_READ_ERROR__S]
|
|
message = Reading error while waiting for peer %s. Search results might be incomplete. This can occur if the peer unexpectedly closes or resets the connection during a planned restart.
|
|
action = Try running the search again. If the problem persists, confirm network connectivity between this instance and the peer, and review search.log and splunkd.log on the peer to check its activity.
|
|
severity = warn
|
|
help = learnmore.idxc.searchable.upgrade
|
|
|
|
[DISPATCHCOMM:PEER_ERROR_TIMEOUT__S]
|
|
message = Timed out waiting for peer %s. Search results might be incomplete.
|
|
action = If this occurs frequently, receiveTimeout in distsearch.conf might need to be increased.
|
|
severity = warn
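# Illustrative only: 'receiveTimeout' from the action above lives in
# distsearch.conf. The [distributedSearch] stanza placement and the value (in
# seconds) are assumptions to be verified against the distsearch.conf spec.
#
#   # $SPLUNK_HOME/etc/system/local/distsearch.conf
#   [distributedSearch]
#   receiveTimeout = 600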
|
|
|
|
[DISPATCHCOMM:PEER_ERROR_UNKNOWN_ERROR__S]
|
|
message = Unknown error for indexer: %s. Search results might be incomplete.
|
|
action = If this occurs frequently, check on the peer.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:PEER_ERROR_RATE_LIMIT_ERROR__S]
|
|
message = Rate limit error returned by the peer %s. Search results might be incomplete. This occurs when the peer rejects the connection due to a high number of running searches.
|
|
action = Try running the search again. If the problem persists, confirm the number of searches running on the peer is below the configured threshold, and review search.log and splunkd.log on the peer to check its activity.
|
|
severity = warn
|
|
help = learnmore.idxc.searchable.upgrade
|
|
|
|
[DISPATCHCOMM:PEER_FAILURE_RETRY__S]
|
|
message = Peer %s's search ended prematurely. Attempting to reconnect and resume.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:PEER_IS_SLOW__S]
|
|
message = Peer '%s' was disconnected because it was too slow and holding up the completion of this search. This can be disabled in limits.conf [slow_peer_disconnect].
|
|
severity = warn
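# The message above names the limits.conf stanza directly. A hedged sketch of
# disabling the behavior; the 'disabled' setting name is an assumption.
#
#   # $SPLUNK_HOME/etc/system/local/limits.conf
#   [slow_peer_disconnect]
#   disabled = true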
|
|
|
|
[DISPATCHCOMM:PEER_NOT_COMPATIBLE__S_S]
|
|
message = The search was not run on the remote peer '%s' due to incompatible peer version ('%s').
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:PEER_PARSE_FAIL__S]
|
|
message = The search process on the peer:%s failed to parse the search or construct search pipelines.
|
|
action = Check the peer search.log and validate your search.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:PEER_PIPE_EXCEPTION__S]
|
|
message = Search results might be incomplete: the search process on the peer:%s ended prematurely.
|
|
action = Check the peer logs, such as $SPLUNK_HOME/var/log/splunk/splunkd.log, as well as the search.log for the particular search.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:PEER_RETRY_SUCCESS__S]
|
|
message = Successfully resumed search on %s.
|
|
severity = info
|
|
|
|
[DISPATCHCOMM:PHASED_EXECUTION_MODE_DEPRECATED]
|
|
message = Splunk platform search execution methods are deprecated.
|
|
action = Contact your administrator to remove the 'phased_execution_mode' setting in limits.conf, so this message is not displayed again.
|
|
severity = warn
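# Per the action above, the fix is removal: delete or comment out the deprecated
# setting in your local limits.conf override. The [search] stanza placement is
# assumed here.
#
#   # $SPLUNK_HOME/etc/system/local/limits.conf
#   [search]
#   # phased_execution_mode = ...   <- remove this line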
|
|
|
|
[DISPATCHCOMM:PREVIEW_GENERATE_FAIL__S]
|
|
message = Failed to generate preview results %s.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:PREV_STREAM_ERROR__S]
|
|
message = This command cannot be invoked after the command '%s', which is not distributable streaming.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:PREV_STATEFUL_ERROR_S]
|
|
message = This command cannot be invoked after the command '%s', which is not streaming.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:PRIORITY_OUT_OF_BOUNDS__S]
|
|
message = The priority '%s' is out of bounds. The valid range is [0 - 10].
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:QUERY_COMPLETED_BUT_EARLIEST_TIME_NOT_ZERO]
|
|
message = The query has completed, but the earliest time (et) has not yet been set to zero.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:REAL_TIME_NOT_SUPPORTED]
|
|
message = This command is not supported in a real-time search.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:REPLAY_FINISHED]
|
|
message = Replay has finished.
|
|
severity = info
|
|
|
|
[DISPATCHCOMM:RESOURCE_FAILURE]
|
|
message = Operating system thread limit reached; could not run search.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:RT_WINDOW_NOT_SUPPORTED]
|
|
message = This search does not support a windowed real-time time range.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:SCHEDULER_DISABLED]
|
|
message = The report scheduler has been disabled by an administrator. Scheduled report and alert searches are not being run, and their actions are not being performed.
|
|
action = To restore this service, contact your Splunk administrator.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:SEARCH_AUTO_FINALIZED_DISK_LIMIT__LLU]
|
|
message = Search auto-finalized after disk usage limit (%lluMB) reached.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:SEARCH_AUTO_FINALIZED_EVENT_LIMIT__LU]
|
|
message = Search auto-finalized after %lu events limit reached.
|
|
severity = info
|
|
|
|
[DISPATCHCOMM:SEARCH_AUTO_FINALIZED_TIME_LIMIT__LLU]
|
|
message = The search auto-finalized after it reached its time limit: %llu seconds.
|
|
severity = info
|
|
|
|
[DISPATCHCOMM:SEARCH_BUNDLE_THROTTLING]
|
|
message = Search bundle throttling is occurring because the limit for number of bundles with pending lookups for indexing has been exceeded. This could be the result of large lookup files updating faster than Splunk software can index them. Throttling ends when this instance has caught up with indexing of lookups.
|
|
action = If you see this often, contact your Splunk administrator about tuning lookup sizes and max_memtable_bytes.
|
|
severity = warn
|
|
capabilities = search
|
|
help = message.lookup.preindex_throttling
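# A hedged example of the 'max_memtable_bytes' tuning mentioned in the action
# above. The [lookup] stanza placement and the value (in bytes) are assumptions.
#
#   # $SPLUNK_HOME/etc/system/local/limits.conf
#   [lookup]
#   max_memtable_bytes = 10000000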
|
|
|
|
[DISPATCHCOMM:SEARCH_END_RETRY_FAILURE]
|
|
message = Failed to recover from indexer failure(s). Search results might be incomplete.
|
|
severity = info
|
|
|
|
[DISPATCHCOMM:SEARCH_LAUNCH_TIMEOUT__S]
|
|
message = The search job with sid=%s failed to launch successfully after the timeout interval elapsed.
|
|
action = If search jobs time out frequently before successfully launching, check whether the server running Splunk software is overloaded. Alternatively, adjust the 'search_launch_timeout_seconds' setting in the limits.conf file.
|
|
severity = error
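# An illustrative sketch of the adjustment described above. The [search] stanza
# placement of 'search_launch_timeout_seconds' and the value (in seconds) are
# assumptions.
#
#   # $SPLUNK_HOME/etc/system/local/limits.conf
#   [search]
#   search_launch_timeout_seconds = 120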
|
|
|
|
[DISPATCHCOMM:ATTEMPT_RERUN_SEARCH]
|
|
message = The original search failed due to changes in the indexer cluster. The search has been run again.
|
|
severity = info
|
|
|
|
[DISPATCHCOMM:SEARCH_FINALIZED]
|
|
message = Search finalized.
|
|
severity = info
|
|
|
|
[DISPATCHCOMM:WATCHDOG_TRIGGERED__S]
|
|
message = Manual watchdog action triggered for splunk search process with sid=%s.
|
|
severity = info
|
|
|
|
[DISPATCHCOMM:START_TIME_AFTER_END_TIME__LU_LU]
|
|
message = The start_time value %lu should be earlier than the end_time value %lu.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:STREAM_ERROR_TIMEOUT]
|
|
message = Timed out waiting on peers. If this occurs frequently, the 'results_queue_read_timeout_sec' setting in limits.conf might need to be increased. Search results might be incomplete.
|
|
severity = error
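# A hedged example of raising the setting named above. The [search] stanza
# placement of 'results_queue_read_timeout_sec' and the value (in seconds) are
# assumptions.
#
#   # $SPLUNK_HOME/etc/system/local/limits.conf
#   [search]
#   results_queue_read_timeout_sec = 900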
|
|
|
|
[DISPATCHCOMM:TIME_ORDER_REQUIRED]
|
|
message = This search requires events to be in descending time order, but the preceding search does not guarantee time-ordered events.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:TOO_MANY_JOB_DIRS__LU_LU]
|
|
message = The number of search artifacts in the dispatch directory is higher than recommended (count=%lu, warning threshold=%lu) and could have an impact on search performance.
|
|
action = Remove excess search artifacts using the "splunk clean-dispatch" CLI command, and review artifact retention policies in limits.conf and savedsearches.conf. You can also raise this warning threshold in limits.conf / dispatch_dir_warning_size.
|
|
severity = warn
|
|
capabilities = admin_all_objects
|
|
help = message.dispatch.artifacts
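# A hedged sketch of raising the warning threshold named in the action above.
# The [search] stanza placement of 'dispatch_dir_warning_size' and the value are
# assumptions; the "splunk clean-dispatch" CLI command is the other option named.
#
#   # $SPLUNK_HOME/etc/system/local/limits.conf
#   [search]
#   dispatch_dir_warning_size = 5000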
|
|
|
|
[DISPATCHCOMM:UNABLE_TO_OBTAIN_USER_CONTEXT]
|
|
message = Unable to obtain a valid user context for the dispatch thread.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:USAGE]
|
|
message = Usage: dispatch [options] [remoteserverlist] '['<subpipeline>']'.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:WARN_TOO_MANY_CONCURRENT_PER_INSTANCE__S]
|
|
message = The instance is approaching the maximum number of %s searches that can be run concurrently.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:WARN_TOO_MANY_CONCURRENT_SYSWIDE__S]
|
|
message = The cluster is approaching the maximum number of %s searches that can be run concurrently.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:PEER_FAILURE_CAUSED_SEARCH_FAILURE__S_S]
|
|
message = The search job has failed due to err='%s' for the peer=%s.
|
|
action = Check peer status and try running the search again.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:SOME_PEERS_NOT_SEARCHABLE_CAUSED_SEARCH_FAILURE]
|
|
message = The search job has failed because some peers were not searchable. Results will be incomplete.
|
|
action = Check peer status and try running the search again.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:INVALID_SECRET]
|
|
message = Invalid secret in server.conf.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:EMPTY_SECRET]
|
|
message = Empty pass4SymmKey in server.conf.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:FSH_PASSWORD_NOTFOUND__S]
|
|
message = Password information for remote provider '%s' was not found. Configure the remote user correctly.
|
|
severity = error
|
|
|
|
[DISPATCHCOMM:PARTIAL_RESULTS__S]
|
|
message = Bucket repairs are currently in progress for the search process with search_id="%s". The search may have returned partial results.
|
|
action = If you continue to see this warning, contact your Splunk administrator.
|
|
severity = warn
|
|
|
|
[DISPATCHCOMM:DIFFERENT_USER_SUMMARY__S]
|
|
message = This search uses report acceleration summaries with summary_id='%s', which are managed by another user.
|
|
severity = warn
|
|
|
|
|
|
[DISPATCH_JOBS]
|
|
name = Dispatch Job
|
|
|
|
[DISPATCH_JOBS:CHANGED_WORKLOAD_POOL__S_S]
|
|
message = The search job's workload pool was changed to %s, for sid=%s.
|
|
severity = info
|
|
help = learnmore.use_workloads
|
|
|
|
[DISPATCH_JOBS:INVALID_POSTSEARCH__S_S]
|
|
message = Post-process searches cannot contain a generating command. The post-process search '%s' currently generates events.
|
|
action = Rewrite the post-process search so that it does not include generating commands in base search '%s'.
|
|
severity = error
|
|
capabilities = search
|
|
|
|
[DISPATCH_JOBS:INVALID_WORKLOAD_POOL]
|
|
message = Cannot specify workload_pool multiple times.
|
|
action = Remove the duplicate workload_pool parameter, and re-run the search.
|
|
severity = error
|
|
help = learnmore.use_workloads
|
|
|
|
[DISPATCH_JOBS:INVALID_SEARCH_WORKLOAD_POOL__S]
|
|
message = Failed to find workload pool: %s.
|
|
action = Assign the search to a valid workload pool, and try again.
|
|
severity = error
|
|
help = learnmore.use_workloads
|
|
|
|
[DISPATCH_JOBS:MISSING_LIST_OR_SELECT_WORKLOAD_POOLS_CAPABILITY]
|
|
message = User has insufficient permissions to perform this operation.
|
|
action = The list_workload_pools and select_workload_pools capabilities are required.
|
|
severity = error
|
|
help = learnmore.workload_capabilities
|
|
|
|
[DISPATCH_JOBS:MISSING_WORKLOAD_POOL]
|
|
message = You are missing the workload pool for the set workload pool action.
|
|
action = Provide correct workload_pool parameter, and repeat the set workload pool action.
|
|
severity = error
|
|
help = learnmore.use_workloads
|
|
|
|
[DISPATCH_JOBS:SEARCH_JOB_NOT_ALLOWED_IN_DETENTION]
|
|
message = This instance is currently in Detention mode and does not allow running new search jobs. This is likely due to an ongoing rolling restart of the search head cluster.
|
|
action = Log in to another search head to run your search again.
|
|
severity = info
|
|
capabilities = search
|
|
help = message.sh.detention
|
|
|
|
[DISPATCH_JOBS:WARN_REST_API__S_S_S]
|
|
message = A REST call to the deprecated endpoint (%s) was made via app=%s with URL=%s.
|
|
action = Rerun your search using another endpoint that is not deprecated.
|
|
severity = warn
|
|
|
|
|
|
[DISPATCH_RUNNER]
|
|
name = Dispatch Runner
|
|
|
|
[DISPATCH_RUNNER:SLOW_CONFIG_INITIAL__S_LLU_S]
|
|
message = Configuration initialization for %s took longer than expected (%llums) when dispatching a search with search ID %s. This might indicate an issue with underlying storage performance or the knowledge bundle size.
|
|
severity = warn
|
|
action = If you want this message displayed more or less often, change the value of the 'search_startup_config_timeout_ms' setting in "limits.conf" to a lower or higher number.
|
|
capabilities = admin_all_objects
|
|
|
|
|
|
[DISPATCH_SEARCH]
|
|
name = Dispatch Runner
|
|
|
|
[DISPATCH_SEARCH:CANNOT_DISPOSE_RUNNING_SEARCHID__S]
|
|
message = Cannot dispose the currently running search (searchid='%s').
|
|
severity = warn
|
|
|
|
|
|
[DISTRIBUTED]
|
|
name = Distributed Search Handler
|
|
|
|
[DISTRIBUTED:BUNDLE_REPLICATION_FAILURE__S_S]
|
|
message = Bundle replication to peer named '%s' at %s failed.
|
|
severity = error
|
|
|
|
[DISTRIBUTED:BUNDLE_REPLICATION_SUCCESS__S_S]
|
|
message = Bundle replication to peer named '%s' at %s succeeded.
|
|
severity = info
|
|
|
|
[DISTRIBUTED:DISTRIBUTED_SEARCH_DISABLED__S_S]
|
|
message = Unable to distribute to the peer named %s at uri %s because this instance is a part of a cluster and distributed search functionality has been disabled.
|
|
severity = warn
|
|
|
|
[DISTRIBUTED:REPLICATION_FAILED__S_S_S_S]
|
|
message = Bundle replication to peer named %s at uri %s was unsuccessful. ReplicationStatus: %s - Failure info: %s.
|
|
action = Verify connectivity to the search peer, that the search peer is up, and that an adequate level of system resources are available. See the Troubleshooting Manual for more information.
|
|
severity = warn
|
|
|
|
[DISTRIBUTED:SEARCH_UNSUPPORTED_PEERS__S_S_S]
|
|
message = Search Head '%s' running Splunk version '%s' does not support distributing searches to the following peers: %s.
|
|
severity = warn
|
|
|
|
[DISTRIBUTED:UNABLE_TO_DISTRIBUTE__S_S_S_S]
|
|
message = Unable to distribute to peer named %s at uri=%s using the uri-scheme=%s because peer has status=%s.
|
|
action = Verify uri-scheme, connectivity to the search peer, that the search peer is up, and that an adequate level of system resources are available. See the Troubleshooting Manual for more information.
|
|
severity = warn
|
|
|
|
[DISTRIBUTED:UNABLE_TO_CONNECT__S_S]
|
|
message = Unable to establish a connection to peer %s. Send failure while pushing public key to search peer = %s.
|
|
severity = error
|
|
|
|
|
|
[DISTRIBUTED_BUNDLE_REPLICATION]
|
|
name = Distributed Bundle Replication Manager
|
|
|
|
[DISTRIBUTED_BUNDLE_REPLICATION:LARGE_LOOKUP_FILE__S]
|
|
message = The current bundle directory contains a large lookup file that might cause bundle replication to fail. The path to the directory is %s.
|
|
severity = warn
|
|
capabilities = admin_all_objects
|
|
|
|
|
|
[DUMP]
|
|
name = Dump
|
|
|
|
[DUMP:FAILED_TO_CREATE_DIR__S]
|
|
message = Failed to create dir: %s.
|
|
severity = error
|
|
|
|
[DUMP:FAILED_TO_CREATE_TMP_DIR__S]
|
|
message = Failed to create tmp dir: %s.
|
|
severity = error
|
|
|
|
[DUMP:FAILED_TO_GET_MTIME_OF_PATH__S]
|
|
message = Failed to get Mtime of path: %s.
|
|
severity = error
|
|
|
|
[DUMP:FAILED_TO_GET_SIZE_OF_FILE__S]
|
|
message = Failed to get the size of file %s.
|
|
severity = error
|
|
|
|
[DUMP:FAILED_TO_RENAME_FILE__S_S]
|
|
message = Failed to rename file from %s to %s.
|
|
severity = error
|
|
|
|
[DUMP:INVALID_OPTION__S_S]
|
|
message = '%s' is invalid. %s.
|
|
severity = error
|
|
|
|
[DUMP:MISSING_REQUIRED_OPTION__S]
|
|
message = '%s' is required.
|
|
severity = error
|
|
|
|
[DUMP:PARENT_NOT_RUNNING__S]
|
|
message = Parent search job '%s' is not running. Stopping...
|
|
severity = error
|
|
|
|
[DUMP:INVALID_PATHNAME_CONTAINS_NUL__S]
|
|
message = Invalid pathname for _dstpath: '%s'. The pathname contains a NULL character.
|
|
severity = error
|
|
|
|
[DUMP:INVALID_FILENAME_CONTAINS_NUL__S]
|
|
message = Invalid filename for basefilename: '%s'. The filename contains a NULL character.
|
|
severity = error
|
|
|
|
[DUMP:DOTS_IN_PATHNAME]
|
|
message = '_dstpath' should not contain '..'
|
|
severity = error
|
|
|
|
[DUMP:INVALID_FILENAME__S]
|
|
message = '_dstpath' option '%s' is invalid. Directory names contain reserved strings or characters from the following list: CON, PRN, AUX, NUL, COM1, COM2, COM3, COM4, COM5, COM6, COM7, COM8, COM9, LPT1, LPT2, LPT3, LPT4, LPT5, LPT6, LPT7, LPT8, LPT9, <, >, :, /, \, |, ?, *.
|
|
severity = error
|
|
|
|
|
|
[EVAL]
|
|
name = Evaluator
|
|
|
|
[EVAL:BAD_DEST_BRACKETS]
|
|
message = The destination field is invalid. {} brackets must be closed.
|
|
severity = error
|
|
|
|
[EVAL:BAD_FUNC__S]
|
|
message = The '%s' function is unsupported or undefined.
|
|
severity = error
|
|
|
|
[EVAL:BOOLEAN_RESULT]
|
|
message = Fields cannot be assigned a boolean result. Instead, try if([bool expr], [expr], [expr]).
|
|
severity = error
|
|
|
|
[EVAL:CONSUME_FAIL__S]
|
|
message = The expression is malformed. Expected %s.
|
|
severity = error
|
|
|
|
[EVAL:FAILED_PARSE]
|
|
message = Failed to parse the provided arguments. Usage: eval dest_key = expression.
|
|
severity = error
|
|
|
|
[EVAL:INVALID_DEST]
|
|
message = The destination key is invalid.
|
|
severity = error
|
|
|
|
[EVAL:INVALID_EXPRESSION]
|
|
message = The expression is malformed.
|
|
severity = error
|
|
|
|
[EVAL:INVALID_FUNC_ARGS__S]
|
|
message = The arguments to the '%s' function are invalid.
|
|
severity = error
|
|
|
|
[EVAL:INVALID_NUMBER__S]
|
|
message = The number %s is invalid.
|
|
severity = error
|
|
|
|
[EVAL:INVALID_OP__S]
|
|
message = The operator at '%s' is invalid.
|
|
severity = error
|
|
|
|
[EVAL:INVALID_UNARY_OP]
|
|
message = The expression is malformed. The unary op is invalid.
|
|
severity = error
|
|
|
|
[EVAL:MATCH_FAIL__C]
|
|
message = The expression is malformed. Expected %c.
|
|
severity = error
|
|
|
|
[EVAL:MISSING_AND]
|
|
message = The expression is malformed. An 'AND' term is missing.
|
|
severity = error
|
|
|
|
[EVAL:MISSING_ARGS]
|
|
message = Arguments are missing. Usage: eval dest_key = expression.
|
|
severity = error
|
|
|
|
[EVAL:MISSING_COMP_TERM]
|
|
message = The expression is malformed. A comparison term is missing.
|
|
severity = error
|
|
|
|
[EVAL:MISSING_FACTOR]
|
|
message = The expression is malformed. The factor is missing.
|
|
severity = error
|
|
|
|
[EVAL:MISSING_OR]
|
|
message = The expression is malformed. An 'OR' term is missing.
|
|
severity = error
|
|
|
|
[EVAL:MISSING_TERM]
|
|
message = The expression is malformed. A term is missing.
|
|
severity = error
|
|
|
|
[EVAL:TYPE_FAIL_BOOL__S]
|
|
message = Type checking failed. '%s' only takes boolean arguments.
|
|
severity = error
|
|
|
|
[EVAL:TYPE_FAIL_CONCAT]
|
|
message = Type checking failed. The '.' operator only takes strings and numbers.
|
|
severity = error
|
|
|
|
[EVAL:TYPE_FAIL_DIFF__S]
|
|
message = Type checking failed. The '%s' operator received different types.
|
|
severity = error
|
|
|
|
[EVAL:TYPE_FAIL_NUM__S]
|
|
message = Type checking failed. '%s' only takes numbers.
|
|
severity = error
|
|
|
|
[EVAL:TYPE_FAIL_PLUS]
|
|
message = Type checking failed. '+' only takes two strings or two numbers.
|
|
severity = error
|
|
|
|
[EVAL:UNEXPECTED_CHAR__S]
|
|
message = The expression is malformed. An unexpected character is reached at '%s'.
|
|
severity = error
|
|
|
|
[EVAL:SQL_TYPE_NOT_ACCEPTED__S]
|
|
message = The expression contains an argument of type '%s', which is not supported by 'sdselect'.
|
|
severity = error
|
|
|
|
[EVAL:INVALID_SPLUNK_TIME_FORMAT__S]
|
|
message = The Splunk time format string '%s' is invalid.
|
|
help = sds.index.timesettings
|
|
severity = error
|
|
|
|
[EVAL:INVALID_PARTITION_TIME_FIELD_FORMAT__S]
|
|
message = The partition time field format string '%s' is invalid.
|
|
severity = error
|
|
|
|
[EVAL:INVALID_PARTITION_TIME_FIELD_TYPE__S]
|
|
message = The partition time field type string '%s' is invalid.
|
|
severity = error
|
|
|
|
[EVAL:MULTIPLE_DATE_TYPE_PARTITION_FIELDS]
|
|
message = A set of partition time fields can include only one field with a 'date' time field type.
|
|
severity = error
|
|
|
|
[EVAL:MAX_LIMIT_REACHED_FOR_PREKEY_REPLACEMENT]
|
|
message = Your search includes dynamic field name creation on the left side of an eval expression that has resulted in unbounded recursive replacements.
|
|
action = Rewrite the eval expression in your search to eliminate the recursive field name creation that uses curly braces ( { } ), or clean up the data of the field on the right side of the expression to ensure that it doesn't generate bracketed field names after creation that are recursive.
|
|
severity = error
|
|
help = message.eval_command.field_names
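# Context for the message above: in SPL, an 'eval' expression can create field
# names dynamically with curly braces. The field names in this sketch are
# placeholders only.
#
#   ... | eval {status}_count = count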
|
|
|
|
[EVAL:PARSE_STRUCT_COLUMN_TYPE_ERROR__S]
|
|
message = Failed to parse struct column type value: %s
|
|
action = If you receive this error, report it to Splunk Support.
|
|
severity = error
|
|
|
|
|
|
[EVENT_COUNT_PROCESSOR]
|
|
name = Event Count Processor
|
|
|
|
[EVENT_COUNT_PROCESSOR:FAILED_TO_ADD_NEW_RESULT]
|
|
message = Failed to add new result.
|
|
severity = error
|
|
|
|
[EVENT_COUNT_PROCESSOR:NO_KEY_FOUND_IN_RESULTS__S]
|
|
message = No '%s' key is found in the results.
|
|
severity = error
|
|
|
|
[EVENT_COUNT_PROCESSOR:UNABLE_TO_RETRIEVE__LU]
|
|
message = Unable to retrieve '%lu'.
|
|
severity = error
|
|
|
|
[EVENT_COUNT_PROCESSOR:UNEXPECTED_EXCEPTION__S]
|
|
message = Unexpected exception found: '%s'.
|
|
severity = error
|
|
|
|
|
|
[EVENT_TYPE]
|
|
name = Event Type
|
|
|
|
[EVENT_TYPE:INVALID_COLOR__S]
|
|
message = Unsupported event type color=%s.
|
|
severity = error
|
|
|
|
[EVENT_TYPE:INVALID_PRIORITY__S]
|
|
message = The priority field should be a number greater than -1. priority=%s.
|
|
severity = error
|
|
|
|
[EVENT_TYPE:MUST_BE_SIMPLE_SEARCH]
|
|
message = Event type search string cannot be a search pipeline or contain a subsearch.
|
|
severity = error
|
|
|
|
[EVENT_TYPE:NAME_CANNOT_HAVE_WILDCARDS]
|
|
message = The event type name cannot contain the '*' wildcard character.
|
|
severity = error
|
|
|
|
[EVENT_TYPE:PARSING_ERROR__S_S]
|
|
message = Error while parsing event type search: %s. Message: %s.
|
|
severity = error
|
|
|
|
|
|
[EXTERN]
|
|
name = Extern Processor
|
|
|
|
[EXTERN:DISABLED_COMMAND_CREATERSS]
|
|
message = The 'createrss' search command is disabled.
|
|
action = To re-enable the 'createrss' command, set the 'enable_createrss_command' setting in limits.conf to 'true'.
|
|
severity = error
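# A hedged sketch of the re-enable step in the action above. The [search] stanza
# placement of 'enable_createrss_command' in limits.conf is an assumption.
#
#   # $SPLUNK_HOME/etc/system/local/limits.conf
#   [search]
#   enable_createrss_command = true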
|
|
|
|
[EXTERN:DISABLED_COMMAND__S]
|
|
message = The external search command '%s' is disabled in commands.conf.
|
|
severity = error
|
|
|
|
[EXTERN:GETINFO_FAILED__S]
|
|
message = Getinfo probe failed for external search command '%s'.
|
|
severity = error
|
|
|
|
[EXTERN:INVALID_COMMAND__S]
|
|
message = The external search command '%s' does not exist in commands.conf.
|
|
severity = error
|
|
|
|
[EXTERN:INVALID_PYTHON_VERSION__S_S]
|
|
message = The Python version '%s' for command '%s' in commands.conf is invalid.
|
|
severity = error
|
|
|
|
[EXTERN:INVALID_TYPE__S_S]
|
|
message = The type '%s' for command '%s' in commands.conf is invalid.
|
|
severity = error
|
|
|
|
[EXTERN:NO_PERMISSION__S_S]
|
|
message = You do not have a role with the capability='%s' required to run this command='%s'.
|
|
severity = error
|
|
action = Contact your Splunk administrator to request that this capability be added to your role.
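# An illustrative sketch of granting a capability to a role in authorize.conf,
# per the action above. The role and capability names are placeholders only.
#
#   # $SPLUNK_HOME/etc/system/local/authorize.conf
#   [role_power]
#   some_required_capability = enabled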
|
|
|
|
[EXTERN:SCRIPT_EX_FAILED__S]
|
|
message = Script execution failed for external search command '%s'.
|
|
severity = error
|
|
|
|
[EXTERN:SCRIPT_NONZERO_RETURN__S_D_S]
|
|
message = External search command '%s' returned error code %d. %s.
|
|
severity = error
|
|
|
|
[EXTERN:SEARCH_RESULTS_INFO_READ_FAIL]
|
|
message = Failed to read Search Results Info file modified by external search command.
|
|
severity = error
|
|
|
|
[EXTERN:TIME_MISSING_FAIL__S]
|
|
message = Could not locate the time (_time) field on some results returned from the external search command '%s'.
|
|
severity = error
|
|
|
|
[EXTERN:TIME_ORDER_FAIL__S]
|
|
message = The external search command '%s' did not return events in descending time order, as expected.
|
|
severity = error
|
|
|
|
[EXTERN:RUNSHELLSCRIPT_INVALID_NUM_ARGS__LLU]
|
|
message = Invalid number of arguments provided for 'runshellscript'. Expected 9 or 10 arguments, got %llu.
|
|
severity = error
|
|
|
|
[EXTERN:RUNSHELLSCRIPT_INVALID_SID__S]
|
|
message = Invalid search ID '%s'.
|
|
severity = error
|
|
|
|
[EXTERN:RUNSHELLSCRIPT_CANNOT_FIND_SID__S]
|
|
message = Cannot find search ID '%s'.
|
|
severity = error
|
|
|
|
[EXTERN:RUNSHELLSCRIPT_PERMISSION__S]
|
|
message = Permission denied. You cannot access artifacts of search ID '%s'.
|
|
severity = error
|
|
|
|
[EXTERN:RUNSHELLSCRIPT_RESULTS_FILE_PATH_ARG_DEPRECATED]
|
|
message = The results file path argument is deprecated.
|
|
severity = info
|
|
|
|
[EXTERN:RUNSHELLSCRIPT_INVALID_PATH__S]
|
|
message = Invalid path to results file '%s'.
|
|
severity = error
|
|
|
|
|
|
[FIELD_FORMAT]
|
|
name = Field Render Processor
|
|
|
|
[FIELD_FORMAT:FAILED_PARSE]
|
|
message = Failed to parse the provided arguments. Usage: fieldformat field = expression.
|
|
severity = error
|
|
|
|
[FIELD_FORMAT:INVALID_DEST]
|
|
message = The destination field is invalid.
|
|
severity = error
|
|
|
|
[FIELD_FORMAT:MISSING_ARGS]
|
|
message = Arguments are missing. Usage: fieldformat field = expression.
|
|
severity = error
|
|
|
|
|
|
[FIELDS]
|
|
name = Fields
|
|
|
|
[FIELDS:INVALID_FIELD__S]
|
|
message = Invalid field name '%s'.
|
|
severity = error
|
|
|
|
[FIELDS:NO_ARGS]
|
|
message = Must specify at least one valid field name (can contain wildcards).
|
|
severity = error
|
|
|
|
[FIELDS:NO_RESULTS]
|
|
message = No matching fields exist.
|
|
severity = info
|
|
|
|
|
|
[FILE_CLASSIFIER]
|
|
name = File Classifier
|
|
|
|
[FILE_CLASSIFIER:CANNOT_OPEN__S]
|
|
message = Unable to open '%s'.
|
|
severity = warn
|
|
|
|
[FILE_CLASSIFIER:ERROR_GETTING_REGEX__S_S]
|
|
message = Error (%s) encountered while getting breaking regex for new unknown type '%s'.
|
|
severity = error
|
|
|
|
[FILE_CLASSIFIER:ERROR_GETTING_TYPE__S_S]
|
|
message = Error (%s) encountered while getting file type for '%s'.
|
|
severity = error
|
|
|
|
[FILE_CLASSIFIER:FOUND_GOOD_REGEX__S_S_S]
|
|
message = Found a good breaking regex (%s) for the new sourcetype '%s' created from %s.
|
|
severity = info
|
|
|
|
[FILE_CLASSIFIER:FOUND_REGULAR_TIMESTAMP__S_D]
|
|
message = Found regular timestamps for '%s' before %d.
|
|
severity = info
|
|
|
|
[FILE_CLASSIFIER:FOUND_TIMESTAMP_PREFIX__S_S]
|
|
message = Found a regular timestamp prefix for '%s' with '%s'.
|
|
severity = info
|
|
|
|
[FILE_CLASSIFIER:INCREASING_MAX_DIST__LU]
|
|
message = Configuration has a suspiciously high number of sourcetypes (%lu). To prevent further ballooning of sourcetypes, the default maxDist has been increased.
|
|
severity = warn
|
|
|
|
[FILE_CLASSIFIER:INVALID_FILE__S_S]
|
|
message = The file '%s' is invalid. Reason: %s.
|
|
severity = warn
|
|
|
|
[FILE_CLASSIFIER:INVALID_NOT_LEARNING__S_S]
|
|
message = Not learning file '%s'. %s.
|
|
severity = error
|
|
|
|
[FILE_CLASSIFIER:NEW_KNOWN_TYPE__S_S]
|
|
message = Attempting to make a new unknown type '%s'. Single-line type: %s.
|
|
severity = info
|
|
|
|
[FILE_CLASSIFIER:NO_GOOD_REGEX__S]
|
|
message = Unable to get a good breaking regex for new unknown type '%s'.
|
|
severity = info
|
|
|
|
[FILE_CLASSIFIER:NO_SOURCETYPE_FOUND__S]
|
|
message = No sourcetype found for '%s'. Ignoring.
|
|
severity = error
|
|
|
|
[FILE_CLASSIFIER:SJIS__S]
|
|
message = Filename '%s' looks like sjis character encoding. Consider specifying 'CHARSET=sjis'.
|
|
severity = warn
|
|
|
|
[FILE_CLASSIFIER:TOO_FEW_LINES__LU_S_LU]
|
|
message = Only able to read %lu lines from '%s'. %lu lines are required.
|
|
severity = warn
|
|
|
|
[FILE_CLASSIFIER:TRAINING__S]
|
|
message = Training on the type '%s'.
|
|
severity = info
|
|
|
|
[FILE_CLASSIFIER:UNABLE_TO_CONVERT_TO_UTF8__S]
|
|
message = Unable to convert character set '%s' to UTF8. Using existing content as is.
|
|
severity = error
|
|
|
|
[FILE_CLASSIFIER:UNKNOWN_SETTING__S]
|
|
message = The '%s' setting in source-classifier.conf is unknown.
|
|
severity = warn
|
|
|
|
|
|
[FILLNULL_PROCESSOR]
|
|
name = Fillnull Processor
|
|
|
|
[FILLNULL_PROCESSOR:EMPTY_FIELD]
|
|
message = You must provide a field name.
|
|
severity = error
|
|
|
|
[FILLNULL_PROCESSOR:MULTI_FIELD_SPEC__S]
|
|
message = Field '%s' cannot be specified multiple times.
|
|
severity = error
|
|
|
|
[FILLNULL_PROCESSOR:FILE_READ_ERROR__S]
|
|
message = Could not read file '%s'.
|
|
severity = error
|
|
|
|
|
|
[FIND_KEY_WORDS]
|
|
name = Find Key Words
|
|
|
|
[FIND_KEY_WORDS:INVALID_LABEL_INDEX]
|
|
message = The label field supplied is invalid. The label field must be a field with integer values.
|
|
severity = error
|
|
|
|
[FIND_KEY_WORDS:NO_LABEL_FIELD]
|
|
message = Must specify 'labelfield'.
|
|
severity = error
|
|
|
|
[FIND_KEY_WORDS:SEARCH_IS_WEIRD]
|
|
message = Eventtyper search is malformed.
|
|
severity = error
|
|
|
|
|
|
[FOLDERIZE]
|
|
name = Folderize Operator
|
|
|
|
[FOLDERIZE:ATTR_REQUIRED]
|
|
message = Folderize requires an 'attr' value.
|
|
severity = error
|
|
|
|
[FOLDERIZE:BAD_COUNT_ON_ATTR__S_S]
|
|
message = Folderize encountered a bad count value '%s' on attribute '%s'.
|
|
severity = error
|
|
|
|
|
|
[FOREACH]
|
|
name = ForEach Processor
|
|
|
|
[FOREACH:INVALID_MODE__S]
|
|
message = An invalid mode was specified: '%s'. Supported modes are "multivalue", "json_array", and "multifield".
|
|
action = Specify a valid mode. Then, run your search again.
|
|
severity = error
|
|
|
|
[FOREACH:NO_FIELD_PROVIDED]
|
|
message = A field was not provided for the 'foreach' command to iterate over.
|
|
action = Specify a field for the 'foreach' command to iterate over. Then, run your search again.
|
|
severity = error
|
|
|
|
[FOREACH:MULTIPLE_FIELDS]
|
|
message = More than one field was provided for the 'foreach' command to iterate over. The 'foreach' command expects exactly one field when mode=json_array or mode=multivalue.
|
|
action = Change your search to specify only one field. Then, run your search again.
|
|
severity = error
|
|
|
|
[FOREACH:ITEMSTR_WITHOUT_MODE]
|
|
message = The 'itemstr' argument must be specified with a mode.
|
|
action = Specify mode as either "multivalue" or "json_array". Alternatively, remove the 'itemstr' argument.
|
|
severity = error
|
|
|
|
[FOREACH:ITERSTR_WITHOUT_MODE]
|
|
message = The 'iterstr' argument must be specified with a mode.
|
|
action = Specify mode as either "multivalue" or "json_array". Alternatively, remove the 'iterstr' argument.
|
|
severity = error
|
|
|
|
[FOREACH:ONLY_ONE_EVAL]
|
|
message = The 'foreach' command only permits a single eval statement when iterating over multivalued fields or JSON arrays.
|
|
action = Change your subsearch to have a single 'eval' statement (with one or more assignments). Then, run your search again.
|
|
severity = error
|
|
|
|
|
|
[FORMAT]
|
|
name = Format Processor
|
|
|
|
[FORMAT:BAD_ARGS]
|
|
message = The '<resultstart> <colstart> <colseparator> <colend> <rowseparator> <resultend>' arguments must be specified together or not at all.
|
|
severity = error
|
|
|
|
[FORMAT:FIELD_EMPTY]
|
|
message = You must provide a 'field' value.
|
|
severity = error
|
|
|
|
[FORMAT:LOWERFIELD_EMPTY]
|
|
message = You must provide a 'lowerfield' value.
|
|
severity = error
|
|
|
|
[FORMAT:ONLY_FIRST_MAXRESULTS__LU_LU]
|
|
message = The 'format' command is using only the first %lu (of %lu) results.
|
|
severity = warn
|
|
|
|
[FORMAT:UPPERFIELD_EMPTY]
message = You must provide an 'upperfield' value.
severity = error
|
|
|
|
|
|
[FROMJSON]
|
|
name = FromJson Processor
|
|
|
|
[FROMJSON:NO_FIELD_FOUND]
|
|
message = No field was provided for 'fromjson' to expand.
|
|
severity = error
|
|
|
|
[FROMJSON:MULTIPLE_FIELDS]
|
|
message = More than one field was provided for 'fromjson' to expand; 'fromjson' expects exactly one field.
|
|
severity = error
|
|
|
|
|
|
[GENERAL]
|
|
name = General
|
|
|
|
[GENERAL:UNABLE_TO_FIND_INDEX__S]
|
|
message = Unable to find the '%s' index.
|
|
severity = error
|
|
|
|
[GENERAL:UNABLE_TO_PARSE_SEARCH__S]
|
|
message = Unable to parse the search: %s.
|
|
severity = error
|
|
|
|
[GENERAL:SKIPPED_TSIDX_FILES_CORRUPTION__S]
|
|
message = Some scanned tsidx files in '%s' were unreadable. Results may be incomplete. Consider running fsck.
|
|
|
|
[GENERAL:SKIPPED_TSIDX_FILES_OOMEM__S]
message = Out of memory while scanning tsidx files in '%s'. Results may be incomplete.
|
|
|
|
[GEO_STATS]
|
|
name = Geo Stats Processor
|
|
|
|
[GEO_STATS:INVALID_BINSPAN]
message = The binspan string failed verification.
severity = error
|
|
|
|
[GEO_STATS:INVALID_BOUNDS]
message = Invalid latitude and longitude bounds.
severity = warn
|
|
|
|
[GEO_STATS:INVALID_LATITUDE]
message = Latitude values must be within the valid range of -90.0 to 90.0.
severity = warn
|
|
|
|
[GEO_STATS:INVALID_LATITUDE_LONGITUDE]
message = Invalid latitude/longitude. The result will be skipped.
severity = error
|
|
|
|
[GEO_STATS:INVALID_LATSPAN__F]
message = Invalid latspan=%f. latspan must be within the valid range of 0.0 to 180.0.
severity = error
|
|
|
|
[GEO_STATS:INVALID_LONGSPAN__F]
message = Invalid longspan=%f. longspan must be within the valid range of 0.0 to 360.0.
severity = error
|
|
|
|
[GEO_STATS:INVALID_MAXCLUSTERS__D]
message = The maxclusters argument for geofilter must be >= %d, which is the number of bins at the lowest zoom level.
severity = warn
|
|
|
|
[GEO_STATS:INVALID_MAXZOOMLEVEL__D]
message = Invalid maxzoomlevel=%d. maxzoomlevel must be within the valid range of 0 to 18.
severity = error
|
|
|
|
[GEO_STATS:LARGE_SPLIT_BY_CARDINALITY__S_D]
message = The split-by field %s has a large number of unique values (%d). The chart column set will be trimmed to 10. Use the globallimit argument to control the column count.
severity = warn
|
|
|
|
[GEO_STATS:MISSING_ARGUMENTS]
|
|
message = Invalid search arguments for geostats.
|
|
severity = error
|
|
|
|
[GEO_STATS:MULTIPLE_SPLIT_BY]
message = Multiple split-by/group-by fields are not allowed in geoviz when rendering with a pie chart. Use translatetoxy=false.
severity = error
|
|
|
|
[GEO_STATS:SKIPPED_RESULTS__LLU]
|
|
message = Forced to skip results in geostats due to invalid latitude/longitude count='%llu'.
|
|
severity = warn
|
|
|
|
[GEO_STATS:SPAN_ZOOM_LEVEL_NOT_ALL_SET]
|
|
message = binspanlat and binspanlong need to be set together.
|
|
severity = error
|
|
|
|
[GEO_STATS:SUBCOMMAND_EXCEPTION__S]
message = Geostats error while processing subcommand. Exception: '%s'.
severity = error
|
|
|
|
|
|
[HEAD]
|
|
name = Head Processor
|
|
|
|
[HEAD:BOOLEAN_EXPR_OR_POSITIVE_INTEGER]
|
|
message = The argument must be a positive number or a boolean expression.
|
|
severity = error
|
|
|
|
[HEAD:BOOLEAN_EXPR]
|
|
message = The argument must be a boolean expression.
|
|
severity = error
|
|
|
|
[HEAD:NUM_RESULTS_MUST_BE_POS]
|
|
message = The number of results must be a positive number.
|
|
severity = error
|
|
|
|
|
|
[INDEXER]
|
|
name = Index Processor
|
|
|
|
[INDEXER:INVALID_METRIC_VALUE__S_S_S_S_S]
|
|
message = The metric value=%s is not valid for source=%s, sourcetype=%s, host=%s, %s. Metric event data with an invalid metric value cannot be indexed.
|
|
action = Ensure the input metric data is not malformed.
|
|
severity = warn
|
|
capabilities = indexes_edit
|
|
|
|
[INDEXER:METRIC_MULTIPLE_UNDERSCORE_VALUE_KEYS__S_S_S_S]
|
|
message = The metric event from source=%s, sourcetype=%s, host=%s, %s has multiple _value keys.
action = We recommend using the newer structure for metrics data points, where you have one or more keys of the form "metric_name:<metric>" (for example, "metric_name:cpu.idle") with corresponding floating point values. Otherwise, if you want to use the legacy approach, ensure you have only one "_value" key specified.
severity = warn
|
|
capabilities = indexes_edit
|
|
|
|
|
|
[INDEXER:METRIC_IGNORING_LEGACY_FIELDS__S_S_S_S_S_S]
|
|
message = Ignoring legacy fields for metric event from source=%s, sourcetype=%s, host=%s, %s, metric_name=%s _value=%s.
|
|
action = If these keys are safe to ignore, no further action required. Otherwise, ensure metrics data is properly structured with one or more keys of "metric_name:<metric>" with corresponding floating point value.
|
|
severity = info
|
|
capabilities = indexes_edit
|
|
|
|
[INDEXER:METRIC_VALUE_NOT_NUMERIC_TYPE__S_S_S_S_S]
|
|
message = The metric value=%s provided for source=%s, sourcetype=%s, host=%s, %s is not a floating point value. Using a "numeric" type rather than a "string" type is recommended to avoid indexing inefficiencies.
|
|
action = Ensure the metric value is provided as a floating point number and not as a string. For instance, provide 123.001 rather than "123.001".
|
|
severity = warn
|
|
capabilities = indexes_edit
|
|
|
|
[INDEXER:BAD_METRIC_EVENT__S_S_S_S]
message = The metric event is not properly structured, source=%s, sourcetype=%s, host=%s, %s. Metric event data without a metric name and properly formatted numerical values is invalid and cannot be indexed.
action = Ensure the input metric data is not malformed and has one or more keys of the form "metric_name:<metric>" (for example, "metric_name:cpu.idle") with corresponding floating point values.
severity = warn
|
|
capabilities = indexes_edit
|
|
|
|
[INDEXER:THROTTLING_TSIDX__S_S]
message = The index processor has paused data flow. Too many tsidx files in idx=%s bucket="%s", waiting for the splunk-optimize indexing helper to catch up with merging them.
action = Ensure reasonable disk space is available, and that I/O write throughput is not compromised.
|
|
severity = warn
|
|
help = message.stall.tsidx.hot
|
|
capabilities = indexes_edit
|
|
|
|
[INDEXER:AMBIGUOUS_METRIC_NAMES__S_S_S_S]
|
|
message = A metric event with multiple, ambiguous "metric_name" fields has been found for source=%s, sourcetype=%s, host=%s, %s. This event is considered to be malformed. It cannot be indexed.
|
|
action = Ensure the input metric data has just one "metric_name" defined.
|
|
severity = warn
|
|
capabilities = indexes_edit
|
|
|
|
[INDEXER:EMPTY_METRIC_NAME__S_S_S_S]
|
|
message = The metric data point is not properly structured: source=%s, sourcetype=%s, host=%s, %s. Metric data points that have names that are empty or composed entirely of white spaces cannot be indexed.
|
|
action = Ensure the input metric has one or more keys with the form 'metric_name:<metric_name>' (for example: 'metric_name:cpu.idle') with corresponding floating point values.
|
|
severity = warn
|
|
capabilities = indexes_edit
|
|
|
|
|
|
[INDEX_SCOPED_SEARCH]
|
|
name = Index Scoped Search
|
|
|
|
[INDEX_SCOPED_SEARCH:SEARCH_FAILED_MORE_THAN_EVENTS_FOUND_AT_TIME__D_D]
|
|
message = The search failed. More than %d events found at time %d.
|
|
severity = error
|
|
|
|
|
|
[IDXCLUSTER]
|
|
name = Indexer Clustering
|
|
|
|
[IDXCLUSTER:ADD_PEER_FAILED__S_S]
|
|
message = Failed to add peer '%s' to the master. Error=%s.
|
|
severity = error
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:BUNDLE_PUSH_FAILED_ALL_PEERS_DOWN]
|
|
message = All peers went down during bundle push. The new bundle cannot be applied until the peers return to the cluster. The main cause for all peers going down during bundle push is a very large bundle. In that case, reduce the bundle size and push it again. For more information, search for "configuration bundle issues" in the documentation.
|
|
severity = warn
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:CLUSTER_ROLLING_RESTART_SHUTDOWN_REQUIRES_MANUAL_INTERVENTION__S]
message = The indexer %s is shut down due to a rolling restart and requires manual intervention for the restart to proceed.
action = Restart the indexer manually.
|
|
severity = info
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:CLUSTER_TOO_MANY_STREAMING_ERRORS_TO_TARGET__S]
|
|
message = Too many bucket replication errors to target peer=%s. Will stop streaming data from hot buckets to this target while errors persist.
|
|
action = Check for network connectivity from the cluster peer reporting this issue to the replication port of target peer. If this condition persists, you can temporarily put that peer in manual detention.
|
|
severity = warn
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:CONFIG_VALIDATION_FAILURE__S_S_S]
|
|
message = Config validation failure reported in peer=%s guid=%s. %s.
|
|
severity = error
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:DISTSEARCH_DISABLED]
|
|
message = The searchhead is a part of a cluster but distributed search has been disabled.
|
|
severity = info
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:FAILED_TO_LOAD_CONFIG__S]
message = Failure to load cluster config (server.conf). Error = %s.
severity = error
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:FAILED_TO_MAKE_BUCKET_SEARCHABLE__S_U]
|
|
message = Failed to make bucket = %s searchable, retry count = %u.
|
|
severity = error
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:FAILED_TO_REGISTER_WITH_MASTER__S_S]
message = Failed to register with cluster master. Reason: %s [ event=addPeer status=retrying %s ].
severity = warn
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:INDEXER_CANNOT_BE_RESTARTED__S_S]
|
|
message = Could not restart peer=%s due to peer status=%s. Skipping this peer from rolling restart entirely.
|
|
severity = warn
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:INDEXING_NOT_READY]
|
|
message = Indexing not ready; fewer than replication_factor peers are up.
|
|
severity = warn
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:MULTISITE_MODE_MISMATCH__S_S]
|
|
message = Master has multisite %s but peer %s a site configuration.
|
|
severity = error
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:PARTIAL_RESULTS__S_S]
|
|
message = The search process with sid=%s on peer=%s might have returned partial results due to a reading error while waiting for the peer. This can occur if the peer unexpectedly closes or resets the connection during a planned restart.
|
|
action = Try running the search again.
|
|
severity = error
|
|
capabilities = list_indexer_cluster
|
|
help = learnmore.idxc.searchable.upgrade
|
|
|
|
[IDXCLUSTER:PK_NOT_SENT_TO_PEER__S]
|
|
message = The public key has not been sent to the peer. Cannot add peer %s to the searchhead's peer list.
|
|
severity = error
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:REPLICATION_PORT_USE_ERROR__U]
|
|
message = Clustering initialization failed. Could not bind to replication port (%u). Ensure that port is not in use.
|
|
severity = error
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:ROLLING_UPGRADE_FINALIZE]
|
|
message = Cluster is no longer in rolling upgrade mode.
|
|
severity = info
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:ROLLING_UPGRADE_INIT]
|
|
message = Cluster is in rolling upgrade mode.
|
|
severity = info
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:SEARCHHEAD_EMPTY_PEER_LIST]
|
|
message = Waiting for requisite number of peers to join the cluster.
|
|
severity = info
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:SEARCHHEAD_INCOMPLETE_GENERATION__S]
|
|
message = One or more replicated indexes might not be fully searchable. Some search results might be incomplete or duplicated during bucket fix up.
|
|
action = For more information, check the cluster manager page on the master - splunkd URI: %s.
|
|
severity = warn
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:SEARCHHEAD_MULTISITE_ERR__S]
|
|
message = Site '%s' is not on the master's list of available sites. To fix, add it to the 'available_sites' attribute in the master's server.conf file.
|
|
severity = error
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:SEARCHHEAD_MULTISITE_MODE_MISMATCH__S_S]
|
|
message = Master has multisite %s but %s the 'multisite' attribute.
|
|
severity = error
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:SEARCHHEAD_NO_PEER_INFO__S]
|
|
message = The searchhead is unable to update the peer information. Error = %s.
|
|
severity = error
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:SEARCH_WAS_DEFERRED__S_S_S_D]
|
|
message = Search %s created by %s on the %s app was deferred to run after the searchable rolling restart or upgrade is completed. There are currently %d deferred searches in total.
|
|
severity = warn
|
|
capabilities = list_indexer_cluster
|
|
|
|
[IDXCLUSTER:SEARCH_WAS_SKIPPED__S_S_S]
|
|
message = Search %s created by %s on the %s app was skipped during the searchable rolling restart or upgrade.
|
|
severity = warn
|
|
capabilities = list_indexer_cluster
|
|
|
|
|
|
[INPUT_CSV]
|
|
name = Input CSV
|
|
|
|
[INPUT_CSV:FAILED_CREATING_DIR__LU]
|
|
message = Unable to create temporary directory after %lu retries.
|
|
severity = error
|
|
|
|
[INPUT_CSV:FILE_OPEN_FAIL__S_M]
|
|
message = Unable to open file '%s'. error='%m'.
|
|
severity = error
|
|
|
|
[INPUT_CSV:FILE_READ_FAIL__S]
|
|
message = File '%s' could not be opened for reading.
|
|
severity = error
|
|
|
|
[INPUT_CSV:INCONSISTENT_NUMBER_OF_COLUMNS__LU]
|
|
message = Encountered %lu 'inconsistent number of column' errors while reading input.
|
|
severity = warn
|
|
|
|
[INPUT_CSV:INVALID_FILENAME__S]
|
|
message = '%s' is not a valid filename.
|
|
severity = error
|
|
|
|
[INPUT_CSV:INVALID_FILE_READ__S]
|
|
message = You cannot read the file '%s' outside of the secure directory.
|
|
severity = error
|
|
|
|
[INPUT_CSV:INVALID_JOB_DIRECTORY]
|
|
message = This search does not have a valid job directory.
|
|
severity = error
|
|
|
|
[INPUT_CSV:INVALID_LOOKUP_FILE__S]
|
|
message = Unable to read lookup file '%s'.
|
|
severity = error
|
|
|
|
[INPUT_CSV:INVALID_LOOKUP_TABLE__S]
|
|
message = The lookup table '%s' is invalid.
|
|
severity = error
|
|
|
|
[INPUT_CSV:INVALID_LOOKUP_TABLE_TYPE__S]
|
|
message = The lookup table '%s' requires a .csv or KV store lookup definition.
|
|
severity = error
|
|
|
|
[INPUT_CSV:INVALID_MAX__LU]
|
|
message = The 'max' option value is out of range. It must be between 1 and %lu.
|
|
severity = error
|
|
|
|
[INPUT_CSV:INVALID_OPTION__S]
|
|
message = The option argument '%s' is invalid.
|
|
severity = error
|
|
|
|
[INPUT_CSV:INVALID_START__LU]
|
|
message = The 'start' option value is invalid. It must be < %lu.
|
|
severity = error
|
|
|
|
[INPUT_CSV:LOOKUP_FILE_DOES_NOT_EXIST]
|
|
message = The lookup file does not exist.
|
|
severity = error
|
|
|
|
[INPUT_CSV:LOOKUP_POSSIBLE_MAC_LINE_ENDINGS__S]
|
|
message = Lookup file '%s' might use mac-style line endings, which are unsupported.
|
|
severity = warn
|
|
|
|
[INPUT_CSV:LOOKUP_READ__S]
|
|
message = Successfully read lookup file '%s'.
|
|
severity = info
|
|
|
|
[INPUT_CSV:NOT_A_FILE__S]
|
|
message = '%s' is not a file.
|
|
severity = error
|
|
|
|
[INPUT_CSV:NO_FILES]
|
|
message = The input file is missing.
|
|
severity = error
|
|
|
|
[INPUT_CSV:NO_PERMISSION]
|
|
message = You have insufficient privileges to input a file from var/run/splunk/csv. You can input a temporary csv file (emitted by outputcsv within the same search) by passing 'dispatch=t' as an option.
|
|
severity = error
|
|
|
|
[INPUT_CSV:UNABLE_TO_COPY_TEMP_FILE__S_M]
|
|
message = Unable to copy temporary file '%s'. error='%m'.
|
|
severity = error
|
|
|
|
[INPUT_CSV:UNABLE_TO_CREATE_TEMP_FILE__S_M]
|
|
message = Unable to create temporary file '%s'. error='%m'.
|
|
severity = error
|
|
|
|
[INPUT_CSV:USAGE__S]
|
|
message = Usage: %s [chunk=<bool>] <filename>.
|
|
severity = error
|
|
|
|
|
|
[INSTALLED_FILES_INTEGRITY]
|
|
name = Installed Files Integrity Checker
|
|
|
|
[INSTALLED_FILES_INTEGRITY:DISABLED_BUG]
|
|
message = Integrity Checker ran and completed, but claimed it is disabled; this is a bug. As a result, no trustworthy file integrity information can be produced.
|
|
severity = error
|
|
capabilities = admin_all_objects
|
|
help = message.validate.files
|
|
|
|
[INSTALLED_FILES_INTEGRITY:FOUND_INTEGRITY_PROBLEMS__LLU_S]
|
|
message = File Integrity checks found %llu files that did not match the system-provided manifest.
|
|
action = Review the list of problems reported by the InstalledFileHashChecker in splunkd.log [[/app/search/integrity_check_of_installed_files?form.splunk_server=%s|File Integrity Check View]] ; potentially restore files from installation media, change practices to avoid changing files, or work with support to identify the problem.
|
|
severity = warn
|
|
capabilities = admin_all_objects
|
|
help = message.validate.files
|
|
|
|
[INSTALLED_FILES_INTEGRITY:INVALID_BUT_OK_BUG]
message = Integrity Checker returned state OK but was not valid(); this is a bug. As a result, file integrity information is not available.
severity = error
|
|
capabilities = admin_all_objects
|
|
help = message.validate.files
|
|
|
|
[INSTALLED_FILES_INTEGRITY:IN_PROGRESS_BUG]
|
|
message = Integrity Checker ran and completed, but claimed it is still in progress; this is a bug. As a result, no trustworthy file integrity information can be produced.
|
|
severity = error
|
|
capabilities = admin_all_objects
|
|
help = message.validate.files
|
|
|
|
[INSTALLED_FILES_INTEGRITY:MANIFEST_READ_FAILURE]
|
|
message = Unable to access or parse the contents of manifest file in SPLUNK_HOME directory. As a result, file integrity information is not available.
action = Verify that the manifest file in the SPLUNK_HOME directory is still present, and that the splunk service user context has read access.
severity = warn
|
|
capabilities = admin_all_objects
|
|
help = message.validate.files
|
|
|
|
|
|
[IP_LOCATION]
|
|
name = Geo IP Processor
|
|
|
|
[IP_LOCATION:BAD_LANG]
|
|
message = Could not parse 'lang' parameter.
|
|
severity = error
|
|
|
|
|
|
[ISEARCH_RESULT_INFRASTRUCTURE]
|
|
name = ISearch Result Infrastructure
|
|
|
|
[ISEARCH_RESULT_INFRASTRUCTURE:NO_USER_CONTEXT]
|
|
message = No user context has been set. You cannot run the search.
|
|
severity = error
|
|
|
|
[ISEARCH_RESULT_INFRASTRUCTURE:RESOURCE_FAILURE]
|
|
message = Operating system thread limit reached; search could not be run.
|
|
severity = error
|
|
|
|
[ISEARCH_RESULT_INFRASTRUCTURE:RES_PROV_LOCAL_FAIL__S]
|
|
message = Failed to create result provider for local peer with the stream '%s'.
|
|
severity = error
|
|
|
|
[ISEARCH_RESULT_INFRASTRUCTURE:RES_PROV_REM_FAIL__S_S_S]
|
|
message = Failed to create result provider for remote peer '%s' at uri '%s' with the stream '%s'.
|
|
severity = error
|
|
|
|
[ISEARCH_RESULT_INFRASTRUCTURE:RP_FAIL__S]
|
|
message = Failed to start search on peer '%s'.
|
|
severity = warn
|
|
|
|
|
|
[JOINPROC]
|
|
name = Join Processor
|
|
|
|
[JOINPROC:ALIAS_FIELDS_MISMATCH__LU_LU]
message = Mismatch in join field(s), left=%lu right=%lu.
severity = error
|
|
|
|
[JOINPROC:FIELDS_NOT_IN_DATA]
|
|
message = The following join field(s) do not exist in the data.
|
|
severity = error
|
|
|
|
[JOINPROC:SORT_ERROR]
|
|
message = Encountered an internal error while sorting the search results.
|
|
severity = error
|
|
|
|
[JOINPROC:UNSPECIFIED_FIELDS__S]
|
|
message = Field(s) to selfjoin on are unspecified. %s.
|
|
severity = error
|
|
|
|
[JOINPROC:USAGE]
|
|
message = Usage: join (<join-options>)* <join-constraint> <dataset>.
|
|
severity = error
|
|
|
|
|
|
[JSONTXN]
|
|
name = JSON Transaction Processor
|
|
|
|
[JSONTXN:MISSING_ACTION]
|
|
message = Action field is required.
|
|
severity = error
|
|
|
|
[JSONTXN:MISSING_CORRELATION]
|
|
message = At least one correlation field is required.
|
|
severity = error
|
|
|
|
[JSONTXN:MISSING_MAXSPAN]
|
|
message = Missing max span.
|
|
severity = error
|
|
|
|
[JSONTXN:INVALID_MAXSPAN]
|
|
message = Invalid max span.
|
|
severity = error
|
|
|
|
|
|
[KEYPROVIDER]
|
|
name = Server-side Encryption (SSE) for Remote Storage
|
|
|
|
[KEYPROVIDER:KMS_DISABLED_EXCEPTION__S_S]
|
|
message = AWS Key Management Service reported that key_id=%s is disabled, specified for volume=%s. Splunk is unable to upload/download data to/from remote storage. This will affect searches as well as indexing.
|
|
action = Check the value for remote.s3.kms.key_id in indexes.conf and ensure this KMS key_id is enabled in AWS.
|
|
severity = error
|
|
capabilities = edit_encryption_key_provider
|
|
|
|
[KEYPROVIDER:KMS_NOT_FOUND_EXCEPTION__S_S]
|
|
message = AWS Key Management Service reported that key_id=%s could not be found, specified for volume=%s. Splunk is unable to upload/download data to/from remote storage. This will affect searches as well as indexing.
|
|
action = Check the value for remote.s3.kms.key_id in indexes.conf and ensure this KMS key_id exists in AWS.
|
|
severity = error
|
|
capabilities = edit_encryption_key_provider
|
|
|
|
[KEYPROVIDER:GCP_KMS_EXCEPTION__S_S]
|
|
message = Google Cloud Key Management Service reported issues with key=%s specified for volume=%s. The indexer is unable to upload/download data to/from remote storage. This will affect searches as well as indexing.
|
|
action = Check the values for remote.gs.gcp_kms.* in indexes.conf and ensure this KMS key exists in Google Cloud and is enabled.
|
|
severity = error
|
|
capabilities = edit_encryption_key_provider
|
|
|
|
|
|
[KV_FORM_PROCESSOR]
|
|
name = KVForm Processor
|
|
|
|
[KV_FORM_PROCESSOR:EMPTY_FORM__S]
|
|
message = The form '%s' is empty.
|
|
severity = warn
|
|
|
|
[KV_FORM_PROCESSOR:UNKNOWN_FORM__S]
|
|
message = The form '%s' is unknown.
|
|
severity = warn
|
|
|
|
|
|
[KV_STORE]
|
|
name = KV Store
|
|
|
|
[KV_STORE:CLUSTER_HAS_NOT_CONFIGURED__S]
|
|
message = Cluster has not been configured on this member. %s.
|
|
severity = error
|
|
|
|
[KV_STORE:CLUSTER_IS_INACTIVE__S]
|
|
message = Cluster is inactive. %s.
|
|
severity = error
|
|
|
|
[KV_STORE:FAILED_CLUSTER__S]
|
|
message = Failed to synchronize configuration with KVStore cluster. %s.
|
|
severity = error
|
|
|
|
[KV_STORE:FAILED_SYNC_COLLECTIONS_CONF]
|
|
message = Failed to sync collection configurations. See splunkd.log for details.
|
|
severity = error
|
|
|
|
[KV_STORE:FAILED_TO_AUTH]
|
|
message = Failed to connect to KVStore cluster. Authentication error. Check splunkd.log for details.
|
|
severity = error
|
|
|
|
[KV_STORE:FAILED_TO_COMMUNICATE__S]
|
|
message = Failed to establish communication with KVStore. See splunkd.log for details. %s.
|
|
severity = error
|
|
|
|
[KV_STORE:FAILED_TO_LOAD_LOCAL_CLUSTER_INFO__S]
|
|
message = Could not retrieve local cluster information. %s.
|
|
severity = error
|
|
|
|
[KV_STORE:FAILED_TO_START]
|
|
message = Failed to start KV Store process. See mongod.log and splunkd.log for details.
|
|
severity = error
|
|
|
|
[KV_STORE:FAILED__S]
|
|
message = KV Store changed status to failed. %s.
|
|
severity = error
|
|
|
|
[KV_STORE:FAILED_TO_MIGRATE]
|
|
message = Failed to start KV Store due to a previously failed migration.
|
|
action = Rerun the migration using the "splunk migrate migrate-kvstore" CLI command.
|
|
severity = error
|
|
capabilities = admin_all_objects
|
|
help = message.kvstore.migration
|
|
|
|
[KV_STORE:LOCAL_FAILED_CLUSTER__S]
|
|
message = Local KV Store has replication issues. See introspection data and mongod.log for details. %s.
|
|
severity = error
|
|
|
|
[KV_STORE:LOCAL_INSTANCE_STATE__S]
|
|
message = Local instance has state %s.
|
|
severity = error
|
|
|
|
[KV_STORE:NOT_AVAILABLE_IN_BUILD]
|
|
message = KVStore is not available in current build.
|
|
severity = error
|
|
|
|
[KV_STORE:TERMINATED]
|
|
message = KVStore process terminated.
|
|
severity = error
|
|
|
|
[KV_STORE:TERMINATED_ABNORMALLY__D_S]
|
|
message = KV Store process terminated abnormally (exit code %d, status %s). See mongod.log and splunkd.log for details.
|
|
severity = error
|
|
|
|
[KV_STORE:KVSTORE_MAINTENANCE]
|
|
message = You cannot use lookup commands that require KV store while it is in maintenance mode. To move KV store out of maintenance mode, contact your Splunk administrator.
|
|
severity = info
|
|
|
|
[KV_STORE:KVSTORE_BACKUP_FAILED]
|
|
message = Failed to backup KV Store.
|
|
action = Check for errors in the splunkd.log file in the $SPLUNK_HOME/var/log/splunk directory.
|
|
capabilities = edit_server,edit_kvstore
|
|
severity = error
|
|
|
|
[KV_STORE:KVSTORE_RESTORE_FAILED]
|
|
message = Failed to restore KV Store.
|
|
action = Check for errors in the splunkd.log file in the $SPLUNK_HOME/var/log/splunk directory.
|
|
capabilities = edit_server,edit_kvstore
|
|
severity = error
|
|
|
|
[KV_STORE:KVSTORE_WT_MIGRATION_FAILED]
|
|
message = Failed to migrate KV Store storage engine to WiredTiger. This prevented running the latest version of KV Store.
|
|
action = Migrate KV Store storage engine to WiredTiger and then upgrade KV Store. The Mmapv1 engine is deprecated.
|
|
capabilities = edit_server,edit_kvstore
|
|
severity = error
|
|
help = message.kvstore.troubleshootingmigrate
|
|
|
|
[KV_STORE:KVSTORE_UPGRADE_FAILED__S]
|
|
message = Failed to upgrade KV Store to the latest version.
|
|
action = KV Store is running an old version, service(%s). Resolve upgrade errors and try to upgrade KV Store to the latest version again.
|
|
capabilities = admin_all_objects
|
|
severity = error
|
|
help = message.kvstore.troubleshootingupgrade
|
|
|
|
[KV_STORE:KVSTORE_WT_MIGRATION_SHC]
|
|
message = Detected KV Store storage engine Mmapv1.
|
|
action = Migrate the KV Store storage engine to WiredTiger and then upgrade KV Store. The Mmapv1 engine is deprecated. See the Splunk platform documentation for more information.
|
|
capabilities = edit_server,edit_kvstore
|
|
severity = warn
|
|
help = message.kvstore.shctroubleshootingmigrate
|
|
|
|
[KV_STORE:KVSTORE_UPGRADE_SHC__S]
|
|
message = KV Store is running an old version, service(%s).
|
|
action = See the Troubleshooting Manual on KV Store upgrades for more information.
|
|
capabilities = admin_all_objects
|
|
severity = warn
|
|
help = message.kvstore.shctroubleshootingupgrade
|
|
|
|
|
|
[KV_TRANSFORMER]
|
|
name = KV Transformer
|
|
|
|
[KV_TRANSFORMER:EVENT_TYPE_EXTRACTIONS_NOT_SUPPORTED]
message = Extracting fields based on event type is not supported during the main search. See the Splunk platform documentation for more information.
severity = debug
|
|
|
|
[KV_TRANSFORMER:FAILED_TO_MATCH_GROUP__S_S]
|
|
message = Failed to find a matching value group for the capturing key_group '%s'. Expecting to find val_group '%s'.
|
|
severity = warn
|
|
|
|
[KV_TRANSFORMER:INDEXOUTOFBOUNDS_INVALID_FORMAT_GROUP__I_S]
message = IndexOutOfBounds: invalid FORMAT capturing group id: id=%i, transform_name='%s'.
severity = warn
|
|
|
|
[KV_TRANSFORMER:INVALID_KV_PARSER__S]
|
|
message = Invalid key-value parser, ignoring it, transform_name='%s'.
|
|
severity = warn
|
|
|
|
[KV_TRANSFORMER:INVALID_TOKENIZER__S_S]
|
|
message = Invalid TOKENIZER '%s' for field '%s': No capturing groups.
|
|
severity = warn
|
|
|
|
[KV_TRANSFORMER:INVALID_TOKENIZER__S_S_S]
|
|
message = Invalid TOKENIZER '%s' for field '%s': %s.
|
|
severity = warn
|
|
|
|
[KV_TRANSFORMER:INVALID_TRANSFORM__S]
|
|
message = The transform '%s' is invalid. Its regex has no capturing groups, but its FORMAT has capturing group references.
|
|
severity = error
|
|
|
|
[KV_TRANSFORMER:MISSING_FORMAT__S]
|
|
message = Missing FORMAT for: transform_name='%s'.
|
|
severity = warn
|
|
|
|
[KV_TRANSFORMER:MISSING_REGEX__S]
|
|
message = Missing REGEX for: transform_name='%s'.
|
|
severity = warn
|
|
|
|
[KV_TRANSFORMER:NO_VALID_KEYS_IN_FORMAT__S]
|
|
message = No valid key names found in FORMAT for transform_name='%s'.
|
|
severity = warn
|
|
|
|
[KV_TRANSFORMER:PARSE_INIT_FAIL__S]
message = Failed to properly initialize the key-value parser for transform_name '%s'. You must define at least one delimiter.
severity = warn
|
|
|
|
[KV_TRANSFORMER:PARSE_KV_FAIL__S]
|
|
message = Failed to parse the key-value pair configuration for transform '%s'.
|
|
severity = warn
|
|
|
|
[KV_TRANSFORMER:REGEX_HAS_NO_CAPTURING_GROUPS__S_S]
|
|
message = Regex '%s' has no capturing groups, transform_name='%s'.
|
|
severity = warn
|
|
|
|
[KV_TRANSFORMER:RE_COMPILE_FAIL__S_S_S]
|
|
message = Cannot compile RE \"%s\" for transform '%s': %s.
|
|
severity = error
|
|
|
|
|
|
[LM_LICENSE]
|
|
name = License Manager
|
|
|
|
[LM_LICENSE:AWS_MARKETPLACE_METERING_FAILURE__LD__D]
|
|
message = Failed to report hourly license usage to AWS Marketplace.
|
|
action = Correct before $t%ld to avoid search restrictions by ensuring Splunk instance can reach AWS endpoints. The next metering attempt will be in %d minutes.
|
|
severity = warn
|
|
capabilities = license_edit
|
|
|
|
[LM_LICENSE:AWS_MARKETPLACE_METERING_RESTRICTED__D]
|
|
message = Search restrictions in place due to failure to reach AWS endpoints in the last %d hours.
|
|
action = Reenable search by ensuring Splunk instance can reach AWS endpoints. The next metering attempt will be in one minute.
|
|
severity = warn
|
|
capabilities = license_edit
|
|
|
|
[LM_LICENSE:EXPIRED_STATUS__LD]
|
|
message = Your license has expired as of $t%ld.
|
|
action = $CONTACT_SPLUNK_SALES_TEXT$
|
|
capabilities = license_edit
|
|
|
|
[LM_LICENSE:EXPIRING_STATUS__LD]
|
|
message = Your license will soon expire on $t%ld.
|
|
action = $CONTACT_SPLUNK_SALES_TEXT$
|
|
capabilities = license_edit
|
|
|
|
[LM_LICENSE:MANAGER_CONNECTION_ERROR__S_LD_LD]
|
|
message = Failed to contact license manager: reason='%s', first failure time=%ld ($t%ld).
|
|
severity = warn
|
|
capabilities = license_edit
|
|
|
|
[LM_LICENSE:PEER_WARNING_COUNT_SELF__S]
|
|
message = Daily indexing volume limit exceeded.%s
|
|
action = See [[/manager/search/licenseusage|License Manager]] for details.
|
|
severity = warn
|
|
capabilities = license_edit
|
|
|
|
[LM_LICENSE:PEER_WARNING_COUNT__U_S]
|
|
message = Daily indexing volume limit exceeded for %u peers.%s
|
|
action = See [[/manager/search/licenseusage|License Manager]] for details.
|
|
severity = warn
|
|
capabilities = license_edit
|
|
|
|
[LM_LICENSE:ENTERPRISE_POLICY_ENFORCED__U_U]
|
|
message = Per the Splunk Enterprise license policy in effect, search is disabled after %u warnings over a %u-day window.
|
|
|
|
[LM_LICENSE:PEERS_SUBJECT_TO_ENFORCEMENT]
|
|
message = One or more of these peers might be subject to license enforcement.
|
|
|
|
[LM_LICENSE:SELF_SUBJECT_TO_ENFORCEMENT]
|
|
message = Your Splunk deployment is subject to license enforcement.
|
|
|
|
[LM_LICENSE:PEER_WARNING__LD_S]
|
|
message = License warning issued within past 24 hours: $t%ld.
|
|
action = Refer to the License Usage Report view on license manager '%s' to find out more.
|
|
severity = warn
|
|
capabilities = license_edit
|
|
|
|
[LM_LICENSE:WARNINGS_GENERATED]
|
|
message = Licensing warnings will be generated today.
|
|
action = See [[/manager/search/licenseusage|License Manager]] for details.
|
|
severity = warn
|
|
capabilities = license_view_warnings
|
|
help = learnmore.license.features
|
|
|
|
[LM_LICENSE:EMPTY_LM_URI]
|
|
message = High Availability Redundancy mode is enabled, but this License Manager is not configured in a cluster with other License Managers.
|
|
severity = warn
|
|
help = learnmore.license.features
|
|
|
|
[LM_LICENSE:NOT_RESPONSIVE_URI__S]
|
|
message = High Availability Redundancy mode is enabled, but this License Manager cannot reach other License Managers with the URIs: %s.
|
|
severity = warn
|
|
help = learnmore.license.features
|
|
|
|
[LM_LICENSE:NOT_SAME_LICENSE_URI__S]
|
|
message = High Availability Redundancy mode is enabled, but the licenses installed on this License Manager and %s are not the same.
|
|
severity = warn
|
|
help = learnmore.license.features
|
|
|
|
|
|
[LOAD_JOB]
|
|
name = Load Job
|
|
|
|
[LOAD_JOB:CANNOT_FIND_JOB__S]
|
|
message = Cannot find job_id '%s'.
|
|
severity = error
|
|
|
|
[LOAD_JOB:INVALID_SAVEDSEARCH__S]
|
|
message = The savedsearch argument format is invalid. Expecting '%s'.
|
|
severity = error
|
|
|
|
[LOAD_JOB:JOB_HAS_NO_EVENTS__S]
|
|
message = There are no events in the artifacts of job_id '%s'.
|
|
severity = error
|
|
|
|
[LOAD_JOB:NOT_DONE_YET__S]
|
|
message = Artifacts are unavailable because the job (job_id='%s') is still running.
|
|
severity = error
|
|
|
|
[LOAD_JOB:NO_ARTIFACTS_TIMERANGE__S]
|
|
message = Cannot find artifacts within the search time range for savedsearch_ident '%s'.
|
|
severity = error
|
|
|
|
[LOAD_JOB:NO_ARTIFACTS__S]
|
|
message = Cannot find artifacts for savedsearch_ident '%s'.
|
|
severity = error
|
|
|
|
[LOAD_JOB:PERMISSION__S]
|
|
message = Permission denied. Cannot access artifacts of job_id '%s'.
|
|
severity = error
|
|
|
|
[LOAD_JOB:PROXYING_502_RESPONSE__S]
message = Error proxying the search artifact for job '%s'. Either the job does not exist, or there was an error communicating with the searchhead clustering captain.
severity = error
|
|
|
|
[LOAD_JOB:PROXYING_ADHOC_JOB__S]
|
|
message = The search artifact for job '%s' is not available because we cannot proxy an ad-hoc job in a searchhead cluster. Run the search locally.
|
|
severity = error
|
|
|
|
[LOAD_JOB:PROXYING_RUNNING_JOB__S_S]
|
|
message = The search artifact for job '%s' is not available because we cannot proxy a job that is not yet completed in searchhead clustering. Wait for the job to finish or run it locally. Job state='%s'.
|
|
severity = error
|
|
|
|
[LOAD_JOB:SID_INVALID_CHAR__S]
|
|
message = Job_id '%s' is invalid. Valid IDs are not reserved filenames, do not start with '.', and contain only letters, numbers, or the following characters: '_ .!#$%%&'()+,-;=@[]^`{}~'.
|
|
severity = error
|
|
|
|
[LOAD_JOB:TIMEOUT_WAITING_FOR__S]
|
|
message = Timed out while waiting for parent job '%s' to finish running.
|
|
severity = error
|
|
|
|
|
|
[LOOKUP]
|
|
name = Lookup Operator
|
|
|
|
[LOOKUP:ALL_FIELDS_IN_LOOKUP_TABLE_SPECIFIED_AS_LOOKUPS_LEAVING_NO_DESTINATION_FIELDS]
|
|
message = All of the fields in the lookup table are specified as lookups, leaving no destination fields.
|
|
severity = error
|
|
|
|
[LOOKUP:CANNOT_PERFORM_LOOKUP__S]
|
|
message = Lookup '%s' returned with errors. Cannot perform lookup.
|
|
severity = error
|
|
|
|
[LOOKUP:CANNOT_USE_TIMEFIELD_AS_A_LOOKUP_FIELD]
|
|
message = You cannot use timefield as a lookup field.
|
|
severity = error
|
|
|
|
[LOOKUP:MISSING_DESTINATION_FIELD_IN_LOOKUP_TABLE__S_S]
|
|
message = Cannot find the destination field '%s' in the lookup table '%s'.
|
|
severity = error
|
|
|
|
[LOOKUP:MISSING_SOURCE_FIELD_IN_LOOKUP_TABLE__S_S]
|
|
message = Cannot find the source field '%s' in the lookup table '%s'.
|
|
severity = error
|
|
|
|
[LOOKUP:COULD_NOT_FIND_REQUIRED_FOR_LOOKUP__S_S]
|
|
message = Could not find '%s'. It is required for lookup '%s'.
|
|
severity = error
|
|
|
|
[LOOKUP:COULD_NOT_GET_SIZE_OF_FILE__S]
|
|
message = Could not get the size of file '%s'.
|
|
severity = error
|
|
|
|
[LOOKUP:COULD_NOT_READ_LOOKUP_TABLE_FILE__S]
|
|
message = Could not read lookup table file '%s'.
|
|
severity = error
|
|
|
|
[LOOKUP:COULD_NOT_REOPEN_LOOKUP_TABLE_FILE__S]
|
|
message = Failed to re-open lookup file: '%s'
|
|
severity = error
|
|
|
|
[LOOKUP:DISABLED_LOOKUP__S]
|
|
message = The lookup table '%s' is disabled. Contact your system administrator.
|
|
severity = error
|
|
|
|
[LOOKUP:EMPTY_CSV_FILE__S_S]
|
|
message = The following csv lookup file for table '%s' is empty: %s
|
|
severity = error
|
|
|
|
[LOOKUP:ERROR_FOR_CONF_LOOKUP_TABLE__S_S_S]
|
|
message = Error '%s' for conf '%s' and lookup table '%s'.
|
|
severity = error
|
|
|
|
[LOOKUP:ERROR_FOR_CONF_LOOKUP__S_S_S]
|
|
message = Error '%s' for conf '%s' and lookup '%s'.
|
|
severity = error
|
|
|
|
[LOOKUP:ERROR_READING_LOOKUP_TABLE__S]
|
|
message = Error reading lookup table '%s'.
|
|
severity = error
|
|
|
|
[LOOKUP:EVENT_TIME_FIELD_SPECIFIED_FOR_NON_TEMPORAL_LOOKUP__S]
message = The event_time_field option is not supported for lookup '%s' because it is not a temporal lookup.
severity = error
|
|
|
|
[LOOKUP:EXPECTING_LOOKUP_TABLE_NAME_AS_FIRST_ARGUMENT]
|
|
message = Expecting lookup table name as the first argument.
|
|
severity = error
|
|
|
|
[LOOKUP:EXTERNAL_COMMAND_BASED_LOOKUP_DISABLED__S]
|
|
message = External command based lookup '%s' is disabled because KV Store is disabled.
|
|
severity = error
|
|
|
|
[LOOKUP:EXTERNAL_COMMAND_BASED_LOOKUP_FAILED__S]
|
|
message = External command based lookup '%s' is not available because KV Store initialization has failed. Contact your system administrator.
|
|
severity = error
|
|
|
|
[LOOKUP:EXTERNAL_COMMAND_BASED_LOOKUP_REQUIRES_AT_LEAST_2_DISTINCT_FIELDS_IN__S_S]
|
|
message = The external-command-based lookup '%s' requires at least 2 distinct fields in '%s'.
|
|
severity = error
|
|
|
|
[LOOKUP:EXTERNAL_COMMAND_BASED_LOOKUP_REQUIRES_KEY__S_S]
|
|
message = The external-command-based lookup '%s' requires the '%s' key.
|
|
severity = error
|
|
|
|
[LOOKUP:EXTERNAL_COMMAND_BASED_LOOKUP_SHUTTING_DOWN__S]
|
|
message = External command based lookup '%s' is not available because KV Store is shutting down.
|
|
severity = error
|
|
|
|
[LOOKUP:EXTERNAL_COMMAND_BASED_LOOKUP_STARTING__S]
|
|
message = External command based lookup '%s' is not available because KV Store initialization has not completed yet.
|
|
severity = error
|
|
|
|
[LOOKUP:EXTERNAL_COMMAND_BASED_LOOKUP_STOPPING__S]
|
|
message = External command based lookup '%s' is not available because KV Store is being stopped.
|
|
severity = error
|
|
|
|
[LOOKUP:EXTERNAL_COMMAND_BASED_LOOKUP_MAINTENANCE__S]
|
|
message = External command based lookup '%s' is not available because KV Store is in maintenance-mode.
|
|
severity = error
|
|
|
|
[LOOKUP:EXTERNAL_COMMAND_BASED_LOOKUP_UNKNOWN__S]
|
|
message = External command based lookup '%s' is not available because KV Store status is currently unknown.
|
|
severity = error
|
|
|
|
[LOOKUP:EXTERNAL_COMMAND_BASED_PYTHON_RUNTIME_UNKNOWN__S_S]
|
|
message = External command based lookup '%s' failed because the selected Python runtime was not found.
|
|
severity = error
|
|
|
|
[LOOKUP:FIELDS_EMPTY]
|
|
message = Field names cannot be empty.
|
|
severity = error
|
|
|
|
[LOOKUP:FILTER_STRING_NOT_OPTIMIZED__S]
|
|
message = The '%s' filter could not be optimized for search results.
|
|
severity = error
|
|
|
|
[LOOKUP:FILTER_STRING_NOT_VERIFIED__S]
|
|
message = The '%s' filter could not be verified. It might contain invalid operators, or could not be optimized for search results.
|
|
severity = error
|
|
|
|
[LOOKUP:GEO_HEX_LOOKUP_RES_MISSING]
|
|
message = The lookup input requires a res field.
|
|
severity = error
|
|
|
|
[LOOKUP:GEO_HEX_INVALID_PLATFORM]
|
|
message = The platform does not support H3.
|
|
severity = error
|
|
|
|
[LOOKUP:GEO_LOOKUP_LATITUDE_MISSING]
|
|
message = The lookup input requires a latitude field.
|
|
severity = error
|
|
|
|
[LOOKUP:GEO_LOOKUP_LONGITUDE_MISSING]
message = The lookup input requires a longitude field.
severity = error
|
|
|
|
[LOOKUP:IMPLICIT_TABLE__S]
|
|
message = Assuming implicit lookup table with filename '%s'.
|
|
severity = info
|
|
|
|
[LOOKUP:INITIALIZE_INDEX_ERROR__S]
|
|
message = Error initializing index: '%s'
|
|
severity = error
|
|
|
|
[LOOKUP:INPUT_FIELD_NOT_SPECIFIED]
|
|
message = Must specify one or more lookup fields.
|
|
severity = error
|
|
|
|
[LOOKUP:INVALID_CANNOT_BE_USED_FOR_LOOKUP__S_S]
|
|
message = '%s' is invalid. It cannot be used for lookup '%s'.
|
|
severity = error
|
|
|
|
[LOOKUP:INVALID_COMPARATOR_PASSED_TO_EXPAND_SEARCH__S]
|
|
message = The invalid comparator '%s' was passed to expandSearch.
|
|
severity = warn
|
|
|
|
[LOOKUP:INVALID_LOOKUP__S]
|
|
message = Could not construct lookup '%s'. See search.log for more details.
|
|
severity = error
|
|
|
|
[LOOKUP:INVALID_MAX_MATCHES__S_LU]
|
|
message = The value for '%s' is invalid. It must be > 0 and <= %lu.
|
|
severity = warn
|
|
|
|
[LOOKUP:INVALID_MIN_MATCHES__S_LU]
|
|
message = The value for '%s' is invalid. It must be >= 0 and <= %lu.
|
|
severity = warn
|
|
|
|
[LOOKUP:INVALID_PATHNAME_CONTAINS_NUL__S]
|
|
message = Invalid pathname for lookup: '%s'. Pathname contains a NUL character.
|
|
severity = error
|
|
|
|
[LOOKUP:INVALID_VALUE__S_S]
|
|
message = The value for %s is invalid. Assuming %s.
|
|
severity = warn
|
|
|
|
[LOOKUP:DEPRECATED_VALUE_CSMATCH__S]
|
|
message = The default case-sensitive behavior for reverse lookup '%s' will change in future releases to be case-insensitive.
|
|
action = Enable DEBUG logging on the LookupDataProvider component to see all reverse lookup definitions returning this error. For each lookup definition, set 'reverse_lookup_honor_case_sensitive_match=false' to use the new behavior and disable this message.
|
|
help = learnmore.troubleshooting.enabledebuglogging
|
|
severity = info
|
|
|
|
[LOOKUP:INVALID_VALUE_FOR_PYTHON_VERSION__S_S]
|
|
message = Invalid value of %s for lookup: '%s'. Using system default.
|
|
severity = warn
|
|
|
|
[LOOKUP:INVALID_VALUE_FOR_TIMEFORMAT__S]
|
|
message = The value for timeformat '%s' is invalid.
|
|
severity = error
|
|
|
|
[LOOKUP:DEFAULT_VALUE_FOR_TIMEFORMAT]
message = No value was provided for timeformat. Using the default time format.
severity = debug
|
|
|
|
[LOOKUP:KVSTORE_DISABLED]
|
|
message = KV Store is disabled in this Splunk distribution.
|
|
severity = error
|
|
|
|
[LOOKUP:KVSTORE_RETURNED_ERROR__S_D]
|
|
message = External lookup table '%s' returned error code %d. Results might be incorrect.
|
|
severity = error
|
|
|
|
[LOOKUP:KV_STORE_DATA_FILE_DOES_NOT_EXIST__S]
|
|
message = Failed to find KV store data file: '%s'
|
|
severity = error
|
|
|
|
[LOOKUP:KV_STORE_TABLE_EMPTY_NOT_REPLICATED__S_S]
|
|
message = The '%s' KV Store lookup table is empty or has not yet been replicated to the search peer (path used is: %s).
|
|
severity = error
|
|
|
|
[LOOKUP:LOOKUP_FIELDS_NOT_INDEXED]
|
|
message = Unable to perform the lookup. One or more fields required for the lookup are not indexed. If index_fields_list is set for the lookup configuration in transforms.conf, verify that it lists all required fields for the lookup.
|
|
severity = error
|
|
|
|
[LOOKUP:MAX_MATCHES_CANNOT_BE_LESS_THAN_MIN_MATCHES__LU]
|
|
message = The max_matches value cannot be less than that of min_matches. Setting max_matches = min_matches (%lu).
|
|
severity = warn
|
|
|
|
[LOOKUP:MIN_LARGER_THAN_MAX__S_LD_S_LD]
|
|
message = The value for '%s' (%ld) must be >= '%s' (%ld). Reverting to defaults.
|
|
severity = warn
|
|
|
|
[LOOKUP:MISSING_ALIAS__S]
|
|
message = Missing alias after field '%s' in lookup command.
|
|
severity = error
|
|
|
|
[LOOKUP:MISSING_WHERE_CLAUSE__S]
|
|
message = The '%s' keyword must be followed by a search clause.
|
|
severity = error
|
|
|
|
[LOOKUP:PATH_SEPARATOR_IN_FILENAME__S_S]
|
|
message = Lookup %s cannot be used because the configured path to the lookup file contains path separators, which are unsupported. Using %s instead. Lookup files are expected in $SPLUNK_HOME/etc/system/lookups/ or $SPLUNK_HOME/etc/<app_name>/lookups/. Move the lookup file there and reconfigure the "file" setting without path separators.
|
|
severity = error
|
|
|
|
[LOOKUP:PERMISSIONS_FAILED_DISABLED__S_S_S]
|
|
message = Lookup failed for user '%s' because collection '%s' in app '%s' is disabled.
|
|
severity = error
|
|
|
|
[LOOKUP:PERMISSIONS_FAILED_NOT_FOUND__S_S_S]
|
|
message = Lookup failed because collection '%s' in app '%s' does not exist, or user '%s' does not have read access.
|
|
severity = error
|
|
|
|
[LOOKUP:PERMISSIONS_FAILED_UNKNOWN__S_S_S_S]
|
|
message = Lookup failed for collection '%s' in app '%s' for user '%s': %s.
|
|
severity = error
|
|
|
|
[LOOKUP:POSSIBLE_MAC_LINE_ENDINGS__S_S]
|
|
message = File for lookup table '%s' might be using unsupported mac-style line endings (carriage returns only): %s.
|
|
severity = warn
|
|
|
|
[LOOKUP:SCRIPT_RETURNED_NONZERO_REVERSE__S_D]
|
|
message = Script for lookup table '%s' returned non-zero (%d) for a reverse lookup; assuming reverse lookup is unknown.
|
|
severity = error
|
|
|
|
[LOOKUP:SCRIPT_RETURNED_NONZERO__S_D]
|
|
message = Script for lookup table '%s' returned error code %d. Results might be incorrect.
|
|
severity = error
|
|
|
|
[LOOKUP:TABLE_DOES_NOT_EXIST_REFERENCED_BY_CONF__S_S]
|
|
message = The lookup table '%s' does not exist. It is referenced by configuration '%s'.
|
|
severity = error
|
|
|
|
[LOOKUP:TABLE_EMPTY__S]
|
|
message = Lookup table '%s' is empty.
|
|
severity = info
|
|
|
|
[LOOKUP:TABLE_NAME_EMPTY]
|
|
message = Lookup table names cannot be empty.
|
|
severity = error
|
|
|
|
[LOOKUP:TABLE_NOT_EXIST_OR_NOT_AVAILABLE__S]
|
|
message = The lookup table '%s' does not exist or is not available.
|
|
severity = error
|
|
|
|
[LOOKUP:TABLE_NOT_REPLICATED__S_S]
|
|
message = The '%s' KV Store lookup table is empty or has not yet been replicated to the search peer (path used is: %s).
|
|
severity = error
|
|
|
|
[LOOKUP:TIME_FIELD_NOT_SPECIFIED_IN_FIELDS_LIST__S_S]
|
|
message = The "time_field" ('%s') was not specified in the "fields_list" for lookup '%s'.
|
|
action = Add the "time_field" setting to the "fields_list" for lookup '%s', and then rerun your search.
|
|
severity = error
|
|
|
|
[LOOKUP:UNABLE_TO_PARSE_EVENT_TIME_FIELD__S]
message = Unable to parse event_time_field='%s'. Check whether it is in epoch format.
severity = warn
|
|
|
|
[LOOKUP:UNRESOLVED_LOOKUPS_ON_LOOKUP_TABLE_DESTRUCTION]
|
|
message = There were unresolved lookups on lookup table destruction.
|
|
severity = warn
|
|
|
|
[LOOKUP:UNSUPPORTED_COMMAND_TYPE_FOR_LOOKUP__S_S]
|
|
message = Command type '%s' is unsupported for lookup '%s'.
|
|
severity = error
|
|
|
|
[LOOKUP:USAGE_LOOKUPTEST_FIELD_CMP_VALUE]
|
|
message = Usage: lookuptest <field> <cmp> <value>.
|
|
severity = error
|
|
|
|
[LOOKUP:WILDCARD_REQUIRE_INMEM__S]
|
|
message = Error using lookup table '%s': CIDR and wildcard matching is restricted to lookup files under the in-memory size limit.
|
|
severity = error
|
|
|
|
[LOOKUP:INGEST_LOOKUP_INVALID_PROVIDER]
|
|
message = A lookup() eval function is misconfigured. See search.log for more details.
|
|
severity = error
|
|
|
|
[LOOKUP:INGEST_LOOKUP_NON_JSON_ARGUMENTS_PROVIDED]
|
|
message = A lookup() eval function is misconfigured. Such functions require JSON-formatted input and output fields.
|
|
severity = error
|
|
|
|
[LOOKUP:INGEST_LOOKUP_INPUT_NON_JSONOBJECT_ARGUMENTS_PROVIDED]
|
|
message = A lookup() eval function is misconfigured. Such functions require JSON objects for their input fields.
|
|
severity = error
|
|
|
|
[LOOKUP:INGEST_LOOKUP_OUTPUT_NON_JSONARRAY_ARGUMENTS_PROVIDED]
|
|
message = A lookup() eval function is misconfigured. Such functions require JSON arrays for their output fields.
|
|
severity = error
|
|
|
|
[LOOKUP:SSC_LOOKUP_PROVIDER_NON_EXACT_MATCHES_NOT_SUPPORTED__S]
message = Lookup '%s' specified non-exact matches. Non-exact matches are not supported.
severity = error
|
|
|
|
[LOOKUP:LOOKUP_FIELD_LEADS_TO_CYCLE__S]
|
|
message = Cannot expand lookup field '%s' due to a reference cycle in the lookup configuration.
|
|
action = Check search.log for details and update the lookup configuration to remove the reference cycle.
|
|
severity = warn
|
|
help = lookup.reference.cycle
|
|
|
|
|
|
[FIELDALIASER]
|
|
name = Field Aliaser
|
|
|
|
[FIELDALIASER:LEGACY_FIELDALIAS_MODE__S_S_S]
|
|
message = This search relies on field aliasing behavior that is deprecated. Not removing dest='%s' despite src='%s' missing for conf='%s'.
|
|
severity = warn
|
|
help = manage.fieldalias.behavior
|
|
|
|
|
|
[MANUAL_DETENTION]
|
|
name = Manual Detention
|
|
|
|
[MANUAL_DETENTION:SET_SEARCH_HEAD_MANUAL_DETENTION]
|
|
message = This search head is in manual detention. It will not run new searches while in detention.
|
|
severity = info
|
|
capabilities = list_search_head_clustering
|
|
|
|
|
|
[TOJSON]
|
|
name = ToJson Processor
|
|
|
|
[TOJSON:INVALID_FIELD__S]
|
|
message = The field '%s' is invalid.
|
|
severity = error
|
|
|
|
[TOJSON:DUPLICATE_FIELD__S]
|
|
message = The field '%s' was already specified.
|
|
severity = error
|
|
|
|
[TOJSON:NO_ENDING_PARENTHESES__S]
message = The field '%s' has no closing parenthesis for the associated cast type.
severity = error
|
|
|
|
|
|
[MAKERESULTS]
|
|
name = Makeresults Processor
|
|
|
|
[MAKERESULTS:EXCEEDING_MAX_INLINE__LLU]
|
|
message = You have reached the inline data character limit for 'makeresults'. Inlined data must be %llu characters or less.
|
|
severity = error
|
|
|
|
[MAKERESULTS:NEED_JSON_OBJECTS__S]
|
|
message = Incorrectly-formatted JSON data detected. The data entry '%s' is not permitted in the 'data' option. Only comma-separated JSON objects are permitted.
|
|
severity = error
|
|
|
|
[MAKERESULTS:JSON_ARRAY_NEEDED]
|
|
message = Incorrectly-formatted JSON data detected. Make sure your JSON-formatted data starts with '[' and ends with ']' and consists of JSON objects.
|
|
severity = error
|
|
|
|
[MAKERESULTS:FORMAT_AND_DATA_REQUIRED]
|
|
message = You must specify both 'format' and 'data' arguments for 'makeresults' to read inline data.
|
|
action = If you are providing inline data, specify both 'format' and 'data'. If you are not providing inline data, do not specify either argument.
|
|
severity = error
|
|
|
|
[MAKERESULTS:FORMAT_SPECIFIED_NO_OTHER_OPTIONS]
|
|
message = When 'makeresults' generates events from inline data, it does not allow arguments other than 'format' and 'data'.
|
|
action = If you are providing inline data for 'makeresults', specify only the 'format' and 'data' arguments.
|
|
severity = error
|
|
|
|
[MAKERESULTS:INVALID_FORMAT__S]
|
|
message = An invalid 'format' was specified: %s. Valid 'format' options are 'csv' and 'json'.
|
|
severity = error
|
|
|
|
|
|
[MAP]
|
|
name = Map Operator
|
|
|
|
[MAP:CANNOT_RUN__S]
|
|
message = Unable to run query '%s'.
|
|
severity = warn
|
|
|
|
[MAP:COUNT_TOO_LARGE__LU_LU]
message = The search result count (%lu) exceeds the maximum (%lu); using the maximum. To override it, set maxsearches appropriately.
severity = warn
|
|
|
|
[MAP:NO_SAVED_SPLUNK__S]
|
|
message = Unable to find saved search '%s'.
|
|
severity = error
|
|
|
|
[MAP:USAGE]
|
|
message = Usage: (search="subsearch" | saved_search_name).
|
|
severity = error
|
|
|
|
[MAP:VALUE_NOT_FOUND__S]
message = Did not find a value for the required attribute '%s'.
severity = error
|
|
|
|
|
|
[META_SEARCH]
|
|
name = Meta Search
|
|
|
|
[META_SEARCH:INVALID_NEED_RAW]
|
|
message = Invalid metasearch. Rawdata is required for this search.
|
|
severity = error
|
|
|
|
[META_SEARCH:FLEX_INDEX_SEARCH_SLOW]
|
|
message = This search is querying a flex index. Expect slower search performance.
|
|
severity = info
|
|
|
|
|
|
[METADATA]
|
|
name = Metadata Search Processor
|
|
|
|
[METADATA:COULD_NOT_RETRIEVE_MAIN_VALUE_FOR_ROW_OF_TYPE_SKIPPING__S]
|
|
message = Could not retrieve the main value for a row of type '%s'. Skipping.
|
|
severity = warn
|
|
|
|
[METADATA:COULD_NOT_RETRIEVE_TOTALCOUNT_VALUE_FOR_ROW_OF_TYPE_SKIPPING__S]
|
|
message = Could not retrieve totalCount value for a row of type '%s'. Skipping.
|
|
severity = warn
|
|
|
|
[METADATA:MUST_SPECIFY_TYPE_ARGUMENT_TO_METADATA_AS_IN_TYPEHOSTS]
|
|
message = You must specify a 'type' argument to 'metadata', as in 'type=hosts'.
|
|
severity = error
|
|
|
|
[METADATA:INVALID_TYPE_ARGUMENT_TO_METADATA]
|
|
message = The 'type' argument to 'metadata' must be one of the following: 'hosts', 'sources', or 'sourcetypes'.
|
|
severity = error
|
|
|
|
[METADATA:THIS_USER_HAS_INSUFFICIENT_PERMISSIONS_TO_GET_METADATA]
|
|
message = You have insufficient permissions to get metadata.
|
|
severity = warn
|
|
|
|
[METADATA:RTWINDOW_NOT_SUPPORTED]
|
|
message = Windowed real-time mode not supported.
|
|
severity = error
|
|
|
|
[METADATA:INVALID_DATATYPE]
|
|
message = Invalid 'datatype'. Possible 'datatype' values include 'event' and 'metric'.
|
|
severity = error
|
|
|
|
|
|
[METRICS]
|
|
name = Metrics Processor
|
|
|
|
[METRICS:APPEND_ONLY_IN_PRESTATS]
|
|
message = The append argument is only valid for prestats mode.
|
|
severity = error
|
|
|
|
[METRICS:COMPUTED_TIMESPAN__S]
|
|
message = Computed timespan = %s
|
|
severity = info
|
|
|
|
[METRICS:EVAL_PROC_ERROR]
|
|
message = Error setting up eval processor for normalized command.
|
|
severity = error
|
|
|
|
[METRICS:EVAL_PROC_INVALID__S]
|
|
message = _normEvalProc invalid in %s.
|
|
severity = error
|
|
|
|
[METRICS:EXPANDED_AND_ORIGINAL_FILTER__S_S]
|
|
message = Initial expanded filtering search: '%s', original search filter: '%s'.
|
|
severity = info
|
|
|
|
[METRICS:FAILED_TIMESPAN_COMPUTE]
|
|
message = Failed to automatically compute an appropriate timespan.
|
|
severity = error
|
|
|
|
[METRICS:MCATALOG_REALTIME_DISALLOWED]
|
|
message = The mcatalog command does not allow real-time search.
|
|
severity = error
|
|
|
|
[METRICS:WC_AGGREGATION_REALTIME_DISALLOWED]
|
|
message = The mstats command does not allow real-time search with a wildcard '*' aggregation.
|
|
severity = error
|
|
|
|
[METRICS:MCATALOG_SPAN_DISALLOWED]
|
|
message = The span argument is invalid for an mcatalog search.
|
|
severity = error
|
|
|
|
[METRICS:MINIFIED_BUCKETS_NOT_SUPPORTED]
|
|
message = Reduced buckets were found but TSIDX reduction is not applicable for metric indexes. Search results might be incomplete.
|
|
action = Consult search.log for more details regarding which buckets were reduced.
|
|
severity = warn
|
|
|
|
[METRICS:MISSING_GROUPBY_FIELDS__S]
|
|
message = Missing field list after '%s' keyword.
|
|
severity = error
|
|
|
|
[METRICS:MISSING_RENAME]
|
|
message = Missing rename after 'AS' argument.
|
|
severity = error
|
|
|
|
[METRICS:MISSING_SEARCH_AFTER_WHERE]
|
|
message = Missing search clause after 'WHERE' keyword.
|
|
severity = error
|
|
|
|
[METRICS:MROLLUP_SOURCE_INDEX_UNSPECIFIED]
message = The metrics source index is not specified for rollup.
severity = error
|
|
|
|
[METRICS:MROLLUP_TARGET_INDEX_UNSPECIFIED]
message = The metrics target index is not specified for rollup.
severity = error
|
|
|
|
[METRICS:MROLLUP_INDEX_NOT_EXIST__S]
|
|
message = Index does not exist: %s
|
|
severity = error
|
|
|
|
[METRICS:MROLLUP_INDEX_NOT_METRICS__S]
|
|
message = Index is not of type metric: %s
|
|
severity = error
|
|
|
|
[METRICS:MROLLUP_TARGET_INDEX_PERMISSION__S]
|
|
message = User does not have permission on specified target index: %s
|
|
severity = error
|
|
|
|
[METRICS:MROLLUP_USER_CANNOT_RUN_MCOLLECT]
message = Insufficient privileges. The user does not have the run_mcollect capability.
severity = error
|
|
|
|
[METRICS:MROLLUP_MISSING_OR_INVALID_SPAN__S]
message = Missing or invalid timespan: %s
severity = error
|
|
|
|
[METRICS:MROLLUP_SPAN_TOO_SMALL__S_S]
message = The specified timespan '%s' is smaller than the minimum supported timespan of '%s' seconds.
severity = error
|
|
|
|
[METRICS:MROLLUP_INVALID_AGGREGATE__S]
|
|
message = Invalid aggregate specified: %s
|
|
severity = error
|
|
|
|
[METRICS:MROLLUP_DIMENSION_LIST_TYPE_INVALID__S]
message = The specified dimension list type dimension-list-type='%s' is not valid.
severity = error
|
|
|
|
[METRICS:MROLLUP_METRIC_LIST_TYPE_INVALID__S]
message = The specified metric list type metric-list-type='%s' is not valid.
severity = error
|
|
|
|
[METRICS:MROLLUP_INVALID_OVERRIDE_AGGREGATE__S_S]
message = An invalid aggregate '%s' is specified in the metric override option '%s'.
severity = error
|
|
|
|
[METRICS:MROLLUP_METRIC_OVERRIDE_INVALID_FORMAT__S]
message = The specified metric overrides metric-overrides='%s' are not in a valid format.
severity = error
|
|
|
|
[METRICS:MROLLUP_MCATALOG_SEARCH_FAILED__S_S]
message = Metrics Catalog Search '%s' to fetch metrics dimensions failed. Exception = '%s'
severity = error
|
|
|
|
[METRICS:MSTATS_INVALID_SPAN__S]
|
|
message = Invalid timespan: %s
|
|
severity = error
|
|
|
|
[METRICS:MSTATS_INVALID_EVERY__S]
|
|
message = Invalid time for every: %s
|
|
severity = error
|
|
|
|
[METRICS:MSTATS_EVERY_NOT_APPLICABLE__S]
message = every: %s is applicable only with a valid non-auto timespan.
severity = error
|
|
|
|
[METRICS:MSTATS_EVERY_IS_LESS_THAN_TIMESPAN__S_S]
message = every: %s cannot be smaller than timespan: %s
severity = error
|
|
|
|
[METRICS:MSTATS_NORMALIZED_NO_METRIC_NAME_FILTER]
|
|
message = Cannot filter on 'metric_name' in normalized syntax.
|
|
severity = error
|
|
|
|
[METRICS:MSTATS_NO_TIMESERIES_FILTER]
|
|
message = Cannot filter on the '_timeseries' field.
|
|
severity = error
|
|
|
|
[METRICS:MSTATS_NO_MIXED_SYNTAX]
|
|
message = Cannot include both normalized and denormalized calculations.
|
|
severity = error
|
|
|
|
[METRICS:MSTATS_NO_WC_RENAMES]
|
|
message = Cannot use wildcards in mstats renames.
|
|
severity = error
|
|
|
|
[METRICS:MSTATS_REALTIME_SEARCH_ONLY__S]
|
|
message = The following argument can only be applied to real-time search: %s
|
|
severity = error
|
|
|
|
[METRICS:MSTATS_REALTIME_WINDOWED_SEARCH_ONLY__S]
|
|
message = The following argument can only be applied to windowed real-time search: %s
|
|
severity = error
|
|
|
|
[METRICS:MSTATS_WC_PRESTATS_DISALLOWED]
|
|
message = Wildcard arguments are not supported for mstats in prestats mode.
|
|
severity = error
|
|
|
|
[METRICS:NORMALIZATION_FAILURE__S]
|
|
message = Failed to properly normalize the following argument: %s
|
|
severity = error
|
|
|
|
[METRICS:NORMALIZING_EVAL_SEARCH__S]
|
|
message = Mstats normalizing eval search arguments: %s
|
|
severity = info
|
|
|
|
[METRICS:NORMALIZING_RENAME_SEARCH__S]
|
|
message = Mstats normalizing rename search arguments: %s
|
|
severity = info
|
|
|
|
[METRICS:NO_GROUPBY__S]
|
|
message = Grouping by %s is not allowed.
|
|
severity = error
|
|
|
|
[METRICS:NO_METRICS_INDEX]
|
|
message = The specified metrics index was not found on the local host. Set a dummy stats processor to render search results properly.
|
|
severity = info
|
|
|
|
[METRICS:NO_METRICS_INDEX_ACCESS]
|
|
message = You do not have access to the specified metrics indexes on the local host.
|
|
severity = debug
|
|
|
|
[METRICS:NO_METRICS_INDEX_SPECIFIED__S]
|
|
message = No metrics index specified in %s clause, will search from default metric indexes.
|
|
severity = info
|
|
|
|
[METRICS:NO_REALTIME_TIME_BOUNDS]
|
|
message = The WHERE clause time bounds cannot be supported by a real-time search.
|
|
severity = error
|
|
|
|
[METRICS:NO_TIME_GROUPBY]
|
|
message = Specify the span argument to group events by time.
|
|
severity = error
|
|
|
|
[METRICS:PSTATS_PROC_INVALID__S]
|
|
message = _pStatsProc invalid in %s.
|
|
severity = error
|
|
|
|
[METRICS:REALTIME_FAILED_CONNECT]
|
|
message = Failed to connect for real-time.
|
|
severity = error
|
|
|
|
[METRICS:REALTIME_INITIALIZATION_FAILED]
|
|
message = Failed to initialize internal real-time window data structure.
|
|
severity = error
|
|
|
|
[METRICS:REMOTE_MSTATS_SEARCH__S]
|
|
message = Remote mstats search: %s
|
|
severity = debug
|
|
|
|
[METRICS:REPEATED_GROUPBY__S]
|
|
message = Repeated groupby field: %s
|
|
severity = error
|
|
|
|
[METRICS:RTSEARCH_READ_ERROR__S]
|
|
message = Error reading from rtsearch endpoint: error code = %s
|
|
severity = error
|
|
|
|
[METRICS:RTWINDOW_NULL]
|
|
message = _pRTWindowProc unexpectedly NULL in execute_input
|
|
severity = error
|
|
|
|
[METRICS:RT_RETRIEVAL_SUCCESS__LU]
|
|
message = Successfully retrieved %lu results from RealtimeSearchResultInfrastructure::fetchresults()
|
|
severity = debug
|
|
|
|
[METRICS:TERM_SEARCH_NOT_ALLOWED__S]
|
|
message = Metrics indexes do not support term-based search: term=%s
|
|
severity = error
|
|
|
|
[METRICS:TSTATS_ARGS__S]
|
|
message = Initialized tstats with the following arguments: %s
|
|
severity = info
|
|
|
|
[METRICS:TSTATS_PROC_INVALID]
|
|
message = Internal error, failed to construct tstats internal structure.
|
|
severity = error
|
|
|
|
[METRICS:WHERE_NO_MATCHING_EVENTS]
|
|
message = The WHERE clause does not match any events. Returning no results.
|
|
severity = warn
|
|
|
|
|
|
[SENDMODALERT]
|
|
name = Mod Alert Processor
|
|
|
|
[SENDMODALERT:ACTION_NOT_FOUND__S]
|
|
message = Alert action "%s" not found.
|
|
severity = error
|
|
|
|
[SENDMODALERT:CANNOT_ACCESS_RESULTS_FILE__S]
|
|
message = Cannot access results_file: '%s'. Permission denied.
|
|
severity = error
|
|
|
|
[SENDMODALERT:CANNOT_FIND_JOB__S]
|
|
message = Cannot find a job with the search_id '%s'.
|
|
severity = error
|
|
|
|
[SENDMODALERT:INVALID_PAYLOAD_FORMAT__S]
|
|
message = Invalid 'payload_format' specified ('%s'), expecting 'json' or 'xml'.
|
|
severity = error
|
|
|
|
[SENDMODALERT:RESULTS_FILE_CONTAINS_PATH_SEPARATOR__S]
|
|
message = Results path is invalid. Found path separator character: '%s'.
|
|
severity = error
|
|
|
|
[SENDMODALERT:RESULTS_FILE_PATH_DIR__S]
|
|
message = Results path is invalid. Path points to a directory and not a file: '%s'.
|
|
severity = error
|
|
|
|
[SENDMODALERT:RESULTS_FILE_PATH_OUTSIDE_DISPATCH_DIR__S]
|
|
message = Results path is invalid. Path points outside of dispatch directory: '%s'.
|
|
severity = error
|
|
|
|
[SENDMODALERT:SCRIPT_ERROR__S]
|
|
message = Alert script returned error code %s.
|
|
severity = error
|
|
|
|
[SENDMODALERT:SCRIPT_EXEC_FAILED]
|
|
message = Alert script execution failed.
|
|
severity = error
|
|
|
|
[SENDMODALERT:SCRIPT_NOT_FOUND__S]
|
|
message = Alert action script for action "%s" not found.
|
|
severity = error
|
|
|
|
[SENDMODALERT:INVALID_PYTHON_VERSION__S_S]
|
|
message = The Python version '%s' for modular alert '%s' in alert_actions.conf is invalid.
|
|
severity = error
|
|
|
|
[SENDMODALERT:ALLOWLIST_STATUS_NOT_DEFINED]
|
|
message = No allowlist_status setting found in alert_actions.conf. Blocking all webhook alerts by default.
|
|
severity = error
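# Illustrative sketch only: the message above refers to an 'allowlist_status' setting in
# alert_actions.conf. The setting name comes from the message text; the stanza placement and
# the value shown below are assumptions, so verify against alert_actions.conf.spec for your
# version before applying anything.
#
#   [webhook]
#   allowlist_status = <status>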
[SENDMODALERT:URL_NOT_ALLOWLISTED__S_S]
|
|
message = Search sid="%s" failed because the specified webhook URL="%s" did not match an entry in the list of allowed URLs. Please contact your administrator to add this webhook to the allowed list.
|
|
severity = error
|
|
|
|
|
|
[MODULAR_UTILITY]
|
|
name = Modular Utility
|
|
|
|
[MODULAR_UTILITY:UNABLE_TO_SET_CURRENT_WORKING_DIRECTORY]
|
|
message = Unable to find correct working directory for modular utility.
|
|
severity = error
|
|
|
|
|
|
[MOD_INPUT]
|
|
name = Modular Inputs
|
|
|
|
[MOD_INPUT:INIT_FAILURE__S_S_S]
|
|
message = Unable to initialize modular input "%s" defined in %s: %s.
|
|
severity = error
|
|
capabilities = edit_scripted
|
|
|
|
|
|
[MULTI_KV_TRANSFORMER]
|
|
name = Multi KV Transformer
|
|
|
|
[MULTI_KV_TRANSFORMER:INVALID_ARGUMENT__S]
|
|
message = The argument '%s' is invalid.
|
|
severity = error
|
|
|
|
[MULTI_KV_TRANSFORMER:INVALID_FORCEHEADER]
|
|
message = The forceheader value is invalid. It must be >= 1.
|
|
severity = error
|
|
|
|
[MULTI_KV_TRANSFORMER:INVALID_LAST_LINE]
|
|
message = The last_line value is invalid. It must be >= 1 and > start_line.
|
|
severity = error
|
|
|
|
[MULTI_KV_TRANSFORMER:INVALID_MAXNEWRESULTS]
|
|
message = The maxnewresults value is invalid. It must be >= 1.
|
|
severity = error
|
|
|
|
[MULTI_KV_TRANSFORMER:INVALID_MAX_HEADER_LHD]
|
|
message = The max_header_lookahead value is invalid. It must be >= 1.
|
|
severity = error
|
|
|
|
[MULTI_KV_TRANSFORMER:INVALID_MAX_HEADER_LINE]
|
|
message = The max_header_line value is invalid. It must be >= 1.
|
|
severity = error
|
|
|
|
[MULTI_KV_TRANSFORMER:INVALID_OPTION__S]
|
|
message = The option '%s' is invalid.
|
|
severity = error
|
|
|
|
[MULTI_KV_TRANSFORMER:INVALID_START_LINE]
|
|
message = The start_line value is invalid. It must be >= 1.
|
|
severity = error
|
|
|
|
[MULTI_KV_TRANSFORMER:KEYWORD_SPEC_FAIL__S]
|
|
message = The '%s' keyword must be specified only once.
|
|
severity = error
|
|
|
|
[MULTI_KV_TRANSFORMER:MAXNEWRESULTS_DEPRECATED]
|
|
message = The 'maxnewresults' option to multikv is deprecated. Ignoring option.
|
|
severity = info
|
|
|
|
|
|
[MULTI_REPORT]
|
|
name = Multi Report Processor
|
|
|
|
[MULTI_REPORT:TOO_FEW_PIPELINES]
|
|
message = At least two search pipelines must be specified.
|
|
severity = error
|
|
|
|
|
|
[MULTISEARCH]
|
|
name = MultiSearch Processor
|
|
|
|
[MULTISEARCH:ONLY_STREAM__D]
|
|
message = Multisearch subsearches can contain only purely streaming operations (subsearch %d contains a non-streaming command).
|
|
severity = error
|
|
|
|
[MULTISEARCH:TOO_FEW_PIPELINES]
|
|
message = At least two searches must be specified.
|
|
severity = error
|
|
|
|
|
|
[MULTI_VALUE_PROCESSOR]
|
|
name = Multi Value Processor
|
|
|
|
[MULTI_VALUE_PROCESSOR:BAD_TOKENIZER_REGEX]
|
|
message = The tokenizer regular expression is invalid.
|
|
severity = error
|
|
|
|
[MULTI_VALUE_PROCESSOR:EMPTY_DELIMITER]
|
|
message = Provide a delimiter value.
|
|
severity = error
|
|
|
|
[MULTI_VALUE_PROCESSOR:FIELD_NOT_IN_DATA__S]
|
|
message = Field '%s' does not exist in the data.
|
|
severity = warn
|
|
|
|
[MULTI_VALUE_PROCESSOR:MISSING_FIELD_NAME]
|
|
message = A field name is expected.
|
|
severity = error
|
|
|
|
[MULTI_VALUE_PROCESSOR:OPTION_CONFLICT]
|
|
message = Cannot specify both delim and tokenizer options.
|
|
severity = error
|
|
|
|
|
|
[NAMED_OBJECT]
|
|
name = Named Object
|
|
|
|
[NAMED_OBJECT:DM_MISSING_OBJECT__S]
|
|
message = Missing dataset for data model '%s'.
|
|
severity = error
|
|
|
|
[NAMED_OBJECT:INVALID__S]
|
|
message = Invalid dataset-type '%s'.
|
|
severity = error
|
|
|
|
[NAMED_OBJECT:MISSING_DATASET_TYPE]
|
|
message = Missing dataset-type specifier, expected dataset-type:dataset-name. Verify your search string.
|
|
severity = error
|
|
|
|
[NAMED_OBJECT:MISSING_DATASET_NAME]
|
|
message = Missing dataset-name specifier, expected dataset-type:dataset-name. Verify your search string.
|
|
severity = error
|
|
|
|
[NAMED_OBJECT:USAGE__S]
|
|
message = Invalid dataset specifier '%s', expected dataset-type:dataset-name. Verify your search string.
|
|
severity = error
|
|
|
|
|
|
[NEW_SERIES_FILTER_PROCESSOR]
|
|
name = New Series Filter Processor
|
|
|
|
[NEW_SERIES_FILTER_PROCESSOR:INVALID_AGGREGATOR__S]
|
|
message = The aggregator field '%s' is invalid.
|
|
severity = error
|
|
|
|
[NEW_SERIES_FILTER_PROCESSOR:INVALID_COMP__S]
|
|
message = The comparator '%s' is invalid.
|
|
severity = error
|
|
|
|
[NEW_SERIES_FILTER_PROCESSOR:INVALID_CRITERIA__S]
|
|
message = The criteria '%s' is invalid.
|
|
severity = error
|
|
|
|
[NEW_SERIES_FILTER_PROCESSOR:INVALID_THRESHOLD__S]
|
|
message = The numerical threshold '%s' is invalid.
|
|
severity = error
|
|
|
|
[NEW_SERIES_FILTER_PROCESSOR:INVALID_USAGE]
|
|
message = The usage is invalid.
|
|
severity = error
|
|
|
|
[NEW_SERIES_FILTER_PROCESSOR:INVALID_USAGE__S]
|
|
message = The usage is invalid. Expected usage: %s <xfield> <seriesfield> <aggregator> <comp> <criteria> [options].
|
|
severity = error
|
|
|
|
[NEW_SERIES_FILTER_PROCESSOR:SETMV_FAIL__S]
|
|
message = Encountered a setMultiValues() error on the '%s' field.
|
|
severity = error
|
|
|
|
|
|
[NEW_CHART_PROCESSOR]
|
|
name = New Chart Processor
|
|
|
|
[NEW_CHART_PROCESSOR:CANNOT_BE_EMPTY__S]
|
|
message = Provide a value for '%s'.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:CONTINUITY_PROCESSOR_FOUND_EVENT_WITH_MISSING_FIELD__S]
|
|
message = The ContinuityProcessor processed an event missing the '%s' field.
|
|
severity = warn
|
|
|
|
[NEW_CHART_PROCESSOR:EMPTY_SPLITBY]
|
|
message = Provide a split-by field value.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:EMPTY_X_AXIS]
|
|
message = Provide an x-axis field value.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:EVAL_DIRECT_FIELD]
|
|
message = Only the split-by and x-axis fields can be directly referenced in the eval expression.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:EVAL_ERROR__S]
|
|
message = Encountered error '%s' while parsing the eval expression.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:EVAL_HAS_NO_FIELDS__S]
|
|
message = The eval expression has no fields: '%s'.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:EVAL_NOPER__S]
|
|
message = You cannot use 'per_*' aggregators in eval expression '%s'.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:EVAL_REQ_RENAME__S]
|
|
message = The eval expression '%s' must be renamed.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:EVAL_REQ_SPLITBY]
|
|
message = Complex eval expressions are only supported when you have specified a split-by field.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:EXPECTED_SPLITBY]
|
|
message = A split-by field is expected.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:EXPECTED_X_AFTER__S]
|
|
message = An x-axis field value is expected after the '%s' keyword.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:EXPECTED_X_AXIS]
|
|
message = An x-axis field value is expected.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:INVALID_AGG__S]
|
|
message = The series aggregator function '%s' is invalid.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:INVALID_ARGUMENT__S]
|
|
message = The argument '%s' is invalid.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:INVALID_FIELD_NAME__S]
|
|
message = The field name '%s' is invalid.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:INVALID_SPECIFIER__S]
|
|
message = The specifier '%s' is invalid. It must be in form <func>(<field>). For example: max(size).
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:INVALID_SUBEVAL__S]
|
|
message = The dynamically evaluated field expression '%s' is invalid. 'eval(...)' must have a single expression as an argument.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:MALFORMED_FIELD__S]
|
|
message = The data field '%s' is malformed.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:MISSING_FIELDS]
|
|
message = You must specify data field(s) to chart.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:NO_EFFECT_OPTIONS_WITH_SPLITBY__S]
|
|
message = The following options were specified but have no effect when a split-by clause is not provided: %s.
|
|
severity = warn
|
|
|
|
[NEW_CHART_PROCESSOR:NO_EFFECT_OPTIONS_WITH_WHERE__S]
|
|
message = The following options were specified but have no effect when the 'where' clause is given: %s.
|
|
severity = warn
|
|
|
|
[NEW_CHART_PROCESSOR:NO_MULTI_FIELDS]
|
|
message = When you specify a split-by field, only single functions applied to a non-wildcarded data field are allowed.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:RENAME_CONFLICT__S]
|
|
message = The specifier '%s' is specified multiple times and renamed with conflicting field names.
|
|
severity = error
|
|
|
|
[NEW_CHART_PROCESSOR:SERIES_FILTER_DISABLED]
|
|
message = Series filtering is disabled if a split-by field is used in conjunction with multiple data series.
|
|
severity = warn
|
|
|
|
[NEW_CHART_PROCESSOR:SUBCOMMAND_EXCEPTION__S]
|
|
message = Caught a subcommand exception: %s.
|
|
severity = error
|
|
|
|
|
|
[ORCHESTRATOR]
|
|
name = Orchestrator
|
|
|
|
[ORCHESTRATOR:TERMINATED_ABNORMALLY__D_S]
|
|
message = Orchestrator process terminated abnormally (exit code %d, status %s). See splunkd.log for details.
|
|
severity = error
|
|
|
|
|
|
[OUTPUT_CSV]
|
|
name = Output CSV
|
|
|
|
[OUTPUT_CSV:APPEND_TO_GZ]
|
|
message = Cannot append to a gzipped file.
|
|
severity = error
|
|
|
|
[OUTPUT_CSV:CONTAINS_DIRECTORY_PATH__S]
|
|
message = '%s' must be a filename, not a path.
|
|
severity = error
|
|
|
|
[OUTPUT_CSV:CREATEDIR_FAIL__S]
|
|
message = Failed to create directory '%s'.
|
|
severity = error
|
|
|
|
[OUTPUT_CSV:CREATE_DIR_FAIL__S]
|
|
message = Could not create a directory for multi-file output '%s'.
|
|
severity = error
|
|
|
|
[OUTPUT_CSV:CREATING_EMPTY_FILE__S]
|
|
message = No results. Created empty file '%s'.
|
|
severity = warn
|
|
|
|
[OUTPUT_CSV:CREATING_EMPTY_KVSTORE__S]
|
|
message = No results. Created empty collection '%s'.
|
|
severity = info
|
|
|
|
[OUTPUT_CSV:DOTS_IN_FILENAME]
|
|
message = Filenames may not contain '..'.
|
|
severity = error
|
|
|
|
[OUTPUT_CSV:HEADER_READ_FAIL__S]
|
|
message = Error reading internal file header '%s'.
|
|
severity = error
|
|
|
|
[OUTPUT_CSV:INVALID_FILENAME_ABS__S]
|
|
message = The file name '%s' is invalid. Absolute paths are not permitted.
|
|
severity = error
|
|
|
|
[OUTPUT_CSV:INVALID_FILENAME__S]
|
|
message = The file name '%s' is invalid.
|
|
severity = error
|
|
|
|
[OUTPUT_CSV:INVALID_JOBDIR]
|
|
message = This search does not have a valid job directory.
|
|
severity = error
|
|
|
|
[OUTPUT_CSV:NOPERMISSION]
|
|
message = You have insufficient privileges to output to var/run/splunk/csv. You can output a temporary csv file (that can only be used within the same search) by passing 'dispatch=t' as an option.
|
|
severity = error
|
|
|
|
[OUTPUT_CSV:NOT_REPLACE_WITH_EMPTY__S]
|
|
message = No results. Retaining existing lookup file '%s'.
|
|
severity = info
|
|
|
|
[OUTPUT_CSV:NO_LOOKUP_NAME]
|
|
message = A lookup table name or file name is required.
|
|
severity = error
|
|
|
|
[OUTPUT_CSV:NO_RESULTS_TO_WRITE_KVSTORE__S_S]
|
|
message = Found no results to %s to collection '%s'.
|
|
severity = warn
|
|
|
|
[OUTPUT_CSV:NO_RESULTS_TO_WRITE__S_S]
|
|
message = Found no results to %s to file '%s'.
|
|
severity = warn
|
|
|
|
[OUTPUT_CSV:RESULTS_WRITTEN_KVSTORE__S]
|
|
message = Results written to collection '%s'.
|
|
severity = info
|
|
|
|
[OUTPUT_CSV:RESULTS_WRITTEN__S_S]
|
|
message = Results written to file '%s' on serverName='%s'.
|
|
severity = info
|
|
|
|
[OUTPUT_CSV:TOO_MANY_RESULTS]
|
|
message = Too many results for memory. Not all results are included in this output.
|
|
severity = warn
|
|
|
|
[OUTPUT_CSV:WRITE_FILE_FAIL__S_S_S]
|
|
message = Could not %s to file '%s': %s.
|
|
severity = error
|
|
|
|
[OUTPUT_CSV:WRITE_FILE_ISSUES__S]
|
|
message = Lookup file '%s' may not have been written.
|
|
severity = error
|
|
|
|
[OUTPUT_CSV:WRITE_KVSTORE_FAIL__S_S_S]
|
|
message = Could not %s to collection '%s': %s.
|
|
severity = error
|
|
|
|
[OUTPUT_CSV:WRITE_RESULT_FAIL__S]
|
|
message = Could not write to the result file '%s'.
|
|
severity = error
|
|
|
|
[OUTPUT_CSV:UNABLE_TO_CREATE_CSVDIR]
|
|
message = Could not create csv dir.
|
|
severity = error
|
|
|
|
|
|
|
|
[OUTPUT_LOOKUP]
|
|
name = Output Lookup
|
|
|
|
[OUTPUT_LOOKUP:NOPERMISSION]
|
|
message = You have insufficient privileges to perform this operation.
|
|
severity = error
|
|
|
|
[OUTPUT_LOOKUP:INVALID_CREATE_CONTEXT__S]
|
|
message = Invalid create_context option '%s'
|
|
severity = error
|
|
|
|
[OUTPUT_LOOKUP:IGNORE_CREATE_CONTEXT]
|
|
message = createinapp option is being used. Ignoring create_context option.
|
|
severity = warn
|
|
|
|
|
|
[PIVOT]
|
|
name = Pivot Evaluator
|
|
|
|
[PIVOT:ASTERISK_IN_LABEL__S]
|
|
message = Cannot use asterisk in label '%s'.
|
|
severity = error
|
|
|
|
[PIVOT:BAD_FIELD_TYPE_FOR_SORT__S]
|
|
message = Cannot sort using field of type '%s'.
|
|
severity = error
|
|
|
|
[PIVOT:BAD_FIELD_TYPE_FOR_SPLIT__S]
|
|
message = Cannot split using field of type '%s'.
|
|
severity = error
|
|
|
|
[PIVOT:BAD_LIMIT_TYPE__S]
|
|
message = Invalid limit type '%s' for sort.
|
|
severity = error
|
|
|
|
[PIVOT:BAD_OBJECT_FOR_COUNT__S]
|
|
message = Cannot get row count for dataset '%s'.
|
|
severity = error
|
|
|
|
[PIVOT:BAD_STATSFN_FOR_FIELD_TYPE__S_S]
|
|
message = Cannot use '%s' on field type '%s'.
|
|
severity = error
|
|
|
|
[PIVOT:EMPTY_FIELD_NAME_FOR_SPLIT]
|
|
message = Must use non-empty field name for split.
|
|
severity = error
|
|
|
|
[PIVOT:EMPTY_OBJECT_NAME]
|
|
message = Pivot requires a base dataset.
|
|
severity = error
|
|
|
|
[PIVOT:EQUAL_IN_LABEL__S]
|
|
message = Cannot use equals sign in label '%s'.
|
|
severity = error
|
|
|
|
[PIVOT:EXCEEDED_MAX_ROWS]
|
|
message = Search exceeds maximum number of rows in a pivot limit filter.
|
|
severity = error
|
|
|
|
[PIVOT:INVALID_COMPARATOR_FOR_TYPE__S_S]
|
|
message = Cannot filter using '%s' on field type '%s'.
|
|
severity = error
|
|
|
|
[PIVOT:INVALID_FILTER_TYPE]
|
|
message = Pivot Evaluator received an invalid filter type.
|
|
severity = error
|
|
|
|
[PIVOT:INVALID_JSON]
|
|
message = The pivot report JSON was not valid.
|
|
severity = error
|
|
|
|
[PIVOT:INVALID_LIMIT_AMOUNT]
|
|
message = Limit amount is outside the allowed range.
|
|
severity = error
|
|
|
|
[PIVOT:INVALID_REPORT]
|
|
message = The pivot report JSON did not specify a valid report.
|
|
severity = error
|
|
|
|
[PIVOT:INVALID_SEARCH_MODE]
|
|
message = Invalid search mode for pivot search string.
|
|
severity = error
|
|
|
|
[PIVOT:MODEL_NOT_LOADED]
|
|
message = Cannot use Pivot because no data model is loaded.
|
|
severity = error
|
|
|
|
[PIVOT:MUST_SPECIFY_MODEL]
|
|
message = User must specify a data model to pivot on.
|
|
severity = error
|
|
|
|
[PIVOT:NEED_CELL_FOR_COL]
|
|
message = Must have non-empty cells for column split.
|
|
severity = error
|
|
|
|
[PIVOT:NEED_CELL_OR_ROW]
|
|
message = Must have non-empty cells or non-empty rows.
|
|
severity = error
|
|
|
|
[PIVOT:NEED_REPORT]
|
|
message = You must specify either 'pivot_json' or 'pivot_search'.
|
|
severity = error
|
|
|
|
[PIVOT:OBJECT_NOT_LOADED]
|
|
message = Cannot use Pivot because no dataset is loaded.
|
|
severity = error
|
|
|
|
[PIVOT:REPORT_NOT_LOADED]
|
|
message = Cannot use Pivot because no report is loaded.
|
|
severity = error
|
|
|
|
[PIVOT:SEARCH_MALFORMED]
|
|
message = Could not parse pivot search. Search appears to be malformed.
|
|
severity = error
|
|
|
|
[PIVOT:SORT_MISSING_ATTRIBUTE]
|
|
message = Missing field to apply stats function to in sort.
|
|
severity = error
|
|
|
|
[PIVOT:SORT_MISSING_FIELD_NAME]
|
|
message = Missing field name to sort by.
|
|
severity = error
|
|
|
|
[PIVOT:SORT_MISSING_STATSFN]
|
|
message = Missing stats function to sort by.
|
|
severity = error
|
|
|
|
[PIVOT:TOKENIZE_FAIL__S]
|
|
message = Pivot Evaluator failed to tokenize search '%s'.
|
|
severity = error
|
|
|
|
[PIVOT:TOO_MANY_MODELS__S]
|
|
message = Found multiple data models with name '%s'.
|
|
severity = warn
|
|
|
|
[PIVOT:UNKNOWN_STATSFN__S]
|
|
message = Invalid stats function '%s' in cell.
|
|
severity = error
|
|
|
|
|
|
[PREVIEW_GENERATOR]
|
|
name = Preview Generator
|
|
|
|
[PREVIEW_GENERATOR:ACCESS_DENIED]
|
|
message = You have insufficient privileges to perform this operation.
|
|
severity = error
|
|
|
|
[PREVIEW_GENERATOR:CANNOT_PARSE_ARG__S]
|
|
message = Cannot parse argument '%s'.
|
|
severity = error
|
|
|
|
[PREVIEW_GENERATOR:CANNOT_RUN_IN_SEP_PROC]
|
|
message = Cannot run this search in a separate process.
|
|
severity = error
|
|
|
|
[PREVIEW_GENERATOR:EXTRA_ARG__S]
|
|
message = A file to preview has already been specified, rejecting '%s'.
|
|
severity = error
|
|
|
|
[PREVIEW_GENERATOR:NO_PREVIEW_FILE]
|
|
message = A file to preview is required.
|
|
severity = error
|
|
|
|
|
|
[PRJOB]
|
|
name = PRJob Processor
|
|
|
|
[PRJOB:FIRST_COMMAND_EXCEPTION__S]
|
|
message = The '%s' command can be used only as the first command in a search.
|
|
severity = error
|
|
|
|
[PRJOB:PHASED_EXECUTION_DISABLED]
|
|
message = To use the 'prjob' command, phased_execution_mode must be set to 'multithreaded' or 'auto' in limits.conf. Ignoring the 'prjob' command.
|
|
severity = warn
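# Illustrative sketch only: the message above refers to the 'phased_execution_mode' setting in
# limits.conf. The setting name and allowed values come from the message text; placing it under
# the [search] stanza is an assumption, so verify against limits.conf.spec for your version.
#
#   [search]
#   phased_execution_mode = auto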
[PRJOB:INSERT_REDISTRIBUTE_COMMAND_DISABLED]
|
|
message = Search optimization for insert_redistribute_command has been disabled in limits.conf.
|
|
action = To use the 'prjob' command, set 'enabled=true' in [search_optimization::insert_redistribute_command].
|
|
severity = error
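# Illustrative sketch only, based on the action text above: re-enabling the optimization in
# limits.conf. The stanza name and the 'enabled' setting are taken from the action text; no
# other values are shown here.
#
#   [search_optimization::insert_redistribute_command]
#   enabled = true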
[PRJOB:PROCESSOR_EXPECT_ARGUMENT]
|
|
message = The 'prjob' command expects subsearch as the only argument.
|
|
severity = error
|
|
|
|
[PRJOB:PROCESSOR_INVALID_ARGUMENT__S_LD]
|
|
message = Invalid option value. Expecting a positive integer for option '%s'. Instead got '%ld'.
|
|
severity = error
|
|
|
|
[PRJOB:PROCESSOR_INVALID_REDUCER_NUM__S]
|
|
message = Invalid argument value for 'prjob' command. Expected a positive integer for 'num_of_reducers' argument. Received '%s'. Search will continue with default value for 'num_of_reducers'.
|
|
severity = warn
|
|
target = auto
|
|
|
|
[PRJOB:REAL_TIME_NOT_SUPPORTED]
|
|
message = Real-time search does not support the 'redistribute' or 'prjob' commands.
|
|
action = Remove the 'redistribute' or 'prjob' command from the search, or change the Time Range Picker for the search so the search does not run in real time.
|
|
severity = error
|
|
|
|
|
|
[READ_SUMMARY]
|
|
name = Read Summary Directive
|
|
|
|
[READ_SUMMARY:DM_ERROR__S_S_S]
|
|
message = Issue occurred with data model '%s'. Issue: '%s' Reason: '%s'.
|
|
severity = warn
|
|
capabilities = search
|
|
|
|
[READ_SUMMARY:FAILED_PARSE]
|
|
message = Failed to parse options. Clearing out read-summary arguments.
|
|
severity = warn
|
|
capabilities = search
|
|
|
|
|
|
[RETURN]
|
|
name = Return Processor
|
|
|
|
[RETURN:INVALID_LIMIT]
|
|
message = The limit specified is invalid. It must be > 0.
|
|
severity = error
|
|
|
|
[RETURN:INVALID_USAGE]
|
|
message = Usage: [count] attr, $attrval, alias=attr, ...
|
|
severity = error
|
|
|
|
|
|
[RDIN]
|
|
name = Redistribute In Processor
|
|
|
|
[RDIN:ENDPOINT_INVALID_OPTION__S]
|
|
message = The redistribute-in request has an invalid option: %s.
|
|
|
|
[RDIN:STREAM_EXCEPTION__S]
|
|
message = The redistribute-in process failed because: %s.
|
|
|
|
|
|
[RDOUT]
|
|
name = Redistribute Out Processor
|
|
|
|
[RDOUT:PIPELINE_EXEC_FAIL__S]
|
|
message = The redistribute-out process has failed because: %s.
|
|
severity = error
|
|
|
|
[RDOUT:PIPELINE_INTERNAL_FAIL]
|
|
message = The redistribute-out process failed. Check search.log for details.
|
|
severity = error
|
|
|
|
[RDOUT:PIPELINE_UNKNOWN_EXCEPTION]
|
|
message = An unknown exception occurred during the redistribute-out process.
|
|
severity = error
|
|
|
|
|
|
[REDISTPROC]
|
|
name = Redistribute Processor
|
|
|
|
[REDISTPROC:CANNOT_EXECUTE_ON_SEARCH_HEAD]
|
|
message = Cannot redistribute events that have been aggregated at the search head.
|
|
severity = error
|
|
|
|
[REDISTPROC:FAILED_TO_AUTO_DETECT_KEYS]
|
|
message = Unable to autodetect redistribute fields for the remote phase of the search. Provide the fields.
|
|
severity = error
|
|
|
|
[REDISTPROC:GENERATES_EMPTY_REMOTE_PHASE]
|
|
message = Redundant redistribute detected. Remove the redundant command.
|
|
severity = error
|
|
|
|
[REDISTPROC:INVALID_ARGUMENT__S]
|
|
message = Invalid argument: '%s'
|
|
severity = error
|
|
|
|
[REDISTPROC:MISSING_FIELDS_AFTER_BY]
|
|
message = Must specify at least one field after 'by'.
|
|
severity = error
|
|
|
|
[REDISTPROC:PHASED_EXECUTION_DISABLED]
|
|
message = To use the 'redistribute' command, phased_execution_mode must be set to 'multithreaded' or 'auto' in limits.conf. Ignoring the 'redistribute' command.
|
|
severity = warn
|
|
|
|
[REDISTPROC:REAL_TIME_NOT_SUPPORTED]
|
|
message = Real-time search does not support the 'redistribute' or 'prjob' commands.
|
|
action = Remove the 'redistribute' or 'prjob' command from the search, or change the Time Range Picker for the search so the search does not run in real time.
|
|
severity = error
|
|
|
|
[REDISTPROC:PRJOB_COMMAND_DETECTED]
|
|
message = Internal optimizer error: Found the 'prjob' command where it is not required.
|
|
action = Remove the 'prjob' command from the search.
|
|
severity = error
|
|
|
|
|
|
[REGEX]
|
|
name = Regex
|
|
|
|
[REGEX:INVALID_REGEX__S_S]
|
|
message = The regex '%s' is invalid. %s.
|
|
severity = error
|
|
|
|
[REGEX:USAGE]
|
|
message = Usage: regex <field> (=|!=) <regex>.
|
|
severity = error
|
|
|
|
|
|
[REMOTE_LOGIN]
|
|
name = Remote Login
|
|
|
|
[REMOTE_LOGIN:DENIED_DISABLED__S]
|
|
message = Remote login disabled by '%s' in server.conf.
|
|
severity = error
|
|
|
|
[REMOTE_LOGIN:DENIED_FREE__S]
|
|
message = Remote login disabled because you are using a free license which does not provide authentication. To resolve this, switch to either the forwarder-only license or the enterprise trial license included with the product. To override this and enable unauthenticated remote management, edit the '%s' setting in your server.conf file.
|
|
severity = error
|
|
|
|
[REMOTE_LOGIN:DENIED_PASSWORD__S_S]
|
|
message = Remote login has been disabled for '%s' with the default password. Either set the password, or override by changing the '%s' setting in your server.conf file.
|
|
severity = error
|
|
|
|
|
|
[REMOTE_STORAGE]
|
|
name = Remote Storage
|
|
|
|
[REMOTE_STORAGE:CANNOT_LOCALIZE_BUCKET_GEN__S]
|
|
message = The search process with search_id="%s" may have returned partial results.
|
|
action = Try running your search again. If you see this error repeatedly, review search.log for details or contact your Splunk administrator.
|
|
severity = error
|
|
|
|
[REMOTE_STORAGE:BUCKET_INTEGRITY_CHECK_FAILURE__S_S]
|
|
message = Data integrity check failed for bucket="%s". The search process with search_id="%s" might have returned partial results.
|
|
action = Review the search.log file for details and contact your Splunk administrator to investigate further.
|
|
severity = error
|
|
|
|
|
|
[RENAME]
|
|
name = Rename Operator
|
|
|
|
[RENAME:INVALID_FIELD_NAME__S]
|
|
message = Invalid field name '%s'.
|
|
severity = error
|
|
|
|
[RENAME:WILDCARD_MISMATCH__S_S]
|
|
message = Wildcard mismatch: '%s' as '%s'.
|
|
severity = error
|
|
|
|
[RENAME:USAGE]
|
|
message = Usage: rename [old_name AS/TO/-> new_name]+.
|
|
severity = error
|
|
|
|
[RENAME:MULTIPLE_RENAMES_TO__S]
|
|
message = Multiple renames to field '%s' detected. Only the last one will appear, and previous 'from' fields will be dropped.
|
|
severity = warn
|
|
|
|
|
|
|
|
[REPLACE]
|
|
name = Replace Processor
|
|
|
|
[REPLACE:CANNOT_FILL_PATTERN__S_S_S]
|
|
message = Could not fill pattern '%s' for string '%s' using matching pattern '%s'.
|
|
severity = warn
|
|
|
|
[REPLACE:USAGE]
|
|
message = Usage: replace [orig_str WITH new_str]+ [IN field1, field2, ...].
|
|
severity = error
|
|
|
|
[REPLACE:WC_CONSEC_STARS]
|
|
message = Wildcards cannot have consecutive '*' characters.
|
|
severity = error
|
|
|
|
[REPLACE:WC_UNMATCHED_STARS]
|
|
message = Wildcards and their replacements must have a matching number of '*' characters.
|
|
severity = error
|
|
|
|
[REQUIRE]
|
|
name = Require Processor
|
|
|
|
[REQUIRE:EMPTY_RESULTS]
|
|
message = The 'require' command received zero events or results; the search will be intentionally stopped.
|
|
severity = error
|
|
|
|
[REQUIRE:INVALID_USAGE_IN_REALTIME_SEARCH]
|
|
message = The 'require' command cannot be used in real-time searches.
|
|
severity = error
|
|
|
|
|
|
[RESTPROC]
|
|
name = REST Processor
|
|
|
|
[RESTPROC:REST_BAD_URI_PATH__S_S]
|
|
message = Failed to fetch REST endpoint uri=%s from server %s.
|
|
action = Check that the URI path provided exists in the REST API.
|
|
severity = error
|
|
capabilities = search
|
|
help = message.rest.reference.uris
|
|
|
|
[RESTPROC:REST_INVALID_URI]
|
|
message = Invalid REST endpoint provided.
|
|
action = Check that the URI path provided exists in the REST API.
|
|
severity = error
|
|
capabilities = search
|
|
help = message.rest.reference.uris
|
|
|
|
[RESTPROC:REST_LOCAL_ONLY]
|
|
message = Restricting results of the "rest" operator to the local instance because you do not have the "dispatch_rest_to_indexers" capability.
|
|
severity = warn
|
|
help = message.dispatch.rest.capabilities
|
|
|
|
[RESTPROC:REST_STATUS_NOT_OK__S_D_S]
|
|
message = The REST request on the endpoint URI %s returned HTTP 'status not OK': code=%d, %s.
|
|
severity = error
|
|
|
|
[RESTPROC:STRICT_UNSUPPORTED]
|
|
message = 'strict' is not supported for peers below 8.0.5. Make sure your REST search doesn't include 'strict'.
|
|
severity = error
|
|
|
|
[RESTPROC:NO_PERMISSION__S]
|
|
message = You do not have a role with the rest_access_server_endpoints capability that is required to run the 'rest' command with the endpoint=%s.
|
|
severity = error
|
|
action = Contact your Splunk administrator to request that this capability be added to your role.
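# Illustrative sketch only: one way an administrator typically grants a capability is through a
# role stanza in authorize.conf. The capability name comes from the message above; the role name
# is hypothetical, and the grant syntax should be verified against authorize.conf.spec.
#
#   [role_rest_power_user]
#   rest_access_server_endpoints = enabled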
[RESTPROC:MISSING_URL]
|
|
message = The required URL for the Splunk platform REST API endpoint is not specified.
|
|
action = Specify the URL for the endpoint.
|
|
severity = error
|
|
|
|
[RESTPROC:NO_RELOAD]
|
|
message = The '_reload' action can't be used with the 'rest' search command.
|
|
severity = error
|
|
|
|
[RESTPROC:RELOAD_ACTION_DEPRECATED]
|
|
message = Use of the '_reload' action with the 'rest' command is deprecated. Don't use the '_reload' action in a search with the 'rest' command.
|
|
action = Contact your Splunk administrator to set 'allow_reload=false' in limits.conf. To implement '_reload' functionality, use HTTP POST calls.
|
|
severity = warn
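# Illustrative sketch only: the action text above refers to an 'allow_reload' setting in
# limits.conf. The setting name and value come from the action text; the stanza it belongs
# under is not stated there, so confirm placement in limits.conf.spec before applying.
#
#   allow_reload = false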
[REST_SEARCH]
|
|
name = Rest Search
|
|
|
|
[REST_SEARCH:BAD_PRIORITY]
|
|
message = The priority should be in the following range: [0-10].
|
|
severity = error
|
|
|
|
[REST_SEARCH:CHANGED_PRIORITY__LU]
|
|
message = The search job's priority was changed to %lu.
|
|
severity = info
|
|
|
|
[REST_SEARCH:CHANGED_TTL__LU]
|
|
message = The ttl of the search job was changed to %lu.
|
|
severity = info
|
|
|
|
[REST_SEARCH:EMPTY_SEARCH]
|
|
message = Empty search.
|
|
severity = error
|
|
|
|
[REST_SEARCH:FAILED_TO_LOCATE_JOB_STATUS__S]
|
|
message = Failed to locate job status for job=%s.
|
|
severity = error
|
|
|
|
[REST_SEARCH:FAILED_TO_RETRIEVE_JOB__S]
|
|
message = Failed to retrieve artifacts for job=%s
|
|
severity = error
|
|
|
|
[REST_SEARCH:INTERNAL_SERVER_ERROR]
|
|
message = Internal server error.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_ABORT_ON]
|
|
message = Invalid abort_on.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_ACTION]
|
|
message = Invalid action.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_ADD_EXPORT_OFFSET]
|
|
message = Invalid add_export_offset.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_ADD_EXPORT_TIMESTAMP]
|
|
message = Invalid add_export_timestamp.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_ADD_SUMMARY_METADATA]
|
|
message = Invalid add_summary_to_metadata.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_ALLOW_PREVIEW]
|
|
message = Invalid allow_preview.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_ALLOW_QUEUE]
|
|
message = Invalid allow_queue.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_ARGUMENT]
|
|
message = Invalid argument.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_ATTACHMENT]
|
|
message = Invalid attachment.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_AUTO_CANCEL]
|
|
message = Invalid auto_cancel.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_AUTO_FINALIZE_EVENTS]
|
|
message = Invalid auto_finalize_ec.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_AUTO_PAUSE]
|
|
message = Invalid auto_pause.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_CONFIG]
|
|
message = Invalid config.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_COUNT]
|
|
message = Invalid count.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INTERNAL_PROVIDER_ERROR]
|
|
message = Cannot find distributed search configuration to dispatch request.
|
|
action = Make sure this deployment is healthy and verify that its indexer peers are configured correctly.
|
|
severity = error
|
|
|
|
[REST_SEARCH:SERVER_LOOKUP__S]
|
|
message = Cannot find server '%s' to dispatch request.
|
|
action = Make sure this deployment is healthy.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_CREATE_PROCESS]
|
|
message = Invalid spawn_process.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_EARLIEST_TIME]
|
|
message = Invalid earliest_time.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_EMAIL_LIST]
|
|
message = Invalid email_list.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_EMAIL_RESULTS]
|
|
message = Invalid email_results.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_EMAIL_SUBJECT]
|
|
message = Invalid email_subject.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_ENABLE_EVENT_STREAM]
|
|
message = Invalid enable_event_stream.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_ENABLE_LOOKUPS]
|
|
message = Invalid enable_lookups.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_END_TIME]
|
|
message = Invalid end_time.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_EVENTSVIEWER]
|
|
message = Invalid Events Viewer.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_EXEC_MODE]
|
|
message = Invalid exec_mode.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_EXPORT_XML_WITH_WRAPPER]
|
|
message = Invalid export_XML_with_wrapper.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_FIELD]
|
|
message = Invalid field.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_FIELD_LIST]
|
|
message = Invalid field_list.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_FORCE_REPLICATION]
|
|
message = Invalid force_bundle_replication.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_GENERATION_ID]
|
|
message = Invalid generation_ID.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_GEO_BOUNDS_NORTH]
|
|
message = Invalid geo_bounds_north.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_GEO_BOUNDS_SOUTH]
|
|
message = Invalid geo_bounds_south.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_GEO_LAT_FIELD]
|
|
message = Invalid geo_lat_field.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_GEO_LON_FIELD]
|
|
message = Invalid geo_lon_field.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_ID]
|
|
message = Invalid id.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_INDEX_EARLIEST]
|
|
message = Invalid index_earliest.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_INDEX_LATEST]
|
|
message = Invalid index_latest.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_INDEX_RANGE]
|
|
message = index_latest must be after index_earliest.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_LATEST_TIME]
|
|
message = Invalid latest_time: latest_time must be after earliest_time.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_MAX_COUNT]
|
|
message = Invalid max_count.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_MAX_LINES]
|
|
message = Invalid max_lines.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_MAX_TIME]
|
|
message = Invalid max_time.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_MESSAGE_LEVEL]
|
|
message = Invalid message_level.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_MIN_FREQ]
|
|
message = Invalid min_freq.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_NEGATE]
|
|
message = Invalid negate.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_NOW]
|
|
message = Invalid now.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_OFFSET]
|
|
message = Invalid offset.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_OFFSET_FIELD]
|
|
message = Invalid offset_field.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_OUTPUT_MODE]
|
|
message = Invalid output_mode.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_OUTPUT_TIME_FORMAT]
|
|
message = Invalid output_time_format.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_PARSE_ONLY]
|
|
message = Invalid parse_only.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_IGNORE_PARSE_ERROR]
|
|
message = Invalid ignore_parse_error argument.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_PEER]
|
|
message = Invalid peer.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_PREVIEW]
|
|
message = Invalid preview.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_PREVIEW_FREQ]
|
|
message = Invalid preview_freq.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_PROFILE]
|
|
message = Invalid profile.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_PROVENANCE]
|
|
message = Invalid provenance.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_QUERY]
|
|
message = Invalid query.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_REDUCE_FREQ]
|
|
message = Invalid reduce_freq.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_RELOAD_MACROS]
|
|
message = Invalid reload_macros.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_REPLAY_ET]
|
|
message = Invalid replay_et.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_REPLAY_LT]
|
|
message = Invalid replay_lt.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_REPLAY_SPEED]
|
|
message = Invalid replay_speed.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_REUSE_MAX_SECONDS_AGO]
|
|
message = Invalid reuse_max_seconds_ago.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_RT_BACKFILL]
|
|
message = Invalid rt_backfill.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_RT_BLOCKING]
|
|
message = Invalid rt_blocking.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_RT_CONNECT_TIMEOUT]
|
|
message = Invalid rt_connect_timeout.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_RT_EARLIEST]
|
|
message = Invalid earliest_time for a real-time search.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_RT_INDEXED]
|
|
message = Invalid indexedRealtime.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_RT_INDEXED_OFFSET]
|
|
message = Invalid indexedRealtimeOffset.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_RT_INDEXFILTER]
|
|
message = Invalid rt_indexfilter.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_RT_LATEST]
|
|
message = Invalid latest_time for a real-time search.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_RT_MAXBLOCKSECS]
|
|
message = Invalid rt_maxblocksecs.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_RT_MAXIMUM_SPAN]
|
|
message = Invalid rt_maximum_span.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_RT_QUEUE_SIZE]
|
|
message = Invalid rt_queue_size.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_RT_RECEIVE_TIMEOUT]
|
|
message = Invalid rt_receive_timeout.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_RT_SEND_TIMEOUT]
|
|
message = Invalid rt_read_timeout.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SAMPLE_RATIO]
|
|
message = Invalid sample_ratio.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SAMPLE_SEED]
|
|
message = Invalid sample_seed.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SEARCH_LEVEL]
|
|
message = Invalid adhoc_search_level.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SEARCH_LOG]
|
|
message = Invalid search log.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SEARCH_MODE]
|
|
message = Invalid search_mode.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SEARCH_STATE_MSGS]
|
|
message = Invalid search_state_msgs.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SEGMENTATION]
|
|
message = Invalid segmentation.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SERVER_NAME]
|
|
message = Invalid server_name.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SHOW_INCOMPLETE]
|
|
message = Invalid show_incomplete.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SHOW_METADATA]
|
|
message = Invalid show_metadata.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SHOW_OFFSET]
|
|
message = Invalid show_offset.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_START_TIME]
|
|
message = Invalid start_time.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_STATUS_BUCKETS]
|
|
message = Invalid status buckets.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SUMMARY_MODE]
|
|
message = Invalid summary_mode.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SURROUNDING]
|
|
message = Invalid surrounding.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SYNC_REPLICATION]
|
|
message = Invalid sync_bundle_replication.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_TIME]
|
|
message = Invalid time.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_TIMELINE]
|
|
message = Invalid timeline.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_TIMELINE_FREQ]
|
|
message = Invalid timeline_freq.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_TIMEOUT]
|
|
message = Invalid timeout.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_TIMERANGE]
|
|
message = Invalid timerange.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_TIMEZONE]
|
|
message = Invalid tz.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_TIME_FORMAT]
|
|
message = Invalid time_format.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_TOP_COUNT]
|
|
message = Invalid top_count.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_TRUNCATION_MODE]
|
|
message = Invalid truncation_mode.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_PROXY_REQUEST]
|
|
message = Invalid proxy request.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SEARCH_LISTENER]
|
|
message = Invalid 'search_listener' argument.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SEARCH_LISTENER_URI]
|
|
message = Invalid URI provided in 'search_listener' argument.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_SEARCH_LISTENER_DISABLED]
|
|
message = The 'search_listener' argument is disabled. Set 'allow_search_listener_api_param = true' in the [search] stanza in the limits.conf file to enable this argument.
|
|
severity = error
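# Illustrative sketch only, based on the message above: enabling the 'search_listener' argument
# in limits.conf. Both the stanza and the setting name come directly from the message text.
#
#   [search]
#   allow_search_listener_api_param = true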
[REST_SEARCH:INVALID_TTL__S]
|
|
message = The ttl should be a positive integer: %s.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_USE_HISTOGRAM]
|
|
message = Invalid use_histogram.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_VALUE]
|
|
message = Invalid value.
|
|
severity = error
|
|
|
|
[REST_SEARCH:INVALID_WARN_UNUSED_ARGS]
|
|
message = Invalid warn_unused_arguments.
|
|
severity = error
|
|
|
|
[REST_SEARCH:JOB_CANCELLED]
|
|
message = Search job cancelled.
|
|
severity = info
|
|
|
|
[REST_SEARCH:JOB_FINALIZED]
|
|
message = Search job finalized.
|
|
severity = info
|
|
|
|
[REST_SEARCH:JOB_GEN_PREVIEW_OFF]
|
|
message = Search job results preview disabled.
|
|
severity = info
|
|
|
|
[REST_SEARCH:JOB_GEN_PREVIEW_ON]
|
|
message = Search job results preview enabled.
|
|
severity = info
|
|
|
|
[REST_SEARCH:JOB_PAUSED]
|
|
message = Search job paused.
|
|
severity = info
|
|
|
|
[REST_SEARCH:JOB_SAVED]
|
|
message = Search job saved.
|
|
severity = info
|
|
|
|
[REST_SEARCH:JOB_TOUCHED]
|
|
message = Search job touched.
|
|
severity = info
|
|
|
|
[REST_SEARCH:JOB_UNPAUSED]
|
|
message = Search job continued.
|
|
severity = info
|
|
|
|
[REST_SEARCH:JOB_UNSAVED]
|
|
message = Search job unsaved.
|
|
severity = info
|
|
|
|
[REST_SEARCH:METHOD_NOT_ALLOWED]
|
|
message = The method is not allowed.
|
|
severity = error
|
|
|
|
[REST_SEARCH:MISMATCH_SORTVEC]
|
|
message = The number of sort_key and sort_dir arguments does not match.
|
|
severity = error
|
|
|
|
[REST_SEARCH:MISSING_ACTION]
|
|
message = Missing action.
|
|
severity = error
|
|
|
|
[REST_SEARCH:MISSING_GEO_LAT_FIELD]
|
|
message = You are missing the latitude argument for latfield.
|
|
severity = error
|
|
|
|
[REST_SEARCH:MISSING_GEO_LON_FIELD]
|
|
message = You are missing the longitude argument for longfield.
|
|
severity = error
|
|
|
|
[REST_SEARCH:MISSING_GEO_BOUNDS_SOUTH]
|
|
message = You are missing the south bound.
|
|
severity = error
|
|
|
|
[REST_SEARCH:MISSING_GEO_BOUNDS_NORTH]
|
|
message = You are missing the north bound.
|
|
severity = error
|
|
|
|
[REST_SEARCH:MISSING_GEO_BOUNDS_EAST]
|
|
message = You are missing the east bound.
|
|
severity = error
|
|
|
|
[REST_SEARCH:MISSING_GEO_BOUNDS_WEST]
|
|
message = You are missing the west bound.
|
|
severity = error
|
|
|
|
[REST_SEARCH:MISSING_PRIORITY]
|
|
message = You are missing the priority for the set priority action.
|
|
severity = error
|
|
|
|
[REST_SEARCH:MISSING_TTL]
|
|
message = You are missing the ttl argument for the ttl action.
|
|
severity = error
|
|
|
|
[REST_SEARCH:MUST_CREATE_PROCESS]
|
|
message = Your search must be executed in a separate process.
|
|
severity = error
|
|
|
|
[REST_SEARCH:ONLY_ONE_ACTION]
|
|
message = Only one action per call is allowed.
|
|
severity = error
|
|
|
|
[REST_SEARCH:PERMISSION_DENIED]
|
|
message = Permission denied.
|
|
severity = error
|
|
|
|
[REST_SEARCH:REAL_TIME_MUST_START_WITH_SEARCH]
|
|
message = A real-time search must start with the search command.
|
|
severity = error
|
|
|
|
[REST_SEARCH:REAL_TIME_SID_MUST_START_WITH__S_S]
|
|
message = The search id of a real-time search must start with %s, sid=%s.
|
|
severity = error
|
|
|
|
[REST_SEARCH:STATUS_READ_FAILED]
|
|
message = Unable to read the job status.
|
|
severity = error
|
|
|
|
[REST_SEARCH:UNKNOWN_ACTION]
|
|
message = Unknown action.
|
|
severity = error
|
|
|
|
[REST_SEARCH:UNKNOWN_ENDPOINT]
|
|
message = Unknown endpoint.
|
|
severity = error
|
|
|
|
[REST_SEARCH:UNKNOWN_SID]
|
|
message = Unknown sid.
|
|
severity = error
|
|
|
|
[REST_SEARCH:ZOMBIE_PROCESS]
|
|
message = The search job terminated unexpectedly.
|
|
severity = error
|
|
|
|
[REST_SEARCH:OOMKILL_PROCESS]
|
|
message = Your search has been terminated. This is most likely due to an out of memory condition.
|
|
action = Either edit your search to reduce memory requirements or contact your Splunk administrator.
|
|
severity = error
|
|
|
|
|
|
[REST_TYPEAHEAD]
|
|
name = REST Typeahead Endpoint
|
|
|
|
[REST_TYPEAHEAD:INVALID_EARLIEST_TIME]
|
|
message = Invalid earliest_time.
|
|
severity = error
|
|
|
|
[REST_TYPEAHEAD:INVALID_LATEST_TIME]
|
|
message = Invalid latest_time: latest_time must be after earliest_time.
|
|
severity = error
|
|
|
|
[REST_TYPEAHEAD:INVALID_TIME_FORMAT]
|
|
message = Invalid time format.
|
|
severity = error
|
|
|
|
|
|
[RESTART_REQUIRED]
|
|
name = Restart Required
|
|
|
|
[RESTART_REQUIRED:CONTACT_CLOUD_SUPPORT]
|
|
message = Splunk must be restarted for changes to take effect.
|
|
action = Contact Splunk Cloud Support to complete the restart.
|
|
severity = warn
|
|
capabilities = restart_reason
|
|
|
|
[RESTART_REQUIRED:CONTACT_SYSTEM_ADMIN]
|
|
message = Splunk must be restarted for changes to take effect.
|
|
action = Contact your system administrator to complete the restart.
|
|
severity = warn
|
|
capabilities = restart_reason
|
|
|
|
[RESTART_REQUIRED:INITIATE_RESTART]
|
|
message = Splunk must be restarted for changes to take effect.
|
|
action = [[/manager/search/control|Click here to restart from the Manager]].
|
|
severity = warn
|
|
capabilities = restart_splunkd
|
|
|
|
|
|
[REQD_EVENTTYPES_DIR]
|
|
name = Required Event Types Directive
|
|
|
|
[REQD_EVENTTYPES_DIR:BAD_ARGS__S]
|
|
message = Failed to retrieve arg '%s'.
|
|
severity = warn
|
|
capabilities = search
|
|
|
|
[REQD_EVENTTYPES_DIR:NO_REQUIRED_FOUND]
|
|
message = Found no existing required event types. Falling back to all event types unless another directive provides valid required eventtypes.
|
|
severity = warn
|
|
capabilities = search
|
|
|
|
|
|
[REQD_TAGS_DIR]
|
|
name = Required Tags Directive
|
|
|
|
[REQD_TAGS_DIR:BAD_ARGS__S]
|
|
message = Failed to retrieve arg '%s'.
|
|
severity = warn
|
|
capabilities = search
|
|
|
|
[REQD_TAGS_DIR:NO_REQUIRED_FOUND]
|
|
message = Found no existing required tags. Falling back to all tags unless another directive provides valid required tags.
|
|
severity = warn
|
|
capabilities = search
|
|
|
|
|
|
[REX]
|
|
name = Rex Command
|
|
|
|
[REX:BAD_REGEX__S]
|
|
message = The regex '%s' does not extract anything. It should specify at least one named group. Format: (?<name>...).
|
|
severity = error
|
|
|
|
[REX:BAR_REGEX__S_S]
|
|
message = Encountered the following error while compiling the regex '%s': %s.
|
|
severity = error
|
|
|
|
[REX:USAGE]
|
|
message = Usage: rex [field=<field>] <regex>.
|
|
severity = error
|
|
|
|
|
|
[ROLLUP]
|
|
name = Rollup Processor
|
|
|
|
[ROLLUP:ARG_INVALID__S]
|
|
message = '%s' is invalid.
|
|
severity = error
|
|
|
|
[ROLLUP:ARG_FORMAT_INCORRECT__S]
|
|
message = Incorrect format for '%s'.
|
|
severity = error
|
|
|
|
[ROLLUP:INDEX_NOT_EXIST__S_S]
|
|
message = Cannot %s policy for index='%s'. The index does not exist.
|
|
severity = error
|
|
|
|
[ROLLUP:INDEX_NOT_METRIC_DATATYPE__S_S]
|
|
message = Cannot %s policy for index='%s'. This is not a metric index. Rollup policies can be applied only to metric indexes.
|
|
severity = error
|
|
|
|
[ROLLUP:INDEX_IS_DISABLED__S_S]
|
|
message = Cannot %s policy for index='%s'. The index is disabled.
|
|
severity = error
|
|
|
|
[ROLLUP:SAVED_SEARCHES_NOT_CREATE__S]
|
|
message = Will not create the automated scheduled search for index='%s'. Index or policy is disabled.
|
|
severity = error
|
|
|
|
[ROLLUP:INVALID_STANZA_NAME__S]
|
|
message = Stanza name='%s' is invalid.
|
|
severity = error
|
|
|
|
[ROLLUP:RAW_INDEX_NAME_CANNOT_BE_EMPTY]
|
|
message = The name of the source index for a rollup policy cannot be an empty string.
|
|
severity = error
|
|
|
|
[ROLLUP:RAW_INDEX_NAME_CANNOT_HAVE_SPACES]
|
|
message = The name of the source index for a rollup policy cannot have leading or trailing spaces.
|
|
severity = error
|
|
|
|
[ROLLUP:POLICY_EXIST__S_S]
|
|
message = Cannot %s policy for index='%s'. This rollup policy already exists.
|
|
severity = error
|
|
|
|
[ROLLUP:SAVED_SEARCHES_ACTION_FAILED__S_S_S]
|
|
message = Cannot %s policy for index='%s'. Failed to %s the automated scheduled search that creates the rollup summary.
|
|
severity = error
|
|
|
|
[ROLLUP:POLICY_NOT_FOUND__S_S]
|
|
message = Cannot %s policy for index='%s'. No policy found for this index.
|
|
severity = error
|
|
|
|
[ROLLUP:PROXY_FAILED__S]
|
|
message = Proxy failed. %s
|
|
severity = error
|
|
|
|
[ROLLUP:VALIDATION_INDEX_NOT_SPECIFIED]
|
|
message = Failed to apply rollup policy. The source index is empty or not specified.
|
|
severity = error
|
|
|
|
[ROLLUP:VALIDATION_INDEX_NOT_EXIST__S]
|
|
message = Failed to apply the rollup policy to index='%s'. That index does not exist.
|
|
severity = error
|
|
|
|
[ROLLUP:VALIDATION_INDEX_NOT_METRIC_DATATYPE__S]
|
|
message = Failed to apply the rollup policy to index='%s'. This is not a metric index. Rollup policies can be applied only to metric indexes.
|
|
severity = error
|
|
|
|
[ROLLUP:VALIDATION_INDEX_IS_DISABLED__S]
|
|
message = Failed to apply the rollup policy to index='%s'. The index is disabled.
|
|
severity = error
|
|
|
|
[ROLLUP:VALIDATION_ARG_NOT_VALID__S_S_S]
|
|
message = Failed to apply rollup policy to index='%s'. %s='%s' is not valid.
|
|
severity = error
|
|
|
|
[ROLLUP:VALIDATION_SUMMARY_NOT_SPECIFIED__S]
|
|
message = Failed to apply rollup policy to index='%s'. One or more rollup summaries must be specified for the rollup policy.
|
|
severity = error
|
|
|
|
[ROLLUP:VALIDATION_SUMMARIES_WITH_SAME_SPAN_AND_TARGET_INDEX__S_S]
|
|
message = Failed to apply rollup policy to index='%s'. Multiple rollup summaries with equivalent spans have been specified for target index='%s'.
|
|
severity = error
|
|
|
|
[ROLLUP:VALIDATION_DEFAULT_AGG_NOT_VALID__S_S_S_S_S]
|
|
message = Failed to apply rollup policy to index='%s'. %s='%s' is not valid. Modify this setting with one of the following methods: 1. Make a POST operation to the catalog/metricstore/rollup/{index} endpoint to update the '%s' setting. 2. Edit the 'defaultAggregation' setting in %s.conf and restart the system to apply your change.
|
|
severity = error
|
|
|
|
[ROLLUP:VALIDATION_METRIC_OVERRIDES_AGGREGATION_NOT_VALID__S_S_S]
|
|
message = Failed to apply rollup policy to index='%s'. The metrics override aggregation function='%s' is not valid for the metric: %s.
|
|
severity = error
|
|
|
|
[ROLLUP:VALIDATION_SUMMARY_SPAN_BELOW_MIN__S_S_S_S]
|
|
message = Failed to apply rollup policy to index='%s'. %s='%s' is below the minimum timespan for the searches that build the rollup summaries=%s seconds.
|
|
severity = error
|
|
|
|
[ROLLUP:VALIDATION_SUMMARY_SPAN_CANNOT_BE_CRON_SCHEDULED__S_S_S]
|
|
message = Failed to apply rollup policy to index='%s'. %s='%s' cannot be cron scheduled.
|
|
severity = error
|
|
|
|
[ROLLUP:VALIDATION_SUMMARY_INDEX_NOT_EXIST__S_S_S]
|
|
message = Failed to apply rollup policy to index='%s'. %s='%s' does not exist.
|
|
severity = error
|
|
|
|
[ROLLUP:VALIDATION_SUMMARY_INDEX_IS_DISABLED__S_S_S]
|
|
message = Failed to apply rollup policy to index='%s'. %s='%s' is disabled.
|
|
severity = error
|
|
|
|
[ROLLUP:VALIDATION_SUMMARY_INDEX_NOT_METRIC_DATATYPE__S_S_S]
|
|
message = Failed to apply rollup policy to index='%s'. %s='%s' is not a metric datatype.
|
|
severity = error
|
|
|
|
[ROLLUP:MULTIPLE_VALUES_FOR_SINGLE_ARGUMENT__S]
|
|
message = Cannot have multiple values for argument '%s'.
|
|
severity = error
|
|
|
|
|
|
[RT_ORDER]
|
|
name = RTOrder Processor
|
|
|
|
[RT_ORDER:INVALID_MAX_BUFFER__LU]
|
|
message = max_buffer_size must be > 0 and <= %lu.
|
|
severity = error
|
|
|
|
[RT_ORDER:INVALID_SPAN__S]
|
|
message = Invalid buffer_span '%s'.
|
|
severity = error
|
|
|
|
|
|
[RUNAS]
|
|
name = Runas Command Processor
|
|
|
|
[RUNAS:DISABLED]
|
|
message = The ability to run a search as the owner of the search is disabled.
|
|
action = Ask your Splunk administrator if you expect to be able to dispatch run-as-owner modules.
|
|
severity = error
|
|
|
|
[RUNAS:INPUT_RESULTS_IGNORED]
|
|
message = Input results are being ignored on searches designated to run as the owner of the search.
|
|
severity = warn
|
|
|
|
[RUNAS:USAGE]
|
|
message = Usage: runas [options] '['<subpipeline>']'.
|
|
severity = error
|
|
|
|
[RUNAS:START_TIME_AFTER_END_TIME__LU_LU]
|
|
message = The 'start_time' value %lu should be earlier than the 'end_time' value %lu.
|
|
severity = error
|
|
|
|
|
|
[SAML]
|
|
name = Saml
|
|
|
|
[SAML:AUTHENTICATING]
|
|
message = Authenticating...
|
|
severity = info
|
|
|
|
[SAML:ERROR_GROUPS_MISSING]
|
|
message = Saml response does not contain group information.
|
|
severity = error
|
|
|
|
[SAML:ERROR_PARSE_TIME_TIPS_TO_FIX]
|
|
message = Verify the time in the saml response from IDP is in UTC time format.
|
|
severity = error
|
|
|
|
[SAML:ERROR_PARSE_TIME__S]
|
|
message = Could not parse '%s' time.
|
|
severity = error
|
|
|
|
[SAML:ERROR_ROLE_MAPPING_UNKNOWN]
|
|
message = No valid Splunk role found in local mapping.
|
|
severity = error
|
|
|
|
[SAML:LOGOUT]
|
|
message = Logging out...
|
|
severity = info
|
|
|
|
[SAML:MISMATCH_FIELD_TIPS_TO_FIX]
|
|
message = Ensure the configuration in Splunk matches the configuration in the IdP.
|
|
severity = error
|
|
|
|
[SAML:MISMATCH_FIELD__S]
|
|
message = The '%s' field in the saml response from the IdP does not match the configuration.
|
|
severity = error
|
|
|
|
[SAML:MISSING_FIELD_TIPS_TO_FIX__S]
|
|
message = Fix the configuration in the IdP to include '%s' in the saml response to complete login successfully.
|
|
severity = error
|
|
|
|
[SAML:MISSING_FIELD__S]
|
|
message = The '%s' field is missing in the saml response from the IdP.
|
|
severity = error
|
|
|
|
[SAML:REDIRECT_AFTER_LOGOUT]
|
|
message = Redirecting after logout...
|
|
severity = info
|
|
|
|
[SAML:TIME_VERIFICATION_FAILED__S]
|
|
message = The '%s' condition could not be verified successfully. The saml response is not valid.
|
|
severity = error
|
|
|
|
[SAML:UNSUPPORTED_SIGNATURE_ALGORITHM]
|
|
message = Unsupported signature algorithm.
|
|
severity = warn
|
|
|
|
[SAML:ROLE_DN_LOCKED_FOR_CN__S_S_S]
|
|
message = The IdP returned role Distinguished Name (DN) '%s', which matches configured role map Common Name (CN) '%s'. This role DN has now been locked to the role map CN, using the '%s' role. Future assertions that contain this CN but do not match the locked DN will be rejected.
|
|
severity = info
|
|
|
|
[SAML:ROLE_DN_ALREADY_LOCKED_FOR_CN__S_S]
|
|
message = A locked role Distinguished Name (DN) '%s', already exists for the configured role Common Name (CN) '%s'.
|
|
severity = warn
|
|
|
|
[SAML:IDP_CERTIFICATE_EXPIRING_SOON__S__D]
|
|
message = The identity provider certificate or certificate chain %s will expire in %d day(s). Update the certificate(s) before they expire to avoid losing access to this instance.
|
|
severity = warn
|
|
|
|
|
|
[SAVED_SEARCH_AUDITOR]
|
|
name = Saved Search Auditor
|
|
|
|
[SAVED_SEARCH_AUDITOR:INSUF_CAPABILITIES__S]
|
|
message = The user is missing the following capabilities and therefore cannot run the search as configured: %s. The search will run with the default values for the missing capabilities.
|
|
severity = error
|
|
|
|
|
|
[SAVED_SEARCH-EAI]
|
|
name = Saved Search Admin Handler
|
|
|
|
[SAVED_SEARCH-EAI:BOTH_TIMES_SHOULD_START_RT]
|
|
message = Earliest and latest times should either both start with "rt" or neither should.
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:CANNOT_CREATE__S]
|
|
message = Unable to create saved search with name '%s'. A saved search with that name already exists.
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:CANNOT_DISPATCH_DISABLED__S]
|
|
message = Saved search "%s" cannot be executed because it is disabled.
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:CANNOT_EDIT__S]
|
|
message = Unable to save changes to saved search with name '%s'.
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:CANNOT_PARSE_ALERT_CONDITION__S]
|
|
message = Cannot parse alert condition. %s.
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:CANNOT_PARSE_TIME__S_S]
|
|
message = Cannot parse time argument '%s': '%s'.
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:CANNOT_SCHEDULE_RTSEARCH]
|
|
message = Real time searches cannot be executed by the scheduler.
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:INVALID_ALERT_ARGS__S_S]
|
|
message = Invalid %s="%s".
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:INVALID_ALLOW_SKEW__S_S]
|
|
message = Invalid value "%s" for "%s": must be either a duration or a percentage.
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:INVALID_CRON__S]
|
|
message = Invalid cron_schedule="%s".
|
|
severity = error
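# Illustrative sketch only (not part of the message definitions above): the
# INVALID_CRON message concerns the 'cron_schedule' setting of a scheduled
# saved search. A hypothetical savedsearches.conf stanza (the search name and
# schedule shown are placeholders) might look like:
#
#   [My scheduled search]
#   enableSched = 1
#   cron_schedule = */5 * * * *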
|
|
|
|
[SAVED_SEARCH-EAI:INVALID_SCHED_PRIORITY__S_S]
|
|
message = Invalid value "%s" for "%s": must be one of "default", "higher", or "highest".
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:INVALID_SCHED_WINDOW__S_S]
|
|
message = Invalid value "%s" for "%s": must be either "auto" or in the range 0-44640.
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:INVALID_SUMMARY_INDEX__S]
|
|
message = Index name=%s does not exist. The summary index must exist in order for a scheduled search to populate it.
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:LATEST_AFTER_EARLIEST]
|
|
message = Latest time must be after earliest time.
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:MISSING_REQUIRED_ARG__S]
|
|
message = Missing required argument: %s.
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:NOT_FOUND__S]
|
|
message = Cannot find saved search with name '%s'.
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:NO_CRON_SCHEDULE]
|
|
message = No cron schedule specified.
|
|
severity = error
|
|
|
|
[SAVED_SEARCH-EAI:WILL_NOT_RUN_AFTER_TRIAL__S]
|
|
message = This scheduled search will not run after the Splunk %s Trial License expires.
|
|
severity = warn
|
|
|
|
|
|
[METRIC_ALERT]
|
|
name = Metric Alert Messages
|
|
|
|
[METRIC_ALERT:METRIC_ALERT_ALREADY_EXISTS__S]
|
|
message = A metric alert '%s' already exists.
|
|
severity = error
|
|
|
|
[METRIC_ALERT:SAVEDSEARCH_ALREADY_EXISTS__S]
|
|
message = A metric alert cannot have the same name ('%s') as an existing savedsearch.
|
|
severity = error
|
|
|
|
[METRIC_ALERT:METRIC_ALERT_NOT_EXISTING__S]
|
|
message = A metric alert with name '%s' does not exist.
|
|
severity = error
|
|
|
|
[METRIC_ALERT:ALERT_NAME_CANNOT_BE_EMPTY__S]
|
|
message = Metric alert name cannot be empty for '%s'.
|
|
severity = error
|
|
|
|
[METRIC_ALERT:ALERT_NAME_LONGER_THAN_100__S]
|
|
message = Metric alert name cannot be longer than 100 chars for '%s'.
|
|
severity = error
|
|
|
|
[METRIC_ALERT:METRIC_INDEX_CANNOT_BE_EMPTY__S]
|
|
message = metric_indexes cannot be empty for '%s'.
|
|
severity = error
|
|
|
|
[METRIC_ALERT:EMPTY_INDEX__S]
|
|
message = metric_indexes cannot have empty index for '%s'.
|
|
severity = error
|
|
|
|
[METRIC_ALERT:EMPTY_GROUPBY__S]
|
|
message = The groupby setting cannot have an empty dimension for '%s'.
|
|
severity = error
|
|
|
|
[METRIC_ALERT:INDEX_NOT_ACCESSIBLE__S_S_S_S_S]
|
|
message = You (user='%s') do not have access to metric index '%s' for '%s'. If this is a distributed environment, make sure that the roles user '%s' holds on the search head include the index '%s' in srchIndexesAllowed.
|
|
severity = error
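# Illustrative sketch only (not part of the message definitions above): the
# INDEX_NOT_ACCESSIBLE message refers to the 'srchIndexesAllowed' role
# setting, which lives in authorize.conf on the search head. A hypothetical
# role stanza (the role and index names are placeholders) might look like:
#
#   [role_metric_alert_user]
#   srchIndexesAllowed = my_metric_index;main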
|
|
|
|
[METRIC_ALERT:POST_WITH_WILDCARDED_USER_OR_APP]
|
|
message = Cannot edit/create a metric alert for wildcarded users or applications.
|
|
severity = error
|
|
|
|
[METRIC_ALERT:MANAGER_FAILS_TO__S_S]
|
|
message = The metric alert server failed to %s metric alert '%s'.
|
|
severity = error
|
|
|
|
[METRIC_ALERT:ALERT_CONDITION_CANNOT_BE_EMPTY__S]
|
|
message = Metric alert condition cannot be empty for '%s'.
|
|
severity = error
|
|
|
|
[METRIC_ALERT:INVALID_CONDITION_EVALUATOR]
|
|
message = Invalid metric alert condition evaluator: must evaluate a condition expression first.
|
|
severity = error
|
|
|
|
[METRIC_ALERT:CONDITION_PARSE_FAILED__S_S]
|
|
message = Failed to parse metric alert condition. condition="%s". error="%s".
|
|
severity = error
|
|
|
|
[METRIC_ALERT:CONDITION_PARSE_FAILED_ON_AGGREGATE__S_S_S]
|
|
message = Failed to parse aggregate of metric alert condition. condition="%s". aggregate="%s". error="%s".
|
|
severity = error
|
|
|
|
[METRIC_ALERT:CONDITION_EVALUATE_FAILED__S_S]
|
|
message = Failed to evaluate metric alert condition. condition="%s". error="%s".
|
|
severity = error
|
|
|
|
[METRIC_ALERT:FIELD_PARSE_FAILED__S_S_S_S_S]
|
|
message = Failed to parse field in %s. field="%s". %s="%s". error="%s".
|
|
severity = error
|
|
|
|
|
|
[SAVED_SPLUNK]
|
|
name = Saved Splunk Processor
|
|
|
|
[SAVED_SPLUNK:CANNOT_FIND__S]
|
|
message = Unable to find saved search named '%s'.
|
|
severity = error
|
|
|
|
[SAVED_SPLUNK:ERROR_BUILDING__S_S]
|
|
message = Encountered the following error while building a search for saved search '%s': %s
|
|
severity = error
|
|
|
|
[SAVED_SPLUNK:USAGE__S]
|
|
message = Usage: %s <name> [options].
|
|
severity = error
|
|
|
|
|
|
[SAVED_SPLUNKER]
|
|
name = Saved Splunker
|
|
|
|
[SAVED_SPLUNKER:CANNOT_ASSUME_SYSTEM_CONTEXT]
|
|
message = Could not assume system context. Search scheduler will not run. Report this to Splunk support.
|
|
severity = error
|
|
|
|
[SAVED_SPLUNKER:CANNOT_EXECUTE_RTSEARCH__S]
|
|
message = Real time searches cannot be executed by the scheduler. Disabling schedule for savedsearch_id="%s".
|
|
severity = error
|
|
|
|
[SAVED_SPLUNKER:CANNOT_GET_HOSTNAME]
|
|
message = Cannot get hostname. Defaulting to localhost.
|
|
severity = warn
|
|
|
|
[SAVED_SPLUNKER:FAILED_USER_CONTEXT__S]
|
|
message = Failed to set user context for saved search with id '%s'. Disabling its schedule.
|
|
severity = warn
|
|
|
|
[SAVED_SPLUNKER:MAX_CONCURRENT__LU_LU]
|
|
message = The maximum number of concurrent scheduled searches has been reached (%lu). %lu ready-to-run scheduled searches are pending.
|
|
severity = warn
|
|
|
|
[SAVED_SPLUNKER:NO_SUMMARY_INDEX]
|
|
message = The search scheduler is disabled by the license Splunk is using. Scheduled searches that populate a summary index were found, but they will not be executed. This might affect dashboard panels that depend on the summary index. [!/help?location=learnmore.license.features Learn more].
|
|
severity = warn
|
|
|
|
[SAVED_SPLUNKER:ORPHANED_SEARCHES__LLU_LLU]
|
|
message = Splunk has found %llu orphaned searches owned by %llu unique disabled users.
|
|
severity = info
|
|
|
|
[SAVED_SPLUNKER:SCHEDULER_THREAD_PREVIOUSLY_STARTED__S]
|
|
message = %s: Scheduler thread previously started.
|
|
severity = error
|
|
|
|
[SAVED_SPLUNKER:UNKNOWN_ERROR_IN__S]
|
|
message = Unknown error in %s.
|
|
severity = error
|
|
|
|
[SAVED_SPLUNKER:UNKNOWN_ERROR_WHILE_PROCESSING_SAVED_SEARCH__S]
|
|
message = Unknown error while processing actions for saved search '%s'.
|
|
severity = error
|
|
|
|
[SAVED_SPLUNKER:UNKNOWN_RELATION__S]
|
|
message = Relation '%s' is unknown.
|
|
severity = error
|
|
|
|
[SAVED_SPLUNKER:DURABLE_CURSOR_FAILURE__S]
|
|
message = Unable to schedule the durable search: '%s'.
|
|
severity = error
|
|
|
|
|
|
[SCRIPT]
|
|
name = Script
|
|
|
|
[SCRIPT:CANNOT_FIND__S_S]
|
|
message = Cannot find program '%s' or script '%s'.
|
|
severity = error
|
|
|
|
[SCRIPT:CHILD_ERROR]
|
|
message = Could not start child process.
|
|
severity = error
|
|
|
|
[SCRIPT:DEPRECATED_SCRIPT_TYPE__S_S]
|
|
message = Deprecated use of '%s'. The script type argument '%s' will be ignored.
|
|
severity = warn
|
|
|
|
[SCRIPT:INPUT_PIPE_ERROR]
|
|
message = Could not create the input pipe.
|
|
severity = error
|
|
|
|
[SCRIPT:MAXINPUTS_TOO_SMALL__LD_S]
|
|
message = Maxinputs must be at least %ld, command name="%s".
|
|
severity = warn
|
|
|
|
[SCRIPT:NO_NEEDED_AUTH_STRING__S]
|
|
message = Unable to get an authentication string for the external search command name="%s".
|
|
severity = warn
|
|
|
|
[SCRIPT:NO_SCRIPT_PERMS__S_S]
|
|
message = You do not have permission to run the '%s' script %s.
|
|
severity = error
|
|
|
|
[SCRIPT:OUTPUT_PIPE_ERROR]
|
|
message = Encountered error while setting up the output pipe.
|
|
severity = error
|
|
|
|
[SCRIPT:PIPE_ERROR__D]
|
|
message = Received the following pipe error: %d.
|
|
severity = error
|
|
|
|
[SCRIPT:SCRIPT_ERROR__S_S]
|
|
message = Encountered the following error while running the '%s' script: %s.
|
|
severity = error
|
|
|
|
[SCRIPT:SECURITY_ERROR__S]
|
|
message = You cannot run a script (%s) outside of the secure directory.
|
|
severity = error
|
|
|
|
[SCRIPT:USAGE_ARG_COUNT]
|
|
message = Requires at least one argument: <script-name> [<arguments>].
|
|
severity = error
|
|
|
|
[SCRIPT:USAGE_LANG]
|
|
message = Only Python and Perl scripts are supported.
|
|
severity = error
|
|
|
|
|
|
[SEARCHFACTORY]
|
|
name = Search Factory
|
|
|
|
[SEARCHFACTORY:UNKNOWN_OP__S]
|
|
message = Unknown search command '%s'.
|
|
severity = error
|
|
capabilities = search
|
|
|
|
|
|
[SHCLUSTER]
|
|
name = Search Head Clustering
|
|
|
|
[SHCLUSTER:CONF_REPLICATION_CAPTAIN_DISCONNECTED__S]
|
|
message = The search head cluster captain (%s) is disconnected; skipping configuration replication.
|
|
severity = warn
|
|
capabilities = list_search_head_clustering
|
|
help = learnmore.shc.confreplication
|
|
|
|
[SHCLUSTER:CONF_REPLICATION_PULL_STALL__S_S]
|
|
message = Search head cluster member (%s) is having problems pulling configurations from the search head cluster captain (%s). Changes from the other members are not replicating to this member, and changes on this member are not replicating to other members.
|
|
action = Consider performing a destructive configuration resync on this search head cluster member.
|
|
severity = error
|
|
capabilities = list_search_head_clustering
|
|
help = learnmore.shc.confreplication
|
|
|
|
[SHCLUSTER:CONF_REPLICATION_PUSH_STALL__S_S]
|
|
message = Search head cluster member (%s) is having problems pushing configurations to the search head cluster captain (%s). Changes on this member are not replicating to other members.
|
|
severity = warn
|
|
capabilities = list_search_head_clustering
|
|
help = learnmore.shc.confreplication
|
|
|
|
[SHCLUSTER:RAFT_FILE_CORRUPTION__S_S]
|
|
message = Received an invalid mgmt_uri %s for the search head cluster. This cluster member %s might be in a bad state.
|
|
severity = error
|
|
capabilities = admin_all_objects
|
|
help = message.shc.troubleshooting.raft
|
|
|
|
[SHCLUSTER:RAFT_METADATA_FILE_CORRUPTION]
|
|
message = Search head cluster member has a corrupted raft state.
|
|
severity = error
|
|
capabilities = admin_all_objects
|
|
help = message.shc.troubleshooting.raft
|
|
|
|
|
|
[APPDEPLOYHANDLER]
|
|
name = Search Head Cluster Deployer
|
|
|
|
[APPDEPLOYHANDLER:CONFIG_BUNDLE_CONTAINS_DEFAULT_APPS__S_S_S]
|
|
message = The configuration bundle contains the following default apps: %s. %s Do not use the deployer to push default apps to the search head cluster members. Read the topic "Use the deployer to distribute apps and configuration updates" in the Distributed Search manual on docs.splunk.com for details. %s If you are sure you want to do this, push the configuration bundle again using the "-push-default-apps true" option.
|
|
help = learnmore.shc.deployer
|
|
severity = warn
|
|
capabilities = list_search_head_clustering
|
|
|
|
[APPDEPLOYHANDLER:CONFIG_BUNDLE_PUSH_NOT_SUPPORTED_FROM_SHC_MEMBER]
|
|
message = This operation is not supported on a search head cluster member. Use the deployer. Read the topic "Use the deployer to distribute apps and configuration updates" in the Distributed Search manual on docs.splunk.com for details.
|
|
help = learnmore.shc.deployer
|
|
severity = error
|
|
capabilities = list_search_head_clustering
|
|
|
|
|
|
[SRCH_OPTIMIZER]
|
|
name = Search Optimizer
|
|
|
|
[SRCH_OPTIMIZER:OPTIMIZATION_FAILED]
|
|
message = Search optimization failed. The search will run as written; however, suboptimal search performance might occur.
|
|
severity = info
|
|
capabilities = search
|
|
|
|
|
|
[SEARCH_PARSER]
|
|
name = Search Parser
|
|
|
|
[SEARCH_PARSER:EMPTY_MACRO_EXPR]
|
|
message = You must provide a macro expression.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:EMPTY_NAME]
|
|
message = You must provide macro and argument names.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:INVALID_MACRO_EMPTY_NAME__S]
|
|
message = The macro expression '%s' is invalid. A macro name is required.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:INVALID_MACRO_EXPR__S_S]
|
|
message = The macro expression '%s' is invalid. %s.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:INVALID_MACRO_NOCLOSE__S]
|
|
message = The macro expression '%s' is invalid. Expected closing ')'.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:INVALID_MACRO_PARTIAL_NAME__S]
|
|
message = The macro expression '%s' is invalid. All arguments must be named or none can be named.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:INVALID_NAME__S]
|
|
message = The name '%s' is invalid. Macro and argument names can only include alphanumerics, '_', and '-'.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:INVOKE_ARG_MISMATCH__S]
|
|
message = The macro expression '%s' is invalid. The argument list does not match the definition.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:MACRODEF_ARG_LIST_IGNORED__S_S]
|
|
message = '%s' does not expect any arguments. Ignoring the '%s' conf key.
|
|
severity = warn
|
|
|
|
[SEARCH_PARSER:MACRODEF_ARG_NUM__S_S]
|
|
message = The argument list for macro '%s' is invalid. Expected %s elements.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:MACRODEF_NOT_FOUND_ARGS__S_S]
|
|
message = Unable to find a definition for macro '%s'. It is expected in the '%s' conf key.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:MACRODEF_NOT_FOUND__S]
|
|
message = The search specifies a macro '%s' that cannot be found. Reasons include: the macro name is misspelled, you do not have "read" permission for the macro, or the macro has not been shared with this application. Click Settings, Advanced search, Search Macros to view macro information.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:MACRODEF_NO_ARG_LIST__S_S]
|
|
message = The argument list for macro '%s' is missing. It is expected in the '%s' conf key.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:MACRODEF_REPEAT_ARG__S]
|
|
message = The argument list for macro '%s' is invalid. It contains repeated names.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:MACRO_EVAL_FAIL__S]
|
|
message = The definition of macro '%s' is expected to be an eval expression that returns a string.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:MACRO_MAX_DEPTH__LU]
|
|
message = Reached maximum recursion depth (%lu) while expanding macros. Check for infinitely recursive macro definitions.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:MACRO_PAREN_END__S]
|
|
message = Error for macro argument expression '%s'. Arguments that start with '(' must end with ')'.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:MACRO_PAREN__S]
|
|
message = Macro argument expression '%s' has mismatched parens.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:MISMATCHED_CLOSE_BRACKET]
|
|
message = Mismatched ']'.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:MISMATCHED_OPEN_BRACKET]
|
|
message = Mismatched '['.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:MISSING_CLOSE_TICK]
|
|
message = Missing a closing tick mark for macro expansion.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:MISSING_SEARCH_COMMAND__C_U_S]
|
|
message = Missing a search command before '%c'. Error at position '%u' of search query '%s'.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:SUBSEARCH_CMD_ARG__U_S]
|
|
message = Subsearches are only valid as arguments to commands. Error at position '%u' of search query '%s'.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:SUBSEARCH_MAX_DEPTH__LU]
|
|
message = Cannot run this search as it contains too many nested subsearches. Maximum nested subsearches allowed: %lu.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:TRAILING_ESCAPE]
|
|
message = The trailing escape character is invalid.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:UNBALANCED_QUOTES]
|
|
message = Unbalanced quotes.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:VALIDATION_FAIL__S_S]
|
|
message = Encountered the following error while validating macro '%s': %s.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:VALIDATION_INVALID__S]
|
|
message = The validation expression is invalid: '%s'.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:COMMENT_NOT_TERMINATED]
|
|
message = Missing comment closing tickmarks.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:INVALID_SEARCH__S]
|
|
message = The search is invalid: '%s'.
|
|
severity = error
|
|
|
|
[SEARCH_PARSER:INVALID_SPL2__S]
|
|
message = The SPL2 query is invalid: '%s'.
|
|
severity = error
|
|
|
|
|
|
[SEARCH_PIPELINE]
|
|
name = Search Pipeline
|
|
|
|
[SEARCH_PIPELINE:UNKNOWN_ARG_EXCEPT__S]
|
|
message = Encountered an unknown exception while evaluating arguments for command: '%s'.
|
|
severity = error
|
|
|
|
[SEARCH_PIPELINE:UNKNOWN_EXEC_EXCEPT__S]
|
|
message = Encountered an unknown exception while executing command: '%s'.
|
|
severity = error
|
|
|
|
|
|
[SEARCHPROC]
|
|
name = Search Processor
|
|
|
|
[SEARCHPROC:DENIED_INDEX_PERMISSION__S]
|
|
message = Permission denied to index '%s'.
|
|
severity = error
|
|
|
|
[SEARCHPROC:EMPTY_FIELD_NAME]
|
|
message = Field names cannot be empty strings.
|
|
severity = error
|
|
|
|
[SEARCHPROC:INVALID_OPTION__S]
|
|
message = Option '%s' is invalid.
|
|
severity = error
|
|
|
|
[SEARCHPROC:INVALID_OPT_VAL__S_S]
|
|
message = Invalid option value. Expecting a '%s' for option '%s'.
|
|
severity = error
|
|
|
|
[SEARCHPROC:INVALID_OPT_VAL__S_S_S]
|
|
message = Invalid option value. Expecting a '%s' for option '%s'. Instead got '%s'.
|
|
severity = error
|
|
|
|
[SEARCHPROC:MAXOUT_BIGGER_THAN_MAXRESULTS__LU_LU_LU]
|
|
message = The subsearch 'maxout' value of %lu cannot be greater than the limits.conf [search] 'maxresultrows' value of %lu. Lowering 'maxout' to %lu.
|
|
severity = warn
|
|
|
|
[SEARCHPROC:MISMATCHED_QUOTES]
|
|
message = Mismatched quotes.
|
|
severity = error
|
|
|
|
[SEARCHPROC:MISMATCHED_QUOTES_AND_PAREN]
|
|
message = Mismatched quotes and/or parenthesis.
|
|
severity = error
|
|
|
|
[SEARCHPROC:MULTIPLE_FIELD_SPEC__S]
|
|
message = Field '%s' should not be specified more than once.
|
|
severity = error
|
|
|
|
[SEARCHPROC:MULTIPLE_OPT_SPEC__S]
|
|
message = Option '%s' should not be specified more than once.
|
|
severity = error
|
|
|
|
[SEARCHPROC:NO_AVAILABLE_INDEXES]
|
|
message = There are no matching indexes that you have permission to access.
|
|
severity = error
|
|
|
|
[SEARCHPROC:REALTIME_SUBSEARCH]
|
|
message = Subsearches of a real-time search run over all-time unless explicit time bounds are specified within the subsearch.
|
|
severity = info
|
|
capabilities = rtsearch
|
|
|
|
[SEARCHPROC:SUBSEARCH_ARG__S]
|
|
message = Subsearch evaluated to the following search expression: %s
|
|
severity = debug
|
|
capabilities = search
|
|
|
|
[SEARCHPROC:SUBSEARCH_MAXOUT_EXCEEDED__LU_S_LU]
|
|
message = Subsearch produced %lu results, truncating to maxout [%s] %lu.
|
|
severity = info
|
|
capabilities = search
|
|
|
|
[SEARCHPROC:SUBSEARCH_MAXOUT_EXCEEDED__S_LU]
|
|
message = Subsearch results are truncated to maximum [%s] %lu.
|
|
severity = info
|
|
capabilities = search
|
|
|
|
[SEARCHPROC:TIMEOUT__LU]
|
|
message = Timed out (%lu secs) while waiting for subsearch results.
|
|
severity = error
|
|
|
|
[SEARCHPROC:UNKNOWN_SUBSEARCH_FAIL]
|
|
message = Unknown subsearch failure.
|
|
severity = error
|
|
|
|
|
|
[SEARCH_RESULTS]
|
|
name = Search Results
|
|
|
|
[SEARCH_RESULTS:CREATE_TMP_FILE_FAIL__S]
|
|
message = Unable to create or write to temporary file '%s'.
|
|
severity = error
|
|
|
|
[SEARCH_RESULTS:CSV_HEADER_PARSE_FAIL]
|
|
message = Unexpected error parsing CSV header.
|
|
severity = error
|
|
|
|
[SEARCH_RESULTS:ERROR_READING_FILE__S]
|
|
message = Encountered an error while reading file '%s'.
|
|
severity = error
|
|
|
|
[SEARCH_RESULTS:FILE_WRITE_FAIL__S]
|
|
message = Could not write to file '%s'.
|
|
severity = error
|
|
|
|
[SEARCH_RESULTS:GZOPEN_FAIL__S]
|
|
message = Unable to ::gzopen() temporary file '%s' for writing.
|
|
severity = error
|
|
|
|
[SEARCH_RESULTS:GZWRITE_ERROR__M]
|
|
message = Encountered the following gzwrite error: %m.
|
|
severity = error
|
|
|
|
[SEARCH_RESULTS:INFO_LOGGING_LIMIT_REACHED__LU]
|
|
message = The limit has been reached for log messages in info.csv. %lu messages have not been written to info.csv. Refer to search.log for these messages or limits.conf to configure this limit.
|
|
severity = error
|
|
|
|
[SEARCH_RESULTS:INVALID_INDEX]
|
|
message = The index into SearchResult is invalid.
|
|
severity = error
|
|
|
|
[SEARCH_RESULTS:MEMORY_LIMIT_EXCEEDED_IN_PROCESSOR__S_LU_LU_S_S_S]
|
|
message = command.%s: output will be truncated at %lu results due to excessive memory usage. Memory threshold of %luMB as configured in %s / [%s] / %s has been reached.
|
|
severity = warn
|
|
|
|
[SEARCH_RESULTS:MEMORY_LIMIT_EXCEEDED_RESULTS_INCOMPLETE__LU]
|
|
message = Reached the maximum in-memory consumption limit for search results (%lu MB), results might be incomplete. Increase the max_mem_usage_mb setting in limits.conf.
|
|
severity = error
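# Illustrative sketch only (not part of the message definitions above): the
# message refers to the 'max_mem_usage_mb' setting in limits.conf. A
# hypothetical override in $SPLUNK_HOME/etc/system/local/limits.conf (the
# value shown is an example, not a recommendation) might look like:
#
#   [default]
#   max_mem_usage_mb = 500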
|
|
|
|
[SEARCH_RESULTS:MISSING_RESULTS_INFO__S]
|
|
message = Unable to find the results info CSV at %s.
|
|
severity = error
|
|
|
|
[SEARCH_RESULTS:PARSE_ERROR__S]
|
|
message = Unable to parse '%s'.
|
|
severity = error
|
|
|
|
[SEARCH_RESULTS:RETRY_RENAME_FAIL_WIN__S_S_LU_LU_S]
|
|
message = Unable to update '%s'; could not rename temporary file '%s': Retried %lu times, period=%lu ms. Internal error: '%s'.
|
|
severity = info
|
|
|
|
[SEARCH_RESULTS:RETRY_RENAME_FAIL__S_S_LU_LU_M]
|
|
message = Unable to update '%s'; could not rename temporary file '%s': Retried %lu times, period=%lu ms. error='%m'.
|
|
severity = info
|
|
|
|
[SEARCH_RESULTS:SEARCH_METRICS_ERROR__S]
|
|
message = Failed to update search metric '%s'. Search metrics results might be incorrect.
|
|
severity = error
|
|
|
|
[SEARCH_RESULTS:SPLITFILE_WRITE_FAIL__S]
|
|
message = Could not write to new file '%s' in splitFile().
|
|
severity = error
|
|
|
|
[SEARCH_RESULTS:SRSSERIALIZER_MAX_STR_LEN_EXCEEDED__LU]
|
|
message = Events are not displayed in the search results because _raw fields exceed the limit of %lu characters.
|
|
action = Ensure that _raw fields are below the given character limit or switch to the CSV serialization format by setting 'results_serial_format=csv' in limits.conf. Switching to the CSV serialization format will reduce search performance.
|
|
severity = error
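# Illustrative sketch only (not part of the message definitions above): the
# action above refers to switching the search results serialization format in
# limits.conf. A hypothetical override in
# $SPLUNK_HOME/etc/system/local/limits.conf, accepting the documented search
# performance trade-off, might look like:
#
#   [search]
#   results_serial_format = csv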
|
|
|
|
|
|
[SET]
|
|
name = Set Processor
|
|
|
|
[SET:USAGE_3]
|
|
message = Three arguments are required: [<search pipeline>] [+(union) | -(diff) | ^(intersect)] [<search pipeline>].
|
|
severity = error
|
|
|
|
[SET:USAGE_OPERATOR]
|
|
message = The set operator is invalid. It must be either +(union), -(diff), or ^(intersect).
|
|
severity = error
|
|
|
|
|
|
[SHC_UPGRADING]
|
|
name = SHC Upgrading
|
|
|
|
[SHC_UPGRADING:SET_SHC_UPGRADING]
|
|
message = Upgrade of this search head cluster is in progress.
|
|
severity = info
|
|
capabilities = list_search_head_clustering
|
|
|
|
|
|
[SHPOOL]
|
|
name = SHPooling Manager
|
|
|
|
[SHPOOL:FAILED_TO_LOAD_CONFIG__S]
|
|
message = Failed to load shpool configuration (server.conf). Error = %s.
|
|
severity = error
|
|
|
|
[SHPOOL:FAILED_TO_REGISTER_WITH_MASTER__S_S]
|
|
message = Failed to register with the shpool captain. Reason: %s [ event=addPeer status=retrying %s ].
|
|
severity = warn
|
|
|
|
[SHPOOL:INDEXING_NOT_READY]
|
|
message = Search head pool is not ready; fewer than replication_factor peers are up.
|
|
severity = warn
|
|
|
|
[SHPOOL:REPLICATION_PORT_USE_ERROR__U]
|
|
message = Search head clustering initialization failed. Could not bind to replication port (%u). Ensure that port is not in use.
|
|
severity = error
|
|
|
|
[SHPOOL:ROLLING_RESTART_MIGHT_SKIP_SOME_PEERS__U]
|
|
message = A rolling restart was initiated for peers requiring a restart. However, some peers have not sent a heartbeat recently (within the last %u seconds) and might not yet have advertised to the captain that they require a restart. Such peers might be skipped in this rolling restart and might need to be restarted manually.
|
|
severity = warn
|
|
|
|
|
|
[SIMPLE_LOG_CLUSTERING_PROCESSOR]
|
|
name = Simple Log Clustering Processor
|
|
|
|
[SIMPLE_LOG_CLUSTERING_PROCESSOR:BAD_THRESHOLD]
|
|
message = The threshold must be > 0.0 and < 1.0.
|
|
severity = error
|
|
|
|
[SIMPLE_LOG_CLUSTERING_PROCESSOR:EMPTY_COUNTFIELD]
|
|
message = The countfield name must not be empty.
|
|
severity = error
|
|
|
|
[SIMPLE_LOG_CLUSTERING_PROCESSOR:EMPTY_LABELFIELD]
|
|
message = The labelfield name must not be empty.
|
|
severity = error
|
|
|
|
[SIMPLE_LOG_CLUSTERING_PROCESSOR:MAX_CLUSTERS_REACHED__LU]
|
|
message = Reached the maximum number of clusters (%lu). Some results were not clustered.
|
|
severity = warn
|
|
|
|
[SIMPLE_LOG_CLUSTERING_PROCESSOR:UNKNOWN_MATCH_TYPE]
|
|
message = The 'match' type is unknown. Valid values are 'termlist', 'termset', and 'ngramset'.
|
|
severity = error
|
|
|
|
|
|
[SORT]
|
|
name = Sort Processor
|
|
|
|
[SORT:COPY_FILE_FAIL__S_S_M]
|
|
message = Failed to copy file '%s' to '%s' because of: %m.
|
|
severity = error
|
|
|
|
[SORT:EMPTY_ARGUMENT]
|
|
message = The empty string argument is invalid.
|
|
severity = error
|
|
|
|
[SORT:EMPTY_FIELD_NAME__S]
|
|
message = The empty field name in the argument is invalid: '%s'.
|
|
severity = error
|
|
|
|
[SORT:MISMATCHED_PARENS__S]
|
|
message = There is a mismatched parenthesis in field specifier '%s'.
|
|
severity = error
|
|
|
|
[SORT:MISSING_FIELDS]
|
|
message = You must specify fields to sort.
|
|
severity = error
|
|
|
|
[SORT:OPEN_FILE_FAIL__S]
|
|
message = Could not open or create file '%s' for writing.
|
|
severity = error
|
|
|
|
[SORT:READ_HEADER_FAIL__S]
|
|
message = Failed to read header for internal file '%s'.
|
|
severity = error
|
|
|
|
[SORT:RENAME_FILE_FAIL__S_S]
|
|
message = Failed to rename file '%s' to '%s'.
|
|
severity = error
|
|
|
|
[SORT:WRITE_FILE_FAIL__S]
|
|
message = Could not write to file '%s'.
|
|
severity = error
|
|
|
|
|
|
[SPATH]
|
|
name = SPath
|
|
|
|
[SPATH:NO_PATH_SPECIFIED]
|
|
message = You have not specified a path. Try using "path=mypath" as an argument to spath.
|
|
severity = error
|
|
|
|
|
|
[STATS]
|
|
name = Stats Processor
|
|
|
|
[STATS:AFTER_BOOLEAN_EXPR]
|
|
message = reset_after must be a boolean expression.
|
|
severity = error
|
|
|
|
[STATS:BAD_NUM_WILDCARDS__S_S]
|
|
message = The number of wildcards between field specifier '%s' and rename specifier '%s' does not match. Note: an empty field specifier implies all fields, e.g. sum() == sum(*).
|
|
severity = error
|
|
|
|
[STATS:BAD_PERCENTILE]
|
|
message = Percentile must be a floating point number that is >= 0 and < 100.
|
|
severity = error
|
|
|
|
[STATS:BAD_TIMESPAN]
|
|
message = Invalid timespan specified for sparkline.
|
|
severity = error
|
|
|
|
[STATS:BEFORE_BOOLEAN_EXPR]
|
|
message = reset_before must be a boolean expression.
|
|
severity = error
|
|
|
|
[STATS:CANNOT_RENAME_FILE__S_S]
|
|
message = Could not rename file '%s' to '%s'.
|
|
severity = error
|
|
|
|
[STATS:COULD_NOT_FILL_RENAME_PATTERN__S]
|
|
message = Did not properly fill rename pattern '%s'.
|
|
severity = warn
|
|
|
|
[STATS:DEPRECATION_WARNING]
|
|
message = This search uses deprecated 'stats' command syntax. This syntax implicitly translates '<function>' or '<function>()' to '<function>(*)', except for cases where the function is 'count'. Use '<function>(*)' instead.
|
|
severity = info
|
|
|
|
[STATS:DEPRECATION_WARNING__S_S]
|
|
message = Using deprecated stats feature '%s'. Please use '%s' instead.
|
|
severity = info
|
|
|
|
[STATS:EVAL_FIELD_REQUIRES_RENAME__S]
|
|
message = You must specify a rename for the aggregation specifier on the dynamically evaluated field '%s'.
|
|
severity = error
|
|
|
|
[STATS:EVAL_NOMULTIFUNC]
|
|
message = You cannot use multi-function specifiers ('all' or 'default') with dynamically evaluated fields.
|
|
severity = error
|
|
|
|
[STATS:FAILING_WRITING_FILE__S]
|
|
message = Failed to write to file '%s'.
|
|
severity = error
|
|
|
|
[STATS:INVALID_AGG_SPEC__S]
|
|
message = The aggregation specifier '%s' is invalid. The aggregation specifier must be in [func_name]([key]) format.
|
|
severity = error
|
|
|
|
[STATS:INVALID_ARGUMENT__S]
|
|
message = The argument '%s' is invalid.
|
|
severity = error
|
|
|
|
[STATS:INVALID_EVAL_FIELD_SPEC__S]
|
|
message = The dynamically evaluated field specifier '%s' is invalid. The field specifier must be non-empty, start with '{', and end with '}'.
|
|
severity = error
|
|
|
|
[STATS:INVALID_EVAL__S_S]
|
|
message = The eval expression for dynamic field '%s' is invalid. Error='%s'.
|
|
severity = error
|
|
|
|
[STATS:INVALID_FUNC_SPARKLINE]
|
|
message = Invalid aggregation function for sparkline.
|
|
severity = error
|
|
|
|
[STATS:INVALID_RENAME]
|
|
message = The rename value is invalid.
|
|
severity = error
|
|
|
|
[STATS:INVALID_SPARKLINE]
|
|
message = Invalid sparkline specifier.
|
|
severity = error
|
|
|
|
[STATS:INVALID_TIMESTAMP__S]
|
|
message = The _time or _origtime field is required by the following stats function(s) and is missing: %s. The results of the function(s) may be invalid.
|
|
action = Verify that your indexing configuration adds those fields to your events, or update your search so that they are not omitted.
|
|
severity = info
|
|
|
|
[STATS:INVALID_TIME_WINDOW]
|
|
message = time_window must be a valid time span.
|
|
severity = error
|
|
|
|
[STATS:INVALID_WILDCARD]
|
|
message = The wildcard field is invalid. It contains consecutive wildcards.
|
|
severity = error
|
|
|
|
[STATS:INVALID_WINSIZE__LU_LU]
|
|
message = The streaming window size %lu is invalid (maximum size = %lu).
|
|
severity = error
|
|
|
|
[STATS:MISSING_FIELDS_AFTER_BY]
|
|
message = You must specify at least one field after 'by'.
|
|
severity = error
|
|
|
|
[STATS:MISSING_FIELD_AFTER_AS]
|
|
message = You must specify a field name after 'as'.
|
|
severity = error
|
|
|
|
[STATS:MULTI_FUNC_RENAME]
|
|
message = You cannot specify a rename for multi-function specifiers ('all' or 'default').
|
|
severity = error
|
|
|
|
[STATS:NAME_CONFLICT__S]
|
|
message = The output field '%s' cannot have the same name as a group-by field.
|
|
severity = error
|
|
|
|
[STATS:NO_EVENTS_FOUND__S]
|
|
message = No events found containing field(s) '%s'.
|
|
severity = debug
|
|
|
|
[STATS:NO_RESULTS_DIRECTORY__S]
|
|
message = The results directory has not been created: '%s'.
|
|
severity = error
|
|
|
|
[STATS:ONLY_SPARKCOUNT]
|
|
message = Sparklines not specific to a field must use the "count" aggregator.
|
|
severity = error
|
|
|
|
[STATS:PRESTATS_ERROR]
|
|
message = Corrupt information from pre-stats:
|
|
severity = warn
|
|
|
|
[STATS:REPEAT_BYFIELD__S]
|
|
message = Repeated group-by field '%s'.
|
|
severity = error
|
|
|
|
[STATS:REPEAT_RENAME__LLU_S_S]
|
|
message = %llu duplicate rename field(s). Original renames: %s. Duplicate renames: %s.
|
|
severity = error
|
|
|
|
[STATS:RESERVED_FIELD__S_S]
|
|
message = The field name '%s' is invalid. All fields starting with '%s' are reserved for internal use. Rename your field.
|
|
severity = error
|
|
|
|
[STATS:ROW_TOO_LARGE]
|
|
message = The data in each row is too large to aggregate. Consider increasing max_mem_usage_mb.
|
|
severity = error
|
|
|
|
[STATS:TIME_WINDOW_MUST_BE_GLOBAL]
|
|
message = Cannot set global to false when using a time window.
|
|
severity = error
|
|
|
|
[STATS:TIME_WINDOW_MUST_INCLCUR]
|
|
message = Cannot set current to false when using a time window.
|
|
severity = error
|
|
|
|
[STATS:TIME_WINDOW_REQUIRES_ORDER]
|
|
message = time_window can only be used on input that is sorted in time order (both ascending and descending order are ok).
|
|
severity = error
|
|
|
|
[STATS:SPILL_THREAD_EXCEPTION__S]
|
|
message = Exception caught in stats spill thread. Error='%s'.
|
|
severity = error
|
|
|
|
[STATS:VALUE_LIMIT_REACHED__S_LU]
|
|
message = '%s' command: Limit of '%lu' for values reached. Additional values may have been truncated or ignored.
|
|
severity = warn
|
|
|
|
|
|
[SMARTBUS]
|
|
name = Smartbus
|
|
|
|
[SMARTBUS:RQ_BUS_UPLOAD_MESSAGE_FAILED__S_S_S_LLU_LLU]
|
|
message = Failed to upload messages to remote queue bus with name=%s and type=%s for server_name=%s with num_attempts=%llu and spent_time_s=%llus. This could have an impact on ingestion throughput.
|
|
severity = error
|
|
action = Check Splunk remote_queue configuration in outputs.conf as well as service availability for Amazon AWS services.
|
|
capabilities = list_remote_output_queue
|
|
|
|
[SMARTBUS:RQ_DATASTORE_UPLOAD_MESSAGE_FAILED__S_S_S_LLU_LLU]
|
|
message = Failed to upload messages to remote queue datastore with name=%s and type=%s for server_name=%s with num_attempts=%llu and spent_time_s=%llus. This could have an impact on ingestion throughput.
|
|
severity = error
|
|
action = Check Splunk remote_queue configuration in outputs.conf as well as service availability for Amazon AWS services.
|
|
capabilities = list_remote_output_queue
|
|
|
|
[SMARTBUS:RQ_BUS_DOWNLOAD_MESSAGE_FAILED__S_S_S_LLU_LLU]
|
|
message = Failed to download messages from remote queue bus with name=%s and type=%s for server_name=%s with num_attempts=%llu and spent_time_s=%llus. This could have an impact on ingestion throughput.
|
|
severity = error
|
|
action = Check Splunk remote_queue configuration in inputs.conf as well as service availability for Amazon AWS services.
|
|
capabilities = list_remote_input_queue
|
|
|
|
[SMARTBUS:RQ_DATASTORE_DOWNLOAD_MESSAGE_FAILED__S_S_S_LLU_LLU]
|
|
message = Failed to download messages from remote queue datastore with name=%s and type=%s for server_name=%s with num_attempts=%llu and spent_time_s=%llus. This could have an impact on ingestion throughput.
|
|
severity = error
|
|
action = Check Splunk remote_queue configuration in inputs.conf as well as service availability for Amazon AWS services.
|
|
capabilities = list_remote_input_queue
|
|
|
|
[SMARTBUS:RQ_RENEW_MESSAGE_FAILED__S_S_S_LLU_LLU]
|
|
message = Failed to renew messages from remote queue with name=%s and type=%s for server_name=%s with num_attempts=%llu and spent_time=%llus. This could have an impact on ingestion throughput.
|
|
severity = error
|
|
action = Check Splunk remote_queue configuration in inputs.conf as well as service availability for Amazon AWS services.
|
|
capabilities = list_remote_input_queue
|
|
|
|
[SMARTBUS:RQ_ACK_MESSAGE_FAILED__S_S_S_LLU_LLU]
|
|
message = Failed to ack messages from remote queue with name=%s and type=%s for server_name=%s with num_attempts=%llu and spent_time=%llus. This could have an impact on ingestion throughput.
|
|
severity = error
|
|
action = Check Splunk remote_queue configuration in inputs.conf as well as service availability for Amazon AWS services.
|
|
capabilities = list_remote_input_queue
|
|
|
|
|
|
[STREAM_SEARCH]
|
|
name = Stream Search
|
|
|
|
[STREAM_SEARCH:BUNDLES_SETUP_FAIL__S]
|
|
message = Failed to create a bundles setup with server name '%s'. Using the peer's local bundles to execute the search; results might not be correct.
|
|
severity = error
|
|
|
|
[STREAM_SEARCH:EVAL_FAILED__S]
|
|
message = Streamed search failed to eval %s.
|
|
severity = error
|
|
|
|
[STREAM_SEARCH:EXEC_FAILED__S]
|
|
message = Streamed search execute failed because: %s.
|
|
severity = error
|
|
|
|
[STREAM_SEARCH:INSUFF_PRIVILEGES]
|
|
message = Streamed search failed. You have insufficient privileges to run the search.
|
|
severity = error
|
|
|
|
[STREAM_SEARCH:NO_ROLE_IN_URL]
|
|
message = Roles did not come in as part of the url.
|
|
severity = warn
|
|
|
|
[STREAM_SEARCH:NO_USER_PARAMETER]
|
|
message = Unable to find a 'user' parameter in the streamed search. Attempting to acquire the user context.
|
|
severity = warn
|
|
|
|
[STREAM_SEARCH:PARSE_FAILED__S]
|
|
message = Streamed search parse failed because %s.
|
|
severity = error
|
|
|
|
[STREAM_SEARCH:SERVER_NAME_FAIL]
|
|
message = Found no server name in the url. Cannot determine the correct context.
|
|
severity = error
|
|
|
|
[STREAM_SEARCH:TCPPIP_VALID_AFTER_CLOSE]
|
|
message = TcpPipe is valid after close.
|
|
severity = info
|
|
|
|
|
|
[STREAMING_DELETE_OPERATOR]
|
|
name = Streaming Delete Operator
|
|
|
|
[STREAMING_DELETE_OPERATOR:PERMISSION]
|
|
message = You have insufficient privileges to delete events.
|
|
severity = error
|
|
|
|
[STREAMING_DELETE_OPERATOR:RTSEARCH_NOT_SUPPORTED]
|
|
message = You cannot delete events using a real-time search.
|
|
severity = error
|
|
|
|
[STREAMING_DELETE_OPERATOR:FLEX_INDEX_NOT_SUPPORTED]
|
|
message = The delete operation is not supported on flex indexes.
|
|
severity = error
|
|
|
|
|
|
[STRING_CONCATENATE_PROCESSOR]
|
|
name = String Concatenate Processor
|
|
|
|
[STRING_CONCATENATE_PROCESSOR:BAD_LAST_FIELD]
|
|
message = The last field must be the destination field name.
|
|
severity = error
|
|
|
|
[STRING_CONCATENATE_PROCESSOR:EMPTY_FIELD]
|
|
message = Fields cannot be empty.
|
|
severity = error
|
|
|
|
[STRING_CONCATENATE_PROCESSOR:INVALID_ARGUMENT__S]
|
|
message = The argument '%s' is invalid.
|
|
severity = error
|
|
|
|
[STRING_CONCATENATE_PROCESSOR:TOO_FEW_ARGS__S]
|
|
message = There are not enough fields. Usage: %s srcfield1 srcfield2 ... srcfieldn dest_field.
|
|
severity = error
|
|
|
|
|
|
[SURROUNDING_DATA_OPERATOR]
|
|
name = Surrounding Data Operator
|
|
|
|
[SURROUNDING_DATA_OPERATOR:DATA_GAPS_OR_TAMPERED_DATA]
|
|
message = Could not validate this source. There might be gaps in this data or this data might have been tampered with. See splunkd.log.
|
|
severity = warn
|
|
|
|
[SURROUNDING_DATA_OPERATOR:MISSING_REQUIRED_ARGUMENTS__S]
|
|
message = The following required arguments were not provided to the SurroundingDataOperator: %s.
|
|
severity = error
|
|
|
|
[SURROUNDING_DATA_OPERATOR:POSSIBLE_TAMPERING]
|
|
message = Detected possible tampering with this source.
|
|
severity = warn
|
|
|
|
[SURROUNDING_DATA_OPERATOR:SOURCE_CONSISTENCY_GAPS]
|
|
message = Could not validate the consistency of this source. There might be gaps in this data.
|
|
severity = warn
|
|
|
|
[SURROUNDING_DATA_OPERATOR:TOO_MANY_RESULTS_IN_A_SINGLE_SECOND__LU]
|
|
message = Too many events (> %lu) in a single second.
|
|
severity = error
|
|
|
|
[SURROUNDING_DATA_OPERATOR:UNABLE_TO_FIND_ANY_SURROUNDING_EVENTS]
|
|
message = Unable to find any surrounding events.
|
|
severity = error
|
|
|
|
[SURROUNDING_DATA_OPERATOR:UNABLE_TO_PARSE_ID__S]
|
|
message = Unable to parse id '%s'.
|
|
severity = error
|
|
|
|
[SURROUNDING_DATA_OPERATOR:UNKNOWN_EXCEPTION_OCCURRED_WHILE_READING_RESULTS]
|
|
message = An unknown exception occurred while reading results in the surrounding data processor.
|
|
severity = error
|
|
|
|
[SURROUNDING_DATA_OPERATOR:UNKNOWN_EXCEPTION_OCCURRED_WHILE_VERIFYING_BLOCK_SIGNATURE]
|
|
message = An unknown exception occurred while verifying the block signature.
|
|
severity = error
|
|
|
|
[SURROUNDING_DATA_OPERATOR:VALID_CONSISTENCY]
|
|
message = Splunk validated the consistency of this data and found no gaps.
|
|
severity = info
|
|
|
|
|
|
[TAGS]
|
|
name = Tags Annotation Processor
|
|
|
|
[TAGS:DEPRECATION_INFO__S_S]
|
|
message = The '%s' parameter is deprecated. Use '%s' instead.
|
|
severity = info
|
|
|
|
|
|
[TCPOUT]
|
|
name = TCP Output Processor
|
|
|
|
[TCPOUT:FORWARDING_BLOCKED__S_S_S_LD]
|
|
message = The TCP output processor has paused the data flow. Forwarding to host_dest=%s inside output group %s from host_src=%s has been blocked for blocked_seconds=%ld. This can stall the data flow towards indexing and other network outputs.
|
|
action = Review the receiving system's health in the Splunk Monitoring Console. It is probably not accepting data.
|
|
severity = warn
|
|
help = message.stall.splunktcpout
|
|
capabilities = edit_forwarders
|
|
|
|
|
|
[TIME_LINER]
|
|
name = Time Liner
|
|
|
|
[TIME_LINER:COMMIT_VIOLATION__LU_LU]
|
|
message = Ignored %lu events because they were after the commit time (%lu).
|
|
severity = error
|
|
|
|
[TIME_LINER:FETCH_ERROR]
|
|
message = Some events cannot be displayed because they cannot be fetched from the remote search peer(s). This is likely caused by the natural expiration of the related remote search jobs. To view the omitted events, run the search again.
|
|
severity = error
|
|
|
|
|
|
[TOTALS]
|
|
name = New Series Filter Processor
|
|
|
|
[TOTALS:EMPTY_FIELD_NAME]
|
|
message = The comparator '%s' is invalid.
|
|
severity = error
|
|
|
|
|
|
[TRANSAM]
|
|
name = Transaction
|
|
|
|
[TRANSAM:DEPRECATED_OPTION__S]
|
|
message = Transaction option '%s' has been deprecated. Ignoring its value.
|
|
severity = warn
|
|
|
|
[TRANSAM:EVICTED_INFO]
|
|
message = Some transactions have been discarded. To include them, add keepevicted=true to your transaction command.
|
|
severity = info
|
|
|
|
[TRANSAM:FIELDS_OPT_INVALID]
|
|
message = The fields option is invalid when a list of fields is provided in the argument list.
|
|
severity = error
|
|
|
|
[TRANSAM:NO_CONSTRAINTS]
|
|
message = There must be at least one constraint for finding transactions.
|
|
severity = error
|
|
|
|
[TRANSAM:TOO_NEW_CACHED__LU_LU]
|
|
message = this.cached_results (%lu) should be >= txn.event_count (%lu). Setting to 0.
|
|
severity = warn
|
|
|
|
[TRANSAM:UNKNOWN_TRANSACTION__S]
|
|
message = Transaction '%s' is unknown.
|
|
severity = error
|
|
|
|
|
|
[TS_COLLECT_PROCESSOR]
|
|
name = TS Collect Processor
|
|
|
|
[TS_COLLECT_PROCESSOR:MKDIR_FAIL__S]
|
|
message = Failed to create directory for namespace='%s'.
|
|
severity = error
|
|
|
|
[TS_COLLECT_PROCESSOR:OPEN_TSIDX_FAIL__S]
|
|
message = Failed to initialize TSIDX directory for namespace='%s'.
|
|
severity = error
|
|
|
|
[TS_COLLECT_PROCESSOR:SYNC__S_S]
|
|
message = Failed to sync TSIDX directory for namespace='%s' reason='%s'.
|
|
severity = error
|
|
|
|
[TS_COLLECT_PROCESSOR:TXN_CLOSE__S_D]
|
|
message = Failed to finish TSIDX event in namespace='%s' errcode=%d.
|
|
severity = error
|
|
|
|
[TS_COLLECT_PROCESSOR:TXN_PUT__S_S_D]
|
|
message = Failed to add token in namespace='%s' token='%s' errcode=%d.
|
|
severity = error
|
|
|
|
[TS_COLLECT_PROCESSOR:TXN_START__S_D]
|
|
message = Failed to create TSIDX event in namespace='%s' errcode=%d.
|
|
severity = error
|
|
|
|
[TS_COLLECT_PROCESSOR:DEPRECATED_USAGE]
|
|
message = The tscollect command is deprecated.
|
|
severity = warn
|
|
|
|
[TSTATS]
|
|
name = Tsidx Stats Processor
|
|
|
|
[TSTATS:MINIFIED_BUCKETS_SKIPPED]
|
|
message = Some minified buckets have been excluded from the search. You can use the include_reduced_buckets=t option of the tstats command to include the results.
|
|
severity = warn
|
|
|
|
[TSTATS:MINIFIED_BUCKETS_NOT_SUPPORTED__S]
|
|
message = Reduced buckets were found in index='%s'. Tstats searches are not supported on reduced buckets. Search results will be incorrect.
|
|
severity = error
|
|
|
|
[TSTATS:MULTIPLE_TIME_RANGES]
|
|
message = Your search results might be incorrect. The tstats command does not support multiple time ranges.
|
|
action = Instead, use multiple tstats commands with append mode.
|
|
severity = warn
|
|
|
|
[TSTATS:TOO_MANY_BINS]
|
|
message = This job reaches the time bin limit for searches that group results by time. This limit prevents the Splunk platform from running out of memory.
|
|
action = Reduce your time range or increase your span.
|
|
severity = error
|
|
|
|
[TSTATS:TOO_MANY_BINS__S]
|
|
message = This job reaches the time bin limit for searches that group results by time. This limit prevents the Splunk platform from running out of memory.
|
|
action = Reduce your time range or increase your span (the span must be at least %s for the given time range).
|
|
severity = error
|
|
|
|
[TSTATS:FLEX_INDEX_NOT_SUPPORTED]
|
|
message = The tstats operation is not supported on flex indexes.
|
|
severity = error
|
|
|
|
[TSTATS:WHERE_CLAUSE_NON_INDEXED_FIELDS]
|
|
message = This 'tstats' search might contain fields that are not configured as indexed in fields.conf.
|
|
action = To ensure that a field is properly indexed, make sure that 'INDEXED = true' is set for that field in the fields.conf file, and that the fields.conf file reflects fields that are actually indexed in all relevant buckets. To confirm which fields are indexed in buckets, run a 'walklex' search.
|
|
help = message.reference.tstatsindex
|
|
severity = debug
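# Illustrative sketch only (not part of the message definitions above): the
# action above refers to marking a field as indexed in fields.conf. A
# hypothetical stanza for a field named 'my_field' (the field name is a
# placeholder) might look like:
#
#   [my_field]
#   INDEXED = true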
|
|
|
|
|
|
[SUMMARIZE]
|
|
name = Bucket Summary Processor
|
|
|
|
[SUMMARIZE:PEER_NOT_FINISHED_AFTER_MAXTIME_EXCEEDED__U_U]
|
|
message = One or more search peers didn't finish even after max_time=%u seconds. Total elapsed=%u seconds. Finalizing summarization search.
|
|
severity = warn
|
|
|
|
[SUMMARIZE:SUMMARY_CORRUPTED__S]
|
|
message = Your search includes results from corrupted data model summaries, from Splunk version %s.
|
|
action = To get accurate results, set 'allow_old_summaries=false' in your tstats search or rebuild your summaries.
|
|
severity = warn
|
|
|
|
[SUMMARIZE:SUMMARY_CREATION_FAILED_FOR_BUCKET__S_S]
|
|
message = A data model summary couldn't be created for the index bucket '%s', because '%s'. Splunk software will retry the summary creation in the next scheduled runtime.
|
|
action = If the failure persists after the summary creation is retried, check the search.log in the dispatch directory for the failed summary creation search to get more information about the error. For further assistance resolving the failure, contact Customer Support, or, if you have a support contract, file a new case using the Splunk Support Portal.
|
|
severity = warn
|
|
|
|
|
|
|
|
[TYPEAHEAD]
|
|
name = Typeahead Operator
|
|
|
|
[TYPEAHEAD:COUNT_IS_A_REQUIRED_ARGUMENT]
|
|
message = Count is a required argument.
|
|
severity = error
|
|
|
|
[TYPEAHEAD:COUNT_TOO_BIG__S_S]
|
|
message = The count value (%s) cannot be greater than the allowed maximum (%s).
|
|
severity = warn
|
|
|
|
[TYPEAHEAD:INSUFFICIENT_PRIVILEGES]
|
|
message = You do not have privileges to run the typeahead command.
|
|
severity = error
|
|
|
|
|
|
[TYPE_DISCOVERER]
|
|
name = Type Discoverer
|
|
|
|
[TYPE_DISCOVERER:INVALID_NUM_ARGS]
|
|
message = Only one optional argument is expected: <grouping-field>.
|
|
severity = error
|
|
|
|
[TYPE_DISCOVERER:MANY_RESULTS_CUTTING_OFF]
|
|
message = Too many results to train on all of them. Using only the first results.
|
|
severity = warn
|
|
|
|
|
|
[UNIFIEDSEARCH]
|
|
name = Unified Search
|
|
|
|
[UNIFIEDSEARCH:CANNOT_FIND_EVENT_TYPE__S]
|
|
message = Eventtype '%s' does not exist or is disabled.
|
|
severity = warn
|
|
|
|
[UNIFIEDSEARCH:CANNOT_FIND_SAVEDSEARCH__S]
|
|
message = Savedsearch '%s' does not exist or is disabled.
|
|
severity = warn
|
|
|
|
[UNIFIEDSEARCH:DEBUG_CMD_NO_PERMISSION]
|
|
message = You do not have permission to invoke debugging commands.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:DEPRECATED_USAGE__S_S]
|
|
message = Search syntax '%s' is deprecated. Use '%s' instead.
|
|
severity = warn
|
|
|
|
[UNIFIEDSEARCH:EARLIEST_TIME_PREVENTS_SEARCH]
|
|
message = You do not have permission to search data in this time range.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:FAILED_TO_GET_SEARCH_FILTER__S]
|
|
message = Failed to get the search filter for user '%s'.
|
|
severity = error
|
|
capabilities = search
|
|
|
|
[UNIFIEDSEARCH:FAILED_TO_GET_FIELD_FILTER__S_S]
|
|
message = Failed to get the field filter for user '%s'. Error message: '%s'.
|
|
action = Ask your administrator to review the field filter configuration and make any necessary corrections.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:INVALID_FIELD_FILTER__S_S_S]
|
|
message = Invalid sed expression for field '%s': %s. Error message: %s.
|
|
action = Ask your administrator to review the field filter configuration and make any necessary corrections.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:INVALID_FIELD_FILTER_LIMIT__S]
|
|
message = Invalid field filter limit: %s.
|
|
action = Ask your administrator to review the field filter limit configuration and make any necessary corrections.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:FIELD_FILTER_ERROR__S_S]
|
|
message = Error encountered while executing sed expression for field '%s': %s.
|
|
action = Retry the search. If this error persists, ask your administrator to review the field filter configuration.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:FIELD_FILTER_INTERNAL_FIELD_CHECK__S]
|
|
message = '%s' is an internal field other than _raw, and it has been included in a field filter configuration.
|
|
action = Replace this field with a non-internal indexed field or _raw.
|
|
severity = warn
|
|
|
|
[UNIFIEDSEARCH:FIELD_FILTER_API_NAME_ALREADY_EXISTS]
|
|
message = A field filter already exists with this name. Specify a new name for this field filter.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:FIELD_FILTER_API_ONE_ACTION_ONLY]
|
|
message = You have defined more than one action in a field filter, but this API supports only one action per field filter.
|
|
action = Update the field filter configuration to use only one action.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:FIELD_FILTER_API_INVALID_LIMIT]
|
|
message = The field filter limit key must be empty or use one of the following keywords: 'host', 'source', or 'sourcetype'.
|
|
action = Check the field filter limit and make any necessary corrections.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:FIELD_FILTER_API_INVALID_NAME]
|
|
message = The field filter name you specified contains unsupported characters.
|
|
action = Enter a new field filter name containing alphanumeric characters and underscores ( _ ).
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:FIELD_FILTER_API_EMPTY_ACTION_FIELD]
|
|
message = The field name for the action is empty.
|
|
action = Use a valid field name to continue.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:FIELD_FILTER_API_EMPTY_INDEX]
|
|
message = The name of a target index is not specified.
|
|
action = Specify a valid index name to continue.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:FIELD_FILTER_RESTRICTED__S]
|
|
message = Your search is restricted by a field filter that applies to one of your search indexes. To run '%s', you must have a role with one of the following capabilities: 'admin_all_objects' , 'run_commands_ignoring_field_filter'.
|
|
severity = error
|
|
action = Ask your admin if your role has the appropriate capabilities and then retry the search.
|
|
|
|
[UNIFIEDSEARCH:FILTERED_INDEXES_METRIC_TYPE]
|
|
message = This command only searches event indexes. To search metric indexes, use the mstats command.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:FILTERED_INDEXES_NO_PERMISSION]
|
|
message = All indexes were removed from the search due to permissions.
|
|
severity = warn
|
|
|
|
[UNIFIEDSEARCH:LICENSE_REQUIRE_RENEW]
|
|
message = Your Splunk license expired or you have exceeded your license limit too many times. Renew your Splunk license by visiting www.splunk.com/store or calling 866.GET.SPLUNK.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:LICENSE_REQUIRE_RENEW_LITE]
|
|
message = Your Splunk Light license expired or you have exceeded your license limit too many times. Renew your Splunk Light license by visiting www.splunk.com/goto/estore.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:METASEARCH_MUST_BE_FIRST]
|
|
message = metasearch must be specified as the first command.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:MINIFIED_BUCKETS_WILL_BE_SLOWER]
|
|
message = Search on most recent data has completed. Expect slower search speeds as we search the reduced buckets.
|
|
severity = warn
|
|
|
|
[UNIFIEDSEARCH:REAL_TIME_DISABLED__S]
|
|
message = Real-time search is disabled for peer '%s'.
|
|
severity = info
|
|
|
|
[UNIFIEDSEARCH:REAL_TIME_MUST_BE_FIRST]
|
|
message = Real-time search must be specified as the first command.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:REAL_TIME_NOPERMISSION]
|
|
message = You do not have permission to spawn real-time searches.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:REAL_TIME_FLEX]
|
|
message = Real-time search is not supported on flex indexes.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:REGEXEXCEPTION__S]
|
|
message = RegexException: '%s'.
|
|
severity = error
|
|
|
|
[UNIFIEDSEARCH:SEARCH_CONTAINS_INFIX_WILDCARD__S]
|
|
message = The term '%s' contains a wildcard in the middle of a word or string. This might cause inconsistent results if the characters that the wildcard represents include punctuation.
|
|
severity = warn
|
|
capabilities = search
|
|
help = learnmore.wildcards.search
|
|
|
|
[UNIFIEDSEARCH:UNKNOWN_ERROR_OCCURRED_WHILE_PARSING_SEARCH]
|
|
message = An unknown error occurred while parsing the search.
|
|
severity = error
|
|
capabilities = search
|
|
|
|
|
|
[UNIONPROC]
name = Union Processor

[UNIONPROC:EMPTY]
message = Missing arguments. Expecting a list of datasets and subsearches.
severity = error

[UNIONPROC:INVALID_NUM_DATASETS__LU]
message = Expected at least 2 datasets, got %lu.
severity = error

[UNIONPROC:INVALID__S]
message = Invalid argument '%s'. Expecting a list of datasets and subsearches.
severity = error

[UNIONPROC:MAX_SUBSEARCH_EXCEEDED__LU]
message = The maximum number of allowed subsearches is %lu.
severity = error

[UNIONPROC:SYNTAX_UNSUPPORTED]
message = Named syntax is not supported with federated inline searches.
severity = error


[ULIMITS]
name = User Limits

[ULIMITS:LOW_RESOURCE_LIMIT__S_LLU_LLU]
message = A system resource limit on this machine is below the minimum recommended value: system_resource = %s; current_limit = %llu; recommended_minimum_value = %llu.
severity = warn
capabilities = admin_all_objects
action = Change the operating system resource limits to meet the minimum recommended values for Splunk Enterprise.
help = healthcheck.software.ulimit

[ULIMITS:TRANSPARENT_HUGEPAGES]
message = This instance is running on a machine that has kernel transparent huge pages enabled. This can significantly reduce performance and is against best practices.
severity = warn
capabilities = admin_all_objects
action = Turn off kernel transparent huge pages using the method that is most appropriate for your Linux distribution.
help = healthcheck.software.hugepages


[WHERE]
name = Where Operator

[WHERE:BAD_RESULT]
message = The expression is invalid. The result of a 'where' expression must be boolean.
severity = error


[WORKLOAD_MGR]
name = Workload Manager

[WORKLOAD_MGR:OK_WITH_WARNINGS__S]
message = Warnings encountered in Workload Management config: %s
severity = warn
action = Check the configuration and ensure that all users/roles/indexes/apps referenced in Workload Management rules are valid.
capabilities = edit_workload_pools
help = learnmore.configure_workloads

[WORKLOAD_MGR:DEFAULT_SEARCH_POOL_READ_FAILED]
message = Failed to read/process the default pool in the search category.
severity = error
action = Define the default pool under the search category in the [[/manager/system/workload_management|Workload Management UI]] or in the workload_pools.conf file.
capabilities = edit_workload_pools
help = learnmore.configure_workloads

[WORKLOAD_MGR:DEFAULT_INGEST_POOL_READ_FAILED]
message = Failed to read/process the default pool in the ingest category.
severity = error
action = Define the default pool under the ingest category in the [[/manager/system/workload_management|Workload Management UI]] or in the workload_pools.conf file.
capabilities = edit_workload_pools
help = learnmore.configure_workloads

[WORKLOAD_MGR:DEFAULT_SEARCH_POOL_SAME_AS_INGEST_POOL_IN_GENERAL]
message = The default search pool cannot be the same as the ingest pool in the general stanza.
severity = error
action = Specify a default search pool that is different from the default ingest pool in the general stanza of the workload_pools.conf file.
capabilities = edit_workload_pools
help = learnmore.configure_workloads

[WORKLOAD_MGR:DEFAULT_SEARCH_POOL_FAILED_READ_IN_GENERAL_STANZA]
message = The default search pool is not set in the general stanza.
severity = error
action = Specify the default search pool in the general stanza of the workload_pools.conf file.
capabilities = edit_workload_pools
help = learnmore.configure_workloads

[WORKLOAD_MGR:DEFAULT_INGEST_POOL_READ_FAILED_IN_GENERAL_STANZA]
message = The default ingest pool is not set in the general stanza.
severity = error
action = Specify the default ingest pool in the general stanza of the workload_pools.conf file.
capabilities = edit_workload_pools
help = learnmore.configure_workloads
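
# The default-pool messages above refer to the [general] stanza and pool stanzas
# of workload_pools.conf. A minimal, illustrative sketch (the pool names are
# hypothetical; confirm setting names against workload_pools.conf.spec for your
# version):
#
#   [general]
#   enabled = true
#   default_pool = standard_perf
#   ingest_pool = ingest_default
#
#   [workload_pool:standard_perf]
#   cpu_weight = 70
#   mem_weight = 70
#   category = search
#   default_category_pool = true
#
#   [workload_pool:ingest_default]
#   cpu_weight = 30
#   mem_weight = 30
#   category = ingest
#   default_category_pool = true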

[WORKLOAD_MGR:WORKLOAD_POOL_FAILED_READ__S]
message = Failed to read/process workload pool = %s.
severity = error
action = Specify a valid workload pool in the [[/manager/system/workload_management|Workload Management UI]] or in the workload_pools.conf file.
capabilities = edit_workload_pools
help = learnmore.configure_workloads

[WORKLOAD_MGR:CONFIG_WORKLOAD_POOLS_NOT_FOUND]
message = No workload pools found in the configuration.
severity = error
action = Specify valid workload pool(s) in the [[/manager/system/workload_management|Workload Management UI]] or in the workload_pools.conf file.
capabilities = edit_workload_pools
help = learnmore.configure_workloads

[WORKLOAD_MGR:WORKLOAD_POOL_FAILED_SETUP__S]
message = Failed to set up workload pool = %s.
severity = error
action = Validate that both the cpu and mem cgroups for the workload pool on the system are correct.
capabilities = edit_workload_pools
help = learnmore.linux_cgroups

[WORKLOAD_MGR:WORKLOAD_POOL_INVALID_CATEGORY__S]
message = The workload category is invalid for pool = %s.
severity = error
action = Specify a valid category: "search", "ingest", or "misc".
capabilities = edit_workload_pools
help = learnmore.linux_cgroups

[WORKLOAD_MGR:WORKLOAD_RULE_FAILED_READ__S]
message = Failed to read/process workload rule = %s.
severity = error
action = Specify a valid workload rule in the [[/manager/system/workload_management|Workload Management UI]] or in the workload_rules.conf file.
capabilities = edit_workload_pools
help = learnmore.configure_workloads

[WORKLOAD_MGR:WORKLOAD_RULE_ORDER_MISSING]
message = Failed to find/process the required rule order stanza.
severity = error
action = Define the workload_rules_order stanza with rules in the correct order in the workload_rules.conf file.
capabilities = edit_workload_pools
help = learnmore.configure_workloads
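
# The rule order stanza mentioned above lives in workload_rules.conf. A minimal,
# illustrative sketch (the rule names are hypothetical; confirm the exact syntax
# against workload_rules.conf.spec for your version):
#
#   [workload_rules_order]
#   rules = admins_to_high_perf, long_running_to_low_perf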

[WORKLOAD_MGR:WORKLOAD_CATEGORY_FAILED_READ__S]
message = Failed to read/process workload category = %s.
severity = error
action = Specify correct parameters for the workload category in the [[/manager/system/workload_management|Workload Management UI]] or in the workload_pools.conf file.
capabilities = edit_workload_pools
help = learnmore.configure_workloads

[WORKLOAD_MGR:WORKLOAD_CATEGORY_FAILED_SETUP__S]
message = Failed to set up workload category = %s.
severity = error
action = Validate that both the cpu and mem cgroups for the workload category on the system are correct.
capabilities = edit_workload_pools
help = learnmore.configure_workloads

[WORKLOAD_MGR:WORKLOAD_CATEGORY_HAS_MORE_THAN_ONE_POOL]
message = Workload category "ingest" and/or "misc" has more than one workload pool defined.
severity = error
action = Define only one workload pool in each of the "ingest" and "misc" categories in the workload_pools.conf file.
capabilities = edit_workload_pools
help = learnmore.configure_workloads

[WORKLOAD_MGR:WORKLOAD_POOL_DUPLICATE_DEFAULT_POOL__S]
message = Failed to read duplicate default category pool=%s.
severity = error
action = Define only one default pool per category in the workload_pools.conf file.
capabilities = edit_workload_pools
help = learnmore.configure_workloads

[WORKLOAD_MGR:WORKLOAD_INDEXERS_CONFIGURATION_OUT_OF_SYNC]
message = The indexer configuration for workload management is not reflected on disk.
severity = error
action = The workload management configuration has been updated and is operational, but the configuration is not yet reflected on the indexers. For future compatibility, copy the latest workload_pools.conf configuration from a search head to the cluster master and push the configuration to the peer nodes.
capabilities = edit_workload_pools
help = learnmore.configure_workloads

[WORKLOAD_MGR:WORKLOAD_MGMT_MIGRATION_FAILED]
message = Failed to migrate workload management to the latest version.
severity = error
action = The existing configuration might be incorrect. If so, fix the configuration in workload_pools.conf and retry the upgrade/restart. Alternatively, delete the old configuration, restart, and add the configuration in the new version. For help, contact Splunk support with the results of running bin/splunk diag.
help = learnmore.configure_workloads

[WORKLOAD_MGR:WORKLOAD_MIXED_CONFIGURATION]
message = The workload pools configuration has some pools defined without a category.
severity = error
action = Define all workload pools with a valid category value.
capabilities = edit_workload_pools
help = learnmore.configure_workloads

[WORKLOAD_MGR:WORKLOAD_CATEGORY_MISC_POOL__S]
message = Cannot define the workload pool=%s under the misc category when the category's weights are 0.
severity = error
action = Change the workload misc category's cpu and memory weights to values greater than 0.
capabilities = edit_workload_pools
help = learnmore.configure_workloads

[WORKLOAD_MGR:WORKLOAD_RULE_MISSING_MESSAGE_FOR_ALERT_RULE__S]
message = A user message is required for workload rule=%s if the 'alert' action is selected.
severity = warn
action = Specify a user message for rules with the 'alert' action.
capabilities = edit_workload_rules
help = learnmore.configure_workloads

[WORKLOAD_MGR:WORKLOAD_RULE_DEFAULT_USER_MESSAGE_FOR_ABORT_RULE__S]
message = Your search has been terminated by workload rule=%s because it exceeded the specified runtime.
severity = warn
action = Edit your search to fit the workload rule constraints or contact your Splunk administrator.

[WORKLOAD_MGR:WORKLOAD_PREFLIGHT_BASE_DIR_CHECK_TITLE__S]
message = %s Splunk base directory check

[WORKLOAD_MGR:WORKLOAD_PREFLIGHT_BASE_DIR_CHECK_MITIGATION__S_S]
message = %s Splunk base directory %s is missing.

[WORKLOAD_MGR:WORKLOAD_PREFLIGHT_BASE_DIR_PERM_TITLE__S]
message = %s Splunk base directory permissions

[WORKLOAD_MGR:WORKLOAD_PREFLIGHT_BASE_DIR_PERM_MITIGATION__S_S]
message = %s Splunk base directory %s requires read and write permissions.

[WORKLOAD_MGR:WORKLOAD_PREFLIGHT_PLATFORMTYPE_TITLE]
message = Operating System

[WORKLOAD_MGR:WORKLOAD_PREFLIGHT_PLATFORMTYPE_MITIGATION]
message = The operating system must be Linux.

[WORKLOAD_MGR:WORKLOAD_PREFLIGHT_CGROUP_VERSION_TITLE]
message = Cgroup Version

[WORKLOAD_MGR:WORKLOAD_PREFLIGHT_CGROUP_VERSION_MITIGATION]
message = Cgroup must be version 1 and it must be properly mounted.

[WORKLOAD_MGR:WORKLOAD_PREFLIGHT_UNIT_FILE_CHECK_TITLE]
message = Unit file check

[WORKLOAD_MGR:WORKLOAD_PREFLIGHT_UNIT_FILE_CHECK_MITIGATION__S]
message = Unit file %s is missing. Restart Splunk, then rerun the preflight checks.

[WORKLOAD_MGR:WORKLOAD_PREFLIGHT_SETTING_DELEGATE_TITLE]
message = Delegate property set to true

[WORKLOAD_MGR:WORKLOAD_PREFLIGHT_SETTING_DELEGATE_MITIGATION]
message = The 'Delegate' property in the unit file must be set to 'true'. Restart Splunk, then rerun the preflight checks.

[WORKLOAD_MGR:WORKLOAD_SPLUNK_LAUNCHED_UNDER_SYSTEMD_TITLE]
message = Splunk launched under systemd

[WORKLOAD_MGR:WORKLOAD_PREFLIGHT_SPLUNK_LAUNCHED_UNDER_SYSTEMD_MITIGATION]
message = In the unit file, the 'Restart' property must be set to 'always'. The 'ExecStart' property must include '_internal_launch_under_systemd'. Make sure the up-to-date unit file is loaded.


[WORKLOAD_ADMISSION]
name = Admission Manager

[WORKLOAD_ADMISSION:WORKLOAD_ADMISSION_ENABLED_WITH_WARNINGS__S]
message = Warnings encountered in Workload Management Admission Rules config: %s
severity = warn
action = Check the Admission Rules configuration and ensure that all referenced users/roles/indexes/apps are valid.
capabilities = edit_workload_rules
help = learnmore.configure_workloads

[WORKLOAD_ADMISSION:WORKLOAD_ADMISSION_PREFILTER_RULE_FAILED_READ__S]
message = Failed to read/process search prefilter rule = %s.
severity = error
action = Specify a valid search prefilter rule in the [[/manager/system/workload_management|Admission Rules UI]] or in the workload_rules.conf file.
capabilities = edit_workload_rules
help = learnmore.configure_workloads

[WORKLOAD_ADMISSION:WORKLOAD_ADMISSION_PREFILTER_RULE_DEFAULT_MESSAGE_SCHEDULED_SEARCH]
message = This search meets the filter conditions defined by an admission rule and will not be executed.
severity = info

[WORKLOAD_ADMISSION:WORKLOAD_ADMISSION_PREFILTER_RULE_DEFAULT_MESSAGE_ADHOC_SEARCH__S]
message = This search meets the filter conditions defined by admission rule '%s' and will not be executed.
severity = info
action = Contact your Splunk administrator if you believe this action was taken in error.


[XY_SERIES]
name = XY Series

[XY_SERIES:INVALID_WILDCARD_FIELD__S]
message = Wildcard field '%s' cannot be used in xyseries command for xfield or yfield.
severity = error

[XY_SERIES:MISSING_DATA_FIELDS]
message = At least one data field must be specified.
severity = error


[REMOVED_COMMAND]
name = Removed Command

[REMOVED_COMMAND:COMMAND__S]
message = The '%s' command has been removed.
help = message.removed.commands
severity = error


[CLILIB_BUNDLE_PATHS]
name = CLILIB Bundle paths

[CLILIB_BUNDLE_PATHS:ERROR_APP_INSTALL__S]
message = Application installation failed because: %s.
severity = error

[CLILIB_BUNDLE_PATHS:ERROR_ARCHIVE_ABS_PATH]
message = The archive would extract to an absolute path.
severity = error

[CLILIB_BUNDLE_PATHS:ERROR_ARCHIVE_NO_APP]
message = The archive does not contain an application subdirectory.
severity = error

[CLILIB_BUNDLE_PATHS:ERROR_ARCHIVE_MULTIPLE_APPS__S_S]
message = The archive contains more than one application subdirectory: '%s' and '%s'.
severity = error

[CLILIB_BUNDLE_PATHS:INFO_MIGRATE_START__S]
message = Beginning migration of /etc/bundles/%s ...
severity = info

[CLILIB_BUNDLE_PATHS:INFO_MIGRATE_END__S]
message = Migration of /etc/bundles/%s is complete.
severity = info

[CLILIB_BUNDLE_PATHS:INFO_MIGRATE_OMIT__S]
message = Omitting the '%s' directory from migration.
severity = info

[CLILIB_BUNDLE_PATHS:INFO_MIGRATE_COLLISION__S_S]
message = Application '%s' already exists: %s.
severity = info

[CLILIB_BUNDLE_PATHS:INFO_MIGRATE_CLEANUP__S]
message = Cleaning up %s.
severity = info

[CLILIB_BUNDLE_PATHS:INFO_MIGRATE_MOVE_DRYRUN__S]
message = Would move '%s'.
severity = info

[CLILIB_BUNDLE_PATHS:INFO_MIGRATE_MERGE_CONF__S_S]
message = Merging %s into %s.
severity = info

[CLILIB_BUNDLE_PATHS:INFO_MIGRATE_IGNORE_DUP__S_S]
message = Ignoring %s because %s already exists.
severity = info

[CLILIB_BUNDLE_PATHS:WARN_MIGRATE_NO_CREATE__S]
message = Cannot create %s.
severity = warn

[CLILIB_BUNDLE_PATHS:WARN_MIGRATE_DEP]
message = Directories in /etc/bundles/ are ignored by Splunk's configuration system:
severity = warn

[CLILIB_BUNDLE_PATHS:WARN_MIGRATE_CONF]
message = Splunk no longer writes to this file. See $SPLUNK_HOME/etc/system/local.
severity = warn

[CLILIB_BUNDLE_PATHS:INFO_EXPORT_OMIT__S]
message = Export is not required for %s.
severity = info

[CLILIB_BUNDLE_PATHS:INFO_EXPORT_FILE__S]
message = Exporting %s.
severity = info

[CLILIB_BUNDLE_PATHS:INFO_IMPORT_FILE__S]
message = Importing %s.
severity = info

[CLILIB_BUNDLE_PATHS:ERROR_METADATA_WRITE]
message = Failed to copy out application metadata.
severity = error


[MSEARCH]
name = Metric Preview

[MSEARCH:NOPERMISSION]
message = Your role does not have the capability required to perform this operation.
severity = error

[MSEARCH:INVALID_TIME_PARAMETER__S]
message = Invalid time format for '%s'.
action = Use a relative time format (-1d@d), UNIX timestamp (1234567890), or ISO8601 format (2000-01-01T12:34:56-05:00).
severity = error


[SDS]
name = Federated Search for Amazon S3

[SDS:UNSUPPORTED_OPTION__S]
message = Unsupported command option '%s'. The option is ignored.
severity = warn

[SDS:MISSING_ARGUMENT__S]
message = Missing required argument '%s'.
severity = error

[SDS:INVALID_ARGUMENT__S_S]
message = The '%s' argument is invalid: %s
severity = error

[SDS:SDSSEARCH_CLIENT_ERROR__S]
message = The Federated Search for Amazon S3 client failed to run the search. It encountered the following error: %s
severity = error

[SDS:FEATURE_FLAG_DISABLED]
message = This search has failed. Federated Search for Amazon S3 is disabled. Searches with this data cannot be processed.
action = To run this search, enable Federated Search for Amazon S3.

[SDS:GETTING_DATASET_FAILED__S]
message = Failed to get the following federated index: %s.
severity = error

[SDS:NO_ACCESS_TO_DATASET__S]
message = Your role does not have access to the following federated index: %s.
action = Contact your Splunk administrator.
severity = error

[SDS:GETTING_PROVIDER_FAILED__S]
message = Failed to get the following federated provider: %s.
severity = error

[SDS:PARSING_DATASET_PARAMETER_FAILED__S_S]
message = Failed to parse the '%s' setting in federated index '%s'.
severity = error

[SDS:PARSING_PROVIDER_PARAMETER_FAILED__S_S]
message = Failed to parse the '%s' setting in federated provider '%s'.
severity = error

[SDS:MISSING_REQUIRED_TIME_FORMAT__S]
message = A time format string is required for time field '%s'.
help = sds.index.timesettings
severity = error

[SDS:FIELD_NAME_CANNOT_HAVE_WILDCARD__S]
message = You cannot apply a wildcard to the '%s' field.
severity = error

[SDS:FIELD_NAME_CANNOT_HAVE_DOUBLE_QUOTE__S]
message = Field '%s' has a double-quote ('"') character.
action = Quote non-nested field names with single-quote characters ('\'').
severity = error

[SDS:FIELD_NAME_CANNOT_HAVE_INVALID_QUOTING__S]
message = Field '%s' uses the quote character ('\'') incorrectly.
action = Quote non-nested field names fully with single-quote characters ('\''). Do not quote nested field names at all. For example, 'audit.log' and audit.log are both valid field names, but audit.'log' is an invalid field name.
severity = error

[SDS:MISSING_KEYWORD__S]
message = Missing '%s' keyword.
severity = error

[SDS:INVALID_FIELD__S_S]
message = Invalid %s field: %s.
severity = error

[SDS:ORDERBY_FIELD_NOT_WITHIN_GROUPBY_AND_NOT_AN_AGGREGATION__S]
message = The search is performing statistics on fields. Field: %s must be an aggregation expression or appear in a 'groupby' clause.
severity = error

[SDS:ORDERBY_FILED_HAS_RENAME__S]
message = Field: %s is an aggregation result field and has a rename.
action = Use the renamed field in the 'orderby' clause.
severity = error

[SDS:REDUNDANT_ASC_DESC_KEYWORD__S]
message = More than one ASC/DESC keyword is appended after field: %s.
action = Edit the search so that the field has only one ASC or DESC keyword, if any.
severity = error

[SDS:NO_FIELD_BEFORE_ASC_DESC_KEYWORD]
message = The ASC or DESC keyword must be preceded by a field.
action = Add a field before the ASC or DESC keyword.
severity = error

[SDS:MISSING_FIELD_LIST_AFTER_KEYWORD__S]
message = Missing field list after '%s' keyword.
action = Add a list of fields after the keyword.
severity = error

[SDS:WRONG_NUMBER_OF_LIMIT_VALUES__D]
message = Expected a single value after the 'limit' keyword, but got %d.
severity = error

[SDS:WRONG_TYPE_OF_LIMIT_VALUE__S]
message = The 'limit' value you have provided in this search is incorrect: '%s'
action = Replace the incorrect value with an unsigned integer.
severity = error

[SDS:FAIL_TO_SNAP_TIME__S_S_S]
message = Failed to snap time: time=%s, span=%s, tz=%s
action = Contact Splunk Support, or, if you have a support contract, file a new case using the Splunk Support Portal.
severity = error

[SDS:TIME_RANGE_SUBSTITUTED]
message = The time range you selected has been replaced by the time range you provided in the 'WHERE' clause.
severity = info

[SDS:REUSE_SEARCH_RESULTS]
message = The results for the last successful run of this search are reused for this run of the search.
action = If you want the next run of this search to have the potential to return new results, add 'reuse_search_results=false' to its search string.
severity = info

[SDS:MISSING_UNIX_TIME_FIELD_FOR_TIME_SPAN]
message = A 'Unix time field' is required for the groupby clause of this time span search.
action = Apply the 'Unix time field' for the federated index invoked in your search to the groupby clause of your search.
severity = error

[SDS:UNIX_TIME_FIELD_FOUND_IN_DATASET_SCHEMA__S]
message = The UNIX time field '%s' was found in the dataset schema.
action = The UNIX time field name must be a unique field name that is not shared by any other fields in the dataset to which the federated index maps.
severity = error

[SDS:FIELDS_NOT_FOUND__S]
message = The following fields in your search do not exist in your dataset: [%s]. Having nonexistent fields in your search might cause your search to not return results.
action = In your search, replace nonexistent fields with fields that exist in your dataset.
severity = warn

[SDS:PER_SEARCH_BYTES_SCAN_EXCEED_LIMITS]
message = Your search exceeds your bytes-scanned-per-search control limit.
action = Contact your Splunk Support representative if you need to change this control limit.
severity = error

[SDS:TOO_MANY_SEARCH_REQUESTS]
message = Too many searches are running concurrently.
action = Wait for a moment and try to run your search again. If this situation persists, contact your Splunk Support representative.
severity = error

[SDS:SEARCH_TIMEOUT]
message = Your search has timed out after running for too long without completing.
action = To reduce the amount of data that must be scanned for this search, add filters to the search. Then run the search again.
severity = error

[SDS:QUERY_ENGINE_INTERNAL_ERROR]
message = This search returns more results than Federated Search for Amazon S3 can currently support.
action = Reduce the potential result set of your search by adding filters to the search or reducing its time range, and try again.
severity = error

[SDS:KMS_KEY_NOT_FOUND]
message = This search requires an encryption key that you have not provided.
action = Verify that the definition of the federated provider for this search has AWS KMS key Amazon Resource Names (ARNs) for the data you are searching.
severity = error

[SDS:SEARCH_RUN_FAILED__S_S]
message = Failed to run search, state=%s, reason="%s"
severity = error

[SDS:FEDERATED_INDEX_DISABLED__S]
message = This search has failed. The federated index "%s" is turned off. Searches that reference this federated index cannot be processed.
action = To run this search, turn the federated index on.

[SDS:FEDERATED_PROVIDER_DISABLED__S_S]
message = This search has failed. The federated provider "%s" associated with the federated index "%s" is turned off. Searches that reference this federated index cannot be processed.
action = To run this search, turn the federated provider on.


[UNARCHIVE_COMMAND]
name = Unarchive Command

[UNARCHIVE_COMMAND:START_MODE__S]
message = 'unarchive_cmd_start_mode' is set to 'shell' for source='%s', which allows 'unarchive_cmd' to run multiple commands.
severity = warn
action = Change 'unarchive_cmd_start_mode' to 'direct'.
target = log
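
# The warning above refers to archive-handling settings in props.conf. A minimal,
# illustrative sketch (the source pattern and script path are hypothetical;
# confirm the setting names against props.conf.spec for your version):
#
#   [source::/var/log/custom/*.dat.enc]
#   invalid_cause = archive
#   unarchive_cmd = /usr/local/bin/decode_archive.sh
#   unarchive_cmd_start_mode = direct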