# General
[instrumentation.lastSent]
search = index=_telemetry source=telemetry sourcetype=splunk_telemetry_log status=success | fillnull value=anonymous visibility | eval anonymous_send_time = if(visibility LIKE "%anonymous%", _time, null) | eval license_send_time = if(visibility LIKE "%license%", _time, null) | eval support_send_time = if(visibility LIKE "%support%", _time, null) | stats latest(anonymous_send_time) as latest_anonymous_send_time latest(license_send_time) as latest_license_send_time latest(support_send_time) as latest_support_send_time
[instrumentation.reportingErrorCount]
search = index=_telemetry source=telemetry sourcetype=splunk_telemetry_log status=failed | fillnull value=anonymous visibility | stats count(eval(visibility LIKE "%anonymous%")) as anonymous_errors count(eval(visibility LIKE "%license%")) as license_errors count(eval(visibility LIKE "%support%")) as support_errors
# Anonymous
# For Splunk core <= 7.0.x and splunk_instrumentation <= 3.0.x, anonymous usage data is indexed in _telemetry
# For later versions, data is indexed in _introspection
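# A minimal check (reusing the sourcetype/source values from the stanzas below) to see
# which index holds these payloads on a given deployment:
#   (index=_introspection OR index=_telemetry) sourcetype=splunk_telemetry source="http-stream" | stats count by index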
[instrumentation.anonymized.eventsByTime]
search = (index=_introspection OR index=_telemetry) sourcetype=splunk_telemetry source="http-stream" visibility=*anonymous* | append [| savedsearch instrumentation.licenseUsage]
# Support
# For Splunk core <= 7.0.x and splunk_instrumentation <= 3.0.x, support usage data is indexed in _telemetry
# For later versions, data is indexed in _introspection
[instrumentation.support.eventsByTime]
search = (index=_introspection OR index=_telemetry) sourcetype=splunk_telemetry source="http-stream" visibility=*support* | append [| savedsearch instrumentation.licenseUsage]
# Deployment
[instrumentation.deployment.clustering.indexer]
search = | makeresults annotate=true | append [localop | rest /services/cluster/config] | sort -mode | head 1 | eval data=if(mode=="master","{\"host\":\""+splunk_server+"\",\"timezone\":\""+strftime(now(),"%z")+"\",\"multiSite\":"+multisite+",\"summaryReplication\":"+if(summary_replication=1,"true","false")+",\"enabled\":true,\"replicationFactor\":"+tostring(replication_factor)+",\"siteReplicationFactor\":"+coalesce(replace(replace(site_replication_factor, "origin", "\"origin\""), "total", "\"total\""), "null")+",\"siteSearchFactor\":"+coalesce(replace(replace(site_search_factor, "origin", "\"origin\""), "total", "\"total\""),"null")+",\"searchFactor\":"+tostring(search_factor)+"}","{\"host\":\""+splunk_server+"\",\"timezone\":\""+strftime(now(),"%z")+"\",\"enabled\":false}") | eval _time=now() | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.deployment.forwarders]
search = index=_internal source=*metrics.log* TERM(group=tcpin_connections) (TERM(connectionType=cooked) OR TERM(connectionType=cookedSSL)) fwdType=* guid=* | rename sourceIp as forwarderHost | eval connectionType=case(fwdType=="uf" or fwdType=="lwf" or fwdType=="full", fwdType, 1==1,"Splunk fwder") | eval version=if(isnull(version),"pre 4.2",version) | bin _time span=1d | stats sum(kb) as kb, latest(connectionType) as connectionType, latest(arch) as arch, latest(os) as os, latest(version) as version, latest(protocolLevel) as protocolLevel, latest(forwarderHost) as forwarderHost by guid _time| eval protocolLevel=if(isnull(protocolLevel), "no info", tostring(protocolLevel)) | stats estdc(forwarderHost) as numHosts estdc(guid) as numInstances `instrumentation_distribution_values(kb)` by connectionType arch os version _time protocolLevel| eval data="{\"hosts\":"+tostring(numHosts)+",\"instances\":"+tostring(numInstances)+",\"architecture\":\""+arch+"\",\"os\":\""+os+"\",\"splunkVersion\":\""+version+"\",\"type\":\""+connectionType+"\", \"protocolLevel\":\""+protocolLevel+"\", \"bytes\":{" + `instrumentation_distribution_strings("kb",1024,0)` + "}}" | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.deployment.app]
search = | rest /services/apps/local | eval _time=now() | fields splunk_server title updated version disabled | eval data="{\"host\":\""+splunk_server+"\",\"name\":\""+title+"\",\"version\":\""+coalesce(version, "")+"\",\"enabled\":"+if(disabled=0, "true", "false")+"}" | eval date=strftime(_time, "%Y-%m-%d") | fields data _time date
[instrumentation.deployment.node]
search = index=_introspection sourcetype=splunk_disk_objects component::Partitions | bin _time span=1d | stats latest(data.free) as partitionFree, latest(data.capacity) as partitionCapacity by host data.fs_type data.mount_point _time | eval partitionUtilized=round(1-partitionFree/partitionCapacity,2) | eval partitions="{\"utilization\":"+`instrumentation_number_format(partitionUtilized,1,2)`+",\"capacity\":"+`instrumentation_number_format(partitionCapacity,1048576,0)`+",\"fileSystem\":\""+'data.fs_type' + "\"}" | stats delim="," values(partitions) as partitions by host _time | rename _time as date | mvcombine partitions | rename date as _time | join type=left host _time [search index=_introspection sourcetype=splunk_resource_usage component::Hostwide | eval cpuUsage = 'data.cpu_system_pct' + 'data.cpu_user_pct' | rename data.mem_used as memUsage | bin _time span=1d | stats latest(data.cpu_count) as coreCount, latest(data.virtual_cpu_count) as virtualCoreCount, latest(data.mem) as memAvailable, latest(data.splunk_version) as splunkVersion, latest(data.cpu_arch) as cpuArch, latest(data.os_name) as osName, latest(data.os_name_ext) as osNameExt, latest(data.os_version) as osVersion, `instrumentation_distribution_values(cpuUsage)`, `instrumentation_distribution_values(memUsage)`, latest(data.instance_guid) as guid by host _time] | fillnull value="null" coreCount virtualCoreCount memAvailable | eval splunkVersion=coalesce("\""+splunkVersion+"\"", "null"), cpuArch=coalesce("\""+cpuArch+"\"", "null"), osName=coalesce("\""+osName + "\"", "null"), osNameExt=coalesce("\""+osNameExt+"\"", "null"), osVersion=coalesce("\""+osVersion+"\"", "null"), guid=coalesce("\""+guid+"\"", "null") | eval data = "{\"guid\":"+guid+",\"host\":\""+replace(host,"\"", "\\\"")+"\",\"partitions\": " + coalesce("[" + partitions + "]", "null") + ",\"cpu\":{\"architecture\":"+cpuArch+",\"coreCount\":" + tostring(coreCount)+ ",\"virtualCoreCount\":"+tostring(virtualCoreCount)+",\"utilization\":{" + `instrumentation_distribution_strings("cpuUsage",.01,2)` + "}},\"memory\":"+"{\"capacity\":"+ `instrumentation_number_format(memAvailable,1048576,0)`+",\"utilization\":{" + `instrumentation_distribution_strings("memUsage",1/memAvailable,2)` + "}},\"os\":"+osName+",\"osExt\":"+osNameExt + ",\"osVersion\":"+osVersion+",\"splunkVersion\":"+splunkVersion+"}" | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.deployment.index]
search = | rest /services/data/indexes | join type=outer splunk_server title [| rest /services/data/indexes-extended] \
| append [| rest /services/data/indexes datatype=metric | join type=outer splunk_server title [| rest /services/data/indexes-extended datatype=metric]] \
| eval warm_bucket_size = if(isnotnull('bucket_dirs.home.warm_bucket_size'), 'bucket_dirs.home.warm_bucket_size', 'bucket_dirs.home.size') \
| eval cold_bucket_size_gb = tostring(round(coalesce('bucket_dirs.cold.bucket_size', 'bucket_dirs.cold.size', 0) / 1024, 2)) \
| eval warm_bucket_size_gb = tostring(round(coalesce(warm_bucket_size,0) / 1024, 2)) \
| eval hot_bucket_size = tostring(round(coalesce(total_size / 1024 - cold_bucket_size_gb - warm_bucket_size_gb, 0),2)) \
| eval hot_bucket_size_gb = tostring(round(coalesce(hot_bucket_size,0) / 1024, 2)) \
| eval thawed_bucket_size_gb = tostring(round(coalesce('bucket_dirs.thawed.bucket_size', 'bucket_dirs.thawed.size',0) / 1024, 2)) \
| eval warm_bucket_count = tostring(coalesce('bucket_dirs.home.warm_bucket_count', 0)) \
| eval hot_bucket_count = tostring(coalesce('bucket_dirs.home.hot_bucket_count',0)) \
| eval cold_bucket_count = tostring(coalesce('bucket_dirs.cold.bucket_count',0)) \
| eval thawed_bucket_count = tostring(coalesce('bucket_dirs.thawed.bucket_count',0)) \
| eval home_event_count = tostring(coalesce('bucket_dirs.home.event_count',0)) \
| eval cold_event_count = tostring(coalesce('bucket_dirs.cold.event_count',0)) \
| eval thawed_event_count = tostring(coalesce('bucket_dirs.thawed.event_count',0)) \
| eval home_bucket_capacity_gb = coalesce(if('homePath.maxDataSizeMB' == 0, "\"unlimited\"", round('homePath.maxDataSizeMB' / 1024, 2)), "\"unlimited\"") \
| eval cold_bucket_capacity_gb = coalesce(if('coldPath.maxDataSizeMB' == 0, "\"unlimited\"", round('coldPath.maxDataSizeMB' / 1024, 2)), "\"unlimited\"") \
| eval currentDBSizeGB = tostring(round(coalesce(currentDBSizeMB,0) / 1024, 2)) \
| eval maxTotalDataSizeGB = tostring(if(maxTotalDataSizeMB = 0, "\"unlimited\"", coalesce(round(maxTotalDataSizeMB / 1024, 2), "null"))) \
| eval maxHotBuckets= if(maxHotBuckets="auto","\"auto\"", maxHotBuckets) \
| eval minTime = tostring(coalesce(strptime(minTime,"%Y-%m-%dT%H:%M:%S%z"),"null")) \
| eval maxTime = tostring(coalesce(strptime(maxTime,"%Y-%m-%dT%H:%M:%S%z"),"null")) \
| eval total_bucket_count = tostring(if(isnotnull(total_bucket_count), total_bucket_count, 0)) \
| eval totalEventCount = tostring(coalesce(totalEventCount, 0)) \
| eval total_raw_size_gb = tostring(coalesce(round(total_raw_size / 1024, 2), "null")) \
| eval timeResolution = IF('metric.timestampResolution'=="ms","millisec","sec") \
| eval index_type = coalesce(datatype ,"event") \
| rename eai:acl.app as App \
| eval _time=now() \
| fields splunk_server, title,index_type, timeResolution,\
currentDBSizeGB, totalEventCount, total_bucket_count, \
total_raw_size_gb, minTime, maxTime, home_bucket_capacity_gb, cold_bucket_capacity_gb, \
hot_bucket_size_gb, warm_bucket_size_gb, cold_bucket_size_gb, thawed_bucket_size_gb, \
hot_bucket_count, warm_bucket_count, cold_bucket_count, thawed_bucket_count, \
home_event_count, cold_event_count, thawed_event_count, \
maxTotalDataSizeGB, maxHotBuckets, maxWarmDBCount App _time \
| eval data="{\"host\":\""+splunk_server+"\",\"name\":\""+title+"\",\"type\":\""+index_type+"\",\"timeResolution\":\""+timeResolution+"\",\"app\":\""+App+"\",\"total\":{\"currentDBSizeGB\":"+currentDBSizeGB+",\"maxDataSizeGB\":"+maxTotalDataSizeGB+",\"events\":"+totalEventCount+",\"buckets\":"+total_bucket_count+",\"rawSizeGB\":"+total_raw_size_gb+",\"minTime\":"+minTime+",\"maxTime\":"+maxTime+"},\"buckets\":{\"homeCapacityGB\":"+home_bucket_capacity_gb+",\"homeEventCount\":"+home_event_count+",\"coldCapacityGB\":"+cold_bucket_capacity_gb+",\"hot\":{\"sizeGB\":"+hot_bucket_size_gb+",\"count\":"+hot_bucket_count+",\"max\":"+maxHotBuckets+"},\"warm\":{\"sizeGB\":"+warm_bucket_size_gb+",\"count\":"+warm_bucket_count+"},\"cold\":{\"sizeGB\":"+cold_bucket_size_gb+",\"count\":"+cold_bucket_count+",\"events\":"+cold_event_count+"},\"thawed\":{\"sizeGB\":"+thawed_bucket_size_gb+",\"count\":"+thawed_bucket_count+",\"events\":"+thawed_event_count+"}}}" \
| eval date=strftime(_time, "%Y-%m-%d") | fields data _time date
[instrumentation.deployment.dscluster]
search = index=_dsphonehome earliest=-24h@h | stats dc(data.clientId) as total_clients \
| appendcols [ | search index=_dsphonehome earliest=-24h@h | rex field=source ".*phonehomes_(?<ds_guid>.*).log" | stats dc(data.clientId) as clients_connected by ds_guid | makejson ds_guid, clients_connected output=count_by_ds | fields count_by_ds | mvcombine delim="," count_by_ds | nomv count_by_ds | eval count_by_ds="["+count_by_ds+"]" ] \
| appendcols [ | search index=_dsphonehome earliest=-24h@h | join data.clientId max=1 [search index=_dsclient] | stats dc(data.clientId) as version_package_count by data.splunkVersion data.package | rename data.splunkVersion as version | rename data.package as package| makejson version package version_package_count output=count_by_version_and_package | fields count_by_version_and_package | mvcombine delim="," count_by_version_and_package | nomv count_by_version_and_package | eval count_by_version_and_package="["+count_by_version_and_package+"]" ] \
| fields total_clients count_by_ds count_by_version_and_package \
| eval data="{\"total_clients\":"+tostring(total_clients)+",\"count_by_ds\":"+count_by_ds+",\"count_by_version_and_package\":"+count_by_version_and_package+"}" | eval _time=now() | eval date=strftime(_time, "%Y-%m-%d") | fields data _time date
[instrumentation.deployment.index.config]
search = | rest /services/configs/conf-indexes | eval _time=now() | fields splunk_server title tsidxWritingLevel | eval data="{\"host\":\""+splunk_server+"\",\"index\":\""+title+"\",\"tsidxWritingLevel\":"+tsidxWritingLevel+"}" | eval date=strftime(_time, "%Y-%m-%d") | fields data _time date
[instrumentation.deployment.remoteupgrade]
search = index=_internal TERM(group=deploy-server) TERM(name=upgrade_status) source=*metrics.log* earliest=-24h@h\
|stats sum(nRemoteUpgrade) AS total_nRemoteUpgrade, sum(nRemoteUpgradeSuccess) AS total_nRemoteUpgradeSuccess, sum(nRemoteUpgradeFailure) AS total_nRemoteUpgradeFailure, sum(nRemoteUpgradePkg*) AS total_nRemoteUpgradePkg* sum(nRemoteUpgradeOS*) AS total_nRemoteUpgradeOS* \
| fields total_nRemoteUpgrade, total_nRemoteUpgradeSuccess, total_nRemoteUpgradeFailure, total_nRemoteUpgradePkg*, total_nRemoteUpgradeOS* \
| eval totalnRemoteUpgradePkgStr="", total_nRemoteUpgradeOsStr="" \
| foreach total_nRemoteUpgradePkg* \
[eval totalnRemoteUpgradePkgStr = totalnRemoteUpgradePkgStr.",\"total_remote_upgrade_pkg"."<<MATCHSTR>>"."\":".tostring('<<FIELD>>')] \
| foreach total_nRemoteUpgradeOS* \
[eval total_nRemoteUpgradeOsStr = total_nRemoteUpgradeOsStr.",\"total_remote_upgrade_os"."<<MATCHSTR>>"."\":".tostring('<<FIELD>>')] \
| fields total_nRemoteUpgrade, total_nRemoteUpgradeSuccess, total_nRemoteUpgradeFailure, totalnRemoteUpgradePkgStr, total_nRemoteUpgradeOsStr \
| eval data="{\"total_remote_upgrade\":"+tostring(total_nRemoteUpgrade)+",\"total_remote_ugprade_success\":"+tostring(total_nRemoteUpgradeSuccess)+",\"total_remote_ugprade_failure\":"+tostring(total_nRemoteUpgradeFailure)+totalnRemoteUpgradePkgStr+total_nRemoteUpgradeOsStr+"}" \
| eval _time=now() \
| eval date=strftime(_time, "%Y-%m-%d") \
| fields data _time date
# Licensing
[instrumentation.licenseUsage]
# Why begin with NOT() | append [...]? Without that wrapper, running this saved search
# by itself would not reflect the results of the stats command in the Events tab;
# the tab would only show the events as they existed in the pipeline before stats ran.
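# A minimal sketch of the pattern (using the same RolloverSummary events as the search
# below): the leading NOT() matches nothing, so every result comes from the appended
# subsearch, and the post-stats rows are what downstream consumers see.
#   search = NOT() | append [search index=_telemetry type=RolloverSummary | stats sum(b) as consumption by stack]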
search = NOT() | append [search index=_telemetry type=RolloverSummary | eval date=strftime(_time-43200, "%Y-%m-%d") | eval licenseIDs=coalesce(replace(replace(replace(replace(licenseGuids,"\[","[\""),"\]","\"]"),",","\",\"")," ", ""),"null"), subgroup_id=coalesce(subgroupId, "Production"), group_id=coalesce("\""+licenseGroup+"\"", "null"), lmGuid=coalesce("\""+guid+"\"", "null"), productType=coalesce("\""+productType+"\"", "null"), type_id=if(substr(stack,1,16)="fixed-sourcetype", "fixed-sourcetype",stack) | stats max(_time) as lastTime latest(stacksz) as stack_quota, latest(poolsz) as pool_quota, sum(b) as consumption by pool stack host lmGuid licenseIDs type_id group_id subgroup_id productType date | rename stack as stack_id | eval pool="{\"quota\":" + pool_quota+",\"consumption\":"+consumption+"}" | stats delim="," values(pool) as pools, max(lastTime) as lastTime max(stack_quota) as stack_quota sum(consumption) as stack_consumption by stack_id group_id subgroup_id type_id lmGuid host licenseIDs productType date | mvcombine pools | eval _raw="{\"component\":\"licensing.stack\",\"data\":{\"host\":\""+host+"\",\"guid\":"+lmGuid+",\"name\":\""+replace(stack_id,"\"", "\\\"")+"\",\"type\":\"" + type_id + "\",\"subgroup\":\"" + subgroup_id + "\",\"product\":"+productType+",\"quota\":" + stack_quota+",\"consumption\":"+stack_consumption+",\"pools\":["+pools+"],\"licenseIDs\":"+licenseIDs+"}, \"date\":\""+date+"\",\"visibility\":\"anonymous,license\"}", _time=lastTime]
[instrumentation.licensing.stack]
search = index=_telemetry source=*license_usage_summary.log* sourcetype=splunkd TERM(type=RolloverSummary) | eval date=strftime(_time, "%m-%d-%Y"), licenseIDs=coalesce(replace(replace(replace(replace(licenseGuids,"\[","[\""),"\]","\"]"),",","\",\"")," ", ""),"null"), subgroup_id=coalesce(subgroupId, "Production"), group_id=coalesce("\""+licenseGroup+"\"", "null"), lmGuid=coalesce("\""+guid+"\"", "null"), productType=coalesce("\""+productType+"\"", "null"), type_id=if(substr(stack,1,16)="fixed-sourcetype", "fixed-sourcetype",stack) | stats latest(stacksz) as stack_quota, latest(poolsz) as pool_quota, sum(b) as consumption by pool stack host lmGuid licenseIDs type_id group_id subgroup_id productType date | rename stack as stack_id | eval pool="{\"quota\":" + pool_quota+",\"consumption\":"+consumption+"}" | stats delim="," values(pool) as pools, max(stack_quota) as stack_quota sum(consumption) as stack_consumption by stack_id group_id subgroup_id type_id lmGuid host licenseIDs productType date | mvcombine pools | eval data="{\"host\":\""+host+"\",\"guid\":"+lmGuid+",\"name\":\""+replace(stack_id,"\"", "\\\"")+"\",\"type\":\"" + type_id + "\",\"subgroup\":\"" + subgroup_id + "\",\"product\":"+productType+",\"quota\":" + stack_quota+",\"consumption\":"+stack_consumption+",\"pools\":["+pools+"],\"licenseIDs\":"+licenseIDs+"}" | eval _time=strptime(date, "%m-%d-%Y")-43200 | fields data _time
[instrumentation.licensing.lmredundancy.errors]
search = index=_internal sourcetype=splunkd component=LMStackMgr error=LMRedundancy | table host type text _time | eval date=strftime(_time, "%Y-%m-%d") | dedup text date | eval data="{\"host\":\""+host+"\",\"errorType\":\""+type+"\"}" | fields _time date data
# Performance
[instrumentation.performance.indexing]
search = index=_internal TERM(group=thruput) TERM(name=index_thruput) source=*metrics.log* | bin _time span=30s | stats sum(kb) as kb sum(instantaneous_kbps) as instantaneous_kbps by host _time | bin _time span=1d | stats sum(kb) as totalKB `instrumentation_distribution_values(instantaneous_kbps)` by host _time | eval data="{\"host\":\""+host+"\",\"thruput\":{\"total\":" + tostring(round(totalKB*1024)) + "," + `instrumentation_distribution_strings("instantaneous_kbps",1024,0)`+"}}" | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.performance.search]
search = index=_audit sourcetype=audittrail TERM(action=search) TERM(info=completed) total_run_time=* | eval search_et=if(search_et="N/A", 0, search_et) | eval search_lt=if(search_lt="N/A", exec_time, min(exec_time,search_lt)) | eval timerange=search_lt-search_et | bin _time span=1d | stats latest(searched_buckets) as searched_buckets latest(total_slices) as total_slices latest(scan_count) as scan_count latest(timerange) as timerange latest(total_run_time) as runtime by search_id _time | stats `instrumentation_distribution_values(runtime)`, `instrumentation_distribution_values(searched_buckets)`, `instrumentation_distribution_values(total_slices)`, `instrumentation_distribution_values(scan_count)`, `instrumentation_distribution_values(timerange)` count as numSearches by _time | eval data="{\"searches\":"+tostring(numSearches)+",\"latency\":{"+`instrumentation_distribution_strings("runtime",1,2)`+"},\"buckets\":{"+`instrumentation_distribution_strings("searched_buckets",1,2)`+"},\"slices\":{"+`instrumentation_distribution_strings("total_slices",1,2)`+"},\"scanCount\":{"+`instrumentation_distribution_strings("scan_count",1,2)`+"},\"dayRange\":{"+`instrumentation_distribution_strings("timerange",1/86400,2)`+"}}" | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
# Templates
[instrumentation.anonymous.firstEvent]
search = (index=_introspection OR index=_telemetry) sourcetype=splunk_telemetry source="http-stream" visibility=*anonymous* | append [savedsearch instrumentation.licenseUsage] | where date >= "$beginDate$" AND date <= "$endDate$" | head 1
[instrumentation.support.firstEvent]
search = (index=_introspection OR index=_telemetry) sourcetype=splunk_telemetry source="http-stream" visibility=*support* | append [savedsearch instrumentation.licenseUsage] | where date >= "$beginDate$" AND date <= "$endDate$" | head 1
[instrumentation.license.firstEvent]
search = | savedsearch instrumentation.licenseUsage | where date >= "$beginDate$" AND date <= "$endDate$" | head 1
[instrumentation.reporting]
search = index=_telemetry source=telemetry sourcetype=splunk_telemetry_log | fields _raw | spath | eval time_formatted = strftime(_time, "%Y-%m-%d %H:%M:%S") | search (status=success OR status=failed)
[instrumentation.reporting.errors]
search = index=_telemetry source=telemetry sourcetype=splunk_telemetry_log status=failed visibility=*$visibility$*
# Usage
[instrumentation.usage.app.page]
search = index=_internal sourcetype=splunk_web_access uri_path="/*/app/*/*" NOT uri_path="/*/static/*" | eval uri_parts=split(uri_path, "/"),locale=mvindex(uri_parts,1), app=mvindex(uri_parts,3), page=mvindex(uri_parts,4) | bin _time span=1d | eventstats estdc(user) as appUsers count as appOccurrences by app _time | bin _time span=1d | stats latest(locale) as locale count as occurrences estdc(user) as users by app page appUsers appOccurrences _time | sort app -occurrences | streamstats count as pageRank by app _time | where pageRank<=10 | eval data="{\"app\":\""+app+"\",\"page\":\""+page+"\",\"locale\":\""+locale+"\",\"occurrences\":" + tostring(occurrences) + ",\"users\":" + tostring(users) + "}" | eval data=if(pageRank==1,data+";{\"app\":\""+app+"\",\"locale\":\""+locale+"\",\"occurrences\":" + tostring(appOccurrences) + ",\"users\":" + tostring(appUsers) + "}", data) | stats values(data) as data by app appOccurrences appUsers _time | sort _time -appOccurrences | streamstats count as appRank by _time | where appRank<=25 | mvexpand data | makemv delim=";" data | mvexpand data | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.usage.indexing.sourcetype]
search = index=_internal source=*metrics.log* TERM(group=per_sourcetype_thruput) | bin _time span=1d | stats sum(ev) as events, sum(kb) as size, estdc(host) as hosts by series _time | eval data="{\"name\":\""+replace(series,"\"", "\\\"") + "\",\"events\":"+tostring(events)+",\"bytes\":"+tostring(round(size*1024))+",\"hosts\":"+tostring(hosts)+"}" | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.usage.search.concurrent]
search = index=_introspection sourcetype=splunk_resource_usage component::PerProcess data.search_props.sid::* | bin _time span=10s | stats estdc(data.search_props.sid) AS concurrent_searches by _time host | bin _time span=1d | stats `instrumentation_distribution_values(concurrent_searches)` by host _time | eval data="{\"host\":\""+host+"\",\"searches\":{" + `instrumentation_distribution_strings("concurrent_searches",1,0)` +"}}" | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.usage.search.type]
search = index=_introspection sourcetype=splunk_resource_usage component::PerProcess data.search_props.sid::* | rename data.search_props.type as searchType | bin _time span=1d | stats estdc(data.search_props.sid) AS search_count by searchType _time | eval data="\""+searchType+"\":"+tostring(search_count) | stats delim="," values(data) as data by _time | rename _time as date | mvcombine data | eval data="{"+data+"}" | rename date as _time | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.usage.users.active]
search = index=_audit sourcetype=audittrail TERM(action=search) user!="splunk-system-user" user!="n/a" | bin _time span=1d | stats estdc(user) as active by _time | eval data="{\"active\":"+tostring(active)+"}" | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.usage.kvstore]
search = | rest splunk_server=local /services/kvstore/info | appendcols [ | rest splunk_server=local /services/server/introspection/kvstore/collectionstats | mvexpand data | spath input=data | rex field=ns "(?<App>.*)\.(?<Collection>.*)" | eval dbsize=round(size/1024/1024, 2) | stats sum(dbsize) as data_size count(ns) as numOfCollections | eval collData = "\"usage.dataSizeMB\": \"" + data_size + "\", \"usage.numOfCollections\": \"" + numOfCollections + "\""] | eval data = "{" | foreach usage.* [eval data = data + "\"<<FIELD>>\":\"" + '<<FIELD>>' + "\", " ] | eval data = data + collData | eval data = rtrim(data, ", ") + "}", _time = now(), date=strftime(_time, "%Y-%m-%d") | fields data _time date
# kvstore backup/restore
[instrumentation.usage.kvstorebackuprestore]
search = index=_internal sourcetype=splunkd source=*splunkd.log* component=KVStoreBackupRestore (started AND archiveName AND method) | eval usageMethod = if(method==2, "pointInTime", "nonPointInTime") | stats count by usageMethod, action | makejson usageMethod, action, count output=data | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
#Topology
[instrumentation.topology.deployment.clustering.member]
search = | localop | rest /services/cluster/master/peers | eval data="{\"master\":\""+splunk_server+"\",\"member\":{\"host\":\""+label+"\",\"guid\":\""+title+"\",\"status\":\""+status+"\"},\"site\":\""+site+"\"}" | where isnotnull(data) | eval _time=now() | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.topology.deployment.clustering.searchhead]
search = | localop | rest /services/cluster/master/searchheads | where splunk_server!=label | eval data="{\"master\":\""+splunk_server+"\",\"searchhead\":{\"host\":\""+label+"\",\"guid\":\""+title+"\",\"status\":\""+status+"\"},\"site\":\""+site+"\"}" | where isnotnull(data) | eval _time=now() | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.topology.deployment.shclustering.member]
search = | localop | rest /services/shcluster/captain/members | eval data="{\"site\":\""+site+"\",\"captain\":\""+splunk_server+"\",\"member\":{\"host\":\""+label+"\",\"guid\":\""+title+"\",\"status\":\""+status+"\"}}" | where isnotnull(data) | eval _time=now() | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.topology.deployment.distsearch.peer]
search = | localop | rest /services/search/distributed/peers | eval data="{\"host\":\""+splunk_server+"\",\"peer\":{\"host\":\""+peerName+"\",\"guid\":\""+guid+"\",\"status\":\""+status+"\"}}" | where isnotnull(data) | eval _time=now() | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.topology.deployment.licensing.slave]
search = | localop | rest /services/licenser/slaves | eval data="{\"master\":\""+splunk_server+"\",\"slave\":{\"host\":\""+label+"\",\"guid\":\""+title+"\",\"pool\":\""+active_pool_ids+"\"}}" | where isnotnull(data) | eval _time=now() | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.topology.deployment.licensing.manager]
search = | localop | rest /services/licenser/managers | eval data="{\"host\":\""+lm_manager_label+"\",\"guid\":\""+title+"\"}" | where isnotnull(data) | eval _time=now() | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
#Workload management
[instrumentation.usage.workloadManagement.enabled]
search = NOT() | append [rest splunk_server=local /services/workloads/status | eval support='general.isSupported', enabled='general.enabled', os_name='general.os_name', os_version='general.os_version'| fields support, enabled, os_name, os_version]
[instrumentation.usage.workloadManagement.category]
search = NOT() | append [rest splunk_server=local /services/workloads/categories | eval data="\""+title+"\":{\"allocated cpu percent\":\""+cpu_allocated_percent+"\", \"allocated mem limit\":\""+mem_allocated_percent+"\"}" | stats list(data) AS categoryList | eval categoryCombined=mvjoin(categoryList, ", ") | fields categoryCombined]
[instrumentation.usage.workloadManagement.pools]
search = NOT() | append [rest splunk_server=local /services/workloads/pools | eval isDefaultPool=if(default_category_pool=1, "True", "False"), poolList="\""+title+"\":{\"allocated cpu percent\":\""+cpu_allocated_percent+"\", \"allocated mem limit\":\""+mem_allocated_percent+"\", \"default category pool\":\""+isDefaultPool+"\"}" | stats list(poolList) AS poolList, count BY category | eval poolList="\""+category+"\":{\"count\":"+count+","+mvjoin(poolList, ", ")+"}" | stats sum(count) AS poolTotal list(poolList) AS poolList | eval poolCombined=mvjoin(poolList, ", ") | fields poolCombined, poolTotal]
[instrumentation.usage.workloadManagement.rules]
search = NOT() | append [rest splunk_server=local /services/workloads/rules | eval data="\""+title+"\":{\"order\":\""+order+"\", \"predicate\":\""+predicate+"\", \"workload pool\":\""+workload_pool+"\"}" | stats list(data) AS ruleList, count AS ruleTotal by splunk_server | eval ruleCombined=mvjoin(ruleList, ", ") | fields ruleTotal, ruleCombined]
[instrumentation.usage.workloadManagement.report]
action.outputtelemetry = 1
action.outputtelemetry.param.anonymous = 1
action.outputtelemetry.param.support = 1
action.outputtelemetry.param.license = 0
action.outputtelemetry.param.optinrequired = 3
action.outputtelemetry.param.component = usage.workloadManagement.report
action.outputtelemetry.param.input = data
action.outputtelemetry.param.type = aggregate
alert.suppress = 0
alert.track = 0
counttype = number of events
cron_schedule = 0 3 * * 1
dispatch.earliest_time = -1w
dispatch.latest_time = now
display.general.type = statistics
display.page.search.tab = statistics
enableSched = 1
quantity = 0
relation = greater than
search = |rest splunk_server=local /services/server/info | appendcols [|rest splunk_server=local /servicesNS/nobody/splunk_instrumentation/telemetry | fields telemetrySalt]| eval telemetrySalt=if(isnull(telemetrySalt), "", telemetrySalt), hashHost=sha1(telemetrySalt+splunk_server), roleCombine=mvjoin(server_roles, ", ") | fields guid, hashHost, roleCombine| appendcols [|savedsearch instrumentation.usage.workloadManagement.enabled] | appendcols [|savedsearch instrumentation.usage.workloadManagement.category]| appendcols [|savedsearch instrumentation.usage.workloadManagement.pools] | appendcols [|savedsearch instrumentation.usage.workloadManagement.rules] | fillnull value=0 | eval data="{\"host\": \""+hashHost+"\", \"guid\": \""+guid+"\", \"wlm supported\": \""+support+"\", \"os\": \""+os_name+"\", \"osVersion\": \""+os_version+"\", \"wlm enabled\": \""+enabled+"\", \"server roles\": \""+roleCombine+"\"", poolTotal=if(isnull(poolTotal),0, poolTotal), ruleTotal=if(isnull(ruleTotal),0, ruleTotal) | eval data=if(support==1, data+", \"categories\":{"+categoryCombined+"}, \"pools\":{\"total count\":\""+poolTotal+"\""+ if(poolTotal>0, ", "+poolCombined+"", "") + "}, \"rules\":{\"total count\":\""+ruleTotal+"\""+if(ruleTotal>0, ", "+ruleCombined, "")+"}}", data+"}"), _time=now(), date=strftime(_time, "%Y-%m-%d")| fields _time date data
#Admission rules
[instrumentation.usage.admissionRules.enabled]
search = NOT() | append [rest splunk_server=local /services/workloads/status/admission-control-status | fields enabled]
[instrumentation.usage.admissionRules.rules]
search = NOT() | append [rest splunk_server=local "/services/workloads/rules?workload_rule_type=search_filter" | `hash_admission_rule_predicate(index)` | `hash_admission_rule_predicate(app)` | `hash_admission_rule_predicate(user)` | `hash_admission_rule_predicate(role)` | eval data="\""+sha1(title)+"\":{\"predicate\":\""+predicate+"\"}" | stats list(data) AS ruleList, count AS ruleTotal | eval ruleCombined=mvjoin(ruleList, ", ") | fields ruleTotal, ruleCombined]
[instrumentation.usage.admissionRules.rulesTriggered]
search = index=_internal sourcetype=wlm_monitor prefilter_action=filter | stats count by prefilter_rule | fields prefilter_rule, count | eval data="{\"searchFilterRule\":\""+sha1(prefilter_rule)+"\", \"filteredSearchesCount\":\""+count+"\"}" | stats list(data) AS rulesTriggered, count AS rulesTriggeredTotal | eval rulesTriggeredCombined=mvjoin(rulesTriggered, ", ") | fields rulesTriggeredTotal, rulesTriggeredCombined
[instrumentation.usage.admissionRules.report]
action.outputtelemetry = 1
action.outputtelemetry.param.anonymous = 1
action.outputtelemetry.param.support = 1
action.outputtelemetry.param.license = 0
action.outputtelemetry.param.optinrequired = 3
action.outputtelemetry.param.component = usage.admissionRules.report
action.outputtelemetry.param.input = data
action.outputtelemetry.param.type = aggregate
alert.suppress = 0
alert.track = 0
counttype = number of events
cron_schedule = 0 3 * * 1
dispatch.earliest_time = -1w
dispatch.latest_time = now
display.general.type = statistics
display.page.search.tab = statistics
enableSched = 1
quantity = 0
relation = greater than
search = |rest splunk_server=local /services/server/info | appendcols [|rest splunk_server=local /servicesNS/nobody/splunk_instrumentation/telemetry | fields telemetrySalt]| eval telemetrySalt=if(isnull(telemetrySalt), "", telemetrySalt), hashHost=sha1(telemetrySalt+splunk_server), roleCombine=mvjoin(server_roles, ", ") | fields guid, hashHost, roleCombine| appendcols [|savedsearch instrumentation.usage.admissionRules.enabled] | appendcols [|savedsearch instrumentation.usage.admissionRules.rules] | appendcols [|savedsearch instrumentation.usage.admissionRules.rulesTriggered] | fillnull value=0 | eval data="{\"host\": \""+hashHost+"\", \"guid\": \""+guid+"\", \"admissionRulesEnabled\": \""+enabled+"\", \"serverRoles\": \""+roleCombine+"\", \"rules\":{\"totalCount\":\""+ruleTotal+"\""+if(ruleTotal>0, ", "+ruleCombined, "")+"}, \"rulesTriggered\":["+if(rulesTriggeredTotal>0, rulesTriggeredCombined, "")+"]}", _time=now(), date=strftime(_time, "%Y-%m-%d")| fields _time date data
#Password policy management
[instrumentation.usage.passwordPolicy.config]
action.outputtelemetry = 1
action.outputtelemetry.param.anonymous = 1
action.outputtelemetry.param.support = 1
action.outputtelemetry.param.license = 0
action.outputtelemetry.param.optinrequired = 3
action.outputtelemetry.param.component = usage.passwordPolicy.config
action.outputtelemetry.param.input = data
action.outputtelemetry.param.type = aggregate
alert.suppress = 0
alert.track = 0
counttype = number of events
cron_schedule = 0 3 * * 1
dispatch.earliest_time = -1w
dispatch.latest_time = now
display.general.type = statistics
display.page.search.tab = statistics
enableSched = 1
quantity = 0
relation = greater than
search = |rest splunk_server=local /services/admin/Splunk-auth/splunk_auth| join type=left splunk_server [|rest splunk_server=local /services/server/info | fields guid, splunk_server] | appendcols [|rest splunk_server=local /servicesNS/nobody/splunk_instrumentation/telemetry | fields telemetrySalt]| eval telemetrySalt=if(isnull(telemetrySalt), "", telemetrySalt), hashHost=sha1(telemetrySalt+splunk_server)| replace "1" with "true", "0" with "false" in enablePasswordHistory,expireUserAccounts, forceWeakPasswordChange, lockoutUsers, verboseLoginFailMsg | eval data="{\"host\": \""+hashHost+"\",\"guid\": \""+guid+"\", \"constant login time\":\""+constantLoginTime+"\", \"enable password history\":\""+enablePasswordHistory+"\", \"expiration alert in days\":\""+expireAlertDays+"\", \"days until password expires\":\""+expirePasswordDays+"\", \"enable password expiration\":\""+expireUserAccounts+"\", \"force existing users to change weak passwords\":\""+forceWeakPasswordChange+"\", \"failed login attempts\":\""+lockoutAttempts+"\", \"lockout duration in minutes\":\""+lockoutMins+"\", \"lockout threshold in minutes\":\""+lockoutThresholdMins+"\", \"enable lockout users\":\""+lockoutUsers+"\", \"minimum number of digits\":\""+minPasswordDigit+"\", \"minimum number of characters\":\""+minPasswordLength+"\", \"minimum number of lowercase letters\":\""+minPasswordLowercase+"\", \"minimum number of special characters\":\""+minPasswordSpecial+"\", \"minimum number of uppercase letters\":\""+minPasswordUppercase+"\", \"password history count\":\""+passwordHistoryCount+"\", \"enable verbose login fail message\":\""+verboseLoginFailMsg+"\"}",_time=now(), date=strftime(_time, "%Y-%m-%d") | fields data _time date
#Health monitoring
[instrumentation.usage.healthMonitor.report]
action.outputtelemetry = 1
action.outputtelemetry.param.anonymous = 1
action.outputtelemetry.param.support = 1
action.outputtelemetry.param.license = 0
action.outputtelemetry.param.optinrequired = 3
action.outputtelemetry.param.component = usage.healthMonitor.report
action.outputtelemetry.param.input = data
action.outputtelemetry.param.type = aggregate
alert.suppress = 0
alert.track = 0
counttype = number of events
cron_schedule = 0 3 * * 1
dispatch.earliest_time = -1w
dispatch.latest_time = now
display.general.type = statistics
display.page.search.tab = statistics
enableSched = 1
quantity = 0
relation = greater than
search = |rest splunk_server=local /services/server/health-config | eval thresh="" | foreach indicator*red,indicator*yellow [eval thresh =if('<<FIELD>>'!="", thresh+"\"<<FIELD>>\":" + '<<FIELD>>' + ",", thresh)] | eval thresh=rtrim(thresh, ","), enabled=if(disabled=='' or disabled==0 or isnull(disabled), 1,0) | eval feature="\""+title+"\":{\"threshold\": {"+thresh+"}, \"enabled\": \""+enabled+"\"}", distinct=if(like(title, "feature%"), "feature", "alert") | eval disable=coalesce('alert.disabled', disabled), action=coalesce('alert.actions','action.to','action.url', 'action.integration_url_override') | eval action=if(action=="" or isnull(action), "empty", action) | eval alert="\""+title+"\": {\"disabled\": \""+disable+"\", \"action/ action.to/ action.url/ action.integration_url_override\": \""+action+"\"}" | stats list(alert) AS alertList, list(feature) AS feaList by distinct | eval alertCombined=mvjoin(alertList, ","), feaCombined=mvjoin(feaList, ",") | eval alertCombined="\"alert\":{"+alertCombined+"}" | eval feaCombined=if(distinct=="alert", null, feaCombined), alertCombined=if(distinct=="feature", null, alertCombined) | eval dataCombined=coalesce(alertCombined, feaCombined) | stats list(dataCombined) AS dataList| eval data=mvjoin(dataList, ",") | eval data="{"+data+"}",_time=now(), date=strftime(_time, "%Y-%m-%d") | fields data _time date
#Health Reporter Usage
[instrumentation.usage.healthMonitor.currentState]
search = | rest /services/apps/local splunk_server=local | search title=search | rename version as splunk_version | fields splunk_version \
| appendcols [| rest services/server/health-config/distributed_health_reporter splunk_server=local | eval dist_hr_enabled=if(isnull(disabled) or disabled=='' or disabled==0, 1, 0) | fields dist_hr_enabled] \
| appendcols [| search index=_internal earliest=-1d source=*splunkd_ui_access.log ("server/health/splunkd/details" OR "server/health/deployment/details") | stats count as click_count] \
| appendcols [ \
| rest services/server/health/splunkd/details splunk_server=local \
| fields + features.* health | rename health as features.health \
| fields - *.reasons.* *.messages.* \
| foreach features.* [ eval newname="splunkd."+replace(lower("<<MATCHSTR>>"), " ", "_") | rex field=newname mode=sed "s/features\.|\.health//g" | eval {newname}='<<FIELD>>'] \
| fields - features.*, newname \
| transpose column_name="features" | rename "row 1" as current_color \
| join type=outer features [ \
| search index=_internal earliest=-1d source=*health.log component=PeriodicHealthReporter \
| stats count as num, values(color) as colors by node_path \
| rename node_path as features \
| eval colors = mvjoin(colors, ",") \
| eval worst_color = if(match(colors, "red"), "red", (if(match(colors, "yellow"), "yellow", "green"))) \
| fields features, num, colors, worst_color \
] \
| sort by features \
| eval nodes="", combin_column=1 \
| foreach features* [eval nodes = "{\"nodePath\": \"" + '<<FIELD>>' + "\", \"color\": \"" + current_color + "\", \"worstColorInLast24Hours\": \"" + worst_color + "\"}"] \
| stats count list(nodes) as node_list by combin_column \
| eval node_status = mvjoin(node_list, ", ") \
] \
| eval data = "{\"splunkVersion\": \"" + splunk_version + "\", \"distribuedHealthReporter\": {\"enabled\": " + dist_hr_enabled + "}, \"healthReportClicks\": " + click_count + ", \"nodeStatus\": [" + node_status + "]}", _time=now(), date=strftime(_time, "%Y-%m-%d") \
| fields data _time date
#Authentication methods
[instrumentation.usage.authMethod.config]
action.outputtelemetry = 1
action.outputtelemetry.param.anonymous = 1
action.outputtelemetry.param.support = 1
action.outputtelemetry.param.license = 0
action.outputtelemetry.param.optinrequired = 3
action.outputtelemetry.param.component = usage.authMethod.config
action.outputtelemetry.param.input = data
action.outputtelemetry.param.type = aggregate
alert.suppress = 0
alert.track = 0
counttype = number of events
cron_schedule = 0 3 * * 1
dispatch.earliest_time = -1w
dispatch.latest_time = now
display.general.type = statistics
display.page.search.tab = statistics
enableSched = 1
quantity = 0
relation = greater than
search = |rest splunk_server=local /services/admin/auth-services| join type=left splunk_server [|rest splunk_server=local /services/server/info | fields guid, splunk_server] | appendcols [|rest splunk_server=local /servicesNS/nobody/splunk_instrumentation/telemetry | fields telemetrySalt]| eval telemetrySalt=if(isnull(telemetrySalt), "", telemetrySalt), hashHost=sha1(telemetrySalt+splunk_server)| eval data="{\"host\": \""+hashHost+"\",\"guid\": \""+guid+"\", \"authentication method\": \""+active_authmodule+"\",\"mfa type\": " +"\"" + if(mfa_type=="", "none", mfa_type) +"\"}", _time=now(), date=strftime(_time, "%Y-%m-%d") | fields data _time date
#JsonWebToken metrics
[instrumentation.authentication.jwt]
search = | rest splunk_server=local /services/properties/authorize/tokens_auth/disabled | table value | rename value as disabled | appendcols [| search index=_audit action=create_token | stats count | rename count as created] | appendcols [| rest splunk_server=local /services/configs/conf-authentication | eval test=if(isnull(scriptPath), 0, 1) | table test | stats sum | rename sum(test) as scriptedExtensionsEnabled] | appendcols [| search index = _internal source=*/splunkd.log jsonwebtoken validation failed | stats count | rename count as failures] | makejson output=data
#S2 configuration
[instrumentation.usage.smartStore.global]
search = |rest splunk_server=local /services/configs/conf-server | where title in ("cachemanager","diskUsage", "clustering") | eval data="\""+title+"\":",hotlist_recency_secs=if(isnull(hotlist_recency_secs), "none", hotlist_recency_secs), hotlist_bloom_filter_recency_hours=if(isnull(hotlist_bloom_filter_recency_hours), "none", hotlist_bloom_filter_recency_hours) | eval data=if(title="diskUsage", data+"{\"minFreeSpace\":\""+minFreeSpace+"\"}", data), data=if(title="cachemanager", data+"{\"eviction_padding\":\""+eviction_padding+"\",\"max_cache_size\":\""+max_cache_size+"\", \"hotlist_recency_secs\":\""+hotlist_recency_secs+"\", \"hotlist_bloom_filter_recency_hours\":\""+hotlist_bloom_filter_recency_hours+"\"}", data), data=if(title="clustering", data+"{\"mode\":\""+mode+"\""+if(mode="master", ",\"search_factor\":\""+search_factor+"\",\"multisite\":\""+multisite+"\",\"site_replication_factor\":\""+site_replication_factor+"\",\"site_search_factor\":\""+site_search_factor+"\"}", "}"), data) | stats list(data) AS dataList BY splunk_server | eval globalConfig="\"global config\":{" + mvjoin(dataList, ",") + "}" | fields globalConfig, splunk_server
[instrumentation.usage.smartStore.perIndex]
search = |rest splunk_server=local /services/configs/conf-indexes | appendcols [|rest splunk_server=local /servicesNS/nobody/splunk_instrumentation/telemetry | fields telemetrySalt]| eval title_dist=if(match(title, "^([^_].*?)\s*"),"external","internal"), s2Enabled=if(isnotnull(remotePath),"SmartStore enabled", "non-SmartStore enabled"),hotlist_recency_secs=if(isnull(hotlist_recency_secs), "none", hotlist_recency_secs), hotlist_bloom_filter_recency_hours=if(isnull(hotlist_bloom_filter_recency_hours), "none", hotlist_bloom_filter_recency_hours) | makejson frozenTimePeriodInSecs, hotlist_recency_secs, hotlist_bloom_filter_recency_hours, maxHotSpanSecs, maxGlobalDataSizeMB, output="indexConfig" | eval telemetrySalt=if(isnull(telemetrySalt), "", telemetrySalt), hashTitle=sha1(telemetrySalt+title), title_combine=title_dist+"_"+hashTitle, indexConfig="\""+title_combine+"\":" + indexConfig | stats list(hashTitle) AS titleList,list(indexConfig) AS indexList BY s2Enabled, splunk_server | eval indexConfig=mvjoin(indexList, ","), titleCombined="\""+s2Enabled+"\":\"" + mvjoin(titleList, ",") +"\"" | stats list(titleCombined) AS s2List, list(indexConfig) AS indexList BY splunk_server| eval s2Enabled="\"list of indexes\":{" + mvjoin(s2List, ",") + "}", indexConfig="\"per index config\":{" + mvjoin(indexList, ",") + "}" | fields s2Enabled, indexConfig, splunk_server
[instrumentation.usage.smartStore.capacity]
search = |rest splunk_server=local /services/server/status/partitions-space | makejson available, capacity, free, fs_type, output="cap" | eval cap="\""+title+"\": "+cap+"" | stats list(cap) AS capList BY splunk_server | eval capCombined="\"total storage capacity\":{" + mvjoin(capList, ", ") + "}" | fields capCombined, splunk_server
[instrumentation.usage.smartStore.accountTypes]
search = |rest splunk_server=local /services/configs/conf-indexes | eval scheme=if(storageType == "remote", mvindex(split(path,":"), 0), "local") | eval scheme=case(scheme=="gs","gcp",scheme=="s3","aws",scheme!="local",scheme) | stats list(scheme) as accountTypes delim="," by splunk_server | nomv accountTypes | fillnull value="" accountTypes | eval accountTypes="\"accountTypes\":\"" + accountTypes + "\"" | fields accountTypes, splunk_server
[instrumentation.usage.smartStore.config]
action.outputtelemetry = 1
action.outputtelemetry.param.anonymous = 1
action.outputtelemetry.param.support = 1
action.outputtelemetry.param.license = 0
action.outputtelemetry.param.optinrequired = 3
action.outputtelemetry.param.component = usage.smartStore.Config
action.outputtelemetry.param.input = data
action.outputtelemetry.param.type = aggregate
alert.suppress = 0
alert.track = 0
counttype = number of events
cron_schedule = 0 3 * * 1
dispatch.earliest_time = -1w
dispatch.latest_time = now
display.general.type = statistics
display.page.search.tab = statistics
enableSched = 1
quantity = 0
relation = greater than
search = |savedsearch instrumentation.usage.smartStore.global | join type=left splunk_server [|savedsearch instrumentation.usage.smartStore.perIndex] | join type=left splunk_server [|savedsearch instrumentation.usage.smartStore.capacity] | join type=left splunk_server [|savedsearch instrumentation.usage.smartStore.accountTypes] | eval data="{"+globalConfig+", "+capCombined+", "+indexConfig+", "+s2Enabled+", "+accountTypes+"}",_time=now(), date=strftime(_time, "%Y-%m-%d") | fields data _time date
#Metrics
[instrumentation.usage.search.report_acceleration]
search = | localop | rest /servicesNS/-/-/admin/summarization | stats count as existing_report_accelerations, sum(summary.access_count) as access_count_of_existing_report_accelerations | makejson access_count_of_existing_report_accelerations(int) existing_report_accelerations(int) output="data" | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
#searchtelemetry
[instrumentation.usage.search.searchTelemetry]
search = index=_introspection sourcetype=search_telemetry | rename search_commands{}.name as name, search_commands{}.duration as duration | stats perc50(duration), perc90(duration), perc95(duration), perc99(duration), sum(duration) as totalDuration, sum(bytes_read) as sumBytesRead, count(bytes_read) as countBytesRead max(bytes_read) as maxBytesRead by name, type | makejson output=searchTypeInformation | fields searchTypeInformation | mvcombine delim="," searchTypeInformation | nomv searchTypeInformation | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | eval data="{ \"searchTypeInformation\" : [".searchTypeInformation."]}" | fields _time date data
#searchtelemetry sourcetypeusage
[instrumentation.usage.search.searchtelemetry.sourcetypeUsage]
search = index=_audit | stats count(sourcetype_count__*) as * | makejson output=sourcetypeUsage | fields sourcetypeUsage | mvcombine delim="," sourcetypeUsage | nomv sourcetypeUsage | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | eval data="{ \"sourcetypeUsage\" : [".sourcetypeUsage."]}" | fields _time date data
#Lookup Definitions
[instrumentation.usage.lookups.lookupDefinitions]
search = |rest splunk_server=local /services/admin/transforms-lookup getsize=true | eval name = 'eai:acl.app' + "." + title | rename "eai:acl.sharing" AS sharing | eval is_temporal = if(isnull(time_field),0,1) | table name type is_temporal size sharing | join type=left name [rest splunk_server=local /services/admin/kvstore-collectionstats | table data | mvexpand data | spath input=data | table ns size | rename ns as name] | eval name=sha1(name) | makejson output=lookups | stats list(lookups) as lookups | eval data = "{ \"lookups\" : [" . mvjoin(lookups,",") . "]}", _time = now(), date=strftime(_time, "%Y-%m-%d") | fields data _time date
#Bundle Replication
[instrumentation.performance.bundleReplication]
search = index=_internal source=*/metrics.log TERM(group=bundles_uploads) | bin _time span=1d | stats count as bundles_uploads_count avg(peer_count) as avg_peer_count avg(average_baseline_bundle_bytes) as avg_baseline_bundle_bytes max(average_baseline_bundle_bytes) as max_baseline_bundle_bytes avg(average_delta_bundle_bytes) as avg_delta_bundle_bytes max(average_delta_bundle_bytes) as max_delta_bundle_bytes sum(total_count) as total_count sum(delta_count) as total_delta_count sum(success_count) as total_success_count sum(baseline_count) as total_baseline_count sum(already_present_count) as total_already_present_count sum(total_msec_spent) as total_msec_spent sum(delta_msec_spent) as total_delta_msec_spent sum(total_bytes) as total_bytes sum(delta_bytes) as total_delta_bytes by host _time | makejson output=data | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
#Bundle Replication Cycle
[instrumentation.performance.bundleReplicationCycle]
search = index=_internal source=*/metrics.log splunk_server=local TERM(group=bundle_replication) TERM(name=cycle_dispatch) \
| stats count(cycle_id) as cycleCount avg(peer_count) as avgPeerCount avg(peer_success_count) as avgPeerSuccessCount avg(replication_time_msec) as avgReplicationTimeMsec avg(bundle_bytes) as avgBundleBytes avg(delta_bundle_bytes) as avgDeltaBundleBytes \
| appendcols [| rest /services/search/distributed/bundle/replication/config splunk_server=local | fields replicationPolicy] \
| eval avgPeerCount=round(avgPeerCount,2) | eval avgPeerSuccessCount=round(avgPeerSuccessCount,2) \
| eval avgReplicationTimeMsec=round(avgReplicationTimeMsec,2) | eval avgBundleBytes=round(avgBundleBytes,2) | eval avgDeltaBundleBytes=round(avgDeltaBundleBytes,2) \
| makejson output=data | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
#Metrics Info
[instrumentation.usage.metrics]
search = | mcatalog values(_dims) values(sourcetype) values(metric_type) where index=* earliest=-15m by metric_name, index | stats count(values(_dims)) AS dimension_count list(values(sourcetype)) AS sourcetype list(values(metric_type)) AS metrictype by metric_name, index | eval metrictype = if(isnull(metrictype), "N/A", metrictype) | fields metric_name, index, dimension_count, sourcetype, metrictype | eval data="{ \"metricName\" : \""+metric_name+"\", \"indexName\" : \""+index+"\", \"dimensionCount\" : \""+dimension_count+"\", \"sourcetype\" : \""+sourcetype+"\", \"metricType\" : \""+metrictype+"\"}", _time = now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
#Rollup
[instrumentation.usage.rollup]
search = | rest servicesNS/-/-/catalog/metricstore/rollup | eval summaryCount=0, target_index_list="", metricOverrideCount=0, name=title, hasDimensionList=if(isnull(dimensionList), "false", "true") | foreach summaries*rollupIndex [| eval summaryCount=if(isnull('<<FIELD>>'), summaryCount, summaryCount+1)] | foreach aggregation* [| eval metricOverrideCount=if(isnull('<<FIELD>>'), metricOverrideCount, metricOverrideCount+1)] | foreach summaries*rollupIndex [eval target_index_list=if(isnotnull('<<FIELD>>'), target_index_list.",".'<<FIELD>>', target_index_list)] | eval targetIndex=split('target_index_list',",")| mvexpand targetIndex | search NOT targetIndex="" | join type=left targetIndex [| rest /services/data/indexes datatype=metric | eval targetIndexDBSizeGB_temp=tostring(round(coalesce(currentDBSizeMB,0) / 1024, 2)) | stats sum(targetIndexDBSizeGB_temp) as targetIndexDBSizeGB by title | rename title as targetIndex | fields targetIndex, targetIndexDBSizeGB] | fields name, defaultAggregation, summaryCount, hasDimensionList, metricOverrideCount, targetIndex, targetIndexDBSizeGB | eval targetIndexDBSizeGB=if(targetIndexDBSizeGB==0, "0 (Check Index to Verify)", targetIndexDBSizeGB) | makejson name defaultAggregation targetIndex targetIndexDBSizeGB hasDimensionList summaryCount(int) metricOverrideCount(int) output="data" | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
# Feature Flags
[instrumentation.deployment.featureFlags]
search = | rest /services/properties splunk_server=local | rename title as conf | map maxsearches=200 search="| rest /services/properties/$conf$ fillContents=1 | rename title as featureFlag | search featureFlag=_feature-flag*" | fields featureFlag, name, creationDate, disabled, description | makejson featureFlag, name, description, disabled, creationDate output=data | outputtelemetry input=data anonymous=true support=true component="deployment.featureFlags" optinrequired=3 type=event
# Savedsearches Alert
[instrumentation.usage.savedSearches.alerts]
search = | rest servicesNS/-/-/admin/savedsearch | search NOT title=instrumentation.* | eval name=sha1(title), alertConditionType=alert_type, actionList=actions, triggerMode=if('alert.digest_mode'==1, "Once", "For each result"), alertSeverity='alert.severity', alertSuppress=if('alert.suppress'==1, "Yes", "No"), alertSuppressGroup=if('alert.suppress.group_name'=="", "N/A", sha1('alert.suppress.group_name')), alertTrackable=if('alert.track'==1, "Yes", "No"), cronSchedule=cron_schedule, dispatchAllowPartialResults=if('dispatch.allow_partial_results'==1, "Yes", "No") | fields name, alertConditionType, actionList, triggerMode, alertSeverity, alertTrackable, alertSuppress, alertSuppressGroup, cronSchedule, dispatchAllowPartialResults | makejson name alertConditionType actionList triggerMode alertSeverity alertTrackable alertSuppress cronSchedule alertSuppressGroup dispatchAllowPartialResults output="data" | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
# Streaming Metric Alert
[instrumentation.usage.streamingMetricAlerts]
search = | rest servicesNS/-/-/alerts/metric_alerts | eval name=sha1(title), hasFilter=if(filter=="", "No", "Yes"), hasGroupby=if(groupby=="", "No", "Yes"), triggerActionPerGroup=if('trigger.action_per_group'==1, "Yes", "No"), triggerEvaluationPerGroup=if('trigger.evaluation_per_group'==1, "Yes", "No"), triggerSuppress=if('trigger.suppress'==1, "Yes", "No"), triggerPrepare=if(isnotnull('trigger.prepare'), "Yes", "No"), alertTrackable=if('splunk_ui.track'==1, "Yes", "No"), triggerThreshold=if(isnotnull('trigger.threshold'), 'trigger.threshold', "N/A"), actionList="", hasDescription=if(description=="", "No", "Yes"), alertSeverity=if(isnull('splunk_ui.severity'), "N/A", 'splunk_ui.severity'), triggerExpires='trigger.expires', triggerMaxTracked='trigger.max_tracked' | eval actionList=if('action.email'==1, actionList.","."email", actionList), actionList=if('action.logevent'==1, actionList.","."logevent", actionList), actionList=if('action.rss'==1, actionList.","."rss", actionList), actionList=if('action.script'==1, actionList.","."script", actionList), actionList=if('action.webhook'==1, actionList.","."webhook", actionList) | eval actionList=if(actionList=="", actionList, substr(actionList,2)) | eval hasLabels="No", hasComplexCondition="No" | foreach label* [| eval hasLabels=if(isnull('<<FIELD>>'), hasLabels, "Yes")] | eval hasMultipleMetricIndexes=if(match(metric_indexes, ","), "Yes", "No"), hasComplexCondition=if(match(condition, "OR") OR match(condition, "AND"), "Yes", hasComplexCondition) | fields name, hasFilter, hasGroupby, triggerActionPerGroup, triggerEvaluationPerGroup, triggerThreshold, triggerSuppress, triggerPrepare, alertTrackable, actionList, hasDescription, alertSeverity, triggerExpires, triggerMaxTracked, hasLabels, hasMultipleMetricIndexes, hasComplexCondition | makejson name hasFilter hasGroupby triggerActionPerGroup triggerEvaluationPerGroup triggerThreshold triggerSuppress triggerPrepare alertTrackable actionList hasDescription alertSeverity triggerExpires triggerMaxTracked hasLabels hasMultipleMetricIndexes hasComplexCondition output="data" | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
# Durable Search
[instrumentation.usage.durableSearch]
search = | rest servicesNS/-/-/admin/savedsearch | search NOT title=instrumentation.* AND NOT durable.track_time_type="" | eval name=sha1(title), durableTrackTimeType='durable.track_time_type', durableLagTime='durable.lag_time', durableBackfillType='durable.backfill_type', durableMaxBackfillIntervals='durable.max_backfill_intervals', enableSummaryIndex=if('action.summary_index'==1, "Yes", "No") | fields name, durableTrackTimeType, durableLagTime, durableBackfillType, durableMaxBackfillIntervals, enableSummaryIndex | makejson name durableTrackTimeType durableLagTime durableBackfillType durableMaxBackfillIntervals enableSummaryIndex output="data" | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
# REST API usage (Splunk SDK clients)
[instrumentation.usage.rest]
search = index=_internal useragent=splunk-sdk* | eval endpointuri = case(like(uri_path, "%/authorization/capabilities%"), "authorization/capabilities", like(uri_path, "%/authorization/roles%"), "authorization/roles", like(uri_path, "%/configs/conf-%s%"), "configs/conf-%s", like(uri_path, "%/properties/%"), "properties", like(uri_path, "%/saved/eventtypes%"), "saved/eventtypes", like(uri_path, "%/deployment/client%"), "deployment/client", like(uri_path, "%/deployment/tenants%"), "deployment/tenants", like(uri_path, "%/deployment/server%"), "deployment/server", like(uri_path, "%/deployment/serverclass%"), "deployment/serverclass", like(uri_path, "%/storage/passwords%"), "storage/passwords", like(uri_path, "%/services/receivers/stream%"), "/services/receivers/stream", like(uri_path, "%/services/receivers/simple%"), "/services/receivers/simple", like(uri_path, "%/authentication/users%"), "authentication/users", like(uri_path, "%/saved/searches%"), "saved/searches", like(uri_path, "%/data/modular_inputs%"), "data/modular_inputs", like(uri_path, "%/data/input%"), "data/input", like(uri_path, "%/data/indexes%"), "data/indexes", like(uri_path, "%/alerts/fired_alerts%"), "/alerts/fired_alerts", like(uri_path, "%messages%"), "messages", like(uri_path, "%/search/jobs%"), "search/jobs" ) | stats count by endpointuri, status, method, useragent | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | eval data="{ \"endpointUri\" : \""+endpointuri+"\", \"status\" : \""+status+"\", \"method\" : \""+method+"\", \"useragent\" : \""+useragent+"\", \"count\" : \""+count+"\" }" | fields _time date data
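# Illustrative shape of the data payload the search above emits; the field values shown
# here (endpoint, status, useragent, count) are hypothetical and only meant as a reference:
# { "endpointUri" : "saved/searches", "status" : "200", "method" : "GET", "useragent" : "splunk-sdk-python/1.7.3", "count" : "42" }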
# bucket merge
[instrumentation.usage.bucketmerge.standalone]
search = index=_internal source=*splunkd-utility.log* component=BucketMergerCmd (command=merge OR command=dryrun OR command=list) | table command, newBucketsCount, oldBucketsCount, durationSec | makejson command newBucketsCount oldBucketsCount durationSec output="data" | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.usage.bucketmerge.clustered]
search = index=_internal source=*mergebuckets.log* ("all peers" action=merge OR action=dryrun) OR action=list | eval command=action | eval newBucketsCount=mergedBuckets | eval oldBucketsCount=totalBucketsToMerge | eval indexersCount=if(isnotnull(peers),peers,0) | eval bucketsFailedToMergeCount=if(isnotnull(bucketsUnableToMerge),bucketsUnableToMerge,0) |table command, newBucketsCount, oldBucketsCount, bucketsFailedToMergeCount, indexersCount | makejson command newBucketsCount oldBucketsCount bucketsFailedToMergeCount indexersCount output="data" | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
# local-proxy feature usage
[instrumentation.usage.localproxy.config]
search = | rest splunk_server=local /services/local_proxy/config | eval disabled='info.disabled', max_concurrent_requests='info.max_concurrent_requests', response_timeout_ms='info.response_timeout_ms' | makejson disabled, max_concurrent_requests, response_timeout_ms output=data | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.usage.localproxy.info]
search = index=_internal sourcetype=splunkd component=LocalProxyRestHandler log_level=INFO pending_responses duration_msec status=200 | bin _time span=1m | stats avg(duration_msec) as avgLatency min(duration_msec) as minLatency max(duration_msec) as maxLatency avg(pending_responses) as avgRequests min(pending_responses) as minRequests max(pending_responses) as maxRequests count as requests by _time | makejson output=data | eval date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.usage.localproxy.client]
search = index=_internal sourcetype=splunkd_ui_access status=200 method!=GET | rex field=uri "(.*)/services(?<matching>/.*)" | join left=uiAccess right=lpAccess where uiAccess.matching = lpAccess.uri_path [| search index=_internal component=LocalProxyRestHandler sourcetype=splunkd log_level=INFO status=200 uri_path | stats count by uri_path | fields uri_path] | stats count by uiAccess.matching, uiAccess.useragent | fields uiAccess.matching, uiAccess.useragent, count | rename uiAccess.useragent as client, uiAccess.matching as uri | makejson uri,client, count output=data | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
# supervisor stability
[instrumentation.usage.supervisor.stability]
search = index=_internal sourcetype=splunkd component=SidecarThread log_level=ERROR Sidecar "failed to run" Status | stats count by Sidecar | eval _time=now() | eval date=strftime(_time, "%Y-%m-%d") | makejson Sidecar, count output=data | fields _time date data
# config tracker feature usage
[instrumentation.usage.configtracker.info]
search = | rest splunk_server=local /services/config_tracker/config | eval disabled='info.disabled', mode='info.mode', denylist='info.denylist', exclude_fields='info.exclude_fields', uses_inotify='info.uses_inotify' | makejson disabled, mode, denylist, exclude_fields, uses_inotify output=data | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.usage.configtracker.introspection]
search = index=_audit action=search info=granted (search="*index=_c*" OR search="*index=*_configtracker*") NOT (search=*"index=*_audit"*) | stats dc(user) AS "user_count" count AS "total_search_count" | eval _time=now() | eval date=strftime(_time, "%Y-%m-%d") | makejson user_count total_search_count output=data | fields data _time
[instrumentation.usage.configtracker.searches]
search = index="_configtracker" | top limit=500 data.action data.path data.changes{}.stanza data.changes{}.properties{}.name | rex field=data.path ".*\/etc\/*(?<path>.*)" | eval path = "$SPLUNK_HOME/etc/" + 'path' | eval stanza = 'data.changes{}.stanza' | eval prop = 'data.changes{}.properties{}.name' | eval _time=now() | eval date=strftime(_time, "%Y-%m-%d") | makejson path stanza prop count output=data | fields data _time
[instrumentation.usage.cmredundancy]
search = index=_internal source=*metrics.log* splunk_server=local group=cm_redundancy* | stats sum(indexers_pinged_for_switchover) as indexersPingedForSwitchover, sum(indexers_reachable_by_this_cm) as indexersReachableByThisCm, sum(indexers_can_reach_active_cm) as indexersCanReachActiveCm, sum(hb_received) as hbReceived, sum(retry_sent) as retrySent, sum(switchover_attempts) as switchoverAttempts, sum(lb_healthcheck_count) as lbHealthcheckCount, median(standby_bundle_sync_rate_kBps) as standbyBundleSyncRateKBps, median(standby_generation_sync_avg_elapsed_sec) as standbyGenerationSyncAvgElapsedSec | makejson indexersPingedForSwitchover indexersReachableByThisCm indexersCanReachActiveCm hbReceived retrySent switchoverAttempts lbHealthcheckCount standbyBundleSyncRateKBps standbyGenerationSyncAvgElapsedSec output="data" | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
# s3 tsidx streaming compression
[instrumentation.usage.s3tsidxcompression]
search = index=_internal source=*/metrics.log group=s3client name=get | stats sum(num_files) as downloadedFiles, sum(decompressed_kb) as downloadedDecompressedKb, sum(compressed_kb) as downloadedCompressedKb | appendcols [| search index=_internal source=*/metrics.log group=s3client name=put | stats sum(num_files) as uploadedFiles, sum(decompressed_kb) as uploadedOriginalKb, sum(compressed_kb) as uploadedCompressedKb] | makejson downloadedFiles downloadedCompressedKb downloadedDecompressedKb uploadedFiles uploadedOriginalKb uploadedCompressedKb output=data | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
# KVStore upgrade
[instrumentation.upgrade.kvstore.standalone]
search = index=_internal source="*migration.log*" (start_time=* OR stop_time=*) | stats latest(start_time) as start latest(stop_time) as stop | appendcols [| rest /services/server/info] |eval a = strptime(start,"%Y-%m-%d %H:%M:%S") | eval b = strptime(stop,"%Y-%m-%d %H:%M:%S") | eval durationSecs = floor(b - a) | makejson durationSecs server_roles start stop output=data | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.upgrade.kvstore.shc]
search = index=_internal component=SHCMasterKVStoreMigrationState "Completed KVStore update for search head cluster" | eval duration = end_time - start_time | eval version = version | eval type = type | makejson start_time end_time duration type version output=data | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
# TLS feature usage
[instrumentation.usage.tlsBestPractices]
search = | rest /services/configs/conf-server/kvstore | eval sslVerifyServerCert=if(isnull(sslVerifyServerCert),"unset",sslVerifyServerCert), splunk_server=sha256(splunk_server), kvstore_configuredSystem=if(app_dir="system","true", "false") \
| stats values(sslVerifyServerCert) as kvstore_sslVerifyServerCert values(eai:acl.app) as kvstore_configuredApp by splunk_server | eval kvstore_configuredSystem=if(kvstore_configuredApp="system","true","false") | fields kvstore_sslVerifyServerCert, splunk_server, kvstore_configuredSystem \
| append [| rest /services/configs/conf-server/sslConfig | eval sslVerifyServerName=if(isnull(sslVerifyServerName),"unset",sslVerifyServerName), splunk_server=sha256(splunk_server) | stats values(sslVerifyServerName) as servername_sslVerifyServerName values(eai:acl.app) as servername_configuredApp by splunk_server | eval servername_configuredSystem=if(servername_configuredApp="system","true","false") | fields servername_sslVerifyServerName, splunk_server, servername_configuredSystem] \
| append [| rest /services/configs/conf-server/sslConfig | eval sslVerifyServerCert=if(isnull(sslVerifyServerCert),"unset",sslVerifyServerCert), splunk_server=sha256(splunk_server) | stats values(eai:acl.app) as global_configuredApp values(sslVerifyServerCert) as global_sslVerifyServerCert by splunk_server | eval global_configuredSystem=if(global_configuredApp="system","true","false") | fields global_sslVerifyServerCert, splunk_server, global_configuredSystem] \
| append [| rest /services/configs/conf-pythonSslClientConfig | eval sslVerifyServerCert=if(isnull(sslVerifyServerCert),"unset",sslVerifyServerCert), splunk_server=sha256(splunk_server)| stats values(eai:acl.app) as python_configuredApp values(sslVerifyServerCert) as python_sslVerifyServerCert by splunk_server | eval python_configuredSystem=if(python_configuredApp="system","true","false") | fields python_sslVerifyServerCert, splunk_server, python_configuredSystem] \
| append [| rest /services/configs/conf-web/settings | eval mgmtHostPort=if(isnull(mgmtHostPort),"unset",mgmtHostPort), splunk_server=sha256(splunk_server) | stats values(eai:acl.app) as fwdrMgmtHostPort_configuredApp values(mgmtHostPort) as fwdr_mgmtHostPort by splunk_server | eval fwdrMgmtHostPort_configuredSystem=if(fwdrMgmtHostPort_configuredApp="system","true","false") | fields fwdr_mgmtHostPort, splunk_server, fwdrMgmtHostPort_configuredSystem] \
| append [| rest /services/configs/conf-server/sslConfig | eval cliVerifyServerName=if(isnull(cliVerifyServerName),"unset",cliVerifyServerName), splunk_server=sha256(splunk_server) | stats values(cliVerifyServerName) as cli_cliVerifyServerName values(eai:acl.app) as cli_configuredApp by splunk_server | eval cli_configuredSystem=if(cli_configuredApp="system","true","false") | fields cli_cliVerifyServerName, splunk_server, cli_configuredSystem] | stats values(*) as * by splunk_server | makejson output=data \
| outputtelemetry input=data anonymous=true support=true component="usage.tlsBestPractices" optinrequired=3 type=event
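# To spot-check what a search like the one above would emit on a given instance, one option
# (a minimal sketch; it assumes the saved search is visible in the current app/user context,
# and note that running it will also exercise the outputtelemetry command) is to run it by
# name and inspect the data field:
# | savedsearch "instrumentation.usage.tlsBestPractices" | table data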
# Federated Search Usage
[instrumentation.usage.federatedsearch.fsh]
search = index=_audit [| rest /services/authentication/users splunk_server=local | dedup splunk_server | rename splunk_server AS host | table host] sourcetype=audittrail TERM(action=search) user!="splunk-system-user" user!="n/a" is_federated_search=1 | eval search_type="fsh_search" | table _time user app search_type info search_id has_error_warn fully_completed_search total_run_time exec_time | eval user=sha256(user) | makejson output=data | fields + data _time\
| outputtelemetry input=data anonymous=true support=true component="usage.federatedsearch.fsh" optinrequired=3 type=event
[instrumentation.usage.federatedsearch.rsh]
search = index=_audit \
[| rest /services/server/status splunk_server=local \
| dedup splunk_server \
| rename splunk_server AS host \
| table host] \
sourcetype=audittrail TERM(action=search) user!="splunk-system-user" user!="n/a" federated_user!="" \
| eval search_type="rsh_search" \
| table _time user app search_type info search_id has_error_warn fully_completed_search total_run_time exec_time federated_sid federated_user federated_version federated_guid use_fsh_ko fsh_streaming_phase_only\
| eval user=sha256(user), federated_user=sha256(federated_user) \
| makejson output=data \
| fields + data _time\
| outputtelemetry input=data anonymous=true support=true component="usage.federatedsearch.rsh" optinrequired=3 type=event
# Monitoring Console
[instrumentation.app.splunk_monitoring_console.info]
search = | rest splunk_server=local /services/server/roles | fields role_list \
| appendcols [ | rest splunk_server=local /servicesNS/-/splunk_monitoring_console/configs/conf-splunk_monitoring_console_assets/settings \
| eval mode = case(configuredPeers="", "standalone", true(), "distributed") \
| fields disabled, mode, mc_auto_config ] | makejson output=data \
| eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
# Ingest actions usage
[instrumentation.usage.ingestactions.rulesets]
search = | rest splunk_server=local /services/data/ingest/rulesets serialize_rules=true | spath input=rules | rename {}.* as * \
| fields dest | mvexpand dest| eval dest=split(dest,",") | mvexpand dest | eval dest = replace(dest,"rfs:", "") | dedup dest \
| join type=left [|rest splunk_server=local services/data/ingest/rfsdestinations \
| rex field=path "(?<destinationType>.*?):" | rename title as dest | fields dest destinationType] \
| stats count by destinationType | eval data="\""+destinationType+"\":"+tostring(count) | fields data \
| append [| rest splunk_server=local /services/data/ingest/rulesets serialize_rules=true | spath input=rules | rename {}.* as * \
| stats count by action | eval data="\""+action+"\":"+tostring(count) | fields data \
| append [| rest splunk_server=local /services/data/ingest/rulesets serialize_rules=true | spath input=rules | rename {}.* as * \
| stats count by clone | where clone = "true" | eval data="\"clone\":"+tostring(count) | fields data]] \
| append [| rest splunk_server=local /services/data/ingest/rulesets serialize_rules=true | spath input=rules | rename {}.* as * \
| stats count(match) as maskRegexCount count(cond.filter.match) as filterRegexCount count(cond.filter.expr) as filterEvalExprCount count(cond.route.match) as routeRegexCount count(cond.route.expr) as routeEvalExprCount dc(expr) as uniqueIndexCount \
| makejson output=data| fields data | eval data = replace(data, "{", "")| eval data = replace(data, "}", "")] \
| mvcombine delim="," data | nomv data | eval data = "{" + data + "}" | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
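# Illustrative shape of the payload assembled above from the mvcombine'd key:value fragments.
# The keys depend on the configured rulesets and destinations; "<action>" stands in for
# whichever ruleset action is configured, and all counts below are hypothetical:
# {"s3":2,"<action>":4,"clone":1,"maskRegexCount":3,"filterRegexCount":1,"filterEvalExprCount":0,"routeRegexCount":2,"routeEvalExprCount":0,"uniqueIndexCount":2}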
[instrumentation.usage.ingestactions.destinations]
search = | rest splunk_server=local services/data/ingest/rfsdestinations | rex field=path "(?<destinationType>.*?):" | rename remote.s3.* as * \
| eval authMethodAccesskey=if(destinationType="s3" AND isnotnull(access_key) AND isnotnull(secret_key), "true", "false") \
| eval authMethodIAM=if(destinationType="s3" AND isnull(access_key) AND isnull(secret_key), "true", "false") \
| rename signature_version as signatureVersion supports_versioning as supportsVersioning url_version as urlVersion \
| fields authMethodAccesskey authMethodIAM batchSizeThresholdKB batchTimeout compression destinationType dropEventsOnUploadError encryption signatureVersion supportsVersioning urlVersion \
| makejson output=data | fields data | mvcombine delim="," data | nomv data | eval data="{ \"destinations\" : ["+data+"]}" | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
[instrumentation.usage.ingestactions.deletions]
search = index=_internal sourcetype=splunkd source=*splunkd.log* (component=IngestActionsRulesHandler OR component=IngestActionsRfsDestinationHandler) ("Deleted ruleset" OR "Deleted rfs destination") \
| eval actionType=if(match(_raw, "Deleted ruleset"), "rulesetDeletion", "destinationDeletion") \
| stats count by actionType | eval data="\""+actionType+"\":"+tostring(count) | fields data | mvcombine delim="," data | nomv data \
| eval data = "{"+data+"}" | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
# Field filters telemetry
[instrumentation.usage.search.fieldFilters]
search = | rest /servicesNS/-/-/authorization/fieldfilters \
| eval filterName = sha1(title), isFieldFilteringEnabled = if('disabled'==1, "false", "true"), isTargetIndexSpecified = if(isnull('index'), "false", "true"), limitType = 'limit.key', fieldType = if('action.field'=="_raw", "raw", "indexed"), replacementMethod = case('action.operator'=="sha256()","SHA256", 'action.operator'=="sha512()","SHA512",'action.operator'=="null()","NULL",true(),"STRING"), areExemptionRolesSpecified = if(isnull('roleExemptions'), "false", "true") \
| fields filterName, isFieldFilteringEnabled, isTargetIndexSpecified, limitType, fieldType, replacementMethod, areExemptionRolesSpecified \
| makejson filterName isFieldFilteringEnabled isTargetIndexSpecified limitType fieldType replacementMethod areExemptionRolesSpecified output="data" | eval _time=now(), date=strftime(_time, "%Y-%m-%d") | fields _time date data
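# Illustrative example of a single record emitted by the search above (all values are
# hypothetical; filterName is the sha1 of the filter title, limitType mirrors limit.key):
# {"filterName":"<sha1 of title>","isFieldFilteringEnabled":"true","isTargetIndexSpecified":"false","limitType":"sourcetype","fieldType":"raw","replacementMethod":"SHA256","areExemptionRolesSpecified":"true"}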
