index=_internal sourcetype=splunkd_access source=*splunkd_access.log ```average REST endpoint response time from splunkd access log``` | rex field=_raw "- - - (?P<response>\d+)ms" ```\d+ avoids the greedy .* capture that could swallow everything up to the last "ms"``` | rex field=_raw "\"GET\s(?<endpoint>[^\?\d]+)" ```endpoint path, trimmed before query string / numeric IDs``` | stats avg(response) as avg_response by endpoint | sort - avg_response ```sort while still numeric: sorting the "...ms"-suffixed string was lexicographic, ranking "9.2ms" above "100.0ms"``` | eval "Avg Response Time"=tostring(round(avg_response,2))."ms" | rename endpoint as "REST API Endpoint" | fields "REST API Endpoint", "Avg Response Time"
0 comments
| tstats latest(_time) as "Last Received" by index, sourcetype ```most recent event time per index/sourcetype pair``` | eval "Last Received"=strftime('Last Received',"%m/%d/%Y %H:%M:%S") ```format BEFORE list(): scalar eval functions like strftime do not map across a multivalue field, so formatting after stats list() yields nulls``` | stats list(sourcetype) as sourcetype, list("Last Received") as "Last Received" by index
0 comments
index=_internal sourcetype=splunkd HttpListener "Socket error from " ```forwarder/client socket errors seen by splunkd's HTTP listener``` | rex "(?<errorLog>WARN\s+HttpListener\s.*?Socket error.*)" | rex field=errorLog "WARN\s+HttpListener\s.*?Socket error from\s+(?<ip>[^ ]+)" | rename ip as uf_ip | stats earliest(_time) AS Earliest latest(_time) as Latest count by uf_ip host errorLog ```%d is day-of-month; the original %b %m printed the month twice and never the day``` | eval Earliest=strftime(Earliest,"%b %d, %Y %H:%M:%S") | eval Latest=strftime(Latest,"%b %d, %Y %H:%M:%S") | table Earliest Latest uf_ip errorLog count | sort - count | rename Earliest as "First Seen", Latest as "Last Seen", uf_ip as "Source IP", errorLog as "Error Log", count as Count
0 comments
index=_internal sourcetype=splunkd source="*license_usage.log*" type=Usage ```project daily license consumption one week ahead``` | timechart span=1d sum(b) as b ```daily license volume is the TOTAL bytes indexed (sum); avg(b) was the mean per-event chunk size, which is meaningless as a usage trend``` | predict b future_timespan=7
0 comments
index=_internal sourcetype=scheduler result_count ```runtime/result statistics per scheduled saved search; bare sparkline = sparkline(count) trend. values(host) is already multivalue, so the old makemv on the nonexistent field "values(host)" was a no-op and is removed``` | stats avg(result_count) min(result_count) max(result_count), sparkline avg(run_time) min(run_time) max(run_time) sum(run_time) values(host) AS hosts count AS execution_count by savedsearch_name, app | join savedsearch_name type=outer [| rest /servicesNS/-/-/saved/searches | fields title eai:acl.owner cron_schedule dispatch.earliest_time dispatch.latest_time search | rename title AS savedsearch_name eai:acl.owner AS Owner cron_schedule AS "Cron Schedule" dispatch.earliest_time AS "Dispatch Earliest Time" dispatch.latest_time AS "Dispatch Latest Time"] ```enrich with scheduling config via REST; dropped the dead rename of eai:acl.app, a field the fields command never kept``` | rename savedsearch_name AS "Saved Search Name" search AS "SPL Query" app AS App | sort - avg(run_time) | table "Saved Search Name", App, Owner, "SPL Query", "Dispatch Earliest Time" "Dispatch Latest Time" "Cron Schedule" hosts, execution_count, *(result_count), sum(run_time) *(run_time), sparkline | rename sparkline as Trend, min(run_time) as "Min Run Time", max(run_time) as "Max Run Time", avg(run_time) as "Avg Run Time", min(result_count) as "Min Results", max(result_count) as "Max Results", avg(result_count) as "Avg Results", execution_count as "Times Run", sum(run_time) as "Total Time Spent"
0 comments
index=_internal source=*metrics.log group=queue name IN (parsingqueue, indexqueue, typingqueue, aggqueue) ```average fill level of the four main indexing pipeline queues over time``` | timechart avg(current_size) by name | rename *queue as *
0 comments
index=_internal sourcetype=scheduler ```percentage of scheduled searches that were skipped, bucketed into a severity range``` | stats count as total, count(eval(status=="skipped")) as skipped ```== is the eval comparison operator; a single = inside eval() is rejected as malformed``` | eval pct=round(skipped/total * 100, 0) | rangemap field=pct low=0-10 elevated=11-20 severe=21-50 critical=51-100 ```stray comma removed and boundaries made disjoint: pct is a rounded integer, and 10/20/50 previously matched two ranges``` | eval pct = pct . "%" | fields pct, range | rename pct as "Percent Skipped Searches", range as State
0 comments
| dbinspect index=* | search index!=_* ```distinct bucket counts per non-internal index, split by lifecycle state``` | eval state=case(state=="warm" OR state=="hot","hot/warm",true(),state) ```true() is the documented catch-all for case(); the bare 1=1 is not a valid eval comparison (eval requires == or true())``` | chart dc(bucketId) over index by state
0 comments
| dbinspect index=* ```distinct bucket counts per indexer, split by non-internal index``` | search NOT index=_* | rename splunk_server as Indexer | chart dc(bucketId) over Indexer by index
0 comments
index=_introspection sourcetype=splunk_resource_usage component=PerProcess host=* ```per-process memory usage samples from the introspection index, classified by what each process does and charted over time``` | eval process = 'data.process', args = 'data.args', sid = 'data.search_props.sid', elapsed = 'data.elapsed', mem_used = 'data.mem_used', mem = 'data.mem', pct_memory = 'data.pct_memory', app = 'data.search_props.app', type = 'data.search_props.type', mode = 'data.search_props.mode', user = 'data.search_props.user', role = 'data.search_props.role', process_class = case( process=="splunk-optimize","index service", process=="sh" OR process=="ksh" OR process=="bash" OR like(process,"python%") OR process=="powershell","scripted input", process=="mongod", "KVStore"), process_class = case( process=="splunkd" AND (like(args,"-p %start%") OR like(args,"service")),"splunkd server", process=="splunkd" AND isnotnull(sid),"search", process=="splunkd" AND (like(args,"fsck%") OR like(args,"recover-metadata%") OR like(args,"cluster_thing")),"index service", process=="splunkd" AND args=="instrument-resource-usage", "scripted input", (like(process,"python%") AND like(args,"%/appserver/mrsparkle/root.py%")) OR like(process,"splunkweb"),"Splunk Web", isnotnull(process_class), process_class), process_class = if(isnull(process_class),"other",process_class) ```three chained assignments: the second case() refines splunkd/web processes and falls through to the first classification via isnotnull(process_class); anything still unclassified becomes "other" — the assignment ORDER inside this eval is load-bearing``` | stats latest(data.mem_used) AS resource_usage_dedup latest(process_class) AS process_class by data.pid, _time ```collapse to one sample per PID per timestamp so repeated introspection rows are not double-counted``` | stats sum(resource_usage_dedup) AS resource_usage by _time, process_class ```NOTE(review): mem_used is presumably MB per the introspection schema — confirm before labeling axes``` | timechart minspan=10s median(resource_usage) AS "Resource Usage" by process_class ```median smooths transient spikes; minspan bounds chart resolution```
0 comments