```Host name, FQDN, and resolved host name of the local Splunk server, from the server/info REST endpoint``` | rest /services/server/info splunk_server=local | table host, host_fqdn, host_resolved
0 comments
```Splunk server uptime (seconds and minutes since splunkd start), with server roles, from the server/info REST endpoint``` | rest /services/server/info | eval secUp = now() - startup_time | eval minutesUp = secUp / 60 | table serverName, server_roles, secUp, minutesUp | rename serverName AS "Splunk Server", server_roles AS "Server Roles", secUp AS "Uptime (sec)", minutesUp AS "Uptime (min)"
0 comments
```Scheduled-search performance report: run-time and result-count statistics per saved search from scheduler logs, joined (outer) with schedule metadata from the saved/searches REST endpoint. App comes from the scheduler events' "app" field; the subsearch's dead "eai:acl.app" rename (the field was never kept by the fields clause) is removed, as is the makemv on the non-existent "values(host)" field — stats already emits it as the multivalue field "hosts".``` index=_internal sourcetype=scheduler result_count | stats avg(result_count) min(result_count) max(result_count), sparkline avg(run_time) min(run_time) max(run_time) sum(run_time) values(host) AS hosts count AS execution_count by savedsearch_name, app | join savedsearch_name type=outer [| rest /servicesNS/-/-/saved/searches | fields title eai:acl.owner cron_schedule dispatch.earliest_time dispatch.latest_time search | rename title AS savedsearch_name eai:acl.owner AS Owner cron_schedule AS "Cron Schedule" dispatch.earliest_time AS "Dispatch Earliest Time" dispatch.latest_time AS "Dispatch Latest Time"] | rename savedsearch_name AS "Saved Search Name" search AS "SPL Query" app AS App | sort - avg(run_time) | table "Saved Search Name", App, Owner, "SPL Query", "Dispatch Earliest Time", "Dispatch Latest Time", "Cron Schedule", hosts, execution_count, *(result_count), sum(run_time), *(run_time), sparkline | rename sparkline AS Trend, min(run_time) AS "Min Run Time", max(run_time) AS "Max Run Time", avg(run_time) AS "Avg Run Time", min(result_count) AS "Min Results", max(result_count) AS "Max Results", avg(result_count) AS "Avg Results", execution_count AS "Times Run", sum(run_time) AS "Total Time Spent"
0 comments
```Average fill level of the main indexing pipeline queues over time; the trailing "queue" is stripped from series names for readability``` index=_internal source=*metrics.log group=queue (name=aggqueue OR name=indexqueue OR name=parsingqueue OR name=typingqueue) | timechart avg(current_size) by name | rename *queue AS *
0 comments
```Percentage of scheduled searches that were skipped, mapped to a severity band. pct is rounded to an integer, so the rangemap bands are kept disjoint (0-9 / 10-19 / 20-49 / 50-100); the original overlapping bands made 10, 20, and 50 match two ranges at once.``` index=_internal sourcetype=scheduler | stats count AS total, count(eval(status="skipped")) AS skipped | eval pct=round(skipped/total * 100, 0) | rangemap field=pct low=0-9 elevated=10-19 severe=20-49 critical=50-100 | eval pct = pct . "%" | fields pct, range | rename pct AS "Percent Skipped Searches", range AS State
0 comments
```Distinct bucket count per (non-internal) index by bucket state, with hot and warm collapsed into a single "hot/warm" series``` | dbinspect index=* | search index!=_* | eval state=if(state=="hot" OR state=="warm", "hot/warm", state) | chart dc(bucketId) over index by state
0 comments
```Distinct bucket count per (non-internal) index on each indexer``` | dbinspect index=* | search NOT index=_* | chart dc(bucketId) over splunk_server by index | rename splunk_server AS Indexer
0 comments
```Median memory usage over time, broken out by process class (splunkd server, search, index service, scripted input, KVStore, Splunk Web, other), from per-process introspection data. The two case() assignments run sequentially inside one eval: the first classifies non-splunkd processes, the second refines splunkd sub-roles and carries forward any class already set (isnotnull(process_class) branch); anything still unclassified becomes "other".``` index=_introspection sourcetype=splunk_resource_usage component=PerProcess host=* | eval process = 'data.process', args = 'data.args', sid = 'data.search_props.sid', elapsed = 'data.elapsed', mem_used = 'data.mem_used', mem = 'data.mem', pct_memory = 'data.pct_memory', app = 'data.search_props.app', type = 'data.search_props.type', mode = 'data.search_props.mode', user = 'data.search_props.user', role = 'data.search_props.role', process_class = case( process=="splunk-optimize","index service", process=="sh" OR process=="ksh" OR process=="bash" OR like(process,"python%") OR process=="powershell","scripted input", process=="mongod", "KVStore"), process_class = case( process=="splunkd" AND (like(args,"-p %start%") OR like(args,"service")),"splunkd server", process=="splunkd" AND isnotnull(sid),"search", process=="splunkd" AND (like(args,"fsck%") OR like(args,"recover-metadata%") OR like(args,"cluster_thing")),"index service", process=="splunkd" AND args=="instrument-resource-usage", "scripted input", (like(process,"python%") AND like(args,"%/appserver/mrsparkle/root.py%")) OR like(process,"splunkweb"),"Splunk Web", isnotnull(process_class), process_class), process_class = if(isnull(process_class),"other",process_class) ```keep one sample per pid per timestamp to avoid double-counting, then sum memory by class``` | stats latest(data.mem_used) AS resource_usage_dedup latest(process_class) AS process_class by data.pid, _time | stats sum(resource_usage_dedup) AS resource_usage by _time, process_class | timechart minspan=10s median(resource_usage) AS "Resource Usage" by process_class
0 comments
```Most recent event per index — hosts, sourcetypes, latest event time, and hours elapsed since that event``` | tstats latest(_time) AS Latest where index=* by host sourcetype index | eval time_since_last=round((now() - Latest) / 3600, 2) | stats list(host) AS host, list(sourcetype) AS sourcetype, list(Latest) AS "Latest Event" list(time_since_last) AS "Time since last event (hours)" by index | convert ctime("Latest Event")
0 comments
```Hosts throttled at the forwarder maxKBps limit, bucketed by the configured limit. The case() branches are ordered and contiguous, closing the original gaps (kb=520, 770<kb<=771, kb=1210) that wrongly fell through to ">1024". count is aliased to "Hit Count" directly in stats — the original renamed the non-existent lowercase "count" (field names are case-sensitive), so the intended header never appeared — and the always-true "where Count >= 1" after stats is dropped.``` index=_internal sourcetype=splunkd "has reached maxKBps" | rex "Current data throughput \((?<kb>\S+)" | eval throughput=case(kb < 500, "256", kb < 520, "512", kb < 770, "768", kb < 1210, "1024", 1=1, ">1024") | stats count AS "Hit Count" sparkline AS Trend by host, throughput | rename host AS "Host" throughput AS "Throughput rate(kb)" | sort -"Throughput rate(kb)", -"Hit Count"
0 comments