add the rest of the netdata charts

This commit is contained in:
Jonathan Math 2021-04-12 13:51:44 -05:00
parent b411261524
commit ce932af379
2 changed files with 109 additions and 81 deletions

View file

@ -23,6 +23,7 @@ class Service(SimpleService):
self.chart_name = self.configuration.get("chart_name")
self.endpoint = self.configuration.get("endpoint")
self.context = self.configuration.get("context")
self.chart_type = self.configuration.get("type", "line")
self.order = [
self.configuration.get("chart_name")
]

View file

@ -1,93 +1,120 @@
# netdata python.d.plugin configuration for example
#
# This file is in YAML format. Generally the format is:
#
# name: value
#
# There are 2 sections:
# - global variables
# - one or more JOBS
#
# JOBS allow you to collect values from multiple sources.
# Each source will have its own set of charts.
#
# JOB parameters have to be indented (using spaces only, example below).
# ----------------------------------------------------------------------
# Global Variables
# These variables set the defaults for all JOBs, however each JOB
# may define its own, overriding the defaults.
# update_every sets the default data collection frequency.
# If unset, the python.d.plugin default is used.
# update_every: 1
# priority controls the order of charts at the netdata dashboard.
# Lower numbers move the charts towards the top of the page.
# If unset, the default for python.d.plugin is used.
# priority: 60000
# penalty indicates whether to apply penalty to update_every in case of failures.
# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
# Attempts to start the job are made once every autodetection_retry.
# This feature is disabled by default.
# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
#
# The default JOBS share the same *name*. JOBS with the same name
# are mutually exclusive. Only one of them will be allowed running at
# any time. This allows autodetection to try several alternatives and
# pick the one that works.
#
# Any number of jobs is supported.
#
# All python.d.plugin JOBS (for all its modules) support a set of
# predefined parameters. These are:
#
# job_name:
# name: myname # the JOB's name as it will appear on the dashboard
# # dashboard (by default is the job_name)
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, example also supports the following:
#
# num_lines: 4 # the number of lines to create
# lower: 0 # the lower bound of numbers to randomly sample from
# upper: 100 # the upper bound of numbers to randomly sample from
#
# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
app_servers:
name: "app servers"
update_every: 1 # the JOB's data collection frequency
priority: 60000 # the JOB's order on the dashboard
title: "NewsBlur App Server Page Loads" # the chart title
chart_name: "app_servers" # the chart name used in newsblur.chart.py
endpoint: "/app-servers" # the monitor endpoint
type: "stacked"
app_times:
name: "app times"
update_every: 1
priority: 60000
title: "NewsBlur App Times"
chart_name: "app_times"
endpoint: "/app-times"
# if you wanted to make another job to run in addition to the one above then
# you would just uncomment the job configuration below.
# two_lines:
# name: "Two Lines" # the JOB's name as it will appear on the dashboard
# num_lines: 2 # the number of lines to create
# lower: 50 # the lower bound of numbers to randomly sample from
# upper: 75 # the upper bound of numbers to randomly sample from
classifiers:
name: "classifiers"
update_every: 1
priority: 60000
title: "NewsBlur Classifiers"
chart_name: "classifiers"
endpoint: "/classifiers"
db_times:
name: "db times"
update_every: 1
priority: 60000
title: "NewsBlur DB Times"
chart_name: "db_times"
endpoint: "/db-times"
fetching_history:
name: "fetching history"
update_every: 1
priority: 60000
title: "NewsBlur Fetching History"
chart_name: "fetching_history"
endpoint: "/errors"
feed_counts:
name: "feed counts"
update_every: 1
priority: 60000
title: "NewsBlur Feed Counts"
chart_name: "feed_counts"
endpoint: "/feed-counts"
feeds:
name: "feeds"
update_every: 1
priority: 60000
title: "NewsBlur Feeds & Subscriptions"
chart_name: "feeds"
endpoint: "/feeds"
load_times:
name: "load times"
update_every: 1
priority: 60000
title: "NewsBlur Load Times"
chart_name: "load_times"
endpoint: "/load-times"
stories:
name: "stories"
update_every: 1
priority: 60000
title: "NewsBlur Stories"
chart_name: "stories"
endpoint: "/stories"
task_codes:
name: "task codes"
update_every: 1
priority: 60000
title: "NewsBlur Task Codes"
chart_name: "task_codes"
endpoint: "/task-codes"
task_pipeline:
name: "task pipeline"
update_every: 1
priority: 60000
title: "NewsBlur Task Pipeline"
chart_name: "" # NOTE(review): empty chart_name — confirm this job is intentionally left unwired
endpoint: "" # NOTE(review): empty endpoint — the collector presumably has nothing to poll here; verify
task_servers:
name: "task servers"
update_every: 1
priority: 60000
title: "NewsBlur Task Server Fetched"
chart_name: "task_servers"
endpoint: "/task-servers"
task_times:
name: "task server times"
update_every: 1
priority: 60000
title: "NewsBlur Task Server Times"
chart_name: "task_times"
endpoint: "/task-times"
updates:
name: "updates"
update_every: 1
priority: 60000
title: "NewsBlur Updates"
chart_name: "updates"
endpoint: "/updates"
users:
name: "users"
update_every: 1
priority: 60000
title: "NewsBlur Users"
chart_name: "users"
endpoint: "/users"