Got the example chart working for netdata. WIP: getting the newsblur data collector working. Volumes set up correctly for netdata.

Author: Jonathan Math 2021-04-06 13:33:30 -05:00
Parent: a85633296b
Commit: e9a4949b1c
5 changed files with 10795 additions and 5 deletions

.gitignore

@@ -84,3 +84,6 @@ clients/android/NewsBlur/settings.gradle
**/node_modules
*.tfstate*
.terraform*
/docker/netdata/netdatacache/*
/docker/netdata/netdatalib/*


@@ -172,13 +172,9 @@ services:
volumes:
- ./docker/netdata/netdatalib:/var/lib/netdata
- ./docker/netdata/netdatacache:/var/cache/netdata
- ./docker/netdata/netdata.conf:/etc/netdata/netdata.conf
- ./docker/netdata/netdataconfig/netdata.conf:/etc/netdata/netdata.conf
- ./docker/netdata/netdataconfig/python.d/newsblur_data_request.py:/usr/libexec/netdata/python.d/newsblur_data_request.chart.py
- ./docker/netdata/netdataconfig/python.d.conf:/usr/lib/netdata/conf.d/python.d.conf
- ./docker/netdata/netdataconfig/python.d/newsblur_data_request.conf:/usr/lib/netdata/conf.d/python.d/newsblur_data_request.conf
- ./utils/netdata/config/go.d.conf:/usr/lib/netdata/conf.d/go.d.conf
- /etc/passwd:/host/etc/passwd:ro
- /etc/group:/host/etc/group:ro
- /proc:/host/proc:ro
- /sys:/host/sys:ro
- /etc/os-release:/host/etc/os-release:ro
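
The newsblur_data_request.chart.py collector mounted above is the part still marked WIP and its contents are not shown in this diff. As a rough sketch only (the endpoint URL, metric names, and chart layout here are assumptions, not the actual module), a netdata python.d collector of that shape typically subclasses UrlService and returns a flat dict of dimension values:

# Hypothetical sketch only -- not the actual newsblur_data_request.chart.py from this commit.
import json

from bases.FrameworkServices.UrlService import UrlService  # netdata python.d framework

update_every = 5  # seconds between collections

ORDER = ['feeds']
CHARTS = {
    'feeds': {
        # options: [name, title, units, family, context, chart_type]
        'options': [None, 'NewsBlur feed fetches', 'fetches', 'newsblur', 'newsblur.feeds', 'line'],
        'lines': [
            ['feeds_fetched'],
            ['feeds_errored'],
        ],
    },
}


class Service(UrlService):
    def __init__(self, configuration=None, name=None):
        UrlService.__init__(self, configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = CHARTS
        # placeholder endpoint; the real URL would come from newsblur_data_request.conf
        self.url = self.configuration.get('url', 'http://newsblur_web:8000/monitor/feeds')

    def _get_data(self):
        # UrlService fetches self.url; return a flat {dimension_id: value} dict, or None on failure
        raw = self._get_raw_data()
        if raw is None:
            return None
        stats = json.loads(raw)
        return {
            'feeds_fetched': stats.get('feeds_fetched', 0),
            'feeds_errored': stats.get('feeds_errored', 0),
        }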

File diff suppressed because it is too large.


@@ -0,0 +1,109 @@
# netdata python.d.plugin configuration
#
# This file is in YaML format.
# Generally the format is:
#
# name: value
#
# Enable / disable the whole python.d.plugin (all its modules)
enabled: yes
# ----------------------------------------------------------------------
# Enable / Disable python.d.plugin modules
#default_run: yes
#
# If "default_run" = "yes" the default for all modules is enabled (yes).
# Setting any of these to "no" will disable it.
#
# If "default_run" = "no" the default for all modules is disabled (no).
# Setting any of these to "yes" will enable it.
# Enable / Disable explicit garbage collection (full collection run). Default is enabled.
gc_run: yes
# Garbage collection interval in seconds. Default is 300.
gc_interval: 300
# apache: yes
# apache_cache has been replaced by web_log
# adaptec_raid: yes
# alarms: yes
# am2320: yes
# anomalies: no
apache_cache: no
# beanstalk: yes
# bind_rndc: yes
# boinc: yes
# ceph: yes
chrony: no
# couchdb: yes
# dns_query_time: yes
# dnsdist: yes
# dockerd: yes
# dovecot: yes
# elasticsearch: yes
# energid: yes
newsblur_data_request: yes
# this is just an example
example: yes
# exim: yes
# fail2ban: yes
# freeradius: yes
# gearman: yes
go_expvar: no
# gunicorn_log has been replaced by web_log
gunicorn_log: no
# haproxy: yes
# hddtemp: yes
# httpcheck: yes
hpssa: no
# icecast: yes
# ipfs: yes
# isc_dhcpd: yes
# litespeed: yes
logind: no
# megacli: yes
# memcached: yes
# mongodb: yes
# monit: yes
# mysql: yes
# nginx: yes
# nginx_plus: yes
# nvidia_smi: yes
# nginx_log has been replaced by web_log
nginx_log: no
# nsd: yes
# ntpd: yes
# openldap: yes
# oracledb: yes
# ovpn_status_log: yes
# phpfpm: yes
# portcheck: yes
# postfix: yes
# postgres: yes
# powerdns: yes
# proxysql: yes
# puppet: yes
# rabbitmq: yes
# redis: yes
# rethinkdbs: yes
# retroshare: yes
# riakkv: yes
# samba: yes
# sensors: yes
# smartd_log: yes
# spigotmc: yes
# springboot: yes
# squid: yes
# traefik: yes
# tomcat: yes
# tor: yes
# uwsgi: yes
# varnish: yes
# w1sensor: yes
# web_log: yes


@@ -0,0 +1,87 @@
# netdata python.d.plugin configuration for example
#
# This file is in YaML format. Generally the format is:
#
# name: value
#
# There are 2 sections:
# - global variables
# - one or more JOBS
#
# JOBS allow you to collect values from multiple sources.
# Each source will have its own set of charts.
#
# JOB parameters have to be indented (using spaces only, example below).
# ----------------------------------------------------------------------
# Global Variables
# These variables set the defaults for all JOBs, however each JOB
# may define its own, overriding the defaults.
# update_every sets the default data collection frequency.
# If unset, the python.d.plugin default is used.
# update_every: 1
# priority controls the order of charts at the netdata dashboard.
# Lower numbers move the charts towards the top of the page.
# If unset, the default for python.d.plugin is used.
# priority: 60000
# penalty indicates whether to apply penalty to update_every in case of failures.
# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
# penalty: yes
# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
# Attempts to start the job are made once every autodetection_retry.
# This feature is disabled by default.
# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
#
# The default JOBS share the same *name*. JOBS with the same name
# are mutually exclusive. Only one of them will be allowed running at
# any time. This allows autodetection to try several alternatives and
# pick the one that works.
#
# Any number of jobs is supported.
#
# All python.d.plugin JOBS (for all its modules) support a set of
# predefined parameters. These are:
#
# job_name:
# name: myname # the JOB's name as it will appear on the dashboard (by default it is the job_name)
# # JOBs sharing a name are mutually exclusive
# update_every: 1 # the JOB's data collection frequency
# priority: 60000 # the JOB's order on the dashboard
# penalty: yes # the JOB's penalty
# autodetection_retry: 0 # the JOB's re-check interval in seconds
#
# Additionally to the above, example also supports the following:
#
# num_lines: 4 # the number of lines to create
# lower: 0 # the lower bound of numbers to randomly sample from
# upper: 100 # the upper bound of numbers to randomly sample from
#
# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
four_lines:
name: "Four Lines" # the JOB's name as it will appear on the dashboard
update_every: 1 # the JOB's data collection frequency
priority: 60000 # the JOB's order on the dashboard
penalty: yes # the JOB's penalty
autodetection_retry: 0 # the JOB's re-check interval in seconds
num_lines: 4 # the number of lines to create
lower: 0 # the lower bound of numbers to randomly sample from
upper: 100 # the upper bound of numbers to randomly sample from
# If you wanted to run another job in addition to the one above, you would just
# uncomment the job configuration below.
# two_lines:
# name: "Two Lines" # the JOB's name as it will appear on the dashboard
# num_lines: 2 # the number of lines to create
# lower: 50 # the lower bound of numbers to randomly sample from
# upper: 75 # the upper bound of numbers to randomly sample from
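
For reference, the num_lines / lower / upper options consumed by the four_lines job above map onto a collector along these lines; this is a paraphrased sketch of what netdata's bundled example module does, not a verbatim copy of example.chart.py:

# Approximate sketch of netdata's bundled example module; not a verbatim copy.
from random import SystemRandom

from bases.FrameworkServices.SimpleService import SimpleService

ORDER = ['random']
CHARTS = {
    'random': {
        'options': [None, 'A random number', 'number', 'random', 'example.random', 'line'],
        'lines': [],  # dimensions are added dynamically below, one per num_lines
    },
}


class Service(SimpleService):
    def __init__(self, configuration=None, name=None):
        SimpleService.__init__(self, configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = CHARTS
        self.random = SystemRandom()
        # job options from the config above ("Four Lines")
        self.num_lines = self.configuration.get('num_lines', 4)
        self.lower = self.configuration.get('lower', 0)
        self.upper = self.configuration.get('upper', 100)

    @staticmethod
    def check():
        return True

    def get_data(self):
        # one random value per configured line, sampled from [lower, upper]
        data = dict()
        for i in range(self.num_lines):
            dimension_id = 'random{0}'.format(i)
            if dimension_id not in self.charts['random']:
                self.charts['random'].add_dimension([dimension_id])
            data[dimension_id] = self.random.randint(self.lower, self.upper)
        return data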