Caching newsblur_users.py statistics.

Samuel Clay 2022-07-28 22:47:00 -04:00
parent 78547af24b
commit ee3b9e61a5
4 changed files with 49 additions and 11 deletions

View file

@@ -35,4 +35,6 @@
     "files.associations": {
         "*.yml": "ansible"
     },
+    "nrf-connect.toolchain.path": "${nrf-connect.toolchain:1.9.1}",
+    "C_Cpp.default.configurationProvider": "nrf-connect",
 }

View file

@@ -125,4 +125,18 @@ You got the downtime message either through email or SMS. This is the order of o
 crack are automatically fixed after 24 hours, but if many feeds fall through due to a bad
 deploy or electrical failure, you'll want to accelerate that check by just draining the
 tasked feeds pool, adding those feeds back into the queue. This command is idempotent.
 ## Python 3
+
+### Switching to a new redis server
+
+When the new redis server is connected to the primary redis server:
+
+    # db-redis-story2 = moving to new server
+    # db-redis-story = old server about to be shut down
+    make celery_stop
+    make maintenance_on
+    apd -l db-redis-story2 -t replicaofnoone
+    aps -l db-redis-story,db-redis-story2 -t consul
+    make maintenance_off
+    make task
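
The drain mentioned in the paragraph above this hunk never spells out the actual command. A rough sketch of what draining plausibly does, in Python with redis-py; the `tasked_feeds` sorted set and `queued_feeds` set names come from elsewhere in the NewsBlur codebase, not from this diff, and the host is hypothetical:

    import redis

    r = redis.Redis(host="db-redis-sessions")  # hypothetical host

    # Move every tasked feed back into the queue. Re-running is harmless:
    # SADD ignores members that are already present, and draining an empty
    # tasked_feeds is a no-op, which is why the doc can call it idempotent.
    tasked = r.zrange("tasked_feeds", 0, -1)
    if tasked:
        r.sadd("queued_feeds", *tasked)
        r.zremrangebyrank("tasked_feeds", 0, -1)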

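For the `replicaofnoone` step, Redis promotes a replica by being told it no longer replicates anyone. A minimal sketch of the equivalent call with redis-py, assuming the playbook boils down to this (host taken from the playbook invocation, port assumed):

    import redis

    # Promote db-redis-story2 to a standalone primary. Calling slaveof()
    # with no arguments sends SLAVEOF NO ONE, the legacy spelling of
    # REPLICAOF NO ONE, which Redis still accepts.
    r = redis.Redis(host="db-redis-story2", port=6379)
    r.slaveof()
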
View file

@@ -5,6 +5,7 @@ from django.shortcuts import render
 from django.views import View
 from apps.profile.models import Profile, RNewUserQueue
+from apps.statistics.models import MStatistics
 
 
 class Users(View):
@@ -12,16 +13,33 @@ class Users(View):
         last_year = datetime.datetime.utcnow() - datetime.timedelta(days=365)
         last_month = datetime.datetime.utcnow() - datetime.timedelta(days=30)
         last_day = datetime.datetime.utcnow() - datetime.timedelta(minutes=60*24)
+        expiration_sec = 60*60  # 1 hour
 
         data = {
-            'all': User.objects.count(),
-            'yearly': Profile.objects.filter(last_seen_on__gte=last_year).count(),
-            'monthly': Profile.objects.filter(last_seen_on__gte=last_month).count(),
-            'daily': Profile.objects.filter(last_seen_on__gte=last_day).count(),
-            'premium': Profile.objects.filter(is_premium=True).count(),
-            'archive': Profile.objects.filter(is_archive=True).count(),
-            'pro': Profile.objects.filter(is_pro=True).count(),
-            'queued': RNewUserQueue.user_count(),
+            'all': MStatistics.get('munin:users_all',
+                                   User.objects.count(),
+                                   set_default=True, expiration_sec=expiration_sec),
+            'yearly': MStatistics.get('munin:users_yearly',
+                                      Profile.objects.filter(last_seen_on__gte=last_year).count(),
+                                      set_default=True, expiration_sec=expiration_sec),
+            'monthly': MStatistics.get('munin:users_monthly',
+                                       Profile.objects.filter(last_seen_on__gte=last_month).count(),
+                                       set_default=True, expiration_sec=expiration_sec),
+            'daily': MStatistics.get('munin:users_daily',
+                                     Profile.objects.filter(last_seen_on__gte=last_day).count(),
+                                     set_default=True, expiration_sec=expiration_sec),
+            'premium': MStatistics.get('munin:users_premium',
+                                       Profile.objects.filter(is_premium=True).count(),
+                                       set_default=True, expiration_sec=expiration_sec),
+            'archive': MStatistics.get('munin:users_archive',
+                                       Profile.objects.filter(is_archive=True).count(),
+                                       set_default=True, expiration_sec=expiration_sec),
+            'pro': MStatistics.get('munin:users_pro',
+                                   Profile.objects.filter(is_pro=True).count(),
+                                   set_default=True, expiration_sec=expiration_sec),
+            'queued': MStatistics.get('munin:users_queued',
+                                      RNewUserQueue.user_count(),
+                                      set_default=True, expiration_sec=expiration_sec),
         }
 
         chart_name = "users"
         chart_type = "counter"
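
One caveat worth noting about this pattern: Python evaluates arguments before the call, so `User.objects.count()` and the `Profile` filter counts still run on every request even when a cached value comes back; the hour-long cache stabilizes the reported numbers rather than skipping the queries. A sketch of a lazy variant, built only on the `MStatistics.get`/`MStatistics.set` calls visible in this diff (the `get_or_set` helper itself is hypothetical, not part of this commit):

    from apps.statistics.models import MStatistics

    def get_or_set(key, compute, expiration_sec=None):
        # Only run the expensive callable on a cache miss.
        cached = MStatistics.get(key)
        if cached is not None:
            return cached
        value = compute()
        MStatistics.set(key, value, expiration_sec=expiration_sec)
        return value

    # e.g. data['all'] = get_or_set('munin:users_all', User.objects.count,
    #                               expiration_sec=expiration_sec)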

View file

@@ -28,12 +28,16 @@ class MStatistics(mongo.Document):
         return "%s: %s" % (self.key, self.value)
 
     @classmethod
-    def get(cls, key, default=None):
+    def get(cls, key, default=None, set_default=False, expiration_sec=None):
         obj = cls.objects.filter(key=key).first()
         if not obj:
+            if set_default:
+                cls.set(key, default, expiration_sec=expiration_sec)
             return default
         if obj.expiration_date and obj.expiration_date < datetime.datetime.now():
             obj.delete()
+            if set_default:
+                cls.set(key, default, expiration_sec=expiration_sec)
             return default
         return obj.value
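
With the new flags, the first call on a cold key stores the default and returns it, later calls inside the window return the stored copy, and an expired document is deleted and re-seeded. A quick usage sketch (the key name and values are made up for illustration):

    from apps.statistics.models import MStatistics

    # Cold key: no document exists, so 42 is stored with a 1-hour
    # expiration and returned.
    MStatistics.get('munin:users_demo', 42, set_default=True, expiration_sec=60*60)

    # Within the hour: the stored document is still valid, so this
    # returns 42 and the new default (99) is ignored.
    MStatistics.get('munin:users_demo', 99, set_default=True, expiration_sec=60*60)

    # Once expiration_date passes, the stale document is deleted,
    # 99 is stored as the new value, and 99 is returned.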