Reducing the number of workers on docker-compose. Also removing extraneous task servers, consolidating to task-celery for feeds and task-work for the cron and work queues

Samuel Clay 2021-03-17 20:13:58 -04:00
parent 15cfd7a592
commit d52a8d362c
6 changed files with 8 additions and 52 deletions

View file

@@ -19,8 +19,6 @@
 when: item.service_name in inventory_hostname
 with_items:
 - service_name: task-celery
-- service_name: task-beat
-- service_name: task-search
 - service_name: task-work
 register:
@@ -33,8 +31,6 @@
 when: item.service_name in inventory_hostname
 with_items:
 - service_name: task-celery
-- service_name: task-beat
-- service_name: task-search
 - service_name: task-work
 tags:
 - never
@@ -49,8 +45,6 @@
 when: item.service_name in inventory_hostname
 with_items:
 - service_name: task-celery
-- service_name: task-beat
-- service_name: task-search
 - service_name: task-work
 tags:
 - never
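
The same filtering pattern runs through all three hunks above: a service is only stopped or started on a host whose inventory hostname contains the service name. A minimal Python sketch of that matching rule, with illustrative hostnames that are not taken from the real inventory:

    # Sketch of the `when: item.service_name in inventory_hostname` condition.
    # After this commit only two service names remain to match against.
    SERVICES = ["task-celery", "task-work"]

    def services_for_host(inventory_hostname):
        # Substring match, so "task-celery3" still selects "task-celery".
        return [s for s in SERVICES if s in inventory_hostname]

    assert services_for_host("task-celery3") == ["task-celery"]
    assert services_for_host("task-work2") == ["task-work"]
    assert services_for_host("db-redis1") == []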

View file

@@ -29,13 +29,9 @@
 when: "item.container_name in inventory_hostname"
 with_items:
 - container_name: task-celery
-  command: "celery worker -A newsblur_web --loglevel=INFO -Q new_feeds,push_feeds,update_feeds"
+  command: "celery worker -A newsblur_web --loglevel=INFO -Q new_feeds,push_feeds,update_feeds,search_indexer"
-- container_name: task-beat
-  command: "celery worker -A newsblur_web --loglevel=INFO -Q beat_feeds_task -c 1"
-- container_name: task-search
-  command: "celery worker -A newsblur_web --loglevel=INFO -Q search_indexer -c 4"
 - container_name: task-work
-  command: "celery worker -A newsblur_web --loglevel=INFO -Q work_queue"
+  command: "celery worker -A newsblur_web --loglevel=INFO -Q beat_feeds_task,work_queue,cron_queue"
 - name: Register celery_task in consul
   tags: consul
@@ -48,8 +44,6 @@
 when: "item.service_name in inventory_hostname"
 with_items:
 - service_name: task-celery
-- service_name: task-beat
-- service_name: task-search
 - service_name: task-work
 - name: Reload celery
@@ -59,7 +53,5 @@
 when: app_changed.changed and item.container_name in inventory_hostname
 with_items:
 - container_name: task-celery
-- container_name: task-beat
-- container_name: task-search
 - container_name: task-work
 changed_when: app_changed.changed
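
The effect of the consolidated -Q lists is that the search_indexer queue is now drained by the task-celery workers and the beat/cron/work queues by task-work. A hedged sketch of how a producer targets one of these queues; the task name here is illustrative, not NewsBlur's actual code:

    from celery import Celery

    # Broker URL taken from the settings hunk further down; the rest is assumed.
    app = Celery("newsblur_web", broker="redis://db_redis:6579/0")

    @app.task
    def index_story(story_id):  # hypothetical task, for illustration only
        pass

    # Queued onto search_indexer, which the task-celery workers now consume
    # alongside new_feeds, push_feeds, and update_feeds.
    index_story.apply_async(args=[1], queue="search_indexer")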

View file

@@ -1,3 +1,4 @@
+import os
 import psutil
 import math
@@ -29,3 +30,5 @@ if workers <= 4:
 if workers > 8:
     workers = 8
+
+if os.environ.get('DOCKERBUILD', False):
+    workers = 1
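
Only the upper clamp and the new DOCKERBUILD override are visible in these hunks; the sketch below fills in a plausible psutil-based worker count around them, so the derivation of workers is an assumption:

    import os
    import math
    import psutil

    # Assumed: workers derived from CPU count (the diff confirms psutil and
    # math are imported, but not the exact formula).
    workers = math.ceil(psutil.cpu_count() / 2)
    if workers <= 4:
        workers = 4  # assumed floor, implied by the "if workers <= 4:" hunk context
    if workers > 8:
        workers = 8  # upper clamp, confirmed by the diff
    if os.environ.get('DOCKERBUILD', False):
        workers = 1  # docker-compose builds run a single gunicorn worker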

View file

@@ -102,6 +102,7 @@ MONGODB_SLAVE = {
 # Celery RabbitMQ/Redis Broker
 BROKER_URL = "redis://db_redis:6579/0"
 CELERY_RESULT_BACKEND = BROKER_URL
+CELERY_WORKER_CONCURRENCY = 1

 REDIS = {
     'host': 'db_redis',
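
CELERY_WORKER_CONCURRENCY = 1 caps each docker-compose worker at a single process, matching the commit message; it is the settings-file counterpart of celery's -c flag. A small sketch, assuming the app picks this up from Django-style settings:

    from celery import Celery

    app = Celery("newsblur_web", broker="redis://db_redis:6579/0")
    # Lowercase equivalent of the CELERY_WORKER_CONCURRENCY setting; an
    # explicit -c on the worker command line would still take precedence.
    app.conf.worker_concurrency = 1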

View file

@@ -391,7 +391,7 @@ resource "digitalocean_droplet" "db-mongo-analytics" {
 }
 resource "digitalocean_droplet" "task-celery" {
-  count    = 2
+  count    = 10
   image    = var.droplet_os
   name     = "task-celery${count.index+1}"
   region   = var.droplet_region
@@ -425,37 +425,3 @@ resource "digitalocean_droplet" "task-work" {
     command = "cd ..; ansible-playbook -l ${self.name} ansible/provision.yml"
   }
 }
-resource "digitalocean_droplet" "task-search" {
-  image    = var.droplet_os
-  name     = "task-search"
-  region   = var.droplet_region
-  size     = var.droplet_size
-  ssh_keys = [digitalocean_ssh_key.default.fingerprint]
-  provisioner "local-exec" {
-    command = "/srv/newsblur/ansible/utils/generate.py; sleep 120"
-  }
-  provisioner "local-exec" {
-    command = "cd ..; ansible-playbook -l ${self.name} ansible/setup_root.yml"
-  }
-  provisioner "local-exec" {
-    command = "cd ..; ansible-playbook -l ${self.name} ansible/provision.yml"
-  }
-}
-resource "digitalocean_droplet" "task-beat" {
-  image    = var.droplet_os
-  name     = "task-beat"
-  region   = var.droplet_region
-  size     = var.droplet_size
-  ssh_keys = [digitalocean_ssh_key.default.fingerprint]
-  provisioner "local-exec" {
-    command = "/srv/newsblur/ansible/utils/generate.py; sleep 120"
-  }
-  provisioner "local-exec" {
-    command = "cd ..; ansible-playbook -l ${self.name} ansible/setup_root.yml"
-  }
-  provisioner "local-exec" {
-    command = "cd ..; ansible-playbook -l ${self.name} ansible/provision.yml"
-  }
-}
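
With count raised from 2 to 10, the name interpolation "task-celery${count.index+1}" yields ten sequentially numbered droplets, and the deleted task-search and task-beat resources will be destroyed on the next terraform apply. A quick sketch of the name expansion:

    # Expansion of name = "task-celery${count.index+1}" for count = 10.
    names = [f"task-celery{i + 1}" for i in range(10)]
    # ['task-celery1', 'task-celery2', ..., 'task-celery10']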

View file

@@ -957,7 +957,7 @@ class Dispatcher:
         self.feeds_count = feeds_count

     def run_jobs(self):
-        if self.options['single_threaded']:
+        if self.options['single_threaded'] or self.num_threads == 1:
             return dispatch_workers(self.feeds_queue[0], self.options)
         else:
             for i in range(self.num_threads):
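
The added num_threads == 1 check makes a one-thread dispatcher take the same inline path as --single_threaded rather than spinning up machinery for a single queue. A condensed sketch of the control flow, with spawn_worker standing in for whatever the loop body actually starts (a hypothetical name, not from the source):

    def run_jobs(self):
        if self.options['single_threaded'] or self.num_threads == 1:
            # One queue, one worker: dispatch inline and return its result.
            return dispatch_workers(self.feeds_queue[0], self.options)
        for i in range(self.num_threads):
            # Each worker drains its own slice of the feed queue (assumed).
            spawn_worker(self.feeds_queue[i], self.options)  # hypothetical helper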