Merge branch 'master' into pro

* master:
  Moving pg basebackup to a backup role
  Adding a secondary postgres that starts from a pg_basebackup. Still needs testing.
  Adding timestamps to backup logs
  No TTY/input needed on backups.
  Paths are in different environments.
  Renaming postgresql backups
  Adding postgres restore command and updating backup script.
Samuel Clay 2022-05-02 11:42:52 -04:00
commit 7870eee728
7 changed files with 87 additions and 64 deletions

View file

@@ -41,7 +41,7 @@
 #     - dir: /backup_redis/
 #       file: "{{ redis_filename }}"
     tags: never, restore_postgres, restore_mongo, restore_redis, restore_redis_story
 - name: Restore postgres
   block:
     - name: pg_restore
@@ -76,3 +76,13 @@
       command: "mv -f /srv/newsblur/backups/{{ redis_story_filename }} /srv/newsblur/docker/volumes/redis/dump.rdb"
       ignore_errors: yes
       tags: never, restore_redis_story
+
+- name: Start postgres basebackup on secondary
+  become: yes
+  command:
+    docker exec postgres pg_basebackup -h db-postgres.service.nyc1.consul -p 5432 -U newsblur -D /var/lib/postgresql/data -Fp -R -Xs -P -c fast
+  # when: (inventory_hostname | regex_replace('[0-9]+', '')) in ['db-postgres-secondary']
+  tags:
+    - never
+    - restore_pg_basebackup
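For reference, the pg_basebackup invocation above uses stock flags: -Fp writes a plain (non-tar) data directory, -R drops a standby.signal file and writes primary_conninfo so the copy comes up as a streaming replica, -Xs streams WAL while the copy runs, -P prints progress, and -c fast forces an immediate checkpoint on the primary. A minimal manual run of the same command, noting that pg_basebackup refuses to write into a non-empty target directory:

    docker exec postgres pg_basebackup \
        -h db-postgres.service.nyc1.consul -p 5432 -U newsblur \
        -D /var/lib/postgresql/data -Fp -R -Xs -P -c fast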

View file

@@ -10,5 +10,5 @@
 - name: reload postgres config
   become: yes
-  command: docker exec postgres pg_ctl reload
+  command: docker exec -u postgres postgres pg_ctl reload
   listen: reload postgres
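The new -u postgres matters because pg_ctl refuses to run as root, and docker exec defaults to the container's root user here. A SQL-level alternative that sidesteps pg_ctl entirely, sketched with the same container and role names used elsewhere in this diff:

    docker exec postgres psql -U newsblur -c 'SELECT pg_reload_conf();'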

View file

@@ -8,21 +8,24 @@
     register: updated_config

 - name: Ensure postgres archive directory
-  become: yes
   file:
     path: /srv/newsblur/docker/volumes/postgres/archive
     state: directory
-    mode: 0777
+    mode: 0755

 - name: Ensure postgres backup directory
-  become: yes
   file:
-    path: /srv/newsblur/backups
+    path: /srv/newsblur/docker/volumes/postgres/backups
     state: directory
-    mode: 0777
+    mode: 0755
+
+- name: Ensure postgres data directory
+  file:
+    path: /srv/newsblur/docker/volumes/postgres/data
+    state: directory
+    mode: 0755

 - name: Start postgres docker containers
-  become: yes
   docker_container:
     name: postgres
     image: postgres:13
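The mode change from 0777 to 0755 drops group/other write access on the host directories. A quick shell sanity check of what 0755 grants, using a throwaway path:

    mkdir -p /tmp/pg_mode_check
    chmod 0755 /tmp/pg_mode_check
    stat -c '%A %a' /tmp/pg_mode_check   # -> drwxr-xr-x 755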
@@ -34,7 +37,6 @@
       POSTGRES_PASSWORD: "{{ postgres_password }}"
     hostname: "{{ inventory_hostname }}"
     networks_cli_compatible: yes
-    # network_mode: host
     network_mode: default
     networks:
       - name: newsblurnet
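With the commented-out host networking removed for good, the container is expected to sit on the default bridge plus newsblurnet. One way to confirm after a deploy, assuming the container name above:

    docker inspect postgres --format '{{ json .NetworkSettings.Networks }}'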
@@ -43,15 +45,21 @@
     ports:
       - 5432:5432
     volumes:
-      - /srv/newsblur/docker/volumes/postgres:/var/lib/postgresql
+      - /srv/newsblur/docker/volumes/postgres/data:/var/lib/postgresql/data
+      - /srv/newsblur/docker/volumes/postgres/archive:/var/lib/postgresql/archive
+      - /srv/newsblur/docker/volumes/postgres/backups:/var/lib/postgresql/backups
       - /srv/newsblur/docker/postgres/postgres.conf:/etc/postgresql/postgresql.conf
       - /srv/newsblur/docker/postgres/postgres_hba-13.conf:/etc/postgresql/pg_hba.conf
-      - /srv/newsblur/backups/:/var/lib/postgresql/backup/
     restart_policy: unless-stopped
+  when: (inventory_hostname | regex_replace('[0-9]+', '')) in ['db-postgres-primary', 'db-postgres']
+
+- name: Change ownership in postgres docker container
+  command: >
+    docker exec postgres chown -fR postgres.postgres /var/lib/postgresql

 - name: Ensure newsblur role in postgres
   shell: >
-    sleep 5;
+    sleep 15;
     docker exec postgres createuser -s newsblur -U postgres;
     docker exec postgres createdb newsblur -U newsblur;
   register: ensure_role
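The bumped sleep 15 papers over container startup time before createuser runs. A polling alternative with pg_isready (shipped in the postgres:13 image), sketched with the same names; this is a suggestion under stated assumptions, not what the playbook does:

    until docker exec postgres pg_isready -h 127.0.0.1 -U postgres -q; do
        sleep 1
    done
    docker exec postgres createuser -s newsblur -U postgres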
@@ -77,11 +85,10 @@
   register: app_changed

 - name: Add sanity checkers cronjob for disk usage
-  become: yes
   cron:
     name: disk_usage_sanity_checker
-    user: root
-    cron_file: /etc/cron.hourly/disk_usage_sanity_checker
+    minute: "0"
+    hour: "0"
     job: >-
       docker pull newsblur/newsblur_python3:latest;
       docker run --rm -it
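Dropping user: root and cron_file moves the entry out of /etc/cron.hourly and into the connecting user's own crontab, and minute: "0" / hour: "0" pins it to once daily at midnight rather than hourly. Ansible's cron module tags its entries with an #Ansible: comment, so one way to verify on a host (connection user assumed):

    crontab -l | grep -A 1 '#Ansible: disk_usage_sanity_checker'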
@@ -105,5 +112,5 @@
     name: postgres backup
     minute: "0"
     hour: "4"
-    job: /srv/newsblur/docker/postgres/backup_postgres.sh 1> /var/log/postgres_backup.log 2>&1
+    job: /srv/newsblur/docker/postgres/backup_postgres.sh >> /var/log/postgres_backup.log 2>&1
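The redirect change from 1> to >> turns the log from truncate-on-every-run into append, which is what makes the newly added timestamps useful across runs. The difference in miniature:

    echo first  > /tmp/demo.log    # truncates: file holds only "first"
    echo second > /tmp/demo.log    # truncates again: only "second" survives
    echo third >> /tmp/demo.log    # appends: file holds "second" then "third"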

View file

@@ -1,19 +1,25 @@
 #!/usr/bin/env bash
 now=$(date '+%Y-%m-%d-%H-%M')
+BACKUP_FILENAME="backup_postgresql_${now}.sql"
+BACKUP_PATH="/var/lib/postgresql/backups/"
+UPLOAD_PATH="/srv/newsblur/docker/volumes/postgres/backups/"
+BACKUP_FILE="${BACKUP_PATH}${BACKUP_FILENAME}"
+UPLOAD_FILE="${UPLOAD_PATH}${BACKUP_FILENAME}"

-BACKUP_FILE="/var/lib/postgresql/backup/backup_postgresql_${now}.sql"
-echo "---> PG dumping - ${now}: ${BACKUP_FILE}"
-sudo docker exec -it postgres sh -c '/usr/lib/postgresql/13/bin/pg_dump -U newsblur -h 127.0.0.1 -Fc newsblur > ${BACKUP_FILE}'
+echo $(date -u) "---> PG dumping - ${now}: ${BACKUP_FILE}"
+sudo docker exec postgres sh -c "mkdir -p $BACKUP_PATH"
+sudo docker exec postgres sh -c "/usr/lib/postgresql/13/bin/pg_dump -U newsblur -h 127.0.0.1 -Fc newsblur > $BACKUP_FILE"

-echo " ---> Uploading postgres backup to S3"
+echo $(date -u) " ---> Uploading postgres backup to S3"
 sudo docker run --user 1000:1001 --rm \
   -v /srv/newsblur:/srv/newsblur \
   --network=host \
   newsblur/newsblur_python3 \
-  python /srv/newsblur/utils/backups/backup_psql.py $BACKUP_FILE
+  python /srv/newsblur/utils/backups/backup_psql.py $UPLOAD_FILE

 # Don't delete backup since the backup_mongo.py script will rm them
 ## rm /opt/mongo/newsblur/backup/backup_mongo_${now}.tgz
 ## rm /opt/mongo/newsblur/backup/backup_mongo_${now}
-echo " ---> Finished uploading backups to S3"
+echo "\n$(date -u) ---> Finished uploading backups to S3"

View file

@@ -317,10 +317,10 @@ restore_command = 'cp /var/lib/postgresql/archive/%f %p' # command to use to re
 #promote_trigger_file = ''          # file name whose presence ends recovery
 hot_standby = on                    # "off" disallows queries during recovery
                                     # (change requires restart)
-#max_standby_archive_delay = 30s    # max delay before canceling queries
+max_standby_archive_delay = 900s    # max delay before canceling queries
                                     # when reading WAL from archive;
                                     # -1 allows indefinite delay
-#max_standby_streaming_delay = 30s  # max delay before canceling queries
+max_standby_streaming_delay = 900s  # max delay before canceling queries
                                     # when reading streaming WAL;
                                     # -1 allows indefinite delay
 #wal_receiver_create_temp_slot = off    # create temp slot if primary_slot_name
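Both standby delays move from the commented 30s default to 900s, i.e. replica queries may hold off WAL replay for up to 15 minutes before being canceled. The live values can be confirmed on the replica; a sketch assuming the container and role names used above:

    docker exec postgres psql -U newsblur -c 'SHOW max_standby_archive_delay;'
    docker exec postgres psql -U newsblur -c 'SHOW max_standby_streaming_delay;'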

View file

@ -23,45 +23,44 @@ resource "digitalocean_ssh_key" "default" {
public_key = file("/srv/secrets-newsblur/keys/docker.key.pub") public_key = file("/srv/secrets-newsblur/keys/docker.key.pub")
} }
resource "digitalocean_project" "NewsBlur_Docker" { # resource "digitalocean_project" "NewsBlur_Docker" {
name = "NewsBlur Docker" # name = "NewsBlur Docker"
environment = "Production" # environment = "Production"
description = "Infrastructure glued together with consul" # description = "Infrastructure glued together with consul"
} # }
resource "digitalocean_project_resources" "NewsBlur_Docker" { # resource "digitalocean_project_resources" "NewsBlur_Docker" {
project = digitalocean_project.NewsBlur_Docker.id # project = digitalocean_project.NewsBlur_Docker.id
resources = flatten([ # resources = flatten([
digitalocean_droplet.db-consul.*.urn, # digitalocean_droplet.db-consul.*.urn,
digitalocean_droplet.www.*.urn, # digitalocean_droplet.www.*.urn,
digitalocean_droplet.app-django.*.urn, # digitalocean_droplet.app-django.*.urn,
digitalocean_droplet.app-counts.*.urn, # digitalocean_droplet.app-counts.*.urn,
digitalocean_droplet.app-push.*.urn, # digitalocean_droplet.app-push.*.urn,
digitalocean_droplet.app-refresh.*.urn, # digitalocean_droplet.app-refresh.*.urn,
digitalocean_droplet.blog.*.urn, # digitalocean_droplet.blog.*.urn,
digitalocean_droplet.staging-web.*.urn, # digitalocean_droplet.staging-web.*.urn,
digitalocean_droplet.discovery.*.urn, # digitalocean_droplet.discovery.*.urn,
digitalocean_droplet.node-text.*.urn, # digitalocean_droplet.node-text.*.urn,
digitalocean_droplet.node-socket.*.urn, # digitalocean_droplet.node-socket.*.urn,
digitalocean_droplet.node-favicons.*.urn, # digitalocean_droplet.node-favicons.*.urn,
digitalocean_droplet.node-images.*.urn, # digitalocean_droplet.node-images.*.urn,
digitalocean_droplet.node-page.*.urn, # digitalocean_droplet.node-page.*.urn,
digitalocean_droplet.db-elasticsearch.*.urn, # digitalocean_droplet.db-elasticsearch.*.urn,
digitalocean_droplet.db-redis-user.*.urn, # digitalocean_droplet.db-redis-user.*.urn,
digitalocean_droplet.db-redis-sessions.*.urn, # digitalocean_droplet.db-redis-sessions.*.urn,
digitalocean_droplet.db-redis-story.*.urn, # digitalocean_droplet.db-redis-story.*.urn,
digitalocean_droplet.db-redis-pubsub.*.urn, # digitalocean_droplet.db-redis-pubsub.*.urn,
digitalocean_droplet.db-postgres.*.urn, # digitalocean_droplet.db-postgres.*.urn,
digitalocean_droplet.db-mongo-primary.*.urn, # digitalocean_droplet.db-mongo-primary.*.urn,
digitalocean_droplet.db-mongo-secondary.*.urn, # digitalocean_droplet.db-mongo-secondary.*.urn,
digitalocean_droplet.db-mongo-analytics.*.urn, # digitalocean_droplet.db-mongo-analytics.*.urn,
digitalocean_droplet.db-metrics.*.urn, # digitalocean_droplet.db-metrics.*.urn,
digitalocean_droplet.db-sentry.*.urn, # digitalocean_droplet.db-sentry.*.urn,
digitalocean_droplet.task-celery.*.urn, # digitalocean_droplet.task-celery.*.urn,
digitalocean_droplet.task-work.*.urn # digitalocean_droplet.task-work.*.urn
]) # ])
# }
}
# ################# # #################
# # Resources # # # Resources #
@@ -411,8 +410,9 @@ resource "digitalocean_droplet" "db-redis-pubsub" {
 }

 resource "digitalocean_droplet" "db-postgres" {
+  count = 2
   image = var.droplet_os
-  name = "db-postgres"
+  name = "db-postgres${count.index+1}"
   region = var.droplet_region
   size = var.droplet_size_160
   ssh_keys = [digitalocean_ssh_key.default.fingerprint]
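With count = 2 the droplets render as db-postgres1 and db-postgres2 (count.index is zero-based, hence the +1), which lines up with the Ansible when: clause above that strips digits from inventory_hostname before matching. A quick check of the rendered names, assuming an initialized Terraform workspace:

    terraform console <<'EOF'
    [for d in digitalocean_droplet.db-postgres : d.name]
    EOF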

View file

@@ -39,7 +39,7 @@ s3 = boto3.client('s3', aws_access_key_id=settings.S3_ACCESS_KEY, aws_secret_acc
 hostname = socket.gethostname().replace('-','_')
 full_path = sys.argv[1]
 backup_filename = os.path.basename(full_path)
-s3_object_name = f'backup_{hostname}/backup_{hostname}_{backup_filename}.sql'
+s3_object_name = f'backup_{hostname}/{backup_filename}.sql'
 print('Uploading %s to %s on S3 bucket %s' % (full_path, s3_object_name, settings.S3_BACKUP_BUCKET))
 s3.upload_file(full_path, settings.S3_BACKUP_BUCKET, s3_object_name, Callback=ProgressPercentage(full_path))
 os.remove(full_path)
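Note that backup_filename is the basename of the uploaded file and already ends in .sql, so the f-string's trailing .sql still doubles the extension; the change only drops the old backup_{hostname}_ prefix. A worked example in shell, with a hypothetical host and timestamp:

    hostname=db_postgres1
    backup_filename=backup_postgresql_2022-05-02-04-00.sql
    echo "backup_${hostname}/${backup_filename}.sql"
    # -> backup_db_postgres1/backup_postgresql_2022-05-02-04-00.sql.sql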