Merge branch 'master' into dejal

This commit is contained in:
David Sinclair 2022-07-05 19:07:45 -07:00
commit 979394be04
473 changed files with 8217 additions and 3378 deletions

View file

@ -7,6 +7,8 @@
"--ignore=E501,W293,W503,W504,E302,E722,E226,E221,E402,E401"
],
"python.pythonPath": "~/.virtualenvs/newsblur3/bin/python",
"editor.bracketPairColorization.enabled": true,
"editor.guides.bracketPairs":"active",
"git.ignoreLimitWarning": true,
"search.exclude": {
"clients": true,

View file

@ -35,14 +35,21 @@ nbup:
- RUNWITHMAKEBUILD=True CURRENT_UID=${CURRENT_UID} CURRENT_GID=${CURRENT_GID} docker compose up -d --build --remove-orphans
coffee:
- coffee -c -w **/*.coffee
migrations:
- docker exec -it newsblur_web ./manage.py makemigrations
makemigration: migrations
datamigration:
- docker exec -it newsblur_web ./manage.py makemigrations --empty $(app)
migration: migrations
migrate:
- docker exec -it newsblur_web ./manage.py migrate
shell:
- RUNWITHMAKEBUILD=True CURRENT_UID=${CURRENT_UID} CURRENT_GID=${CURRENT_GID} docker-compose exec newsblur_web ./manage.py shell_plus
- docker exec -it newsblur_web ./manage.py shell_plus
bash:
- RUNWITHMAKEBUILD=True CURRENT_UID=${CURRENT_UID} CURRENT_GID=${CURRENT_GID} docker-compose exec newsblur_web bash
- docker exec -it newsblur_web bash
# allows user to exec into newsblur_web and use pdb.
debug:
- RUNWITHMAKEBUILD=True CURRENT_UID=${CURRENT_UID} CURRENT_GID=${CURRENT_GID} docker attach ${newsblur}
- docker attach ${newsblur}
log:
- RUNWITHMAKEBUILD=True docker compose logs -f --tail 20 newsblur_web newsblur_node
logweb: log
@ -54,7 +61,14 @@ logmongo:
alllogs:
- RUNWITHMAKEBUILD=True docker compose logs -f --tail 20
logall: alllogs
# brings down containers
mongo:
- docker exec -it db_mongo mongo --port 29019
redis:
- docker exec -it db_redis redis-cli -p 6579
postgres:
- docker exec -it db_postgres psql -U newsblur
stripe:
- stripe listen --forward-to localhost/zebra/webhooks/v2/
down:
- RUNWITHMAKEBUILD=True docker compose -f docker-compose.yml -f docker-compose.metrics.yml down
nbdown: down
@ -73,10 +87,20 @@ keys:
- openssl dhparam -out config/certificates/dhparam-2048.pem 2048
- openssl req -x509 -nodes -new -sha256 -days 1024 -newkey rsa:2048 -keyout config/certificates/RootCA.key -out config/certificates/RootCA.pem -subj "/C=US/CN=Example-Root-CA"
- openssl x509 -outform pem -in config/certificates/RootCA.pem -out config/certificates/RootCA.crt
- openssl req -new -nodes -newkey rsa:2048 -keyout config/certificates/localhost.key -out config/certificates/localhost.csr -subj "/C=US/ST=YourState/L=YourCity/O=Example-Certificates/CN=localhost.local"
- openssl req -new -nodes -newkey rsa:2048 -keyout config/certificates/localhost.key -out config/certificates/localhost.csr -subj "/C=US/ST=YourState/L=YourCity/O=Example-Certificates/CN=localhost"
- openssl x509 -req -sha256 -days 1024 -in config/certificates/localhost.csr -CA config/certificates/RootCA.pem -CAkey config/certificates/RootCA.key -CAcreateserial -out config/certificates/localhost.crt
- cat config/certificates/localhost.crt config/certificates/localhost.key > config/certificates/localhost.pem
- /usr/bin/security add-trusted-cert -d -r trustAsRoot -k /Library/Keychains/System.keychain ./config/certificates/RootCA.crt
- sudo /usr/bin/security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain ./config/certificates/RootCA.crt
# Doesn't work yet
mkcert:
- mkdir config/mkcert
- docker run -v $(shell pwd)/config/mkcert:/root/.local/share/mkcert brunopadz/mkcert-docker:latest \
/bin/sh -c "mkcert -install && \
mkcert -cert-file /root/.local/share/mkcert/mkcert.pem \
-key-file /root/.local/share/mkcert/mkcert.key localhost"
- cat config/mkcert/rootCA.pem config/mkcert/rootCA-key.pem > config/certificates/localhost.pem
- sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain ./config/mkcert/rootCA.pem
# Digital Ocean / Terraform
list:
@ -158,6 +182,8 @@ deploy_staging:
staging: deploy_staging
celery_stop:
- ansible-playbook ansible/deploy.yml -l task --tags stop
sentry:
- ansible-playbook ansible/setup.yml -l sentry -t sentry
maintenance_on:
- ansible-playbook ansible/deploy.yml -l web --tags maintenance_on
maintenance_off:
@ -170,7 +196,14 @@ oldfirewall:
- ANSIBLE_CONFIG=/srv/newsblur/ansible.old.cfg ansible-playbook ansible/all.yml -l db --tags firewall
repairmongo:
- sudo docker run -v "/srv/newsblur/docker/volumes/db_mongo:/data/db" mongo:4.0 mongod --repair --dbpath /data/db
mongodump:
- docker exec -it db_mongo mongodump --port 29019 -d newsblur -o /data/mongodump
- cp -fr docker/volumes/db_mongo/mongodump docker/volumes/mongodump
# - docker exec -it db_mongo cp -fr /data/db/mongodump /data/mongodump
# - docker exec -it db_mongo rm -fr /data/db/
mongorestore:
- cp -fr docker/volumes/mongodump docker/volumes/db_mongo/
- docker exec -it db_mongo mongorestore --port 29019 -d newsblur /data/db/mongodump/newsblur
# performance tests
perf-cli:

View file

@ -5,14 +5,10 @@
when: "'haproxy' in group_names"
- import_playbook: playbooks/deploy_node.yml
when: "'node' in group_names"
- import_playbook: playbooks/deploy_monitor.yml
when: "'postgres' in group_names"
- import_playbook: playbooks/deploy_monitor.yml
when: "'mongo' in group_names"
- import_playbook: playbooks/deploy_monitor.yml
- import_playbook: playbooks/deploy_redis.yml
when: "'redis' in group_names"
- import_playbook: playbooks/deploy_monitor.yml
when: "'elasticsearch' in group_names"
when: '"postgres" in group_names or "mongo" in group_names or "redis" in group_names or "elasticsearch" in group_names'
- import_playbook: playbooks/deploy_task.yml
when: "'task' in group_names"
- import_playbook: playbooks/deploy_staging.yml

View file

@ -0,0 +1,21 @@
---
- name: DEPLOY -> monitor
hosts: db
gather_facts: false
vars_files:
- ../env_vars/base.yml
tasks:
- name: Turning off secondary for redis by deleting redis_replica.conf
copy:
dest: /srv/newsblur/docker/redis/redis_replica.conf
content: ""
tags:
- never
- replicaofnoone
- name: Setting Redis REPLICAOF NO ONE
shell: docker exec redis redis-cli REPLICAOF NO ONE
tags:
- never
- replicaofnoone

View file

@ -20,4 +20,4 @@
- {role: 'mongo-exporter', tags: ['mongo-exporter', 'metrics']}
- {role: 'monitor', tags: 'monitor'}
- {role: 'flask_metrics', tags: ['flask-metrics', 'metrics']}
- {role: 'benchmark', tags: 'benchmark'}
# - {role: 'benchmark', tags: 'benchmark'}

View file

@ -1,8 +1,12 @@
---
- name: Ensure backups directory
become: yes
file:
path: /srv/newsblur/backups
path: /srv/newsblur/docker/volumes/postgres/backups/
state: directory
owner: "{{ ansible_effective_user_id|int }}"
group: "{{ ansible_effective_group_id|int }}"
tags: restore_postgres
- name: Ensure pip installed
become: yes
@ -17,7 +21,7 @@
- name: Set backup vars
set_fact:
redis_story_filename: backup_redis_story_2021-04-13-04-00.rdb.gz
postgres_filename: backup_postgresql_2022-02-03-04-00.sql.gz
postgres_filename: backup_postgresql_2022-05-03-04-00.sql.sql
mongo_filename: backup_mongo_2021-03-15-04-00.tgz
redis_filename: backup_redis_2021-03-15-04-00.rdb.gz
tags: never, restore_postgres, restore_mongo, restore_redis, restore_redis_story
@ -25,20 +29,24 @@
- name: Download archives
amazon.aws.aws_s3:
bucket: "newsblur-backups"
object: "{{ item.dir }}{{ item.file }}"
dest: "/srv/newsblur/backups/{{ item.file }}"
object: "{{ item.s3_dir }}{{ item.file }}"
dest: "{{ item.backup_dir }}{{ item.file }}"
mode: get
overwrite: different
aws_access_key: "{{ lookup('ini', 'aws_access_key_id section=default file=/srv/secrets-newsblur/keys/aws.s3.token') }}"
aws_secret_key: "{{ lookup('ini', 'aws_secret_access_key section=default file=/srv/secrets-newsblur/keys/aws.s3.token') }}"
with_items:
# - dir: /redis_story/
# - s3_dir: /redis_story/
# backup_dir: /srv/newsblur/backups
# file: "{{ redis_story_filename }}"
- dir: /postgres/
- s3_dir: /backup_db_postgres2/
backup_dir: /srv/newsblur/docker/volumes/postgres/backups/
file: "{{ postgres_filename }}"
# - dir: /mongo/
# - s3_dir: /mongo/
# backup_dir: /srv/newsblur/backups
# file: "{{ mongo_filename }}"
# - dir: /backup_redis/
# - s3_dir: /backup_redis/
# backup_dir: /srv/newsblur/backups
# file: "{{ redis_filename }}"
tags: never, restore_postgres, restore_mongo, restore_redis, restore_redis_story
@ -48,7 +56,7 @@
become: yes
command: |
docker exec -i postgres bash -c
"pg_restore -U newsblur --role=newsblur --dbname=newsblur /var/lib/postgresql/backup/{{ postgres_filename }}"
"pg_restore -U newsblur --role=newsblur --dbname=newsblur /var/lib/postgresql/backups/{{ postgres_filename }}"
tags: never, restore_postgres
- name: Restore mongo
@ -105,3 +113,14 @@
- never
- pg_basebackup
- name: Promote secondary postgres to primary
block:
- name: pg_ctl promote
become: yes
command:
docker exec -it postgres su - postgres -c "/usr/lib/postgresql/13/bin/pg_ctl -D /var/lib/postgresql/data promote"
# when: (inventory_hostname | regex_replace('[0-9]+', '')) in ['db-postgres-secondary']
tags:
- never
- pg_promote

View file

@ -48,14 +48,6 @@
group: "{{ ansible_effective_group_id|int }}"
recurse: yes
- name: Copy /etc/hosts from old installation (remove when upgraded)
become: yes
copy:
src: /srv/secrets-newsblur/configs/hosts
dest: /etc/hosts
tags: hosts
notify: reload dnsmasq
- name: "Add inventory_hostname to /etc/hosts"
become: yes
lineinfile:

View file

@ -109,6 +109,17 @@
state: touch
mode: 0666
- name: Add spam.py for task-work
become: yes
copy:
owner: "{{ ansible_effective_user_id|int }}"
group: "{{ ansible_effective_group_id|int }}"
src: /srv/secrets-newsblur/spam/spam.py
dest: /srv/newsblur/apps/social/spam.py
when: "'task-work' in inventory_hostname"
tags:
- spam
- name: Add sanity checkers cronjob for feeds fetched
become: yes
copy:

View file

@ -3,7 +3,9 @@
become: yes
file:
state: directory
mode: 0777
mode: 0755
owner: "{{ ansible_effective_user_id|int }}"
group: "{{ ansible_effective_group_id|int }}"
path: /var/log/mongodb
- name: Block for mongo volume
@ -23,6 +25,8 @@
file:
path: "/mnt/{{ inventory_hostname | regex_replace('db-|-', '') }}"
state: directory
owner: "{{ ansible_effective_user_id|int }}"
group: "{{ ansible_effective_group_id|int }}"
- name: Mount volume read-write
become: yes
@ -32,15 +36,6 @@
fstype: xfs
opts: defaults,discard
state: mounted
- name: Set permissions on volume
become: yes
file:
path: "/mnt/{{ inventory_hostname | regex_replace('db-|-', '') }}"
state: directory
owner: 999
group: 999
recurse: yes
when: (inventory_hostname | regex_replace('[0-9]+', '')) in ['db-mongo-secondary', 'db-mongo-analytics']
@ -49,24 +44,37 @@
copy:
content: "{{ mongodb_keyfile }}"
dest: /srv/newsblur/config/mongodb_keyfile.key
owner: 999
group: 999
owner: "{{ ansible_effective_user_id|int }}"
group: "{{ ansible_effective_group_id|int }}"
mode: 0400
tags:
- keyfile
- name: Set permissions on mongo volume
become: yes
file:
path: "/mnt/{{ inventory_hostname | regex_replace('db-|-', '') }}"
state: directory
owner: "{{ ansible_effective_user_id|int }}"
group: "{{ ansible_effective_group_id|int }}"
recurse: yes
- name: Make backup directory
become: yes
file:
path: "/mnt/{{ inventory_hostname | regex_replace('db-|-', '') }}/backup/"
state: directory
mode: 0777
owner: "{{ ansible_effective_user_id|int }}"
group: "{{ ansible_effective_group_id|int }}"
mode: 0755
- name: Create symlink to mounted volume for backups to live
file:
state: link
src: "/mnt/{{ inventory_hostname | regex_replace('db-|-', '') }}/backup"
path: /srv/newsblur/backup
owner: "{{ ansible_effective_user_id|int }}"
group: "{{ ansible_effective_group_id|int }}"
force: yes
- name: Start db-mongo docker container
@ -88,6 +96,7 @@
# ports:
# - "27017:27017"
command: --config /etc/mongod.conf
user: 1000:1001
volumes:
- /mnt/{{ inventory_hostname | regex_replace('db-|-', '') }}:/data/db
- /srv/newsblur/ansible/roles/mongo/templates/mongo.conf:/etc/mongod.conf
@ -115,7 +124,7 @@
ports:
- "27017:27017"
command: --config /etc/mongod.conf
user: 999:999
user: 1000:1001
volumes:
- /mnt/{{ inventory_hostname | regex_replace('db-|-', '') }}:/data/db
- /srv/newsblur/ansible/roles/mongo/templates/mongo.analytics.conf:/etc/mongod.conf
@ -204,15 +213,26 @@
dest: /srv/newsblur/newsblur_web/local_settings.py
register: app_changed
- name: Add mongo backup log
become: yes
file:
path: /var/log/mongo_backup.log
state: touch
mode: 0755
owner: "{{ ansible_effective_user_id|int }}"
group: "{{ ansible_effective_group_id|int }}"
when: '"db-mongo-secondary1" in inventory_hostname'
- name: Add mongo backup
cron:
name: mongo backup
minute: "0"
hour: "4"
job: /srv/newsblur/docker/mongo/backup_mongo.sh
job: /srv/newsblur/docker/mongo/backup_mongo.sh >> /var/log/mongo_backup.log 2>&1
when: '"db-mongo-secondary1" in inventory_hostname'
tags:
- mongo-backup
- cron
# - name: Add mongo starred_stories+stories backup
# cron:

View file

@ -1,6 +1,6 @@
{
"service": {
"name": "db-mongo-staging",
"name": "db-mongo",
"id": "{{ inventory_hostname }}",
"tags": [
"db"

View file

@ -20,6 +20,17 @@
mode: 0600
line: 'SERVER_NAME = "{{ inventory_hostname }}"'
- name: Copy imageproxy secrets
copy:
src: /srv/secrets-newsblur/settings/imageproxy.key
dest: /srv/imageproxy.key
register: app_changed
notify: restart node
with_items:
- node-images
- staging
when: item in inventory_hostname
- name: Get the volume name
shell: ls /dev/disk/by-id/ | grep -v part
register: volume_name_raw
@ -105,9 +116,13 @@
- "{{ item.ports }}"
env:
NODE_ENV: "production"
IMAGEPROXY_CACHE: "memory:200:4h"
IMAGEPROXY_SIGNATUREKEY: "@/srv/imageproxy.key"
IMAGEPROXY_VERBOSE: "1"
restart_policy: unless-stopped
volumes:
- /srv/newsblur/node:/srv/node
- /srv/imageproxy.key:/srv/imageproxy.key
with_items:
- container_name: imageproxy
image: ghcr.io/willnorris/imageproxy

View file

@ -13,7 +13,7 @@
"checks": [{
"id": "{{inventory_hostname}}-ping",
{% if item.target_host == "node-images" %}
"http": "http://{{ ansible_ssh_host }}:{{ item.port }}/sc,sN1megONJiGNy-CCvqzVPTv-TWRhgSKhFlf61XAYESl4=/http:/samuelclay.com/static/images/2019%20-%20Cuba.jpg",
"http": "http://{{ ansible_ssh_host }}:{{ item.port }}/sc,seLJDaKBog3LLEMDe8cjBefMhnVSibO4RA5boZhWcVZ0=/https://samuelclay.com/static/images/2019%20-%20Cuba.jpg",
{% elif item.target_host == "node-favicons" %}
"http": "http://{{ ansible_ssh_host }}:{{ item.port }}/rss_feeds/icon/1",
{% elif item.target_host == "node-text" %}

View file

@ -7,29 +7,20 @@
notify: reload postgres
register: updated_config
- name: Ensure postgres archive directory
- name: Create Postgres docker volumes with correct permissions
become: yes
file:
path: /srv/newsblur/docker/volumes/postgres/archive
path: "{{ item }}"
state: directory
mode: 0755
- name: Ensure postgres backup directory
become: yes
file:
path: /srv/newsblur/docker/volumes/postgres/backups
state: directory
mode: 0755
- name: Ensure postgres data directory
become: yes
file:
path: /srv/newsblur/docker/volumes/postgres/data
state: directory
mode: 0755
recurse: yes
owner: "{{ ansible_effective_user_id|int }}"
group: "{{ ansible_effective_group_id|int }}"
with_items:
- /srv/newsblur/docker/volumes/postgres/archive
- /srv/newsblur/docker/volumes/postgres/backups
- /srv/newsblur/docker/volumes/postgres/data
- name: Start postgres docker containers
become: yes
docker_container:
name: postgres
image: postgres:13
@ -48,6 +39,7 @@
- postgres
ports:
- 5432:5432
user: 1000:1001
volumes:
- /srv/newsblur/docker/volumes/postgres/data:/var/lib/postgresql/data
- /srv/newsblur/docker/volumes/postgres/archive:/var/lib/postgresql/archive
@ -62,6 +54,7 @@
become: yes
command: >
docker exec postgres chown -fR postgres.postgres /var/lib/postgresql
ignore_errors: yes
- name: Ensure newsblur role in postgres
become: yes

View file

@ -1,6 +1,10 @@
{
"service": {
{% if inventory_hostname.startswith('db-postgres3') %}
"name": "db-postgres",
{% else %}
"name": "db-postgres-secondary",
{% endif %}
"tags": [
"db"
],

View file

@ -10,25 +10,19 @@
notify: restart redis
register: updated_config
- name: Turning off secondary for redis by deleting redis_replica.conf
copy:
dest: /srv/newsblur/docker/redis/redis_replica.conf
content: ""
tags:
# - never
- replicaofnoone
- name: Create Redis docker volume with correct permissions
file:
path: /srv/newsblur/docker/volumes/redis
state: directory
recurse: yes
owner: "{{ ansible_effective_user_id|int }}"
group: "{{ ansible_effective_group_id|int }}"
- name: Setting Redis REPLICAOF NO ONE
shell: docker exec redis redis-cli REPLICAOF NO ONE
tags:
# - never
- replicaofnoone
- name: Start redis docker containers
become: yes
docker_container:
name: redis
image: redis:6.2.6
image: redis:6.2.7
state: started
command: /usr/local/etc/redis/redis_server.conf
container_default_behavior: no_defaults

View file

@ -1,6 +1,10 @@
{
"service": {
"name": "{{ inventory_hostname|regex_replace('\d+', '') }}",
{% if inventory_hostname in ["db-redis-user", "db-redis-story", "db-redis-session", "db-redis-pubsub"] %}
"name": "{{ inventory_hostname|regex_replace('\d+', '') }}",
{% else %}
"name": "{{ inventory_hostname|regex_replace('\d+', '') }}-staging",
{% endif %}
"id": "{{ inventory_hostname }}",
"tags": [
"redis"
@ -8,13 +12,13 @@
"port": 6379,
"checks": [{
"id": "{{inventory_hostname}}-ping",
{% if inventory_hostname == 'db-redis-story' %}
{% if inventory_hostname.startswith('db-redis-story') %}
"http": "http://{{ ansible_ssh_host }}:5579/db_check/redis_story?consul=1",
{% elif inventory_hostname == 'db-redis-user' %}
{% elif inventory_hostname.startswith('db-redis-user') %}
"http": "http://{{ ansible_ssh_host }}:5579/db_check/redis_user?consul=1",
{% elif inventory_hostname == 'db-redis-pubsub' %}
{% elif inventory_hostname.startswith('db-redis-pubsub') %}
"http": "http://{{ ansible_ssh_host }}:5579/db_check/redis_pubsub?consul=1",
{% elif inventory_hostname == 'db-redis-sessions' %}
{% elif inventory_hostname.startswith('db-redis-sessions') %}
"http": "http://{{ ansible_ssh_host }}:5579/db_check/redis_sessions?consul=1",
{% else %}
"http": "http://{{ ansible_ssh_host }}:5000/db_check/redis?consul=1",

View file

@ -1,13 +1,4 @@
---
- name: Ensure /srv directory exists
become: yes
file:
path: /srv
state: directory
mode: 0755
owner: nb
group: nb
# - name: Ensure nb /srv/newsblur owner
# become: yes
# file:

View file

@ -86,7 +86,6 @@
user: 1000:1001
volumes:
- /srv/newsblur:/srv/newsblur
- /etc/hosts:/etc/hosts
- name: Register web app in consul
tags: consul

View file

@ -2,7 +2,7 @@ from django.conf.urls import url
from apps.monitor.views import ( AppServers, AppTimes,
Classifiers, DbTimes, Errors, FeedCounts, Feeds, LoadTimes,
Stories, TasksCodes, TasksPipeline, TasksServers, TasksTimes,
Updates, Users
Updates, Users, FeedSizes
)
urlpatterns = [
url(r'^app-servers?$', AppServers.as_view(), name="app_servers"),
@ -11,6 +11,7 @@ urlpatterns = [
url(r'^db-times?$', DbTimes.as_view(), name="db_times"),
url(r'^errors?$', Errors.as_view(), name="errors"),
url(r'^feed-counts?$', FeedCounts.as_view(), name="feed_counts"),
url(r'^feed-sizes?$', FeedSizes.as_view(), name="feed_sizes"),
url(r'^feeds?$', Feeds.as_view(), name="feeds"),
url(r'^load-times?$', LoadTimes.as_view(), name="load_times"),
url(r'^stories?$', Stories.as_view(), name="stories"),

View file

@ -4,6 +4,7 @@ from apps.monitor.views.newsblur_classifiers import Classifiers
from apps.monitor.views.newsblur_dbtimes import DbTimes
from apps.monitor.views.newsblur_errors import Errors
from apps.monitor.views.newsblur_feed_counts import FeedCounts
from apps.monitor.views.newsblur_feed_sizes import FeedSizes
from apps.monitor.views.newsblur_feeds import Feeds
from apps.monitor.views.newsblur_loadtimes import LoadTimes
from apps.monitor.views.newsblur_stories import Stories

View file

@ -0,0 +1,42 @@
from django.conf import settings
from django.shortcuts import render
from django.views import View
from django.db.models import Sum
import redis
from apps.rss_feeds.models import Feed, DuplicateFeed
from apps.push.models import PushSubscription
from apps.statistics.models import MStatistics
class FeedSizes(View):
def get(self, request):
fs_size_bytes = MStatistics.get('munin:fs_size_bytes')
if not fs_size_bytes:
fs_size_bytes = Feed.objects.aggregate(Sum('fs_size_bytes'))['fs_size_bytes__sum']
MStatistics.set('munin:fs_size_bytes', fs_size_bytes, 60*60*12)
archive_users_size_bytes = MStatistics.get('munin:archive_users_size_bytes')
if not archive_users_size_bytes:
archive_users_size_bytes = Feed.objects.filter(archive_subscribers__gte=1).aggregate(Sum('fs_size_bytes'))['fs_size_bytes__sum']
MStatistics.set('munin:archive_users_size_bytes', archive_users_size_bytes, 60*60*12)
data = {
'fs_size_bytes': fs_size_bytes,
'archive_users_size_bytes': archive_users_size_bytes,
}
chart_name = "feed_sizes"
chart_type = "counter"
formatted_data = {}
for k, v in data.items():
formatted_data[k] = f'{chart_name}{{category="{k}"}} {v}'
context = {
"data": formatted_data,
"chart_name": chart_name,
"chart_type": chart_type,
}
return render(request, 'monitor/prometheus_data.html', context, content_type="text/plain")

View file

@ -9,14 +9,18 @@ from apps.profile.models import Profile, RNewUserQueue
class Users(View):
def get(self, request):
last_year = datetime.datetime.utcnow() - datetime.timedelta(days=365)
last_month = datetime.datetime.utcnow() - datetime.timedelta(days=30)
last_day = datetime.datetime.utcnow() - datetime.timedelta(minutes=60*24)
data = {
'all': User.objects.count(),
'yearly': Profile.objects.filter(last_seen_on__gte=last_year).count(),
'monthly': Profile.objects.filter(last_seen_on__gte=last_month).count(),
'daily': Profile.objects.filter(last_seen_on__gte=last_day).count(),
'premium': Profile.objects.filter(is_premium=True).count(),
'archive': Profile.objects.filter(is_archive=True).count(),
'pro': Profile.objects.filter(is_pro=True).count(),
'queued': RNewUserQueue.user_count(),
}
chart_name = "users"

View file

@ -67,9 +67,7 @@ class RedisGrafanaMetric(View):
return render(request, 'monitor/prometheus_data.html', context, content_type="text/plain")
class RedisActiveConnection(RedisGrafanaMetric):
def get_context(self):
def get_fields(self):
return (
('connected_clients', dict(

View file

@ -148,7 +148,7 @@ class EmailNewsletter:
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user, user.email)])
msg.attach_alternative(html, "text/html")
msg.send(fail_silently=True)
msg.send()
logging.user(user, "~BB~FM~SBSending first newsletter email to: %s" % user.email)

View file

@ -327,8 +327,7 @@ def api_unread_story(request, trigger_slug=None):
found_feed_ids = [feed_id]
found_trained_feed_ids = [feed_id] if usersub.is_trained else []
stories = usersub.get_stories(order="newest", read_filter="unread",
offset=0, limit=limit,
default_cutoff_date=user.profile.unread_cutoff)
offset=0, limit=limit)
else:
folder_title = feed_or_folder
if folder_title == "Top Level":

View file

@ -10,6 +10,8 @@ from apps.social.models import MSocialProfile
PLANS = [
("newsblur-premium-36", mark_safe("$36 / year <span class='NB-small'>($3/month)</span>")),
("newsblur-premium-archive", mark_safe("$99 / year <span class='NB-small'>(~$8/month)</span>")),
("newsblur-premium-pro", mark_safe("$299 / year <span class='NB-small'>(~$25/month)</span>")),
]
class HorizRadioRenderer(forms.RadioSelect):

View file

@ -185,7 +185,7 @@ class SQLLogToConsoleMiddleware:
and not getattr(settings, 'DEBUG_QUERIES_SUMMARY_ONLY', False)
):
t = Template(
"{% for sql in sqllog %}{% if not forloop.first %} {% endif %}[{{forloop.counter}}] {{sql.color}}{{sql.time}}~SNs~FW: {{sql.sql|safe}}{% if not forloop.last %}\n{% endif %}{% endfor %}"
"{% for sql in sqllog %}{% if not forloop.first %} {% endif %}[{{forloop.counter}}] {{sql.color}}{{sql.time}}~SN~FW: {{sql.sql|safe}}{% if not forloop.last %}\n{% endif %}{% endfor %}"
)
logging.debug(
t.render(

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,18 @@
# Generated by Django 3.1.10 on 2022-01-11 15:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profile', '0004_auto_20220110_2106'),
]
operations = [
migrations.AddField(
model_name='profile',
name='is_archive',
field=models.BooleanField(blank=True, default=False, null=True),
),
]

View file

@ -0,0 +1,18 @@
# Generated by Django 3.1.10 on 2022-01-13 21:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profile', '0005_profile_is_archive'),
]
operations = [
migrations.AddField(
model_name='profile',
name='days_of_unread',
field=models.IntegerField(default=30, blank=True, null=True),
),
]

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,18 @@
# Generated by Django 3.1.10 on 2022-02-07 19:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profile', '0007_auto_20220125_2108'),
]
operations = [
migrations.AddField(
model_name='profile',
name='paypal_sub_id',
field=models.CharField(blank=True, max_length=24, null=True),
),
]

View file

@ -0,0 +1,24 @@
# Generated by Django 3.1.10 on 2022-02-08 23:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('profile', '0008_profile_paypal_sub_id'),
]
operations = [
migrations.CreateModel(
name='PaypalIds',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('paypal_sub_id', models.CharField(blank=True, max_length=24, null=True)),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='paypal_ids', to=settings.AUTH_USER_MODEL)),
],
),
]

View file

@ -0,0 +1,18 @@
# Generated by Django 3.1.10 on 2022-02-14 20:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profile', '0009_paypalids'),
]
operations = [
migrations.AddField(
model_name='profile',
name='active_provider',
field=models.CharField(blank=True, max_length=24, null=True),
),
]

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load diff

View file

@ -15,6 +15,33 @@ def EmailNewPremium(user_id):
user_profile = Profile.objects.get(user__pk=user_id)
user_profile.send_new_premium_email()
@app.task()
def FetchArchiveFeedsForUser(user_id):
# subs = UserSubscription.objects.filter(user=user_id)
# user_profile = Profile.objects.get(user__pk=user_id)
# logging.user(user_profile.user, f"~FCBeginning archive feed fetches for ~SB~FG{subs.count()} feeds~SN...")
UserSubscription.fetch_archive_feeds_for_user(user_id)
@app.task()
def FetchArchiveFeedsChunk(user_id, feed_ids):
# logging.debug(" ---> Fetching archive stories: %s for %s" % (feed_ids, user_id))
UserSubscription.fetch_archive_feeds_chunk(user_id, feed_ids)
@app.task()
def FinishFetchArchiveFeeds(results, user_id, start_time, starting_story_count):
# logging.debug(" ---> Fetching archive stories finished for %s" % (user_id))
ending_story_count, pre_archive_count = UserSubscription.finish_fetch_archive_feeds(user_id, start_time, starting_story_count)
user_profile = Profile.objects.get(user__pk=user_id)
user_profile.send_new_premium_archive_email(ending_story_count, pre_archive_count)
@app.task(name="email-new-premium-pro")
def EmailNewPremiumPro(user_id):
user_profile = Profile.objects.get(user__pk=user_id)
user_profile.send_new_premium_pro_email()
@app.task(name="premium-expire")
def PremiumExpire(**kwargs):
# Get expired but grace period users

View file

@ -11,10 +11,17 @@ urlpatterns = [
url(r'^set_collapsed_folders/?', views.set_collapsed_folders),
url(r'^paypal_form/?', views.paypal_form),
url(r'^paypal_return/?', views.paypal_return, name='paypal-return'),
url(r'^paypal_archive_return/?', views.paypal_archive_return, name='paypal-archive-return'),
url(r'^stripe_return/?', views.paypal_return, name='stripe-return'),
url(r'^switch_stripe_subscription/?', views.switch_stripe_subscription, name='switch-stripe-subscription'),
url(r'^switch_paypal_subscription/?', views.switch_paypal_subscription, name='switch-paypal-subscription'),
url(r'^is_premium/?', views.profile_is_premium, name='profile-is-premium'),
url(r'^paypal_webhooks/?', include('paypal.standard.ipn.urls'), name='paypal-webhooks'),
url(r'^paypal_ipn/?', include('paypal.standard.ipn.urls'), name='paypal-ipn'),
url(r'^is_premium_archive/?', views.profile_is_premium_archive, name='profile-is-premium-archive'),
# url(r'^paypal_ipn/?', include('paypal.standard.ipn.urls'), name='paypal-ipn'),
url(r'^paypal_ipn/?', views.paypal_ipn, name='paypal-ipn'),
url(r'^paypal_webhooks/?', views.paypal_webhooks, name='paypal-webhooks'),
url(r'^stripe_form/?', views.stripe_form, name='stripe-form'),
url(r'^stripe_checkout/?', views.stripe_checkout, name='stripe-checkout'),
url(r'^activities/?', views.load_activities, name='profile-activities'),
url(r'^payment_history/?', views.payment_history, name='profile-payment-history'),
url(r'^cancel_premium/?', views.cancel_premium, name='profile-cancel-premium'),

View file

@ -1,6 +1,8 @@
import re
import stripe
import requests
import datetime
import dateutil
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_protect, csrf_exempt
@ -15,7 +17,7 @@ from django.urls import reverse
from django.shortcuts import render
from django.core.mail import mail_admins
from django.conf import settings
from apps.profile.models import Profile, PaymentHistory, RNewUserQueue, MRedeemedCode, MGiftCode
from apps.profile.models import Profile, PaymentHistory, RNewUserQueue, MRedeemedCode, MGiftCode, PaypalIds
from apps.reader.models import UserSubscription, UserSubscriptionFolders, RUserStory
from apps.profile.forms import StripePlusPaymentForm, PLANS, DeleteAccountForm
from apps.profile.forms import ForgotPasswordForm, ForgotPasswordReturnForm, AccountSettingsForm
@ -25,14 +27,17 @@ from apps.rss_feeds.models import MStarredStory, MStarredStoryCounts
from apps.social.models import MSocialServices, MActivity, MSocialProfile
from apps.analyzer.models import MClassifierTitle, MClassifierAuthor, MClassifierFeed, MClassifierTag
from utils import json_functions as json
import json as python_json
from utils.user_functions import ajax_login_required
from utils.view_functions import render_to, is_true
from utils.user_functions import get_user
from utils import log as logging
from vendor.paypalapi.exceptions import PayPalAPIResponseError
from paypal.standard.forms import PayPalPaymentsForm
from paypal.standard.ipn.views import ipn as paypal_standard_ipn
SINGLE_FIELD_PREFS = ('timezone','feed_pane_size','hide_mobile','send_emails',
INTEGER_FIELD_PREFS = ('feed_pane_size', 'days_of_unread')
SINGLE_FIELD_PREFS = ('timezone','hide_mobile','send_emails',
'hide_getting_started', 'has_setup_feeds', 'has_found_friends',
'has_trained_intelligence')
SPECIAL_PREFERENCES = ('old_password', 'new_password', 'autofollow_friends', 'dashboard_date',)
@ -50,6 +55,10 @@ def set_preference(request):
if preference_value in ['true','false']: preference_value = True if preference_value == 'true' else False
if preference_name in SINGLE_FIELD_PREFS:
setattr(request.user.profile, preference_name, preference_value)
elif preference_name in INTEGER_FIELD_PREFS:
setattr(request.user.profile, preference_name, int(preference_value))
if preference_name in preferences:
del preferences[preference_name]
elif preference_name in SPECIAL_PREFERENCES:
if preference_name == 'autofollow_friends':
social_services = MSocialServices.get_user(request.user.pk)
@ -185,6 +194,7 @@ def set_view_setting(request):
feed_order_setting = request.POST.get('feed_order_setting')
feed_read_filter_setting = request.POST.get('feed_read_filter_setting')
feed_layout_setting = request.POST.get('feed_layout_setting')
feed_dashboard_count_setting = request.POST.get('feed_dashboard_count_setting')
view_settings = json.decode(request.user.profile.view_settings)
setting = view_settings.get(feed_id, {})
@ -192,6 +202,7 @@ def set_view_setting(request):
if feed_view_setting: setting['v'] = feed_view_setting
if feed_order_setting: setting['o'] = feed_order_setting
if feed_read_filter_setting: setting['r'] = feed_read_filter_setting
if feed_dashboard_count_setting: setting['d'] = feed_dashboard_count_setting
if feed_layout_setting: setting['l'] = feed_layout_setting
view_settings[feed_id] = setting
@ -259,7 +270,58 @@ def set_collapsed_folders(request):
response = dict(code=code)
return response
@ajax_login_required
def paypal_ipn(request):
    """Receive a classic PayPal IPN post, deferring to the webhook
    handler when PayPal delivered a webhook payload here instead."""
    try:
        response = paypal_standard_ipn(request)
    except AssertionError:
        # Paypal may have sent webhooks to ipn, so redirect
        logging.user(request, f" ---> Paypal IPN to webhooks redirect: {request.body}")
        response = paypal_webhooks(request)
    return response
def paypal_webhooks(request):
    """Dispatch PayPal REST webhook events for subscription billing.

    Falls back to the legacy IPN handler when the request body is not JSON
    (PayPal sometimes posts IPN messages to this endpoint). Always responds
    "OK" so PayPal stops retrying delivery.
    """
    try:
        data = json.decode(request.body)
    except python_json.decoder.JSONDecodeError:
        # Kick it over to paypal ipn
        return paypal_standard_ipn(request)
    logging.user(request, f" ---> Paypal webhooks {data.get('event_type', '<no event_type>')} data: {data}")
    if data['event_type'] == "BILLING.SUBSCRIPTION.CREATED":
        # Don't start a subscription but save it in case the payment comes before the subscription activation
        user = User.objects.get(pk=int(data['resource']['custom_id']))
        user.profile.store_paypal_sub_id(data['resource']['id'], skip_save_primary=True)
    elif data['event_type'] in ["BILLING.SUBSCRIPTION.ACTIVATED", "BILLING.SUBSCRIPTION.UPDATED"]:
        user = User.objects.get(pk=int(data['resource']['custom_id']))
        user.profile.store_paypal_sub_id(data['resource']['id'])
        # plan_id = data['resource']['plan_id']
        # if plan_id == Profile.plan_to_paypal_plan_id('premium'):
        #     user.profile.activate_premium()
        # elif plan_id == Profile.plan_to_paypal_plan_id('archive'):
        #     user.profile.activate_archive()
        # elif plan_id == Profile.plan_to_paypal_plan_id('pro'):
        #     user.profile.activate_pro()
        # PayPal becomes the active provider, so any Stripe sub is cancelled
        # and the payment history is rebuilt from scratch.
        user.profile.cancel_premium_stripe()
        user.profile.setup_premium_history()
        if data['event_type'] == "BILLING.SUBSCRIPTION.ACTIVATED":
            user.profile.cancel_and_prorate_existing_paypal_subscriptions(data)
    elif data['event_type'] == "PAYMENT.SALE.COMPLETED":
        # NOTE(review): sale events appear to carry 'custom' rather than
        # 'custom_id' — confirm against PayPal's webhook event reference.
        user = User.objects.get(pk=int(data['resource']['custom']))
        user.profile.setup_premium_history()
    elif data['event_type'] == "PAYMENT.CAPTURE.REFUNDED":
        user = User.objects.get(pk=int(data['resource']['custom_id']))
        user.profile.setup_premium_history()
    elif data['event_type'] in ["BILLING.SUBSCRIPTION.CANCELLED", "BILLING.SUBSCRIPTION.SUSPENDED"]:
        # Cancellations may arrive without a custom_id; fall back to looking
        # up the user via the stored PayPal subscription id.
        custom_id = data['resource'].get('custom_id', None)
        if custom_id:
            user = User.objects.get(pk=int(custom_id))
        else:
            paypal_id = PaypalIds.objects.get(paypal_sub_id=data['resource']['id'])
            user = paypal_id.user
        user.profile.setup_premium_history()
    return HttpResponse("OK")
def paypal_form(request):
domain = Site.objects.get_current().domain
if settings.DEBUG:
@ -289,11 +351,20 @@ def paypal_form(request):
# Output the button.
return HttpResponse(form.render(), content_type='text/html')
@login_required
def paypal_return(request):
    """Landing page after a successful PayPal premium checkout."""
    context = {
        'user_profile': request.user.profile,
    }
    return render(request, 'reader/paypal_return.xhtml', context)
@login_required
def paypal_archive_return(request):
    """Landing page after a successful PayPal archive checkout."""
    context = {
        'user_profile': request.user.profile,
    }
    return render(request, 'reader/paypal_archive_return.xhtml', context)
@login_required
def activate_premium(request):
    """Legacy endpoint: activation happens elsewhere, so just go home."""
    index_url = reverse('index')
    return HttpResponseRedirect(index_url)
@ -304,7 +375,6 @@ def profile_is_premium(request):
# Check tries
code = 0
retries = int(request.GET['retries'])
profile = Profile.objects.get(user=request.user)
subs = UserSubscription.objects.filter(user=request.user)
total_subs = subs.count()
@ -315,12 +385,42 @@ def profile_is_premium(request):
if not request.user.profile.is_premium:
subject = "Premium activation failed: %s (%s/%s)" % (request.user, activated_subs, total_subs)
message = """User: %s (%s) -- Email: %s""" % (request.user.username, request.user.pk, request.user.email)
mail_admins(subject, message, fail_silently=True)
request.user.profile.is_premium = True
request.user.profile.save()
mail_admins(subject, message)
request.user.profile.activate_premium()
profile = Profile.objects.get(user=request.user)
return {
'is_premium': profile.is_premium,
'is_premium_archive': profile.is_archive,
'code': code,
'activated_subs': activated_subs,
'total_subs': total_subs,
}
@ajax_login_required
@json.json_view
def profile_is_premium_archive(request):
# Check tries
code = 0
retries = int(request.GET['retries'])
subs = UserSubscription.objects.filter(user=request.user)
total_subs = subs.count()
activated_subs = subs.filter(feed__archive_subscribers__gte=1).count()
if retries >= 30:
code = -1
if not request.user.profile.is_premium_archive:
subject = "Premium archive activation failed: %s (%s/%s)" % (request.user, activated_subs, total_subs)
message = """User: %s (%s) -- Email: %s""" % (request.user.username, request.user.pk, request.user.email)
mail_admins(subject, message)
request.user.profile.activate_archive()
profile = Profile.objects.get(user=request.user)
return {
'is_premium': profile.is_premium,
'is_premium_archive': profile.is_archive,
'code': code,
'activated_subs': activated_subs,
'total_subs': total_subs,
@ -340,7 +440,7 @@ def save_ios_receipt(request):
logging.user(request, "~BM~FBSending iOS Receipt email: %s %s" % (product_identifier, transaction_identifier))
subject = "iOS Premium: %s (%s)" % (request.user.profile, product_identifier)
message = """User: %s (%s) -- Email: %s, product: %s, txn: %s, receipt: %s""" % (request.user.username, request.user.pk, request.user.email, product_identifier, transaction_identifier, receipt)
mail_admins(subject, message, fail_silently=True)
mail_admins(subject, message)
else:
logging.user(request, "~BM~FBNot sending iOS Receipt email, already paid: %s %s" % (product_identifier, transaction_identifier))
@ -360,7 +460,7 @@ def save_android_receipt(request):
logging.user(request, "~BM~FBSending Android Receipt email: %s %s" % (product_id, order_id))
subject = "Android Premium: %s (%s)" % (request.user.profile, product_id)
message = """User: %s (%s) -- Email: %s, product: %s, order: %s""" % (request.user.username, request.user.pk, request.user.email, product_id, order_id)
mail_admins(subject, message, fail_silently=True)
mail_admins(subject, message)
else:
logging.user(request, "~BM~FBNot sending Android Receipt email, already paid: %s %s" % (product_id, order_id))
@ -473,6 +573,88 @@ def stripe_form(request):
}
)
@login_required
def switch_stripe_subscription(request):
    """Switch the logged-in user's Stripe plan, or hand off to the
    appropriate billing-details flow for plan-change requests."""
    plan = request.POST['plan']
    if plan == "change_stripe":
        return stripe_checkout(request)
    if plan == "change_paypal":
        paypal_url = request.user.profile.paypal_change_billing_details_url()
        return HttpResponseRedirect(paypal_url)

    switch_successful = request.user.profile.switch_stripe_subscription(plan)
    outcome = '~FGsucceeded~FC' if switch_successful else '~FRfailed~FC'
    logging.user(request, "~FCSwitching subscription to ~SB%s~SN~FC (%s)" % (plan, outcome))
    if not switch_successful:
        # Couldn't switch in place; send the user through a fresh checkout.
        return stripe_checkout(request)
    return HttpResponseRedirect(reverse('stripe-return'))
@login_required
def switch_paypal_subscription(request):
    """Switch the logged-in user's PayPal plan via an approval URL.

    Fix: added @login_required — this view dereferences request.user.profile,
    which fails for anonymous users, and its sibling
    switch_stripe_subscription is already protected.
    """
    plan = request.POST['plan']
    if plan == "change_stripe":
        return stripe_checkout(request)
    elif plan == "change_paypal":
        paypal_url = request.user.profile.paypal_change_billing_details_url()
        return HttpResponseRedirect(paypal_url)
    # Ask PayPal for an approval URL that revises the subscription to `plan`.
    approve_url = request.user.profile.switch_paypal_subscription_approval_url(plan)
    logging.user(request, "~FCSwitching subscription to ~SB%s~SN~FC (%s)" % (
        plan,
        '~FGsucceeded~FC' if approve_url else '~FRfailed~FC'
    ))
    if approve_url:
        return HttpResponseRedirect(approve_url)
    # No approval URL means the switch failed; land on the plan's return page.
    paypal_return = reverse('paypal-return')
    if plan == "archive":
        paypal_return = reverse('paypal-archive-return')
    return HttpResponseRedirect(paypal_return)
@login_required
def stripe_checkout(request):
    """Send the user to Stripe: the billing portal for existing customers
    changing billing details, or a new Checkout session otherwise."""
    stripe.api_key = settings.STRIPE_SECRET
    domain = Site.objects.get_current().domain
    plan = request.POST['plan']

    if plan == "change_stripe":
        # Existing customers manage billing through Stripe's hosted portal.
        portal_session = stripe.billing_portal.Session.create(
            customer=request.user.profile.stripe_id,
            return_url="http://%s%s?next=payments" % (domain, reverse('index')),
        )
        return HttpResponseRedirect(portal_session.url, status=303)

    price = Profile.plan_to_stripe_price(plan)
    session_dict = {
        "line_items": [
            {
                'price': price,
                'quantity': 1,
            },
        ],
        "mode": 'subscription',
        "metadata": {"newsblur_user_id": request.user.pk},
        "success_url": "http://%s%s" % (domain, reverse('stripe-return')),
        "cancel_url": "http://%s%s" % (domain, reverse('index')),
    }
    # Reuse the stored Stripe customer when there is one; otherwise let
    # Stripe create a customer keyed off the user's email.
    if request.user.profile.stripe_id:
        session_dict['customer'] = request.user.profile.stripe_id
    else:
        session_dict["customer_email"] = request.user.email

    checkout_session = stripe.checkout.Session.create(**session_dict)
    logging.user(request, "~BM~FBLoading Stripe checkout")
    return HttpResponseRedirect(checkout_session.url, status=303)
@render_to('reader/activities_module.xhtml')
def load_activities(request):
user = get_user(request)
@ -519,11 +701,37 @@ def payment_history(request):
}
}
next_invoice = None
stripe_customer = user.profile.stripe_customer()
paypal_api = user.profile.paypal_api()
if stripe_customer:
try:
invoice = stripe.Invoice.upcoming(customer=stripe_customer.id)
for lines in invoice.lines.data:
next_invoice = dict(payment_date=datetime.datetime.fromtimestamp(lines.period.start),
payment_amount=invoice.amount_due/100.0,
payment_provider="(scheduled)",
scheduled=True)
break
except stripe.error.InvalidRequestError:
pass
if paypal_api and not next_invoice and user.profile.premium_renewal and len(history):
next_invoice = dict(payment_date=history[0].payment_date+dateutil.relativedelta.relativedelta(years=1),
payment_amount=history[0].payment_amount,
payment_provider="(scheduled)",
scheduled=True)
return {
'is_premium': user.profile.is_premium,
'is_archive': user.profile.is_archive,
'is_pro': user.profile.is_pro,
'premium_expire': user.profile.premium_expire,
'premium_renewal': user.profile.premium_renewal,
'active_provider': user.profile.active_provider,
'payments': history,
'statistics': statistics,
'next_invoice': next_invoice,
}
@ajax_login_required
@ -541,15 +749,16 @@ def cancel_premium(request):
def refund_premium(request):
    """Refund a user's premium payment (staff action).

    Fix: resolves merge residue — the pre-merge call without `provider` and
    the pre-merge unconditional return were left alongside the new lines,
    which is not valid Python. Only the merged result is kept.
    """
    user_id = request.POST.get('user_id')
    partial = request.POST.get('partial', False)
    provider = request.POST.get('provider', None)
    user = User.objects.get(pk=user_id)
    try:
        refunded = user.profile.refund_premium(partial=partial, provider=provider)
    except stripe.error.InvalidRequestError as e:
        refunded = e
    except PayPalAPIResponseError as e:
        refunded = e
    # On failure `refunded` holds the exception object, so only an int
    # (the refunded amount) counts as success.
    return {'code': 1 if type(refunded) == int else -1, 'refunded': refunded}
@staff_member_required
@ajax_login_required

View file

@ -61,7 +61,7 @@ def push_callback(request, push_id):
# Don't give fat ping, just fetch.
# subscription.feed.queue_pushed_feed_xml(request.body)
if subscription.feed.active_premium_subscribers >= 1:
if subscription.feed.active_subscribers >= 1:
subscription.feed.queue_pushed_feed_xml("Fetch me", latest_push_date_delta=latest_push_date_delta)
MFetchHistory.add(feed_id=subscription.feed_id,
fetch_type='push')

View file

@ -3,6 +3,8 @@ import time
import re
import redis
import pymongo
import celery
import mongoengine as mongo
from operator import itemgetter
from pprint import pprint
from utils import log as logging
@ -109,11 +111,13 @@ class UserSubscription(models.Model):
@classmethod
def story_hashes(cls, user_id, feed_ids=None, usersubs=None, read_filter="unread", order="newest",
include_timestamps=False, group_by_feed=True, cutoff_date=None,
across_all_feeds=True):
include_timestamps=False, group_by_feed=False, cutoff_date=None,
across_all_feeds=True, store_stories_key=None, offset=0, limit=500):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
pipeline = r.pipeline()
user = User.objects.get(pk=user_id)
story_hashes = {} if group_by_feed else []
is_archive = user.profile.is_archive
if not feed_ids and not across_all_feeds:
return story_hashes
@ -123,17 +127,31 @@ class UserSubscription(models.Model):
feed_ids = [sub.feed_id for sub in usersubs]
if not feed_ids:
return story_hashes
current_time = int(time.time() + 60*60*24)
if not cutoff_date:
cutoff_date = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_STORY_HASHES)
cutoff_date = user.profile.unread_cutoff
feed_counter = 0
unread_ranked_stories_keys = []
expire_unread_stories_key = False
after_unread_pipeline = r.pipeline()
read_dates = dict()
manual_unread_pipeline = r.pipeline()
manual_unread_feed_oldest_date = dict()
oldest_manual_unread = None
for us in usersubs:
read_dates[us.feed_id] = int(max(us.mark_read_date, cutoff_date).strftime('%s'))
for feed_id_group in chunks(feed_ids, 20):
user_unread_stories_feed_key = f"uU:{user_id}:{us.feed_id}"
manual_unread_pipeline.exists(user_unread_stories_feed_key)
results = manual_unread_pipeline.execute()
for i, us in enumerate(usersubs):
if results[i]:
user_unread_stories_feed_key = f"uU:{user_id}:{us.feed_id}"
oldest_manual_unread = r.zrevrange(user_unread_stories_feed_key, -1, -1, withscores=True)
manual_unread_feed_oldest_date[us.feed_id] = int(oldest_manual_unread[0][1])
for feed_id_group in chunks(feed_ids, 10):
pipeline = r.pipeline()
for feed_id in feed_id_group:
stories_key = 'F:%s' % feed_id
@ -141,12 +159,13 @@ class UserSubscription(models.Model):
read_stories_key = 'RS:%s:%s' % (user_id, feed_id)
unread_stories_key = 'U:%s:%s' % (user_id, feed_id)
unread_ranked_stories_key = 'zU:%s:%s' % (user_id, feed_id)
expire_unread_stories_key = False
max_score = current_time
if read_filter == 'unread':
# +1 for the intersection b/w zF and F, which carries an implicit score of 1.
min_score = read_dates[feed_id] + 1
# TODO: Remove above +1 and switch below to AGGREGATE='MAX', which may obviate the need
# for the U:%s keys and just work with the zF: & RS: directly into zU:
pipeline.sdiffstore(unread_stories_key, stories_key, read_stories_key)
expire_unread_stories_key = True
else:
@ -160,113 +179,73 @@ class UserSubscription(models.Model):
min_score, max_score = max_score, min_score
pipeline.zinterstore(unread_ranked_stories_key, [sorted_stories_key, unread_stories_key])
byscorefunc(unread_ranked_stories_key, min_score, max_score, withscores=include_timestamps)
pipeline.delete(unread_ranked_stories_key)
if order == 'oldest':
pipeline.zremrangebyscore(unread_ranked_stories_key, 0, min_score-1)
pipeline.zremrangebyscore(unread_ranked_stories_key, max_score+1, 2*max_score)
else:
pipeline.zremrangebyscore(unread_ranked_stories_key, 0, max_score-1)
pipeline.zremrangebyscore(unread_ranked_stories_key, min_score+1, 2*min_score)
# If archive premium user has manually marked an older story as unread
if is_archive and feed_id in manual_unread_feed_oldest_date:
if order == 'oldest':
min_score = manual_unread_feed_oldest_date[feed_id]
else:
max_score = manual_unread_feed_oldest_date[feed_id]
pipeline.zunionstore(unread_ranked_stories_key, [unread_ranked_stories_key, user_unread_stories_feed_key], aggregate="MAX")
if settings.DEBUG and False:
debug_stories = r.zrevrange(unread_ranked_stories_key, 0, -1, withscores=True)
print((" ---> Story hashes (%s/%s - %s/%s) %s stories: %s" % (
min_score, datetime.datetime.fromtimestamp(min_score).strftime('%Y-%m-%d %T'),
max_score, datetime.datetime.fromtimestamp(max_score).strftime('%Y-%m-%d %T'),
len(debug_stories),
debug_stories)))
if not store_stories_key:
byscorefunc(unread_ranked_stories_key, min_score, max_score, withscores=include_timestamps, start=offset, num=limit)
unread_ranked_stories_keys.append(unread_ranked_stories_key)
after_unread_pipeline.delete(unread_ranked_stories_key)
if expire_unread_stories_key:
pipeline.delete(unread_stories_key)
after_unread_pipeline.delete(unread_stories_key)
results = pipeline.execute()
for hashes in results:
if not isinstance(hashes, list): continue
if group_by_feed:
story_hashes[feed_ids[feed_counter]] = hashes
feed_counter += 1
else:
story_hashes.extend(hashes)
return story_hashes
def get_stories(self, offset=0, limit=6, order='newest', read_filter='all', withscores=False,
hashes_only=False, cutoff_date=None, default_cutoff_date=None):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
renc = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL_ENCODED)
rt = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_TEMP_POOL)
ignore_user_stories = False
stories_key = 'F:%s' % (self.feed_id)
read_stories_key = 'RS:%s:%s' % (self.user_id, self.feed_id)
unread_stories_key = 'U:%s:%s' % (self.user_id, self.feed_id)
unread_ranked_stories_key = 'z%sU:%s:%s' % ('h' if hashes_only else '',
self.user_id, self.feed_id)
if withscores or not offset or not rt.exists(unread_ranked_stories_key):
rt.delete(unread_ranked_stories_key)
if not r.exists(stories_key):
# print " ---> No stories on feed: %s" % self
return []
elif read_filter == 'all' or not r.exists(read_stories_key):
ignore_user_stories = True
unread_stories_key = stories_key
else:
r.sdiffstore(unread_stories_key, stories_key, read_stories_key)
sorted_stories_key = 'zF:%s' % (self.feed_id)
r.zinterstore(unread_ranked_stories_key, [sorted_stories_key, unread_stories_key])
if not ignore_user_stories:
r.delete(unread_stories_key)
dump = renc.dump(unread_ranked_stories_key)
if dump:
pipeline = rt.pipeline()
pipeline.delete(unread_ranked_stories_key)
pipeline.restore(unread_ranked_stories_key, 1*60*60*1000, dump)
pipeline.execute()
r.delete(unread_ranked_stories_key)
current_time = int(time.time() + 60*60*24)
if not cutoff_date:
cutoff_date = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
if read_filter == "unread":
cutoff_date = max(cutoff_date, self.mark_read_date)
elif default_cutoff_date:
cutoff_date = default_cutoff_date
if not store_stories_key:
for hashes in results:
if not isinstance(hashes, list): continue
if group_by_feed:
story_hashes[feed_ids[feed_counter]] = hashes
feed_counter += 1
else:
story_hashes.extend(hashes)
if order == 'oldest':
byscorefunc = rt.zrangebyscore
if read_filter == 'unread':
min_score = int(time.mktime(cutoff_date.timetuple())) + 1
else:
min_score = int(time.mktime(cutoff_date.timetuple())) - 1000
max_score = current_time
else:
byscorefunc = rt.zrevrangebyscore
min_score = current_time
if read_filter == 'unread':
# +1 for the intersection b/w zF and F, which carries an implicit score of 1.
max_score = int(time.mktime(cutoff_date.timetuple())) + 1
else:
max_score = 0
if settings.DEBUG and False:
debug_stories = rt.zrevrange(unread_ranked_stories_key, 0, -1, withscores=True)
print((" ---> Unread all stories (%s - %s) %s stories: %s" % (
min_score,
max_score,
len(debug_stories),
debug_stories)))
story_ids = byscorefunc(unread_ranked_stories_key, min_score,
max_score, start=offset, num=500,
withscores=withscores)[:limit]
if withscores:
story_ids = [(s[0], int(s[1])) for s in story_ids]
if store_stories_key:
r.zunionstore(store_stories_key, unread_ranked_stories_keys, aggregate="MAX")
if withscores or hashes_only:
return story_ids
elif story_ids:
story_date_order = "%sstory_date" % ('' if order == 'oldest' else '-')
mstories = MStory.objects(story_hash__in=story_ids).order_by(story_date_order)
stories = Feed.format_stories(mstories)
return stories
else:
return []
after_unread_pipeline.execute()
if not store_stories_key:
return story_hashes
def get_stories(self, offset=0, limit=6, order='newest', read_filter='all', cutoff_date=None):
    """Return formatted stories for this subscription's feed, delegating
    the unread/read filtering to UserSubscription.story_hashes."""
    hashes = UserSubscription.story_hashes(
        self.user.pk,
        feed_ids=[self.feed.pk],
        order=order,
        read_filter=read_filter,
        offset=offset,
        limit=limit,
        cutoff_date=cutoff_date,
    )
    # Oldest-first sorts ascending by story_date; otherwise descending.
    date_ordering = ('' if order == 'oldest' else '-') + 'story_date'
    mstories = MStory.objects(story_hash__in=hashes).order_by(date_ordering)
    return Feed.format_stories(mstories)
@classmethod
def feed_stories(cls, user_id, feed_ids=None, offset=0, limit=6,
order='newest', read_filter='all', usersubs=None, cutoff_date=None,
all_feed_ids=None, cache_prefix=""):
rt = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_TEMP_POOL)
rt = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
across_all_feeds = False
if order == 'oldest':
@ -299,34 +278,24 @@ class UserSubscription(models.Model):
rt.delete(ranked_stories_keys)
rt.delete(unread_ranked_stories_keys)
story_hashes = cls.story_hashes(user_id, feed_ids=feed_ids,
cls.story_hashes(user_id, feed_ids=feed_ids,
read_filter=read_filter, order=order,
include_timestamps=True,
group_by_feed=False,
include_timestamps=False,
usersubs=usersubs,
cutoff_date=cutoff_date,
across_all_feeds=across_all_feeds)
if not story_hashes:
return [], []
pipeline = rt.pipeline()
for story_hash_group in chunks(story_hashes, 100):
pipeline.zadd(ranked_stories_keys, dict(story_hash_group))
pipeline.execute()
across_all_feeds=across_all_feeds,
store_stories_key=ranked_stories_keys)
story_hashes = range_func(ranked_stories_keys, offset, limit)
if read_filter == "unread":
unread_feed_story_hashes = story_hashes
rt.zunionstore(unread_ranked_stories_keys, [ranked_stories_keys])
else:
unread_story_hashes = cls.story_hashes(user_id, feed_ids=feed_ids,
cls.story_hashes(user_id, feed_ids=feed_ids,
read_filter="unread", order=order,
include_timestamps=True,
group_by_feed=False,
cutoff_date=cutoff_date)
if unread_story_hashes:
for unread_story_hash_group in chunks(unread_story_hashes, 100):
rt.zadd(unread_ranked_stories_keys, dict(unread_story_hash_group))
cutoff_date=cutoff_date,
store_stories_key=unread_ranked_stories_keys)
unread_feed_story_hashes = range_func(unread_ranked_stories_keys, offset, limit)
rt.expire(ranked_stories_keys, 60*60)
@ -334,6 +303,15 @@ class UserSubscription(models.Model):
return story_hashes, unread_feed_story_hashes
def oldest_manual_unread_story_date(self, r=None):
    """Return the oldest manually-marked-unread story for this feed as a
    (hash, score) list from redis, or an empty list when none exist.

    `r` may be injected for testing; defaults to the story-hash pool.
    """
    if not r:
        r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
    key = f"uU:{self.user_id}:{self.feed_id}"
    # The last element of the reverse range has the lowest score, i.e. the
    # oldest manually-unread entry.
    return r.zrevrange(key, -1, -1, withscores=True)
@classmethod
def truncate_river(cls, user_id, feed_ids, read_filter, cache_prefix=""):
rt = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_TEMP_POOL)
@ -501,7 +479,96 @@ class UserSubscription(models.Model):
if stale_feeds:
stale_feeds = list(set([f.feed_id for f in stale_feeds]))
cls.queue_new_feeds(user, new_feeds=stale_feeds)
@classmethod
def schedule_fetch_archive_feeds_for_user(cls, user_id):
    """Queue the background task that backfills archive stories for every
    feed the user subscribes to."""
    from apps.profile.tasks import FetchArchiveFeedsForUser
    FetchArchiveFeedsForUser.apply_async(
        kwargs=dict(user_id=user_id),
        queue='search_indexer',
        time_limit=settings.MAX_SECONDS_COMPLETE_ARCHIVE_FETCH,
    )
# Should be run as a background task
@classmethod
def fetch_archive_feeds_for_user(cls, user_id):
    """Fan out archive-story fetches for all of a user's feeds.

    Builds a celery chord: one FetchArchiveFeedsChunk task per feed (chunks
    of 1), with FinishFetchArchiveFeeds as the callback that reports totals.
    Publishes 'fetch_archive:start' over redis pubsub so the client can show
    progress.
    """
    from apps.profile.tasks import FetchArchiveFeedsChunk, FinishFetchArchiveFeeds
    start_time = time.time()
    user = User.objects.get(pk=user_id)
    r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
    r.publish(user.username, 'fetch_archive:start')
    subscriptions = UserSubscription.objects.filter(user=user).only('feed')
    total = subscriptions.count()
    feed_ids = []
    starting_story_count = 0
    for sub in subscriptions:
        try:
            feed_ids.append(sub.feed.pk)
        except Feed.DoesNotExist:
            # Subscription points at a deleted feed; skip it.
            continue
        # Baseline story count, so the callback can report how many are new.
        starting_story_count += MStory.objects(story_feed_id=sub.feed.pk).count()
    # Chunk size of 1: one task per feed, each with its own time limit.
    feed_id_chunks = [c for c in chunks(feed_ids, 1)]
    logging.user(user, "~FCFetching archive stories from ~SB%s feeds~SN in %s chunks..." %
                 (total, len(feed_id_chunks)))
    search_chunks = [FetchArchiveFeedsChunk.s(feed_ids=feed_id_chunk,
                                              user_id=user_id
                                              ).set(queue='search_indexer')
                     .set(time_limit=settings.MAX_SECONDS_ARCHIVE_FETCH_SINGLE_FEED,
                          soft_time_limit=settings.MAX_SECONDS_ARCHIVE_FETCH_SINGLE_FEED-30)
                     for feed_id_chunk in feed_id_chunks]
    callback = FinishFetchArchiveFeeds.s(user_id=user_id,
                                         start_time=start_time,
                                         starting_story_count=starting_story_count).set(queue='search_indexer')
    # Chord: run all chunk tasks, then the callback once all complete.
    celery.chord(search_chunks)(callback)
@classmethod
def fetch_archive_feeds_chunk(cls, user_id, feed_ids):
    """Backfill archive stories for one chunk of feeds, then publish the
    chunk's feed ids over redis pubsub so the client can track progress."""
    from apps.rss_feeds.models import Feed
    r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
    user = User.objects.get(pk=user_id)
    logging.user(user, "~FCFetching archive stories from %s feeds..." % len(feed_ids))

    for feed_id in feed_ids:
        feed = Feed.get_by_id(feed_id)
        if feed:
            feed.fill_out_archive_stories()

    feed_list = ','.join(str(feed_id) for feed_id in feed_ids)
    r.publish(user.username, 'fetch_archive:feeds:%s' % feed_list)
@classmethod
def finish_fetch_archive_feeds(cls, user_id, start_time, starting_story_count):
    """Chord callback: tally the archive fetch, log totals, and publish
    'fetch_archive:done' over redis pubsub.

    Returns (ending_story_count, min(pre_archive_count, starting_story_count)).
    """
    r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
    user = User.objects.get(pk=user_id)
    subscriptions = UserSubscription.objects.filter(user=user).only('feed')
    total = subscriptions.count()
    duration = time.time() - start_time

    ending_story_count = 0
    pre_archive_count = 0
    for sub in subscriptions:
        try:
            ending_story_count += MStory.objects(story_feed_id=sub.feed.pk).count()
            # Stories each feed would have stored without the archive upgrade.
            pre_archive_count += Feed.get_by_id(sub.feed.pk).number_of_stories_to_store(pre_archive=True)
        except Feed.DoesNotExist:
            # Subscription points at a deleted feed; skip it.
            continue

    new_story_count = ending_story_count - starting_story_count
    logging.user(user, f"~FCFinished archive feed fetches for ~SB~FG{subscriptions.count()} feeds~FC~SN: ~FG~SB{new_story_count:,} new~SB~FC, ~FG{ending_story_count:,} total (pre-archive: {pre_archive_count:,} stories)")

    logging.user(user, "~FCFetched archive stories from ~SB%s feeds~SN in ~FM~SB%s~FC~SN sec." %
                 (total, round(duration, 2)))
    r.publish(user.username, 'fetch_archive:done')

    return ending_story_count, min(pre_archive_count, starting_story_count)
@classmethod
def identify_deleted_feed_users(cls, old_feed_id):
users = UserSubscriptionFolders.objects.filter(folders__contains=old_feed_id).only('user')
@ -667,8 +734,9 @@ class UserSubscription(models.Model):
return
cutoff_date = cutoff_date - datetime.timedelta(seconds=1)
story_hashes = self.get_stories(limit=500, order="newest", cutoff_date=cutoff_date,
read_filter="unread", hashes_only=True)
story_hashes = UserSubscription.story_hashes(self.user.pk, feed_ids=[self.feed.pk],
order="newest", read_filter="unread",
cutoff_date=cutoff_date)
data = self.mark_story_ids_as_read(story_hashes, aggregated=True)
return data
@ -695,6 +763,9 @@ class UserSubscription(models.Model):
RUserStory.mark_read(self.user_id, self.feed_id, story_hash, aggregated=aggregated)
r.publish(self.user.username, 'story:read:%s' % story_hash)
if self.user.profile.is_archive:
RUserUnreadStory.mark_read(self.user_id, story_hash)
r.publish(self.user.username, 'feed:%s' % self.feed_id)
self.last_read_date = datetime.datetime.now()
@ -704,13 +775,26 @@ class UserSubscription(models.Model):
def invert_read_stories_after_unread_story(self, story, request=None):
data = dict(code=1)
if story.story_date > self.mark_read_date:
unread_cutoff = self.user.profile.unread_cutoff
if self.mark_read_date > unread_cutoff:
unread_cutoff = self.mark_read_date
if story.story_date > unread_cutoff:
return data
# Check if user is archive and story is outside unread cutoff
if self.user.profile.is_archive and story.story_date < self.user.profile.unread_cutoff:
RUserUnreadStory.mark_unread(
user_id=self.user_id,
story_hash=story.story_hash,
story_date=story.story_date,
)
data['story_hashes'] = [story.story_hash]
return data
# Story is outside the mark as read range, so invert all stories before.
newer_stories = MStory.objects(story_feed_id=story.story_feed_id,
story_date__gte=story.story_date,
story_date__lte=self.mark_read_date
story_date__lte=unread_cutoff
).only('story_hash')
newer_stories = [s.story_hash for s in newer_stories]
self.mark_read_date = story.story_date - datetime.timedelta(minutes=1)
@ -762,7 +846,7 @@ class UserSubscription(models.Model):
unread_story_hashes = self.story_hashes(user_id=self.user_id, feed_ids=[self.feed_id],
usersubs=[self],
read_filter='unread', group_by_feed=False,
read_filter='unread',
cutoff_date=self.user.profile.unread_cutoff)
if not stories:
@ -827,10 +911,9 @@ class UserSubscription(models.Model):
else:
feed_scores['neutral'] += 1
else:
# print " ---> Cutoff date: %s" % date_delta
unread_story_hashes = self.story_hashes(user_id=self.user_id, feed_ids=[self.feed_id],
usersubs=[self],
read_filter='unread', group_by_feed=False,
read_filter='unread',
include_timestamps=True,
cutoff_date=date_delta)
@ -895,6 +978,8 @@ class UserSubscription(models.Model):
# Switch read stories
RUserStory.switch_feed(user_id=self.user_id, old_feed_id=old_feed.pk,
new_feed_id=new_feed.pk)
RUserUnreadStory.switch_feed(user_id=self.user_id, old_feed_id=old_feed.pk,
new_feed_id=new_feed.pk)
def switch_feed_for_classifier(model):
duplicates = model.objects(feed_id=old_feed.pk, user_id=self.user_id)
@ -994,10 +1079,10 @@ class UserSubscription(models.Model):
if not safety_net: return
logging.user(user, "~FBFound ~FR%s unscheduled feeds~FB, scheduling..." % len(safety_net))
logging.user(user, "~FBFound ~FR%s unscheduled feeds~FB, scheduling immediately..." % len(safety_net))
for feed_id in safety_net:
feed = Feed.get_by_id(feed_id)
feed.set_next_scheduled_update()
feed.schedule_feed_fetch_immediately()
@classmethod
def count_subscribers_to_other_subscriptions(cls, feed_id):
@ -1039,7 +1124,8 @@ class UserSubscription(models.Model):
return table
# return cofeeds
class RUserStory:
@classmethod
@ -1051,11 +1137,8 @@ class RUserStory:
ps = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
if not username:
username = User.objects.get(pk=user_id).username
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
p = r.pipeline()
# p2 = r2.pipeline()
feed_ids = set()
friend_ids = set()
@ -1079,7 +1162,6 @@ class RUserStory:
cls.mark_read(user_id, feed_id, story_hash, social_user_ids=friends_with_shares, r=p, username=username, ps=ps)
p.execute()
# p2.execute()
return list(feed_ids), list(friend_ids)
@ -1091,8 +1173,6 @@ class RUserStory:
s = redis.Redis(connection_pool=settings.REDIS_POOL)
if not ps:
ps = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
friend_ids = set()
feed_id, _ = MStory.split_story_hash(story_hash)
@ -1118,6 +1198,8 @@ class RUserStory:
feed_read_key = "fR:%s:%s" % (feed_id, week_of_year)
r.incr(feed_read_key)
# This settings.DAYS_OF_STORY_HASHES doesn't need to consider potential pro subscribers
# because the feed_read_key is really only used for statistics and not unreads
r.expire(feed_read_key, 2*settings.DAYS_OF_STORY_HASHES*24*60*60)
@classmethod
@ -1125,8 +1207,6 @@ class RUserStory:
aggregated=False, r=None, username=None, ps=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
story_hash = MStory.ensure_story_hash(story_hash, story_feed_id=story_feed_id)
@ -1134,9 +1214,7 @@ class RUserStory:
def redis_commands(key):
r.sadd(key, story_hash)
# r2.sadd(key, story_hash)
r.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
r.expire(key, Feed.days_of_story_hashes_for_feed(story_feed_id)*24*60*60)
all_read_stories_key = 'RS:%s' % (user_id)
redis_commands(all_read_stories_key)
@ -1156,20 +1234,23 @@ class RUserStory:
key = 'lRS:%s' % user_id
r.lpush(key, story_hash)
r.ltrim(key, 0, 1000)
r.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
r.expire(key, Feed.days_of_story_hashes_for_feed(story_feed_id)*24*60*60)
@staticmethod
def story_can_be_marked_read_by_user(story, user):
def story_can_be_marked_unread_by_user(story, user):
message = None
if story.story_date < user.profile.unread_cutoff:
if story.story_date < user.profile.unread_cutoff and not user.profile.is_archive:
# if user.profile.is_archive:
# message = "Story is more than %s days old, change your days of unreads under Preferences." % (
# user.profile.days_of_unread)
if user.profile.is_premium:
message = "Story is more than %s days old, cannot mark as unread." % (
message = "Story is more than %s days old. Premium Archive accounts can mark any story as unread." % (
settings.DAYS_OF_UNREAD)
elif story.story_date > user.profile.unread_cutoff_premium:
message = "Story is more than %s days old. Premiums can mark unread up to 30 days." % (
settings.DAYS_OF_UNREAD_FREE)
message = "Story is older than %s days. Premium has %s days, and Premium Archive can mark anything unread." % (
settings.DAYS_OF_UNREAD_FREE, settings.DAYS_OF_UNREAD)
else:
message = "Story is more than %s days old, cannot mark as unread." % (
message = "Story is more than %s days old, only Premium Archive can mark older stories unread." % (
settings.DAYS_OF_UNREAD_FREE)
return message
@ -1177,7 +1258,6 @@ class RUserStory:
def mark_unread(user_id, story_feed_id, story_hash, social_user_ids=None, r=None, username=None, ps=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
story_hash = MStory.ensure_story_hash(story_hash, story_feed_id=story_feed_id)
@ -1185,9 +1265,7 @@ class RUserStory:
def redis_commands(key):
r.srem(key, story_hash)
# r2.srem(key, story_hash)
r.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.expire(key, settings.DAYS_OF_STORY_HASHES*24*60*60)
r.expire(key, Feed.days_of_story_hashes_for_feed(story_feed_id)*24*60*60)
all_read_stories_key = 'RS:%s' % (user_id)
redis_commands(all_read_stories_key)
@ -1231,28 +1309,23 @@ class RUserStory:
@classmethod
def switch_feed(cls, user_id, old_feed_id, new_feed_id):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
p = r.pipeline()
# p2 = r2.pipeline()
story_hashes = cls.get_stories(user_id, old_feed_id, r=r)
story_hashes = UserSubscription.story_hashes(user_id, feed_ids=[old_feed_id])
# story_hashes = cls.get_stories(user_id, old_feed_id, r=r)
for story_hash in story_hashes:
_, hash_story = MStory.split_story_hash(story_hash)
new_story_hash = "%s:%s" % (new_feed_id, hash_story)
read_feed_key = "RS:%s:%s" % (user_id, new_feed_id)
p.sadd(read_feed_key, new_story_hash)
# p2.sadd(read_feed_key, new_story_hash)
p.expire(read_feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(read_feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
p.expire(read_feed_key, Feed.days_of_story_hashes_for_feed(new_feed_id)*24*60*60)
read_user_key = "RS:%s" % (user_id)
p.sadd(read_user_key, new_story_hash)
# p2.sadd(read_user_key, new_story_hash)
p.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
p.expire(read_user_key, Feed.days_of_story_hashes_for_feed(new_feed_id)*24*60*60)
p.execute()
# p2.execute()
if len(story_hashes) > 0:
logging.info(" ---> %s read stories" % len(story_hashes))
@ -1260,9 +1333,7 @@ class RUserStory:
@classmethod
def switch_hash(cls, feed, old_hash, new_hash):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
p = r.pipeline()
# p2 = r2.pipeline()
usersubs = UserSubscription.objects.filter(feed_id=feed.pk, last_read_date__gte=feed.unread_cutoff)
logging.info(" ---> ~SB%s usersubs~SN to switch read story hashes..." % len(usersubs))
@ -1271,18 +1342,13 @@ class RUserStory:
read = r.sismember(rs_key, old_hash)
if read:
p.sadd(rs_key, new_hash)
# p2.sadd(rs_key, new_hash)
p.expire(rs_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(rs_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
p.expire(rs_key, feed.days_of_story_hashes*24*60*60)
read_user_key = "RS:%s" % sub.user.pk
p.sadd(read_user_key, new_hash)
# p2.sadd(read_user_key, new_hash)
p.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# p2.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
p.expire(read_user_key, feed.days_of_story_hashes*24*60*60)
p.execute()
# p2.execute()
@classmethod
def read_story_count(cls, user_id):
@ -1733,3 +1799,84 @@ class Feature(models.Model):
class Meta:
ordering = ["-date"]
class RUserUnreadStory:
    """Redis-backed store of stories a user manually marked unread even though
    they are older than the user's unread_cutoff (same as days_of_unread).
    Built for Premium Archive purposes.

    If a story is marked as unread but is within the unread_cutoff, no entry
    is needed here as it will be automatically considered unread according to
    the user's days_of_unread preference.

    Redis keys (sorted sets scored by story timestamp):
        uU:<user_id>            - all manually-unread story hashes for a user
        uU:<user_id>:<feed_id>  - the same, scoped to a single feed
    """

    @classmethod
    def mark_unread(cls, user_id, story_hash, story_date, r=None):
        """Record story_hash as manually unread for user_id.

        story_date may be a datetime, float, or int; it is normalized to an
        integer unix timestamp and used as the zset score. `r` may be a redis
        client or pipeline; a client is created from the story-hash pool when
        not supplied.
        """
        if not r:
            r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
        if isinstance(story_date, float):
            story_date = int(story_date)
        if not isinstance(story_date, int):
            # Assume a datetime-like object with timetuple().
            story_date = int(time.mktime(story_date.timetuple()))

        feed_id, _ = MStory.split_story_hash(story_hash)
        user_unread_stories_key = f"uU:{user_id}"
        user_unread_stories_feed_key = f"uU:{user_id}:{feed_id}"
        r.zadd(user_unread_stories_key, {story_hash: story_date})
        r.zadd(user_unread_stories_feed_key, {story_hash: story_date})

    @classmethod
    def mark_read(cls, user_id, story_hashes, r=None):
        """Drop one or many story hashes from the user's manually-unread sets.

        Accepts a single hash or a list; removals are pipelined.
        """
        if not isinstance(story_hashes, list):
            story_hashes = [story_hashes]
        if not r:
            r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)

        pipeline = r.pipeline()
        for story_hash in story_hashes:
            feed_id, _ = MStory.split_story_hash(story_hash)
            user_unread_stories_key = f"uU:{user_id}"
            user_unread_stories_feed_key = f"uU:{user_id}:{feed_id}"
            pipeline.zrem(user_unread_stories_key, story_hash)
            pipeline.zrem(user_unread_stories_feed_key, story_hash)
        pipeline.execute()

    @classmethod
    def unreads(cls, user_id, story_hash):
        # NOTE(review): this class has no visible ORM base, so `cls.objects`
        # is not defined by anything shown here — looks like a leftover from a
        # previous model-backed implementation; confirm callers and fix.
        if not isinstance(story_hash, list):
            story_hash = [story_hash]
        user_unread_stories = cls.objects.filter(user_id=user_id, story_hash__in=story_hash)

        return user_unread_stories

    @staticmethod
    def get_stories_and_dates(user_id, feed_id, r=None):
        """Return [(story_hash, timestamp), ...] for a user's manually-unread
        stories in a single feed."""
        if not r:
            r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
        user_unread_stories_feed_key = f"uU:{user_id}:{feed_id}"
        story_hashes = r.zrange(user_unread_stories_feed_key, 0, -1, withscores=True)

        return story_hashes

    @classmethod
    def switch_feed(cls, user_id, old_feed_id, new_feed_id):
        """Re-key a user's manually-unread hashes from old_feed_id to
        new_feed_id, preserving each story's original timestamp score.

        NOTE(review): entries under the old feed's uU:* keys are not removed
        here — confirm whether cleanup happens elsewhere.
        """
        r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
        p = r.pipeline()
        story_hashes = cls.get_stories_and_dates(user_id, old_feed_id, r=r)

        for (story_hash, story_timestamp) in story_hashes:
            _, hash_story = MStory.split_story_hash(story_hash)
            new_story_hash = "%s:%s" % (new_feed_id, hash_story)
            cls.mark_unread(user_id, new_story_hash, story_timestamp, r=p)

        p.execute()

        if len(story_hashes) > 0:
            logging.info(" ---> %s archived unread stories" % len(story_hashes))

View file

@ -5,6 +5,7 @@ urlpatterns = [
url(r'^$', views.index),
url(r'^buster', views.iframe_buster, name='iframe-buster'),
url(r'^login_as', views.login_as, name='login_as'),
url(r'^welcome', views.welcome_req, name='welcome'),
url(r'^logout', views.logout, name='welcome-logout'),
url(r'^login', views.login, name='welcome-login'),
url(r'^autologin/(?P<username>\w+)/(?P<secret>\w+)/?', views.autologin, name='autologin'),
@ -63,4 +64,5 @@ urlpatterns = [
url(r'^save_search', views.save_search, name='save-search'),
url(r'^delete_search', views.delete_search, name='delete-search'),
url(r'^save_dashboard_river', views.save_dashboard_river, name='save-dashboard-river'),
url(r'^remove_dashboard_river', views.remove_dashboard_river, name='remove-dashboard-river'),
]

View file

@ -37,7 +37,7 @@ from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds
from apps.analyzer.models import apply_classifier_authors, apply_classifier_tags
from apps.analyzer.models import get_classifiers_for_user, sort_classifiers_by_feed
from apps.profile.models import Profile, MCustomStyling, MDashboardRiver
from apps.reader.models import UserSubscription, UserSubscriptionFolders, RUserStory, Feature
from apps.reader.models import UserSubscription, UserSubscriptionFolders, RUserStory, RUserUnreadStory, Feature
from apps.reader.forms import SignupForm, LoginForm, FeatureForm
from apps.rss_feeds.models import MFeedIcon, MStarredStoryCounts, MSavedSearch
from apps.notifications.models import MUserFeedNotification
@ -118,9 +118,9 @@ def index(request, **kwargs):
def dashboard(request, **kwargs):
user = request.user
feed_count = UserSubscription.objects.filter(user=request.user).count()
recommended_feeds = RecommendedFeed.objects.filter(is_public=True,
approved_date__lte=datetime.datetime.now()
).select_related('feed')[:2]
# recommended_feeds = RecommendedFeed.objects.filter(is_public=True,
# approved_date__lte=datetime.datetime.now()
# ).select_related('feed')[:2]
unmoderated_feeds = []
if user.is_staff:
unmoderated_feeds = RecommendedFeed.objects.filter(is_public=False,
@ -146,13 +146,18 @@ def dashboard(request, **kwargs):
'custom_styling' : custom_styling,
'dashboard_rivers' : dashboard_rivers,
'account_images' : list(range(1, 4)),
'recommended_feeds' : recommended_feeds,
# 'recommended_feeds' : recommended_feeds,
'unmoderated_feeds' : unmoderated_feeds,
'statistics' : statistics,
'social_profile' : social_profile,
'debug' : settings.DEBUG,
'debug_assets' : settings.DEBUG_ASSETS,
}, "reader/dashboard.xhtml"
@render_to('reader/dashboard.xhtml')
def welcome_req(request, **kwargs):
    # URL-routable wrapper: applies the render_to decorator to the shared
    # welcome() view so it can be mapped under its own URL name.
    return welcome(request, **kwargs)
def welcome(request, **kwargs):
user = get_user(request)
statistics = MStatistics.all()
@ -668,7 +673,7 @@ def load_single_feed(request, feed_id):
if page > 200:
    logging.user(request, "~BR~FK~SBOver page 200 on single feed: %s" % page)
    raise Http404

# NOTE(review): this bare `assert False` looks like leftover debug code — any
# request reaching it raises AssertionError (and it vanishes under python -O).
# Confirm intent and remove before deploy.
assert False
if query:
if user.profile.is_premium:
@ -684,11 +689,10 @@ def load_single_feed(request, feed_id):
story_feed_id=feed_id
).order_by('%sstarred_date' % ('-' if order == 'newest' else ''))[offset:offset+limit]
stories = Feed.format_stories(mstories)
elif usersub and (read_filter == 'unread' or order == 'oldest'):
stories = usersub.get_stories(order=order, read_filter=read_filter, offset=offset, limit=limit,
default_cutoff_date=user.profile.unread_cutoff)
elif usersub and read_filter == 'unread':
stories = usersub.get_stories(order=order, read_filter=read_filter, offset=offset, limit=limit)
else:
stories = feed.get_stories(offset, limit)
stories = feed.get_stories(offset, limit, order=order)
checkpoint1 = time.time()
@ -724,7 +728,6 @@ def load_single_feed(request, feed_id):
unread_story_hashes = UserSubscription.story_hashes(user.pk, read_filter='unread',
feed_ids=[usersub.feed_id],
usersubs=[usersub],
group_by_feed=False,
cutoff_date=user.profile.unread_cutoff)
story_hashes = [story['story_hash'] for story in stories if story['story_hash']]
starred_stories = MStarredStory.objects(user_id=user.pk,
@ -755,7 +758,7 @@ def load_single_feed(request, feed_id):
story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz)
if usersub:
story['read_status'] = 1
if story['story_date'] < user.profile.unread_cutoff:
if not user.profile.is_archive and story['story_date'] < user.profile.unread_cutoff:
story['read_status'] = 1
elif (read_filter == 'all' or query) and usersub:
story['read_status'] = 1 if story['story_hash'] not in unread_story_hashes else 0
@ -1166,7 +1169,7 @@ def folder_rss_feed(request, user_id, secret_token, unread_filter, folder_slug):
feed_ids, folder_title = user_sub_folders.feed_ids_under_folder_slug(folder_slug)
usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=feed_ids)
if feed_ids and user.profile.is_premium:
if feed_ids and user.profile.is_archive:
params = {
"user_id": user.pk,
"feed_ids": feed_ids,
@ -1263,12 +1266,13 @@ def folder_rss_feed(request, user_id, secret_token, unread_filter, folder_slug):
if story['story_authors']:
story_data['author_name'] = story['story_authors']
rss.add_item(**story_data)
if not user.profile.is_premium:
# TODO: Remove below date hack to accomodate users who paid for premium but want folder rss
if not user.profile.is_archive and (datetime.datetime.now() > datetime.datetime(2023, 7, 1)):
story_data = {
'title': "You must have a premium account on NewsBlur to have RSS feeds for folders.",
'link': "https://%s" % domain,
'description': "You must have a premium account on NewsBlur to have RSS feeds for folders.",
'title': "You must have a premium archive subscription on NewsBlur to have RSS feeds for folders.",
'link': "https://%s/?next=premium" % domain,
'description': "You must have a premium archive subscription on NewsBlur to have RSS feeds for folders.",
'unique_id': "https://%s/premium_only" % domain,
'pubdate': localtime_for_timezone(datetime.datetime.now(), user.profile.timezone),
}
@ -1422,7 +1426,6 @@ def load_river_stories__redis(request):
mstories = stories
unread_feed_story_hashes = UserSubscription.story_hashes(user.pk, feed_ids=feed_ids,
read_filter="unread", order=order,
group_by_feed=False,
cutoff_date=user.profile.unread_cutoff)
else:
stories = []
@ -1676,50 +1679,10 @@ def complete_river(request):
if feed_ids:
stories_truncated = UserSubscription.truncate_river(user.pk, feed_ids, read_filter, cache_prefix="dashboard:")
if page > 1:
if page >= 1:
logging.user(request, "~FC~BBRiver complete on page ~SB%s~SN, truncating ~SB%s~SN stories from ~SB%s~SN feeds" % (page, stories_truncated, len(feed_ids)))
return dict(code=1, message="Truncated %s stories from %s" % (stories_truncated, len(feed_ids)))
@json.json_view
def unread_story_hashes__old(request):
user = get_user(request)
feed_ids = request.GET.getlist('feed_id') or request.GET.getlist('feed_id[]')
feed_ids = [int(feed_id) for feed_id in feed_ids if feed_id]
include_timestamps = is_true(request.GET.get('include_timestamps', False))
usersubs = {}
if not feed_ids:
usersubs = UserSubscription.objects.filter(Q(unread_count_neutral__gt=0) |
Q(unread_count_positive__gt=0),
user=user, active=True)
feed_ids = [sub.feed_id for sub in usersubs]
else:
usersubs = UserSubscription.objects.filter(Q(unread_count_neutral__gt=0) |
Q(unread_count_positive__gt=0),
user=user, active=True, feed__in=feed_ids)
unread_feed_story_hashes = {}
story_hash_count = 0
usersubs = dict((sub.feed_id, sub) for sub in usersubs)
for feed_id in feed_ids:
if feed_id in usersubs:
us = usersubs[feed_id]
else:
continue
if not us.unread_count_neutral and not us.unread_count_positive:
continue
unread_feed_story_hashes[feed_id] = us.get_stories(read_filter='unread', limit=500,
withscores=include_timestamps,
hashes_only=True,
default_cutoff_date=user.profile.unread_cutoff)
story_hash_count += len(unread_feed_story_hashes[feed_id])
logging.user(request, "~FYLoading ~FCunread story hashes~FY: ~SB%s feeds~SN (%s story hashes)" %
(len(feed_ids), len(story_hash_count)))
return dict(unread_feed_story_hashes=unread_feed_story_hashes)
@json.json_view
def unread_story_hashes(request):
@ -1733,6 +1696,7 @@ def unread_story_hashes(request):
story_hashes = UserSubscription.story_hashes(user.pk, feed_ids=feed_ids,
order=order, read_filter=read_filter,
include_timestamps=include_timestamps,
group_by_feed=True,
cutoff_date=user.profile.unread_cutoff)
logging.user(request, "~FYLoading ~FCunread story hashes~FY: ~SB%s feeds~SN (%s story hashes)" %
@ -1819,6 +1783,9 @@ def mark_story_hashes_as_read(request):
return dict(code=-1, message="Missing `story_hash` list parameter.")
feed_ids, friend_ids = RUserStory.mark_story_hashes_read(request.user.pk, story_hashes, username=request.user.username)
if request.user.profile.is_archive:
RUserUnreadStory.mark_read(request.user.pk, story_hashes)
if friend_ids:
socialsubs = MSocialSubscription.objects.filter(
@ -1954,16 +1921,16 @@ def mark_story_as_unread(request):
if not story:
logging.user(request, "~FY~SBUnread~SN story in feed: %s (NOT FOUND)" % (feed))
return dict(code=-1, message="Story not found.")
if usersub:
data = usersub.invert_read_stories_after_unread_story(story, request)
message = RUserStory.story_can_be_marked_read_by_user(story, request.user)
message = RUserStory.story_can_be_marked_unread_by_user(story, request.user)
if message:
data['code'] = -1
data['message'] = message
return data
if usersub:
data = usersub.invert_read_stories_after_unread_story(story, request)
social_subs = MSocialSubscription.mark_dirty_sharing_story(user_id=request.user.pk,
story_feed_id=feed_id,
story_guid_hash=story.guid_hash)
@ -1995,7 +1962,7 @@ def mark_story_hash_as_unread(request):
return data
else:
datas.append(data)
message = RUserStory.story_can_be_marked_read_by_user(story, request.user)
message = RUserStory.story_can_be_marked_unread_by_user(story, request.user)
if message:
data = dict(code=-1, message=message, story_hash=story_hash)
if not is_list:
@ -2872,7 +2839,7 @@ def delete_search(request):
def save_dashboard_river(request):
river_id = request.POST['river_id']
river_side = request.POST['river_side']
river_order = request.POST['river_order']
river_order = int(request.POST['river_order'])
logging.user(request, "~FCSaving dashboard river: ~SB%s~SN (%s %s)" % (river_id, river_side, river_order))
@ -2882,3 +2849,19 @@ def save_dashboard_river(request):
return {
'dashboard_rivers': dashboard_rivers,
}
@required_params('river_id', 'river_side', 'river_order')
@json.json_view
def remove_dashboard_river(request):
    """Delete one dashboard river for the logged-in user, identified by its
    side and order slot, and return the user's remaining rivers so the client
    can re-render the dashboard."""
    post = request.POST
    river_id = post['river_id']
    river_side = post['river_side']
    river_order = int(post['river_order'])

    logging.user(request, "~FRRemoving~FC dashboard river: ~SB%s~SN (%s %s)" % (river_id, river_side, river_order))

    MDashboardRiver.remove_river(request.user.pk, river_side, river_order)
    return {
        'dashboard_rivers': MDashboardRiver.get_user_rivers(request.user.pk),
    }

View file

@ -0,0 +1,38 @@
# Generated by Django 3.1.10 on 2022-01-10 21:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: widens five Feed boolean columns to allow NULL
    # (null=True) so the DB schema matches the model definitions.

    dependencies = [
        ('rss_feeds', '0002_remove_mongo_types'),
    ]

    operations = [
        migrations.AlterField(
            model_name='feed',
            name='feed_address_locked',
            field=models.BooleanField(blank=True, default=False, null=True),
        ),
        migrations.AlterField(
            model_name='feed',
            name='is_push',
            field=models.BooleanField(blank=True, default=False, null=True),
        ),
        migrations.AlterField(
            model_name='feed',
            name='s3_icon',
            field=models.BooleanField(blank=True, default=False, null=True),
        ),
        migrations.AlterField(
            model_name='feed',
            name='s3_page',
            field=models.BooleanField(blank=True, default=False, null=True),
        ),
        migrations.AlterField(
            model_name='feed',
            name='search_indexed',
            field=models.BooleanField(blank=True, default=None, null=True),
        ),
    ]

View file

@ -0,0 +1,18 @@
# Generated by Django 3.1.10 on 2022-01-10 21:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds the Feed.pro_subscribers counter column
    # (nullable int, defaults to 0).

    dependencies = [
        ('rss_feeds', '0003_auto_20220110_2105'),
    ]

    operations = [
        migrations.AddField(
            model_name='feed',
            name='pro_subscribers',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
    ]

View file

@ -0,0 +1,18 @@
# Generated by Django 3.1.10 on 2022-01-11 15:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds the Feed.archive_subscribers counter column
    # (nullable int, defaults to 0) for the Premium Archive tier.

    dependencies = [
        ('rss_feeds', '0004_feed_pro_subscribers'),
    ]

    operations = [
        migrations.AddField(
            model_name='feed',
            name='archive_subscribers',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
    ]

View file

@ -0,0 +1,18 @@
# Generated by Django 3.1.10 on 2022-05-11 17:10
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds the nullable Feed.fs_size_bytes column
    # (on-disk story size accounting).

    dependencies = [
        ('rss_feeds', '0005_feed_archive_subscribers'),
    ]

    operations = [
        migrations.AddField(
            model_name='feed',
            name='fs_size_bytes',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]

View file

@ -0,0 +1,14 @@
# Generated by Django 3.1.10 on 2022-05-17 13:55
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated merge migration: reconciles the 0006_feed_fs_size_bytes
    # branch with the 0003_mongo_version_4_0 branch. Intentionally has no
    # operations.

    dependencies = [
        ('rss_feeds', '0006_feed_fs_size_bytes'),
        ('rss_feeds', '0003_mongo_version_4_0'),
    ]

    operations = [
    ]

View file

@ -0,0 +1,18 @@
# Generated by Django 3.1.10 on 2022-06-06 19:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds the nullable Feed.archive_count column
    # (total stories retained for the archive).

    dependencies = [
        ('rss_feeds', '0007_merge_20220517_1355'),
    ]

    operations = [
        migrations.AddField(
            model_name='feed',
            name='archive_count',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]

View file

@ -1,4 +1,5 @@
import difflib
import bson
import requests
import datetime
import time
@ -65,6 +66,8 @@ class Feed(models.Model):
num_subscribers = models.IntegerField(default=-1)
active_subscribers = models.IntegerField(default=-1, db_index=True)
premium_subscribers = models.IntegerField(default=-1)
archive_subscribers = models.IntegerField(default=0, null=True, blank=True)
pro_subscribers = models.IntegerField(default=0, null=True, blank=True)
active_premium_subscribers = models.IntegerField(default=-1)
branch_from_feed = models.ForeignKey('Feed', blank=True, null=True, db_index=True, on_delete=models.CASCADE)
last_update = models.DateTimeField(db_index=True)
@ -90,6 +93,8 @@ class Feed(models.Model):
s3_page = models.BooleanField(default=False, blank=True, null=True)
s3_icon = models.BooleanField(default=False, blank=True, null=True)
search_indexed = models.BooleanField(default=None, null=True, blank=True)
fs_size_bytes = models.IntegerField(null=True, blank=True)
archive_count = models.IntegerField(null=True, blank=True)
class Meta:
db_table="feeds"
@ -100,13 +105,17 @@ class Feed(models.Model):
if not self.feed_title:
self.feed_title = "[Untitled]"
self.save()
return "%s%s: %s - %s/%s/%s" % (
return "%s%s: %s - %s/%s/%s/%s/%s %s stories (%s bytes)" % (
self.pk,
(" [B: %s]" % self.branch_from_feed.pk if self.branch_from_feed else ""),
self.feed_title,
self.num_subscribers,
self.active_subscribers,
self.active_premium_subscribers,
self.archive_subscribers,
self.pro_subscribers,
self.archive_count,
self.fs_size_bytes,
)
@property
@ -134,7 +143,7 @@ class Feed(models.Model):
def favicon_url_fqdn(self):
if settings.BACKED_BY_AWS['icons_on_s3'] and self.s3_icon:
return self.favicon_url
return "http://%s%s" % (
return "https://%s%s" % (
Site.objects.get_current().domain,
self.favicon_url
)
@ -149,11 +158,27 @@ class Feed(models.Model):
@property
def unread_cutoff(self):
if self.active_premium_subscribers > 0:
if self.archive_subscribers and self.archive_subscribers > 0:
return datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD_ARCHIVE)
if self.premium_subscribers > 0:
return datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
return datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD_FREE)
@classmethod
def days_of_story_hashes_for_feed(cls, feed_id):
    """Return the story-hash retention window (in days) for feed_id.

    Loads only the archive_subscribers column and defers to the
    days_of_story_hashes property; falls back to the global default when
    the feed row no longer exists.
    """
    try:
        feed = cls.objects.only('archive_subscribers').get(pk=feed_id)
        return feed.days_of_story_hashes
    except cls.DoesNotExist:
        return settings.DAYS_OF_STORY_HASHES
@property
def days_of_story_hashes(self):
    # Retention window for this feed's redis story hashes: the longer
    # archive window when any archive subscriber exists, else the default.
    if self.archive_subscribers and self.archive_subscribers > 0:
        return settings.DAYS_OF_STORY_HASHES_ARCHIVE
    return settings.DAYS_OF_STORY_HASHES
@property
def story_hashes_in_unread_cutoff(self):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
@ -182,6 +207,8 @@ class Feed(models.Model):
'num_subscribers': self.num_subscribers,
'updated': relative_timesince(self.last_update),
'updated_seconds_ago': seconds_timesince(self.last_update),
'fs_size_bytes': self.fs_size_bytes,
'archive_count': self.archive_count,
'last_story_date': self.last_story_date,
'last_story_seconds_ago': seconds_timesince(self.last_story_date),
'stories_last_month': self.stories_last_month,
@ -322,13 +349,9 @@ class Feed(models.Model):
def expire_redis(self, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
r.expire('F:%s' % self.pk, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.expire('F:%s' % self.pk, settings.DAYS_OF_STORY_HASHES*24*60*60)
r.expire('zF:%s' % self.pk, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.expire('zF:%s' % self.pk, settings.DAYS_OF_STORY_HASHES*24*60*60)
r.expire('F:%s' % self.pk, self.days_of_story_hashes*24*60*60)
r.expire('zF:%s' % self.pk, self.days_of_story_hashes*24*60*60)
@classmethod
def low_volume_feeds(cls, feed_ids, stories_per_month=30):
@ -592,6 +615,7 @@ class Feed(models.Model):
r.zremrangebyrank('error_feeds', 0, -1)
else:
logging.debug(" ---> No errored feeds to drain")
def update_all_statistics(self, has_new_stories=False, force=False):
recount = not self.counts_converted_to_redis
count_extra = False
@ -604,6 +628,9 @@ class Feed(models.Model):
if force or has_new_stories or count_extra:
self.save_feed_stories_last_month()
if not self.fs_size_bytes or not self.archive_count:
self.count_fs_size_bytes()
if force or (has_new_stories and count_extra):
self.save_popular_authors()
self.save_popular_tags()
@ -630,8 +657,7 @@ class Feed(models.Model):
@classmethod
def setup_feeds_for_premium_subscribers(cls, feed_ids):
logging.info(" ---> ~SN~FMScheduling immediate premium setup of ~SB%s~SN feeds..." %
len(feed_ids))
logging.info(f" ---> ~SN~FMScheduling immediate premium setup of ~SB{len(feed_ids)}~SN feeds...")
feeds = Feed.objects.filter(pk__in=feed_ids)
for feed in feeds:
@ -639,7 +665,8 @@ class Feed(models.Model):
def setup_feed_for_premium_subscribers(self):
self.count_subscribers()
self.set_next_scheduled_update()
self.set_next_scheduled_update(verbose=settings.DEBUG)
self.sync_redis()
def check_feed_link_for_feed_address(self):
@timelimit(10)
@ -707,7 +734,7 @@ class Feed(models.Model):
if status_code not in (200, 304):
self.errors_since_good += 1
self.count_errors_in_history('feed', status_code, fetch_history=fetch_history)
self.set_next_scheduled_update()
self.set_next_scheduled_update(verbose=settings.DEBUG)
elif self.has_feed_exception or self.errors_since_good:
self.errors_since_good = 0
self.has_feed_exception = False
@ -792,7 +819,6 @@ class Feed(models.Model):
total_key = "s:%s" % self.original_feed_id
premium_key = "sp:%s" % self.original_feed_id
last_recount = r.zscore(total_key, -1) # Need to subtract this extra when counting subs
last_recount = r.zscore(premium_key, -1) # Need to subtract this extra when counting subs
# Check for expired feeds with no active users who would have triggered a cleanup
if last_recount and last_recount > subscriber_expire:
@ -816,6 +842,8 @@ class Feed(models.Model):
total = 0
active = 0
premium = 0
archive = 0
pro = 0
active_premium = 0
# Include all branched feeds in counts
@ -831,10 +859,14 @@ class Feed(models.Model):
# now+1 ensures `-1` flag will be corrected for later with - 1
total_key = "s:%s" % feed_id
premium_key = "sp:%s" % feed_id
archive_key = "sarchive:%s" % feed_id
pro_key = "spro:%s" % feed_id
pipeline.zcard(total_key)
pipeline.zcount(total_key, subscriber_expire, now+1)
pipeline.zcard(premium_key)
pipeline.zcount(premium_key, subscriber_expire, now+1)
pipeline.zcard(archive_key)
pipeline.zcard(pro_key)
results = pipeline.execute()
@ -843,13 +875,17 @@ class Feed(models.Model):
active += max(0, results[1] - 1)
premium += max(0, results[2] - 1)
active_premium += max(0, results[3] - 1)
archive += max(0, results[4] - 1)
pro += max(0, results[5] - 1)
original_num_subscribers = self.num_subscribers
original_active_subs = self.active_subscribers
original_premium_subscribers = self.premium_subscribers
original_active_premium_subscribers = self.active_premium_subscribers
logging.info(" ---> [%-30s] ~SN~FBCounting subscribers from ~FCredis~FB: ~FMt:~SB~FM%s~SN a:~SB%s~SN p:~SB%s~SN ap:~SB%s ~SN~FC%s" %
(self.log_title[:30], total, active, premium, active_premium, "(%s branches)" % (len(feed_ids)-1) if len(feed_ids)>1 else ""))
original_archive_subscribers = self.archive_subscribers
original_pro_subscribers = self.pro_subscribers
logging.info(" ---> [%-30s] ~SN~FBCounting subscribers from ~FCredis~FB: ~FMt:~SB~FM%s~SN a:~SB%s~SN p:~SB%s~SN ap:~SB%s~SN archive:~SB%s~SN pro:~SB%s ~SN~FC%s" %
(self.log_title[:30], total, active, premium, active_premium, archive, pro, "(%s branches)" % (len(feed_ids)-1) if len(feed_ids)>1 else ""))
else:
from apps.reader.models import UserSubscription
@ -872,6 +908,22 @@ class Feed(models.Model):
)
original_premium_subscribers = self.premium_subscribers
premium = premium_subs.count()
archive_subs = UserSubscription.objects.filter(
feed__in=feed_ids,
active=True,
user__profile__is_archive=True
)
original_archive_subscribers = self.archive_subscribers
archive = archive_subs.count()
pro_subs = UserSubscription.objects.filter(
feed__in=feed_ids,
active=True,
user__profile__is_pro=True
)
original_pro_subscribers = self.pro_subscribers
pro = pro_subs.count()
active_premium_subscribers = UserSubscription.objects.filter(
feed__in=feed_ids,
@ -881,8 +933,8 @@ class Feed(models.Model):
)
original_active_premium_subscribers = self.active_premium_subscribers
active_premium = active_premium_subscribers.count()
logging.debug(" ---> [%-30s] ~SN~FBCounting subscribers from ~FYpostgres~FB: ~FMt:~SB~FM%s~SN a:~SB%s~SN p:~SB%s~SN ap:~SB%s" %
(self.log_title[:30], total, active, premium, active_premium))
logging.debug(" ---> [%-30s] ~SN~FBCounting subscribers from ~FYpostgres~FB: ~FMt:~SB~FM%s~SN a:~SB%s~SN p:~SB%s~SN ap:~SB%s~SN archive:~SB%s~SN pro:~SB%s" %
(self.log_title[:30], total, active, premium, active_premium, archive, pro))
if settings.DOCKERBUILD:
# Local installs enjoy 100% active feeds
@ -893,15 +945,20 @@ class Feed(models.Model):
self.active_subscribers = active
self.premium_subscribers = premium
self.active_premium_subscribers = active_premium
self.archive_subscribers = archive
self.pro_subscribers = pro
if (self.num_subscribers != original_num_subscribers or
self.active_subscribers != original_active_subs or
self.premium_subscribers != original_premium_subscribers or
self.active_premium_subscribers != original_active_premium_subscribers):
self.active_premium_subscribers != original_active_premium_subscribers or
self.archive_subscribers != original_archive_subscribers or
self.pro_subscribers != original_pro_subscribers):
if original_premium_subscribers == -1 or original_active_premium_subscribers == -1:
self.save()
else:
self.save(update_fields=['num_subscribers', 'active_subscribers',
'premium_subscribers', 'active_premium_subscribers'])
'premium_subscribers', 'active_premium_subscribers',
'archive_subscribers', 'pro_subscribers'])
if verbose:
if self.num_subscribers <= 1:
@ -984,7 +1041,27 @@ class Feed(models.Model):
return 'white'
else:
return 'black'
def fill_out_archive_stories(self, force=False, starting_page=1):
    """
    Starting from page 1 and iterating through N pages, determine whether
    page(i) matches page(i-1) and if there are any new stories.
    """
    # Story count before backfilling; used for the delta logged below.
    before_story_count = MStory.objects(story_feed_id=self.pk).count()
    if not force and not self.archive_subscribers:
        logging.debug(" ---> [%-30s] ~FBNot filling out archive stories, no archive subscribers" % (
            self.log_title[:30]))
        # NOTE(review): this early exit returns a (before, before) tuple while
        # the normal path below returns None — confirm no caller relies on the
        # return value.
        return before_story_count, before_story_count

    # Kick off a feed update that pages back through the archive starting at
    # the requested page (archive_page is forwarded to the fetcher).
    self.update(archive_page=starting_page)

    after_story_count = MStory.objects(story_feed_id=self.pk).count()
    logging.debug(" ---> [%-30s] ~FCFilled out archive, ~FM~SB%s~SN new stories~FC, total of ~SB%s~SN stories" % (
        self.log_title[:30],
        after_story_count - before_story_count,
        after_story_count))
def save_feed_stories_last_month(self, verbose=False):
month_ago = datetime.datetime.utcnow() - datetime.timedelta(days=30)
stories_last_month = MStory.objects(story_feed_id=self.pk,
@ -1188,7 +1265,8 @@ class Feed(models.Model):
'debug': kwargs.get('debug'),
'fpf': kwargs.get('fpf'),
'feed_xml': kwargs.get('feed_xml'),
'requesting_user_id': kwargs.get('requesting_user_id', None)
'requesting_user_id': kwargs.get('requesting_user_id', None),
'archive_page': kwargs.get('archive_page', None),
}
if getattr(settings, 'TEST_DEBUG', False) and "NEWSBLUR_DIR" in self.feed_address:
@ -1213,7 +1291,7 @@ class Feed(models.Model):
feed = Feed.get_by_id(feed.pk)
if feed:
feed.last_update = datetime.datetime.utcnow()
feed.set_next_scheduled_update()
feed.set_next_scheduled_update(verbose=settings.DEBUG)
r.zadd('fetched_feeds_last_hour', { feed.pk: int(datetime.datetime.now().strftime('%s')) })
if not feed or original_feed_id != feed.pk:
@ -1481,7 +1559,9 @@ class Feed(models.Model):
feed = Feed.objects.get(pk=feed_id)
except Feed.DoesNotExist:
continue
if feed.active_subscribers <= 0 and (not feed.last_story_date or feed.last_story_date < month_ago):
if (feed.active_subscribers <= 0 and
(not feed.archive_subscribers or feed.archive_subscribers <= 0) and
(not feed.last_story_date or feed.last_story_date < month_ago)):
months_ago = 6
if feed.last_story_date:
months_ago = int((now - feed.last_story_date).days / 30.0)
@ -1501,6 +1581,12 @@ class Feed(models.Model):
@property
def story_cutoff(self):
return self.number_of_stories_to_store()
def number_of_stories_to_store(self, pre_archive=False):
if self.archive_subscribers and self.archive_subscribers > 0 and not pre_archive:
return 10000
cutoff = 500
if self.active_subscribers <= 0:
cutoff = 25
@ -1533,7 +1619,7 @@ class Feed(models.Model):
pipeline.get(feed_read_key)
read_stories_per_week = pipeline.execute()
read_stories_last_month = sum([int(rs) for rs in read_stories_per_week if rs])
if read_stories_last_month == 0:
if not pre_archive and read_stories_last_month == 0:
original_cutoff = cutoff
cutoff = min(cutoff, 10)
try:
@ -1545,13 +1631,50 @@ class Feed(models.Model):
if getattr(settings, 'OVERRIDE_STORY_COUNT_MAX', None):
cutoff = settings.OVERRIDE_STORY_COUNT_MAX
return cutoff
return int(cutoff)
def trim_feed(self, verbose=False, cutoff=None):
if not cutoff:
cutoff = self.story_cutoff
return MStory.trim_feed(feed=self, cutoff=cutoff, verbose=verbose)
stories_removed = MStory.trim_feed(feed=self, cutoff=cutoff, verbose=verbose)
if not self.fs_size_bytes:
self.count_fs_size_bytes()
return stories_removed
def count_fs_size_bytes(self):
    """
    Compute the total decompressed BSON size of every story in this feed.

    Iterates all MStory documents for the feed, inflates each zlib-compressed
    `*_z` content field back into its plain-text key (deleting the `_z`
    variant so the compressed bytes aren't double-counted), and sums the
    BSON-encoded byte length of each inflated document.

    Side effects: persists the total on `self.fs_size_bytes` and the story
    count on `self.archive_count`, then saves the feed.

    Returns:
        int: total size in bytes across all of the feed's stories.
    """
    stories = MStory.objects.filter(story_feed_id=self.pk)
    sum_bytes = 0
    count = 0
    for story in stories:
        count += 1
        # Work on the raw mongo dict so the serialized size can be measured.
        story_with_content = story.to_mongo()
        if story_with_content.get('story_content_z', None):
            story_with_content['story_content'] = zlib.decompress(story_with_content['story_content_z'])
            del story_with_content['story_content_z']
        if story_with_content.get('original_page_z', None):
            story_with_content['original_page'] = zlib.decompress(story_with_content['original_page_z'])
            del story_with_content['original_page_z']
        if story_with_content.get('original_text_z', None):
            story_with_content['original_text'] = zlib.decompress(story_with_content['original_text_z'])
            del story_with_content['original_text_z']
        if story_with_content.get('story_latest_content_z', None):
            story_with_content['story_latest_content'] = zlib.decompress(story_with_content['story_latest_content_z'])
            del story_with_content['story_latest_content_z']
        if story_with_content.get('story_original_content_z', None):
            story_with_content['story_original_content'] = zlib.decompress(story_with_content['story_original_content_z'])
            del story_with_content['story_original_content_z']
        # Measure the fully-inflated document as it would serialize to BSON.
        sum_bytes += len(bson.BSON.encode(story_with_content))
    self.fs_size_bytes = sum_bytes
    self.archive_count = count
    self.save()
    return sum_bytes
def purge_feed_stories(self, update=True):
MStory.purge_feed_stories(feed=self, cutoff=self.story_cutoff)
if update:
@ -1581,8 +1704,11 @@ class Feed(models.Model):
# print "db.stories.remove({\"story_feed_id\": %s, \"_id\": \"%s\"})" % (f, u)
def get_stories(self, offset=0, limit=25, force=False):
stories_db = MStory.objects(story_feed_id=self.pk)[offset:offset+limit]
def get_stories(self, offset=0, limit=25, order="newest", force=False):
    """
    Return a page of formatted stories for this feed.

    Args:
        offset, limit: slice bounds into the feed's story list.
        order: "newest" (default; the collection's default ordering) or
               "oldest" (ascending story_date).
        force: accepted for interface compatibility; unused in this body.

    Returns:
        list: stories formatted by `self.format_stories`.
    """
    # Bug fix: the default was the typo "neweat", which matched neither
    # branch and left stories_db undefined (NameError on every default
    # call). Any order other than "oldest" now falls back to newest-first.
    if order == "oldest":
        stories_db = MStory.objects(story_feed_id=self.pk).order_by('story_date')[offset:offset+limit]
    else:
        stories_db = MStory.objects(story_feed_id=self.pk)[offset:offset+limit]
    stories = self.format_stories(stories_db, self.pk)
    return stories
@ -2116,14 +2242,16 @@ class Feed(models.Model):
# print 'New/updated story: %s' % (story),
return story_in_system, story_has_changed
def get_next_scheduled_update(self, force=False, verbose=True, premium_speed=False):
def get_next_scheduled_update(self, force=False, verbose=True, premium_speed=False, pro_speed=False):
if self.min_to_decay and not force and not premium_speed:
return self.min_to_decay
from apps.notifications.models import MUserFeedNotification
if premium_speed:
self.active_premium_subscribers += 1
if pro_speed:
self.pro_subscribers += 1
spd = self.stories_last_month / 30.0
subs = (self.active_premium_subscribers +
@ -2204,13 +2332,22 @@ class Feed(models.Model):
# Twitter feeds get 2 hours minimum
if 'twitter' in self.feed_address:
total = max(total, 60*2)
# Pro subscribers get absolute minimum
if self.pro_subscribers and self.pro_subscribers >= 1:
if self.stories_last_month == 0:
total = min(total, 60)
else:
total = min(total, settings.PRO_MINUTES_BETWEEN_FETCHES)
if verbose:
logging.debug(" ---> [%-30s] Fetched every %s min - Subs: %s/%s/%s Stories/day: %s" % (
logging.debug(" ---> [%-30s] Fetched every %s min - Subs: %s/%s/%s/%s/%s Stories/day: %s" % (
self.log_title[:30], total,
self.num_subscribers,
self.active_subscribers,
self.active_premium_subscribers,
self.archive_subscribers,
self.pro_subscribers,
spd))
return total
@ -2258,7 +2395,7 @@ class Feed(models.Model):
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
if not self.num_subscribers:
logging.debug(' ---> [%-30s] Not scheduling feed fetch immediately, no subs.' % (self.log_title[:30]))
return
return self
if verbose:
logging.debug(' ---> [%-30s] Scheduling feed fetch immediately...' % (self.log_title[:30]))
@ -2738,52 +2875,36 @@ class MStory(mongo.Document):
def sync_redis(self, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
UNREAD_CUTOFF = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_STORY_HASHES)
feed = Feed.get_by_id(self.story_feed_id)
if self.id and self.story_date > UNREAD_CUTOFF:
if self.id and self.story_date > feed.unread_cutoff:
feed_key = 'F:%s' % self.story_feed_id
r.sadd(feed_key, self.story_hash)
r.expire(feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.sadd(feed_key, self.story_hash)
# r2.expire(feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
r.expire(feed_key, feed.days_of_story_hashes*24*60*60)
r.zadd('z' + feed_key, { self.story_hash: time.mktime(self.story_date.timetuple()) })
r.expire('z' + feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.zadd('z' + feed_key, self.story_hash, time.mktime(self.story_date.timetuple()))
# r2.expire('z' + feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
r.expire('z' + feed_key, feed.days_of_story_hashes*24*60*60)
def remove_from_redis(self, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
if self.id:
r.srem('F:%s' % self.story_feed_id, self.story_hash)
# r2.srem('F:%s' % self.story_feed_id, self.story_hash)
r.zrem('zF:%s' % self.story_feed_id, self.story_hash)
# r2.zrem('zF:%s' % self.story_feed_id, self.story_hash)
@classmethod
def sync_feed_redis(cls, story_feed_id):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
UNREAD_CUTOFF = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_STORY_HASHES)
feed = Feed.get_by_id(story_feed_id)
stories = cls.objects.filter(story_feed_id=story_feed_id, story_date__gte=UNREAD_CUTOFF)
stories = cls.objects.filter(story_feed_id=story_feed_id, story_date__gte=feed.unread_cutoff)
r.delete('F:%s' % story_feed_id)
# r2.delete('F:%s' % story_feed_id)
r.delete('zF:%s' % story_feed_id)
# r2.delete('zF:%s' % story_feed_id)
logging.info(" ---> [%-30s] ~FMSyncing ~SB%s~SN stories to redis" % (feed and feed.log_title[:30] or story_feed_id, stories.count()))
p = r.pipeline()
# p2 = r2.pipeline()
for story in stories:
story.sync_redis(r=p)
p.execute()
# p2.execute()
def count_comments(self):
from apps.social.models import MSharedStory

View file

@ -7,6 +7,7 @@ from requests.packages.urllib3.exceptions import LocationParseError
from socket import error as SocketError
from mongoengine.queryset import NotUniqueError
from lxml.etree import ParserError
from vendor.readability.readability import Unparseable
from utils import log as logging
from utils.feed_functions import timelimit, TimeoutError
from OpenSSL.SSL import Error as OpenSSLError
@ -137,7 +138,7 @@ class TextImporter:
positive_keywords="post, entry, postProp, article, postContent, postField")
try:
content = original_text_doc.summary(html_partial=True)
except (ParserError) as e:
except (ParserError, Unparseable) as e:
logging.user(self.request, "~SN~FRFailed~FY to fetch ~FGoriginal text~FY: %s" % e)
return

View file

@ -1,5 +1,6 @@
import datetime
import base64
import redis
from urllib.parse import urlparse
from utils import log as logging
from django.shortcuts import get_object_or_404, render
@ -80,7 +81,7 @@ def load_feed_favicon(request, feed_id):
not_found = True
if not_found or not feed_icon.data:
return HttpResponseRedirect(settings.MEDIA_URL + 'img/icons/circular/world.png')
return HttpResponseRedirect(settings.MEDIA_URL + 'img/icons/nouns/world.svg')
icon_data = base64.b64decode(feed_icon.data)
return HttpResponse(icon_data, content_type='image/png')
@ -198,6 +199,8 @@ def assemble_statistics(user, feed_id):
stats['last_update'] = relative_timesince(feed.last_update)
stats['next_update'] = relative_timeuntil(feed.next_scheduled_update)
stats['push'] = feed.is_push
stats['fs_size_bytes'] = feed.fs_size_bytes
stats['archive_count'] = feed.archive_count
if feed.is_push:
try:
stats['push_expires'] = localtime_for_timezone(feed.push.lease_expires,
@ -501,16 +504,35 @@ def exception_change_feed_link(request):
@login_required
def status(request):
if not request.user.is_staff:
if not request.user.is_staff and not settings.DEBUG:
logging.user(request, "~SKNON-STAFF VIEWING RSS FEEDS STATUS!")
assert False
return HttpResponseForbidden()
minutes = int(request.GET.get('minutes', 1))
now = datetime.datetime.now()
hour_ago = now - datetime.timedelta(minutes=minutes)
feeds = Feed.objects.filter(last_update__gte=hour_ago).order_by('-last_update')
username = request.GET.get('user', '') or request.GET.get('username', '')
if username:
user = User.objects.get(username=username)
else:
user = request.user
usersubs = UserSubscription.objects.filter(user=user)
feed_ids = usersubs.values('feed_id')
if minutes > 0:
hour_ago = now + datetime.timedelta(minutes=minutes)
feeds = Feed.objects.filter(pk__in=feed_ids, next_scheduled_update__lte=hour_ago).order_by('next_scheduled_update')
else:
hour_ago = now + datetime.timedelta(minutes=minutes)
feeds = Feed.objects.filter(pk__in=feed_ids, last_update__gte=hour_ago).order_by('-last_update')
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
queues = {
'tasked_feeds': r.zcard('tasked_feeds'),
'queued_feeds': r.scard('queued_feeds'),
'scheduled_updates': r.zcard('scheduled_updates'),
}
return render(request, 'rss_feeds/status.xhtml', {
'feeds': feeds
'feeds': feeds,
'queues': queues
})
@json.json_view

View file

@ -1355,7 +1355,9 @@ def shared_stories_rss_feed(request, user_id, username=None):
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
raise Http404
limit = 25
offset = request.GET.get('page', 0) * limit
username = username and username.lower()
profile = MSocialProfile.get_user(user.pk)
params = {'username': profile.username_slug, 'user_id': user.pk}
@ -1383,7 +1385,7 @@ def shared_stories_rss_feed(request, user_id, username=None):
)
rss = feedgenerator.Atom1Feed(**data)
shared_stories = MSharedStory.objects.filter(user_id=user.pk).order_by('-shared_date')[:25]
shared_stories = MSharedStory.objects.filter(user_id=user.pk).order_by('-shared_date')[offset:offset+limit]
for shared_story in shared_stories:
feed = Feed.get_by_id(shared_story.story_feed_id)
content = render_to_string('social/rss_story.xhtml', {

View file

@ -3,6 +3,7 @@ import mongoengine as mongo
import urllib.request, urllib.error, urllib.parse
import redis
import dateutil
import requests
from django.conf import settings
from apps.social.models import MSharedStory
from apps.profile.models import Profile
@ -298,8 +299,8 @@ class MFeedback(mongo.Document):
def collect_feedback(cls):
seen_posts = set()
try:
data = urllib.request.urlopen('https://forum.newsblur.com/posts.json').read()
except (urllib.error.HTTPError) as e:
data = requests.get('https://forum.newsblur.com/posts.json', timeout=3).content
except (urllib.error.HTTPError, requests.exceptions.ConnectTimeout) as e:
logging.debug(" ***> Failed to collect feedback: %s" % e)
return
data = json.decode(data).get('latest_posts', "")

View file

@ -0,0 +1,37 @@
---
layout: post
title: "2022 redesign: new dashboard layout, refreshed stories and story titles, and entirely redrawn icons"
tags: ['web']
---
The launch of the new [Premium Archive subscription tier](/2022/07/01/premium-archive-subscription/) also includes the 2022 redesign. You'll see a third dashboard layout which stretches out your dashboard rivers across the width of the screen.
<img src="/assets/premium-archive-dashboard-comfortable.png" style="width: calc(140%);margin: 12px 0 12px calc(-20%);max-width: none;border: none">
The latest redesign style has more accommodations for spacing and padding around each story title element. The result is a cleaner story title with easier-to-read headlines. The author has been moved and restyled to be next to the story date. Favicons and unread status indicators have been swapped, and font sizes, colors, and weights have been adjusted.
<img src="/assets/premium-archive-dashboard-compact.png" style="width: calc(140%);margin: 12px 0 12px calc(-20%);max-width: none;border: none">
If you find the interface to be too airy, there is a setting in the main Manage menu allowing you to switch between Comfortable and Compact. The compact interface is denser than before, giving power users a highly detailed view.
Transitions have also been added to help you feel the difference. And there are new animations during many of the transitions that accompany changing settings.
<p>
<video autoplay loop playsinline muted width="500" style="width: 500px;border: 2px solid rgba(0,0,0,0.1)">
<source src="/assets/premium-archive-grid.mp4" type="video/mp4">
</video>
</p>
And lastly, this redesign comes with a suite of all new icons. The goal with this icon redesign is to bring a consistent weight to each icon as well as vectorize them with SVG so they look good at all resolutions.
<img src="/assets/premium-archive-manage-menu.png" style="width: 275px;border: 1px solid #A0A0A0;margin: 24px auto;display: block;">
A notable icon change is the unread indicator, which now has different size icons for both unread stories and focus stories, giving focus stories more depth.
<img src="/assets/premium-archive-unread-dark.png" style="width: 375px;border: 1px solid #A0A0A0;margin: 24px auto;display: block;">
Here's a screenshot that's only possible with the new premium archive, complete with backfilled blog post from the year 2000, ready to be marked as unread.
<img src="/assets/premium-archive-unread.png" style="width: 100%;border: 1px solid #A0A0A0;margin: 24px auto;display: block;">
I tried to find every icon, so if you spot a dialog or menu that you'd like to see given some more love, reach out on the support forum.

View file

@ -0,0 +1,38 @@
---
layout: post
title: NewsBlur Premium Archive subscription keeps all of your stories searchable, shareable, and unread forever
tags: ['web', 'ios', 'android']
---
For $99/year every story from every site you subscribe to will stay in NewsBlur's archive. This new premium tier also allows you to mark any story as unread as well as choose when stories are automatically marked as read. You can now have full control of your story archive, letting you search, share, and read stories forever without having to worry about them being deleted.
The NewsBlur Premium Archive subscription offers you the following:
* <img src="/assets/icons8/icons8-bursts-100.png" style="width: 16px;margin: 0 6px 0 0;display: inline-block;"> Everything in the premium subscription, of course
* <img src="/assets/icons8/icons8-relax-with-book-100.png" style="width: 16px;margin: 0 6px 0 0;display: inline-block;"> Choose when stories are automatically marked as read
* <img src="/assets/icons8/icons8-filing-cabinet-100.png" style="width: 16px;margin: 0 6px 0 0;display: inline-block;"> Every story from every site is archived and searchable forever
* <img src="/assets/icons8/icons8-quadcopter-100.png" style="width: 16px;margin: 0 6px 0 0;display: inline-block;"> Feeds that support paging are back-filled in for a complete archive
* <img src="/assets/icons8/icons8-rss-100.png" style="width: 16px;margin: 0 6px 0 0;display: inline-block;"> Export trained stories from folders as RSS feeds
* <img src="/assets/icons8/icons8-calendar-100.png" style="width: 16px;margin: 0 6px 0 0;display: inline-block;"> Stories can stay unread forever
You can now enjoy a new preference for exactly when stories are marked as read:
<img src="/assets/premium-archive-mark-read-date.png" style="width: 100%;border: 1px solid #A0A0A0;margin: 24px auto;display: block;">
A technical note about the backfilling of your archive:
<blockquote>
<p>NewsBlur uses two techniques to retrieve older stories that are no longer in the RSS feed. The first strategy is to append `?page=2` and `?paged=2` to the RSS feed and see if we're able to blindly iterate through the blog's archive. For WordPress and a few other CMSs, this works great and gives us a full archive. </p>
<p>A second technique is to use <a href="https://datatracker.ietf.org/doc/html/rfc5005">RFC 5005</a>, which supports links embedded inside the RSS feed to denote next and previous pages of an archive.</p>
</blockquote>
NewsBlur attempts all of these techniques on every single feed you've subscribed to, and when it's done backfilling stories, you'll receive an email showing you how big your archive grew during this backfill process.
The launch of the new Premium Archive subscription tier also contains the [2022 redesign](/2022/07/01/dashboard-redesign-2022/), which includes a new dashboard layout, a refreshed design for story titles and feed title, and all new icons.
Here's a screenshot that's only possible with the new premium archive, complete with backfilled blog post from the year 2000, ready to be marked as unread.
<img src="/assets/premium-archive-unread.png" style="width: 100%;border: 1px solid #A0A0A0;margin: 24px auto;display: block;">
How's that for an archive?

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="A New Logo for a New Blog" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2011/03/15/a-new-logo-for-a-new-blog/","@type":"BlogPosting","headline":"A New Logo for a New Blog","dateModified":"2011-03-15T08:39:00-04:00","datePublished":"2011-03-15T08:39:00-04:00","description":"Weve come a long way, readers. What started as a fun project to scratch an itch has become a fun project that pays for its ever-increasing self. This week Im going to show how motivated I am about turning NewsBlur into a serious blog reader. And it starts with a collection of a circles:","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2011/03/15/a-new-logo-for-a-new-blog/"},"@context":"https://schema.org"}</script>
{"headline":"A New Logo for a New Blog","dateModified":"2011-03-15T08:39:00-04:00","datePublished":"2011-03-15T08:39:00-04:00","url":"https://blog.newsblur.com/2011/03/15/a-new-logo-for-a-new-blog/","@type":"BlogPosting","description":"Weve come a long way, readers. What started as a fun project to scratch an itch has become a fun project that pays for its ever-increasing self. This week Im going to show how motivated I am about turning NewsBlur into a serious blog reader. And it starts with a collection of a circles:","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2011/03/15/a-new-logo-for-a-new-blog/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Explaining Intelligence" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2011/04/01/explaining-intelligence/","@type":"BlogPosting","headline":"Explaining Intelligence","dateModified":"2011-04-01T11:11:33-04:00","datePublished":"2011-04-01T11:11:33-04:00","description":"If youre not using intelligence classifiers, youre only getting half the value out of NewsBlur. ","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2011/04/01/explaining-intelligence/"},"@context":"https://schema.org"}</script>
{"headline":"Explaining Intelligence","dateModified":"2011-04-01T11:11:33-04:00","datePublished":"2011-04-01T11:11:33-04:00","url":"https://blog.newsblur.com/2011/04/01/explaining-intelligence/","@type":"BlogPosting","description":"If youre not using intelligence classifiers, youre only getting half the value out of NewsBlur. ","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2011/04/01/explaining-intelligence/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Where We Are in April" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2011/04/23/where-we-are-in-april/","@type":"BlogPosting","headline":"Where We Are in April","dateModified":"2011-04-23T14:57:02-04:00","datePublished":"2011-04-23T14:57:02-04:00","description":"Hi readers, I want to take a moment to share what Im working on for the month of April:","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2011/04/23/where-we-are-in-april/"},"@context":"https://schema.org"}</script>
{"headline":"Where We Are in April","dateModified":"2011-04-23T14:57:02-04:00","datePublished":"2011-04-23T14:57:02-04:00","url":"https://blog.newsblur.com/2011/04/23/where-we-are-in-april/","@type":"BlogPosting","description":"Hi readers, I want to take a moment to share what Im working on for the month of April:","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2011/04/23/where-we-are-in-april/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Make your own feed reader with NewsBlurs new API" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2011/04/26/make-your-own-feed-reader-with-newsblurs-new-api/","@type":"BlogPosting","headline":"Make your own feed reader with NewsBlurs new API","dateModified":"2011-04-26T06:41:00-04:00","datePublished":"2011-04-26T06:41:00-04:00","description":"Please vote for this blog post on Hacker News: http://news.ycombinator.com/item?id=2485377.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2011/04/26/make-your-own-feed-reader-with-newsblurs-new-api/"},"@context":"https://schema.org"}</script>
{"headline":"Make your own feed reader with NewsBlurs new API","dateModified":"2011-04-26T06:41:00-04:00","datePublished":"2011-04-26T06:41:00-04:00","url":"https://blog.newsblur.com/2011/04/26/make-your-own-feed-reader-with-newsblurs-new-api/","@type":"BlogPosting","description":"Please vote for this blog post on Hacker News: http://news.ycombinator.com/item?id=2485377.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2011/04/26/make-your-own-feed-reader-with-newsblurs-new-api/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Blar: A new Android app for NewsBlur" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2011/08/09/blar-a-new-android-app-for-newsblur/","@type":"BlogPosting","headline":"Blar: A new Android app for NewsBlur","dateModified":"2011-08-09T09:44:00-04:00","datePublished":"2011-08-09T09:44:00-04:00","description":"This Summer is shaping up to be the season for mobile apps. Blar, a new Android client for NewsBlur, has just been released. Its available on the Android Market here: https://market.android.com/details?id=bitwrit.Blar. It is created by Harris Munir, who you can contact through his site.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2011/08/09/blar-a-new-android-app-for-newsblur/"},"@context":"https://schema.org"}</script>
{"headline":"Blar: A new Android app for NewsBlur","dateModified":"2011-08-09T09:44:00-04:00","datePublished":"2011-08-09T09:44:00-04:00","url":"https://blog.newsblur.com/2011/08/09/blar-a-new-android-app-for-newsblur/","@type":"BlogPosting","description":"This Summer is shaping up to be the season for mobile apps. Blar, a new Android client for NewsBlur, has just been released. Its available on the Android Market here: https://market.android.com/details?id=bitwrit.Blar. It is created by Harris Munir, who you can contact through his site.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2011/08/09/blar-a-new-android-app-for-newsblur/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Customizing the reader, step 1: story titles" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2011/09/30/customizing-the-reader-step-1-story-titles/","@type":"BlogPosting","headline":"Customizing the reader, step 1: story titles","dateModified":"2011-09-30T09:40:30-04:00","datePublished":"2011-09-30T09:40:30-04:00","description":"The iPhone app is now only a few days away from launching. But it took 3 weeks of sitting around in the App Store approval queue before getting here. During that time, I started working on the new customizations that folks have been asking for.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2011/09/30/customizing-the-reader-step-1-story-titles/"},"@context":"https://schema.org"}</script>
{"headline":"Customizing the reader, step 1: story titles","dateModified":"2011-09-30T09:40:30-04:00","datePublished":"2011-09-30T09:40:30-04:00","url":"https://blog.newsblur.com/2011/09/30/customizing-the-reader-step-1-story-titles/","@type":"BlogPosting","description":"The iPhone app is now only a few days away from launching. But it took 3 weeks of sitting around in the App Store approval queue before getting here. During that time, I started working on the new customizations that folks have been asking for.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2011/09/30/customizing-the-reader-step-1-story-titles/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="A Social Feed Reader" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2011/10/26/a-social-feed-reader/","@type":"BlogPosting","headline":"A Social Feed Reader","dateModified":"2011-10-26T11:41:23-04:00","datePublished":"2011-10-26T11:41:23-04:00","description":"NewsBlur was released exactly one year ago. You can read the initial reaction on Hacker News: http://news.ycombinator.com/item?id=1834305. Since then, so much has changed and all for the better. Usage is up—way, way up. Premium users are helping the site run. Load times are approaching the goal of less than 100 ms (0.10 sec) per page. In short, things couldnt be better.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2011/10/26/a-social-feed-reader/"},"@context":"https://schema.org"}</script>
{"headline":"A Social Feed Reader","dateModified":"2011-10-26T11:41:23-04:00","datePublished":"2011-10-26T11:41:23-04:00","url":"https://blog.newsblur.com/2011/10/26/a-social-feed-reader/","@type":"BlogPosting","description":"NewsBlur was released exactly one year ago. You can read the initial reaction on Hacker News: http://news.ycombinator.com/item?id=1834305. Since then, so much has changed and all for the better. Usage is up—way, way up. Premium users are helping the site run. Load times are approaching the goal of less than 100 ms (0.10 sec) per page. In short, things couldnt be better.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2011/10/26/a-social-feed-reader/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="2011: Year in Review" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2012/01/16/2011-year-in-review/","@type":"BlogPosting","headline":"2011: Year in Review","dateModified":"2012-01-16T20:47:00-05:00","datePublished":"2012-01-16T20:47:00-05:00","description":"Twelve months can be a quick flyby if you dont stop to write everything down. Luckily, a habit Ive kept since July 2009, when I started recording monthly goals for my project, is still going strong.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/01/16/2011-year-in-review/"},"@context":"https://schema.org"}</script>
{"headline":"2011: Year in Review","dateModified":"2012-01-16T20:47:00-05:00","datePublished":"2012-01-16T20:47:00-05:00","url":"https://blog.newsblur.com/2012/01/16/2011-year-in-review/","@type":"BlogPosting","description":"Twelve months can be a quick flyby if you dont stop to write everything down. Luckily, a habit Ive kept since July 2009, when I started recording monthly goals for my project, is still going strong.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/01/16/2011-year-in-review/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="SSL &amp; Stripe.js" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2012/02/29/ssl-stripejs/","@type":"BlogPosting","headline":"SSL &amp; Stripe.js","dateModified":"2012-02-29T15:45:00-05:00","datePublished":"2012-02-29T15:45:00-05:00","description":"Two big announcements today:","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/02/29/ssl-stripejs/"},"@context":"https://schema.org"}</script>
{"headline":"SSL &amp; Stripe.js","dateModified":"2012-02-29T15:45:00-05:00","datePublished":"2012-02-29T15:45:00-05:00","url":"https://blog.newsblur.com/2012/02/29/ssl-stripejs/","@type":"BlogPosting","description":"Two big announcements today:","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/02/29/ssl-stripejs/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="From project to profession: going indie on NewsBlur" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2012/03/01/going-full-time/","@type":"BlogPosting","headline":"From project to profession: going indie on NewsBlur","dateModified":"2012-03-01T11:48:00-05:00","datePublished":"2012-03-01T11:48:00-05:00","description":"Exactly four months ago, Jason Kottke found my project, NewsBlur, and tweeted:","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/03/01/going-full-time/"},"@context":"https://schema.org"}</script>
{"headline":"From project to profession: going indie on NewsBlur","dateModified":"2012-03-01T11:48:00-05:00","datePublished":"2012-03-01T11:48:00-05:00","url":"https://blog.newsblur.com/2012/03/01/going-full-time/","@type":"BlogPosting","description":"Exactly four months ago, Jason Kottke found my project, NewsBlur, and tweeted:","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/03/01/going-full-time/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="New mobile app for NewsBlur: Web Feeds for Nokia MeeGo" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2012/03/14/mobile-app-web-feeds-nokia-meego/","@type":"BlogPosting","headline":"New mobile app for NewsBlur: Web Feeds for Nokia MeeGo","dateModified":"2012-03-14T13:14:00-04:00","datePublished":"2012-03-14T13:14:00-04:00","description":"And what a gorgeous mobile app it is. App developer Róbert Márki just released Web Feeds, the first NewsBlur app for Nokia MeeGo. Take a look at these screenshots:","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/03/14/mobile-app-web-feeds-nokia-meego/"},"@context":"https://schema.org"}</script>
{"headline":"New mobile app for NewsBlur: Web Feeds for Nokia MeeGo","dateModified":"2012-03-14T13:14:00-04:00","datePublished":"2012-03-14T13:14:00-04:00","url":"https://blog.newsblur.com/2012/03/14/mobile-app-web-feeds-nokia-meego/","@type":"BlogPosting","description":"And what a gorgeous mobile app it is. App developer Róbert Márki just released Web Feeds, the first NewsBlur app for Nokia MeeGo. Take a look at these screenshots:","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/03/14/mobile-app-web-feeds-nokia-meego/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Knight News Challenge: NewsBlur" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2012/03/16/knight-news-challenge/","@type":"BlogPosting","headline":"Knight News Challenge: NewsBlur","dateModified":"2012-03-16T11:29:00-04:00","datePublished":"2012-03-16T11:29:00-04:00","description":"Knight News Challenge: NewsBlur","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/03/16/knight-news-challenge/"},"@context":"https://schema.org"}</script>
{"headline":"Knight News Challenge: NewsBlur","dateModified":"2012-03-16T11:29:00-04:00","datePublished":"2012-03-16T11:29:00-04:00","url":"https://blog.newsblur.com/2012/03/16/knight-news-challenge/","@type":"BlogPosting","description":"Knight News Challenge: NewsBlur","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/03/16/knight-news-challenge/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Building real-time feed updates for NewsBlur with Redis and WebSockets" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2012/04/02/building-real-time-feed-updates-for-newsblur/","@type":"BlogPosting","headline":"Building real-time feed updates for NewsBlur with Redis and WebSockets","dateModified":"2012-04-02T17:52:00-04:00","datePublished":"2012-04-02T17:52:00-04:00","description":"Today, NewsBlur is going real-time. Blogs using the PubSubHubbub protocol (PuSH), which includes all Blogger, Tumblr, and many Wordpress blogs, will instantaneously show new updates to subscribers on NewsBlur. Making this happen, while not for the faint of heart, was straight-forward enough that Im sharing the recipe I used to get everything hooked up and running smoothly.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/04/02/building-real-time-feed-updates-for-newsblur/"},"@context":"https://schema.org"}</script>
{"headline":"Building real-time feed updates for NewsBlur with Redis and WebSockets","dateModified":"2012-04-02T17:52:00-04:00","datePublished":"2012-04-02T17:52:00-04:00","url":"https://blog.newsblur.com/2012/04/02/building-real-time-feed-updates-for-newsblur/","@type":"BlogPosting","description":"Today, NewsBlur is going real-time. Blogs using the PubSubHubbub protocol (PuSH), which includes all Blogger, Tumblr, and many Wordpress blogs, will instantaneously show new updates to subscribers on NewsBlur. Making this happen, while not for the faint of heart, was straight-forward enough that Im sharing the recipe I used to get everything hooked up and running smoothly.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/04/02/building-real-time-feed-updates-for-newsblur/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Introducing Blurblogs, Roy, and Y Combinator" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2012/07/30/introducing-blurblogs-roy-and-y-combinator/","@type":"BlogPosting","headline":"Introducing Blurblogs, Roy, and Y Combinator","dateModified":"2012-07-30T11:22:00-04:00","datePublished":"2012-07-30T11:22:00-04:00","description":"What a difference a few months make. NewsBlur was a side-project of mine for two years. In March of this year, I committed myself full-time and went from developing NewsBlur almost entirely on the NYC subway to writing code every waking minute of the day. And now there are three big announcements to make. # 1. NewsBlur is now a *social* news reader The big news of the day is that you can now share stories on NewsBlur. When you share a story, your comments and the original story are posted to your blurblog. Your blurblog is a simple and customizable website. People can comment and reply directly on your blurblog, and you can follow your friends to read the news stories and blog posts that they care about.Since youre good at picking your friends, and your friends are good at picking their friends, you will see friends of friends show up, expanding your network with shared stories that you will enjoy. Its a new way of sharing the news. And because NewsBlur is already an easy to use news reader, its simple to find and share stories that your friends will care about. Every NewsBlur user has their own blurblog. All you have to do is signup for an account on www.newsblur.com and share interesting stories. # 2. Y Combinator For those of you who work with computer science, you may know that a Y-combinator generalizes recursion, abstracting its implementation, and thereby separating it from the actual work of the function in question.[^1] Im pleased as punch to announce an investment in NewsBlur by Y Combinator, the investment firm. 
Over the past two months, weve been humbled by the roster of experienced partners giving us candid advice. Its their tough love that is the catalyst for the next few months of transitioning NewsBlur from side project to world-class news reader. Expect NewsBlur to become simpler and more refined. # 3. Introducing Roy Yang When Y Combinator accepted me as a solo founder, their first piece of advice was to find a co-founder. Looking at every successful startup, a common pattern emerges. Every great startup has multiple people carrying the load when the company takes off. There is one person on this planet that I would trust as a co-founder. His name is Roy Yang and we have been friends since we met in New York four years ago. We worked together for nearly two years at Daylife, another news startup. I attended his wedding last year in Mexico, and he was the only person I called when I knew I needed somebody talented, focused, and able to complement me on a project that demands enormous time and effort.Roy is now responsible for both iOS apps and is instrumental in challenging me when I think Im right and am clearly not. Hes got the patience of a monk and the determination of a true New Yorker. Follow Roys blurblog to keep up with him. # A glimpse into the future of NewsBlur This summer marks the beginning of NewsBlur as a full-time startup. Look forward to new mobile apps, new designs, and new features. Heres a quick idea of what were working on for the next few weeks:","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/07/30/introducing-blurblogs-roy-and-y-combinator/"},"@context":"https://schema.org"}</script>
{"headline":"Introducing Blurblogs, Roy, and Y Combinator","dateModified":"2012-07-30T11:22:00-04:00","datePublished":"2012-07-30T11:22:00-04:00","url":"https://blog.newsblur.com/2012/07/30/introducing-blurblogs-roy-and-y-combinator/","@type":"BlogPosting","description":"What a difference a few months make. NewsBlur was a side-project of mine for two years. In March of this year, I committed myself full-time and went from developing NewsBlur almost entirely on the NYC subway to writing code every waking minute of the day. And now there are three big announcements to make. # 1. NewsBlur is now a *social* news reader The big news of the day is that you can now share stories on NewsBlur. When you share a story, your comments and the original story are posted to your blurblog. Your blurblog is a simple and customizable website. People can comment and reply directly on your blurblog, and you can follow your friends to read the news stories and blog posts that they care about.Since youre good at picking your friends, and your friends are good at picking their friends, you will see friends of friends show up, expanding your network with shared stories that you will enjoy. Its a new way of sharing the news. And because NewsBlur is already an easy to use news reader, its simple to find and share stories that your friends will care about. Every NewsBlur user has their own blurblog. All you have to do is signup for an account on www.newsblur.com and share interesting stories. # 2. Y Combinator For those of you who work with computer science, you may know that a Y-combinator generalizes recursion, abstracting its implementation, and thereby separating it from the actual work of the function in question.[^1] Im pleased as punch to announce an investment in NewsBlur by Y Combinator, the investment firm. Over the past two months, weve been humbled by the roster of experienced partners giving us candid advice. 
Its their tough love that is the catalyst for the next few months of transitioning NewsBlur from side project to world-class news reader. Expect NewsBlur to become simpler and more refined. # 3. Introducing Roy Yang When Y Combinator accepted me as a solo founder, their first piece of advice was to find a co-founder. Looking at every successful startup, a common pattern emerges. Every great startup has multiple people carrying the load when the company takes off. There is one person on this planet that I would trust as a co-founder. His name is Roy Yang and we have been friends since we met in New York four years ago. We worked together for nearly two years at Daylife, another news startup. I attended his wedding last year in Mexico, and he was the only person I called when I knew I needed somebody talented, focused, and able to complement me on a project that demands enormous time and effort.Roy is now responsible for both iOS apps and is instrumental in challenging me when I think Im right and am clearly not. Hes got the patience of a monk and the determination of a true New Yorker. Follow Roys blurblog to keep up with him. # A glimpse into the future of NewsBlur This summer marks the beginning of NewsBlur as a full-time startup. Look forward to new mobile apps, new designs, and new features. Heres a quick idea of what were working on for the next few weeks:","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/07/30/introducing-blurblogs-roy-and-y-combinator/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Take it to the couch with the NewsBlur iPad app" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2012/09/05/newsblur-ipad-app/","@type":"BlogPosting","headline":"Take it to the couch with the NewsBlur iPad app","dateModified":"2012-09-05T08:21:00-04:00","datePublished":"2012-09-05T08:21:00-04:00","description":"Theres no wrong way to hold an iPad loaded with the brand new NewsBlur iPad app, provided its facing you and turned on.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/09/05/newsblur-ipad-app/"},"@context":"https://schema.org"}</script>
{"headline":"Take it to the couch with the NewsBlur iPad app","dateModified":"2012-09-05T08:21:00-04:00","datePublished":"2012-09-05T08:21:00-04:00","url":"https://blog.newsblur.com/2012/09/05/newsblur-ipad-app/","@type":"BlogPosting","description":"Theres no wrong way to hold an iPad loaded with the brand new NewsBlur iPad app, provided its facing you and turned on.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/09/05/newsblur-ipad-app/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Giving Life to “The People Have Spoken”" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2012/10/01/giving-life-to-popular/","@type":"BlogPosting","headline":"Giving Life to “The People Have Spoken”","dateModified":"2012-10-01T17:52:00-04:00","datePublished":"2012-10-01T17:52:00-04:00","description":"For a long time, weve maintained The People Have Spoken, the blog of whats popular on NewsBlur, with a simple algorithm that measured how often something was shared. While thats a great way to see the stuff our users really like (Randall Munroe would probably win the NewsBlur equivalent of the Oscar), it makes it harder for everyone to find new stuff that they might not have seen or heard of before. So weve decided to throw a human into the mix. Ill let Allie introduce herself in her own words:","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/10/01/giving-life-to-popular/"},"@context":"https://schema.org"}</script>
{"headline":"Giving Life to “The People Have Spoken”","dateModified":"2012-10-01T17:52:00-04:00","datePublished":"2012-10-01T17:52:00-04:00","url":"https://blog.newsblur.com/2012/10/01/giving-life-to-popular/","@type":"BlogPosting","description":"For a long time, weve maintained The People Have Spoken, the blog of whats popular on NewsBlur, with a simple algorithm that measured how often something was shared. While thats a great way to see the stuff our users really like (Randall Munroe would probably win the NewsBlur equivalent of the Oscar), it makes it harder for everyone to find new stuff that they might not have seen or heard of before. So weve decided to throw a human into the mix. Ill let Allie introduce herself in her own words:","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/10/01/giving-life-to-popular/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Do the robot: the official NewsBlur Android app is here" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2012/10/18/do-the-robot-the-official-newsblur-android-app-is/","@type":"BlogPosting","headline":"Do the robot: the official NewsBlur Android app is here","dateModified":"2012-10-18T11:47:00-04:00","datePublished":"2012-10-18T11:47:00-04:00","description":"Youve been bugging us for two years about it, and now its finally here: NewsBlurs expansion to mobile is complete, with our first-ever official Android app ready and waiting for your device. Thanks to the gifts of money and time from Y Combinator and the programming prowess of Papermill creator Ryan Bateman (otherwise known as @secretsquirrel), you can now join your iOS brethren on the couch with your daily dose of RSS goodness. Level of accompanying smugness is up to you.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/10/18/do-the-robot-the-official-newsblur-android-app-is/"},"@context":"https://schema.org"}</script>
{"headline":"Do the robot: the official NewsBlur Android app is here","dateModified":"2012-10-18T11:47:00-04:00","datePublished":"2012-10-18T11:47:00-04:00","url":"https://blog.newsblur.com/2012/10/18/do-the-robot-the-official-newsblur-android-app-is/","@type":"BlogPosting","description":"Youve been bugging us for two years about it, and now its finally here: NewsBlurs expansion to mobile is complete, with our first-ever official Android app ready and waiting for your device. Thanks to the gifts of money and time from Y Combinator and the programming prowess of Papermill creator Ryan Bateman (otherwise known as @secretsquirrel), you can now join your iOS brethren on the couch with your daily dose of RSS goodness. Level of accompanying smugness is up to you.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/10/18/do-the-robot-the-official-newsblur-android-app-is/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Extreme makeover: NewsBlur iOS app edition" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2012/10/26/ios-update-1-6/","@type":"BlogPosting","headline":"Extreme makeover: NewsBlur iOS app edition","dateModified":"2012-10-26T14:37:00-04:00","datePublished":"2012-10-26T14:37:00-04:00","description":"Now that NewsBlur has joined the wonderful world of Android, were turning our attention back to the iOS app, with a full-scale feature parity push for the new iPhone 5 and iOS 6. Its perfect for catching up on your reading when you realize that Apple Maps has sent you to the wrong address. Again.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/10/26/ios-update-1-6/"},"@context":"https://schema.org"}</script>
{"headline":"Extreme makeover: NewsBlur iOS app edition","dateModified":"2012-10-26T14:37:00-04:00","datePublished":"2012-10-26T14:37:00-04:00","url":"https://blog.newsblur.com/2012/10/26/ios-update-1-6/","@type":"BlogPosting","description":"Now that NewsBlur has joined the wonderful world of Android, were turning our attention back to the iOS app, with a full-scale feature parity push for the new iPhone 5 and iOS 6. Its perfect for catching up on your reading when you realize that Apple Maps has sent you to the wrong address. Again.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/10/26/ios-update-1-6/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Time for some free NewsBlur swag!" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2012/10/29/free-newsblur-swag-time/","@type":"BlogPosting","headline":"Time for some free NewsBlur swag!","dateModified":"2012-10-29T15:38:00-04:00","datePublished":"2012-10-29T15:38:00-04:00","description":"Thats right, t-shirts, stickers, buttons, and magnets. Ive got a whole lot of good stuff to send out, so give me some critical info and Ill get you hooked up with the latest in startup love.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/10/29/free-newsblur-swag-time/"},"@context":"https://schema.org"}</script>
{"headline":"Time for some free NewsBlur swag!","dateModified":"2012-10-29T15:38:00-04:00","datePublished":"2012-10-29T15:38:00-04:00","url":"https://blog.newsblur.com/2012/10/29/free-newsblur-swag-time/","@type":"BlogPosting","description":"Thats right, t-shirts, stickers, buttons, and magnets. Ive got a whole lot of good stuff to send out, so give me some critical info and Ill get you hooked up with the latest in startup love.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/10/29/free-newsblur-swag-time/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="The sharing bookmarklet: bringing your online explorations to NewsBlur" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2012/12/17/sharing-bookmarklet/","@type":"BlogPosting","headline":"The sharing bookmarklet: bringing your online explorations to NewsBlur","dateModified":"2012-12-17T12:04:00-05:00","datePublished":"2012-12-17T12:04:00-05:00","description":"There are lots of reasons not to post a cool article youve seen to your blurblog. Maybe you already follow too many blogs, and dont have room to add any more to your feed (in which case, may we humbly recommend a Premium account?) Maybe you dont want everyone to know just how crazy youve gotten about jai-alai or aerotrekking or My Little Pony: Friendship Is Magic. Or maybe you found a cool article on Facebook or Twitter or through an e-mail from a friend, and dont want to go through the hassle of adding the sites whole feed to your NewsBlur dashboard just to post one piece.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/12/17/sharing-bookmarklet/"},"@context":"https://schema.org"}</script>
{"headline":"The sharing bookmarklet: bringing your online explorations to NewsBlur","dateModified":"2012-12-17T12:04:00-05:00","datePublished":"2012-12-17T12:04:00-05:00","url":"https://blog.newsblur.com/2012/12/17/sharing-bookmarklet/","@type":"BlogPosting","description":"There are lots of reasons not to post a cool article youve seen to your blurblog. Maybe you already follow too many blogs, and dont have room to add any more to your feed (in which case, may we humbly recommend a Premium account?) Maybe you dont want everyone to know just how crazy youve gotten about jai-alai or aerotrekking or My Little Pony: Friendship Is Magic. Or maybe you found a cool article on Facebook or Twitter or through an e-mail from a friend, and dont want to go through the hassle of adding the sites whole feed to your NewsBlur dashboard just to post one piece.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2012/12/17/sharing-bookmarklet/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="A blurblog of ones own: new privacy controls" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2013/01/03/privacy-controls/","@type":"BlogPosting","headline":"A blurblog of ones own: new privacy controls","dateModified":"2013-01-03T00:00:00-05:00","datePublished":"2013-01-03T00:00:00-05:00","description":"Here at NewsBlur HQ, we love greeting each new day by seeing what everyone posts on their blurblogs, but we understand that not everyone might want to have their reading preferences broadcast to the public (or have the public broadcast its opinions on said preferences). So were introducing a special new service for premium account holders that allows you to protect your posts from prying eyes.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/01/03/privacy-controls/"},"@context":"https://schema.org"}</script>
{"headline":"A blurblog of ones own: new privacy controls","dateModified":"2013-01-03T00:00:00-05:00","datePublished":"2013-01-03T00:00:00-05:00","url":"https://blog.newsblur.com/2013/01/03/privacy-controls/","@type":"BlogPosting","description":"Here at NewsBlur HQ, we love greeting each new day by seeing what everyone posts on their blurblogs, but we understand that not everyone might want to have their reading preferences broadcast to the public (or have the public broadcast its opinions on said preferences). So were introducing a special new service for premium account holders that allows you to protect your posts from prying eyes.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/01/03/privacy-controls/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Three Months to Scale NewsBlur" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2013/03/17/three-months-to-scale-newsblur/","@type":"BlogPosting","headline":"Three Months to Scale NewsBlur","dateModified":"2013-03-17T17:24:00-04:00","datePublished":"2013-03-17T17:24:00-04:00","description":"At 4:16pm last Wednesday I got a short and to-the-point email from Nilay Patel at The Verge with only a link that started with the host “googlereader.blogspot.com”. The sudden spike in NewsBlurs visitors immediately confirmed — Google was shutting down Reader.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/03/17/three-months-to-scale-newsblur/"},"@context":"https://schema.org"}</script>
{"headline":"Three Months to Scale NewsBlur","dateModified":"2013-03-17T17:24:00-04:00","datePublished":"2013-03-17T17:24:00-04:00","url":"https://blog.newsblur.com/2013/03/17/three-months-to-scale-newsblur/","@type":"BlogPosting","description":"At 4:16pm last Wednesday I got a short and to-the-point email from Nilay Patel at The Verge with only a link that started with the host “googlereader.blogspot.com”. The sudden spike in NewsBlurs visitors immediately confirmed — Google was shutting down Reader.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/03/17/three-months-to-scale-newsblur/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="The NewsBlur Redesign" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2013/05/20/the-newsblur-redesign/","@type":"BlogPosting","headline":"The NewsBlur Redesign","dateModified":"2013-05-20T22:47:00-04:00","datePublished":"2013-05-20T22:47:00-04:00","description":"Not to say that NewsBlur was ugly before today, but it certainly didnt have the loving embrace of a talented designer. So without waiting another moment (or month) I proudly present the NewsBlur redesign.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/05/20/the-newsblur-redesign/"},"@context":"https://schema.org"}</script>
{"headline":"The NewsBlur Redesign","dateModified":"2013-05-20T22:47:00-04:00","datePublished":"2013-05-20T22:47:00-04:00","url":"https://blog.newsblur.com/2013/05/20/the-newsblur-redesign/","@type":"BlogPosting","description":"Not to say that NewsBlur was ugly before today, but it certainly didnt have the loving embrace of a talented designer. So without waiting another moment (or month) I proudly present the NewsBlur redesign.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/05/20/the-newsblur-redesign/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Keyboard Shortcuts Manager" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2013/05/23/keyboard-shortcuts-manager/","@type":"BlogPosting","headline":"Keyboard Shortcuts Manager","dateModified":"2013-05-23T09:01:00-04:00","datePublished":"2013-05-23T09:01:00-04:00","description":"Hot on the heels of the redesign launch, Im already putting out new features. There are a number of post-redesign priorities on my list, but one of the most requested features is to customize the keyboard shortcuts.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/05/23/keyboard-shortcuts-manager/"},"@context":"https://schema.org"}</script>
{"headline":"Keyboard Shortcuts Manager","dateModified":"2013-05-23T09:01:00-04:00","datePublished":"2013-05-23T09:01:00-04:00","url":"https://blog.newsblur.com/2013/05/23/keyboard-shortcuts-manager/","@type":"BlogPosting","description":"Hot on the heels of the redesign launch, Im already putting out new features. There are a number of post-redesign priorities on my list, but one of the most requested features is to customize the keyboard shortcuts.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/05/23/keyboard-shortcuts-manager/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Read NewsBlur on your Mac with the new ReadKit" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2013/05/30/read-newsblur-on-your-mac-with-the-new-readkit/","@type":"BlogPosting","headline":"Read NewsBlur on your Mac with the new ReadKit","dateModified":"2013-05-30T12:12:16-04:00","datePublished":"2013-05-30T12:12:16-04:00","description":"ReadKit, a native Mac app for reading Instapaper, Pocket, and NewsBlur on your desktop, completes the RSS reading trifecta. NewsBlur has a web app, native iOS app, and now a native Mac app.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/05/30/read-newsblur-on-your-mac-with-the-new-readkit/"},"@context":"https://schema.org"}</script>
{"headline":"Read NewsBlur on your Mac with the new ReadKit","dateModified":"2013-05-30T12:12:16-04:00","datePublished":"2013-05-30T12:12:16-04:00","url":"https://blog.newsblur.com/2013/05/30/read-newsblur-on-your-mac-with-the-new-readkit/","@type":"BlogPosting","description":"ReadKit, a native Mac app for reading Instapaper, Pocket, and NewsBlur on your desktop, completes the RSS reading trifecta. NewsBlur has a web app, native iOS app, and now a native Mac app.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/05/30/read-newsblur-on-your-mac-with-the-new-readkit/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Text view comes to the NewsBlur iOS app" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2013/06/04/text-view-comes-to-the-newsblur-ios-app/","@type":"BlogPosting","headline":"Text view comes to the NewsBlur iOS app","dateModified":"2013-06-04T08:31:18-04:00","datePublished":"2013-06-04T08:31:18-04:00","description":"The iOS apps are finding themselves host to a whole slew of additions and enhancements. Today I get to tell you about the iOS apps newest feature: the Text view.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/06/04/text-view-comes-to-the-newsblur-ios-app/"},"@context":"https://schema.org"}</script>
{"headline":"Text view comes to the NewsBlur iOS app","dateModified":"2013-06-04T08:31:18-04:00","datePublished":"2013-06-04T08:31:18-04:00","url":"https://blog.newsblur.com/2013/06/04/text-view-comes-to-the-newsblur-ios-app/","@type":"BlogPosting","description":"The iOS apps are finding themselves host to a whole slew of additions and enhancements. Today I get to tell you about the iOS apps newest feature: the Text view.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/06/04/text-view-comes-to-the-newsblur-ios-app/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="NewsBlur Puzzle T-shirt 2013" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2013/07/24/newsblur-puzzle-t-shirt-2013/","@type":"BlogPosting","headline":"NewsBlur Puzzle T-shirt 2013","dateModified":"2013-07-24T15:49:00-04:00","datePublished":"2013-07-24T15:49:00-04:00","description":"Last year I was proud to be able to send a free t-shirt and handwritten note to every single user who requested one. It took a few days of writing, stuffing, and mailing to send out a couple hundred t-shirts.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/07/24/newsblur-puzzle-t-shirt-2013/"},"@context":"https://schema.org"}</script>
{"headline":"NewsBlur Puzzle T-shirt 2013","dateModified":"2013-07-24T15:49:00-04:00","datePublished":"2013-07-24T15:49:00-04:00","url":"https://blog.newsblur.com/2013/07/24/newsblur-puzzle-t-shirt-2013/","@type":"BlogPosting","description":"Last year I was proud to be able to send a free t-shirt and handwritten note to every single user who requested one. It took a few days of writing, stuffing, and mailing to send out a couple hundred t-shirts.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/07/24/newsblur-puzzle-t-shirt-2013/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Simple Search for Feeds, Saved Stories, and Blurblogs" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2013/07/30/simple-search-for-feeds-saved-stories-and-blurblogs/","@type":"BlogPosting","headline":"Simple Search for Feeds, Saved Stories, and Blurblogs","dateModified":"2013-07-30T12:38:23-04:00","datePublished":"2013-07-30T12:38:23-04:00","description":"Search, which can easily be considered one of the most important features of a world-class news reader, is also one of the most difficult features to build.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/07/30/simple-search-for-feeds-saved-stories-and-blurblogs/"},"@context":"https://schema.org"}</script>
{"headline":"Simple Search for Feeds, Saved Stories, and Blurblogs","dateModified":"2013-07-30T12:38:23-04:00","datePublished":"2013-07-30T12:38:23-04:00","url":"https://blog.newsblur.com/2013/07/30/simple-search-for-feeds-saved-stories-and-blurblogs/","@type":"BlogPosting","description":"Search, which can easily be considered one of the most important features of a world-class news reader, is also one of the most difficult features to build.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/07/30/simple-search-for-feeds-saved-stories-and-blurblogs/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Mark as read by number of days and other improvements" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2013/09/13/mark-as-read-by-number-of-days-and-other/","@type":"BlogPosting","headline":"Mark as read by number of days and other improvements","dateModified":"2013-09-13T17:05:55-04:00","datePublished":"2013-09-13T17:05:55-04:00","description":"Heres a few big improvements for the NewsBlur website.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/09/13/mark-as-read-by-number-of-days-and-other/"},"@context":"https://schema.org"}</script>
{"headline":"Mark as read by number of days and other improvements","dateModified":"2013-09-13T17:05:55-04:00","datePublished":"2013-09-13T17:05:55-04:00","url":"https://blog.newsblur.com/2013/09/13/mark-as-read-by-number-of-days-and-other/","@type":"BlogPosting","description":"Heres a few big improvements for the NewsBlur website.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/09/13/mark-as-read-by-number-of-days-and-other/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Upping unread stories to 30 days for premium accounts" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2013/09/16/upping-unread-stories-to-30-days-for-premium/","@type":"BlogPosting","headline":"Upping unread stories to 30 days for premium accounts","dateModified":"2013-09-16T17:09:14-04:00","datePublished":"2013-09-16T17:09:14-04:00","description":"While I love shipping new features and fixing bugs, the single largest user request was neither a feature nor a bug. NewsBlur allows for two weeks of unread stories. Once a story is more than 14 days old, it would no longer show up as unread. The justification for this was simple: you have a week to read a story, and have a second week as a grace period.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/09/16/upping-unread-stories-to-30-days-for-premium/"},"@context":"https://schema.org"}</script>
{"headline":"Upping unread stories to 30 days for premium accounts","dateModified":"2013-09-16T17:09:14-04:00","datePublished":"2013-09-16T17:09:14-04:00","url":"https://blog.newsblur.com/2013/09/16/upping-unread-stories-to-30-days-for-premium/","@type":"BlogPosting","description":"While I love shipping new features and fixing bugs, the single largest user request was neither a feature nor a bug. NewsBlur allows for two weeks of unread stories. Once a story is more than 14 days old, it would no longer show up as unread. The justification for this was simple: you have a week to read a story, and have a second week as a grace period.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/09/16/upping-unread-stories-to-30-days-for-premium/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Offline reading with the NewsBlur iOS app" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2013/09/17/offline-reading-with-the-newsblur-ios-app/","@type":"BlogPosting","headline":"Offline reading with the NewsBlur iOS app","dateModified":"2013-09-17T12:46:00-04:00","datePublished":"2013-09-17T12:46:00-04:00","description":"Today Im launching version 3.0 of the iPhone and iPad app for NewsBlur. This major update brings loads of big features that combine to make the worlds best iOS news reader with the fastest sync in town.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/09/17/offline-reading-with-the-newsblur-ios-app/"},"@context":"https://schema.org"}</script>
{"headline":"Offline reading with the NewsBlur iOS app","dateModified":"2013-09-17T12:46:00-04:00","datePublished":"2013-09-17T12:46:00-04:00","url":"https://blog.newsblur.com/2013/09/17/offline-reading-with-the-newsblur-ios-app/","@type":"BlogPosting","description":"Today Im launching version 3.0 of the iPhone and iPad app for NewsBlur. This major update brings loads of big features that combine to make the worlds best iOS news reader with the fastest sync in town.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/09/17/offline-reading-with-the-newsblur-ios-app/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="The NewsBlur iPhone and iPad app meets iOS 7" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2013/10/28/the-newsblur-iphone-and-ipad-app-meets-ios-7/","@type":"BlogPosting","headline":"The NewsBlur iPhone and iPad app meets iOS 7","dateModified":"2013-10-28T09:53:00-04:00","datePublished":"2013-10-28T09:53:00-04:00","description":"Apples latest operating system for iOS is a departure from their old aesthetic. So Ive decided to give the NewsBlur iOS app a slightly new look. But even more than how the app looks is how the app works. Tons of new features made it into this mega-release.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/10/28/the-newsblur-iphone-and-ipad-app-meets-ios-7/"},"@context":"https://schema.org"}</script>
{"headline":"The NewsBlur iPhone and iPad app meets iOS 7","dateModified":"2013-10-28T09:53:00-04:00","datePublished":"2013-10-28T09:53:00-04:00","url":"https://blog.newsblur.com/2013/10/28/the-newsblur-iphone-and-ipad-app-meets-ios-7/","@type":"BlogPosting","description":"Apples latest operating system for iOS is a departure from their old aesthetic. So Ive decided to give the NewsBlur iOS app a slightly new look. But even more than how the app looks is how the app works. Tons of new features made it into this mega-release.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/10/28/the-newsblur-iphone-and-ipad-app-meets-ios-7/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Version 3.0 of the NewsBlur Android App" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2013/11/18/version-3-0-of-the-newsblur-android-app/","@type":"BlogPosting","headline":"Version 3.0 of the NewsBlur Android App","dateModified":"2013-11-18T16:51:00-05:00","datePublished":"2013-11-18T16:51:00-05:00","description":"Hot on the heels of version 3.0 of the NewsBlur iOS app comes the next version of the Android app. A bunch of new features have made it into this release, including the new story navigation pane and the text view.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/11/18/version-3-0-of-the-newsblur-android-app/"},"@context":"https://schema.org"}</script>
{"headline":"Version 3.0 of the NewsBlur Android App","dateModified":"2013-11-18T16:51:00-05:00","datePublished":"2013-11-18T16:51:00-05:00","url":"https://blog.newsblur.com/2013/11/18/version-3-0-of-the-newsblur-android-app/","@type":"BlogPosting","description":"Hot on the heels of version 3.0 of the NewsBlur iOS app comes the next version of the Android app. A bunch of new features have made it into this release, including the new story navigation pane and the text view.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/11/18/version-3-0-of-the-newsblur-android-app/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Background updates and dynamic font sizing on the NewsBlur iOS app" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2013/12/19/background-updates-and-dynamic-font-sizing-on-the/","@type":"BlogPosting","headline":"Background updates and dynamic font sizing on the NewsBlur iOS app","dateModified":"2013-12-19T10:38:00-05:00","datePublished":"2013-12-19T10:38:00-05:00","description":"This week brings us a minor, but major, update for the NewsBlur iOS app. Several new features, some due to new APIs in iOS 7, have made it into the app.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/12/19/background-updates-and-dynamic-font-sizing-on-the/"},"@context":"https://schema.org"}</script>
{"headline":"Background updates and dynamic font sizing on the NewsBlur iOS app","dateModified":"2013-12-19T10:38:00-05:00","datePublished":"2013-12-19T10:38:00-05:00","url":"https://blog.newsblur.com/2013/12/19/background-updates-and-dynamic-font-sizing-on-the/","@type":"BlogPosting","description":"This week brings us a minor, but major, update for the NewsBlur iOS app. Several new features, some due to new APIs in iOS 7, have made it into the app.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2013/12/19/background-updates-and-dynamic-font-sizing-on-the/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Faster parallel network requests for version 3.5 of the NewsBlur Android app" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2014/01/03/faster-parallel-network-requests-for-version-35/","@type":"BlogPosting","headline":"Faster parallel network requests for version 3.5 of the NewsBlur Android app","dateModified":"2014-01-03T11:35:00-05:00","datePublished":"2014-01-03T11:35:00-05:00","description":"The single biggest criticism Ive heard of the Android app is that it can be slow when loading feeds and then loading stories. That changes today with the release of version 3.5 of the NewsBlur Android app.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/01/03/faster-parallel-network-requests-for-version-35/"},"@context":"https://schema.org"}</script>
{"headline":"Faster parallel network requests for version 3.5 of the NewsBlur Android app","dateModified":"2014-01-03T11:35:00-05:00","datePublished":"2014-01-03T11:35:00-05:00","url":"https://blog.newsblur.com/2014/01/03/faster-parallel-network-requests-for-version-35/","@type":"BlogPosting","description":"The single biggest criticism Ive heard of the Android app is that it can be slow when loading feeds and then loading stories. That changes today with the release of version 3.5 of the NewsBlur Android app.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/01/03/faster-parallel-network-requests-for-version-35/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Three new features for the web: syntax highlighting for source code, adjustable video widths, and footnotes" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2014/01/06/three-new-features-for-the-web-syntax/","@type":"BlogPosting","headline":"Three new features for the web: syntax highlighting for source code, adjustable video widths, and footnotes","dateModified":"2014-01-06T12:46:00-05:00","datePublished":"2014-01-06T12:46:00-05:00","description":"A few small new features to get your first full week of the new year started off right.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/01/06/three-new-features-for-the-web-syntax/"},"@context":"https://schema.org"}</script>
{"headline":"Three new features for the web: syntax highlighting for source code, adjustable video widths, and footnotes","dateModified":"2014-01-06T12:46:00-05:00","datePublished":"2014-01-06T12:46:00-05:00","url":"https://blog.newsblur.com/2014/01/06/three-new-features-for-the-web-syntax/","@type":"BlogPosting","description":"A few small new features to get your first full week of the new year started off right.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/01/06/three-new-features-for-the-web-syntax/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Saved story tagging" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2014/01/14/saved-story-tagging/","@type":"BlogPosting","headline":"Saved story tagging","dateModified":"2014-01-14T10:39:00-05:00","datePublished":"2014-01-14T10:39:00-05:00","description":"Its one thing to follow a handful of sites and use NewsBlurs training to only read the stories you want to read. But sometimes you want to come back to stories long after youve read them. You could save the story, but then you would have to either scroll down your saved story list to find the story, or use the new search feature to find it by title or author.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/01/14/saved-story-tagging/"},"@context":"https://schema.org"}</script>
{"headline":"Saved story tagging","dateModified":"2014-01-14T10:39:00-05:00","datePublished":"2014-01-14T10:39:00-05:00","url":"https://blog.newsblur.com/2014/01/14/saved-story-tagging/","@type":"BlogPosting","description":"Its one thing to follow a handful of sites and use NewsBlurs training to only read the stories you want to read. But sometimes you want to come back to stories long after youve read them. You could save the story, but then you would have to either scroll down your saved story list to find the story, or use the new search feature to find it by title or author.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/01/14/saved-story-tagging/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Connect NewsBlur to dozens of web services with IFTTT" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2014/02/20/connect-newsblur-to-dozens-of-web-services-with/","@type":"BlogPosting","headline":"Connect NewsBlur to dozens of web services with IFTTT","dateModified":"2014-02-20T09:11:28-05:00","datePublished":"2014-02-20T09:11:28-05:00","description":"Wouldnt it be nice if there was a way to automatically copy your saved stories over to Evernote or Twitter or Pinboard? What about an automatic way to keep track of your unread focus (trained) stories in Buffer or Delicious or Dropbox?","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/02/20/connect-newsblur-to-dozens-of-web-services-with/"},"@context":"https://schema.org"}</script>
{"headline":"Connect NewsBlur to dozens of web services with IFTTT","dateModified":"2014-02-20T09:11:28-05:00","datePublished":"2014-02-20T09:11:28-05:00","url":"https://blog.newsblur.com/2014/02/20/connect-newsblur-to-dozens-of-web-services-with/","@type":"BlogPosting","description":"Wouldnt it be nice if there was a way to automatically copy your saved stories over to Evernote or Twitter or Pinboard? What about an automatic way to keep track of your unread focus (trained) stories in Buffer or Delicious or Dropbox?","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/02/20/connect-newsblur-to-dozens-of-web-services-with/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="NewsBlur iOS 4.0 features a new dashboard, gestures and sharing controls" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2014/03/10/newsblur-ios-40-features-a-new-dashboard/","@type":"BlogPosting","headline":"NewsBlur iOS 4.0 features a new dashboard, gestures and sharing controls","dateModified":"2014-03-10T10:48:08-04:00","datePublished":"2014-03-10T10:48:08-04:00","description":"Today marks the release of version 4.0 of the NewsBlur iOS app. To illustrate the significance of this release Id like to talk about where the app has been.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/03/10/newsblur-ios-40-features-a-new-dashboard/"},"@context":"https://schema.org"}</script>
{"headline":"NewsBlur iOS 4.0 features a new dashboard, gestures and sharing controls","dateModified":"2014-03-10T10:48:08-04:00","datePublished":"2014-03-10T10:48:08-04:00","url":"https://blog.newsblur.com/2014/03/10/newsblur-ios-40-features-a-new-dashboard/","@type":"BlogPosting","description":"Today marks the release of version 4.0 of the NewsBlur iOS app. To illustrate the significance of this release Id like to talk about where the app has been.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/03/10/newsblur-ios-40-features-a-new-dashboard/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Google Reader announced its shutdown exactly a year ago" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2014/03/13/google-reader-announced-its-shutdown-exactly-a-year-ago/","@type":"BlogPosting","headline":"Google Reader announced its shutdown exactly a year ago","dateModified":"2014-03-13T14:27:00-04:00","datePublished":"2014-03-13T14:27:00-04:00","description":"In this industry, you gotta be tough.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/03/13/google-reader-announced-its-shutdown-exactly-a-year-ago/"},"@context":"https://schema.org"}</script>
{"headline":"Google Reader announced its shutdown exactly a year ago","dateModified":"2014-03-13T14:27:00-04:00","datePublished":"2014-03-13T14:27:00-04:00","url":"https://blog.newsblur.com/2014/03/13/google-reader-announced-its-shutdown-exactly-a-year-ago/","@type":"BlogPosting","description":"In this industry, you gotta be tough.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/03/13/google-reader-announced-its-shutdown-exactly-a-year-ago/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="The new font and style manager" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2014/04/10/the-new-font-and-style-manager/","@type":"BlogPosting","headline":"The new font and style manager","dateModified":"2014-04-10T11:53:11-04:00","datePublished":"2014-04-10T11:53:11-04:00","description":"This is not just any font and style manager. You now have control over font size, line spacing, story layout, and type faces directly in the reading view. This nifty popover gives you quick access to all of these customizable features.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/04/10/the-new-font-and-style-manager/"},"@context":"https://schema.org"}</script>
{"headline":"The new font and style manager","dateModified":"2014-04-10T11:53:11-04:00","datePublished":"2014-04-10T11:53:11-04:00","url":"https://blog.newsblur.com/2014/04/10/the-new-font-and-style-manager/","@type":"BlogPosting","description":"This is not just any font and style manager. You now have control over font size, line spacing, story layout, and type faces directly in the reading view. This nifty popover gives you quick access to all of these customizable features.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/04/10/the-new-font-and-style-manager/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Unread is a new iOS app with NewsBlur support" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2014/04/24/unread-is-a-new-ios-app-with-newsblur-support/","@type":"BlogPosting","headline":"Unread is a new iOS app with NewsBlur support","dateModified":"2014-04-24T11:56:00-04:00","datePublished":"2014-04-24T11:56:00-04:00","description":"NewsBlur has a free and open API that all of the native mobile apps and website are built on. But the API is not just for official apps. Numerous third-party developers have built apps on the API and today Im proud to announce a new native iOS app has launched with NewsBlur support.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/04/24/unread-is-a-new-ios-app-with-newsblur-support/"},"@context":"https://schema.org"}</script>
{"headline":"Unread is a new iOS app with NewsBlur support","dateModified":"2014-04-24T11:56:00-04:00","datePublished":"2014-04-24T11:56:00-04:00","url":"https://blog.newsblur.com/2014/04/24/unread-is-a-new-ios-app-with-newsblur-support/","@type":"BlogPosting","description":"NewsBlur has a free and open API that all of the native mobile apps and website are built on. But the API is not just for official apps. Numerous third-party developers have built apps on the API and today Im proud to announce a new native iOS app has launched with NewsBlur support.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/04/24/unread-is-a-new-ios-app-with-newsblur-support/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

View file

@ -23,7 +23,7 @@
<meta name="twitter:card" content="summary" />
<meta property="twitter:title" content="Full text search across all of your subscriptions and folders" />
<script type="application/ld+json">
{"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"url":"https://blog.newsblur.com/2014/04/29/full-text-search-across-all-of-your-subscriptions-and/","@type":"BlogPosting","headline":"Full text search across all of your subscriptions and folders","dateModified":"2014-04-29T04:45:28-04:00","datePublished":"2014-04-29T04:45:28-04:00","description":"Last Summer Simple Search was launched, giving you the ability to search a single feed at a time. Obviously not ideal, but it was far less effort than the big enchilada: full text search across every site you subscribe to. It took a few months to come back to attack the full problem, but that means I could also take the time to do it right.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/04/29/full-text-search-across-all-of-your-subscriptions-and/"},"@context":"https://schema.org"}</script>
{"headline":"Full text search across all of your subscriptions and folders","dateModified":"2014-04-29T04:45:28-04:00","datePublished":"2014-04-29T04:45:28-04:00","url":"https://blog.newsblur.com/2014/04/29/full-text-search-across-all-of-your-subscriptions-and/","@type":"BlogPosting","description":"Last Summer Simple Search was launched, giving you the ability to search a single feed at a time. Obviously not ideal, but it was far less effort than the big enchilada: full text search across every site you subscribe to. It took a few months to come back to attack the full problem, but that means I could also take the time to do it right.","mainEntityOfPage":{"@type":"WebPage","@id":"https://blog.newsblur.com/2014/04/29/full-text-search-across-all-of-your-subscriptions-and/"},"publisher":{"@type":"Organization","logo":{"@type":"ImageObject","url":"https://blog.newsblur.com/assets/newsblur_logo_512.png"}},"@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/assets/main.css">
<link rel="stylesheet" type="text/css" href="https://cloud.typography.com/6565292/711824/css/fonts.css" />

Some files were not shown because too many files have changed in this diff Show more