From e93f434b7c63a87d819171fe80c50983ac573841 Mon Sep 17 00:00:00 2001 From: Steven Eardley Date: Fri, 6 Sep 2024 14:38:03 +0100 Subject: [PATCH] remove old knowledge and commence refactor --- .gitignore | 48 --- LICENSE | 2 +- README.md | 2 +- {doaj => ansible}/README.md | 0 {doaj => ansible}/doaj-hosts.ini | 0 .../production_finalise_python3.yml | 0 .../production_python2_downgrade.yml | 0 .../production_python3_upgrade.yml | 0 .../test_finalise_python3.yml | 0 .../test_python2_downgrade.yml | 0 .../test_python3_upgrade.yml | 0 .../ott/reinstall-monitoring.yml | 0 {doaj => ansible}/ott/upload_ssh_key.yml | 0 {doaj => ansible}/reboot-metrics.yml | 0 {doaj => ansible}/restart.yml | 0 {doaj => ansible}/software-versions.yml | 0 {doaj => ansible}/update-config.yml | 0 {doaj => ansible}/update-site.yml | 0 {doaj => ansible}/update-test-site.yml | 0 backup/backup2s3.sh | 49 --- backup/elasticsearch/article_bulk.py | 57 ---- backup/elasticsearch/backup_es.py | 163 ---------- backup/elasticsearch/bulk_restore.py | 196 ------------ .../sync_doaj_newest_articles.py | 67 ---- backup/restore_from_s3.sh | 27 -- config/anacrontab-doaj-staging | 18 -- config/anacrontab_doaj | 19 -- config/anacrontab_doaj-test | 20 -- config/anacrontab_oag | 18 -- config/cloo_crontab_doaj-staging | 31 -- config/cloo_crontab_ooz | 28 -- config/cloo_doaj-test_crontab | 31 -- config/cloo_doaj_crontab | 32 -- config/cloo_oag_crontab | 29 -- config/nginx/nginx.conf | 99 ------ config/nginx/sites-available/celery-flower | 50 --- .../celery-flower-forward-to-yonce | 50 --- config/nginx/sites-available/contentmine | 25 -- .../cottagelabs.com-forward-to-cl | 37 --- config/nginx/sites-available/default | 135 -------- config/nginx/sites-available/doaj | 41 --- .../doaj-forward-to-DOAJ-with-local-static | 55 ---- .../doaj-forward-to-YONCE-with-local-static | 55 ---- .../doaj-staging-to-DOAJ-STAGING | 39 --- config/nginx/sites-available/doaj-test | 41 --- .../fundfind-forum-forward-to-PINKY | 16 - .../fundfind-forward-to-FUNDFIND | 16 - config/nginx/sites-available/g4he | 17 -- config/nginx/sites-available/harvest | 32 -- .../sites-available/harvester-for-doaj-es | 15 - config/nginx/sites-available/idfind | 12 - .../sites-available/lantern-forward-to-OOZ | 33 -- config/nginx/sites-available/lantern-test | 13 - config/nginx/sites-available/leaps | 81 ----- .../sites-available/leaps-forward-to-EDACCESS | 44 --- config/nginx/sites-available/leviathan | 19 -- .../sites-available/oacwellcome-production | 13 - config/nginx/sites-available/oacwellcome-test | 13 - config/nginx/sites-available/oag | 25 -- .../sites-available/oag-forward-to-yonce | 16 - .../oaspectrum-forward-to-OASPECTRUM | 16 - .../sites-available/oaspectrum-production | 14 - config/nginx/sites-available/ooz-es | 15 - .../sites-available/oozes-forward-to-OOZ | 33 -- .../openaccessbuton.org-forward-to-oabutton | 40 --- config/nginx/sites-available/openrefine | 15 - config/nginx/sites-available/phd | 19 -- .../nginx/sites-available/rc-doaj-to-DOAJTMP | 36 --- config/nginx/sites-available/rc-doaj-to-YONCE | 40 --- config/nginx/sites-available/swap | 77 ----- .../sites-available/swap-forward-to-EDACCESS | 44 --- .../sites-available/test-doaj-to-OOZ-server | 41 --- .../sites-available/test-oag-forward-to-PINKY | 32 -- config/nginx/sites-available/uniboard-test | 15 - config/supervisor/conf.d/README | 3 - config/supervisor/conf.d/bibsoup.net.conf | 7 - config/supervisor/conf.d/doaj-production.conf | 10 - config/supervisor/conf.d/doaj-staging.conf | 10 - 
config/supervisor/conf.d/doaj-test.conf | 10 - config/supervisor/conf.d/g4he.conf | 9 - .../conf.d/lantern-test-daemon.conf | 9 - config/supervisor/conf.d/lantern-test.conf | 10 - config/supervisor/conf.d/leaps.conf | 7 - config/supervisor/conf.d/metatool.conf | 9 - .../conf.d/oacwellcome-production-daemon.conf | 9 - .../conf.d/oacwellcome-production.conf | 10 - .../conf.d/oacwellcome-test-daemon.conf | 9 - .../supervisor/conf.d/oacwellcome-test.conf | 10 - .../supervisor/conf.d/oag-celery-flower.conf | 6 - config/supervisor/conf.d/oag-celery.conf | 26 -- config/supervisor/conf.d/oag-celery.conf.old | 63 ---- config/supervisor/conf.d/oag.conf | 7 - .../conf.d/oagr-oacwellcome-test-daemon.conf | 9 - .../conf.d/oagr-production-daemon.conf | 9 - .../supervisor/conf.d/oagr-test-daemon.conf | 9 - .../conf.d/oaspectrum-production.conf | 10 - config/supervisor/conf.d/swap.conf | 7 - config/supervisor/conf.d/uniboard-test.conf | 9 - .../conf.d/wellcome_ncbi_objects.conf | 8 - config/supervisor/supervisord.conf | 28 -- deploy_scripts/lantern_develop_deploy.sh | 45 --- deploy_scripts/oacwellcome_develop_deploy.sh | 45 --- .../oaspectrum_master_deploy_to_production.sh | 22 -- .../oaspectrum_master_deploy_to_test.sh | 22 -- disable_network_except_ssh.sh | 34 --- doaj | 1 + doaj/counts.sh | 16 - fabric/doaj/fabfile.py | 252 --------------- fabric/oacwellcome/fabfile.py | 118 ------- fabric/oaspectrum/fabfile.py | 132 -------- fabric/sysadmin/fabfile.py | 191 ------------ fabric/uniboard/fabfile.py | 96 ------ instructions/:opt:README.md | 16 - instructions/README.md | 4 - make_swap.sh | 29 -- new-droplet-digital-ocean.md | 20 -- newrelic/README.md | 44 --- newrelic/newrelic_plugin_agent.cfg | 180 ----------- oag_deploy.sh | 130 -------- pinger.py | 136 --------- setup.sh | 289 ------------------ sorted_swap.sh | 1 - swap.sh | 22 -- swap2ram.sh | 12 - useful_info/elasticsearch-exporter.md | 7 - useful_info/gunicorn-memory-creep.txt | 26 -- useful_info/log-files.txt | 33 -- ...restore-elasticsearch-index-from-backup.md | 41 --- useful_info/server-setup.txt | 7 - 129 files changed, 3 insertions(+), 4547 deletions(-) delete mode 100644 .gitignore rename {doaj => ansible}/README.md (100%) rename {doaj => ansible}/doaj-hosts.ini (100%) rename {doaj => ansible}/ott/2019_python3_upgrade/production_finalise_python3.yml (100%) rename {doaj => ansible}/ott/2019_python3_upgrade/production_python2_downgrade.yml (100%) rename {doaj => ansible}/ott/2019_python3_upgrade/production_python3_upgrade.yml (100%) rename {doaj => ansible}/ott/2019_python3_upgrade/test_finalise_python3.yml (100%) rename {doaj => ansible}/ott/2019_python3_upgrade/test_python2_downgrade.yml (100%) rename {doaj => ansible}/ott/2019_python3_upgrade/test_python3_upgrade.yml (100%) rename {doaj => ansible}/ott/reinstall-monitoring.yml (100%) rename {doaj => ansible}/ott/upload_ssh_key.yml (100%) rename {doaj => ansible}/reboot-metrics.yml (100%) rename {doaj => ansible}/restart.yml (100%) rename {doaj => ansible}/software-versions.yml (100%) rename {doaj => ansible}/update-config.yml (100%) rename {doaj => ansible}/update-site.yml (100%) rename {doaj => ansible}/update-test-site.yml (100%) delete mode 100755 backup/backup2s3.sh delete mode 100644 backup/elasticsearch/article_bulk.py delete mode 100755 backup/elasticsearch/backup_es.py delete mode 100755 backup/elasticsearch/bulk_restore.py delete mode 100755 backup/elasticsearch/sync_doaj_newest_articles.py delete mode 100755 backup/restore_from_s3.sh delete mode 100644 
config/anacrontab-doaj-staging delete mode 100644 config/anacrontab_doaj delete mode 100644 config/anacrontab_doaj-test delete mode 100644 config/anacrontab_oag delete mode 100644 config/cloo_crontab_doaj-staging delete mode 100644 config/cloo_crontab_ooz delete mode 100644 config/cloo_doaj-test_crontab delete mode 100644 config/cloo_doaj_crontab delete mode 100644 config/cloo_oag_crontab delete mode 100644 config/nginx/nginx.conf delete mode 100644 config/nginx/sites-available/celery-flower delete mode 100644 config/nginx/sites-available/celery-flower-forward-to-yonce delete mode 100644 config/nginx/sites-available/contentmine delete mode 100644 config/nginx/sites-available/cottagelabs.com-forward-to-cl delete mode 100644 config/nginx/sites-available/default delete mode 100644 config/nginx/sites-available/doaj delete mode 100644 config/nginx/sites-available/doaj-forward-to-DOAJ-with-local-static delete mode 100644 config/nginx/sites-available/doaj-forward-to-YONCE-with-local-static delete mode 100644 config/nginx/sites-available/doaj-staging-to-DOAJ-STAGING delete mode 100644 config/nginx/sites-available/doaj-test delete mode 100644 config/nginx/sites-available/fundfind-forum-forward-to-PINKY delete mode 100644 config/nginx/sites-available/fundfind-forward-to-FUNDFIND delete mode 100644 config/nginx/sites-available/g4he delete mode 100644 config/nginx/sites-available/harvest delete mode 100644 config/nginx/sites-available/harvester-for-doaj-es delete mode 100644 config/nginx/sites-available/idfind delete mode 100644 config/nginx/sites-available/lantern-forward-to-OOZ delete mode 100644 config/nginx/sites-available/lantern-test delete mode 100644 config/nginx/sites-available/leaps delete mode 100644 config/nginx/sites-available/leaps-forward-to-EDACCESS delete mode 100644 config/nginx/sites-available/leviathan delete mode 100644 config/nginx/sites-available/oacwellcome-production delete mode 100644 config/nginx/sites-available/oacwellcome-test delete mode 100644 config/nginx/sites-available/oag delete mode 100644 config/nginx/sites-available/oag-forward-to-yonce delete mode 100644 config/nginx/sites-available/oaspectrum-forward-to-OASPECTRUM delete mode 100644 config/nginx/sites-available/oaspectrum-production delete mode 100644 config/nginx/sites-available/ooz-es delete mode 100644 config/nginx/sites-available/oozes-forward-to-OOZ delete mode 100644 config/nginx/sites-available/openaccessbuton.org-forward-to-oabutton delete mode 100644 config/nginx/sites-available/openrefine delete mode 100644 config/nginx/sites-available/phd delete mode 100644 config/nginx/sites-available/rc-doaj-to-DOAJTMP delete mode 100644 config/nginx/sites-available/rc-doaj-to-YONCE delete mode 100644 config/nginx/sites-available/swap delete mode 100644 config/nginx/sites-available/swap-forward-to-EDACCESS delete mode 100644 config/nginx/sites-available/test-doaj-to-OOZ-server delete mode 100644 config/nginx/sites-available/test-oag-forward-to-PINKY delete mode 100644 config/nginx/sites-available/uniboard-test delete mode 100644 config/supervisor/conf.d/README delete mode 100644 config/supervisor/conf.d/bibsoup.net.conf delete mode 100644 config/supervisor/conf.d/doaj-production.conf delete mode 100644 config/supervisor/conf.d/doaj-staging.conf delete mode 100644 config/supervisor/conf.d/doaj-test.conf delete mode 100644 config/supervisor/conf.d/g4he.conf delete mode 100644 config/supervisor/conf.d/lantern-test-daemon.conf delete mode 100644 config/supervisor/conf.d/lantern-test.conf delete mode 100644 
config/supervisor/conf.d/leaps.conf delete mode 100644 config/supervisor/conf.d/metatool.conf delete mode 100644 config/supervisor/conf.d/oacwellcome-production-daemon.conf delete mode 100644 config/supervisor/conf.d/oacwellcome-production.conf delete mode 100644 config/supervisor/conf.d/oacwellcome-test-daemon.conf delete mode 100644 config/supervisor/conf.d/oacwellcome-test.conf delete mode 100644 config/supervisor/conf.d/oag-celery-flower.conf delete mode 100644 config/supervisor/conf.d/oag-celery.conf delete mode 100644 config/supervisor/conf.d/oag-celery.conf.old delete mode 100644 config/supervisor/conf.d/oag.conf delete mode 100644 config/supervisor/conf.d/oagr-oacwellcome-test-daemon.conf delete mode 100644 config/supervisor/conf.d/oagr-production-daemon.conf delete mode 100644 config/supervisor/conf.d/oagr-test-daemon.conf delete mode 100644 config/supervisor/conf.d/oaspectrum-production.conf delete mode 100644 config/supervisor/conf.d/swap.conf delete mode 100644 config/supervisor/conf.d/uniboard-test.conf delete mode 100644 config/supervisor/conf.d/wellcome_ncbi_objects.conf delete mode 100644 config/supervisor/supervisord.conf delete mode 100755 deploy_scripts/lantern_develop_deploy.sh delete mode 100755 deploy_scripts/oacwellcome_develop_deploy.sh delete mode 100755 deploy_scripts/oaspectrum_master_deploy_to_production.sh delete mode 100755 deploy_scripts/oaspectrum_master_deploy_to_test.sh delete mode 100755 disable_network_except_ssh.sh create mode 120000 doaj delete mode 100755 doaj/counts.sh delete mode 100644 fabric/doaj/fabfile.py delete mode 100644 fabric/oacwellcome/fabfile.py delete mode 100644 fabric/oaspectrum/fabfile.py delete mode 100644 fabric/sysadmin/fabfile.py delete mode 100644 fabric/uniboard/fabfile.py delete mode 100644 instructions/:opt:README.md delete mode 100644 instructions/README.md delete mode 100755 make_swap.sh delete mode 100644 new-droplet-digital-ocean.md delete mode 100644 newrelic/README.md delete mode 100644 newrelic/newrelic_plugin_agent.cfg delete mode 100755 oag_deploy.sh delete mode 100755 pinger.py delete mode 100644 setup.sh delete mode 100755 sorted_swap.sh delete mode 100755 swap.sh delete mode 100755 swap2ram.sh delete mode 100644 useful_info/elasticsearch-exporter.md delete mode 100644 useful_info/gunicorn-memory-creep.txt delete mode 100644 useful_info/log-files.txt delete mode 100644 useful_info/restore-elasticsearch-index-from-backup.md delete mode 100644 useful_info/server-setup.txt diff --git a/.gitignore b/.gitignore deleted file mode 100644 index f8b06af..0000000 --- a/.gitignore +++ /dev/null @@ -1,48 +0,0 @@ -# Ansible stuff -*.retry - -this_machine - -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg -*.egg-info -dist -build -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 - -# Installer logs -pip-log.txt - -# Unit test / coverage reports -.coverage -.tox -nosetests.xml - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject - -# Vim swap files -*.swp - -# PyCharm -.idea - -.dotoken.sh diff --git a/LICENSE b/LICENSE index df25a2c..38acbd4 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2013 Cottage Labs LLP +Copyright (c) 2013-2024 Cottage Labs LLP Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/README.md b/README.md index 42119cc..5cf8344 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ A versioned mix of utility scripts, config files and 
utilities used at Cottage L Copyright and License ==================== -Copyright 2013 Cottage Labs LLP +Copyright 2013-2024 Cottage Labs LLP Licensed under the `MIT License`_ diff --git a/doaj/README.md b/ansible/README.md similarity index 100% rename from doaj/README.md rename to ansible/README.md diff --git a/doaj/doaj-hosts.ini b/ansible/doaj-hosts.ini similarity index 100% rename from doaj/doaj-hosts.ini rename to ansible/doaj-hosts.ini diff --git a/doaj/ott/2019_python3_upgrade/production_finalise_python3.yml b/ansible/ott/2019_python3_upgrade/production_finalise_python3.yml similarity index 100% rename from doaj/ott/2019_python3_upgrade/production_finalise_python3.yml rename to ansible/ott/2019_python3_upgrade/production_finalise_python3.yml diff --git a/doaj/ott/2019_python3_upgrade/production_python2_downgrade.yml b/ansible/ott/2019_python3_upgrade/production_python2_downgrade.yml similarity index 100% rename from doaj/ott/2019_python3_upgrade/production_python2_downgrade.yml rename to ansible/ott/2019_python3_upgrade/production_python2_downgrade.yml diff --git a/doaj/ott/2019_python3_upgrade/production_python3_upgrade.yml b/ansible/ott/2019_python3_upgrade/production_python3_upgrade.yml similarity index 100% rename from doaj/ott/2019_python3_upgrade/production_python3_upgrade.yml rename to ansible/ott/2019_python3_upgrade/production_python3_upgrade.yml diff --git a/doaj/ott/2019_python3_upgrade/test_finalise_python3.yml b/ansible/ott/2019_python3_upgrade/test_finalise_python3.yml similarity index 100% rename from doaj/ott/2019_python3_upgrade/test_finalise_python3.yml rename to ansible/ott/2019_python3_upgrade/test_finalise_python3.yml diff --git a/doaj/ott/2019_python3_upgrade/test_python2_downgrade.yml b/ansible/ott/2019_python3_upgrade/test_python2_downgrade.yml similarity index 100% rename from doaj/ott/2019_python3_upgrade/test_python2_downgrade.yml rename to ansible/ott/2019_python3_upgrade/test_python2_downgrade.yml diff --git a/doaj/ott/2019_python3_upgrade/test_python3_upgrade.yml b/ansible/ott/2019_python3_upgrade/test_python3_upgrade.yml similarity index 100% rename from doaj/ott/2019_python3_upgrade/test_python3_upgrade.yml rename to ansible/ott/2019_python3_upgrade/test_python3_upgrade.yml diff --git a/doaj/ott/reinstall-monitoring.yml b/ansible/ott/reinstall-monitoring.yml similarity index 100% rename from doaj/ott/reinstall-monitoring.yml rename to ansible/ott/reinstall-monitoring.yml diff --git a/doaj/ott/upload_ssh_key.yml b/ansible/ott/upload_ssh_key.yml similarity index 100% rename from doaj/ott/upload_ssh_key.yml rename to ansible/ott/upload_ssh_key.yml diff --git a/doaj/reboot-metrics.yml b/ansible/reboot-metrics.yml similarity index 100% rename from doaj/reboot-metrics.yml rename to ansible/reboot-metrics.yml diff --git a/doaj/restart.yml b/ansible/restart.yml similarity index 100% rename from doaj/restart.yml rename to ansible/restart.yml diff --git a/doaj/software-versions.yml b/ansible/software-versions.yml similarity index 100% rename from doaj/software-versions.yml rename to ansible/software-versions.yml diff --git a/doaj/update-config.yml b/ansible/update-config.yml similarity index 100% rename from doaj/update-config.yml rename to ansible/update-config.yml diff --git a/doaj/update-site.yml b/ansible/update-site.yml similarity index 100% rename from doaj/update-site.yml rename to ansible/update-site.yml diff --git a/doaj/update-test-site.yml b/ansible/update-test-site.yml similarity index 100% rename from doaj/update-test-site.yml rename to 
ansible/update-test-site.yml diff --git a/backup/backup2s3.sh b/backup/backup2s3.sh deleted file mode 100755 index f4e4c9a..0000000 --- a/backup/backup2s3.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -now=`date +%Y-%m-%d_%H%M` -backup_file_suffix=".tar.gz" - -if [ $# -ne 4 ] -then - echo "4 arguments needed" - echo "Usage: $0 <file or dir to back up> <backup name> <compress?> s3://<bucket>" - echo - echo "The local backup file will then be called:" - echo " <backup name>_<timestamp>$backup_file_suffix" - echo - echo "If you are backing up a directory, <compress?> will be ignored and all files and folders in your directory will be:" - echo " 1. copied to the [... rest of backup2s3.sh lost in extraction ...] diff --git a/backup/elasticsearch/article_bulk.py b/backup/elasticsearch/article_bulk.py deleted file mode 100644 --- a/backup/elasticsearch/article_bulk.py +++ /dev/null @@ -1,57 +0,0 @@ [... head of article_bulk.py lost in extraction ...] -def usage(): - print 'Usage:', sys.argv[0], '<mapping file>', '<data file>', '<batch size>' - sys.exit(1) - -def main(argv=None): - if not argv: - argv = sys.argv - - if len(argv) < 4: - usage() - - try: - batch_size = int(argv[3]) - except ValueError: - print 'ERROR: Batch size must be an integer' - usage() - - no_of_lines = batch_size * 2 # 2 actual lines get read in for every item in the batch - - raw_input('Going to delete DOAJ index NOW. <Enter> to continue, Ctrl+C to stop now and preserve the index.') - requests.delete('http://localhost:9200/doaj') - - with open(argv[1], 'rb') as i: - data = json.dumps(json.loads(i.read())) # make it obvious when the mapping is not valid JSON, this would throw an exception - ri = requests.post('http://localhost:9200/doaj/article') - r = requests.put('http://localhost:9200/doaj/article/_mapping', data=data) - print 'data mapping', r.status_code - - with open(argv[2], 'rb') as i: - counter = 0 - while True: - counter += 1 - print 'On batch', counter - batch = [] - cur_no_of_lines = 0 - for line in i: - cur_no_of_lines += 1 - if cur_no_of_lines < no_of_lines: - batch.append(line) - else: - break - - if cur_no_of_lines == 0: - break - - data = "".join(batch) - r = requests.post('http://localhost:9200/_bulk', data=data) - if r.status_code != 200: - print ' Batch', counter, 'error. HTTP response:', r.status_code - sys.exit(1) - -if __name__ == '__main__': - main() diff --git a/backup/elasticsearch/backup_es.py b/backup/elasticsearch/backup_es.py deleted file mode 100755 index fbcb26d..0000000 --- a/backup/elasticsearch/backup_es.py +++ /dev/null @@ -1,163 +0,0 @@ -#!/usr/bin/python - -from datetime import datetime -import argparse -import os -import sys -import logging -import subprocess -import shlex -import time - -LOG_FORMAT = '%(asctime)-15s :: %(levelname)-6s: %(message)s' -logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) -logger = logging.getLogger(__name__) - -DEFAULT_HOST = 'localhost' -DEFAULT_PORT = 9200 -DEFAULT_QUERY_SIZE = 10 -DEFAULT_EXEC = ['node', '--nouse-idle-notification', '--expose-gc', '/home/cloo/elasticsearch-exporter', '-m 0.3'] - -# The following options should be (kept) the same as the nodejs app -# "elasticsearch-exporter" options related to backing up stuff. -# Easier to stay consistent - even if this script eventually has its own -# support for exporting data into bulk ES format, or starts using ES 1.x -# snapshots etc. - -# all of these entries must have the "short" and "long" keys defined -# "long" is used for fetching the value passed to this script using argparse -# "short" is used when passing the argument on to the exporter - -EXPORTER_PARAMS = [ - {'short': "-a", 'long': "--host", 'default': DEFAULT_HOST, 'help': "The hostname from which data is to be exported. Default: {0}".format(DEFAULT_HOST)}, - {'short': "-p", 'long': "--port", 'default': DEFAULT_PORT, 'help': "The port of the source host to talk to.
Default: {0}".format(DEFAULT_PORT)}, - {'short': "-i", 'long': "--index", 'help': "Pass the index name when you are exporting a single index. Default: export all indices"}, - {'short': "-t", 'long': "--type", 'help': "The type from which to export data from. Default: the entire index is exported"}, - {'short': "-s", 'long': "--query", 'help': "Define a query that limits what kind of documents are exported from the source. Default: all documents are exported"}, - {'short': "-z", 'long': "--size", 'default': DEFAULT_QUERY_SIZE, 'help': "The maximum number of results to be returned per query. Default: {0}".format(DEFAULT_QUERY_SIZE)}, - {'short': "-g", 'long': "--targetFile", 'help': "The filename base to which the data should be exported (becomes .data and .meta). The format depends on the compression flag (default = compressed). Default filename base: _ . If an index is specified, _. If a type is specified, __. WARNING: define a suitable filename if you're using --query!"}, -] - -def main(argv=None): - if not argv: - argv = sys.argv - - parser = argparse.ArgumentParser() - parser.add_argument("backup_directory", help="Will be created with mkdir -p if it doesn't exist. Probably best if you ensure it exists and has the correct permissions though.") - - for p in EXPORTER_PARAMS: - short = p['short']; long_ = p['long']; default = p.get('default'); help_ = p.get('help'); - parser.add_argument(short, long_, default=default, help=help_) - - # UI / CLI - this param is much more rarely used (if ever), so - # goes at the bottom of the list - parser.add_argument("--exporter", action=StoreAsList, default=DEFAULT_EXEC, help='Which executable to use for the actual export operation. It will get all the short options. Default: "{0}"'.format(' '.join(DEFAULT_EXEC))) - parser.add_argument("--s3-bucket", help="If you specify this (format: s3://BUCKET_NAME ), the resulting backup files will be put on a S3 bucket with the most restrictive access permissions. 
Existing files will be overwritten, so if you are also using a custom filename with -g or --targetFile, make sure to include a timestamp in it.") - - args=parser.parse_args(argv[1:]) - - filename_base = args.targetFile - if not filename_base: - timestamp = time.strftime('%Y-%m-%d-%H%M%S') - filename_base_elements = [] - nohost = False - if args.index: - filename_base_elements.append('{index}') - nohost = True - if args.type: - filename_base_elements.append('{type}') - nohost = True - - if not nohost: - filename_base_elements.append('{host}') - - filename_base_elements.append('{timestamp}') - filename_base = '_'.join(filename_base_elements) - filename_base = filename_base.format(host=args.host, index=args.index, type=args.type, timestamp=timestamp) - - exporter_args = [] - for p in EXPORTER_PARAMS: - pname = p['long'][2:] - argval = getattr(args, pname) - - if pname == 'targetFile': - argval = filename_base - - if argval: - exporter_args.append(p['short']) - exporter_args.append(str(argval)) # remove the dashes when accessing that argument in the args Namespace obj - - logger.info('Going into {0}.'.format(args.backup_directory)) - cd(args.backup_directory) - - start = datetime.now() - logger.info('Starting export using {exporter} at {time}'.format(exporter=' '.join(args.exporter + exporter_args), time=start)) - output = run_command(args.exporter + exporter_args) - end = datetime.now() - elapsed = end - start - logger.info('Export finished at {time}'.format(time=end)) - logger.info('Export took {time}'.format(time=elapsed)) - - if args.s3_bucket: - upload_filename = filename_base + '.meta' - logger.info('Putting {file} into S3 bucket {bucket}.'.format(file=upload_filename, bucket=args.s3_bucket)) - output = run_command(['s3cmd', 'put', '--acl-private', '-H', upload_filename, args.s3_bucket]) - - upload_filename = filename_base + '.data' - logger.info('Putting {file} into S3 bucket {bucket}.'.format(file=upload_filename, bucket=args.s3_bucket)) - output = run_command(['s3cmd', 'put', '--acl-private', '-H', upload_filename, args.s3_bucket]) - - logger.info('All done.') - - -def cd(directory, recursion_level=0): - if recursion_level > 2: - fail('The attempt to change the current working directory to {0} has failed - this script keeps trying to create it and failing to cd into it repeatedly.'.format(directory)) - try: - os.chdir(directory) - except OSError as e: - if e.errno == 2: - logger.info('Directory {0} does not exist, attempting to create with mkdir -p...'.format(directory)) - output = run_command(['mkdir', '-p', directory]) - cd(directory, recursion_level=recursion_level+1) - else: - raise e - - -def run_command(arguments, *args, **kwargs): - '''Takes a list as 1st argument and passes everything to subprocess.check_output, keeping the signature the same as subprocess.check_output.''' - stderr = kwargs.pop('stderr', subprocess.STDOUT) # capture stderr by default, but allow override via kwargs - - try: - output = subprocess.check_output(arguments, *args, stderr=stderr, **kwargs) - except OSError as e: - if e.errno == 2: - fail('Cannot find the executable ' + arguments[0]) - raise e - except subprocess.CalledProcessError as e: - log(' '.join(e.cmd), 'failed with return code', str(e.returncode), "output below\n", str(e.output), severity='error') - raise e - return output - - -def log(*args, **kwargs): - severity = kwargs.pop('severity', 'debug') # not allowed a keyword arg after *args - logfunc = getattr(logger, severity) - - msg = ' '.join(args) - logfunc(msg) - - -def fail(*args, 
**kwargs): - log(*args, severity='error') - sys.exit(kwargs.pop('status', 1)) - - -class StoreAsList(argparse.Action): - def __call__(self, parser, namespace, values, option_string=None): - setattr(namespace, self.dest, shlex.split(values)) - - -if __name__ == '__main__': - main() - diff --git a/backup/elasticsearch/bulk_restore.py b/backup/elasticsearch/bulk_restore.py deleted file mode 100755 index 641b933..0000000 --- a/backup/elasticsearch/bulk_restore.py +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/python - -import requests -import sys -import json -from time import sleep -from datetime import datetime -import argparse - -config = {} -config['RETRY_ES'] = 30 - -def put_mapping(mapping_dict): - for index in mapping_dict: - if not index.startswith('_'): - # create the index first - i = config['ELASTIC_SEARCH_HOST'] - i += '/' + index - ri = requests.put(i) - if ri.status_code != 200: - print 'Failed to create Index:', index, ', HTTP Response:', ri.status_code - print ri.text - sys.exit(3) - # now create each type inside the index - for key, mapping in mapping_dict[index]['mappings'].iteritems(): - im = i + '/' + key + '/_mapping' - exists = requests.get(im) - # do not overwrite existing mappings - if exists.status_code != 200: - themapping = {} - themapping[key] = mapping - r = requests.put(im, json.dumps(themapping)) - if r.status_code != 200: - print 'Failed to do PUT mapping for Index:', index, ', Key:', key, ', HTTP Response:', r.status_code - sys.exit(4) - else: - print 'Mapping OK for Index:', index, ', Key:', key, ', HTTP Response:', r.status_code - else: - print 'Mapping already exists for Index:', index, ', Key:', key - else: - print 'Ignoring {0}, no index names start with _'.format(index) - -def main(argv=None): - if not argv: - argv = sys.argv - - parser = argparse.ArgumentParser() - parser.add_argument("data_filename") - BATCH_SIZE_DEFAULT = 2000 - parser.add_argument("-es", "--elasticsearch-host", default="http://localhost:9200", help="Elasticsearch host and port as a string e.g. \"http://localhost:9200\"") - parser.add_argument("-m", "--mapping-file", help="File containing the mappings to be read into Python and sent to ES.") - parser.add_argument("-nm", "--no-mapping", action="store_true", help="Do not attempt to read mapping file or restore mappings to ES, just bulk data upload.") - parser.add_argument("-i", "--index", help="Pass the index name when you are restoring a single index.") - parser.add_argument("-b", "--batch-size", type=int, help="Size of each batch of items sent to ES. Default: {0}".format(BATCH_SIZE_DEFAULT)) - parser.add_argument("-t", "--dry-run", action="store_true") - parser.add_argument("-y", "--destroy-all", action="store_true", help="Pass this to delete all indices in the target ES instance and skip the question this script would otherwise ask about this.") - parser.add_argument("-n", "--no-destroy-all", action="store_true", help="Pass this to skip deleting all indices in the target ES instance and skip the question this script would otherwise ask about this. NOTE: The script will skip updating the mappings if you do not agree to delete all data. (You have to reindex after a mapping update.)") - parser.add_argument("-s", "--sleep", type=float, help="Number of seconds to sleep between batches, floats are allowed. Pass 0 to disable sleeping. Default: the first digit of your batch size. If sending 20 items, sleep 2 seconds between them. Same for 200 or 200'000 items. 
Sending 30, 3'000 (and so on) items will sleep for 3 seconds by default.") - args=parser.parse_args(argv[1:]) - - config['ELASTIC_SEARCH_HOST'] = args.elasticsearch_host - config['ELASTIC_SEARCH_HOST'] = config['ELASTIC_SEARCH_HOST'].rstrip('/') - - dont_restore_mapping = args.no_mapping - mapping_file = args.mapping_file - data_filename = args.data_filename - - index = args.index - batch_size = args.batch_size if args.batch_size else BATCH_SIZE_DEFAULT - - dry_run = args.dry_run - - sleep_seconds = args.sleep if args.sleep or args.sleep == 0 else int(str(batch_size)[0]) # Default case: first digit of batch size. 2 for 20 items, 2 for 2000 items, 2 for 20000 items. 3 for 30 items, 4 for 400 items. - - no_of_lines = batch_size * 2 # 2 actual lines get read in for every item in the batch - - if dry_run: - print 'THIS IS A DRY RUN. No operations will actually be performed, including deleting data, updating mappings and sending data to ES in bulk.' - if index: - print 'INDEX:', index - print 'BATCH SIZE:', batch_size - print 'SLEEPING for {0} seconds between batches.'.format(sleep_seconds) - - if index: - delete_what = 'the ' + index + ' index' - else: - delete_what = 'all indices' - - if args.destroy_all: - delete = True - else: - if args.no_destroy_all: - delete = False - elif dont_restore_mapping: - delete = False - else: - # we don't know, we need to ask - print 'If you want to update the mappings you will have to reindex the data. With this in mind...' - delete = raw_input(' Do you want to delete {delete_what} now? Enter "yes, destroy all the data" to do so, or "n" to proceed without deleting anything: '.format(delete_what=delete_what)) - delete = True if delete == "yes, destroy all the data" else False - - if delete: - print 'DELETING all data in {delete_what}.'.format(delete_what=delete_what) - else: - print 'NOT DELETING any data in ES.' - if dont_restore_mapping: - print '... because you chose not to restore mappings.' - - deleted = False - if not dry_run: - if delete: - if index: - r = requests.delete(config['ELASTIC_SEARCH_HOST'] + '/' + index) - else: - r = requests.delete(config['ELASTIC_SEARCH_HOST']) - if r.status_code == 200: - print ' Deleted successfully.' - else: - print ' ES reported a problem deleting the data.' - print ' Status code: {0}.'.format(r.status_code) - print ' HTTP response body:' - print r.text - requests.post(config['ELASTIC_SEARCH_HOST'] + '/_flush') - sleep(15) # 5 sec has been shown to not be enough in practice - deleted = True - else: - print ' DRY RUN: Not deleting anything.' - - if not deleted and not dry_run: - print 'Skipping mappings update, you chose not to delete any data.' - elif dont_restore_mapping: - print 'You chose not to restore mappings or read in the mappings file.' - else: - with open(mapping_file, 'rb') as i: - data = json.loads(i.read()) - if index: - data = {index: data} - if dry_run: - print ' DRY RUN: Mapping loaded successfully from file, but not going to send it to ES.' - else: - put_mapping(data) - - started = datetime.now() - - total_counter = 0 - with open(data_filename, 'rb') as i: - counter = 0 - while True: - counter += 1 - print 'On batch', counter, '. 
Done {0} items so far.'.format(total_counter / 2.0) - batch = [] - cur_no_of_lines = 0 - for line in i: - cur_no_of_lines += 1 - batch.append(line) - if cur_no_of_lines >= no_of_lines: - break - - if cur_no_of_lines == 0: - break - total_counter += len(batch) - data = "".join(batch) - - if dry_run: - print ' DRY RUN: Batch #{0} constructed, len(batch) = {1}, but not sending to ES.'.format(counter, len(batch)) - else: - r = None - count = 0 - exception = None - while count < config['RETRY_ES']: - count += 1 - try: - r = requests.post(config['ELASTIC_SEARCH_HOST'] + '/_bulk', data=data) - break - except Exception as e: - exception = e - sleep(0.5) - - if exception is not None: - raise exception - - if r.status_code != 200: - print ' Batch', counter, 'error. HTTP response:', r.status_code - print r.text - sys.exit(1) - sleep(sleep_seconds) - print 'Total lines', total_counter - print 'Total items', total_counter / 2.0 - print - print 'Started:', started - ended = datetime.now() - print 'Ended:', ended - print 'Time taken:', ended - started - -if __name__ == '__main__': - main() diff --git a/backup/elasticsearch/sync_doaj_newest_articles.py b/backup/elasticsearch/sync_doaj_newest_articles.py deleted file mode 100755 index 5535acf..0000000 --- a/backup/elasticsearch/sync_doaj_newest_articles.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/python -# Syncs DOAJ newest_articles which are not present in the localhost's doaj index -# Usage: ./sync_doaj_newest_articles.py <source search URL> <destination search URL> [test] -# E.g.: ./sync_doaj_newest_articles.py "http://doajtmp:9200/doaj/article/_search?q=admin.in_doaj:true&size=1000&sort=created_date:desc" "http://yonce:9200/doaj/article/_search?q=admin.in_doaj:true&size=1000&sort=created_date:desc" -# IF YOU run this script on yonce, this will copy all newest articles present in doajtmp to yonce. It checks which ones to copy by querying the <destination> and calculating which ones from the source are not present in there. - -# Again, this script calculates which articles need to be synced from 2 -# endpoints, but it will WRITE TO LOCALHOST:9200! -import requests -import sys -import json -import time - -def abort(msg): - raise Exception(msg) - -def main(argv=None): - if not argv: - argv=sys.argv - - src = argv[1] - dst = argv[2] - - testing = False - if len(argv) > 3: - if argv[3] == 'test': - testing = True - - source_data = requests.get(src).text - dest_data = requests.get(dst).text - - s = json.loads(source_data) - d = json.loads(dest_data) - - s_total = s['hits']['total'] - s = s['hits']['hits'] - - d_total = d['hits']['total'] - d = d['hits']['hits'] - - if s_total < d_total: - abort("Source has fewer docs than destination, aborting since your source and destination could be completely out of sync (both have docs that the other one does not). Investigate, fix and rerun.") - - removed = 0 - for s_hit in s[:]: - for d_hit in d: - if s_hit["_id"] == d_hit["_id"]: - removed += 1 - s.remove(s_hit) - - print 'Putting', len(s), 'newest_articles into ES.' - print 'You have 5 seconds to terminate this script with Ctrl+C if the number seems off.' - time.sleep(5) - - for diff in s: - if testing: - print 'TESTING - would PUT', diff['_id'] - else: - r = requests.put('http://localhost:9200/doaj/article/' + diff['_id'], data=json.dumps(diff['_source'])) - print diff['_id'], r.status_code - if r.status_code not in [200, 201]: - print 'ES error for record', diff['_id'], 'HTTP status code:', r.status_code - - print 'PUT', len(s), 'newest articles into ES.'
- -if __name__ == '__main__': - main() diff --git a/backup/restore_from_s3.sh b/backup/restore_from_s3.sh deleted file mode 100755 index 1de7f16..0000000 --- a/backup/restore_from_s3.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -if [ $# -ne 3 ] -then - echo "3 arguments needed" - echo "Usage: $0 s3://<bucket> <dir to restore to> <local dir for downloads>" - echo - echo "All files and folders in your S3 bucket will be:" - echo " 1. synced down from your S3 bucket (only changed ones and new ones will be downloaded) to <local dir for downloads>" - echo " 2. they will then be copied (only changed and new ones) from <local dir for downloads> to <dir to restore to>" - echo " (No attempts at decompression will be made)" - exit 1 -fi - -bucket=$1 -restore_to_dir=$2 -local_restore_dir=$3 - -if [[ $bucket != s3://* ]]; then - echo "Your bucket name must be prefixed by s3://" - echo "Remember, \"explicit is better than implicit.\" -- Tim Peters" - exit 1 -fi - -# need to run rsync as root in order to preserve root permissions and modification times -s3cmd sync -H --acl-private --no-delete-removed "$bucket" "$local_restore_dir" -sudo rsync -aEhv "$local_restore_dir" "$restore_to_dir" diff --git a/config/anacrontab-doaj-staging b/config/anacrontab-doaj-staging deleted file mode 100644 index 5737d66..0000000 --- a/config/anacrontab-doaj-staging +++ /dev/null @@ -1,18 +0,0 @@ -# /etc/anacrontab: configuration file for anacron - -# See anacron(8) and anacrontab(5) for details. - -SHELL=/bin/bash -PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin - -# These replace cron's entries -1 5 cron.daily nice run-parts --report /etc/cron.daily -7 10 cron.weekly nice run-parts --report /etc/cron.weekly -@monthly 15 cron.monthly nice run-parts --report /etc/cron.monthly -# s3cmd, part of the backup script, is not going to find its config file at /home/cloo/.s3cfg if run directly as root -# and to be fair, other things may break too - scripts get developed as cloo, so just run them as cloo -# better for security / reliability too, in case a script is buggy - -# replicate old functionality of just keep 1 daily backup by using -g and a filename without a timestamp, -# but should write some rotation scripts soon and enable keeping more backups by just removing the -g param below -1 140 doaj-sitemap su - cloo -c 'export DOAJENV=production ; nice -n 19 /opt/doaj/bin/python /opt/doaj/src/doaj/portality/scripts/sitemap.py >> /home/cloo/cron-logs/doaj-sitemap_`date +%F_%H%M%S`.log 2>&1' diff --git a/config/anacrontab_doaj b/config/anacrontab_doaj deleted file mode 100644 index 501db17..0000000 --- a/config/anacrontab_doaj +++ /dev/null @@ -1,19 +0,0 @@ -# /etc/anacrontab: configuration file for anacron - -# See anacron(8) and anacrontab(5) for details.
- -SHELL=/bin/bash -PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin - -# These replace cron's entries -1 5 cron.daily nice run-parts --report /etc/cron.daily -7 10 cron.weekly nice run-parts --report /etc/cron.weekly -@monthly 15 cron.monthly nice run-parts --report /etc/cron.monthly -# s3cmd, part of the backup script, is not going to find its config file at /home/cloo/.s3cfg if run directly as root -# and to be fair, other things may break too - scripts get developed as cloo, so just run them as cloo -# better for security / reliability too, in case a script is buggy - -# replicate old functionality of just keep 1 daily backup by using -g and a filename without a timestamp, -# but should write some rotation scripts soon and enable keeping more backups by just removing the -g param below -1 15 doaj-index-backup su - cloo -c 'nice -n 19 /home/cloo/backups/backup_es.py /home/cloo/backups/elasticsearch-es-exporter/ -g doaj -i doaj --s3-bucket s3://cl-elasticsearch-backups >> /home/cloo/backups/logs/doaj-index_`date +%F-%H%M%S`.log 2>&1' -1 140 doaj-sitemap su - cloo -c 'export DOAJENV=production ; nice -n 19 /opt/doaj/bin/python /opt/doaj/src/doaj/portality/scripts/sitemap.py >> /home/cloo/cron-logs/doaj-sitemap_`date +%F_%H%M%S`.log 2>&1' diff --git a/config/anacrontab_doaj-test b/config/anacrontab_doaj-test deleted file mode 100644 index d0ac7c7..0000000 --- a/config/anacrontab_doaj-test +++ /dev/null @@ -1,20 +0,0 @@ -# /etc/anacrontab: configuration file for anacron - -# See anacron(8) and anacrontab(5) for details. - -SHELL=/bin/bash -PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin - -# These replace cron's entries -1 5 cron.daily nice run-parts --report /etc/cron.daily -7 10 cron.weekly nice run-parts --report /etc/cron.weekly -@monthly 15 cron.monthly nice run-parts --report /etc/cron.monthly -# s3cmd, part of the backup script, is not going to find its config file at /home/cloo/.s3cfg if run directly as root -# and to be fair, other things may break too - scripts get developed as cloo, so just run them as cloo -# better for security / reliability too, in case a script is buggy - -# replicate old functionality of just keep 1 daily backup by using -g and a filename without a timestamp, -# but should write some rotation scripts soon and enable keeping more backups by just removing the -g param below -# turn off backups on new infrastruct -#1 15 doaj-index-backup su - cloo -c 'nice -n 19 /home/cloo/backups/backup_es.py /home/cloo/backups/elasticsearch-es-exporter/ -g doaj -i doaj --s3-bucket s3://cl-elasticsearch-backups >> /home/cloo/backups/logs/doaj-index_`date +%F-%H%M%S`.log 2>&1' -1 140 doaj-sitemap su - cloo -c 'export DOAJENV=test ; nice -n 19 /home/cloo/repl/apps/doaj/bin/python /home/cloo/repl/apps/doaj/src/doaj/portality/scripts/sitemap.py >> /home/cloo/cron-logs/doaj-sitemap_`date +%F_%H%M%S`.log 2>&1' diff --git a/config/anacrontab_oag b/config/anacrontab_oag deleted file mode 100644 index eecd710..0000000 --- a/config/anacrontab_oag +++ /dev/null @@ -1,18 +0,0 @@ -# /etc/anacrontab: configuration file for anacron - -# See anacron(8) and anacrontab(5) for details. 
- -SHELL=/bin/bash -PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin - -# These replace cron's entries -1 5 cron.daily nice run-parts --report /etc/cron.daily -7 10 cron.weekly nice run-parts --report /etc/cron.weekly -@monthly 15 cron.monthly nice run-parts --report /etc/cron.monthly -# s3cmd, part of the backup script, is not going to find its config file at /home/cloo/.s3cfg if run directly as root -# and to be fair, other things may break too - scripts get developed as cloo, so just run them as cloo -# better for security / reliability too, in case a script is buggy - -# replicate old functionality of just keep 1 daily backup by using -g and a filename without a timestamp, -# but should write some rotation scripts soon and enable keeping more backups by just removing the -g param below -1 75 oag-index-backup su - cloo -c 'nice -n 19 /home/cloo/backups/backup_es.py /home/cloo/backups/elasticsearch-es-exporter/ -g oag -i oag --s3-bucket s3://cl-elasticsearch-backups >> /home/cloo/backups/logs/oag-index_`date +%F-%H%M%S`.log 2>&1' diff --git a/config/cloo_crontab_doaj-staging b/config/cloo_crontab_doaj-staging deleted file mode 100644 index 17a28d7..0000000 --- a/config/cloo_crontab_doaj-staging +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -# Edit this file to introduce tasks to be run by cron. -# -# Each task to run has to be defined through a single line -# indicating with different fields when the task will be run -# and what command to run for the task -# -# To define the time you can provide concrete values for -# minute (m), hour (h), day of month (dom), month (mon), -# and day of week (dow) or use '*' in these fields (for 'any').# -# Notice that tasks will be started based on the cron's system -# daemon's notion of time and timezones. -# -# Output of the crontab jobs (including errors) is sent through -# email to the user the crontab file belongs to (unless redirected). 
-# -# For example, you can run a backup of all your user accounts -# at 5 a.m every week with: -# 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/ -# -# For more information see the manual pages of crontab(5) and cron(8) -# -SHELL=/bin/bash -PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin - -# m h dom mon dow command -*/30 * * * * export DOAJENV=staging ; /opt/doaj/bin/python /opt/doaj/src/doaj/portality/scripts/ingestarticles.py >> /home/cloo/cron-logs/doaj-ingest-articles_`date +\%F_\%H\%M`.log 2>&1 -*/30 * * * * export NOW=`date +\%F_\%H\%M\%S` ; export DOAJENV=staging ; echo "start timestamp $NOW" >> /home/cloo/cron-logs/doaj-news-fetch.log 2>&1 && /opt/doaj/bin/python /opt/doaj/src/doaj/portality/scripts/news.py >> /home/cloo/cron-logs/doaj-news-fetch.log 2>&1 -*/30 * * * * export DOAJENV=staging ; /opt/doaj/bin/python /opt/doaj/src/doaj/portality/scripts/journalcsv.py >> /home/cloo/cron-logs/doaj-journal-csv_`date +\%F_\%H\%M`.log 2>&1 -0 2 * * * export DOAJENV=staging ; /opt/doaj/bin/python /opt/doaj/src/doaj/portality/scripts/toc.py >> /home/cloo/cron-logs/doaj-toc_`date +\%F_\%H\%M`.log 2>&1 -*/10 * * * * export NOW=`date +\%F_\%H\%M\%S` ; export DOAJENV=staging ; echo -e "\n\nstart timestamp $NOW" >> /home/cloo/cron-logs/doaj-ingest-reapps.log && /opt/doaj/bin/python /opt/doaj/src/doaj/portality/scripts/ingestreapplications.py >> /home/cloo/cron-logs/doaj-ingest-reapps.log 2>&1 diff --git a/config/cloo_crontab_ooz b/config/cloo_crontab_ooz deleted file mode 100644 index a49edc7..0000000 --- a/config/cloo_crontab_ooz +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -# Edit this file to introduce tasks to be run by cron. -# -# Each task to run has to be defined through a single line -# indicating with different fields when the task will be run -# and what command to run for the task -# -# To define the time you can provide concrete values for -# minute (m), hour (h), day of month (dom), month (mon), -# and day of week (dow) or use '*' in these fields (for 'any').# -# Notice that tasks will be started based on the cron's system -# daemon's notion of time and timezones. -# -# Output of the crontab jobs (including errors) is sent through -# email to the user the crontab file belongs to (unless redirected). -# -# For example, you can run a backup of all your user accounts -# at 5 a.m every week with: -# 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/ -# -# For more information see the manual pages of crontab(5) and cron(8) -# -SHELL=/bin/bash -PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin - -# m h dom mon dow command -*/30 * * * * export NOW=`date +\%F_\%H\%M\%S` ; export DOAJENV=test ; echo -e "\nstart timestamp $NOW" >> /home/cloo/cron-logs/doaj-journal-csv.log && /opt/doaj/bin/python /opt/doaj/src/doaj/portality/scripts/journalcsv.py >> /home/cloo/cron-logs/doaj-journal-csv.log 2>&1 -*/10 * * * * export NOW=`date +\%F_\%H\%M\%S` ; export DOAJENV=test ; echo -e "\n\nstart timestamp $NOW" >> /home/cloo/cron-logs/doaj-ingest-reapps.log && /opt/doaj/bin/python /opt/doaj/src/doaj/portality/scripts/ingestreapplications.py >> /home/cloo/cron-logs/doaj-ingest-reapps.log 2>&1 diff --git a/config/cloo_doaj-test_crontab b/config/cloo_doaj-test_crontab deleted file mode 100644 index f4074a4..0000000 --- a/config/cloo_doaj-test_crontab +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -# Edit this file to introduce tasks to be run by cron. 
-# -# Each task to run has to be defined through a single line -# indicating with different fields when the task will be run -# and what command to run for the task -# -# To define the time you can provide concrete values for -# minute (m), hour (h), day of month (dom), month (mon), -# and day of week (dow) or use '*' in these fields (for 'any').# -# Notice that tasks will be started based on the cron's system -# daemon's notion of time and timezones. -# -# Output of the crontab jobs (including errors) is sent through -# email to the user the crontab file belongs to (unless redirected). -# -# For example, you can run a backup of all your user accounts -# at 5 a.m every week with: -# 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/ -# -# For more information see the manual pages of crontab(5) and cron(8) -# -SHELL=/bin/bash -PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin - -# m h dom mon dow command -*/30 * * * * export DOAJENV=test ; /home/cloo/repl/apps/doaj/bin/python /home/cloo/repl/apps/doaj/src/doaj/portality/scripts/ingestarticles.py >> /home/cloo/cron-logs/doaj-ingest-articles_`date +\%F_\%H\%M`.log 2>&1 -*/30 * * * * export NOW=`date +\%F_\%H\%M\%S` ; export DOAJENV=test ; echo "start timestamp $NOW" >> /home/cloo/cron-logs/doaj-news-fetch.log 2>&1 && /home/cloo/repl/apps/doaj/bin/python /home/cloo/repl/apps/doaj/src/doaj/portality/scripts/news.py >> /home/cloo/cron-logs/doaj-news-fetch.log 2>&1 -*/30 * * * * export DOAJENV=test ; /home/cloo/repl/apps/doaj/bin/python /home/cloo/repl/apps/doaj/src/doaj/portality/scripts/journalcsv.py >> /home/cloo/cron-logs/doaj-journal-csv_`date +\%F_\%H\%M`.log 2>&1 -*/10 * * * * export NOW=`date +\%F_\%H\%M\%S` ; export DOAJENV=test ; echo -e "\n\nstart timestamp $NOW" >> /home/cloo/cron-logs/doaj-ingest-reapps.log && /home/cloo/repl/apps/doaj/bin/python /home/cloo/repl/apps/doaj/src/doaj/portality/scripts/ingestreapplications.py >> /home/cloo/cron-logs/doaj-ingest-reapps.log 2>&1 - diff --git a/config/cloo_doaj_crontab b/config/cloo_doaj_crontab deleted file mode 100644 index 0fffe55..0000000 --- a/config/cloo_doaj_crontab +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Edit this file to introduce tasks to be run by cron. -# -# Each task to run has to be defined through a single line -# indicating with different fields when the task will be run -# and what command to run for the task -# -# To define the time you can provide concrete values for -# minute (m), hour (h), day of month (dom), month (mon), -# and day of week (dow) or use '*' in these fields (for 'any').# -# Notice that tasks will be started based on the cron's system -# daemon's notion of time and timezones. -# -# Output of the crontab jobs (including errors) is sent through -# email to the user the crontab file belongs to (unless redirected). 
-# -# For example, you can run a backup of all your user accounts -# at 5 a.m every week with: -# 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/ -# -# For more information see the manual pages of crontab(5) and cron(8) -# -SHELL=/bin/bash -PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin - -# m h dom mon dow command -*/30 * * * * export DOAJENV=production ; /opt/doaj/bin/python /opt/doaj/src/doaj/portality/scripts/ingestarticles.py >> /home/cloo/cron-logs/doaj-ingest-articles_`date +\%F_\%H\%M`.log 2>&1 -*/30 * * * * export NOW=`date +\%F_\%H\%M\%S` ; export DOAJENV=production ; echo "start timestamp $NOW" >> /home/cloo/cron-logs/doaj-news-fetch.log 2>&1 && /opt/doaj/bin/python /opt/doaj/src/doaj/portality/scripts/news.py >> /home/cloo/cron-logs/doaj-news-fetch.log 2>&1 -*/30 * * * * export DOAJENV=production ; /opt/doaj/bin/python /opt/doaj/src/doaj/portality/scripts/journalcsv.py >> /home/cloo/cron-logs/doaj-journal-csv_`date +\%F_\%H\%M`.log 2>&1 -0 2 * * * export DOAJENV=production ; /opt/doaj/bin/python /opt/doaj/src/doaj/portality/scripts/toc.py >> /home/cloo/cron-logs/doaj-toc_`date +\%F_\%H\%M`.log 2>&1 -*/10 * * * * export NOW=`date +\%F_\%H\%M\%S` ; export DOAJENV=production ; echo -e "\n\nstart timestamp $NOW" >> /home/cloo/cron-logs/doaj-ingest-reapps.log && /opt/doaj/bin/python /opt/doaj/src/doaj/portality/scripts/ingestreapplications.py >> /home/cloo/cron-logs/doaj-ingest-reapps.log 2>&1 - diff --git a/config/cloo_oag_crontab b/config/cloo_oag_crontab deleted file mode 100644 index 325297d..0000000 --- a/config/cloo_oag_crontab +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Edit this file to introduce tasks to be run by cron. -# -# Each task to run has to be defined through a single line -# indicating with different fields when the task will be run -# and what command to run for the task -# -# To define the time you can provide concrete values for -# minute (m), hour (h), day of month (dom), month (mon), -# and day of week (dow) or use '*' in these fields (for 'any').# -# Notice that tasks will be started based on the cron's system -# daemon's notion of time and timezones. -# -# Output of the crontab jobs (including errors) is sent through -# email to the user the crontab file belongs to (unless redirected). 
-# -# For example, you can run a backup of all your user accounts -# at 5 a.m every week with: -# 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/ -# -# For more information see the manual pages of crontab(5) and cron(8) -# -SHELL=/bin/bash -PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin - -# m h dom mon dow command -0 * * * * export NOW=`date +\%F_\%H\%M\%S` ; echo "timestamp $NOW" >> /home/cloo/backups/logs/oag-publishers.log 2>&1 && /home/cloo/backups/backup_es.py /home/cloo/backups/oag-publishers-and-licenses/ -i oag -t publisher --s3-bucket s3://oag-publishers-and-licenses >> /home/cloo/backups/logs/oag-publishers.log 2>&1 -0 * * * * export NOW=`date +\%F_\%H\%M\%S` ; echo "timestamp $NOW" >> /home/cloo/backups/logs/oag-publishers.log 2>&1 && /home/cloo/backups/backup_es.py /home/cloo/backups/oag-publishers-and-licenses/ -i oag -t license_statement --s3-bucket s3://oag-publishers-and-licenses >> /home/cloo/backups/logs/oag-licenses 2>&1 - diff --git a/config/nginx/nginx.conf b/config/nginx/nginx.conf deleted file mode 100644 index 8fc13a8..0000000 --- a/config/nginx/nginx.conf +++ /dev/null @@ -1,99 +0,0 @@ -user www-data; -worker_processes 4; -pid /var/run/nginx.pid; - -events { - worker_connections 768; - # multi_accept on; -} - -http { - - ## - # Basic Settings - ## - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - # server_tokens off; - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Drop support for SSL3, it's known to be vulnerable, see https://poodle.io - - server_names_hash_bucket_size 64; - # server_name_in_redirect off; - - include /etc/nginx/mime.types; - default_type application/octet-stream; - - client_max_body_size 50M; - client_body_buffer_size 128k; - - ## - # Logging Settings - ## - - access_log /var/log/nginx/access.log; - error_log /var/log/nginx/error.log; - - ## - # Gzip Settings - ## - - gzip on; - gzip_disable "msie6"; - - gzip_vary on; - gzip_proxied any; - gzip_comp_level 6; - gzip_buffers 16 8k; - gzip_http_version 1.1; - gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript; - - ## - # nginx-naxsi config - ## - # Uncomment it if you installed nginx-naxsi - ## - - #include /etc/nginx/naxsi_core.rules; - - ## - # nginx-passenger config - ## - # Uncomment it if you installed nginx-passenger - ## - - #passenger_root /usr; - #passenger_ruby /usr/bin/ruby; - - ## - # Virtual Host Configs - ## - - include /etc/nginx/conf.d/*.conf; - include /etc/nginx/sites-enabled/*; -} - - -#mail { -# # See sample authentication script at: -# # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript -# -# # auth_http localhost/auth.php; -# # pop3_capabilities "TOP" "USER"; -# # imap_capabilities "IMAP4rev1" "UIDPLUS"; -# -# server { -# listen localhost:110; -# protocol pop3; -# proxy on; -# } -# -# server { -# listen localhost:143; -# protocol imap; -# proxy on; -# } -#} diff --git a/config/nginx/sites-available/celery-flower b/config/nginx/sites-available/celery-flower deleted file mode 100644 index 588ab81..0000000 --- a/config/nginx/sites-available/celery-flower +++ /dev/null @@ -1,50 +0,0 @@ -# You may add here your -# server { -# ... -# } -# statements for each of your virtual hosts to this file - -## -# You should look at the following URL's in order to grasp a solid understanding -# of Nginx configuration files in order to fully unleash the power of Nginx. 
-# http://wiki.nginx.org/Pitfalls -# http://wiki.nginx.org/QuickStart -# http://wiki.nginx.org/Configuration -# -# Generally, you will want to move this file somewhere, and start with a clean -# file but keep this around for reference. Or just disable in sites-enabled. -# -# Please see /usr/share/doc/nginx-doc/examples/ for more detailed examples. -## - -#server { -# listen 5555; -# server_name oag.cottagelabs.com 93.93.131.41; -# -# client_max_body_size 5M; -# client_body_buffer_size 128k; -# -# location / { -# proxy_pass http://localhost:5555/; -# proxy_redirect off; -# proxy_set_header Host $host; -# proxy_set_header X-Real-IP $remote_addr; -# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -# } -#} - -server { - listen 80; - server_name flower.oag.cottagelabs.com flower.test.oag.cottagelabs.com; - - access_log /var/log/nginx/celery-flower.access.log; - error_log /var/log/nginx/celery-flower.error.log; - - location / { - proxy_pass http://localhost:5555/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} diff --git a/config/nginx/sites-available/celery-flower-forward-to-yonce b/config/nginx/sites-available/celery-flower-forward-to-yonce deleted file mode 100644 index 9406612..0000000 --- a/config/nginx/sites-available/celery-flower-forward-to-yonce +++ /dev/null @@ -1,50 +0,0 @@ -# You may add here your -# server { -# ... -# } -# statements for each of your virtual hosts to this file - -## -# You should look at the following URL's in order to grasp a solid understanding -# of Nginx configuration files in order to fully unleash the power of Nginx. -# http://wiki.nginx.org/Pitfalls -# http://wiki.nginx.org/QuickStart -# http://wiki.nginx.org/Configuration -# -# Generally, you will want to move this file somewhere, and start with a clean -# file but keep this around for reference. Or just disable in sites-enabled. -# -# Please see /usr/share/doc/nginx-doc/examples/ for more detailed examples. 
-## - -#server { -# listen 5555; -# server_name oag.cottagelabs.com 93.93.131.41; -# -# client_max_body_size 5M; -# client_body_buffer_size 128k; -# -# location / { -# proxy_pass http://localhost:5555/; -# proxy_redirect off; -# proxy_set_header Host $host; -# proxy_set_header X-Real-IP $remote_addr; -# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -# } -#} - -server { - listen 80; - server_name flower.oag.cottagelabs.com; - - access_log /var/log/nginx/celery-forward-to-yonce.access.log; - error_log /var/log/nginx/celery-forward-to-yonce.error.log; - - location / { - proxy_pass http://95.85.59.151/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} diff --git a/config/nginx/sites-available/contentmine b/config/nginx/sites-available/contentmine deleted file mode 100644 index 08d7307..0000000 --- a/config/nginx/sites-available/contentmine +++ /dev/null @@ -1,25 +0,0 @@ -server { - listen 80; - - server_name contentmine.org www.contentmine.org; - - location /static { - alias /opt/contentmine/src/site/portality/static; - autoindex on; - expires max; - } - - location /media { - alias /opt/contentmine/src/site/media; - autoindex on; - expires max; - } - - location / { - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; - proxy_redirect off; - proxy_set_header X-Real-IP $remote_addr; - proxy_pass http://localhost:5014/; - } -} diff --git a/config/nginx/sites-available/cottagelabs.com-forward-to-cl b/config/nginx/sites-available/cottagelabs.com-forward-to-cl deleted file mode 100644 index a975ff4..0000000 --- a/config/nginx/sites-available/cottagelabs.com-forward-to-cl +++ /dev/null @@ -1,37 +0,0 @@ -server { - listen 80; - server_name .cottagelabs.com .ffts.co.uk .fightingfittrainingsystems.co.uk .rehearsalrecording.com .lynver.co.uk .dandimacg.co.uk; - - access_log /var/log/nginx/cottagelabs.com-forward-to-cl.access.log; - error_log /var/log/nginx/cottagelabs.com-forward-to-cl.error.log; - - location / { - proxy_pass http://178.62.223.99; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } -} - -server { - listen 443; - server_name .cottagelabs.com; - - ssl on; - ssl_certificate /etc/nginx/CERTS/cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cottagelabs.key; - - access_log /var/log/nginx/cottagelabs.com-forward-to-cl.access.log; - error_log /var/log/nginx/cottagelabs.com-forward-to-cl.error.log; - - location / { - proxy_pass https://178.62.223.99; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } -} diff --git a/config/nginx/sites-available/default b/config/nginx/sites-available/default deleted file mode 100644 index 8a79fd9..0000000 --- a/config/nginx/sites-available/default +++ /dev/null @@ -1,135 +0,0 @@ -# You may add here your -# server { -# ... -# } -# statements for each of your virtual hosts to this file - -## -# You should look at the following URL's in order to grasp a solid understanding -# of Nginx configuration files in order to fully unleash the power of Nginx. 
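The cottagelabs.com forwarder above is the template repeated throughout this patch: a port-80 server and a port-443 server with the shared certificate, each proxying to a backend IP while preserving the original Host header so the backend can still route by vhost. A quick spot-check of such a forwarder from the box itself (a hypothetical invocation; any hostname the server_name line matches will do):

curl -sI -H 'Host: cottagelabs.com' http://127.0.0.1/ | head -n 5   # expect the backend's response headers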
-# http://wiki.nginx.org/Pitfalls -# http://wiki.nginx.org/QuickStart -# http://wiki.nginx.org/Configuration -# -# Generally, you will want to move this file somewhere, and start with a clean -# file but keep this around for reference. Or just disable in sites-enabled. -# -# Please see /usr/share/doc/nginx-doc/examples/ for more detailed examples. -## - -# Modification - return 404 NOT FOUND when an unknown server name is accessed -# .. instead of showing Welcome to Nginx. -server { - listen 80 default_server; - server_name ""; - return 404; -} - -server { - #listen 80; ## listen for ipv4; this line is default and implied - #listen [::]:80 default ipv6only=on; ## listen for ipv6 - - root /usr/share/nginx/www; - index index.html index.htm; - - # Make site accessible from http://localhost/ - server_name localhost; - - location / { - # First attempt to serve request as file, then - # as directory, then fall back to index.html - try_files $uri $uri/ /index.html; - # Uncomment to enable naxsi on this location - # include /etc/nginx/naxsi.rules - } - - location /doc/ { - alias /usr/share/doc/; - autoindex on; - allow 127.0.0.1; - deny all; - } - - location /nginx_status { - stub_status on; - access_log off; - allow 127.0.0.1; - deny all; - } - - # Only for nginx-naxsi : process denied requests - #location /RequestDenied { - # For example, return an error code - #return 418; - #} - - #error_page 404 /404.html; - - # redirect server error pages to the static page /50x.html - # - #error_page 500 502 503 504 /50x.html; - #location = /50x.html { - # root /usr/share/nginx/www; - #} - - # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000 - # - #location ~ \.php$ { - # fastcgi_split_path_info ^(.+\.php)(/.+)$; - # # NOTE: You should have "cgi.fix_pathinfo = 0;" in php.ini - # - # # With php5-cgi alone: - # fastcgi_pass 127.0.0.1:9000; - # # With php5-fpm: - # fastcgi_pass unix:/var/run/php5-fpm.sock; - # fastcgi_index index.php; - # include fastcgi_params; - #} - - # deny access to .htaccess files, if Apache's document root - # concurs with nginx's one - # - #location ~ /\.ht { - # deny all; - #} -} - - -# another virtual host using mix of IP-, name-, and port-based configuration -# -#server { -# listen 8000; -# listen somename:8080; -# server_name somename alias another.alias; -# root html; -# index index.html index.htm; -# -# location / { -# try_files $uri $uri/ /index.html; -# } -#} - - -# HTTPS server -# -#server { -# listen 443; -# server_name localhost; -# -# root html; -# index index.html index.htm; -# -# ssl on; -# ssl_certificate cert.pem; -# ssl_certificate_key cert.key; -# -# ssl_session_timeout 5m; -# -# ssl_protocols SSLv3 TLSv1; -# ssl_ciphers ALL:!ADH:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv3:+EXP; -# ssl_prefer_server_ciphers on; -# -# location / { -# try_files $uri $uri/ /index.html; -# } -#} diff --git a/config/nginx/sites-available/doaj b/config/nginx/sites-available/doaj deleted file mode 100644 index dce8b3c..0000000 --- a/config/nginx/sites-available/doaj +++ /dev/null @@ -1,41 +0,0 @@ -server { - listen 80; - listen 443 ssl; - server_name www.doaj.org; - - ssl_certificate /etc/nginx/CERTS/cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cottagelabs.key; - - return 301 $scheme://doaj.org$request_uri; -} - -server { - listen 80; - listen 443 ssl; - server_name doaj.org .doaj.cottagelabs.com; - - access_log /var/log/nginx/doaj.access.log; - error_log /var/log/nginx/doaj.error.log; - - set_real_ip_from 95.85.56.138; - - ssl_certificate 
/etc/nginx/CERTS/cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cottagelabs.key; - - proxy_read_timeout 600s; - - location / { - proxy_pass http://localhost:5050/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /static/ { - alias /opt/doaj/src/doaj/portality/static/; - autoindex off; - expires max; - } -} diff --git a/config/nginx/sites-available/doaj-forward-to-DOAJ-with-local-static b/config/nginx/sites-available/doaj-forward-to-DOAJ-with-local-static deleted file mode 100644 index d7d46ed..0000000 --- a/config/nginx/sites-available/doaj-forward-to-DOAJ-with-local-static +++ /dev/null @@ -1,55 +0,0 @@ -server { - listen 80; - server_name doaj.org www.doaj.org; - - - access_log /var/log/nginx/doaj-forward-to-DOAJ-with-local-static.access.log; - error_log /var/log/nginx/doaj-forward-to-DOAJ-with-local-static.error.log; - - proxy_read_timeout 600s; - - location / { - proxy_pass http://178.62.116.49; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } - - location /static/ { - alias /opt/doaj/src/doaj/portality/static/; - autoindex off; - expires max; - } -} - -server { - listen 443; - server_name doaj.org www.doaj.org; - - - access_log /var/log/nginx/doaj-forward-to-DOAJ-with-local-static.access.log; - error_log /var/log/nginx/doaj-forward-to-DOAJ-with-local-static.error.log; - - ssl on; - ssl_certificate /etc/nginx/CERTS/cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cottagelabs.key; - - proxy_read_timeout 600s; - - location / { - proxy_pass https://178.62.116.49; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } - - location /static/ { - alias /opt/doaj/src/doaj/portality/static/; - autoindex off; - expires max; - } -} diff --git a/config/nginx/sites-available/doaj-forward-to-YONCE-with-local-static b/config/nginx/sites-available/doaj-forward-to-YONCE-with-local-static deleted file mode 100644 index 1327efa..0000000 --- a/config/nginx/sites-available/doaj-forward-to-YONCE-with-local-static +++ /dev/null @@ -1,55 +0,0 @@ -server { - listen 80; - server_name doaj.org www.doaj.org; - - - access_log /var/log/nginx/doaj-forward-to-YONCE-with-local-static.access.log; - error_log /var/log/nginx/doaj-forward-to-YONCE-with-local-static.error.log; - - proxy_read_timeout 600s; - - location / { - proxy_pass http://95.85.59.151; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } - - location /static/ { - alias /opt/doaj/src/doaj/portality/static/; - autoindex off; - expires max; - } -} - -server { - listen 443; - server_name doaj.org www.doaj.org; - - - access_log /var/log/nginx/doaj-forward-to-YONCE-with-local-static.access.log; - error_log /var/log/nginx/doaj-forward-to-YONCE-with-local-static.error.log; - - ssl on; - ssl_certificate /etc/nginx/CERTS/cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cottagelabs.key; - - proxy_read_timeout 600s; - - location / { - proxy_pass https://95.85.59.151; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header 
X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } - - location /static/ { - alias /opt/doaj/src/doaj/portality/static/; - autoindex off; - expires max; - } -} diff --git a/config/nginx/sites-available/doaj-staging-to-DOAJ-STAGING b/config/nginx/sites-available/doaj-staging-to-DOAJ-STAGING deleted file mode 100644 index a83795d..0000000 --- a/config/nginx/sites-available/doaj-staging-to-DOAJ-STAGING +++ /dev/null @@ -1,39 +0,0 @@ -server { - listen 80; - server_name staging.doaj.cottagelabs.com; - - access_log /var/log/nginx/doaj-staging-to-DOAJ-STAGING.access.log; - error_log /var/log/nginx/doaj-staging-to-DOAJ-STAGING.error.log; - - proxy_read_timeout 600s; - - location / { - proxy_pass http://95.85.48.213; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} - -server { - listen 443; - server_name staging.doaj.cottagelabs.com; - - ssl on; - ssl_certificate /etc/nginx/CERTS/cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cottagelabs.key; - - access_log /var/log/nginx/doaj-staging-to-DOAJ-STAGING.access.log; - error_log /var/log/nginx/doaj-staging-to-DOAJ-STAGING.error.log; - - proxy_read_timeout 600s; - - location / { - proxy_pass https://95.85.48.213; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} diff --git a/config/nginx/sites-available/doaj-test b/config/nginx/sites-available/doaj-test deleted file mode 100644 index 8cde57d..0000000 --- a/config/nginx/sites-available/doaj-test +++ /dev/null @@ -1,41 +0,0 @@ -server { - listen 80; - listen 443 ssl; - server_name www.doaj.org; - - ssl_certificate /etc/nginx/CERTS/cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cottagelabs.key; - - return 301 $scheme://doaj.org$request_uri; -} - -server { - listen 80; - listen 443 ssl; - server_name doaj.org .doaj.cottagelabs.com; - - access_log /var/log/nginx/doaj.access.log; - error_log /var/log/nginx/doaj.error.log; - - set_real_ip_from 95.85.56.138; - - ssl_certificate /etc/nginx/CERTS/cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cottagelabs.key; - - proxy_read_timeout 600s; - - location / { - proxy_pass http://localhost:5004/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /static/ { - alias /opt/doaj/src/doaj/portality/static/; - autoindex off; - expires max; - } -} diff --git a/config/nginx/sites-available/fundfind-forum-forward-to-PINKY b/config/nginx/sites-available/fundfind-forum-forward-to-PINKY deleted file mode 100644 index f597e73..0000000 --- a/config/nginx/sites-available/fundfind-forum-forward-to-PINKY +++ /dev/null @@ -1,16 +0,0 @@ -server { - listen 80; - server_name forum.fundfind.org; - - access_log /var/log/nginx/fundfind-forum-forward-to-PINKY.access.log; - error_log /var/log/nginx/fundfind-forum-forward-to-PINKY.error.log; - - location / { - proxy_pass http://188.226.153.213:4000; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } -} diff --git a/config/nginx/sites-available/fundfind-forward-to-FUNDFIND 
b/config/nginx/sites-available/fundfind-forward-to-FUNDFIND deleted file mode 100644 index e5ef607..0000000 --- a/config/nginx/sites-available/fundfind-forward-to-FUNDFIND +++ /dev/null @@ -1,16 +0,0 @@ -server { - listen 80; - server_name fundfind.org www.fundfind.org; - - access_log /var/log/nginx/fundfind-forward-to-MARKTCL.access.log; - error_log /var/log/nginx/fundfind-forward-to-MARKTCL.error.log; - - location / { - proxy_pass http://178.62.128.172; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } -} diff --git a/config/nginx/sites-available/g4he b/config/nginx/sites-available/g4he deleted file mode 100644 index 2b96cf8..0000000 --- a/config/nginx/sites-available/g4he +++ /dev/null @@ -1,17 +0,0 @@ -server { - listen 80; - server_name g4he.cottagelabs.com; - - #location / { - # root /var/www/g4he/graphview; - # index g4he.html; - #} - - location / { - proxy_pass http://localhost:5003/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} diff --git a/config/nginx/sites-available/harvest b/config/nginx/sites-available/harvest deleted file mode 100644 index 7585fdb..0000000 --- a/config/nginx/sites-available/harvest +++ /dev/null @@ -1,32 +0,0 @@ -server { - listen 80; - server_name harvest harvest1 95.85.19.215; - - location / { - proxy_pass http://localhost:5060/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } - - location /static/ { - alias /opt/harvest/src/harvest/portality/static/; - autoindex off; - expires max; - } -} - -# app debugging port (when another server needs to talk to app directly) -server { - listen 5560; - server_name harvest harvest1 95.85.19.215; - - location / { - proxy_pass http://localhost:5060/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} diff --git a/config/nginx/sites-available/harvester-for-doaj-es b/config/nginx/sites-available/harvester-for-doaj-es deleted file mode 100644 index eab7fc7..0000000 --- a/config/nginx/sites-available/harvester-for-doaj-es +++ /dev/null @@ -1,15 +0,0 @@ -server { - listen 80; - server_name 178.62.121.72; - - auth_basic "Restricted"; - auth_basic_user_file /etc/nginx/htpasswd; - - location / { - proxy_pass http://localhost:9200; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} diff --git a/config/nginx/sites-available/idfind b/config/nginx/sites-available/idfind deleted file mode 100644 index 93515d5..0000000 --- a/config/nginx/sites-available/idfind +++ /dev/null @@ -1,12 +0,0 @@ -server { - listen 80; - server_name idfind.cottagelabs.com; - - location / { - proxy_pass http://localhost:5003/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} diff --git a/config/nginx/sites-available/lantern-forward-to-OOZ b/config/nginx/sites-available/lantern-forward-to-OOZ deleted file mode 100644 index 1c7cdd5..0000000 --- a/config/nginx/sites-available/lantern-forward-to-OOZ +++ /dev/null @@ -1,33 +0,0 @@ -server { - listen 80; - 
server_name lantern.cottagelabs.com; - access_log /var/log/nginx/lantern-forward-to-OOZ.access.log; - error_log /var/log/nginx/lantern-forward-to-OOZ.error.log; - return 301 https://$server_name$request_uri; -} - -server { - listen 443 ssl; - server_name lantern.cottagelabs.com; - - ssl_certificate /etc/nginx/CERTS/cl_wildcard/*.cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cl_wildcard/*.cottagelabs.com.key; - - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; - ssl_ciphers "HIGH:!aNULL:!MD5 or HIGH:!aNULL:!MD5:!3DES"; - ssl_prefer_server_ciphers on; - - access_log /var/log/nginx/lantern-forward-to-OOZ.access.log; - error_log /var/log/nginx/lantern-forward-to-OOZ.error.log; - - proxy_read_timeout 600s; - - location / { - proxy_pass http://178.62.191.218; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } - -} diff --git a/config/nginx/sites-available/lantern-test b/config/nginx/sites-available/lantern-test deleted file mode 100644 index 40784ef..0000000 --- a/config/nginx/sites-available/lantern-test +++ /dev/null @@ -1,13 +0,0 @@ -server { - listen 80; - - server_name lantern.cottagelabs.com; - - location / { - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; - proxy_redirect off; - proxy_set_header X-Real-IP $remote_addr; - proxy_pass http://localhost:5050/; - } -} diff --git a/config/nginx/sites-available/leaps b/config/nginx/sites-available/leaps deleted file mode 100644 index ac720f3..0000000 --- a/config/nginx/sites-available/leaps +++ /dev/null @@ -1,81 +0,0 @@ -server { - listen 80; - - server_name leapssurvey.org www.leapssurvey.org; - - access_log /var/log/nginx/leaps-access.log; - error_log /var/log/nginx/leaps-error.log; - - client_max_body_size 200M; - client_body_buffer_size 128k; - - location /survey { - rewrite ^ https://$server_name$request_uri? permanent; - } - - location /account { - rewrite ^ https://$server_name$request_uri? permanent; - } - - location /schools { - rewrite ^ https://$server_name$request_uri? permanent; - } - - location /universities { - rewrite ^ https://$server_name$request_uri? permanent; - } - - location /admin { - rewrite ^ https://$server_name$request_uri? 
permanent; - } - - location /static { - autoindex off; - root /opt/leaps/src/leaps/portality; - break; - } - - location / { - proxy_pass http://localhost:5005/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } - -} - -server { - listen 443; - ssl on; - - ssl_certificate /etc/nginx/CERTS/cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cottagelabs.key; - - - keepalive_timeout 600; - - server_name leapssurvey.org www.leapssurvey.org; - - access_log /var/log/nginx/leaps-access.log; - error_log /var/log/nginx/leaps-error.log; - - client_max_body_size 200M; - client_body_buffer_size 128k; - - location /static { - autoindex off; - root /opt/leaps/src/leaps/portality; - break; - } - - location / { - proxy_pass http://localhost:5005/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } - -} - diff --git a/config/nginx/sites-available/leaps-forward-to-EDACCESS b/config/nginx/sites-available/leaps-forward-to-EDACCESS deleted file mode 100644 index 35e785a..0000000 --- a/config/nginx/sites-available/leaps-forward-to-EDACCESS +++ /dev/null @@ -1,44 +0,0 @@ -server { - listen 80; - server_name leapssurvey.org www.leapssurvey.org; - - - access_log /var/log/nginx/leaps-forward-to-EDACCESS.access.log; - error_log /var/log/nginx/leaps-forward-to-EDACCESS.error.log; - - proxy_read_timeout 600s; - - location / { - proxy_pass http://188.226.168.183; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } -} - -server { - listen 443; - - server_name leapssurvey.org www.leapssurvey.org; - - - access_log /var/log/nginx/leaps-forward-to-EDACCESS.access.log; - error_log /var/log/nginx/leaps-forward-to-EDACCESS.error.log; - - ssl on; - ssl_certificate /etc/nginx/CERTS/cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cottagelabs.key; - - proxy_read_timeout 600s; - - location / { - proxy_pass https://188.226.168.183; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } -} diff --git a/config/nginx/sites-available/leviathan b/config/nginx/sites-available/leviathan deleted file mode 100644 index 24d2aca..0000000 --- a/config/nginx/sites-available/leviathan +++ /dev/null @@ -1,19 +0,0 @@ -server { - listen 80; - - server_name leviathan.cottagelabs.com; - - location /static { - alias /opt/leviathan/src/leviathan/portality/static; - autoindex on; - expires max; - } - - location / { - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; - proxy_redirect off; - proxy_set_header X-Real-IP $remote_addr; - proxy_pass http://localhost:5019/; - } -} diff --git a/config/nginx/sites-available/oacwellcome-production b/config/nginx/sites-available/oacwellcome-production deleted file mode 100644 index 002e34f..0000000 --- a/config/nginx/sites-available/oacwellcome-production +++ /dev/null @@ -1,13 +0,0 @@ -server { - listen 80; - - server_name compliance.cottagelabs.com; - - location / { - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; - proxy_redirect off; - proxy_set_header X-Real-IP $remote_addr; - proxy_pass 
http://localhost:5050/; - } -} diff --git a/config/nginx/sites-available/oacwellcome-test b/config/nginx/sites-available/oacwellcome-test deleted file mode 100644 index fd6d20c..0000000 --- a/config/nginx/sites-available/oacwellcome-test +++ /dev/null @@ -1,13 +0,0 @@ -server { - listen 80; - - server_name oacwellcome-test.cottagelabs.com; - - location / { - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; - proxy_redirect off; - proxy_set_header X-Real-IP $remote_addr; - proxy_pass http://localhost:5051/; - } -} diff --git a/config/nginx/sites-available/oag b/config/nginx/sites-available/oag deleted file mode 100644 index 2776606..0000000 --- a/config/nginx/sites-available/oag +++ /dev/null @@ -1,25 +0,0 @@ -server { - listen 80; - server_name www.howopenisit.org howopenisit.org oag.cottagelabs.com test.oag.cottagelabs.com; - - access_log /var/log/nginx/oag.access.log; - error_log /var/log/nginx/oag.error.log; - - set_real_ip_from 95.85.56.138; - - location / { - proxy_pass http://localhost:5051/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_send_timeout 86400s; - proxy_read_timeout 86400s; - } - - location /static/ { - alias /opt/oag/src/OpenArticleGauge/openarticlegauge/static/; - autoindex off; - expires max; - } -} diff --git a/config/nginx/sites-available/oag-forward-to-yonce b/config/nginx/sites-available/oag-forward-to-yonce deleted file mode 100644 index e432ae8..0000000 --- a/config/nginx/sites-available/oag-forward-to-yonce +++ /dev/null @@ -1,16 +0,0 @@ -server { - listen 80; - server_name www.howopenisit.org howopenisit.org oag.cottagelabs.com; - - access_log /var/log/nginx/oag-forward-to-yonce.access.log; - error_log /var/log/nginx/oag-forward-to-yonce.error.log; - - location / { - proxy_pass http://95.85.59.151; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } -} diff --git a/config/nginx/sites-available/oaspectrum-forward-to-OASPECTRUM b/config/nginx/sites-available/oaspectrum-forward-to-OASPECTRUM deleted file mode 100644 index 5a3492b..0000000 --- a/config/nginx/sites-available/oaspectrum-forward-to-OASPECTRUM +++ /dev/null @@ -1,16 +0,0 @@ -server { - listen 80; - server_name .oaspectrum.org; - - access_log /var/log/nginx/oaspectrum-forward-to-OASPECTRUM.access.log; - error_log /var/log/nginx/oaspectrum-forward-to-OASPECTRUM.error.log; - - location / { - proxy_pass http://178.62.3.108/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } -} diff --git a/config/nginx/sites-available/oaspectrum-production b/config/nginx/sites-available/oaspectrum-production deleted file mode 100644 index d330897..0000000 --- a/config/nginx/sites-available/oaspectrum-production +++ /dev/null @@ -1,14 +0,0 @@ -server { - listen 80; - - server_name .oaspectrum.org; - set_real_ip_from 178.62.75.236; - - location / { - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; - proxy_redirect off; - proxy_set_header X-Real-IP $remote_addr; - proxy_pass http://localhost:5050/; - } -} diff --git a/config/nginx/sites-available/ooz-es b/config/nginx/sites-available/ooz-es deleted file mode 100644 index 150ba7a..0000000 --- 
a/config/nginx/sites-available/ooz-es +++ /dev/null @@ -1,15 +0,0 @@ -server { - listen 80; - server_name oozes.cottagelabs.com; - - auth_basic "Restricted"; - auth_basic_user_file /etc/nginx/htpasswd; - - location / { - proxy_pass http://localhost:9200; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} diff --git a/config/nginx/sites-available/oozes-forward-to-OOZ b/config/nginx/sites-available/oozes-forward-to-OOZ deleted file mode 100644 index e633d4a..0000000 --- a/config/nginx/sites-available/oozes-forward-to-OOZ +++ /dev/null @@ -1,33 +0,0 @@ -server { - listen 80; - server_name oozes.cottagelabs.com; - access_log /var/log/nginx/oozes-forward-to-OOZ.access.log; - error_log /var/log/nginx/oozes-forward-to-OOZ.error.log; - return 301 https://$server_name$request_uri; -} - -server { - listen 443 ssl; - server_name oozes.cottagelabs.com; - - ssl_certificate /etc/nginx/CERTS/cl_wildcard/*.cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cl_wildcard/*.cottagelabs.com.key; - - ssl_protocols TLSv1 TLSv1.1 TLSv1.2; - ssl_ciphers "HIGH:!aNULL:!MD5 or HIGH:!aNULL:!MD5:!3DES"; - ssl_prefer_server_ciphers on; - - access_log /var/log/nginx/oozes-forward-to-OOZ.access.log; - error_log /var/log/nginx/oozes-forward-to-OOZ.error.log; - - proxy_read_timeout 600s; - - location / { - proxy_pass http://178.62.191.218; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } - -} diff --git a/config/nginx/sites-available/openaccessbuton.org-forward-to-oabutton b/config/nginx/sites-available/openaccessbuton.org-forward-to-oabutton deleted file mode 100644 index 9acca0c..0000000 --- a/config/nginx/sites-available/openaccessbuton.org-forward-to-oabutton +++ /dev/null @@ -1,40 +0,0 @@ -server { - listen 80; - server_name .openaccessbutton.org; - - access_log /var/log/nginx/openaccessbutton.org-forward-to-oabutton.access.log; - error_log /var/log/nginx/openaccessbutton.org-forward-to-oabutton.error.log; - - location / { - #add_header Access-Control-Allow-Origin *; - proxy_pass http://178.62.115.33; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } -} - -server { - listen 443; - server_name .openaccessbutton.org; - - access_log /var/log/nginx/openaccessbutton-forward-to-oabutton.access.log; - error_log /var/log/nginx/openaccessbutton-forward-to-oabutton.error.log; - - ssl on; - ssl_certificate /etc/nginx/OAB_CERTS/chained.crt; - ssl_certificate_key /etc/nginx/OAB_CERTS/oab_cl.key; - - proxy_read_timeout 600s; - - location / { - #add_header Access-Control-Allow-Origin *; - proxy_pass https://178.62.115.33; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} diff --git a/config/nginx/sites-available/openrefine b/config/nginx/sites-available/openrefine deleted file mode 100644 index d5a0503..0000000 --- a/config/nginx/sites-available/openrefine +++ /dev/null @@ -1,15 +0,0 @@ -server { - listen 80; - server_name openrefine.test.cottagelabs.com; - - client_max_body_size 1024M; - client_body_buffer_size 128k; - - location / { - proxy_pass http://localhost:3000/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header 
X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} diff --git a/config/nginx/sites-available/phd b/config/nginx/sites-available/phd deleted file mode 100644 index 50c5a8d..0000000 --- a/config/nginx/sites-available/phd +++ /dev/null @@ -1,19 +0,0 @@ -server { - listen 80; - - server_name phd.cottagelabs.com ifthisistheanswer.com; - - location /static { - alias /opt/phd/src/phd/portality/static; - autoindex on; - expires max; - } - - location / { - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; - proxy_redirect off; - proxy_set_header X-Real-IP $remote_addr; - proxy_pass http://localhost:5005/; - } -} diff --git a/config/nginx/sites-available/rc-doaj-to-DOAJTMP b/config/nginx/sites-available/rc-doaj-to-DOAJTMP deleted file mode 100644 index 5d82fd0..0000000 --- a/config/nginx/sites-available/rc-doaj-to-DOAJTMP +++ /dev/null @@ -1,36 +0,0 @@ -server { - listen 80; - server_name rc.doaj.cottagelabs.com; - - access_log /var/log/nginx/rc-doaj-to-DOAJTMP.access.log; - error_log /var/log/nginx/rc-doaj-to-DOAJTMP.error.log; - - location / { - proxy_pass http://188.226.163.151; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} - -server { - listen 443; - server_name rc.doaj.cottagelabs.com; - - ssl on; - ssl_certificate /etc/nginx/CERTS/cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cottagelabs.key; - - access_log /var/log/nginx/rc-doaj-to-DOAJTMP.access.log; - error_log /var/log/nginx/rc-doaj-to-DOAJTMP.error.log; - - location / { - proxy_pass https://188.226.163.151; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - } -} diff --git a/config/nginx/sites-available/rc-doaj-to-YONCE b/config/nginx/sites-available/rc-doaj-to-YONCE deleted file mode 100644 index d8b08d5..0000000 --- a/config/nginx/sites-available/rc-doaj-to-YONCE +++ /dev/null @@ -1,40 +0,0 @@ -server { - listen 80; - server_name rc.doaj.cottagelabs.com; - - access_log /var/log/nginx/rc-doaj-to-YONCE.access.log; - error_log /var/log/nginx/rc-doaj-to-YONCE.error.log; - - proxy_read_timeout 600s; - - location / { - proxy_pass http://95.85.59.151; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} - -server { - listen 443; - server_name rc.doaj.cottagelabs.com; - - ssl on; - ssl_certificate /etc/nginx/CERTS/cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cottagelabs.key; - - access_log /var/log/nginx/rc-doaj-to-YONCE.access.log; - error_log /var/log/nginx/rc-doaj-to-YONCE.error.log; - - proxy_read_timeout 600s; - - location / { - proxy_pass https://95.85.59.151; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - } -} diff --git a/config/nginx/sites-available/swap b/config/nginx/sites-available/swap deleted file mode 100644 index f83dc35..0000000 --- a/config/nginx/sites-available/swap +++ /dev/null @@ -1,77 +0,0 @@ -server { - listen 80; - - server_name swapsurvey.org www.swapsurvey.org; - - access_log /var/log/nginx/swap-access.log; - error_log 
/var/log/nginx/swap-error.log; - - client_max_body_size 30M; - client_body_buffer_size 128k; - - location /registration { - rewrite ^ https://$server_name$request_uri? permanent; - } - - location /account { - rewrite ^ https://$server_name$request_uri? permanent; - } - - location /courses { - rewrite ^ https://$server_name$request_uri? permanent; - } - - location /admin { - rewrite ^ https://$server_name$request_uri? permanent; - } - - location /static { - autoindex off; - root /opt/SWAP/src/SWAP/portality; - break; - } - - location / { - proxy_pass http://localhost:5006/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } - -} - -server { - listen 443; - ssl on; - - ssl_certificate /etc/nginx/CERTS/cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cottagelabs.key; - - - keepalive_timeout 600; - - server_name swapsurvey.org www.swapsurvey.org; - - access_log /var/log/nginx/swap-access.log; - error_log /var/log/nginx/swap-error.log; - - client_max_body_size 30M; - client_body_buffer_size 128k; - - location /static { - autoindex off; - root /opt/SWAP/src/SWAP/portality; - break; - } - - location / { - proxy_pass http://localhost:5006/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } - -} - diff --git a/config/nginx/sites-available/swap-forward-to-EDACCESS b/config/nginx/sites-available/swap-forward-to-EDACCESS deleted file mode 100644 index ff25ded..0000000 --- a/config/nginx/sites-available/swap-forward-to-EDACCESS +++ /dev/null @@ -1,44 +0,0 @@ -server { - listen 80; - server_name swapsurvey.org www.swapsurvey.org; - - - access_log /var/log/nginx/swap-forward-to-EDACCESS.access.log; - error_log /var/log/nginx/swap-forward-to-EDACCESS.error.log; - - proxy_read_timeout 600s; - - location / { - proxy_pass http://188.226.168.183; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } -} - -server { - listen 443; - - server_name swapsurvey.org www.swapsurvey.org; - - - access_log /var/log/nginx/swap-forward-to-EDACCESS.access.log; - error_log /var/log/nginx/swap-forward-to-EDACCESS.error.log; - - ssl on; - ssl_certificate /etc/nginx/CERTS/cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cottagelabs.key; - - proxy_read_timeout 600s; - - location / { - proxy_pass https://188.226.168.183; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } -} diff --git a/config/nginx/sites-available/test-doaj-to-OOZ-server b/config/nginx/sites-available/test-doaj-to-OOZ-server deleted file mode 100644 index 4a968aa..0000000 --- a/config/nginx/sites-available/test-doaj-to-OOZ-server +++ /dev/null @@ -1,41 +0,0 @@ -server { - listen 80; - server_name test.doaj.cottagelabs.com; - - access_log /var/log/nginx/test-doaj-to-trich-server.access.log; - error_log /var/log/nginx/test-doaj-to-trich-server.error.log; - - proxy_read_timeout 600s; - - location / { - proxy_pass http://5.101.97.169; # Richard's test server - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } 
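A dating note on the TLS blocks above and below: "listen 443;" plus a separate "ssl on;" is the older nginx idiom, since deprecated in favour of "listen 443 ssl;" (which the doaj and doaj-test files earlier in this patch already use). To see which certificate one of these forwarders actually served, a sketch run on the host itself:

echo | openssl s_client -connect 127.0.0.1:443 -servername swapsurvey.org 2>/dev/null | openssl x509 -noout -subject -dates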
-} - -server { - listen 443; - server_name test.doaj.cottagelabs.com; - - ssl on; - ssl_certificate /etc/nginx/CERTS/cottagelabs.com.chained.crt; - ssl_certificate_key /etc/nginx/CERTS/cottagelabs.key; - - access_log /var/log/nginx/test-doaj-to-trich-server.access.log; - error_log /var/log/nginx/test-doaj-to-trich-server.error.log; - - proxy_read_timeout 600s; - - location / { - proxy_pass https://5.101.97.169; # Richard's test server - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } -} diff --git a/config/nginx/sites-available/test-oag-forward-to-PINKY b/config/nginx/sites-available/test-oag-forward-to-PINKY deleted file mode 100644 index a0638d0..0000000 --- a/config/nginx/sites-available/test-oag-forward-to-PINKY +++ /dev/null @@ -1,32 +0,0 @@ -server { - listen 80; - server_name test.oag.cottagelabs.com; - - access_log /var/log/nginx/test-oag-forward-to-PINKY.access.log; - error_log /var/log/nginx/test-oag-forward-to-PINKY.error.log; - - location / { - proxy_pass http://188.226.153.213; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # proxy_buffering off; - } -} - -server { - listen 80; - server_name flower.test.oag.cottagelabs.com; - - access_log /var/log/nginx/test-oag-forward-to-PINKY.access.log; - error_log /var/log/nginx/test-oag-forward-to-PINKY.error.log; - - location / { - proxy_pass http://188.226.153.213; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} diff --git a/config/nginx/sites-available/uniboard-test b/config/nginx/sites-available/uniboard-test deleted file mode 100644 index 18c4148..0000000 --- a/config/nginx/sites-available/uniboard-test +++ /dev/null @@ -1,15 +0,0 @@ -server { - listen 80; - server_name uniboard.ooz.cottagelabs.com; - - access_log /var/log/nginx/uniboard-test.access.log; - error_log /var/log/nginx/uniboard-test.error.log; - - location / { - proxy_pass http://localhost:5011/; - proxy_redirect off; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} diff --git a/config/supervisor/conf.d/README b/config/supervisor/conf.d/README deleted file mode 100644 index 2479a38..0000000 --- a/config/supervisor/conf.d/README +++ /dev/null @@ -1,3 +0,0 @@ -Place custom configuration files for supervisor here. 
They -will be read after the main configuration file in -/etc/supervisor/supervisord.conf diff --git a/config/supervisor/conf.d/bibsoup.net.conf b/config/supervisor/conf.d/bibsoup.net.conf deleted file mode 100644 index c2cc5df..0000000 --- a/config/supervisor/conf.d/bibsoup.net.conf +++ /dev/null @@ -1,7 +0,0 @@ -[program:bibsoup.net] -command=/opt/%(program_name)s/bin/gunicorn -w 4 -b 127.0.0.1:5050 bibserver.web:app -user=www-data -directory=/opt/%(program_name)s/src/bibserver -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true diff --git a/config/supervisor/conf.d/doaj-production.conf b/config/supervisor/conf.d/doaj-production.conf deleted file mode 100644 index c6040f3..0000000 --- a/config/supervisor/conf.d/doaj-production.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:doaj-production] -command=/opt/doaj/bin/newrelic-admin run-program /opt/doaj/bin/gunicorn -c /opt/doaj/src/doaj/deploy/doaj_gunicorn_config.py portality.app:app -environment = DOAJENV=production,NEW_RELIC_CONFIG_FILE=/opt/doaj/src/doaj/deploy/newrelic.ini,NEW_RELIC_ENVIRONMENT=production -user=cloo -directory=/opt/doaj/src/doaj -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff --git a/config/supervisor/conf.d/doaj-staging.conf b/config/supervisor/conf.d/doaj-staging.conf deleted file mode 100644 index 1d82156..0000000 --- a/config/supervisor/conf.d/doaj-staging.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:doaj-staging] -command=/opt/doaj/bin/newrelic-admin run-program /opt/doaj/bin/gunicorn -c /opt/doaj/src/doaj/deploy/doaj_gunicorn_config.py portality.app:app -environment = DOAJENV=staging,NEW_RELIC_CONFIG_FILE=/opt/doaj/src/doaj/deploy/newrelic.ini,NEW_RELIC_ENVIRONMENT=staging -user=cloo -directory=/opt/doaj/src/doaj -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff --git a/config/supervisor/conf.d/doaj-test.conf b/config/supervisor/conf.d/doaj-test.conf deleted file mode 100644 index d5533d3..0000000 --- a/config/supervisor/conf.d/doaj-test.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:doaj-test] -command=/opt/doaj/bin/python /opt/doaj/src/doaj/portality/app.py -environment= DOAJENV=test,NEW_RELIC_ENVIRONMENT=test -user=cloo -directory=/opt/doaj/src/doaj -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff --git a/config/supervisor/conf.d/g4he.conf b/config/supervisor/conf.d/g4he.conf deleted file mode 100644 index 93e2bd7..0000000 --- a/config/supervisor/conf.d/g4he.conf +++ /dev/null @@ -1,9 +0,0 @@ -[program:g4he] -command=/opt/g4he/bin/python /opt/g4he/src/g4he/portality/app.py -user=cloo -directory=/opt/g4he/src/g4he -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff --git a/config/supervisor/conf.d/lantern-test-daemon.conf b/config/supervisor/conf.d/lantern-test-daemon.conf deleted file mode 100644 index a486f2d..0000000 --- a/config/supervisor/conf.d/lantern-test-daemon.conf +++ /dev/null @@ -1,9 +0,0 @@ -[program:lantern-test-daemon] -command=/opt/lantern/bin/python 
/opt/lantern/src/lantern/service/runner.py -user=cloo -directory=/opt/lantern/src/lantern -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff --git a/config/supervisor/conf.d/lantern-test.conf b/config/supervisor/conf.d/lantern-test.conf deleted file mode 100644 index 95d6bcd..0000000 --- a/config/supervisor/conf.d/lantern-test.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:lantern-test] -command=/opt/lantern/bin/newrelic-admin run-program /opt/lantern/bin/gunicorn -c /opt/lantern/src/lantern/deploy/lantern_gunicorn_config.py service.web:app -environment = NEW_RELIC_CONFIG_FILE=/opt/lantern/src/lantern/deploy/newrelic.ini,NEW_RELIC_ENVIRONMENT=test -user=cloo -directory=/opt/lantern/src/lantern -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff --git a/config/supervisor/conf.d/leaps.conf b/config/supervisor/conf.d/leaps.conf deleted file mode 100644 index 8b35f5d..0000000 --- a/config/supervisor/conf.d/leaps.conf +++ /dev/null @@ -1,7 +0,0 @@ -[program:leaps] -command=/opt/%(program_name)s/bin/gunicorn -w 4 -b 127.0.0.1:5005 portality.app:app -user=www-data -directory=/opt/%(program_name)s/src/%(program_name)s -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true diff --git a/config/supervisor/conf.d/metatool.conf b/config/supervisor/conf.d/metatool.conf deleted file mode 100644 index 64844dc..0000000 --- a/config/supervisor/conf.d/metatool.conf +++ /dev/null @@ -1,9 +0,0 @@ -[program:metatool] -command=/opt/metatool/bin/python /opt/metatool/src/metatool/metatool/web.py -user=cloo -directory=/opt/metatool/src/metatool -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff --git a/config/supervisor/conf.d/oacwellcome-production-daemon.conf b/config/supervisor/conf.d/oacwellcome-production-daemon.conf deleted file mode 100644 index 2973d58..0000000 --- a/config/supervisor/conf.d/oacwellcome-production-daemon.conf +++ /dev/null @@ -1,9 +0,0 @@ -[program:oacwellcome-production-daemon] -command=/opt/oacwellcome/bin/python /opt/oacwellcome/src/oacwellcome/service/runner.py -user=cloo -directory=/opt/oacwellcome/src/oacwellcome -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff --git a/config/supervisor/conf.d/oacwellcome-production.conf b/config/supervisor/conf.d/oacwellcome-production.conf deleted file mode 100644 index 1cc43fb..0000000 --- a/config/supervisor/conf.d/oacwellcome-production.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:oacwellcome-production] -command=/opt/oacwellcome/bin/newrelic-admin run-program /opt/oacwellcome/bin/gunicorn -c /opt/oacwellcome/src/oacwellcome/deploy/oacwellcome_gunicorn_config.py service.web:app -environment = NEW_RELIC_CONFIG_FILE=/opt/oacwellcome/src/oacwellcome/deploy/newrelic.ini,NEW_RELIC_ENVIRONMENT=production -user=cloo -directory=/opt/oacwellcome/src/oacwellcome -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff 
--git a/config/supervisor/conf.d/oacwellcome-test-daemon.conf b/config/supervisor/conf.d/oacwellcome-test-daemon.conf deleted file mode 100644 index 91e0e35..0000000 --- a/config/supervisor/conf.d/oacwellcome-test-daemon.conf +++ /dev/null @@ -1,9 +0,0 @@ -[program:oacwellcome-test-daemon] -command=/opt/oacwellcome/bin/python /opt/oacwellcome/src/oacwellcome/service/runner.py -user=cloo -directory=/opt/oacwellcome/src/oacwellcome -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff --git a/config/supervisor/conf.d/oacwellcome-test.conf b/config/supervisor/conf.d/oacwellcome-test.conf deleted file mode 100644 index 5786f32..0000000 --- a/config/supervisor/conf.d/oacwellcome-test.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:oacwellcome-test] -command=/opt/oacwellcome/bin/newrelic-admin run-program /opt/oacwellcome/bin/gunicorn -c /opt/oacwellcome/src/oacwellcome/deploy/oacwellcome_gunicorn_config.py service.web:app -environment = NEW_RELIC_CONFIG_FILE=/opt/oacwellcome/src/oacwellcome/deploy/newrelic.ini,NEW_RELIC_ENVIRONMENT=test -user=cloo -directory=/opt/oacwellcome/src/oacwellcome -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff --git a/config/supervisor/conf.d/oag-celery-flower.conf b/config/supervisor/conf.d/oag-celery-flower.conf deleted file mode 100644 index d5684f3..0000000 --- a/config/supervisor/conf.d/oag-celery-flower.conf +++ /dev/null @@ -1,6 +0,0 @@ -[program:oag-celery-flower] -command=/opt/oag/bin/flower --broker=redis://localhost --basic_auth=oagflower:wellitspublicinthesysadminrepoanywayweneedaprivaterepoforconfigs -user=nobody -stdout_logfile=/var/log/supervisor/%(program_name)s.log -stderr_logfile=/var/log/supervisor/%(program_name)s.log -autostart=true diff --git a/config/supervisor/conf.d/oag-celery.conf b/config/supervisor/conf.d/oag-celery.conf deleted file mode 100644 index d0e56eb..0000000 --- a/config/supervisor/conf.d/oag-celery.conf +++ /dev/null @@ -1,26 +0,0 @@ -[program:oag-celery] -command=/opt/oag/bin/celery multi start 8 -A openarticlegauge.slavedriver -l info --pidfile=%%n.pid --logfile=%%n.log -Q:1-3 detect_provider -Q:4-6 provider_licence -Q:7 store_results -Q:8 flush_buffer -user=cloo -directory=/opt/oag/src/OpenArticleGauge/bin -stdout_logfile=/var/log/supervisor/%(program_name)s.log -stderr_logfile=/var/log/supervisor/%(program_name)s.log -autostart=true -autorestart=false -stopwaitsecs=600 - -[program:oag-celerybeat] -command=/opt/oag/bin/celery worker --app=openarticlegauge.slavedriver -B -l info -n %(program_name)s -user=cloo -directory=/opt/oag/src/OpenArticleGauge/bin -stdout_logfile=/var/log/supervisor/%(program_name)s.log -stderr_logfile=/var/log/supervisor/%(program_name)s.log -autostart=true -autorestart=true -stopwaitsecs=600 - -[program:oag-celery-flower] -command=/opt/oag/bin/flower --broker=redis://localhost --auth=".*@cottagelabs\.com|cn@cameronneylon.net|cneylon@plos.org" -user=nobody -stdout_logfile=/var/log/supervisor/%(program_name)s.log -stderr_logfile=/var/log/supervisor/%(program_name)s.log -autostart=true diff --git a/config/supervisor/conf.d/oag-celery.conf.old b/config/supervisor/conf.d/oag-celery.conf.old deleted file mode 100644 index 7ea48aa..0000000 --- a/config/supervisor/conf.d/oag-celery.conf.old +++ /dev/null @@ -1,63 +0,0 @@ -[program:oag-celery] 
-numprocs_start=1 -numprocs=10 -process_name=%(program_name)s_%(process_num)2d -command=/opt/oag/bin/celery worker --app=openarticlegauge.slavedriver -l info -Q detect_provider,provider_license,store_results,flush_buffer -n oag-%(process_num)1d.cl2 -user=nobody -directory=/opt/oag/src/OpenArticleGauge/bin -stdout_logfile=/var/log/supervisor/%(program_name)s-%(process_num)1d.log -stderr_logfile=/var/log/supervisor/%(program_name)s-%(process_num)1d.log -autostart=true -stopwaitsecs=600 - -[program:oag-celery-provider_license] -numprocs_start=4 -numprocs=5 -process_name=%(program_name)s_%(process_num)1d -command=/opt/oag/bin/celery worker --app=openarticlegauge.slavedriver -l info -Q provider_license -n oag-%(process_num)1d.cl2 -user=nobody -directory=/opt/oag/src/OpenArticleGauge/bin -stdout_logfile=/var/log/supervisor/%(program_name)s-%(process_num)1d.log -stderr_logfile=/var/log/supervisor/%(program_name)s-%(process_num)1d.log -autostart=true -stopwaitsecs=600 - -[program:oag-celery-store_results] -numprocs_start=9 -numprocs=1 -process_name=%(program_name)s_%(process_num)1d -command=/opt/oag/bin/celery worker --app=openarticlegauge.slavedriver -l info -Q store_results -n oag-%(process_num)1d.cl2 -user=nobody -directory=/opt/oag/src/OpenArticleGauge/bin -stdout_logfile=/var/log/supervisor/%(program_name)s-%(process_num)1d.log -stderr_logfile=/var/log/supervisor/%(program_name)s-%(process_num)1d.log -autostart=true -stopwaitsecs=600 - -[program:oag-celery-flush_buffer] -numprocs_start=10 -numprocs=1 -process_name=%(program_name)s_%(process_num)d -command=/opt/oag/bin/celery worker --app=openarticlegauge.slavedriver -l info -Q flush_buffer -n oag-%(process_num)1d.cl2 -user=nobody -directory=/opt/oag/src/OpenArticleGauge/bin -stdout_logfile=/var/log/supervisor/%(program_name)s-%(process_num)d.log -stderr_logfile=/var/log/supervisor/%(program_name)s-%(process_num)1d.log -autostart=true -stopwaitsecs=600 - -[program:oag-celerybeat] -command=/opt/oag/bin/celery worker --app=openarticlegauge.slavedriver -B -l info -n oag-celerybeat.cl2 -user=nobody -directory=/opt/oag/src/OpenArticleGauge/bin -stdout_logfile=/var/log/supervisor/%(program_name)s.log -stderr_logfile=/var/log/supervisor/%(program_name)s.log -autostart=true -stopwaitsecs=600 - -[program:oag-celery-flower] -command=/opt/oag/bin/flower --broker=redis://localhost -user=nobody -stdout_logfile=/var/log/supervisor/%(program_name)s.log -stderr_logfile=/var/log/supervisor/%(program_name)s.log -autostart=true diff --git a/config/supervisor/conf.d/oag.conf b/config/supervisor/conf.d/oag.conf deleted file mode 100644 index 36bed1a..0000000 --- a/config/supervisor/conf.d/oag.conf +++ /dev/null @@ -1,7 +0,0 @@ -[program:oag] -command=/opt/%(program_name)s/bin/gunicorn -c /opt/oag/src/OpenArticleGauge/deploy/oag_gunicorn_config.py openarticlegauge.app:app -user=cloo -directory=/opt/%(program_name)s/src/OpenArticleGauge -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true diff --git a/config/supervisor/conf.d/oagr-oacwellcome-test-daemon.conf b/config/supervisor/conf.d/oagr-oacwellcome-test-daemon.conf deleted file mode 100644 index ec61371..0000000 --- a/config/supervisor/conf.d/oagr-oacwellcome-test-daemon.conf +++ /dev/null @@ -1,9 +0,0 @@ -[program:oagr-oacwellcome-test-daemon] -command=/opt/oacwellcome/bin/python /opt/oacwellcome/src/oacwellcome/magnificent-octopus/octopus/modules/oag/runner.py -user=cloo -directory=/opt/oacwellcome/src/oacwellcome 
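In the supervisor files above and below, %(program_name)s and %(process_num)2d are supervisord's own expansions, not shell variables; the numprocs stanzas in oag-celery.conf.old fan a single [program:x] section out into a process group, which supervisorctl addresses with a group:wildcard pattern. A sketch of the reload-and-inspect cycle these files assume (the same one the deploy scripts below spell out):

sudo supervisorctl reread                  # parse changed files under conf.d
sudo supervisorctl update                  # start/stop programs to match
sudo supervisorctl status 'oag-celery:*'   # a numprocs program becomes a named group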
-stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff --git a/config/supervisor/conf.d/oagr-production-daemon.conf b/config/supervisor/conf.d/oagr-production-daemon.conf deleted file mode 100644 index cd2c550..0000000 --- a/config/supervisor/conf.d/oagr-production-daemon.conf +++ /dev/null @@ -1,9 +0,0 @@ -[program:oagr-production-daemon] -command=/opt/oacwellcome/bin/python /opt/oacwellcome/src/oacwellcome/magnificent-octopus/octopus/modules/oag/runner.py -user=cloo -directory=/opt/oacwellcome/src/oacwellcome -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff --git a/config/supervisor/conf.d/oagr-test-daemon.conf b/config/supervisor/conf.d/oagr-test-daemon.conf deleted file mode 100644 index 37ace4d..0000000 --- a/config/supervisor/conf.d/oagr-test-daemon.conf +++ /dev/null @@ -1,9 +0,0 @@ -[program:oagr-test-daemon] -command=/opt/lantern/bin/python /opt/lantern/src/lantern/magnificent-octopus/octopus/modules/oag/runner.py -user=cloo -directory=/opt/lantern/src/lantern -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff --git a/config/supervisor/conf.d/oaspectrum-production.conf b/config/supervisor/conf.d/oaspectrum-production.conf deleted file mode 100644 index eebbf87..0000000 --- a/config/supervisor/conf.d/oaspectrum-production.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:oaspectrum-production] -command=/opt/oaspectrum/bin/newrelic-admin run-program /opt/oaspectrum/bin/gunicorn -c /opt/oaspectrum/src/oaspectrum/deploy/oaspectrum_gunicorn_config.py service.web:app -environment = NEW_RELIC_CONFIG_FILE=/opt/oaspectrum/src/oaspectrum/deploy/newrelic.ini,NEW_RELIC_ENVIRONMENT=production -user=cloo -directory=/opt/oaspectrum/src/oaspectrum -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff --git a/config/supervisor/conf.d/swap.conf b/config/supervisor/conf.d/swap.conf deleted file mode 100644 index bf88271..0000000 --- a/config/supervisor/conf.d/swap.conf +++ /dev/null @@ -1,7 +0,0 @@ -[program:SWAP] -command=/opt/%(program_name)s/bin/gunicorn -w 4 -b 127.0.0.1:5006 portality.app:app -user=www-data -directory=/opt/%(program_name)s/src/%(program_name)s -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true diff --git a/config/supervisor/conf.d/uniboard-test.conf b/config/supervisor/conf.d/uniboard-test.conf deleted file mode 100644 index 655b935..0000000 --- a/config/supervisor/conf.d/uniboard-test.conf +++ /dev/null @@ -1,9 +0,0 @@ -[program:uniboard-test] -command=/opt/uniboard/bin/python /opt/uniboard/src/uniboard/portality/app.py -user=cloo -directory=/opt/uniboard/src/uniboard -stdout_logfile=/var/log/supervisor/%(program_name)s-access.log -stderr_logfile=/var/log/supervisor/%(program_name)s-error.log -autostart=true -autorestart=true -stopasgroup=true diff --git a/config/supervisor/conf.d/wellcome_ncbi_objects.conf b/config/supervisor/conf.d/wellcome_ncbi_objects.conf deleted file mode 100644 index e594084..0000000 --- a/config/supervisor/conf.d/wellcome_ncbi_objects.conf +++ 
/dev/null @@ -1,8 +0,0 @@ -[program:wellcome-ncbi-objects] -command=/home/cloo/wellcome_in_ncbi/bin/python get_wellcome_ncbi_objects.py --resume -user=cloo -directory=/home/cloo/wellcome_in_ncbi/src/wellcome-outputs-from-ncbi/ -stdout_logfile=/var/log/supervisor/%(program_name)s.log -stderr_logfile=/var/log/supervisor/%(program_name)s.log -autostart=true -autorestart=true diff --git a/config/supervisor/supervisord.conf b/config/supervisor/supervisord.conf deleted file mode 100644 index 61b3020..0000000 --- a/config/supervisor/supervisord.conf +++ /dev/null @@ -1,28 +0,0 @@ -; supervisor config file - -[unix_http_server] -file=/var/run//supervisor.sock ; (the path to the socket file) -chmod=0700 ; socket file mode (default 0700) - -[supervisord] -logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log) -pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid) -childlogdir=/var/log/supervisor ; ('AUTO' child log dir, default $TEMP) - -; the below section must remain in the config file for RPC -; (supervisorctl/web interface) to work, additional interfaces may be -; added by defining them in separate rpcinterface: sections -[rpcinterface:supervisor] -supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface - -[supervisorctl] -serverurl=unix:///var/run//supervisor.sock ; use a unix:// URL for a unix socket - -; The [include] section can just contain the "files" setting. This -; setting can list multiple files (separated by whitespace or -; newlines). It can also contain wildcards. The filenames are -; interpreted as relative to this file. Included files *cannot* -; include files themselves. - -[include] -files = /etc/supervisor/conf.d/*.conf diff --git a/deploy_scripts/lantern_develop_deploy.sh b/deploy_scripts/lantern_develop_deploy.sh deleted file mode 100755 index e63313f..0000000 --- a/deploy_scripts/lantern_develop_deploy.sh +++ /dev/null @@ -1,45 +0,0 @@ -sudo ln -sf /opt/sysadmin/config/supervisor/conf.d/lantern-test.conf /etc/supervisor/conf.d/lantern-test.conf -sudo ln -sf /opt/sysadmin/config/supervisor/conf.d/oagr-test-daemon.conf /etc/supervisor/conf.d/oagr-test-daemon.conf -sudo ln -sf /opt/sysadmin/config/supervisor/conf.d/lantern-test-daemon.conf /etc/supervisor/conf.d/lantern-test-daemon.conf - -sudo ln -sf /opt/sysadmin/config/nginx/sites-available/lantern-test /etc/nginx/sites-available/lantern-test -sudo ln -sf /etc/nginx/sites-available/lantern-test /etc/nginx/sites-enabled/lantern-test - -sudo apt-get update -q -y -sudo apt-get -q -y install libxml2-dev libxslt-dev python-dev lib32z1-dev - -if [ ! -d /opt/lantern ]; then - cd /opt - sudo mkdir lantern - sudo chown cloo:cloo lantern - virtualenv -p python2.7 lantern - cd lantern - mkdir src - cd src - git clone https://github.com/CottageLabs/lantern.git lantern -fi - -cd /opt/lantern -. bin/activate -cd src/lantern -git checkout develop -git pull -git submodule update --recursive --init -git submodule update --recursive - -cd esprit -pip install -e . -cd .. - -cd magnificent-octopus -pip install -e . -cd .. - -pip install -e .
- -sudo supervisorctl reread -sudo supervisorctl update -sudo supervisorctl restart lantern-test -sudo supervisorctl restart lantern-test-daemon -sudo supervisorctl restart oagr-test-daemon -sudo nginx -t && sudo nginx -s reload diff --git a/deploy_scripts/oacwellcome_develop_deploy.sh b/deploy_scripts/oacwellcome_develop_deploy.sh deleted file mode 100755 index 8b19b55..0000000 --- a/deploy_scripts/oacwellcome_develop_deploy.sh +++ /dev/null @@ -1,45 +0,0 @@ -sudo ln -sf /opt/sysadmin/config/supervisor/conf.d/oacwellcome-test.conf /etc/supervisor/conf.d/oacwellcome-test.conf -sudo ln -sf /opt/sysadmin/config/supervisor/conf.d/oagr-oacwellcome-test-daemon.conf /etc/supervisor/conf.d/oagr-oacwellcome-test-daemon.conf -sudo ln -sf /opt/sysadmin/config/supervisor/conf.d/oacwellcome-test-daemon.conf /etc/supervisor/conf.d/oacwellcome-test-daemon.conf - -sudo ln -sf /opt/sysadmin/config/nginx/sites-available/oacwellcome-test /etc/nginx/sites-available/oacwellcome-test -sudo ln -sf /etc/nginx/sites-available/oacwellcome-test /etc/nginx/sites-enabled/oacwellcome-test - -sudo apt-get update -q -y -sudo apt-get -q -y install libxml2-dev libxslt-dev python-dev lib32z1-dev - -if [ ! -d /opt/oacwellcome ]; then - cd /opt - sudo mkdir oacwellcome - sudo chown cloo:cloo oacwellcome - virtualenv -p python2.7 oacwellcome - cd oacwellcome - mkdir src - cd src - git clone https://github.com/CottageLabs/oacwellcome.git oacwellcome -fi - -cd /opt/oacwellcome -. bin/activate -cd src/oacwellcome -git checkout develop -git pull -git submodule update --recursive --init -git submodule update --recursive - -cd esprit -pip install -e . -cd .. - -cd magnificent-octopus -pip install -e . -cd .. - -pip install -e . - -sudo supervisorctl reread -sudo supervisorctl update -sudo supervisorctl restart oacwellcome-test -sudo supervisorctl restart oacwellcome-test-daemon -sudo supervisorctl restart oagr-oacwellcome-test-daemon -sudo nginx -t && sudo nginx -s reload diff --git a/deploy_scripts/oaspectrum_master_deploy_to_production.sh b/deploy_scripts/oaspectrum_master_deploy_to_production.sh deleted file mode 100755 index b0adac0..0000000 --- a/deploy_scripts/oaspectrum_master_deploy_to_production.sh +++ /dev/null @@ -1,22 +0,0 @@ -sudo apt-get update -q -y -sudo apt-get -q -y install libxml2-dev libxslt-dev python-dev lib32z1-dev -cd /opt/oaspectrum -. bin/activate -cd src/oaspectrum -git checkout master -git pull -git submodule update --init - -cd esprit -git submodule update --init -pip install -e . -cd .. - -cd magnificent-octopus -git submodule update --init -pip install -e . -cd .. - -pip install -e . - -sudo supervisorctl restart oaspectrum-production diff --git a/deploy_scripts/oaspectrum_master_deploy_to_test.sh b/deploy_scripts/oaspectrum_master_deploy_to_test.sh deleted file mode 100755 index 1e860b6..0000000 --- a/deploy_scripts/oaspectrum_master_deploy_to_test.sh +++ /dev/null @@ -1,22 +0,0 @@ -sudo apt-get update -q -y -sudo apt-get -q -y install libxml2-dev libxslt-dev python-dev lib32z1-dev -cd /opt/oaspectrum -. env/bin/activate -cd oaspectrum -git checkout master -git pull -git submodule update --init - -cd esprit -git submodule update --init -pip install -e . -cd .. - -cd magnificent-octopus -git submodule update --init -pip install -e . -cd .. - -pip install -e . 
- -sudo supervisorctl restart spectrum-test diff --git a/disable_network_except_ssh.sh b/disable_network_except_ssh.sh deleted file mode 100755 index 19a37d0..0000000 --- a/disable_network_except_ssh.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh - -die() { printf %s "${@+$@$'\n'}" ; exit 1; } - -if [ $# -ne 1 ] -then - echo "1 argument needed" - echo "Usage: $0 <server IP>" - echo - echo "Example: $0 93.93.131.168" - echo " will disable all network traffic except incoming ssh on that server" - exit 1 -fi - -# My system IP/set ip address of server -PATH=$PATH:/sbin -SERVER_IP=$1 -# Flushing all rules -sudo iptables -F -sudo iptables -X -# Setting default filter policy -sudo iptables -P INPUT DROP -sudo iptables -P OUTPUT DROP -sudo iptables -P FORWARD DROP -# Allow unlimited traffic on loopback -sudo iptables -A INPUT -i lo -j ACCEPT -sudo iptables -A OUTPUT -o lo -j ACCEPT - -# Allow incoming ssh only -sudo iptables -A INPUT -p tcp -s 0/0 -d $SERVER_IP --sport 513:65535 --dport 22 -m state --state NEW,ESTABLISHED -j ACCEPT -sudo iptables -A OUTPUT -p tcp -s $SERVER_IP -d 0/0 --sport 22 --dport 513:65535 -m state --state ESTABLISHED -j ACCEPT -# make sure nothing comes or goes out of this box -sudo iptables -A INPUT -j DROP -sudo iptables -A OUTPUT -j DROP diff --git a/doaj b/doaj new file mode 120000 index 0000000..17ed9e3 --- /dev/null +++ b/doaj @@ -0,0 +1 @@ +ansible/ \ No newline at end of file diff --git a/doaj/counts.sh b/doaj/counts.sh deleted file mode 100755 index 275826a..0000000 --- a/doaj/counts.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -if [ $# -ne 1 ] -then - echo "Usage: $0 http://<host>:<port>" - echo "E.g.: $0 http://yonce:9200" - exit 1 -fi - -echo -n "The whole DOAJ index: "; curl "$1""/doaj/_count" ; echo -echo -n "Accounts: "; curl "$1""/doaj/account/_count" ; echo -echo -n "Articles: "; curl "$1""/doaj/article/_count" ; echo -echo -n "Journals: "; curl "$1""/doaj/journal/_count" ; echo -echo -n "Suggestions: "; curl "$1""/doaj/suggestion/_count" ; echo -echo -n "Uploads: "; curl "$1""/doaj/upload/_count" ; echo -echo -n "Cache: "; curl "$1""/doaj/cache/_count" ; echo -echo -n "ToC-s: "; curl "$1""/doaj/toc/_count" ; echo diff --git a/fabric/doaj/fabfile.py b/fabric/doaj/fabfile.py deleted file mode 100644 index efdee29..0000000 --- a/fabric/doaj/fabfile.py +++ /dev/null @@ -1,252 +0,0 @@ -import sys, os, time -from fabric.api import env, run, sudo, cd, abort, roles, execute, warn_only - -env.use_ssh_config = True # username, identity file (key), hostnames for machines will all be loaded from ~/.ssh/config -# You can run this script like this: -# fab -H - -# your local ssh config does not apply when the script is explicitly specifying which hosts to run tasks on... 
-# so username and key path will still have to be set here, or specified on the command line using -u and -i -env.user = 'cloo' -if not env.get('key_filename'): - # env.setdefault does not seem to work correctly - env.key_filename = [] -env.key_filename.extend( - [ - '~/.ssh/cl', - # add the path to your own private key if you wish - # you can also add the -i argument when running fabric: - # fab -i :arg1=value1,arg2=value2 - ] -) - - -DOAJGATE_IP = '46.101.64.14' -DOAJ_HARVESTER_GATE_IP = None -DOAJAPP1_IP = '46.101.38.194' -DOAJ_HARVESTER_APP1_IP = None -DOAJ_TEST_IP = '178.62.92.200' -DOAJ_STAGING_IP = None -APP_SERVER_NAMES = {'DOAJGATE': DOAJGATE_IP} # the gateway nginx config files are named after which app server the gateway directs traffic to -TEST_SERVER_NAMES = {'DOAJ_TEST': DOAJ_TEST_IP} -STAGING_SERVER_NAMES = {'DOAJ_STAGING': DOAJ_STAGING_IP} - -STAGING_DO_NAME = 'doaj-staging' - -env.hosts = [DOAJGATE_IP] - -DOAJ_PROD_PATH_SRC = '/home/cloo/repl/production/doaj/src/doaj' -DOAJ_HARVESTER_PATH_SRC = '/home/cloo/repl/harvester/doaj/src/doaj' -DOAJ_TEST_PATH_SRC = '/home/cloo/repl/test/doaj/src/doaj' -DOAJ_USER_APP_PORT = 5050 - -# Gateway server - nginx site config filename bits (also includes the server name in the middle) -GATE_NGINX_CFG_PREFIX = 'doaj-forward-to-' -GATE_NGINX_CFG_SUFFIX = '-server-with-local-static' - -# Used when running tasks directly, e.g. fab update_doaj . Not (yet) -# used when a task like switch_doaj is calling multiple other tasks -# programmatically. Enables us to not have to specify "which hosts" -# all the time when running Fabric. -env.roledefs.update( - { - 'app': [DOAJAPP1_IP], - 'gate': [DOAJGATE_IP], - 'test': [DOAJ_TEST_IP], - 'staging': [DOAJ_STAGING_IP], - 'harvester-app': [DOAJ_HARVESTER_APP1_IP], - 'harvester-gate': [DOAJ_HARVESTER_GATE_IP], - } -) - -@roles('gate') -def update_doaj(env, branch='production', tag="", doajdir=DOAJ_PROD_PATH_SRC): - if (not tag and env == 'production') or (not tag and env == 'harvester'): - print 'Please specify a tag to deploy to production or harvester' - sys.exit(1) - - if ( - (doajdir == DOAJ_PROD_PATH_SRC and branch != 'production') - or (doajdir == DOAJ_HARVESTER_PATH_SRC and branch != 'production') - or (env == 'production' and branch != 'production') - or (env == 'harvester' and branch != 'production') - ): - print 'You\'re deploying something other than the production branch to the live or harvester DOAJ app location.' - print 'Aborting execution. If you really want to do this edit this script and comment the guard out.' 
- sys.exit(1) - - with cd(doajdir): - run('git config user.email "us@cottagelabs.com"') - run('git config user.name "Cottage Labs LLP"') - #run('git checkout master') # so that we have a branch we can definitely pull in - # (in case we start from one that was set up for pushing only) - #run('git pull', pty=False) # get any new branches - run('git checkout ' + branch) - run('git pull', pty=False) # again, in case the checkout actually switched the branch, pull from the remote now - if tag: - run('git checkout {0}'.format(tag)) - run('git submodule update --init', pty=False) - run('deploy/deploy-gateway.sh {0}'.format(env)) - - -@roles('gate') -def restart(env, doajdir=DOAJ_PROD_PATH_SRC): - with cd(doajdir): - run('deploy/restart.sh {0}'.format(env)) - - -@roles('staging') -def update_staging(branch='production'): - '''Update the staging server with the latest live code and reload the app.''' - execute(update_doaj, env='staging', branch=branch, hosts=env.roledefs['staging']) - execute(reload_staging) - -@roles('staging') -def reload_staging(): - execute(reload_webserver, supervisor_doaj_task_name='doaj-staging', hosts=env.roledefs['staging']) - -def _get_hosts(from_, to_): - FROM = from_.upper() - TO = to_.upper() - if FROM not in APP_SERVER_NAMES or TO not in APP_SERVER_NAMES: - abort('When syncing suggestions from one machine to another, only the following server names are valid: ' + ', '.join(APP_SERVER_NAMES)) - source_host = APP_SERVER_NAMES[FROM] - target_host = APP_SERVER_NAMES[TO] - return FROM, source_host, TO, target_host - -@roles('app') -def push_xml_uploads(): - # TODO: move the hardcoded dirs and files to python constants to top - # of file .. bit pointless for now as the scheduled backups - # themselves have those bits hardcoded too - run("/home/cloo/backups/backup2s3.sh {doaj_path}/upload/ /home/cloo/backups/doaj-xml-uploads/ dummy s3://doaj-xml-uploads >> /home/cloo/backups/logs/doaj-xml-uploads_`date +%F_%H%M`.log 2>&1" - .format(doaj_path=DOAJ_PROD_PATH_SRC), - pty=False - ) - -@roles('app') -def pull_xml_uploads(): - # TODO: same as push_xml_uploads - run("/home/cloo/backups/restore_from_s3.sh s3://doaj-xml-uploads {doaj_path}/upload/ /home/cloo/backups/doaj-xml-uploads/" - .format(doaj_path=DOAJ_PROD_PATH_SRC), - pty=False - ) - -def sync_suggestions(from_, to_): - FROM, source_host, TO, target_host = _get_hosts(from_, to_) - execute(_sync_suggestions, FROM=FROM, hosts=[target_host]) - execute(count_suggestions, hosts=[source_host, target_host]) - raw_input('Suggestion counts OK? Press <Enter>. 
If not, press Ctrl+C to terminate now.') - -@roles('app') -def check_doaj_running(): - run('if [ $(curl -s localhost:{app_port}| grep {check_for} | wc -l) -ge 1 ]; then echo "DOAJ running on localhost:{app_port}"; fi' - .format(app_port=DOAJ_USER_APP_PORT, check_for="doaj") - ) - -@roles('app') -def print_doaj_app_config(): - print_keys = { - 'secret_settings.py': ['RECAPTCHA'], - 'settings.py': ['DOMAIN', 'SUPPRESS_ERROR_EMAILS', 'DEBUG'] - } - for file_, keys in print_keys.items(): - for key in keys: - run('grep {key} {doaj_path}/portality/{file_}'.format(file_=file_, key=key, doaj_path=DOAJ_PROD_PATH_SRC)) - -@roles('app') -def reload_webserver(supervisor_doaj_task_name='doaj-production'): - sudo('kill -HUP $(sudo supervisorctl pid {0})'.format(supervisor_doaj_task_name)) - -@roles('gate') -def deploy_live(branch='production', tag=""): - update_doaj(env='production', branch=branch, tag=tag) - -@roles('gate') -def restart_live(): - restart(env='production') - -@roles('harvester-gate') -def deploy_harvester(branch='production', tag=""): - execute(update_doaj, env='harvester', branch=branch, tag=tag, doajdir=DOAJ_HARVESTER_PATH_SRC, hosts=env.roledefs['harvester-gate']) - execute(reload_webserver, supervisor_doaj_task_name='doaj-harvester', hosts=env.roledefs['harvester-app']) - -@roles('gate') -def deploy_test(branch='develop', tag=""): - update_doaj(env='test', branch=branch, tag=tag, doajdir=DOAJ_TEST_PATH_SRC) - restart_test() - - -@roles('gate') -def restart_test(): - restart(env='test', doajdir=DOAJ_TEST_PATH_SRC) - -@roles('gate') -def create_staging(tag): - if _find_staging_server(STAGING_DO_NAME): - print "The staging server already exists. Destroy it with destroy_staging task first if you want, then rerun this." - sys.exit(1) - ip = _create_staging_server(STAGING_DO_NAME) - execute(_setup_staging_server, hosts=[ip]) - print 'Staging server set up complete. SSH into {0}'.format(ip) - -def destroy_staging(): - staging = _find_staging_server(STAGING_DO_NAME) - if not staging: - print "Can't find a droplet with the name {0}, so nothing to do.".format(STAGING_DO_NAME) - sys.exit(0) - print 'Destroying {0} in 5 seconds. Ctrl+C if you want to stop this.'.format(STAGING_DO_NAME) - time.sleep(5) - staging.destroy() - print 'Destruct requested for {0}, done here.'.format(STAGING_DO_NAME) - -def _setup_ocean_get_token(): - try: - import digitalocean - except ImportError: - print "You don't have the Digital Ocean API python bindings installed." - print 'pip install python-digitalocean' - print '.. in a virtualenv or in your root python setup (it only requires requests at time of writing)' - print 'Then try again.' - sys.exit(1) - - token = os.getenv('DOTOKEN') - if not token: - print 'Put your DO token in your shell env.' - print 'E.g. keep "export DOTOKEN=thetoken" in ~/.dotoken.sh and do ". ~/
.dotoken.sh" before running Fabric' - sys.exit(1) - return token, digitalocean - -def _find_staging_server(do_name): - token, digitalocean = _setup_ocean_get_token() - manager = digitalocean.Manager(token=token) - droplets = manager.get_all_droplets() - for d in droplets: - if d.name == do_name: - return d - return None - -def _create_staging_server(do_name): - token, digitalocean = _setup_ocean_get_token() - manager = digitalocean.Manager(token=token) - droplet = digitalocean.Droplet( - token=token, - name=do_name, - region='lon1', - image='11434212', # basic-server-15-Apr-2015-2GB - size_slug='2gb', # 2GB ram, 2 VCPUs, 40GB SSD - backups=False, - private_networking=True - ) - droplet.create() - - while droplet.status != 'active': - print 'Waiting for staging droplet to become active. Current status {0}, droplet id {1}'.format(droplet.status, droplet.id) - time.sleep(5) - droplet = manager.get_droplet(droplet.id) - - print 'Staging droplet created, public IP {0}'.format(droplet.ip_address) - return droplet.ip_address - -def _setup_staging_server(): - run('echo "test" > ~/check.txt') diff --git a/fabric/oacwellcome/fabfile.py b/fabric/oacwellcome/fabfile.py deleted file mode 100644 index c1259ba..0000000 --- a/fabric/oacwellcome/fabfile.py +++ /dev/null @@ -1,118 +0,0 @@ -from fabric.api import env, run, sudo, cd, abort, roles, execute, warn_only -import sys - -env.use_ssh_config = True # username, identity file (key), hostnames for machines will all be loaded from ~/.ssh/config -# This means that you run this script like this: -# fab -H - -# E.g.: -# Update app to HEAD of current git master: -# fab update_app -# This will update it on all servers specified later in this file - -# If you want to specify which hosts to update it on: -# fab -H host1,host2 - # (replace ssh names with the ones you would use yourself on the command - # line with ssh - they come from your own ~/.ssh/config) - # You can also use IP addresses, of course. - -# your local ssh config does not apply when the script is explicitly specifying which hosts to run tasks on... -# so username and key path will still have to be set here, or specified on the command line using -u and -i -env.user = 'cloo' -if not env.get('key_filename'): - # env.setdefault does not seem to work correctly - env.key_filename = [] -env.key_filename.extend( - [ - '~/.ssh/cl', - # add the path to your own private key if you wish - # you can also add the -i argument when running fabric: - # fab -i :arg1=value1,arg2=value2 - ] -) - - -OACWELLCOME_IP = '178.62.90.232' -RICHARD_TEST_IP = '5.101.97.169' -APP_SERVER_NAMES = {'OACWELLCOME': OACWELLCOME_IP} -TEST_SERVER_NAMES = {'RICHARD_TEST': RICHARD_TEST_IP} - -APP_PATH_SRC = '/opt/oacwellcome/src/oacwellcome' # path on remote servers to the OACWellcome app -USER_APP_PORT = 5050 - -# Used when running tasks directly, e.g. fab update_app . Not (yet) -# used when a task is calling multiple other tasks -# programmatically. Enables us to not have to specify "which hosts" -# all the time when running Fabric.
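For instance, with the role definitions below in place, a session might look like the following (the branch name and tag are illustrative, not prescribed by this fabfile):

    fab update_app:branch=develop          # uses the 'app' roledef below
    fab -i ~/.ssh/cl check_app_running     # point Fabric at an explicit key
    fab -H 178.62.90.232 update_app:branch=develop,tag=v1.2   # hypothetical tag
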
-env.roledefs.update( - { - 'app': [OACWELLCOME_IP], - 'test': [RICHARD_TEST_IP], - } -) - -@roles('app') -def deploy_live(tag, branch='master'): - update_app(branch=branch, tag=tag) - reload_app() - -@roles('app') -def update_app(branch='master', tag=''): - if not tag and branch == 'master': - print 'Please specify a tag to deploy to production' - sys.exit(1) - - with cd(APP_PATH_SRC): - run('git config user.email "us@cottagelabs.com"') - run('git config user.name "Cottage Labs LLP"') - stash = run('git stash') - run('git checkout master') # so that we have a branch we can definitely pull in - # (in case we start from one that was set up for pushing only) - - run('git pull', pty=False) - run('git branch --set-upstream {branch} origin/{branch}'.format(branch=branch)) # make sure we can pull here - run('git checkout ' + branch) - run('git pull', pty=False) - if tag: - run('git checkout {0}'.format(tag)) - run('git submodule update --init --recursive', pty=False) - run('git submodule update --recursive', pty=False) - if not 'No local changes to save' in stash: - with warn_only(): - run('git stash apply') - install_dependencies() - -@roles('app', 'test') -def install_dependencies(): - sudo('sudo apt-get update -q -y') - sudo('sudo apt-get -q -y install libxml2-dev libxslt-dev python-dev lib32z1-dev') - - with cd(APP_PATH_SRC + '/esprit'): - run('source ../../../bin/activate && pip install -e .') - - with cd(APP_PATH_SRC + '/magnificent-octopus'): - run('source ../../../bin/activate && pip install -e .') - - with cd(APP_PATH_SRC): - run('source ../../bin/activate && pip install -e .') - -@roles('test') -def update_test(dev_branch="develop"): - '''Update app on the test server. Optionally takes dev_branch= arg, default "develop".''' - update_app(dev_branch) - sudo('sudo supervisorctl restart oacwellcome-test') - print - print '!!! --- !!!' - print 'Remember to go in and manually restart the OACWellcome and OAGR daemons, running in screens on the test server.' - -@roles('app') -def check_app_running(): - run('if [ $(curl -L -s localhost:{app_port}/health | grep {check_for} | wc -l) -ge 1 ]; then echo "App running on localhost:{app_port}"; fi' - .format(app_port=USER_APP_PORT, check_for="All OK") - ) - # there's also /health on the app - -@roles('app') -def reload_app(supervisor_task_name='oacwellcome-production'): - sudo('kill -HUP $(sudo supervisorctl pid {0})'.format(supervisor_task_name)) - sudo('supervisorctl restart oacwellcome-production-daemon') - sudo('supervisorctl restart oagr-production-daemon') diff --git a/fabric/oaspectrum/fabfile.py b/fabric/oaspectrum/fabfile.py deleted file mode 100644 index 2fe9e69..0000000 --- a/fabric/oaspectrum/fabfile.py +++ /dev/null @@ -1,132 +0,0 @@ -from fabric.api import env, run, sudo, cd, abort, roles, execute, warn_only - -env.use_ssh_config = True # username, identity file (key), hostnames for machines will all be loaded from ~/.ssh/config -# COMMON COMMANDS -# fab update_test - puts newest master on the test server -# fab deploy_live:tag=git_tag_from_repo - rolls out latest tag to live - -# This means that you run this script like this: -# fab -H - -# E.g.: -# Update app to HEAD of current git master: -# fab update_app -# This will update it on all servers specified later in this file - -# If you want to specify which hosts to update it on: -# fab -H host1,host2 - # (replace ssh names with the ones you would use yourself on the command - # line with ssh - they come from your own ~/.ssh/config) - # You can also use IP addresses, of course. 
- -# your local ssh config does not apply when the script is explicitly specifying which hosts to run tasks on... -# so username and key path will still have to be set here, or specified on the command line using -u and -i -env.user = 'cloo' -if not env.get('key_filename'): - # env.setdefault does not seem to work correctly - env.key_filename = [] -env.key_filename.extend( - [ - '~/.ssh/cl', - # add the path to your own private key if you wish - # you can also add the -i argument when running fabric: - # fab -i :arg1=value1,arg2=value2 - ] -) - - -OASPECTRUM_IP = '178.62.3.108' -RICHARD_TEST_IP = '178.62.191.218' -APP_SERVER_NAMES = {'OASPECTRUM': OASPECTRUM_IP} -TEST_SERVER_NAMES = {'RICHARD_TEST': RICHARD_TEST_IP} - -APP_PATH_SRC = '/opt/oaspectrum/src/oaspectrum' # path on remote servers to the oaspectrum app -TEST_APP_PATH_SRC = '/opt/oaspectrum/oaspectrum' # path on test to the oaspectrum app -USER_APP_PORT = 5050 - -# Used when running tasks directly, e.g. fab update_app . Not (yet) -# used when a task is calling multiple other tasks -# programmatically. Enables us to not have to specify "which hosts" -# all the time when running Fabric. -env.roledefs.update( - { - 'app': [OASPECTRUM_IP], - 'test': [RICHARD_TEST_IP], - } -) - -@roles('app') -def deploy_live(tag, branch='master'): - update_app(env='production', branch=branch, tag=tag) - reload_app() - -@roles('app') -def update_app(env, branch='master', tag=''): - - if env == 'test': - app_dir = TEST_APP_PATH_SRC - else: - app_dir = APP_PATH_SRC - - with cd(app_dir): - run('git config user.email "us@cottagelabs.com"') - run('git config user.name "Cottage Labs LLP"') - stash = run('git stash') - run('git checkout master') # so that we have a branch we can definitely pull in - # (in case we start from one that was set up for pushing only) - - run('git pull', pty=False) - run('git branch --set-upstream-to origin/{branch}'.format(branch=branch)) # make sure we can pull here - run('git checkout ' + branch) - run('git pull', pty=False) - if tag: - run('git checkout {0}'.format(tag)) - run('git submodule update --init --recursive', pty=False) - run('git submodule update --recursive', pty=False) - if not 'No local changes to save' in stash: - with warn_only(): - run('git stash apply') - install_dependencies(env=env) - -@roles('app', 'test') -def install_dependencies(env): - sudo('sudo apt-get update -q -y') - sudo('sudo apt-get -q -y install libxml2-dev libxslt-dev python-dev lib32z1-dev') - - if env == 'test': - app_dir = TEST_APP_PATH_SRC - env_path = '../env' - else: - app_dir = APP_PATH_SRC - env_path = '../..' - - with cd(app_dir + '/esprit'): - run('source ../{env_path}/bin/activate && pip install -e .'.format(env_path=env_path)) - - with cd(app_dir + '/magnificent-octopus'): - run('source ../{env_path}/bin/activate && pip install -e .'.format(env_path=env_path)) - - with cd(app_dir): - run('source {env_path}/bin/activate && pip install -e .'.format(env_path=env_path)) - -@roles('test') -def update_test(dev_branch="master"): - '''Update app on the test server. 
Optionally takes dev_branch= arg, default "master".''' - update_app(env='test', branch=dev_branch) - sudo('sudo supervisorctl restart spectrum-test') - -@roles('app') -def check_app_running(): - run('if [ $(curl -L -s localhost:{app_port}/health | grep {check_for} | wc -l) -ge 1 ]; then echo "App running on localhost:{app_port}"; fi' - .format(app_port=USER_APP_PORT, check_for="All OK") - ) - # there's also /health on the app - -@roles('app') -def reload_app(supervisor_task_name='oaspectrum-production'): - sudo('kill -HUP $(sudo supervisorctl pid {0})'.format(supervisor_task_name)) - -@roles('app') -def deploy_live(branch='master', tag=""): - update_app(env='production', branch=branch, tag=tag) - execute(reload_app, hosts=env.roledefs['app']) diff --git a/fabric/sysadmin/fabfile.py b/fabric/sysadmin/fabfile.py deleted file mode 100644 index 0f41e0d..0000000 --- a/fabric/sysadmin/fabfile.py +++ /dev/null @@ -1,191 +0,0 @@ -from fabric.api import env, run, sudo, cd, abort, roles, execute, warn_only - -import time -import sys - -env.use_ssh_config = True # username, identity file (key), hostnames for machines will all be loaded from ~/.ssh/config -# This means that you run this script like this: -# fab -H - -# E.g.: -# Update sysadmin repo to HEAD of current git master: -# fab update_sysadmin -# This will update it on all servers specified later in this file - -# If you want to specify which hosts to update it on: -# fab -H doaj,cl2,doajgate update_sysadmin - # (replace ssh names with the ones you would use yourself on the command - # line with ssh - they come from your own ~/.ssh/config) - # You can also use IP addresses, of course. - -# Permissions: if you do not have access to one of the servers (your key -# is not authorised), then exclude it from the task you want to run: -# fab update_sysadmin:exclude_hosts=46.235.224.100 - - -# your local ssh config does not apply when the script is explicitly specifying which hosts to run tasks on... -# or when the script is determining which servers to run on based on -# roles in this file, i.e. you didn't specify with -H on the command line -# so username and key path will still have to be set here, or specified on the command line using -u and -i -env.user = 'cloo' -if not env.get('key_filename'): - # env.setdefault does not seem to work correctly - env.key_filename = [] -env.key_filename.extend( - [ - '~/.ssh/cl', - # add the path to your own private key if you wish - # you can also add the -i argument when running fabric: - # fab -i :arg1=value1,arg2=value2 - ] -) - -servers = { - 'yonce': '95.85.59.151', - 'clgate1': '178.62.75.236', - 'bexcellent': '95.85.42.215', - 'phd': '188.226.218.156', - 'arttactic': '188.226.240.230', - 'edaccess': '188.226.168.183', - 'doaj-staging': '95.85.48.213', - 'clesc0': '5.101.107.186', - 'clesc1': '80.240.138.83', - 'oamonitor': '188.226.213.168', - 'uniboard': '95.85.52.130', - 'arttactic-dev': '178.62.13.6', - 'fundfind': '178.62.128.172', - 'cl': '178.62.223.99', - - 'ooz': '5.101.97.169', -} - -all_servers = [] -for name, server in servers.items(): - if server not in all_servers: - all_servers.append(server) - -SYSADMIN_SRC_PATH = '/opt/sysadmin' # path on remote servers to the sysadmin repo -ES_EXPORTER_BACKUPS_PATH = '/home/cloo/backups/elasticsearch-es-exporter' - -env.roledefs.update( - { - # Mainly to be used when calling a fabric task on multiple - # servers, e.g. to apply a security fix across. 
- # Call fabric with fab -R emanuil_servers task_name - 'emanuil_servers': [servers['yonce'], servers['clgate1'], servers['doaj-staging'], servers['oamonitor'], servers['uniboard'], servers['fundfind'], servers['cl'], servers['ooz']], - # ooz is part of this group since urgent security updates - # and such are performed by ET despite the server owner - # being RJ - 'mark_servers': [servers['bexcellent'], servers['phd'], servers['edaccess'], servers['clesc0'], servers['clesc1']], - 'martyn_servers': [servers['arttactic'], servers['arttactic-dev']], - 'all_servers': all_servers, - } -) - - -def update_sysadmin(): - with warn_only(): - with cd(SYSADMIN_SRC_PATH): - run('git pull', pty=False) - -def push_sysadmin(): - with warn_only(): - with cd(SYSADMIN_SRC_PATH): - run('git push') - -def create_sysadmin(): - with warn_only(): - with cd('/opt'): - sudo('mkdir -p sysadmin') - sudo('chown cloo:cloo sysadmin') - run('git clone https://github.com/CottageLabs/sysadmin.git', pty=False) - -def apt_install(packages): - ''' - Install one or more software packages across all hosts. - :param packages: A space-separated string of package names. Can be just a single name as well. - - This task will fail immediately if one of the servers fails to install the software and will - not proceed with trying to install it on any more servers. - ''' - sudo('apt-get -q -y install ' + packages, pty=False) - -def _get_hosts(from_, to_): - FROM = from_.lower() - TO = to_.lower() - if FROM not in servers or TO not in servers: - abort('only the following server names are valid: ' + ', '.join(servers)) - source_host = servers[FROM] - target_host = servers[TO] - return FROM, source_host, TO, target_host - -# call this like: -# fab transfer_index:index=doaj,from_=yonce,to_=pinky -def transfer_index(index, from_, to_, restore_only=False, scp=False, filename=None, restore_batch_size=None, restore_sleep=None): - ''':index=,from_=,to_= - Copy an index from one machine to another. Requires elasticsearch-exporter (nodejs app) on source machine. Read src for more options.''' - restore_only = (restore_only == 'True') # all args get stringified by fabric - scp = (scp == 'True') - - FROM, source_host, TO, target_host = _get_hosts(from_, to_) - - if restore_only and not filename: - print 'You probably want to specify a specific filename to restore.' - sys.exit(1) - - execute(update_sysadmin, hosts=[source_host, target_host]) - - if not filename: - timestamp = time.strftime('%Y-%m-%d_%H%M') - filename = '{index}_{timestamp}'.format(index=index, timestamp=timestamp) - - if not restore_only: - execute(backup_index_locally, index=index, directory=ES_EXPORTER_BACKUPS_PATH, filename=filename, hosts=[source_host]) - - if (restore_only and scp) or not restore_only: - try: - execute(secure_copy, filename_pattern=filename + '*', target_host=target_host, remote_path=ES_EXPORTER_BACKUPS_PATH, hosts=[source_host]) - except SystemExit: - print - print 'Looks like the secure copy failed - maybe your connection timed out since you didn\'t see that the backup had finished? Trying again...' 
- print - execute(secure_copy, filename_pattern=filename + '*', target_host=target_host, remote_path=ES_EXPORTER_BACKUPS_PATH, hosts=[source_host]) - - execute(__uncompress_backups, filename=filename, hosts=[target_host]) - - execute(__es_bulk_restore, index=index, filename=filename, restore_batch_size=restore_batch_size, restore_sleep=restore_sleep, hosts=[target_host]) - -def backup_index_locally(index, directory, filename): - with cd(directory): - run('node --nouse-idle-notification --expose-gc ~/elasticsearch-exporter -i {index} -g {filename} -m 0.6'.format(index=index, filename=filename)) - -def secure_copy(filename_pattern, target_host, remote_path): - print 'If the next command fails, you\'ll need to set up proper ssh keys on the servers so that the source can connect to the target. You will be asked for the key\'s passphrase!' - with cd(ES_EXPORTER_BACKUPS_PATH): - run('scp {filename_pattern} cloo@{target_host}:{remote_path}'.format(filename_pattern=filename_pattern, target_host=target_host, remote_path=remote_path)) - - -def __es_bulk_restore(index, filename, restore_batch_size=None, restore_sleep=None): - with cd(ES_EXPORTER_BACKUPS_PATH): - thescript = SYSADMIN_SRC_PATH + '/backup/elasticsearch/bulk_restore.py' - mapping_filename = ES_EXPORTER_BACKUPS_PATH + '/' + filename + '.meta' - data_filename = ES_EXPORTER_BACKUPS_PATH + '/' + filename + '.data' - cmd = 'python {thescript} -i {index} {mapping_filename} {data_filename}'.format(thescript=thescript, index=index, mapping_filename=mapping_filename, data_filename=data_filename) - if restore_batch_size: - cmd += ' -b ' + restore_batch_size - if restore_sleep: - cmd += ' -s ' + restore_sleep - run(cmd) - - -def __uncompress_backups(filename): - with cd(ES_EXPORTER_BACKUPS_PATH): - run('mv {filename}.data {filename}.data.gz'.format(filename=filename)) - run('gunzip {filename}.data.gz'.format(filename=filename)) - - -def disable_ssl3(): - # A security fix, see https://poodle.io - with cd(SYSADMIN_SRC_PATH): - sudo('cp config/nginx/nginx.conf /etc/nginx/nginx.conf') - sudo('nginx -t') - sudo('nginx -s reload') diff --git a/fabric/uniboard/fabfile.py b/fabric/uniboard/fabfile.py deleted file mode 100644 index 1690db3..0000000 --- a/fabric/uniboard/fabfile.py +++ /dev/null @@ -1,96 +0,0 @@ -from fabric.api import env, run, sudo, cd, abort, roles, execute, warn_only - -env.use_ssh_config = True # username, identity file (key), hostnames for machines will all be loaded from ~/.ssh/config -# This means that you run this script like this: -# fab -H - -# E.g.: -# Update app to HEAD of current git master: -# fab update_app -# This will update it on all servers specified later in this file - -# If you want to specify which hosts to update it on: -# fab -H host1,host2 - # (replace ssh names with the ones you would use yourself on the command - # line with ssh - they come from your own ~/.ssh/config) - # You can also use IP addresses, of course. - -# your local ssh config does not apply when the script is explicitly specifying which hosts to run tasks on... 
-# so username and key path will still have to be set here, or specified on the command line using -u and -i -env.user = 'cloo' -if not env.get('key_filename'): - # env.setdefault does not seem to work correctly - env.key_filename = [] -env.key_filename.extend( - [ - '~/.ssh/cl', - # add the path to your own private key if you wish - # you can also add the -i argument when running fabric: - # fab -i :arg1=value1,arg2=value2 - ] -) - - -UNIBOARD_PILOT_IP = '95.85.52.130' -RICHARD_TEST_IP = '5.101.97.169' -APP_SERVER_NAMES = {'UNIBOARD_PILOT': UNIBOARD_PILOT_IP} # the gateway nginx config files are named after which app server the gateway directs traffic to -TEST_SERVER_NAMES = {'RICHARD_TEST': RICHARD_TEST_IP} - -APP_PATH_SRC = '/opt/uniboard/src/uniboard' # path on remote servers to the UNIBOARD app -USER_APP_PORT = 5050 - -# Used when running tasks directly, e.g. fab update_app . Not (yet) -# used when a task is calling multiple other tasks -# programmatically. Enables us to not have to specify "which hosts" -# all the time when running Fabric. -env.roledefs.update( - { - 'app': [UNIBOARD_PILOT_IP], - 'test': [RICHARD_TEST_IP], - } -) - -@roles('app') -def update_app(branch='master'): - with cd(APP_PATH_SRC): - run('git config user.email "us@cottagelabs.com"') - run('git config user.name "Cottage Labs LLP"') - stash = run('git stash') - run('git pull', pty=False) - run('git checkout ' + branch) - run('git submodule update', pty=False) - if not 'No local changes to save' in stash: - with warn_only(): - run('git stash apply') - install_dependencies() - -@roles('app', 'test') -def install_dependencies(): - with cd(APP_PATH_SRC): - run('source ../../bin/activate && pip install -r requirements.txt') - -@roles('test') -def update_test(dev_branch="master"): - '''Update app on the test server. Optionally takes dev_branch= arg, default "master".''' - update_app(dev_branch) - sudo('sudo supervisorctl restart uniboard-test') - -@roles('app') -def check_app_running(): - run('if [ $(curl -L -s localhost:{app_port}| grep {check_for} | wc -l) -ge 1 ]; then echo "Uniboard running on localhost:{app_port}"; fi' - .format(app_port=USER_APP_PORT, check_for="UniBoard") - ) - -@roles('app') -def print_app_config(): - print_keys = { - 'secret_settings.py': ['RECAPTCHA'], - 'settings.py': ['DOMAIN', 'SUPPRESS_ERROR_EMAILS', 'DEBUG'] - } - for file_, keys in print_keys.items(): - for key in keys: - run('grep {key} {app_path}/portality/{file_}'.format(file_=file_, key=key, app_path=APP_PATH_SRC)) - -@roles('app') -def reload(supervisor_task_name='uniboard'): - sudo('kill -HUP $(sudo supervisorctl pid {0})'.format(supervisor_task_name)) diff --git a/instructions/:opt:README.md b/instructions/:opt:README.md deleted file mode 100644 index 4bcdcbb..0000000 --- a/instructions/:opt:README.md +++ /dev/null @@ -1,16 +0,0 @@ -We're running supervisord on our server. Every time you push changes, do - - git pull # or git clone https://github.com/DOAJ/doaj.git if deploying for the first time - replace URL as needed - git submodule init # if this is the first time you're deploying the app, but won't hurt if it's not - git submodule update # in case one of the repo's submodules are at a newer commit now - sudo supervisorctl restart doaj # replace doaj with the supervisor name of your app - -If you want a list of what's under supervisor, do - - sudo supervisorctl status - -All supervisord log files are under /var/log/supervisor/ - -Web apps often write their logs to the ERROR log (so e.g. 
doaj-error.log) -because ET hasn't had time to figure out why and fix it. So check the -error log if the access one is empty. diff --git a/instructions/README.md b/instructions/README.md deleted file mode 100644 index cf27a2e..0000000 --- a/instructions/README.md +++ /dev/null @@ -1,4 +0,0 @@ -Put files in directories indicated by file name prefix, replacing : with /, the directory separator. -For example: - -:opt:README.md should be copied to /opt/README.md on new server setups diff --git a/make_swap.sh b/make_swap.sh deleted file mode 100755 index 287390f..0000000 --- a/make_swap.sh +++ /dev/null @@ -1,29 +0,0 @@ -die() { printf %s "${@+$@$'\n'}" ; exit 1; } - -if [ $# -ne 1 ] -then - echo "1 argument needed" - echo "Usage: $0 <dd block count, e.g. 8192k>" - echo - echo "Example: $0 8192k" - echo " will make 8 GiB of swap in /swapfile" - exit 1 -fi - -dd_count=$1 -sudo dd if=/dev/zero of=/swapfile bs=1024 count="$dd_count" || die "Invalid dd sector count? Try k e.g. 8192k == 8GB" - -sudo mkswap /swapfile || die "Could not bake swap filesystem onto swap file" -sudo swapon /swapfile || die "Could not turn on swap file" - -echo "!!! Append this to /etc/fstab if not already there:" -echo " /swapfile none swap sw 0 0" -echo -echo "Setting kernel swappiness" -echo 0 | sudo tee /proc/sys/vm/swappiness || die "Could not set kernel swappiness" -echo -echo "!!! Append this to /etc/sysctl.conf if not already there:" -echo "vm.swappiness = 0" - -echo "Done" -sudo swapon -s || die "Could not list active swap" diff --git a/new-droplet-digital-ocean.md b/new-droplet-digital-ocean.md deleted file mode 100644 index a118cd6..0000000 --- a/new-droplet-digital-ocean.md +++ /dev/null @@ -1,20 +0,0 @@ -In order to save time you should create DO droplets from images of -existing servers. - -However, this means that some settings will be duplicated when both your -new server and the one which was the source of the image are both online -at the same time. - -This is what to edit after bringing a new droplet into the world: - -### Newrelic - - sudo vim /etc/newrelic/newrelic_plugin_agent.cfg - -1. Look for "elasticsearch", change the name: bit to refer to your new - droplet's hostname. -2. Look for "nginx", do the same. - -### Nginx - -### Supervisord diff --git a/newrelic/README.md b/newrelic/README.md deleted file mode 100644 index 4436cbb..0000000 --- a/newrelic/README.md +++ /dev/null @@ -1,44 +0,0 @@ -NOTE: all relative paths in these instructions are relative to this README file. - -Main newrelic system monitor (server monitor called newrelic-sysmond): follow instructions on newrelic website, no modifications needed. - -Of course, you need to install that successfully first before installing the MeetMe plugin -(elasticsearch, nginx, memcached, redis and lots of other monitoring supported). - -Elasticsearch and nginx plugins: - -1. follow instructions on RPM (the newrelic control panel) to install the newrelic_plugin_agent via pip but: sudo pip install newrelic_plugin_agent -2. sudo cp ./newrelic_plugin_agent.cfg /etc/newrelic/newrelic-plugin-agent.cfg -3. sudo vim /etc/newrelic/newrelic-plugin-agent.cfg # put the Newrelic license key on the line which says INSERT LICENSE KEY HERE -4. make sure the elasticsearch and nginx configs are correct - - a. elasticsearch should be running on localhost:9200 - - b. nginx's "default" site which responds to http://localhost - (i.e. port 80, no domain name) should be linked to from sites-enabled and - should have the nginx stats module activated - see - ../config/nginx/sites-available/default for an example "default" site file) - -5. 
Alright, just test it now. - - # first, let ourselves run the newrelic plugin as the newrelic user would run it when it's a service - sudo usermod -a -G newrelic cloo - exit # log out of the server, need to re-login to activate the change of groups for the cloo user - ssh [the server name or IP] - cd /opt/sysadmin/newrelic # or wherever you cloned the sysadmin repo to - bottom line, get back to the directory of this README file - sudo chown newrelic:newrelic /var/log/newrelic - sudo chown newrelic:newrelic /var/run/newrelic - sudo chmod 775 /var/log/newrelic # group-writable, so we can try it out as the cloo user - - # alright, now test the config - won't run as a daemon, just outputs to terminal - newrelic-plugin-agent -c /etc/newrelic/newrelic-plugin-agent.cfg -f - # there should be no errors - if there are, check the config is correct YAML, - # check your nginx http status path and ES settings, check the nginx error log and the ES error log as needed - -6. Set it up as a service if everything is OK and you start seeing data in a couple of minutes on the Newrelic RPM control panel: - - sudo cp /opt/newrelic-plugin-agent/newrelic-plugin-agent.deb /etc/init.d/newrelic-plugin-agent - sudo chmod a+x /etc/init.d/newrelic-plugin-agent - sudo update-rc.d newrelic-plugin-agent defaults 95 05 - sudo sysv-rc-conf --list | grep newrelic # should return something like newrelic-plu (as well as newrelic-sys, the main newrelic monitor) - sudo service newrelic-plugin-agent start diff --git a/newrelic/newrelic_plugin_agent.cfg b/newrelic/newrelic_plugin_agent.cfg deleted file mode 100644 index b191239..0000000 --- a/newrelic/newrelic_plugin_agent.cfg +++ /dev/null @@ -1,180 +0,0 @@ -%YAML 1.2 ---- -Application: - license_key: INSERT LICENSE KEY HERE - wake_interval: 60 - #proxy: http://localhost:8080 - - #apache_httpd: - # name: hostname - # scheme: http - # host: localhost - # verify_ssl_cert: true - # port: 80 - # path: /server-status - - #couchdb: - # name: localhost - # host: localhost - # verify_ssl_cert: true - # port: 5984 - # username: foo - # password: bar - - #edgecast: - # name: My Edgecase Account - # account: YOUR_ACCOUNT_# - # token: YOUR_API_TOKEN - - elasticsearch: - name: DOAJ Elasticsearch - host: localhost - port: 9200 - scheme: http - - #haproxy: - # name: hostname - # scheme: http - # host: localhost - # port: 80 - # verify_ssl_cert: true - # path: /haproxy?stats;csv - - #memcached: - # name: localhost - # host: localhost - # port: 11211 - # path: /path/to/unix/socket - - #mongodb: - # name: hostname - # host: localhost - # port: 27017 - # admin_username: user - # admin_password: pass - # ssl: False - # ssl_keyfile: /path/to/keyfile - # ssl_certfile: /path/to/certfile - # ssl_cert_reqs: 0 # Should be 0 for ssl.CERT_NONE, 1 for ssl.CERT_OPTIONAL, 2 for ssl.CERT_REQUIRED - # ssl_ca_certs: /path/to/cacerts file - # databases: - # - test - # - yourdbname - - #mongodb: # Use when authentication is required - # name: hostname - # host: localhost - # port: 27017 - # admin_username: user - # admin_password: pass - # ssl: False - # ssl_keyfile: /path/to/keyfile - # ssl_certfile: /path/to/certfile - # ssl_cert_reqs: 0 # Should be 0 for ssl.CERT_NONE, 1 for ssl.CERT_OPTIONAL, 2 for ssl.CERT_REQUIRED - # ssl_ca_certs: /path/to/cacerts file - # databases: - # test: - # username: user - # password: pass - # yourdbname: - # username: user - # password: pass - - nginx: - name: DOAJ nginx - scheme: http - host: localhost - port: 80 - path: /nginx_status - - #pgbouncer: - # host: localhost - # port: 
6000 - # user: stats - - #php_apc: - # name: hostname - # scheme: http - # host: localhost - # verify_ssl_cert: true - # port: 80 - # path: /apc-nrp.php - - #php_fpm: - # - name: fpm-pool - # scheme: https - # host: localhost - # port: 443 - # path: /fpm_status - # query: json - - #postgresql: - # host: localhost - # port: 5432 - # user: postgres - # dbname: postgres - # superuser: False - - #rabbitmq: - # name: rabbitmq@localhost - # host: localhost - # port: 15672 - # verify_ssl_cert: true - # username: guest - # password: guest - # vhosts: # [OPTIONAL, track this vhosts' queues only] - # production_vhost: - # queues: [encode_video, ] # [OPTIONAL, track this queues only] - # staging_vhost: # [track every queue for this vhost] - # - - #redis: - # - name: localhost - # host: localhost - # port: 6379 - # db_count: 16 - # password: foo # [OPTIONAL] - # #path: /var/run/redis/redis.sock - # - name: localhost - # host: localhost - # port: 6380 - # db_count: 16 - # password: foo # [OPTIONAL] - # #path: /var/run/redis/redis.sock - - #riak: - # name: localhost - # host: node0.riak0.scs.mtmeprod.net - # verify_ssl_cert: true - # port: 8098 - - #uwsgi: - # name: localhost - # host: localhost - # port: 1717 - # path: /path/to/unix/socket - -Daemon: - user: newrelic - pidfile: /var/run/newrelic/newrelic_plugin_agent.pid - -Logging: - formatters: - verbose: - format: '%(levelname) -10s %(asctime)s %(process)-6d %(processName) -15s %(threadName)-10s %(name) -45s %(funcName) -25s L%(lineno)-6d: %(message)s' - handlers: - file: - class : logging.handlers.RotatingFileHandler - formatter: verbose - filename: /var/log/newrelic/newrelic_plugin_agent.log - maxBytes: 10485760 - backupCount: 3 - loggers: - newrelic_plugin_agent: - level: INFO - propagate: True - handlers: [console, file] - requests: - level: ERROR - propagate: True - handlers: [console, file] diff --git a/oag_deploy.sh b/oag_deploy.sh deleted file mode 100755 index 5925fd0..0000000 --- a/oag_deploy.sh +++ /dev/null @@ -1,130 +0,0 @@ -#!/bin/bash -reqs_failed=false - -die() { printf %s "${@+$@$'\n'}" ; exit 1; } - -require_python_version_major=2 -require_python_version_minor=7 -# check the version of python available -printf " Python $require_python_version_major.$require_python_version_minor required... " -if python -c "import sys; exit(1) if sys.version_info.major != $require_python_version_major else exit(1) if sys.version_info.minor != $require_python_version_minor else exit(0)"; then - echo "OK, found it." -else - echo "NOT FOUND! Install it and try again." - reqs_failed=true -fi - -# check that virtualenv is available -printf " virtualenv required... " -if virtualenv --version > /dev/null; then - echo "OK, found it." -else - echo "NOT FOUND! Install it and try again." - reqs_failed=true -fi - -# check that redis is working -printf " Redis server needs to be running... " -if redis-cli ping > /dev/null 2>&1; then - echo "OK, found it." -else - echo "NOT FOUND! Install it, run it and try again." - reqs_failed=true -fi - -# check that elasticsearch is working -printf " Elasticsearch needs to be running... " -if curl http://localhost:9200 > /dev/null 2>&1; then - echo "OK, found it." -else - echo "NOT FOUND! Install it, run it and try again." - reqs_failed=true -fi - -if $reqs_failed; then - echo - echo "Looks like some requirements weren't satisfied. 
Try these commands (tested on Ubuntu Linux 12.04):" - echo - echo " sudo apt-get install python virtualenv redis-server" - echo " sudo service redis-server restart" - echo " sudo service elasticsearch restart" - echo - echo "and re-run this script." - echo "NOTE: you have to install elasticsearch manually if you don't have it! It's not in the Debian repos yet." - die -fi - -cd /opt || die "Er, your /opt directory does not exist. Set up your server first." -sudo ls > /dev/null || die "Can't sudo for some reason, do it yourself or fix sudo." -if [ -d /opt/oag ]; then - die "/opt/oag already exists. If you want to set up OAG again, make sure there's nothing valuable in /opt/oag and rm -rf it, then run this script again." -fi -sudo mkdir oag -sudo chown cloo:cloo oag -virtualenv oag # will set up with the "python" executable, whatever that is according to current $PATH - # also does --no-site-packages by default, so no need to specify explicitly here -cd oag -. bin/activate -mkdir src -cd src/ -git clone https://github.com/CottageLabs/OpenArticleGauge.git -cd /opt -sudo mkdir sysadmin -sudo chown cloo:cloo sysadmin/ -git clone https://github.com/CottageLabs/sysadmin.git -cd sysadmin/ -echo -echo -echo "Not replacing your main supervisord configuration file. However, check if it's set up the way you want it." -echo "Run this to see if there are any differences between the supervisord config in the sysadmin repo and the one on this machine:" -echo -echo " diff -u /etc/supervisor/supervisord.conf /opt/sysadmin/config/supervisor/supervisord.conf" -echo -echo -printf "Copying all supervisord application configuration files... " -sudo cp -r config/supervisor/conf.d/oag* /etc/supervisor/conf.d/ -echo "done." -echo -cd /opt/oag/src/OpenArticleGauge/ -# install lxml -sudo apt-get update > /dev/null -echo "Installing lxml, handing off to apt-get now..." -echo -sudo apt-get install libxml2-dev libxslt-dev -echo -echo "Done trying to install lxml, proceeding with installing Python-based requirements." -echo -pip install -e . -pip install gunicorn -pip install flower -echo -echo -sudo supervisorctl reload || die "Couldn't reload the supervisord config. Do you have supervisord? Install it if not." -echo -sudo supervisorctl status -cat << EOF - -All done. The supervisord status command output you just saw should look -something like this: - -oag RUNNING pid 5958, uptime 0:01:14 -oag-celery EXITED Oct 21 11:24 PM -oag-celery-flower RUNNING pid 5960, uptime 0:01:14 -oag-celerybeat RUNNING pid 5959, uptime 0:01:14 - -oag-celery should say EXITED, don't worry about it. (If it says RUNNING, -it will switch to EXITED very soon.) - -You can test the whole setup by doing - - curl localhost:5051/lookup/doi:10.1371/journal.pone.0031314.json - # This should return a small JSON result, with "requested": 1 in it. - - # Now wait 2 to 30 seconds, give it some time to look it up. - curl localhost:5051/lookup/doi:10.1371/journal.pone.0031314.json - # This should return a larger JSON result, with lots of info in the - # "results" key. - -You can also open a browser to http://localhost:5555 to use Flower, the -Celery monitor. You can see the workers going about their tasks. -EOF diff --git a/pinger.py b/pinger.py deleted file mode 100755 index 2484b3c..0000000 --- a/pinger.py +++ /dev/null @@ -1,136 +0,0 @@ -#! 
/usr/bin/python - -import smtplib, socket, requests -from email.MIMEMultipart import MIMEMultipart -from email.MIMEText import MIMEText -from email.Utils import COMMASPACE, formatdate - - -# a script to GET a bunch of server IPs and domain names and send an email -# about their status. Should be set to run, say, every 10 mins with anacron -# or similar, on multiple servers. Note that requests needs to be installed. - - -# edit the following two lists to add server IPs and domain names to the tests -# a status code of 200 OK is expected from each of these, so make sure they -# are configured to return that in normal operation (e.g. make sure your nginx -# config returns a valid response for the IPs / domains -servers = [ - 'http://46.235.224.100', # our main CL server, cottage - 'http://93.93.131.120', # Marks test server, cottaget, test.cottagelabs.com - 'http://46.235.224.107', # prod server cottagep1, arttactic, leaps, swap - 'http://93.93.131.239' # prod server cottaget2, oag -] - - -domains = [ - 'http://cottagelabs.com', - 'http://pads.cottagelabs.com', - 'http://oag.cottagelabs.com', - 'http://artforecaster.com', - 'http://leapssurvey.org', - 'http://swapsurvey.org', - 'http://ifthisistheanswer.com' -] - -# use this ignore list if you want to temporarily ignore one of the above, -# but you don't want to remove it from record -ignore = [ -] - -# set this to true if you always want a status email sent, regardless of state -# otherwise a status email will only come when there is an error -send_msg = False - - -# NO MORE CONFIG BEYOND HERE ================================================= # - - -# get the IP of the machine running this test -this_machine = False -try: - f = open('this_machine','r') - this_machine = f.read() - f.close() -except: - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - s.connect(("8.8.8.8",80)) - this_machine = s.getsockname()[0] - s.close() - try: - f = open('this_machine','w') - f.write(this_machine) - f.close() - except: - pass - - -# defaults -no_response = [] -subject = "all is well with servers and domains" -text = "Tested the following:\n\n" -if this_machine: - text = "This test was run on machine " + this_machine + '\n\n' + text - subject = this_machine + " says " + subject - - -# check each server / domain -for addr in servers + domains: - if addr not in ignore: - text += addr + '\n' - - # if no response, add it to the no_response list - try: - r = requests.get(addr) - if r.status_code != 200: - no_response.append(addr) - except: - no_response.append(addr) - - -# if one did not respond, update and trigger the message -if len(no_response) != 0: - send_msg = True - subject = "ALL IS NOT WELL WITH " - if this_machine: - subject = this_machine + " SAYS " + subject - text += '\n\nAND THESE ONES FAILED TO RESPOND\n\n' - text += '\n'.join(no_response) - subject += ', '.join(no_response) - - -# send the message -if send_msg: - try: - fro = "us@cottagelabs.com" - to = ["us@cottagelabs.com"] - - msg = MIMEMultipart() - msg['From'] = fro - msg['To'] = COMMASPACE.join(to) - msg['Date'] = formatdate(localtime=True) - msg['Subject'] = subject - - msg.attach( MIMEText(text) ) - - smtp = smtplib.SMTP("localhost") - smtp.sendmail(fro, to, msg.as_string() ) - smtp.close() - except: - # TODO: could update this with a note in a local log file, - # if local logging is done - print "mailing failed" - print subject - print text -else: - # TODO: if local logging is done, could record that the tests were done - # and what their outcome was, even if sending a message was 
not required - print subject - print text - - -# TODO: if local logging is required, add some sort of local logging here - - - - diff --git a/setup.sh b/setup.sh deleted file mode 100644 index 6586586..0000000 --- a/setup.sh +++ /dev/null @@ -1,289 +0,0 @@ -##### BASIC SETUP ##### -# this may be needed - not sure, seems to work without it -#export DEBIAN_FRONTEND=noninteractive - -# create user - DO THIS MANUALLY, AND GIVE THE USER A PASSWORD -# REMEMBER TO UPLOAD SOME PUBLIC KEYS BEFORE CHANGING THE SSH SETTINGS -# adduser --gecos "" XXX-USERNAME-HERE-XXX -# and set the pw at the prompt -# (there is a usual CL username and password, but they are not stored here ...) -cd /home/cloo -mkdir /home/cloo/.ssh -chown cloo:cloo /home/cloo/.ssh -chmod 700 /home/cloo/.ssh -# and copy required pubkeys into it -# If you are creating a new Digital Ocean droplet and have specified -# keys to be included on the DO control panel, do this: -# cp /root/.ssh/authorized_keys .ssh/ -# chown cloo:cloo .ssh/authorized_keys - -# If you wish to add more keys manually: -# vim /home/cloo/.ssh/authorized_keys -chown cloo:cloo /home/cloo/.ssh/authorized_keys -chmod 600 /home/cloo/.ssh/authorized_keys -# the sudo group applies on Ubuntu 12.04 and later - previous to that the group was admin -adduser cloo sudo -# visudo and set cloo ALL=(ALL) NOPASSWD: ALL at the end - -# prevent "unknown host" message when doing sudo -# regular user version: sudo sh -c 'echo "127.0.1.1 "`cat /etc/hostname` >> /etc/hosts' -# but if you're root (should be at this point of script) -echo "127.0.1.1 "`cat /etc/hostname` >> /etc/hosts - -# make some oft-used backup dirs -mkdir -p /home/cloo/backups/elasticsearch -mkdir -p /home/cloo/backups/elasticsearch-es-exporter -mkdir -p /home/cloo/backups/logs -mkdir -p /home/cloo/cron-logs -chown -R cloo:cloo /home/cloo/backups - -# time -sudo apt-get -q -y install ntp -sudo dpkg-reconfigure tzdata # Europe/London - -# edit the ssh settings -vim /etc/ssh/sshd_config -# change it as follows: -PermitRootLogin no -PasswordAuthentication no -/etc/init.d/ssh restart - -# set up firewall - allow 22, 80, 443 -apt-get -q -y install ufw -ufw allow 22 -ufw allow 80 -ufw allow 443 -ufw enable - -# set up newrelic server monitoring -echo "deb http://apt.newrelic.com/debian/ newrelic non-free" >> /etc/apt/sources.list.d/newrelic.list -wget -O- https://download.newrelic.com/548C16BF.gpg | apt-key add - -apt-get update -apt-get install newrelic-sysmond -nrsysmond-config --set license_key= -sudo service newrelic-sysmond start -# go to https://rpm.newrelic.com/accounts/526071/server_alert_policies -# and assign your new server to the appropriate policy - - -# apt install useful stuff -add-apt-repository -y ppa:webupd8team/java -apt-get update -# pre-accept the Oracle Java binaries license -echo "debconf shared/accepted-oracle-license-v1-1 select true" | debconf-set-selections -echo "debconf shared/accepted-oracle-license-v1-1 seen true" | debconf-set-selections -apt-get -q -y install mcelog bpython screen htop nginx git-core curl anacron sysv-rc-conf s3cmd bc vnstat python-pip python-dev python-setuptools build-essential python-software-properties oracle-java7-installer - -# run java -version from the command line to check Java's version -# additionally ps and htop will show you the exact path to the java executable running elasticsearch, which includes the version number - -# Set up s3cmd to access the CL AWS account -# scp :/home/cloo/.s3cfg .
-# scp .s3cfg :/home/cloo -# rm .s3cfg - -# If you can't find a prefilled config file, do this: -# s3cmd --configure # take Access and Secret keys it asks for from Cottage Labs' AWS account at amazon.cottagelabs.com - -##### PYTHON AND SUPERVISOR ##### - -# pip install useful python stuff -pip install --upgrade pip -ln -s /usr/local/bin/pip /usr/bin/pip -pip install --upgrade virtualenv -pip install gunicorn -pip install requests - - -# get latest version of supervisor via pip -pip install supervisor -curl -s https://raw.githubusercontent.com/Supervisor/initscripts/178ccca96b2c951f400dd6cb778e899422f8e552/ubuntu > ~/supervisord -mv ~/supervisord /etc/init.d/supervisord -chmod a+x /etc/init.d/supervisord -/usr/sbin/service supervisord stop -update-rc.d supervisord defaults -mkdir /var/log/supervisor -mkdir /etc/supervisor/ -mkdir /etc/supervisor/conf.d - -# Path below relative to this script! -cp config/supervisor/supervisord.conf /etc/supervisor/supervisord.conf - -ln -s /etc/supervisor/supervisord.conf /etc/supervisord.conf -ln -s /usr/local/bin/supervisord /usr/bin/supervisord -/usr/sbin/service supervisord start - - -##### OPTIONAL SETUP ##### - -# get new elasticsearch, 1.4.4 (now available as 1.5.x and later, so if you -# think you're better off with a newer one, check ES' website for the right link) -java -version # make sure you are happy with what this returns, otherwise upgrade it -wget https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-1.4.4.deb -sudo dpkg -i elasticsearch-1.4.4.deb - -# to run on boot, ES recommends -# sudo update-rc.d elasticsearch defaults 95 10 -# sudo /etc/init.d/elasticsearch start -# We generally use sysv-rc-conf for the more intuitive UI, but feel free to use whichever. -sudo sysv-rc-conf # select "elasticsearch" for runlevels 2-5 - -sudo vim /etc/default/elasticsearch -# set -# ES_HEAP_SIZE=1g # or about half the RAM on the server if RAM is up to 8GB. Feel free to set to 12GB if RAM is 16GB. Remember a large disk cache enhances ES performance, so don't just allocate all the RAM to ES' heap. And obviously you may need RAM for other things. -# MAX_LOCKED_MEMORY=unlimited -# RESTART_ON_UPGRADE=false - -sudo vim /etc/elasticsearch/elasticsearch.yml -# set -# bootstrap.mlockall: true -# discovery.zen.ping.multicast.enabled: false - -# It's a wise idea to disable public access to ES -sudo ufw deny in on eth0 to any port 9200 -sudo ufw deny in on eth0 to any port 9300 -sudo ufw status # check rules and that firewall is active -# This still leaves the private network and localhost:9200 functional - -sudo service elasticsearch restart # you should be done. Check with curl localhost:9200 and htop that ES is running and taking the memory you expect. 
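Once ES is back up it is worth confirming that the memory settings actually took effect, not just that the process answers. A minimal sanity-check sketch, assuming the ES 1.x node-info endpoints (paths differ in later versions):

    # mlockall should report true if MAX_LOCKED_MEMORY took effect
    curl -s 'localhost:9200/_nodes/process?pretty' | grep mlockall
    # heap_max should correspond to the ES_HEAP_SIZE you set
    curl -s 'localhost:9200/_nodes/jvm?pretty' | grep heap_max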
- -### RESTORING FROM ES 1.x BACKUPS - -# install the ES S3 plugin -curl -s "localhost:9200/_nodes/settings?pretty=true" | grep "home" # find out where elasticsearch's executable files live -cd /usr/share/elasticsearch # usually /usr/share/elasticsearch with the ES .deb package, but amend as per the grep result above this line if needed - -# Find out which version of the plugin you need for your ES here: -# https://github.com/elastic/elasticsearch-cloud-aws#aws-cloud-plugin-for-elasticsearch -sudo bin/plugin install elasticsearch/elasticsearch-cloud-aws/2.4.2 -sudo service elasticsearch restart - -# register the backup repo -curl -XPUT 'http://localhost:9200/_snapshot/{BACKUP_REPO_NAME}' -d '{ - "type": "s3", - "settings": { - "bucket": "{YOUR BUCKET}", - "region": "{YOUR REGION, usually eu-west-1 for CL}", - "access_key": "{the ES AWS access key - one PER PROJECT}", - "secret_key": "{the ES AWS secret key - one PER PROJECT}" - } -}' -# Make your own restricted AWS user for each project! Ask ET how, or see -# https://github.com/elastic/elasticsearch-cloud-aws#recommended-s3-permissions if making your own. - -# You should get {"acknowledged":true} after the command above. - -# See all available snapshots for restore -# curl localhost:9200/_snapshot/doaj_s3/_all?pretty=true - -curl -XPOST "http://localhost:9200/_snapshot/{SNAPSHOT_REPO}/{SPECIFIC_SNAPSHOT}/_restore" -# You should get {"accepted":true} to that. -# After that until the restore completes (at about 15MiB/s in ET's experience) you can look at how far it's gone by doing -# curl -s localhost:9200/_status?pretty=true | grep "primary_size_in_bytes" && date - -# After your restore finishes, if this is a TEST MACHINE, you should delete the repository so that you don't accidentally write to it -curl -XDELETE "localhost:9200/_snapshot/doaj_s3" - - -# get elasticsearch 0.90.7 - old instructions for pre-1.x ES -cd /opt -curl -L https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-0.90.7.tar.gz -o elasticsearch.tar.gz -tar -xzvf elasticsearch.tar.gz -ln -s elasticsearch-0.90.7 elasticsearch -rm elasticsearch.tar.gz -cd elasticsearch/bin -git clone git://github.com/elasticsearch/elasticsearch-servicewrapper.git -cd elasticsearch-servicewrapper -git checkout 0.90 -mv service ../ -cd ../ -rm -R elasticsearch-servicewrapper -ln -s /opt/elasticsearch/bin/service/elasticsearch /etc/init.d/elasticsearch -update-rc.d elasticsearch defaults -# elasticsearch settings -# vim config/elasticsearch.yml and uncomment bootstrap.mlockall: true -# and uncomment cluster.name: elasticsearch (and change cluster name if necessary) -# IMPORTANT!!! put -# script.disable_dynamic: true -# at the end of the config/elasticsearch.yml file! This is to prevent DDoS-ing -# others through our ES and locking up of the server by our ISP if port 9200 is -# open... -# OK, now: -# vim bin/service/elasticsearch.conf and set.default.ES_HEAP_SIZE=4096 or whatever value works for the machine -# then vim /etc/security/limits.conf and put this in: -# root hard nofile 1024000 -# root soft nofile 1024000 -# root hard memlock unlimited -# root soft memlock unlimited -# * hard nofile 1024000 -# * soft nofile 1024000 -# * hard memlock unlimited -# * soft memlock unlimited -# and then vim /etc/pam.d/common-session and /etc/pam.d/common-session-noninteractive -# and put the following in it: -# session required pam_limits.so -sudo /etc/init.d/elasticsearch start -# this command will always tell you "Running with PID XXX". 
It can say that even when a plugin has caused startup to fail, so wait for 15 seconds, then try -# curl localhost:9200 -# you should get a response; otherwise something's wrong - check /opt/elasticsearch/logs. - - -# install node -apt-get install python-software-properties g++ make -apt-add-repository ppa:chris-lea/node.js -apt-get update -apt-get install nodejs -npm install sqlite3 - -mkdir /home/cloo/elasticsearch-exporter-src -cd /home/cloo/elasticsearch-exporter-src -npm install elasticsearch-exporter --production -cd /home/cloo -ln -s elasticsearch-exporter-src/node_modules/elasticsearch-exporter/exporter.js elasticsearch-exporter -chown -R cloo:cloo elasticsearch-exporter* - - -# install php stuff -#apt-get install php5 php5-cli php5-mysql php5-cgi -#add-apt-repository ppa:brianmercer/php -#apt-get update -#apt-get install php5-fpm - - -# install mysql stuff -#apt-get install mysql-server libmysqlclient-dev python-mysqldb -#pip install mysql-python - - -# installing etherpad lite -# http://mclear.co.uk/2011/08/01/install-etherpad-lite-on-ubuntu/ -# https://github.com/Pita/etherpad-lite/wiki/How-to-deploy-Etherpad-Lite-as-a-service -#apt-get install build-essential python libssl-dev libsqlite3-dev gzip curl -#cd /opt -#git clone git://github.com/Pita/etherpad-lite.git -#useradd etherpad-lite -#mkdir /var/log/etherpad-lite -#chown -R etherpad-lite:etherpad-lite /var/log/etherpad-lite -#chown -R etherpad-lite:etherpad-lite /opt/etherpad-lite -#cd /etc/init.d -#vim etherpad-lite # and copy the script from the above linked page into it, and customise -# e.g. change the path to link to where node is on this install - /usr/bin/node/bin -# and the eplite dir - -#chmod +x etherpad-lite -#update-rc.d etherpad-lite defaults -#cd /opt/etherpad-lite -# edit the settings.json - change db type to sqlite and db file to var/sqlite.db - - -# set up sensors package for better monitoring and more information e.g. CPU temperatures -# THIS IS POINTLESS ON VIRTUAL SERVERS -# taken from https://help.ubuntu.com/community/SensorInstallHowto -# 1. apt-get install lm-sensors -# 2. Run sudo sensors-detect and choose YES to all YES/no questions. -# 3. At the end of sensors-detect, a list of modules that need to be loaded will be displayed. Type "yes" to have sensors-detect insert those modules into /etc/modules, or edit /etc/modules yourself. -# 4. Next, run -# sudo service module-init-tools restart -# This will read the changes you made to /etc/modules in step 3, and insert the new modules into the kernel. -# See sensor info by running "sensors" from the shell.
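The restore-progress one-liner in the S3 restore section above is more convenient run on a loop than retyped by hand. A rough sketch, assuming the same ES 1.x _status endpoint used there:

    # print restore progress every 30 seconds; stop with Ctrl-C
    while sleep 30; do
        curl -s 'localhost:9200/_status?pretty=true' | grep 'primary_size_in_bytes'
        date
    done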
 - - diff --git a/sorted_swap.sh b/sorted_swap.sh deleted file mode 100755 index e96af99..0000000 --- a/sorted_swap.sh +++ /dev/null @@ -1 +0,0 @@ -./swap.sh | sort -nk3 diff --git a/swap.sh b/swap.sh deleted file mode 100755 index 73bccc7..0000000 --- a/swap.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -# Get current swap usage for all running processes -# Erik Ljungstrom 27/05/2011 -# Modified by Mikko Rantalainen 2012-08-09 -# Pipe the output to "sort -nk3" to get sorted output -SUM=0 -OVERALL=0 -for DIR in `find /proc/ -maxdepth 1 -type d -regex "^/proc/[0-9]+"` -do - PID=`echo $DIR | cut -d / -f 3` - PROGNAME=`ps -p $PID -o comm --no-headers` - for SWAP in `grep Swap $DIR/smaps 2>/dev/null | awk '{ print $2 }'` - do - let SUM=$SUM+$SWAP - done - if (( $SUM > 0 )); then - echo "PID=$PID swapped $SUM KB ($PROGNAME)" - fi - let OVERALL=$OVERALL+$SUM - SUM=0 -done -echo "Overall swap used: $OVERALL KB" diff --git a/swap2ram.sh b/swap2ram.sh deleted file mode 100755 index 88f4587..0000000 --- a/swap2ram.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh - -mem=$(free | awk '/Mem:/ {print $4}') -swap=$(free | awk '/Swap:/ {print $3}') - -if [ $mem -lt $swap ]; then - echo "ERROR: not enough RAM to write swap back, nothing done" >&2 - exit 1 -fi - -swapoff -a && -swapon -a diff --git a/useful_info/elasticsearch-exporter.md b/useful_info/elasticsearch-exporter.md deleted file mode 100644 index 26e6861..0000000 --- a/useful_info/elasticsearch-exporter.md +++ /dev/null @@ -1,7 +0,0 @@ -# Use cases -## Export an index - - cd ~/backups/elasticsearch-es-exporter - node --nouse-idle-notification --expose-gc ~/elasticsearch-exporter -i doaj -g doaj_`date +%F_%H%M` -m 0.6 - -This should use no more than 60% of the remaining memory on the server (the -m parameter). Up it to 0.9 for a faster operation if the server has lots of RAM to spare. diff --git a/useful_info/gunicorn-memory-creep.txt b/useful_info/gunicorn-memory-creep.txt deleted file mode 100644 index a31b341..0000000 --- a/useful_info/gunicorn-memory-creep.txt +++ /dev/null @@ -1,26 +0,0 @@ -Gunicorn seems to manage its memory just fine for days, and then at some point -usage shoots up by a gigabyte. Considering our machines usually pin lots of RAM -to elasticsearch, this can easily use up all the available memory and crash + -burn. - -The crash + burn has not happened yet, so we don't know whether this is some -form of controlled gunicorn behaviour or not. However, the DOAJ machine did -reach 94% memory. - -The solution is to send a HUP signal to gunicorn, which it interprets as a -graceful restart - it will restart the workers when they are finished -processing their current requests (so in practice there should always be -workers available to serve new requests). - -If you are running gunicorn manually, just get the PID somehow (ps -ef | grep -gunicorn and look for the master process) and do kill -HUP [the pid you got]. - -If you are running your app using supervisord (you should be, if you're using -gunicorn to deploy it to production!), this is a one-liner to send a HUP to -gunicorn: - - kill -HUP $(sudo supervisorctl pid doaj) - -We should consider automating the solution to this problem using some simple -memory management monitor or even just a bash script which monitors gunicorn's -memory usage; a rough sketch of such a script follows below.
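Such a monitor only needs a few lines of bash around the supervisorctl one-liner above. A cron-able sketch; the 1GB threshold and the "doaj" supervisor program name are assumptions to adjust per machine:

    #!/bin/bash
    # HUP gunicorn gracefully if its processes' total RSS exceeds a threshold
    THRESHOLD_KB=1048576  # 1GB - an assumed limit, tune per machine
    MASTER=$(sudo supervisorctl pid doaj)  # "doaj" is an example program name
    # sum resident memory (KB) of the master process and its workers
    TOTAL=$(ps -o rss= -p "$MASTER" --ppid "$MASTER" | awk '{s+=$1} END {print s}')
    if [ "${TOTAL:-0}" -gt "$THRESHOLD_KB" ]; then
        kill -HUP "$MASTER"
    fi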
diff --git a/useful_info/log-files.txt b/useful_info/log-files.txt deleted file mode 100644 index e2eb15f..0000000 --- a/useful_info/log-files.txt +++ /dev/null @@ -1,33 +0,0 @@ -Log files generally follow this pattern: - Most recent: - The one before: .0 - Even older ones: .*.gz (increasing numbers denote older logs, e.g. .1.gz is more recent than .2.gz) - -Use - - zcat ..gz | less - -(or pipe to grep -i "suspicious activity" instead of less) -to decompress logs for viewing on the fly. - -NOTE: The firewall we use, ufw, puts things in the kernel log and the syslog. If you don't care about those events, make sure to always - - grep -v "UFW BLOCK" - -when reading these logs. - -List of useful log files: - - /var/log/dmesg # stamped log of events from boot of system to now, mostly hardware stuff - /var/log/syslog # the system log, may contain big parts of dmesg - # and hardware stuff, but will also have firewall - # block events and other info. Many programs will - # output to this when they don't know what else to - # do (if they have permissions). - /var/log/kern.log # the big kernel log, anything kernel-related, firewall events and hardware messages - /var/log/mcelog # decoded hardware errors information .. if mcelog works (it's a bit outdated and doesn't work well with Ubuntu) - /var/log/nginx/access.log # all requests served by nginx - /var/log/nginx/error.log # all requests served by nginx which resulted in errors, e.g. 502 Bad Gateway when your app is broken! - /var/log/supervisor/supervisord.log # the log of the master supervisord process - /var/log/supervisor/*-error.log # the output of applications seems to get logged to the error logs, not the normal logs - /var/log/supervisor/*-access.log # seems to log explicit print statements in web application code. STDOUT, basically. diff --git a/useful_info/restore-elasticsearch-index-from-backup.md b/useful_info/restore-elasticsearch-index-from-backup.md deleted file mode 100644 index f3d3512..0000000 --- a/useful_info/restore-elasticsearch-index-from-backup.md +++ /dev/null @@ -1,41 +0,0 @@ -# Warning -ALWAYS REFER TO THE SCRIPT FOR THE LATEST USAGE INFO! (It could be updated independently of this file) - -This is just for convenience. - -# Example of using the restore script: - - /opt/sysadmin/backup/restore_from_s3.sh s3://yonce-index-backup /opt/elasticsearch/data /home/cloo/backups/elasticsearch/ - -# Usage reminder - - -3 arguments needed -Usage: ```./restore_from_s3.sh s3://<bucket> <restore dir> <local sync dir>``` - -All files and folders in your S3 bucket will be: - 1. synced down from your S3 bucket (only changed ones and new ones will be downloaded) to ``<local sync dir>`` - 2. they will then be copied (only changed and new ones) from ``<local sync dir>`` to ``<restore dir>`` - (No attempts at decompression will be made) - -# s3cmd configuration - -If the following error occurs: - -``` -ERROR: /home/cloo/.s3cfg: No such file or directory -ERROR: Configuration file not available. -ERROR: Consider using --configure parameter to create one. -``` - -then copy the .s3cfg file from another server. It's got private AWS credentials so it can't be held in the sysadmin repo. - -## Copying .s3cfg securely - -1. SSH into a server which has it (e.g. yonce). -2. Generate a key pair using ssh-keygen, and use the usual CL server password as the passphrase. (NOTE: YOU REALLY SHOULD PROTECT IT WITH A PASSPHRASE!) -3. cat ~/.ssh/id_rsa.pub -4. append the result of the previous command to the ~/.ssh/authorized_keys on the target (your new) server -5.
scp ~/.s3cfg cloo@188.226.163.151:/home/cloo # replace the IP address with the address of the target (your new) server - - The key should be protected with the usual CL server password. Ask Mark or Emanuil if you don't know it. diff --git a/useful_info/server-setup.txt b/useful_info/server-setup.txt deleted file mode 100644 index f70a8f1..0000000 --- a/useful_info/server-setup.txt +++ /dev/null @@ -1,7 +0,0 @@ -Servers should have ports 22, 80, 443 open. - -Test servers should have ports 9200 (ES) and 6379 (redis) open in -addition. - -Test servers should also have the bind interface line in the redis -config uncommented so that the redis server answers to eth0:6379.
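On a test server those extra openings amount to something like the following; binding redis to all interfaces is an assumption here, so prefer the private network address where one exists:

    # test servers only - never do this on production
    sudo ufw allow 9200   # elasticsearch
    sudo ufw allow 6379   # redis
    # in /etc/redis/redis.conf, set the bind line to the interface you want, e.g.
    #   bind 0.0.0.0
    sudo service redis-server restart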