Skip to content

Commit

Permalink
Merge branch 'task/WG-141-no-pool-for-workers' into feature/questionnaire
Browse files Browse the repository at this point in the history
  • Loading branch information
nathanfranklin committed Sep 7, 2023
2 parents a586d70 + 481bfd9 commit 13c8829
Show file tree
Hide file tree
Showing 7 changed files with 71 additions and 21 deletions.
1 change: 1 addition & 0 deletions docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ services:
- APP_ENV=development
- ASSETS_BASE_DIR=/assets
- TENANT
- APP_CONTEXT=celery
stdin_open: true
tty: true
container_name: geoapiworkers
Expand Down
1 change: 1 addition & 0 deletions geoapi/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@ def handle_streetview_limit_exception(error: Exception):
return {'message': 'Exceed concurrent streetview publish limit'}, 403


# ensure SQLAlchemy sessions are properly closed at the end of each request.
@app.teardown_appcontext
def shutdown_session(exception=None):
    """Remove the scoped session when the Flask app context is torn down.

    Registered via ``app.teardown_appcontext``; Flask passes the unhandled
    exception (or None) as ``exception``, which is intentionally ignored —
    the session is removed either way so no connection leaks across requests.
    """
    db_session.remove()
23 changes: 22 additions & 1 deletion geoapi/db.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,36 @@
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.pool import NullPool
from geoapi.settings import settings
import os


# Database URL assembled from environment-driven settings (geoapi.settings).
CONNECTION_STRING = 'postgresql://{}:{}@{}/{}'.format(
    settings.DB_USERNAME,
    settings.DB_PASSWD,
    settings.DB_HOST,
    settings.DB_NAME
)
# NOTE(review): this unconditional engine looks superseded by
# create_engine_for_context() defined below — confirm only one
# `engine = ...` assignment survives in the final file.
engine = create_engine(CONNECTION_STRING, echo=False)


def create_engine_for_context():
    """Build a SQLAlchemy engine configured for the running app context.

    The ``APP_CONTEXT`` environment variable selects the configuration:
    ``"celery"`` workers get a pool-less engine (``NullPool``); any other
    value — defaulting to ``"flask"`` when unset — gets a pooled engine
    with connection liveness safeguards.

    Returns:
        sqlalchemy.engine.Engine: the context-appropriate engine.
    """
    app_context = os.environ.get('APP_CONTEXT', 'flask')  # default to 'flask' if not provided
    if app_context == "celery":
        # Celery workers disable pooling (NullPool); see
        # https://jira.tacc.utexas.edu/browse/WG-141 and
        # https://jira.tacc.utexas.edu/browse/WG-131
        return create_engine(CONNECTION_STRING,
                             echo=False,  # default value
                             poolclass=NullPool)
    # Web (flask) context: pooled connections with pre-ping and reset-on-return.
    return create_engine(CONNECTION_STRING,
                         echo=False,  # default value
                         pool_pre_ping=True,
                         pool_reset_on_return=True)


engine = create_engine_for_context()

db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
Expand Down
4 changes: 2 additions & 2 deletions geoapi/tasks/external_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -259,12 +259,12 @@ def import_from_agave(tenant_id: str, userId: int, systemId: str, path: str, pro
"""
Recursively import files from a system/path.
    If file has already been imported (i.e. during a previous call), we don't re-import it. Likewise,
if we have previously failed at importing a file, we do not retry to import the file (unless it was an error like
file-access where it makes sense to retry at a later time).
Files located in /Rapp folder (i.e. created by the RAPP app) are handled differently as their location data is not
    contained in specific-file-format metadata (e.g. exif for images) but instead the location is stored in Tapis
metadata.
This method is called by refresh_observable_projects()
Expand Down
4 changes: 2 additions & 2 deletions kube/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,8 @@ create: checkforcontext checkfortag
.PHONY: delete
# Tear down all geoapi resources in the selected kube context.
# Postgres deployment/service are intentionally NOT listed here: the database
# is managed outside this delete target, so wiping it on every `make delete`
# would destroy persistent data.
delete: checkforcontext
	@echo "Deleting geoapi deployment/services/migration-job in '$(KUBE_CONTEXT)' context"
	kubectl delete --context $(KUBE_CONTEXT) --ignore-not-found=true deployment geoapi geoapi-workers geoapi-celerybeat geoapi-nginx geoapi-rabbitmq
	kubectl delete --context $(KUBE_CONTEXT) --ignore-not-found=true service geoapi geoapi-nginx geoapi-rabbitmq
	kubectl delete --context $(KUBE_CONTEXT) --ignore-not-found=true job/geoapi-migrations

.PHONY: delete-staging
Expand Down
18 changes: 2 additions & 16 deletions kube/geoapi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -100,22 +100,6 @@ spec:
---
apiVersion: v1
kind: Service
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.16.0 (0c01309)
labels:
app: geoapi-postgres
name: geoapi-postgres
spec:
ports:
- port: 5432
targetPort: 5432
selector:
app: geoapi-postgres
---
apiVersion: v1
kind: Service
metadata:
annotations:
kompose.cmd: kompose convert
Expand Down Expand Up @@ -270,6 +254,8 @@ spec:
- configMapRef:
name: geoapi-environment-vars
env:
- name: APP_CONTEXT
value: "celery"
- name: RABBITMQ_PASSWD
valueFrom:
secretKeyRef:
Expand Down
41 changes: 41 additions & 0 deletions kube/utils/backup_staging_production_to_ranch.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
#!/bin/bash
# Back up staging and production geoapi /assets volumes to TACC Ranch.
#
# Flow: start a utility pod in each cluster, wait until both are Running,
# prune backups older than 42 days on Ranch, then stream a tar of /assets
# (minus temporary streetview files) over ssh, split into 300G pieces.
set -ex

# starting production-assets-util
kubectl --context=wma-geospatial apply -f ~geoapi/util/production_assets_util.yaml
# starting staging-assets-util
kubectl --context=geoapi-dev apply -f ~geoapi/util/staging_assets_util.yaml

# Function to check if both utility pods are in the "Running" state
function are_pods_running() {
    local pod1_status=$(kubectl --context=wma-geospatial get pods production-assets-util -o jsonpath='{.status.phase}')
    local pod2_status=$(kubectl --context=geoapi-dev get pods staging-assets-util -o jsonpath='{.status.phase}')

    if [[ "$pod1_status" == "Running" ]] && [[ "$pod2_status" == "Running" ]]; then
        return 0  # Both pods are running
    else
        return 1  # At least one pod is not running
    fi
}

# Wait for both pods to be ready
while ! are_pods_running; do
    echo "Waiting 30s for utility pods to be ready..."
    sleep 30
done

echo "Both utility pods are ready."

echo "Removing backups older than 6 weeks (i.e. 42 days) (STAGING)"
ssh [email protected] 'find /stornext/ranch_01/ranch/projects/DesignSafe-Community/geoapi_assets_backup/staging/ -mtime +42 -type f -exec rm {} +'

echo "Backing up staging (skipping /assets/streetview as those are temp files)"
# BUGFIX: a stray `--exclude /assets/` excluded the archive root itself,
# matching everything under it — the staging archive would contain nothing.
# Exclude only /assets/streetview, mirroring the production command below.
kubectl exec -i pod/staging-assets-util --context=geoapi-dev -- tar --exclude /assets/streetview -c -f - /assets/ | ssh [email protected] 'split -b 300G - /stornext/ranch_01/ranch/projects/DesignSafe-Community/geoapi_assets_backup/staging/staging_assets`date +%Y-%m-%d`.tar.'

echo "Finished with STAGING and beginning PRODUCTION"

echo "Removing backups older than 6 weeks (i.e. 42 days) (PRODUCTION)"
ssh [email protected] 'find /stornext/ranch_01/ranch/projects/DesignSafe-Community/geoapi_assets_backup/production/ -mtime +42 -type f -exec rm {} +'

echo "Backing up production (skipping /assets/streetview as those are temp files)"
kubectl exec -i pod/production-assets-util --context=wma-geospatial -- tar --exclude /assets/streetview -c -f - /assets/ | ssh [email protected] 'split -b 300G - /stornext/ranch_01/ranch/projects/DesignSafe-Community/geoapi_assets_backup/production/production_assets`date +%Y-%m-%d`.tar.'

0 comments on commit 13c8829

Please sign in to comment.