Compare commits

..

No commits in common. "5fc3015bf17a28c6983b22789c66969a58269dc6" and "492fdc9d28516e30ebee9390dd1b5348533194c8" have entirely different histories.

7 changed files with 111 additions and 77 deletions

View File

@ -2,7 +2,6 @@ from apifairy import APIFairy
from config import Config
from docker import DockerClient
from flask import Flask
from flask.logging import default_handler
from flask_apscheduler import APScheduler
from flask_assets import Environment
from flask_login import LoginManager
@ -13,12 +12,11 @@ from flask_paranoid import Paranoid
from flask_socketio import SocketIO
from flask_sqlalchemy import SQLAlchemy
from flask_hashids import Hashids
from logging import Formatter, StreamHandler
from werkzeug.middleware.proxy_fix import ProxyFix
docker_client = DockerClient.from_env()
apifairy = APIFairy()
assets = Environment()
db = SQLAlchemy()
@ -38,42 +36,82 @@ def create_app(config: Config = Config) -> Flask:
app = Flask(__name__)
app.config.from_object(config)
# region Logging
log_formatter = Formatter(
fmt=app.config['NOPAQUE_LOG_FORMAT'],
datefmt=app.config['NOPAQUE_LOG_DATE_FORMAT']
)
_configure_logging(app)
_configure_middlewares(app)
_init_docker_client(app)
_init_extensions(app)
_register_blueprints(app)
_register_socketio_namespaces(app)
_register_db_event_listeners(app)
log_handler = StreamHandler()
log_handler.setFormatter(log_formatter)
log_handler.setLevel(app.config['NOPAQUE_LOG_LEVEL'])
@app.before_request
def log_headers():
from flask import request
print(request.__dict__)
return app
def _configure_logging(app: Flask):
from flask.logging import default_handler
from logging import Formatter, StreamHandler
log_date_format: str = app.config['NOPAQUE_LOG_DATE_FORMAT']
log_format: str = app.config['NOPAQUE_LOG_FORMAT']
log_level: str = app.config['NOPAQUE_LOG_LEVEL']
formatter = Formatter(fmt=log_format, datefmt=log_date_format)
handler = StreamHandler()
handler.setFormatter(formatter)
handler.setLevel(log_level)
app.logger.setLevel('DEBUG')
app.logger.removeHandler(default_handler)
app.logger.addHandler(log_handler)
# endregion Logging
app.logger.addHandler(handler)
def _configure_middlewares(app: Flask):
proxy_fix_enabled: bool = app.config['NOPAQUE_PROXY_FIX_ENABLED']
if proxy_fix_enabled:
from werkzeug.middleware.proxy_fix import ProxyFix
proxy_fix_x_for: int = app.config['NOPAQUE_PROXY_FIX_X_FOR']
proxy_fix_x_host: int = app.config['NOPAQUE_PROXY_FIX_X_HOST']
proxy_fix_x_port: int = app.config['NOPAQUE_PROXY_FIX_X_PORT']
proxy_fix_x_prefix: int = app.config['NOPAQUE_PROXY_FIX_X_PREFIX']
proxy_fix_x_proto: int = app.config['NOPAQUE_PROXY_FIX_X_PROTO']
# region Middlewares
if app.config['NOPAQUE_PROXY_FIX_ENABLED']:
app.wsgi_app = ProxyFix(
app.wsgi_app,
x_for=app.config['NOPAQUE_PROXY_FIX_X_FOR'],
x_host=app.config['NOPAQUE_PROXY_FIX_X_HOST'],
x_port=app.config['NOPAQUE_PROXY_FIX_X_PORT'],
x_prefix=app.config['NOPAQUE_PROXY_FIX_X_PREFIX'],
x_proto=app.config['NOPAQUE_PROXY_FIX_X_PROTO']
x_for=proxy_fix_x_for,
x_host=proxy_fix_x_host,
x_port=proxy_fix_x_port,
x_prefix=proxy_fix_x_prefix,
x_proto=proxy_fix_x_proto
)
# endregion Middlewares
# region Extensions
def _init_docker_client(app: Flask):
registry: str = app.config['NOPAQUE_DOCKER_REGISTRY']
username: str = app.config['NOPAQUE_DOCKER_REGISTRY_USERNAME']
password: str = app.config['NOPAQUE_DOCKER_REGISTRY_PASSWORD']
docker_client.login(
app.config['NOPAQUE_DOCKER_REGISTRY_USERNAME'],
password=app.config['NOPAQUE_DOCKER_REGISTRY_PASSWORD'],
registry=app.config['NOPAQUE_DOCKER_REGISTRY']
username,
password=password,
registry=registry
)
def _init_extensions(app: Flask):
from typing import Callable
from .daemon import daemon
from .models import AnonymousUser, User
is_primary_instance: bool = app.config['NOPAQUE_IS_PRIMARY_INSTANCE']
socketio_message_queue_uri: str = app.config['NOPAQUE_SOCKETIO_MESSAGE_QUEUE_URI']
login_user_loader_callback: Callable[[int], User | None] = lambda user_id: User.query.get(int(user_id))
apifairy.init_app(app)
assets.init_app(app)
db.init_app(app)
@ -81,17 +119,19 @@ def create_app(config: Config = Config) -> Flask:
login.init_app(app)
login.anonymous_user = AnonymousUser
login.login_view = 'auth.login'
login.user_loader(lambda user_id: User.query.get(int(user_id)))
login.user_loader(login_user_loader_callback)
ma.init_app(app)
mail.init_app(app)
migrate.init_app(app, db)
paranoid.init_app(app)
paranoid.redirect_view = '/'
scheduler.init_app(app)
socketio.init_app(app, message_queue=app.config['NOPAQUE_SOCKETIO_MESSAGE_QUEUE_URI'])
# endregion Extensions
if is_primary_instance:
scheduler.add_job('daemon', daemon, args=(app,), seconds=3, trigger='interval')
socketio.init_app(app, message_queue=socketio_message_queue_uri)
# region Blueprints
def _register_blueprints(app: Flask):
from .admin import bp as admin_blueprint
app.register_blueprint(admin_blueprint, url_prefix='/admin')
@ -127,28 +167,6 @@ def create_app(config: Config = Config) -> Flask:
from .workshops import bp as workshops_blueprint
app.register_blueprint(workshops_blueprint, url_prefix='/workshops')
# endregion Blueprints
# region SocketIO Namespaces
from .corpora.cqi_over_sio import CQiOverSocketIO
socketio.on_namespace(CQiOverSocketIO('/cqi_over_sio'))
# endregion SocketIO Namespaces
# region Database event Listeners
from .models.event_listeners import register_event_listeners
register_event_listeners()
# endregion Database event Listeners
# region Add scheduler jobs
if app.config['NOPAQUE_IS_PRIMARY_INSTANCE']:
from .tasks import handle_corpora
scheduler.add_job('handle_corpora', handle_corpora, seconds=3, trigger='interval')
from .tasks import handle_jobs
scheduler.add_job('handle_jobs', handle_jobs, seconds=3, trigger='interval')
# endregion Add scheduler jobs
return app
# def _add_admin_views():
@ -163,3 +181,15 @@ def create_app(config: Config = Config) -> Flask:
# if not issubclass(v, db.Model):
# continue
# admin.add_view(ModelView(v, db.session, category='Database'))
def _register_socketio_namespaces(app: Flask):
    '''
    Register SocketIO namespaces on the shared ``socketio`` extension.

    Currently registers the CQi-over-SocketIO namespace under
    ``/cqi_over_sio``.

    NOTE(review): ``app`` is unused here; it is presumably kept for
    signature consistency with the other ``_register_*`` helpers
    called from ``create_app`` — confirm before removing.
    '''
    from .corpora.cqi_over_sio import CQiOverSocketIO
    socketio.on_namespace(CQiOverSocketIO('/cqi_over_sio'))
def _register_db_event_listeners(app: Flask):
    '''
    Attach the SQLAlchemy event listeners defined in
    ``app.models.event_listeners``.

    NOTE(review): ``app`` is unused; registration appears to be global
    (no per-app state is passed through) — confirm with the
    ``register_event_listeners`` implementation.
    '''
    from .models.event_listeners import register_event_listeners
    register_event_listeners()

11
app/daemon/__init__.py Normal file
View File

@ -0,0 +1,11 @@
from flask import Flask
from app import db
from .corpus_utils import check_corpora
from .job_utils import check_jobs
def daemon(app: Flask):
    '''
    Run one scheduled maintenance pass inside the Flask application
    context: process pending corpora, then pending jobs, and commit
    the accumulated database changes once at the end.

    :param app: the Flask application whose context (config, db
        session) the checks run under.
    '''
    with app.app_context():
        check_corpora()
        check_jobs()
        # Single commit after both checks so one pass persists atomically.
        db.session.commit()

View File

@ -1,4 +1,4 @@
from app import db, docker_client, scheduler
from app import docker_client
from app.models import Corpus, CorpusStatus
from flask import current_app
import docker
@ -6,11 +6,7 @@ import os
import shutil
def task():
with scheduler.app.app_context():
_handle_corpora()
def _handle_corpora():
def check_corpora():
corpora = Corpus.query.all()
for corpus in [x for x in corpora if x.status == CorpusStatus.SUBMITTED]:
_create_build_corpus_service(corpus)
@ -26,7 +22,6 @@ def _handle_corpora():
_create_cqpserver_container(corpus)
for corpus in [x for x in corpora if x.status == CorpusStatus.CANCELING_ANALYSIS_SESSION]:
_remove_cqpserver_container(corpus)
db.session.commit()
def _create_build_corpus_service(corpus):
''' # Docker service settings # '''

View File

@ -1,4 +1,4 @@
from app import db, docker_client, hashids, scheduler
from app import db, docker_client, hashids
from app.models import (
Job,
JobResult,
@ -15,11 +15,7 @@ import os
import shutil
def task():
with scheduler.app.app_context():
_handle_jobs()
def _handle_jobs():
def check_jobs():
jobs = Job.query.all()
for job in [x for x in jobs if x.status == JobStatus.SUBMITTED]:
_create_job_service(job)
@ -27,7 +23,6 @@ def _handle_jobs():
_checkout_job_service(job)
for job in [x for x in jobs if x.status == JobStatus.CANCELING]:
_remove_job_service(job)
db.session.commit()
def _create_job_service(job):
''' # Docker service settings # '''

View File

@ -1,2 +0,0 @@
from .handle_corpora import task as handle_corpora
from .handle_jobs import task as handle_jobs

View File

@ -15,7 +15,7 @@ class Config:
''' Configuration class for the Flask application. '''
# region APIFairy
APIFAIRY_TITLE = 'nopaque API'
APIFAIRY_TITLE = 'nopaque'
APIFAIRY_VERSION = '0.0.1'
APIFAIRY_APISPEC_PATH = '/api/apispec.json'
APIFAIRY_UI = 'swagger_ui'
@ -60,7 +60,10 @@ class Config:
# region Flask-SQLAlchemy
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI', f'sqlite:///{BASE_DIR}/data.sqlite')
SQLALCHEMY_DATABASE_URI = os.environ.get(
'SQLALCHEMY_DATABASE_URI',
f'sqlite:///{BASE_DIR}/data.sqlite'
)
SQLALCHEMY_RECORD_QUERIES = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
# endregion Flask-SQLAlchemy
@ -80,8 +83,14 @@ class Config:
NOPAQUE_DOCKER_REGISTRY_USERNAME = os.environ.get('NOPAQUE_DOCKER_REGISTRY_USERNAME')
NOPAQUE_DOCKER_REGISTRY_PASSWORD = os.environ.get('NOPAQUE_DOCKER_REGISTRY_PASSWORD')
NOPAQUE_LOG_DATE_FORMAT = os.environ.get('NOPAQUE_LOG_DATE_FORMAT', '%Y-%m-%d %H:%M:%S')
NOPAQUE_LOG_FORMAT = os.environ.get('NOPAQUE_LOG_FORMAT','[%(asctime)s] %(levelname)s: %(message)s')
# Date format used for %(asctime)s in log records.
NOPAQUE_LOG_DATE_FORMAT = os.environ.get(
    'NOPAQUE_LOG_DATE_FORMAT',
    '%Y-%m-%d %H:%M:%S'
)
# BUG FIX: this previously read the 'NOPAQUE_LOG_DATE_FORMAT' env var
# (copy-paste error), so NOPAQUE_LOG_FORMAT could never be overridden
# via its own environment variable.
NOPAQUE_LOG_FORMAT = os.environ.get(
    'NOPAQUE_LOG_FORMAT',
    '[%(asctime)s] %(levelname)s in %(pathname)s (function: %(funcName)s, line: %(lineno)d): %(message)s'
)
NOPAQUE_LOG_LEVEL = os.environ.get('NOPAQUE_LOG_LEVEL', 'WARNING')
NOPAQUE_PROXY_FIX_ENABLED = os.environ.get('NOPAQUE_PROXY_FIX_ENABLED', 'false').lower() == 'true'

View File

@ -7,10 +7,6 @@
# Flask #
# https://flask.palletsprojects.com/en/1.1.x/config/ #
##############################################################################
# CHOOSE ONE: False, True
# DEFAULT: False
# FLASK_DEBUG=
# CHOOSE ONE: http, https
# DEFAULT: http
# PREFERRED_URL_SCHEME=
@ -142,7 +138,7 @@ NOPAQUE_DOCKER_REGISTRY_PASSWORD=
# DEFAULT: [%(asctime)s] %(levelname)s in %(pathname)s (function: %(funcName)s, line: %(lineno)d): %(message)s
# NOPAQUE_LOG_FORMAT=
# DEFAULT: WARNING
# DEFAULT: DEBUG if FLASK_DEBUG == True else WARNING
# CHOOSE ONE: CRITICAL, ERROR, WARNING, INFO, DEBUG
# NOPAQUE_LOG_LEVEL=