Per-Arne Andersen
4 years ago
34 changed files with 463 additions and 365 deletions
@ -0,0 +1,89 @@ |
|||
# A generic, single database configuration. |
|||
|
|||
[alembic] |
|||
# path to migration scripts |
|||
script_location = migrations |
|||
|
|||
# template used to generate migration files |
|||
# file_template = %%(rev)s_%%(slug)s |
|||
|
|||
# sys.path path, will be prepended to sys.path if present. |
|||
# defaults to the current working directory. |
|||
prepend_sys_path = . |
|||
|
|||
# timezone to use when rendering the date |
|||
# within the migration file as well as the filename. |
|||
# string value is passed to dateutil.tz.gettz() |
|||
# leave blank for localtime |
|||
# timezone = |
|||
|
|||
# max length of characters to apply to the |
|||
# "slug" field |
|||
# truncate_slug_length = 40 |
|||
|
|||
# set to 'true' to run the environment during |
|||
# the 'revision' command, regardless of autogenerate |
|||
# revision_environment = false |
|||
|
|||
# set to 'true' to allow .pyc and .pyo files without |
|||
# a source .py file to be detected as revisions in the |
|||
# versions/ directory |
|||
# sourceless = false |
|||
|
|||
# version location specification; this defaults |
|||
# to alembic/versions. When using multiple version |
|||
# directories, initial revisions must be specified with --version-path |
|||
# version_locations = %(here)s/bar %(here)s/bat alembic/versions |
|||
|
|||
# the output encoding used when revision files |
|||
# are written from script.py.mako |
|||
# output_encoding = utf-8 |
|||
|
|||
sqlalchemy.url = sqlite:///database.db |
|||
|
|||
|
|||
[post_write_hooks] |
|||
# post_write_hooks defines scripts or Python functions that are run |
|||
# on newly generated revision scripts. See the documentation for further |
|||
# detail and examples |
|||
|
|||
# format using "black" - use the console_scripts runner, against the "black" entrypoint |
|||
# hooks=black |
|||
# black.type=console_scripts |
|||
# black.entrypoint=black |
|||
# black.options=-l 79 |
|||
|
|||
# Logging configuration |
|||
[loggers] |
|||
keys = root,sqlalchemy,alembic |
|||
|
|||
[handlers] |
|||
keys = console |
|||
|
|||
[formatters] |
|||
keys = generic |
|||
|
|||
[logger_root] |
|||
level = WARN |
|||
handlers = console |
|||
qualname = |
|||
|
|||
[logger_sqlalchemy] |
|||
level = WARN |
|||
handlers = |
|||
qualname = sqlalchemy.engine |
|||
|
|||
[logger_alembic] |
|||
level = INFO |
|||
handlers = |
|||
qualname = alembic |
|||
|
|||
[handler_console] |
|||
class = StreamHandler |
|||
args = (sys.stderr,) |
|||
level = NOTSET |
|||
formatter = generic |
|||
|
|||
[formatter_generic] |
|||
format = %(levelname)-5.5s [%(name)s] %(message)s |
|||
datefmt = %H:%M:%S |
@ -1,13 +0,0 @@ |
|||
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

import const

# check_same_thread=False permits the SQLite connection to be used from
# threads other than the one that created it.
engine = sqlalchemy.create_engine(
    const.DATABASE_URL, connect_args={"check_same_thread": False}
)

# Session factory; commit and flush are controlled explicitly by callers.
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

Base = declarative_base()
@ -0,0 +1,21 @@ |
|||
import sqlalchemy
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

import const

# check_same_thread=False permits the SQLite connection to be used from
# threads other than the one that created it.
engine = sqlalchemy.create_engine(
    const.DATABASE_URL, connect_args={"check_same_thread": False}
)

# Session factory; commit and flush are controlled explicitly by callers.
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

# Deterministic constraint names so migrations can refer to constraints
# by name (needed e.g. for SQLite batch-mode constraint drops).
meta = MetaData(
    naming_convention={
        "ix": "ix_%(column_0_label)s",
        "uq": "uq_%(table_name)s_%(column_0_name)s",
        "ck": "ck_%(table_name)s_%(column_0_name)s",
        "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
        "pk": "pk_%(table_name)s",
    }
)
Base = declarative_base(metadata=meta)
@ -0,0 +1,59 @@ |
|||
import os |
|||
|
|||
import alembic.command |
|||
from alembic.config import Config |
|||
from sqlalchemy.orm import Session |
|||
from sqlalchemy_utils import database_exists |
|||
|
|||
import middleware |
|||
from database.database import engine, Base, SessionLocal |
|||
from database import models |
|||
from loguru import logger |
|||
|
|||
|
|||
def perform_migrations():
    """Upgrade the database schema to the latest Alembic revision ('head')."""
    logger.info("Performing migrations...")

    cfg = Config("alembic.ini")
    # Point Alembic at the bundled migration scripts and at the URL of the
    # engine the application itself uses, so both act on the same database.
    cfg.set_main_option('script_location', "migrations")
    cfg.set_main_option('sqlalchemy.url', str(engine.url))

    alembic.command.upgrade(cfg, 'head')
    logger.info("Migrations done!")
|||
|
|||
|
|||
def setup_initial_database():
    """Create the database on first run and seed a default admin user.

    If the configured database does not exist yet, the full schema is
    created from ``Base.metadata``. When no user with role "admin" exists,
    one is created from the ADMIN_USERNAME / ADMIN_PASSWORD environment
    variables.

    Raises:
        RuntimeError: if the database must be created but ADMIN_USERNAME or
            ADMIN_PASSWORD is missing from the environment.
    """
    if not database_exists(engine.url):
        logger.info("Database does not exists. Creating initial database...")
        # Create database from metadata
        Base.metadata.create_all(engine)
        logger.info("Database creation done!")

        # Create default user
        _db: Session = SessionLocal()
        try:
            admin_exists = (
                _db.query(models.User.id)
                .filter_by(role="admin")
                .first()
            ) is not None

            if not admin_exists:
                logger.info("Admin user does not exists. Creating with env variables ADMIN_USERNAME, ADMIN_PASSWORD")
                ADMIN_USERNAME = os.getenv("ADMIN_USERNAME")
                ADMIN_PASSWORD = os.getenv("ADMIN_PASSWORD")

                # Fail fast with an accurate message when credentials are
                # missing (the original messages said "is set", inverting
                # the actual condition).
                if not ADMIN_USERNAME:
                    raise RuntimeError("Database does not exist and the environment variable ADMIN_USERNAME is not set")
                if not ADMIN_PASSWORD:
                    raise RuntimeError("Database does not exist and the environment variable ADMIN_PASSWORD is not set")

                _db.merge(models.User(
                    username=ADMIN_USERNAME,
                    password=middleware.get_password_hash(ADMIN_PASSWORD),
                    full_name="Admin",
                    role="admin",
                    email=""
                ))

            _db.commit()
        finally:
            # Always release the session, even if seeding fails.
            _db.close()
@ -1,4 +1 @@ |
|||
This is a database migration repository. |
|||
|
|||
More information at |
|||
http://code.google.com/p/sqlalchemy-migrate/ |
|||
Generic single-database configuration. |
@ -0,0 +1,83 @@ |
|||
from logging.config import fileConfig |
|||
|
|||
from sqlalchemy import engine_from_config |
|||
from sqlalchemy import pool |
|||
|
|||
from alembic import context |
|||
|
|||
# this is the Alembic Config object, which provides |
|||
# access to the values within the .ini file in use. |
|||
import database.models |
|||
import database.database |
|||
|
|||
# Alembic Config object: access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging; sets up the loggers.
fileConfig(config.config_file_name)

# Metadata of the application's declarative base — autogenerate diffs the
# live database against this.
target_metadata = database.database.Base.metadata
|||
|
|||
# other values from the config, defined by the needs of env.py, |
|||
# can be acquired: |
|||
# my_important_option = config.get_main_option("my_important_option") |
|||
# ... etc. |
|||
|
|||
|
|||
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Configures the context with just a database URL rather than a live
    Engine, so no DBAPI needs to be available; calls to context.execute()
    emit the generated SQL to the script output instead of a connection.
    """
    context.configure(
        url=config.get_main_option("sqlalchemy.url"),
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
        render_as_batch=False,
    )

    with context.begin_transaction():
        context.run_migrations()
|||
|
|||
|
|||
def run_migrations_online():
    """Run migrations in 'online' mode.

    Builds an Engine from the [alembic] section of the config and binds a
    live connection to the migration context.
    """
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )

    with connectable.connect() as conn:
        context.configure(
            connection=conn,
            target_metadata=target_metadata,
            render_as_batch=False,
        )

        with context.begin_transaction():
            context.run_migrations()
|||
|
|||
|
|||
# Entry point: Alembic imports this module and we dispatch on the mode.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
@ -1,5 +0,0 @@ |
|||
#!/usr/bin/env python
"""CLI entry point for sqlalchemy-migrate's versioning shell."""
from migrate.versioning.shell import main

if __name__ == '__main__':
    # debug is passed as the string 'False'; sqlalchemy-migrate parses it.
    main(debug='False')
@ -1,25 +0,0 @@ |
|||
[db_settings] |
|||
# Used to identify which repository this database is versioned under. |
|||
# You can use the name of your project. |
|||
repository_id=wg-manager |
|||
|
|||
# The name of the database table used to track the schema version. |
|||
# This name shouldn't already be used by your project. |
|||
# If this is changed once a database is under version control, you'll need to |
|||
# change the table name in each database too. |
|||
version_table=migrate_version |
|||
|
|||
# When committing a change script, Migrate will attempt to generate the |
|||
# sql for all supported databases; normally, if one of them fails - probably |
|||
# because you don't have that database installed - it is ignored and the |
|||
# commit continues, perhaps ending successfully. |
|||
# Databases in this list MUST compile successfully during a commit, or the |
|||
# entire commit will fail. List the databases your application will actually |
|||
# be using to ensure your updates to that database work properly. |
|||
# This must be a list; example: ['postgres','sqlite'] |
|||
required_dbs=[] |
|||
|
|||
# When creating new change scripts, Migrate will stamp the new script with |
|||
# a version number. By default this is latest_version + 1. You can set this |
|||
# to 'true' to tell Migrate to use the UTC timestamp instead. |
|||
use_timestamp_numbering=False |
@ -0,0 +1,24 @@ |
|||
"""${message} |
|||
|
|||
Revision ID: ${up_revision} |
|||
Revises: ${down_revision | comma,n} |
|||
Create Date: ${create_date} |
|||
|
|||
""" |
|||
from alembic import op |
|||
import sqlalchemy as sa |
|||
${imports if imports else ""} |
|||
|
|||
# revision identifiers, used by Alembic. |
|||
revision = ${repr(up_revision)} |
|||
down_revision = ${repr(down_revision)} |
|||
branch_labels = ${repr(branch_labels)} |
|||
depends_on = ${repr(depends_on)} |
|||
|
|||
|
|||
def upgrade(): |
|||
${upgrades if upgrades else "pass"} |
|||
|
|||
|
|||
def downgrade(): |
|||
${downgrades if downgrades else "pass"} |
@ -1,20 +0,0 @@ |
|||
from sqlalchemy import Table, MetaData, String, Column, Text |
|||
|
|||
|
|||
def upgrade(migrate_engine):
    """Add a nullable 'dns' text column to the 'peer' table."""
    try:
        meta = MetaData(bind=migrate_engine)
        peer = Table('peer', meta, autoload=True)
        dns = Column('dns', Text)
        dns.create(peer)
    except Exception:
        # Best effort: the column may already exist on re-run. Narrowed
        # from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
        pass
|||
|
|||
|
|||
def downgrade(migrate_engine):
    """Drop the 'dns' column from 'peer' (reverse of upgrade)."""
    try:
        meta = MetaData(bind=migrate_engine)
        peer = Table('peer', meta, autoload=True)
        # Original dropped the unrelated 'email' column here; the column
        # added by upgrade() is 'dns', so that is what must be removed.
        peer.c.dns.drop()
    except Exception:
        pass
@ -1,21 +0,0 @@ |
|||
from sqlalchemy import * |
|||
from migrate import * |
|||
|
|||
|
|||
def upgrade(migrate_engine):
    """Drop the obsolete 'shared_key' column from the 'server' table."""
    try:
        meta = MetaData(bind=migrate_engine)
        server = Table('server', meta, autoload=True)
        server.c.shared_key.drop()
    except Exception:
        # Best effort: the column may already be gone; narrowed from bare except.
        pass
|||
|
|||
|
|||
def downgrade(migrate_engine):
    """Re-create the 'shared_key' text column on the 'server' table."""
    try:
        meta = MetaData(bind=migrate_engine)
        server = Table('server', meta, autoload=True)
        shared_key = Column('shared_key', Text)
        shared_key.create(server)
    except Exception:
        # Best effort: the column may already exist; narrowed from bare except.
        pass
@ -1,21 +0,0 @@ |
|||
from sqlalchemy import * |
|||
from migrate import * |
|||
|
|||
|
|||
def upgrade(migrate_engine):
    """Add a 'shared_key' text column to the 'peer' table."""
    try:
        meta = MetaData(bind=migrate_engine)
        peer = Table('peer', meta, autoload=True)
        shared_key = Column('shared_key', Text)
        shared_key.create(peer)
    except Exception:
        # Best effort: the column may already exist; narrowed from bare except.
        pass
|||
|
|||
|
|||
def downgrade(migrate_engine):
    """Drop the 'shared_key' column from 'peer' (reverse of upgrade)."""
    try:
        meta = MetaData(bind=migrate_engine)
        peer = Table('peer', meta, autoload=True)
        peer.c.shared_key.drop()
    except Exception:
        # Best effort: the column may already be gone; narrowed from bare except.
        pass
@ -1,21 +0,0 @@ |
|||
from sqlalchemy import * |
|||
from migrate import * |
|||
|
|||
|
|||
def upgrade(migrate_engine):
    """Add a non-nullable integer 'subnet' column to the 'server' table."""
    try:
        meta = MetaData(bind=migrate_engine)
        server = Table('server', meta, autoload=True)
        subnet = Column('subnet', Integer, nullable=False)
        subnet.create(server)
    except Exception:
        # Best effort: the column may already exist; narrowed from bare except.
        pass
|||
|
|||
|
|||
def downgrade(migrate_engine):
    """Drop the 'subnet' column from 'server' (reverse of upgrade)."""
    try:
        meta = MetaData(bind=migrate_engine)
        server = Table('server', meta, autoload=True)
        server.c.subnet.drop()
    except Exception:
        # Best effort: the column may already be gone; narrowed from bare except.
        pass
@ -1,32 +0,0 @@ |
|||
from sqlalchemy import * |
|||
from migrate import * |
|||
|
|||
|
|||
def upgrade(migrate_engine):
    """Add nullable 'v6_address' columns to both 'server' and 'peer'."""
    try:
        meta = MetaData(bind=migrate_engine)
        server = Table('server', meta, autoload=True)
        v6_address_server = Column('v6_address', VARCHAR, unique=True, nullable=True)
        v6_address_server.create(server)

        meta = MetaData(bind=migrate_engine)
        peer = Table('peer', meta, autoload=True)
        v6_address_peer = Column('v6_address', VARCHAR, nullable=True)
        v6_address_peer.create(peer)
    except Exception:
        # Best effort; note both additions share one try, so a failure on
        # 'server' also skips the 'peer' column. Narrowed from bare except.
        pass
|||
|
|||
|
|||
def downgrade(migrate_engine):
    """Drop the 'v6_address' columns from 'server' and 'peer'."""
    try:
        meta = MetaData(bind=migrate_engine)
        server = Table('server', meta, autoload=True)
        server.c.v6_address.drop()

        meta = MetaData(bind=migrate_engine)
        peer = Table('peer', meta, autoload=True)
        peer.c.v6_address.drop()
    except Exception:
        # Best effort: columns may already be gone; narrowed from bare except.
        pass
|||
|
|||
|
@ -1,21 +0,0 @@ |
|||
from sqlalchemy import * |
|||
from migrate import * |
|||
|
|||
|
|||
def upgrade(migrate_engine):
    """Add an integer 'v6_subnet' column to the 'server' table."""
    try:
        meta = MetaData(bind=migrate_engine)
        # Renamed from the misleading local name 'peer': this loads 'server'.
        server = Table('server', meta, autoload=True)
        v6_subnet = Column('v6_subnet', INTEGER)
        v6_subnet.create(server)
    except Exception:
        # Best effort: the column may already exist; narrowed from bare except.
        pass
|||
|
|||
|
|||
def downgrade(migrate_engine):
    """Drop the 'v6_subnet' column from 'server' (reverse of upgrade)."""
    try:
        meta = MetaData(bind=migrate_engine)
        # Renamed from the misleading local name 'peer': this loads 'server'.
        server = Table('server', meta, autoload=True)
        server.c.v6_subnet.drop()
    except Exception:
        # Best effort: the column may already be gone; narrowed from bare except.
        pass
@ -1,34 +0,0 @@ |
|||
from sqlalchemy import * |
|||
from migrate import * |
|||
|
|||
|
|||
def upgrade(migrate_engine):
    """Add an integer 'read_only' column (default 0) to 'server' and 'peer'.

    The two additions use independent try blocks so a failure on one table
    does not skip the other.
    """
    try:
        meta = MetaData(bind=migrate_engine)
        server = Table('server', meta, autoload=True)
        read_only = Column('read_only', INTEGER, default=0)
        read_only.create(server)
    except Exception:
        # Best effort: the column may already exist; narrowed from bare except.
        pass

    try:
        meta = MetaData(bind=migrate_engine)
        peer = Table('peer', meta, autoload=True)
        read_only = Column('read_only', INTEGER, default=0)
        read_only.create(peer)
    except Exception:
        pass
|||
|
|||
def downgrade(migrate_engine):
    """Drop the 'read_only' column from 'server' and 'peer'."""
    try:
        meta = MetaData(bind=migrate_engine)
        server = Table('server', meta, autoload=True)
        server.c.read_only.drop()
    except Exception:
        # Best effort: the column may already be gone; narrowed from bare except.
        pass
    try:
        meta = MetaData(bind=migrate_engine)
        # Renamed from the misleading local name 'server': this loads 'peer'.
        peer = Table('peer', meta, autoload=True)
        peer.c.read_only.drop()
    except Exception:
        pass
@ -1,21 +0,0 @@ |
|||
from sqlalchemy import * |
|||
from migrate import * |
|||
|
|||
|
|||
def upgrade(migrate_engine):
    """Add a text 'allowed_ips' column to the 'server' table."""
    try:
        meta = MetaData(bind=migrate_engine)
        server = Table('server', meta, autoload=True)
        allowed_ips = Column('allowed_ips', Text)
        allowed_ips.create(server)
    except Exception:
        # Best effort: the column may already exist; narrowed from bare except.
        pass
|||
|
|||
|
|||
def downgrade(migrate_engine):
    """Drop the 'allowed_ips' column from 'server' (reverse of upgrade)."""
    try:
        meta = MetaData(bind=migrate_engine)
        server = Table('server', meta, autoload=True)
        server.c.allowed_ips.drop()
    except Exception:
        # Best effort: the column may already be gone; narrowed from bare except.
        pass
@ -1,21 +0,0 @@ |
|||
from sqlalchemy import * |
|||
from migrate import * |
|||
|
|||
|
|||
def upgrade(migrate_engine):
    """Add an integer 'keep_alive' column to the 'server' table."""
    try:
        meta = MetaData(bind=migrate_engine)
        server = Table('server', meta, autoload=True)
        keep_alive = Column('keep_alive', Integer)
        keep_alive.create(server)
    except Exception:
        # Best effort: the column may already exist; narrowed from bare except.
        pass
|||
|
|||
|
|||
def downgrade(migrate_engine):
    """Drop the 'keep_alive' column from 'server' (reverse of upgrade)."""
    try:
        meta = MetaData(bind=migrate_engine)
        server = Table('server', meta, autoload=True)
        server.c.keep_alive.drop()
    except Exception:
        # Best effort: the column may already be gone; narrowed from bare except.
        pass
@ -0,0 +1,117 @@ |
|||
"""base |
|||
|
|||
Revision ID: 4ac3e58519eb |
|||
Revises: |
|||
Create Date: 2021-03-13 20:29:10.062757 |
|||
|
|||
""" |
|||
from alembic import op |
|||
import sqlalchemy as sa |
|||
|
|||
|
|||
# revision identifiers, used by Alembic. |
|||
from sqlalchemy.exc import OperationalError |
|||
|
|||
revision = '4ac3e58519eb' |
|||
down_revision = None |
|||
branch_labels = None |
|||
depends_on = None |
|||
|
|||
def ignore_duplicate(fn):
    """Call fn(), suppressing OperationalErrors caused by duplicate columns.

    The original version caught OperationalError and only *checked* for
    "duplicate" without re-raising otherwise, so every OperationalError was
    silently swallowed. Non-duplicate errors are now re-raised so real
    migration failures surface.
    """
    try:
        fn()
    except OperationalError as e:
        if "duplicate" not in str(e):
            raise
|||
|
|||
def upgrade():
    """Bring legacy (sqlalchemy-migrate era) databases up to the current schema.

    Every step is guarded so the migration is idempotent on databases that
    already contain some of the tables/columns.
    """
    # New per-user API-key table; skipped if it already exists.
    try:
        op.create_table('api_keys',
        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('key', sa.String(), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('created_date', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], name='fk_user_api_key_user_id', onupdate='CASCADE', ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('key')
        )
    except OperationalError:
        pass

    # Remove sqlalchemy-migrate's bookkeeping table if present.
    try:
        op.drop_table('migrate_version')
    except OperationalError:
        pass

    # Re-create the peer -> server FK with CASCADE semantics. The explicit
    # naming convention lets batch mode locate the previously unnamed
    # constraint by its conventional name.
    fk_convention = {
        "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    }
    with op.batch_alter_table("peer", naming_convention=fk_convention) as batch_op:
        batch_op.drop_constraint("fk_peer_server_id_server", type_="foreignkey")

    with op.batch_alter_table('peer', schema=None) as batch_op:
        batch_op.create_foreign_key('fk_peer_server_id_server', 'server', ['server_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')

    # Columns added to 'peer' over time; duplicates are ignored.
    for col in (
        sa.Column('configuration', sa.Text(), nullable=True),
        sa.Column('keep_alive', sa.Integer(), nullable=True),
        sa.Column('read_only', sa.Integer(), nullable=True),
        sa.Column('server_id', sa.Integer(), nullable=True),
        sa.Column('shared_key', sa.Text(), nullable=True),
        sa.Column('v6_address', sa.String(), nullable=True),
    ):
        ignore_duplicate(lambda col=col: op.add_column('peer', col))

    # Drop the legacy 'peer.server' column if still present.
    try:
        with op.batch_alter_table('peer', schema=None) as batch_op:
            batch_op.drop_column("server")
    except KeyError:
        pass

    # Columns added to 'server' over time; duplicates are ignored.
    for col in (
        sa.Column('allowed_ips', sa.String(), nullable=True),
        sa.Column('configuration', sa.Text(), nullable=True),
        sa.Column('dns', sa.String(), nullable=True),
        sa.Column('keep_alive', sa.Integer(), nullable=True),
        sa.Column('read_only', sa.Integer(), nullable=True),
        sa.Column('subnet', sa.Integer(), nullable=False),
        sa.Column('v6_address', sa.String(), nullable=True),
        sa.Column('v6_subnet', sa.Integer(), nullable=False),
    ):
        ignore_duplicate(lambda col=col: op.add_column('server', col))

    # Drop the legacy 'server.shared_key' column if still present.
    try:
        with op.batch_alter_table('server', schema=None) as batch_op:
            batch_op.drop_column("shared_key")
    except KeyError:
        pass
|||
|
|||
|
|||
def downgrade():
    """Best-effort reverse of upgrade(): restore the pre-Alembic schema.

    Statement order matches the original autogenerated script exactly.
    """
    # server: restore shared_key, then drop every column upgrade() added.
    op.add_column('server', sa.Column('shared_key', sa.VARCHAR(), nullable=True))
    op.drop_constraint(None, 'server', type_='unique')
    for name in ('v6_subnet', 'v6_address', 'subnet', 'read_only',
                 'keep_alive', 'dns', 'configuration', 'allowed_ips'):
        op.drop_column('server', name)

    # peer: restore the legacy 'server' column and its original FK.
    op.add_column('peer', sa.Column('server', sa.INTEGER(), nullable=True))
    op.drop_constraint('fk_wg_peer_server_id', 'peer', type_='foreignkey')
    op.create_foreign_key(None, 'peer', 'server', ['server'], ['interface'])
    for name in ('v6_address', 'shared_key', 'server_id',
                 'read_only', 'keep_alive', 'configuration'):
        op.drop_column('peer', name)

    op.drop_table('api_keys')
@ -0,0 +1,34 @@ |
|||
import os |
|||
import typing |
|||
|
|||
from sqlalchemy.orm import Session |
|||
|
|||
import const |
|||
from database import models |
|||
from database.database import SessionLocal |
|||
from db.api_key import add_initial_api_key_for_admin |
|||
from db.wireguard import server_add_on_init |
|||
from script.wireguard import is_installed, start_interface, is_running, load_environment_clients |
|||
|
|||
|
|||
def setup_on_start():
    """Restore runtime state when the application boots.

    Re-starts WireGuard interfaces for servers previously marked running,
    then applies optional environment-driven setup: client list, initial
    server interface, and a startup API key for the admin user.
    """
    _db: Session = SessionLocal()
    try:
        servers: typing.List[models.WGServer] = _db.query(models.WGServer).all()
        for s in servers:
            try:
                last_state = s.is_running
                # NOTE(review): this starts the interface only when
                # is_running(s) already reports it running — confirm the
                # condition is not meant to be `not is_running(s)`.
                if is_installed() and last_state and is_running(s):
                    start_interface(s)
            except Exception as e:
                # Keep booting even if a single interface fails to start.
                print(e)

        if const.CLIENT:
            load_environment_clients(_db)

        if const.SERVER_INIT_INTERFACE is not None:
            server_add_on_init(_db)

        if const.SERVER_STARTUP_API_KEY is not None:
            ADMIN_USERNAME = os.getenv("ADMIN_USERNAME")
            add_initial_api_key_for_admin(_db, const.SERVER_STARTUP_API_KEY, ADMIN_USERNAME)
    finally:
        # Always release the session, even if a startup task raises.
        _db.close()
Loading…
Reference in new issue