
* Fix for #85 #86

pull/109/head
Per-Arne Andersen, 4 years ago
commit 8fb6626df8
Changed files (lines changed per file):

  1. wg_dashboard_backend/alembic.ini (89)
  2. wg_dashboard_backend/database.py (13)
  3. wg_dashboard_backend/database/__init__.py (0)
  4. wg_dashboard_backend/database/database.py (21)
  5. wg_dashboard_backend/database/models.py (2)
  6. wg_dashboard_backend/database/util.py (59)
  7. wg_dashboard_backend/db/api_key.py (2)
  8. wg_dashboard_backend/db/user.py (3)
  9. wg_dashboard_backend/db/wireguard.py (6)
  10. wg_dashboard_backend/main.py (99)
  11. wg_dashboard_backend/middleware.py (5)
  12. wg_dashboard_backend/migrations/README (5)
  13. wg_dashboard_backend/migrations/env.py (83)
  14. wg_dashboard_backend/migrations/manage.py (5)
  15. wg_dashboard_backend/migrations/migrate.cfg (25)
  16. wg_dashboard_backend/migrations/script.py.mako (24)
  17. wg_dashboard_backend/migrations/versions/001_add_dns_column.py (20)
  18. wg_dashboard_backend/migrations/versions/002_remove_server_shared_key.py (21)
  19. wg_dashboard_backend/migrations/versions/003_create_client_shared_key.py (21)
  20. wg_dashboard_backend/migrations/versions/004_create_server_subnet.py (21)
  21. wg_dashboard_backend/migrations/versions/005_create_v6_address.py (32)
  22. wg_dashboard_backend/migrations/versions/006_create_v6_subnet.py (21)
  23. wg_dashboard_backend/migrations/versions/007_create_read_only_client.py (34)
  24. wg_dashboard_backend/migrations/versions/008_create_allowed_ips.py (21)
  25. wg_dashboard_backend/migrations/versions/009_create_keep_alive.py (21)
  26. wg_dashboard_backend/migrations/versions/4ac3e58519eb_base.py (117)
  27. wg_dashboard_backend/migrations/versions/__init__.py (0)
  28. wg_dashboard_backend/requirements.txt (2)
  29. wg_dashboard_backend/routers/v1/peer.py (3)
  30. wg_dashboard_backend/routers/v1/server.py (6)
  31. wg_dashboard_backend/routers/v1/user.py (4)
  32. wg_dashboard_backend/schemas.py (2)
  33. wg_dashboard_backend/script/wireguard.py (7)
  34. wg_dashboard_backend/script/wireguard_startup.py (34)

wg_dashboard_backend/alembic.ini (89 lines changed)

@@ -0,0 +1,89 @@
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = migrations
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory.
prepend_sys_path = .
# timezone to use when rendering the date
# within the migration file as well as the filename.
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
# timezone =
# max length of characters to apply to the
# "slug" field
# truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; this defaults
# to alembic/versions. When using multiple version
# directories, initial revisions must be specified with --version-path
# version_locations = %(here)s/bar %(here)s/bat alembic/versions
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
sqlalchemy.url = sqlite:///database.db
[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples
# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks=black
# black.type=console_scripts
# black.entrypoint=black
# black.options=-l 79
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
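
For context, here is a minimal sketch (not part of this commit) of how the backend can drive Alembic against this alembic.ini programmatically; it mirrors database/util.py further down, and the sqlite URL is simply the default from the config above:

    # Hedged sketch: run migrations from Python using the alembic.ini above.
    import alembic.command
    from alembic.config import Config

    cfg = Config("alembic.ini")                                      # picks up script_location = migrations
    cfg.set_main_option("sqlalchemy.url", "sqlite:///database.db")   # override here for other databases
    alembic.command.upgrade(cfg, "head")                             # apply all pending revisions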

wg_dashboard_backend/database.py (13 lines changed)

@@ -1,13 +0,0 @@
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import const
engine = sqlalchemy.create_engine(
const.DATABASE_URL, connect_args={"check_same_thread": False}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()

wg_dashboard_backend/migrations/__init__.py → wg_dashboard_backend/database/__init__.py (renamed, 0 lines changed)

wg_dashboard_backend/database/database.py (21 lines changed)

@@ -0,0 +1,21 @@
import sqlalchemy
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import const
engine = sqlalchemy.create_engine(
const.DATABASE_URL, connect_args={"check_same_thread": False}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
meta = MetaData(naming_convention={
"ix": "ix_%(column_0_label)s",
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(column_0_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
})
Base = declarative_base(metadata=meta)
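
The naming_convention above gives constraints deterministic names, which the Alembic base revision below relies on when it drops "fk_peer_server_id_server" inside batch_alter_table. A self-contained illustration (assumed, not part of the commit; Server and Peer here are stand-ins for the real models):

    # Hedged illustration: the convention names the peer -> server foreign key deterministically.
    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.schema import CreateTable

    meta = sa.MetaData(naming_convention={
        "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
        "pk": "pk_%(table_name)s",
    })
    Base = declarative_base(metadata=meta)

    class Server(Base):
        __tablename__ = "server"
        id = sa.Column(sa.Integer, primary_key=True)

    class Peer(Base):
        __tablename__ = "peer"
        id = sa.Column(sa.Integer, primary_key=True)
        server_id = sa.Column(sa.Integer, sa.ForeignKey("server.id"))

    engine = sa.create_engine("sqlite://")
    # The emitted DDL contains: CONSTRAINT fk_peer_server_id_server FOREIGN KEY(server_id) REFERENCES server (id)
    print(CreateTable(Peer.__table__).compile(engine))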

wg_dashboard_backend/models.py → wg_dashboard_backend/database/models.py (2 lines changed)

@@ -4,7 +4,7 @@ import sqlalchemy
from sqlalchemy import Integer, Column, DateTime
from sqlalchemy.orm import relationship, backref
from database import Base
from database.database import Base
class User(Base):

wg_dashboard_backend/database/util.py (59 lines changed)

@@ -0,0 +1,59 @@
import os
import alembic.command
from alembic.config import Config
from sqlalchemy.orm import Session
from sqlalchemy_utils import database_exists
import middleware
from database.database import engine, Base, SessionLocal
from database import models
from loguru import logger
def perform_migrations():
logger.info("Performing migrations...")
alembic_cfg = Config("alembic.ini")
alembic_cfg.set_main_option('script_location', "migrations")
alembic_cfg.set_main_option('sqlalchemy.url', str(engine.url))
alembic.command.upgrade(alembic_cfg, 'head')
logger.info("Migrations done!")
def setup_initial_database():
if not database_exists(engine.url):
logger.info("Database does not exist. Creating initial database...")
# Create database from metadata
Base.metadata.create_all(engine)
logger.info("Database creation done!")
# Create default user
_db: Session = SessionLocal()
admin_exists = (
_db.query(models.User.id)
.filter_by(role="admin")
.first()
) is not None
if not admin_exists:
logger.info("Admin user does not exist. Creating it with env variables ADMIN_USERNAME, ADMIN_PASSWORD")
ADMIN_USERNAME = os.getenv("ADMIN_USERNAME")
ADMIN_PASSWORD = os.getenv("ADMIN_PASSWORD")
if not ADMIN_USERNAME:
raise RuntimeError("Database does not exist and the environment variable ADMIN_USERNAME is not set")
if not ADMIN_PASSWORD:
raise RuntimeError("Database does not exist and the environment variable ADMIN_PASSWORD is not set")
_db.merge(models.User(
username=ADMIN_USERNAME,
password=middleware.get_password_hash(ADMIN_PASSWORD),
full_name="Admin",
role="admin",
email=""
))
_db.commit()
_db.close()
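
A hedged usage sketch (assuming it is run from wg_dashboard_backend/ with admin credentials exported; the values below are placeholders), matching how main.py's __main__ block further down calls these helpers:

    # First run: create the schema and default admin user, then upgrade to the latest revision.
    import os
    import database.util

    os.environ.setdefault("ADMIN_USERNAME", "admin")       # placeholder credentials for illustration
    os.environ.setdefault("ADMIN_PASSWORD", "change-me")

    database.util.setup_initial_database()   # creates tables and the admin user if missing
    database.util.perform_migrations()       # runs "alembic upgrade head" programmatically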

wg_dashboard_backend/db/api_key.py (2 lines changed)

@@ -1,6 +1,6 @@
from sqlalchemy.orm import Session
import models
from database import models
def add_initial_api_key_for_admin(sess: Session, api_key, ADMIN_USERNAME):

wg_dashboard_backend/db/user.py (3 lines changed)

@@ -1,8 +1,7 @@
from typing import Optional
from sqlalchemy.orm import Session
import models
from passlib.context import CryptContext
from database import models
import schemas

wg_dashboard_backend/db/wireguard.py (6 lines changed)

@@ -8,10 +8,8 @@ from starlette.exceptions import HTTPException
import const
import script.wireguard
from sqlalchemy import exists
from sqlalchemy.orm import Session, joinedload
import util
import models
from sqlalchemy.orm import Session
from database import models
import schemas
import logging

wg_dashboard_backend/main.py (99 lines changed)

@@ -1,99 +1,22 @@
import logging
import os
import const
import time
import typing
from sqlalchemy_utils import database_exists
from starlette.middleware.base import BaseHTTPMiddleware
import const
import db.wireguard
import db.api_key
import middleware
from database import engine, SessionLocal
from routers.v1 import user, server, peer, wg
import script.wireguard
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
if not logger.hasHandlers():
sh = logging.StreamHandler()
fmt = logging.Formatter(fmt="%(asctime)s %(name)-12s %(levelname)-8s %(message)s")
sh.setFormatter(fmt)
logger.addHandler(sh)
import script.wireguard_startup
import pkg_resources
import uvicorn as uvicorn
from fastapi.staticfiles import StaticFiles
from sqlalchemy.orm import Session
from starlette.responses import FileResponse
from fastapi import Depends, FastAPI
from const import DATABASE_URL
from migrate import DatabaseAlreadyControlledError
from migrate.versioning.shell import main
import models
# Sleep the wait timer.
time.sleep(const.INIT_SLEEP)
import database.util
app = FastAPI()
app.add_middleware(BaseHTTPMiddleware, dispatch=middleware.db_session_middleware)
_db: Session = SessionLocal()
# Ensure database existence
if not database_exists(engine.url):
ADMIN_USERNAME = os.getenv("ADMIN_USERNAME")
if not ADMIN_USERNAME:
raise RuntimeError("Database does not exist and the environment variable ADMIN_USERNAME is set")
ADMIN_PASSWORD = os.getenv("ADMIN_PASSWORD")
if not ADMIN_PASSWORD:
raise RuntimeError("Database does not exist and the environment variable ADMIN_PASSWORD is set")
# Create database from metadata
models.Base.metadata.create_all(engine)
# Create default user
_db.merge(models.User(
username=ADMIN_USERNAME,
password=middleware.get_password_hash(ADMIN_PASSWORD),
full_name="Admin",
role="admin",
email=""
))
_db.commit()
# Do migrations
try:
main(["version_control", DATABASE_URL, "migrations"])
except DatabaseAlreadyControlledError:
pass
main(["upgrade", DATABASE_URL, "migrations"])
servers: typing.List[models.WGServer] = _db.query(models.WGServer).all()
for s in servers:
try:
last_state = s.is_running
if script.wireguard.is_installed() and last_state and not script.wireguard.is_running(s):
script.wireguard.start_interface(s)
except Exception as e:
print(e)
if const.CLIENT:
script.wireguard.load_environment_clients(_db)
if const.SERVER_INIT_INTERFACE is not None:
db.wireguard.server_add_on_init(_db)
if const.SERVER_STARTUP_API_KEY is not None:
ADMIN_USERNAME = os.getenv("ADMIN_USERNAME")
db.api_key.add_initial_api_key_for_admin(_db, const.SERVER_STARTUP_API_KEY, ADMIN_USERNAME)
_db.close()
# Configure web routers
app.include_router(
user.router,
prefix="/api/v1",
@@ -149,4 +72,16 @@ async def shutdown():
if __name__ == "__main__":
# Sleep the wait timer.
time.sleep(const.INIT_SLEEP)
# Ensure database existence
database.util.setup_initial_database()
# Perform Migrations
database.util.perform_migrations()
# Configure wireguard
script.wireguard_startup.setup_on_start()
uvicorn.run("__main__:app", reload=True)

wg_dashboard_backend/middleware.py (5 lines changed)

@@ -11,10 +11,9 @@ from starlette.requests import Request
from starlette.responses import Response
import const
import models
import schemas
from database import SessionLocal
import db.user
from database import models
from database.database import SessionLocal
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/v1/login", auto_error=False)
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")

wg_dashboard_backend/migrations/README (5 lines changed)

@@ -1,4 +1 @@
This is a database migration repository.
More information at
http://code.google.com/p/sqlalchemy-migrate/
Generic single-database configuration.

wg_dashboard_backend/migrations/env.py (83 lines changed)

@@ -0,0 +1,83 @@
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
import database.models
import database.database
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = database.database.Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
render_as_batch=False
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata,
render_as_batch=False
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
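
Because env.py points target_metadata at database.database.Base.metadata, new revisions can be autogenerated from model changes. A minimal sketch (not part of the commit; the message text is illustrative):

    # Hedged sketch: autogenerate a revision by diffing the models against the database.
    import alembic.command
    from alembic.config import Config

    cfg = Config("alembic.ini")
    alembic.command.revision(cfg, message="describe the schema change", autogenerate=True)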

wg_dashboard_backend/migrations/manage.py (5 lines changed)

@@ -1,5 +0,0 @@
#!/usr/bin/env python
from migrate.versioning.shell import main
if __name__ == '__main__':
main(debug='False')

wg_dashboard_backend/migrations/migrate.cfg (25 lines changed)

@@ -1,25 +0,0 @@
[db_settings]
# Used to identify which repository this database is versioned under.
# You can use the name of your project.
repository_id=wg-manager
# The name of the database table used to track the schema version.
# This name shouldn't already be used by your project.
# If this is changed once a database is under version control, you'll need to
# change the table name in each database too.
version_table=migrate_version
# When committing a change script, Migrate will attempt to generate the
# sql for all supported databases; normally, if one of them fails - probably
# because you don't have that database installed - it is ignored and the
# commit continues, perhaps ending successfully.
# Databases in this list MUST compile successfully during a commit, or the
# entire commit will fail. List the databases your application will actually
# be using to ensure your updates to that database work properly.
# This must be a list; example: ['postgres','sqlite']
required_dbs=[]
# When creating new change scripts, Migrate will stamp the new script with
# a version number. By default this is latest_version + 1. You can set this
# to 'true' to tell Migrate to use the UTC timestamp instead.
use_timestamp_numbering=False

wg_dashboard_backend/migrations/script.py.mako (24 lines changed)

@@ -0,0 +1,24 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}

wg_dashboard_backend/migrations/versions/001_add_dns_column.py (20 lines changed)

@@ -1,20 +0,0 @@
from sqlalchemy import Table, MetaData, String, Column, Text
def upgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
account = Table('peer', meta, autoload=True)
dns = Column('dns', Text)
dns.create(account)
except:
pass
def downgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
dns = Table('peer', meta, autoload=True)
dns.c.email.drop()
except:
pass

wg_dashboard_backend/migrations/versions/002_remove_server_shared_key.py (21 lines changed)

@@ -1,21 +0,0 @@
from sqlalchemy import *
from migrate import *
def upgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
server = Table('server', meta, autoload=True)
server.c.shared_key.drop()
except:
pass
def downgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
account = Table('server', meta, autoload=True)
shared_key = Column('shared_key', Text)
shared_key.create(account)
except:
pass

wg_dashboard_backend/migrations/versions/003_create_client_shared_key.py (21 lines changed)

@@ -1,21 +0,0 @@
from sqlalchemy import *
from migrate import *
def upgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
peer = Table('peer', meta, autoload=True)
shared_key = Column('shared_key', Text)
shared_key.create(peer)
except:
pass
def downgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
dns = Table('peer', meta, autoload=True)
dns.c.shared_key.drop()
except:
pass

wg_dashboard_backend/migrations/versions/004_create_server_subnet.py (21 lines changed)

@@ -1,21 +0,0 @@
from sqlalchemy import *
from migrate import *
def upgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
server = Table('server', meta, autoload=True)
subnet = Column('subnet', Integer, nullable=False)
subnet.create(server)
except:
pass
def downgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
server = Table('server', meta, autoload=True)
server.c.subnet.drop()
except:
pass

wg_dashboard_backend/migrations/versions/005_create_v6_address.py (32 lines changed)

@@ -1,32 +0,0 @@
from sqlalchemy import *
from migrate import *
def upgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
server = Table('server', meta, autoload=True)
v6_address_server = Column('v6_address', VARCHAR, unique=True, nullable=True)
v6_address_server.create(server)
meta = MetaData(bind=migrate_engine)
peer = Table('peer', meta, autoload=True)
v6_address_peer = Column('v6_address', VARCHAR, nullable=True)
v6_address_peer.create(peer)
except:
pass
def downgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
server = Table('server', meta, autoload=True)
server.c.v6_address.drop()
meta = MetaData(bind=migrate_engine)
peer = Table('peer', meta, autoload=True)
peer.c.v6_address.drop()
except:
pass

wg_dashboard_backend/migrations/versions/006_create_v6_subnet.py (21 lines changed)

@@ -1,21 +0,0 @@
from sqlalchemy import *
from migrate import *
def upgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
peer = Table('server', meta, autoload=True)
v6_subnet = Column('v6_subnet', INTEGER)
v6_subnet.create(peer)
except:
pass
def downgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
peer = Table('server', meta, autoload=True)
peer.c.v6_subnet.drop()
except:
pass

wg_dashboard_backend/migrations/versions/007_create_read_only_client.py (34 lines changed)

@@ -1,34 +0,0 @@
from sqlalchemy import *
from migrate import *
def upgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
server = Table('server', meta, autoload=True)
read_only = Column('read_only', INTEGER, default=0)
read_only.create(server)
except:
pass
try:
meta = MetaData(bind=migrate_engine)
peer = Table('peer', meta, autoload=True)
read_only = Column('read_only', INTEGER, default=0)
read_only.create(peer)
except:
pass
def downgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
server = Table('server', meta, autoload=True)
server.c.read_only.drop()
except:
pass
try:
meta = MetaData(bind=migrate_engine)
server = Table('peer', meta, autoload=True)
server.c.read_only.drop()
except:
pass

wg_dashboard_backend/migrations/versions/008_create_allowed_ips.py (21 lines changed)

@@ -1,21 +0,0 @@
from sqlalchemy import *
from migrate import *
def upgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
server = Table('server', meta, autoload=True)
allowed_ips = Column('allowed_ips', Text)
allowed_ips.create(server)
except:
pass
def downgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
server = Table('server', meta, autoload=True)
server.c.allowed_ips.drop()
except:
pass

wg_dashboard_backend/migrations/versions/009_create_keep_alive.py (21 lines changed)

@@ -1,21 +0,0 @@
from sqlalchemy import *
from migrate import *
def upgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
server = Table('server', meta, autoload=True)
keep_alive = Column('keep_alive', Integer)
keep_alive.create(server)
except:
pass
def downgrade(migrate_engine):
try:
meta = MetaData(bind=migrate_engine)
server = Table('server', meta, autoload=True)
server.c.keep_alive.drop()
except:
pass

wg_dashboard_backend/migrations/versions/4ac3e58519eb_base.py (117 lines changed)

@@ -0,0 +1,117 @@
"""base
Revision ID: 4ac3e58519eb
Revises:
Create Date: 2021-03-13 20:29:10.062757
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
from sqlalchemy.exc import OperationalError
revision = '4ac3e58519eb'
down_revision = None
branch_labels = None
depends_on = None
def ignore_duplicate(fn):
try:
fn()
except OperationalError as e:
if "duplicate" in str(e):
pass
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
try:
op.create_table('api_keys',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('key', sa.String(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('created_date', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], name='fk_user_api_key_user_id', onupdate='CASCADE', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('key')
)
except OperationalError:
pass
try:
op.drop_table('migrate_version')
except OperationalError:
pass
naming_convention = {
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
}
with op.batch_alter_table("peer", naming_convention=naming_convention) as batch_op:
batch_op.drop_constraint("fk_peer_server_id_server", type_="foreignkey")
with op.batch_alter_table('peer', schema=None) as batch_op:
batch_op.create_foreign_key('fk_peer_server_id_server', 'server', ['server_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
ignore_duplicate(lambda: op.add_column('peer', sa.Column('configuration', sa.Text(), nullable=True)))
ignore_duplicate(lambda: op.add_column('peer', sa.Column('keep_alive', sa.Integer(), nullable=True)))
ignore_duplicate(lambda: op.add_column('peer', sa.Column('read_only', sa.Integer(), nullable=True)))
ignore_duplicate(lambda: op.add_column('peer', sa.Column('server_id', sa.Integer(), nullable=True)))
ignore_duplicate(lambda: op.add_column('peer', sa.Column('shared_key', sa.Text(), nullable=True)))
ignore_duplicate(lambda: op.add_column('peer', sa.Column('v6_address', sa.String(), nullable=True)))
#op.drop_constraint(None, 'peer', type_='foreignkey')
#
#op.drop_column('peer', 'server')
try:
with op.batch_alter_table('peer', schema=None) as batch_op:
batch_op.drop_column("server")
except KeyError:
pass
ignore_duplicate(lambda: op.add_column('server', sa.Column('allowed_ips', sa.String(), nullable=True)))
ignore_duplicate(lambda: op.add_column('server', sa.Column('configuration', sa.Text(), nullable=True)))
ignore_duplicate(lambda: op.add_column('server', sa.Column('dns', sa.String(), nullable=True)))
ignore_duplicate(lambda: op.add_column('server', sa.Column('keep_alive', sa.Integer(), nullable=True)))
ignore_duplicate(lambda: op.add_column('server', sa.Column('read_only', sa.Integer(), nullable=True)))
ignore_duplicate(lambda: op.add_column('server', sa.Column('subnet', sa.Integer(), nullable=False)))
ignore_duplicate(lambda: op.add_column('server', sa.Column('v6_address', sa.String(), nullable=True)))
ignore_duplicate(lambda: op.add_column('server', sa.Column('v6_subnet', sa.Integer(), nullable=False)))
#op.create_unique_constraint(None, 'server', ['v6_address'])
try:
with op.batch_alter_table('server', schema=None) as batch_op:
batch_op.drop_column("shared_key")
except KeyError:
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('server', sa.Column('shared_key', sa.VARCHAR(), nullable=True))
op.drop_constraint(None, 'server', type_='unique')
op.drop_column('server', 'v6_subnet')
op.drop_column('server', 'v6_address')
op.drop_column('server', 'subnet')
op.drop_column('server', 'read_only')
op.drop_column('server', 'keep_alive')
op.drop_column('server', 'dns')
op.drop_column('server', 'configuration')
op.drop_column('server', 'allowed_ips')
op.add_column('peer', sa.Column('server', sa.INTEGER(), nullable=True))
op.drop_constraint('fk_wg_peer_server_id', 'peer', type_='foreignkey')
op.create_foreign_key(None, 'peer', 'server', ['server'], ['interface'])
op.drop_column('peer', 'v6_address')
op.drop_column('peer', 'shared_key')
op.drop_column('peer', 'server_id')
op.drop_column('peer', 'read_only')
op.drop_column('peer', 'keep_alive')
op.drop_column('peer', 'configuration')
op.drop_table('api_keys')
# ### end Alembic commands ###

wg_dashboard_backend/migrations/versions/__init__.py (0 lines changed)

wg_dashboard_backend/requirements.txt (2 lines changed)

@@ -16,3 +16,5 @@ uvicorn
uvloop
httptools
qrcode[pil]
alembic
loguru

wg_dashboard_backend/routers/v1/peer.py (3 lines changed)

@@ -4,8 +4,7 @@ from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from starlette.responses import PlainTextResponse
import const
import models
from database import models
import schemas
import middleware
import db.wireguard

wg_dashboard_backend/routers/v1/server.py (6 lines changed)

@@ -1,12 +1,8 @@
import tempfile
from os.path import exists
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from starlette.responses import JSONResponse
import const
import models
from database import models
import schemas
import middleware
import db.wireguard

wg_dashboard_backend/routers/v1/user.py (4 lines changed)

@@ -2,7 +2,7 @@ import os
from datetime import timedelta
from fastapi import APIRouter, HTTPException, Depends, Form, Body
from fastapi.responses import PlainTextResponse, JSONResponse
from fastapi.responses import JSONResponse
import typing
from sqlalchemy.orm import Session
from starlette import status
@@ -10,7 +10,7 @@ from binascii import hexlify
import const
import db.user
import middleware
import models
from database import models
import schemas
router = APIRouter()

wg_dashboard_backend/schemas.py (2 lines changed)

@@ -5,7 +5,7 @@ from pydantic import BaseModel, typing
from sqlalchemy.orm import Session, Query
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
import logging
import models
from database import models
_LOGGER = logging.getLogger(__name__)

wg_dashboard_backend/script/wireguard.py (7 lines changed)

@@ -9,13 +9,13 @@ import configparser
from sqlalchemy.orm import Session
import const
import models
import schemas
import os
import re
import ipaddress
import util
from database import SessionLocal
from database import models
from database.database import SessionLocal
_LOGGER = logging.getLogger(__name__)
@@ -390,7 +390,6 @@ def load_environment_clients(sess: Session):
i += 1
if __name__ == "__main__":
os.environ["CLIENT_1_NAME"] = "client-1"
os.environ["CLIENT_1_SERVER_INTERFACE"] = "wg0"
@@ -403,3 +402,5 @@ if __name__ == "__main__":
sess: Session = SessionLocal()
load_environment_clients(sess)
sess.close()

wg_dashboard_backend/script/wireguard_startup.py (34 lines changed)

@@ -0,0 +1,34 @@
import os
import typing
from sqlalchemy.orm import Session
import const
from database import models
from database.database import SessionLocal
from db.api_key import add_initial_api_key_for_admin
from db.wireguard import server_add_on_init
from script.wireguard import is_installed, start_interface, is_running, load_environment_clients
def setup_on_start():
_db: Session = SessionLocal()
servers: typing.List[models.WGServer] = _db.query(models.WGServer).all()
for s in servers:
try:
last_state = s.is_running
if is_installed() and last_state and is_running(s):
start_interface(s)
except Exception as e:
print(e)
if const.CLIENT:
load_environment_clients(_db)
if const.SERVER_INIT_INTERFACE is not None:
server_add_on_init(_db)
if const.SERVER_STARTUP_API_KEY is not None:
ADMIN_USERNAME = os.getenv("ADMIN_USERNAME")
add_initial_api_key_for_admin(_db, const.SERVER_STARTUP_API_KEY, ADMIN_USERNAME)
_db.close()