Per-Arne Andersen
4 years ago
215 changed files with 1048 additions and 22038 deletions
@@ -1,25 +1,46 @@
name: build wg-dashboard
name: build and publish

on:
push:
branches:
- master
- '**'

jobs:
build:
docker:
runs-on: ubuntu-latest
steps:
- name: checkout code

-
name: Checkout
uses: actions/checkout@v2
- name: install buildx
id: buildx
uses: crazy-max/ghaction-docker-buildx@v1
-
name: Set up QEMU
uses: docker/setup-qemu-action@v1
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
-
name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}

-
name: Build and push main
uses: docker/build-push-action@v2
with:
context: .
platforms: linux/amd64,linux/arm64
push: true
tags: perara/wg-manager:latest
if: github.ref == 'refs/heads/main'
-
name: Build and push dev
uses: docker/build-push-action@v2
with:
version: latest
- name: login to docker hub
run: echo "${{ secrets.DOCKER_PASSWORD }}" | docker login -u "${{ secrets.DOCKER_USERNAME }}" --password-stdin
- name: build the image
run: |
docker buildx build --push \
--tag perara/wg-manager:latest \
--platform linux/amd64 .
context: .
platforms: linux/amd64,linux/arm64,linux/arm/v7
push: true
tags: perara/wg-manager:dev
if: github.ref == 'refs/heads/dev'

@@ -0,0 +1,89 @@
# A generic, single database configuration.

[alembic]
# path to migration scripts
script_location = migrations

# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s

# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory.
prepend_sys_path = .

# timezone to use when rendering the date
# within the migration file as well as the filename.
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
# timezone =

# max length of characters to apply to the
# "slug" field
# truncate_slug_length = 40

# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false

# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false

# version location specification; this defaults
# to alembic/versions. When using multiple version
# directories, initial revisions must be specified with --version-path
# version_locations = %(here)s/bar %(here)s/bat alembic/versions

# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8

sqlalchemy.url = sqlite:///database.db


[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples

# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks=black
# black.type=console_scripts
# black.entrypoint=black
# black.options=-l 79

# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARN
handlers = console
qualname =

[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
@@ -0,0 +1,21 @@
import sqlalchemy
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import const

engine = sqlalchemy.create_engine(
    const.DATABASE_URL, connect_args={"check_same_thread": False}
)


SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

meta = MetaData(naming_convention={
    "ix": "ix_%(column_0_label)s",
    "uq": "uq_%(table_name)s_%(column_0_name)s",
    "ck": "ck_%(table_name)s_%(column_0_name)s",
    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    "pk": "pk_%(table_name)s"
})
Base = declarative_base(metadata=meta)
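
Editor's note, not part of this commit: a minimal sketch (assuming SQLAlchemy 1.x, matching the import style above, and a throwaway in-memory SQLite engine) of what the naming_convention buys. It gives every generated constraint a deterministic name, which is what lets the migration later in this diff drop "fk_peer_server_id_server" by name instead of relying on an auto-generated one.

```python
# Sketch only: show the deterministic constraint names produced by the naming
# convention above. Table and column names mirror the models in this diff.
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import CreateTable

meta = sa.MetaData(naming_convention={
    "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    "pk": "pk_%(table_name)s",
})
Base = declarative_base(metadata=meta)


class Server(Base):
    __tablename__ = "server"
    id = sa.Column(sa.Integer, primary_key=True)


class Peer(Base):
    __tablename__ = "peer"
    id = sa.Column(sa.Integer, primary_key=True)
    server_id = sa.Column(sa.Integer, sa.ForeignKey("server.id"))


engine = sa.create_engine("sqlite://")  # throwaway in-memory database
# The emitted DDL contains:
#   CONSTRAINT fk_peer_server_id_server FOREIGN KEY(server_id) REFERENCES server (id)
print(CreateTable(Peer.__table__).compile(engine))
```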
@@ -0,0 +1,62 @@
import contextlib
import os

import alembic.command
from alembic.config import Config
from sqlalchemy.orm import Session
from sqlalchemy_utils import database_exists

import middleware
from database.database import engine, Base, SessionLocal
from database import models
from loguru import logger


def perform_migrations():
    logger.info("Performing migrations...")
    alembic_cfg = Config("alembic.ini")
    alembic_cfg.attributes['configure_logger'] = False

    alembic_cfg.set_main_option('script_location', "migrations")
    alembic_cfg.set_main_option('sqlalchemy.url', str(engine.url))

    alembic.command.upgrade(alembic_cfg, 'head')
    logger.info("Migrations done!")


def setup_initial_database():
    if not database_exists(engine.url):
        logger.info("Database does not exist. Creating initial database...")
        # Create database from metadata
        Base.metadata.create_all(engine)
        logger.info("Database creation done!")

        # Create default user
        _db: Session = SessionLocal()

        admin_exists = (
            _db.query(models.User.id)
            .filter_by(role="admin")
            .first()
        ) is not None

        if not admin_exists:
            logger.info("Admin user does not exist. Creating it from the environment variables ADMIN_USERNAME and ADMIN_PASSWORD")
            ADMIN_USERNAME = os.getenv("ADMIN_USERNAME")
            ADMIN_PASSWORD = os.getenv("ADMIN_PASSWORD")

            if not ADMIN_USERNAME:
                raise RuntimeError("Database does not exist and the environment variable ADMIN_USERNAME is not set")
            if not ADMIN_PASSWORD:
                raise RuntimeError("Database does not exist and the environment variable ADMIN_PASSWORD is not set")

            _db.merge(models.User(
                username=ADMIN_USERNAME,
                password=middleware.get_password_hash(ADMIN_PASSWORD),
                full_name="Admin",
                role="admin",
                email=""
            ))

        _db.commit()
        _db.close()
@@ -1,6 +1,6 @@
from sqlalchemy.orm import Session

import models
from database import models


def add_initial_api_key_for_admin(sess: Session, api_key, ADMIN_USERNAME):
@@ -1,8 +1,7 @@
from typing import Optional

from sqlalchemy.orm import Session
import models
from passlib.context import CryptContext
from database import models

import schemas

@@ -0,0 +1,21 @@

def setup_logging():
    import logging
    from loguru import logger

    class InterceptHandler(logging.Handler):
        def emit(self, record):
            # Get the corresponding Loguru level if it exists
            try:
                level = logger.level(record.levelname).name
            except ValueError:
                level = record.levelno

            # Find the caller from which the logged message originated
            frame, depth = logging.currentframe(), 2
            while frame.f_code.co_filename == logging.__file__:
                frame = frame.f_back
                depth += 1

            logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage())

    logging.basicConfig(handlers=[InterceptHandler()], level=1)
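
Editor's note, not part of this commit: a short usage sketch (assuming loguru is installed and the module above is importable as `logger`) showing the effect of the InterceptHandler — records emitted through the standard library's logging module are re-routed through loguru, so application and third-party logs share one sink and format.

```python
# Usage sketch only. "some.library" is a placeholder logger name.
import logging

from loguru import logger

from logger import setup_logging  # the module added above

setup_logging()

logging.getLogger("some.library").warning("stdlib record")  # forwarded via InterceptHandler
logger.info("native loguru record")                         # emitted directly by loguru
```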
@@ -0,0 +1,11 @@
{
  "logger": {
    "path": "./logs",
    "filename": "access.log",
    "level": "info",
    "rotation": "20 days",
    "retention": "1 months",
    "format": "<level>{level: <8}</level> <green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> {extra[request_id]} - <cyan>{name}</cyan>:<cyan>{function}</cyan> - <level>{message}</level>"

  }
}
@@ -0,0 +1,102 @@
from logger import setup_logging
setup_logging()

import const
from uvicorn_loguru_integration import run_uvicorn_loguru

import time
from starlette.middleware.base import BaseHTTPMiddleware

import middleware

from routers.v1 import user, server, peer, wg
import script.wireguard_startup
import pkg_resources
import uvicorn as uvicorn
from fastapi.staticfiles import StaticFiles

from starlette.responses import FileResponse
from fastapi import Depends, FastAPI
import database.util

app = FastAPI()
app.add_middleware(BaseHTTPMiddleware, dispatch=middleware.db_session_middleware)
app.add_middleware(BaseHTTPMiddleware, dispatch=middleware.logging_middleware)

app.include_router(
    user.router,
    prefix="/api/v1",
    tags=["user"],
    dependencies=[],
    responses={404: {"description": "Not found"}}
)


app.include_router(
    server.router,
    prefix="/api/v1/server",
    tags=["server"],
    dependencies=[Depends(middleware.auth)],
    responses={404: {"description": "Not found"}}
)


app.include_router(
    peer.router,
    prefix="/api/v1/peer",
    tags=["peer"],
    dependencies=[Depends(middleware.auth)],
    responses={404: {"description": "Not found"}}
)


app.include_router(
    wg.router,
    prefix="/api/v1/wg",
    tags=["wg"],
    dependencies=[Depends(middleware.auth)],
    responses={404: {"description": "Not found"}}
)


@app.get("/", include_in_schema=True)
def root():
    return FileResponse('build/index.html')


app.mount("/", StaticFiles(directory=pkg_resources.resource_filename(__name__, 'build')), name="static")


@app.on_event("startup")
async def startup():
    pass


@app.on_event("shutdown")
async def shutdown():
    pass


if __name__ == "__main__":
    # Sleep for the configured INIT_SLEEP interval before starting.
    time.sleep(const.INIT_SLEEP)

    # Ensure database existence
    database.util.setup_initial_database()

    # Perform Migrations
    database.util.perform_migrations()

    # Configure wireguard
    script.wireguard_startup.setup_on_start()

    run_uvicorn_loguru(
        uvicorn.Config(
            "__main__:app",
            host="0.0.0.0",
            port=8000,
            log_level="warning",
            reload=True,
            workers=1
        )
    )
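
Editor's note, not part of this commit: for local debugging one could skip the loguru/uvicorn integration and serve the same ASGI app with plain uvicorn. A sketch, assuming uvicorn is installed and the file above is importable as `main`.

```python
# Sketch only: plain uvicorn equivalent of the run_uvicorn_loguru(...) call above,
# without routing uvicorn's log output through loguru.
import uvicorn

if __name__ == "__main__":
    uvicorn.run("main:app", host="127.0.0.1", port=8000, log_level="info", reload=True)
```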
@@ -0,0 +1 @@
Generic single-database configuration.
@@ -0,0 +1,84 @@
from logging.config import fileConfig

from sqlalchemy import engine_from_config
from sqlalchemy import pool

from alembic import context

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
import database.models
import database.database

config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.attributes.get('configure_logger', True):
    fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = database.database.Base.metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.

    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
        render_as_batch=False
    )

    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )

    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            render_as_batch=False
        )

        with context.begin_transaction():
            context.run_migrations()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
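
Editor's note, not part of this commit: the same env.py serves both modes above. A sketch (assuming Alembic is installed and run from the directory containing alembic.ini and migrations/) of driving each mode programmatically, as perform_migrations() in database.util does for the online path.

```python
# Sketch only: programmatic upgrades through the Alembic config added in this commit.
import alembic.command
from alembic.config import Config

cfg = Config("alembic.ini")
cfg.set_main_option("script_location", "migrations")

# Offline mode: run_migrations_offline() is used and the SQL is printed, not executed.
alembic.command.upgrade(cfg, "head", sql=True)

# Online mode: run_migrations_online() connects via sqlalchemy.url and applies the revisions.
alembic.command.upgrade(cfg, "head")
```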
@@ -0,0 +1,24 @@
"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}

"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}

# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}


def upgrade():
    ${upgrades if upgrades else "pass"}


def downgrade():
    ${downgrades if downgrades else "pass"}
@@ -0,0 +1,117 @@
"""base

Revision ID: 4ac3e58519eb
Revises:
Create Date: 2021-03-13 20:29:10.062757

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
from sqlalchemy.exc import OperationalError

revision = '4ac3e58519eb'
down_revision = None
branch_labels = None
depends_on = None


def ignore_duplicate(fn):
    try:
        fn()
    except OperationalError as e:
        if "duplicate" in str(e):
            pass


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        op.create_table('api_keys',
            sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
            sa.Column('key', sa.String(), nullable=True),
            sa.Column('user_id', sa.Integer(), nullable=True),
            sa.Column('created_date', sa.DateTime(), nullable=True),
            sa.ForeignKeyConstraint(['user_id'], ['users.id'], name='fk_user_api_key_user_id', onupdate='CASCADE', ondelete='CASCADE'),
            sa.PrimaryKeyConstraint('id'),
            sa.UniqueConstraint('key')
        )
    except OperationalError:
        pass

    try:
        op.drop_table('migrate_version')
    except OperationalError:
        pass

    naming_convention = {
        "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
    }
    with op.batch_alter_table("peer", naming_convention=naming_convention) as batch_op:
        batch_op.drop_constraint("fk_peer_server_id_server", type_="foreignkey")

    with op.batch_alter_table('peer', schema=None) as batch_op:
        batch_op.create_foreign_key('fk_peer_server_id_server', 'server', ['server_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')

    ignore_duplicate(lambda: op.add_column('peer', sa.Column('configuration', sa.Text(), nullable=True)))
    ignore_duplicate(lambda: op.add_column('peer', sa.Column('keep_alive', sa.Integer(), nullable=True)))
    ignore_duplicate(lambda: op.add_column('peer', sa.Column('read_only', sa.Integer(), nullable=True)))
    ignore_duplicate(lambda: op.add_column('peer', sa.Column('server_id', sa.Integer(), nullable=True)))
    ignore_duplicate(lambda: op.add_column('peer', sa.Column('shared_key', sa.Text(), nullable=True)))
    ignore_duplicate(lambda: op.add_column('peer', sa.Column('v6_address', sa.String(), nullable=True)))


    #op.drop_constraint(None, 'peer', type_='foreignkey')
    #
    #op.drop_column('peer', 'server')


    try:
        with op.batch_alter_table('peer', schema=None) as batch_op:
            batch_op.drop_column("server")
    except KeyError:
        pass

    ignore_duplicate(lambda: op.add_column('server', sa.Column('allowed_ips', sa.String(), nullable=True)))
    ignore_duplicate(lambda: op.add_column('server', sa.Column('configuration', sa.Text(), nullable=True)))
    ignore_duplicate(lambda: op.add_column('server', sa.Column('dns', sa.String(), nullable=True)))
    ignore_duplicate(lambda: op.add_column('server', sa.Column('keep_alive', sa.Integer(), nullable=True)))
    ignore_duplicate(lambda: op.add_column('server', sa.Column('read_only', sa.Integer(), nullable=True)))
    ignore_duplicate(lambda: op.add_column('server', sa.Column('subnet', sa.Integer(), nullable=False)))
    ignore_duplicate(lambda: op.add_column('server', sa.Column('v6_address', sa.String(), nullable=True)))
    ignore_duplicate(lambda: op.add_column('server', sa.Column('v6_subnet', sa.Integer(), nullable=False)))
    #op.create_unique_constraint(None, 'server', ['v6_address'])


    try:
        with op.batch_alter_table('server', schema=None) as batch_op:
            batch_op.drop_column("shared_key")
    except KeyError:
        pass

    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('server', sa.Column('shared_key', sa.VARCHAR(), nullable=True))
    op.drop_constraint(None, 'server', type_='unique')
    op.drop_column('server', 'v6_subnet')
    op.drop_column('server', 'v6_address')
    op.drop_column('server', 'subnet')
    op.drop_column('server', 'read_only')
    op.drop_column('server', 'keep_alive')
    op.drop_column('server', 'dns')
    op.drop_column('server', 'configuration')
    op.drop_column('server', 'allowed_ips')
    op.add_column('peer', sa.Column('server', sa.INTEGER(), nullable=True))
    op.drop_constraint('fk_wg_peer_server_id', 'peer', type_='foreignkey')
    op.create_foreign_key(None, 'peer', 'server', ['server'], ['interface'])
    op.drop_column('peer', 'v6_address')
    op.drop_column('peer', 'shared_key')
    op.drop_column('peer', 'server_id')
    op.drop_column('peer', 'read_only')
    op.drop_column('peer', 'keep_alive')
    op.drop_column('peer', 'configuration')
    op.drop_table('api_keys')
    # ### end Alembic commands ###
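
Editor's note, not part of this commit: an alternative to the ignore_duplicate()/try-except pattern above is to inspect the live schema and only add columns that are missing. A sketch of a helper intended to be called inside upgrade(), where op.get_bind() returns the migration connection.

```python
# Sketch only: add a column only when it is not already present, instead of
# swallowing "duplicate column" OperationalErrors.
import sqlalchemy as sa
from alembic import op


def add_column_if_missing(table_name: str, column: sa.Column) -> None:
    inspector = sa.inspect(op.get_bind())
    existing = {col["name"] for col in inspector.get_columns(table_name)}
    if column.name not in existing:
        op.add_column(table_name, column)


# e.g. mirroring one of the calls above:
# add_column_if_missing('peer', sa.Column('v6_address', sa.String(), nullable=True))
```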
@@ -1,12 +1,8 @@
import tempfile
from os.path import exists

from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from starlette.responses import JSONResponse

import const
import models
from database import models
import schemas
import middleware
import db.wireguard
@@ -0,0 +1,34 @@
import os
import typing

from sqlalchemy.orm import Session

import const
from database import models
from database.database import SessionLocal
from db.api_key import add_initial_api_key_for_admin
from db.wireguard import server_add_on_init
from script.wireguard import is_installed, start_interface, is_running, load_environment_clients


def setup_on_start():
    _db: Session = SessionLocal()
    servers: typing.List[models.WGServer] = _db.query(models.WGServer).all()
    for s in servers:
        try:
            last_state = s.is_running
            if is_installed() and last_state and is_running(s):
                start_interface(s)
        except Exception as e:
            print(e)

    if const.CLIENT:
        load_environment_clients(_db)

    if const.SERVER_INIT_INTERFACE is not None:
        server_add_on_init(_db)

    if const.SERVER_STARTUP_API_KEY is not None:
        ADMIN_USERNAME = os.getenv("ADMIN_USERNAME")
        add_initial_api_key_for_admin(_db, const.SERVER_STARTUP_API_KEY, ADMIN_USERNAME)
    _db.close()
@@ -0,0 +1,14 @@
from loguru import logger
from fastapi import HTTPException
import os
from jinja2 import Environment, FileSystemLoader

templates_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
jinja_env = Environment(loader=FileSystemLoader(templates_path))


class WGMHTTPException(HTTPException):

    def __init__(self, status_code: int, detail: str = None):
        HTTPException.__init__(self, status_code, detail)
        logger.opt(depth=1).error(detail)
@@ -0,0 +1,51 @@
{
  "root": true,
  "ignorePatterns": [
    "projects/**/*"
  ],
  "overrides": [
    {
      "files": [
        "*.ts"
      ],
      "parserOptions": {
        "project": [
          "tsconfig.json",
          "e2e/tsconfig.json"
        ],
        "createDefaultProgram": true
      },
      "extends": [
        "plugin:@angular-eslint/recommended",
        "plugin:@angular-eslint/template/process-inline-templates"
      ],
      "rules": {
        "@angular-eslint/component-selector": [
          "error",
          {
            "prefix": "app",
            "style": "kebab-case",
            "type": "element"
          }
        ],
        "@angular-eslint/directive-selector": [
          "error",
          {
            "prefix": "app",
            "style": "camelCase",
            "type": "attribute"
          }
        ]
      }
    },
    {
      "files": [
        "*.html"
      ],
      "extends": [
        "plugin:@angular-eslint/template/recommended"
      ],
      "rules": {}
    }
  ]
}
@@ -0,0 +1,206 @@
<mat-expansion-panel>
<mat-expansion-panel-header>

<mat-panel-title>
<b>New WireGuard Server</b>
</mat-panel-title>


<mat-panel-description>
Expand to configure a new WireGuard server
</mat-panel-description>
</mat-expansion-panel-header>


<form [formGroup]="serverForm" class="add-server-form">

<p><b>Network Configuration</b></p>

<table class="add-server-full-width" cellspacing="0"><tr>
<td>
<mat-form-field class="add-server-full-width">
<mat-label>Interface</mat-label>
<input formControlName="interface" matInput [placeholder]="defaultInterface">
</mat-form-field>
</td>

<td>
<mat-form-field class="add-server-full-width">
<mat-label>Endpoint</mat-label>
<input formControlName="endpoint" matInput [placeholder]="defaultEndpoint">
</mat-form-field>
</td>
<td>
<mat-form-field class="add-server-full-width">
<mat-label>Port</mat-label>
<input formControlName="listen_port" matInput [placeholder]="defaultListenPort">
</mat-form-field>
</td>
</tr>
<tr>
<td>
<mat-form-field class="add-server-full-width">
<mat-label>Default DNS</mat-label>
<input formControlName="dns" matInput [placeholder]="defaultIPv4Address">
</mat-form-field>
</td>

<td>
<mat-form-field class="add-server-full-width">
<mat-label>Default allowed IPs</mat-label>
<input formControlName="allowed_ips" matInput [placeholder]="defaultAllowedIPs">
</mat-form-field>
</td>

<td>
<mat-form-field class="add-server-full-width">
<mat-label>Default PersistentKeepalive interval</mat-label>
<input formControlName="keep_alive" matInput [placeholder]="defaultPersistentKeepalive">
</mat-form-field>
</td>
</tr>


</table>

<p><b>IPv4 Configuration</b></p>
<table class="add-server-full-width" cellspacing="0"><tr>

<td>
<mat-form-field class="add-server-full-width">
<mat-label>IPv4 Address</mat-label>
<input formControlName="address" matInput [placeholder]="defaultIPv4Address">
</mat-form-field>
</td>

<td>
<mat-form-field matLine class="add-server-full-width">
<mat-label>Subnet</mat-label>
<select matNativeControl formControlName="subnet">
<option *ngFor="let v4Subnet of v4Subnets" [value]="v4Subnet">/{{v4Subnet}}</option>
</select>

</mat-form-field>
</td>

</tr></table>

<p><b>IPv6 Configuration</b></p>
<table class="add-server-full-width" cellspacing="0"><tr>
<td>
<mat-checkbox [checked]="defaultHasIPV6Support" [value]="defaultHasIPV6Support" (change)="ipv6SupportChanged($event)">IPv6 Support</mat-checkbox>
</td>

</tr></table>

<table class="add-server-full-width" cellspacing="0"><tr>

<td>
<mat-form-field class="add-server-full-width">
<mat-label>IPv6 Address</mat-label>
<input formControlName="v6_address" matInput [placeholder]="defaultIPv6Address">
</mat-form-field>
</td>

<td>
<mat-form-field matLine class="add-server-full-width">
<mat-label>Subnet</mat-label>
<select matNativeControl formControlName="v6_subnet">
<option *ngFor="let v6Subnet of v6Subnets" [value]="v6Subnet">/{{v6Subnet}}</option>
</select>

</mat-form-field>
</td>

</tr></table>


<p><b>Key-pairs</b></p>

<div class="button-row">
<button type="button" [disabled]="!isEdit" (click)="getKeyPair()" mat-raised-button color="primary">
<i class="material-icons">vpn_key</i>
Generate KeyPair
</button>
</div>

<table class="add-server-full-width" cellspacing="0">
<tr>
<td>
<mat-form-field class="add-server-full-width">
<mat-label>Private-Key</mat-label>
<input formControlName="private_key" matInput>
</mat-form-field>
</td>
</tr>
<tr>
<td>
<mat-form-field class="add-server-full-width">
<mat-label>Public-Key</mat-label>
<input formControlName="public_key" matInput>
</mat-form-field>
</td>
</tr>
</table>

<p><b>Scripts</b></p>
<table class="add-server-full-width" cellspacing="0">
<tr>
<td>
<mat-form-field class="add-server-full-width">
<mat-label>Post-Up</mat-label>
<textarea formControlName="post_up" matInput rows="4"></textarea>
</mat-form-field>
</td>
</tr>
<tr>
<td>
<mat-form-field class="add-server-full-width">
<mat-label>Post-Down</mat-label>
<textarea formControlName="post_down" matInput rows="4"></textarea>
</mat-form-field>
</td>
</tr>
</table>

<div class="button-row">

<button mat-raised-button color="primary"
type="submit"
[disabled]="!serverForm.valid"
(click)="serverForm.valid && add(serverForm.value)"
(keydown.enter)="serverForm.valid && add(serverForm.value)"
>
<ng-container *ngIf="!isEdit">Add Server</ng-container>
<ng-container *ngIf="isEdit">Edit Server</ng-container>
</button>

<button mat-raised-button color="warn" (click)="resetForm()">
Reset
</button>

<input #confInput hidden="true" type="file" multiple onclick="this.value=null" (change)="parseFiles($event)" accept=".conf"/>
<button
mat-raised-button
color="primary"
(click)="confInput.click()"
matTooltip="Import an existing WireGuard configuration. You can select both server and peer configuration files. The number of imported peers is shown near the submit button."
>Import Configuration</button>

<div *ngIf="this.serverForm.value['peers'] && this.serverForm.value['peers'].length > 0">
Importing <b>{{this.serverForm.value['peers'].length}}</b> peers.
</div>

</div>

</form>


</mat-expansion-panel>
@@ -1,17 +1,19 @@
<div
fxFlexFill
fxLayout="row"
fxLayout="column"
fxLayout.lt-lg="column"
style="padding: 10px;"
fxLayoutGap="20px">

style="padding: 10px;" fxLayoutGap="20px">

<div fxFlex="65">
<app-server [(server)]="servers[idx]" [(servers)]="servers" *ngFor="let server of servers; let idx = index"></app-server>
<div fxFlex="100">
<app-add-server [(servers)]="servers"></app-add-server>
</div>

<div fxFlex="35">
<app-add-server [(servers)]="servers"></app-add-server>
<div fxFlex="100">
<app-server [(server)]="servers[idx]" [(servers)]="servers" *ngFor="let server of servers; let idx = index"></app-server>
</div>


</div>
Some files were not shown because too many files changed in this diff