Initial commit

Aleksandr 2024-11-17 14:58:34 +03:00
commit f67dcd8f2a
31 changed files with 3297 additions and 0 deletions

8
.gitignore vendored Normal file

@@ -0,0 +1,8 @@
/target
# Added by cargo
#
# already existing elements were commented out
#/target

1486
Cargo.lock generated Normal file

File diff suppressed because it is too large

27
Cargo.toml Normal file

@@ -0,0 +1,27 @@
[package]
name = "vnj"
version = "0.1.0"
edition = "2021"
[[bin]]
name = "vnj"
path = "bin/main.rs"
[lib]
name = "vnj"
path = "src/lib.rs"
[dependencies]
axum = { version = "0.7.8", features = ["macros", "multipart"] }
clap = { version = "4.5.21", features = ["derive"] }
color-eyre = "0.6.3"
eyre = "0.6.12"
jiff = { version = "0.1.14", features = ["serde"] }
parking_lot = "0.12.3"
serde = { version = "1.0.215", features = ["derive"] }
tokio = { version = "1.41.1", features = ["rt", "rt-multi-thread", "net", "sync", "macros", "time", "parking_lot", "signal"] }
toml = "0.8.19"
tracing = "0.1.40"
tracing-subscriber = "0.3.18"
url = { version = "2.5.3", features = ["serde"] }
uuid = { version = "1.11.0", features = ["v4"] }

45
README.md Normal file

@@ -0,0 +1,45 @@
# vnj
VN journal and post scheduling.
This thing was made in a couple of hours and it's throwaway, so don't expect the code to be pretty.
# Use-cases
The main use-case is keeping a local copy of what we post in the channel, so we can fill the upcoming site with content faster; it also helps with scheduling.
# Development & Deployment
Wanna contribute? There's a `flake.nix` file to set up the development environment:
```shell
$ nix develop
```
Wanna deploy it yourself? I made a NixOS module for that:
```nix
let
mkCfg = port: {
app = {
secret.path = "path/to/secret";
journal = "path/to/journal";
};
http = {
listen = "0.0.0.0:${toString port}";
};
};
in
{
services.vnj = {
enable = true;
instances.en = mkCfg 8081;
instances.ru = mkCfg 8082;
};
}
```
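Once an instance is running, every request must carry the configured secret in the `x-secret` header. As a minimal smoke test against the example `config.toml` (secret `1337`, port 8089) you can hit the list endpoint; the `start_from: null` payload below is just an illustration:
```shell
$ curl -X POST http://127.0.0.1:8089/novels \
    -H 'x-secret: 1337' \
    -H 'content-type: application/json' \
    -d '{"start_from": null}'
```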
Enjoy.

79
bin/main.rs Normal file

@@ -0,0 +1,79 @@
use std::{
fs,
future::IntoFuture,
path::{Path, PathBuf},
};
use clap::Parser;
use eyre::Context;
use tokio::{net::TcpListener, runtime as rt};
use tracing_subscriber::FmtSubscriber;
use vnj::{app::App, cfg, db::Db, notifies, state::State};
#[derive(Debug, Parser)]
pub struct Args {
/// Path to the TOML config.
#[clap(long, short)]
pub config: PathBuf,
}
async fn run(cfg: cfg::Root) -> eyre::Result<()> {
let listener = TcpListener::bind(cfg.http.listen)
.await
.wrap_err("failed to bind HTTP port")?;
let db = Db::from_root(&cfg.app.journal)?;
let notifies = notifies::make();
let app = App::new(db, notifies.clone())?;
let state = State::new(app, cfg)?;
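// Run the scheduling daemon and the HTTP server side by side; the
// try_join below surfaces whichever error happens first.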
let daemon = tokio::spawn(vnj::daemon::run(state.clone(), notifies.clone()));
let router = vnj::http::make(state.clone()).with_state(state);
tracing::info!("listening on {}", listener.local_addr()?);
let server = tokio::spawn(axum::serve(listener, router).into_future());
let (daemon_res, server_res) =
tokio::try_join!(daemon, server).wrap_err("error while running")?;
daemon_res?;
server_res?;
Ok(())
}
fn read_config(path: &Path) -> eyre::Result<cfg::Root> {
let contents = fs::read_to_string(path).wrap_err("failed to read")?;
let cfg: cfg::Root = toml::from_str(&contents).wrap_err("failed to parse TOML")?;
Ok(cfg)
}
fn setup_logger(level: cfg::LogLevel) -> eyre::Result<()> {
let level: Option<tracing::Level> = level.into();
let subscriber = FmtSubscriber::builder();
let subscriber = if let Some(level) = level {
subscriber.with_max_level(level)
} else {
subscriber.with_max_level(tracing::level_filters::LevelFilter::OFF)
};
let subscriber = subscriber.finish();
tracing::subscriber::set_global_default(subscriber)?;
Ok(())
}
fn main() -> eyre::Result<()> {
let rt = rt::Builder::new_current_thread()
.enable_all()
.thread_name("vnj")
.build()
.wrap_err("failed to create tokio runtime")?;
let args = Args::parse();
let cfg = read_config(&args.config).wrap_err("failed to load the config")?;
setup_logger(cfg.app.log_level)?;
rt.block_on(run(cfg))
}

10
config.toml Normal file

@@ -0,0 +1,10 @@
[app]
secret = "1337"
log_level = "debug"
journal = "./journal"
[http]
listen = "0.0.0.0:8089"

103
flake.lock generated Normal file

@@ -0,0 +1,103 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"naersk": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1721727458,
"narHash": "sha256-r/xppY958gmZ4oTfLiHN0ZGuQ+RSTijDblVgVLFi1mw=",
"owner": "nix-community",
"repo": "naersk",
"rev": "3fb418eaf352498f6b6c30592e3beb63df42ef11",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "naersk",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1731676054,
"narHash": "sha256-OZiZ3m8SCMfh3B6bfGC/Bm4x3qc1m2SVEAlkV6iY7Yg=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "5e4fbfb6b3de1aa2872b76d49fafc942626e2add",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"naersk": "naersk",
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1731820690,
"narHash": "sha256-/hHFMTD+FGURXZ4JtfXoIgpy87zL505pVi6AL76Wc+U=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "bbab2ab9e1932133b1996baa1dc00fefe924ca81",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

96
flake.nix Normal file

@@ -0,0 +1,96 @@
{
description = "vnj";
inputs = {
nixpkgs.url = "github:nixos/nixpkgs?ref=nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
rust-overlay = {
url = "github:oxalica/rust-overlay";
inputs.nixpkgs.follows = "nixpkgs";
};
naersk = {
url = "github:nix-community/naersk";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = {
self,
naersk,
nixpkgs,
flake-utils,
rust-overlay,
}:
flake-utils.lib.eachDefaultSystem (system:
let
overlays = [(import rust-overlay)];
pkgs = import nixpkgs { inherit system overlays; };
rust = pkgs.rust-bin.stable.latest.default;
naersk' = pkgs.callPackage naersk {};
vnj = naersk'.buildPackage {
src = ./.;
};
nixosModule = { pkgs, config, lib, ... }:
let
ts = lib.types;
cfg = config.services.vnj;
in
{
options.services.vnj = {
enable = lib.mkEnableOption "vnj";
user = lib.mkOption {
type = ts.str;
# Assumed default so the README example evaluates; override as needed.
default = "vnj";
};
instances = lib.mkOption {
description = "instances of the vnj";
type = ts.attrsOf ts.attrs;
};
};
config.systemd.services = lib.mkIf cfg.enable (lib.attrsets.mapAttrs' (name: value:
let
gen = pkgs.formats.toml {};
toml-cfg = gen.generate "config.toml" value;
in
{
name = "vnj-${name}";
value = {
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
description = "VNJ";
serviceConfig = {
Type = "simple";
User = cfg.user;
ExecStart = "${vnj}/bin/vnj --config ${toml-cfg}";
};
};
}
) cfg.instances);
};
in
{
packages.vnj = vnj;
packages.default = vnj;
nixosModules.default = nixosModule;
devShells.default = pkgs.mkShell {
shellHook = ''
export PS1="(vnj) $PS1"
'';
buildInputs = [
(rust.override {
extensions = [ "rust-src" "rust-analyzer" ];
})
];
};
}
);
}

141
src/app.rs Normal file

@@ -0,0 +1,141 @@
use std::{
path::PathBuf,
sync::Arc,
time::{Duration, Instant},
};
use crate::{
db::{self, UploadId, UploadKind},
notifies::Notifies,
post_queue,
schemas::novel::Novel,
schemas::sanity::ensure_slug,
subs,
};
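/// Core application state: the on-disk journal, the post queue and the
/// pending subscribers. `State` wraps this in a `RwLock` that is shared
/// between the HTTP handlers and the daemon.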
#[derive(Debug)]
pub struct App {
db: db::Db,
notifies: Arc<Notifies>,
pub subs: subs::Subs,
pub queue: post_queue::Queue,
}
fn measure<O>(f: impl FnOnce() -> O) -> (O, Duration) {
let start = Instant::now();
(f(), start.elapsed())
}
impl App {
pub fn begin_upload(&self, slug: &str, kind: UploadKind) -> eyre::Result<(UploadId, PathBuf)> {
ensure_slug(slug)?;
let entry = self.db.entry(slug);
Ok(entry.start_upload(slug, kind))
}
pub fn finish_upload(&self, slug: &str, id: UploadId, file_name: &str) -> eyre::Result<()> {
ensure_slug(slug)?;
let entry = self.db.entry(slug);
entry.finish_upload(id, file_name)?;
Ok(())
}
}
impl App {
pub fn novel(&self, slug: &str) -> eyre::Result<db::Entry> {
ensure_slug(slug)?;
Ok(self.db.entry(slug))
}
pub fn enumerate_novels(&self) -> Vec<(String, db::Entry)> {
self.db.enumerate()
}
pub fn add_novel(&mut self, slug: impl Into<String>, novel: Novel) -> eyre::Result<()> {
let slug = slug.into();
ensure_slug(&slug)?;
let db_entry = self.db.entry(&slug);
db_entry.init(&novel)?;
self.queue.add(post_queue::Item {
novel: slug,
post_at: novel.post_at,
});
// Notifying here may look unnecessary, but it prevents a race
// with the daemon's scheduled sleep, so keep it.
self.notifies.update.notify_one();
Ok(())
}
}
impl App {
pub fn mark_posted_until(&mut self, ts: jiff::Zoned) {
self.db.update_state(move |s| {
s.posted_until = Some(ts);
});
}
pub fn reload_queue(&mut self) -> eyre::Result<()> {
tracing::info!("loading full database...");
let old_entries = self.queue.len();
let (queue, dur) = measure(|| {
let queue: post_queue::Queue = self
.db
.enumerate()
.into_iter()
.filter_map(|(slug, e)| {
let result = e.read_info();
match result {
Ok(n) => {
if let Some(ref posted_until) = self.db.state().posted_until {
if &n.post_at <= posted_until {
return None;
}
}
Some(Ok(post_queue::Item {
novel: slug,
post_at: n.post_at,
}))
}
Err(e) => Some(Err(e)),
}
})
.collect::<eyre::Result<_>>()?;
Ok::<_, eyre::Report>(queue)
});
self.queue = queue?;
tracing::info!(
took = ?dur,
old = old_entries,
new = self.queue.len(),
"loaded database and created queue",
);
self.notifies.update.notify_one();
Ok(())
}
pub fn new(db: db::Db, notifies: Arc<Notifies>) -> eyre::Result<Self> {
let queue = post_queue::Queue::default();
let mut this = Self {
db,
queue,
subs: subs::Subs::new(Arc::clone(&notifies)),
notifies,
};
this.reload_queue()?;
Ok(this)
}
}

65
src/cfg.rs Normal file

@@ -0,0 +1,65 @@
use std::{fs, net::SocketAddr, path::PathBuf};
use eyre::Context;
use serde::Deserialize;
#[derive(Debug, Deserialize)]
pub struct Http {
pub listen: SocketAddr,
}
#[derive(Debug, Clone, Copy, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum LogLevel {
Info,
Debug,
// Error is omitted; there are no error-level logs yet.
Off,
}
impl From<LogLevel> for Option<tracing::Level> {
fn from(value: LogLevel) -> Self {
use tracing::Level as L;
Some(match value {
LogLevel::Info => L::INFO,
LogLevel::Debug => L::DEBUG,
LogLevel::Off => return None,
})
}
}
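// Accepts either a plain string (`secret = "1337"`) or a table pointing
// at a file (`secret.path = "path/to/secret"`), courtesy of the
// untagged representation.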
#[derive(Debug, Deserialize)]
#[serde(untagged, rename_all = "snake_case")]
pub enum Secret {
Path { path: PathBuf },
Plain(String),
}
impl Secret {
pub fn into_string(&self) -> eyre::Result<String> {
match self {
// Secret files usually end with a newline; trim it so the
// header comparison doesn't silently fail.
Self::Path { path } => fs::read_to_string(path)
.map(|s| s.trim_end().to_owned())
.wrap_err("failed to read secret"),
Self::Plain(s) => Ok(s.clone()),
}
}
}
#[derive(Debug, Deserialize)]
pub struct App {
pub secret: Secret,
pub log_level: LogLevel,
pub journal: PathBuf,
}
#[derive(Debug, Deserialize)]
pub struct Journal {
pub root: PathBuf,
}
#[derive(Debug, Deserialize)]
pub struct Root {
pub app: App,
pub http: Http,
}

124
src/daemon.rs Normal file

@@ -0,0 +1,124 @@
use std::{
sync::Arc,
time::{Duration, Instant},
};
use tokio::{
signal::unix::{signal, SignalKind},
time,
};
use crate::notifies::Notifies;
use crate::state::State;
async fn sleep_or_hang(deadline: Option<Instant>) {
if let Some(deadline) = deadline {
time::sleep_until(deadline.into()).await
} else {
// No deadline: never wake up.
std::future::pending::<()>().await
}
}
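// The event loop has four wake-up sources: SIGHUP (the journal was
// edited on disk), in-process database updates, new subscribers, and
// the closest scheduled post becoming due.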
pub async fn run(state: State, notifies: Arc<Notifies>) -> eyre::Result<()> {
tracing::info!("started daemon");
let mut on_disk_update = signal(SignalKind::hangup()).unwrap();
loop {
let closest_wake_up = 'w: {
let app = state.app.read();
if !app.subs.has_listeners() {
tracing::info!("the queue has no listeners, so it's pointless to schedule wake-up");
break 'w None;
}
if let Some(entry) = app.queue.closest() {
let now = jiff::Zoned::now();
let sleep_nanos = now.duration_until(&entry.post_at).as_nanos().max(0) as u64;
Some(Duration::from_nanos(sleep_nanos))
} else {
None
}
}
.map(|dur| {
tracing::info!(after = ?dur, "the next wake-up is scheduled");
Instant::now() + dur
});
tokio::select! {
biased;
_ = on_disk_update.recv() => {
tracing::info!("got sighup, reloading queue...");
let mut app = state.app.write();
app.reload_queue()?;
}
_reload = notifies.update.notified() => {
tracing::info!("the database was updated, woke up");
continue;
}
_new_sub = notifies.new_subscriber.notified() => {
tracing::debug!("got new subscriber");
continue;
}
_woke_for_update = sleep_or_hang(closest_wake_up) => {
let mut app = state.app.write();
let app = &mut *app;
let Some(item) = app.queue.closest_mut() else {
no_updates();
continue;
};
// First we need to get our waiters.
if !app.subs.has_listeners() {
// If no one is interested in update, then we
// don't consume it.
tracing::warn!("got no subscribers for the update");
continue;
}
// Only then pop the update out of the queue. The order is critical:
// popping before the check would consume the update without
// notifying anyone.
let now = jiff::Zoned::now();
let Some(upd) = item.try_pop(&now) else {
no_updates();
continue;
};
let pending_subs = app.subs.consume();
for sub in pending_subs {
if let Err(e) = sub.send(upd.novel.clone()) {
tracing::warn!(due_to = e, "one subscriber lost the update");
}
}
app.mark_posted_until(now);
}
}
}
}
fn no_updates() {
tracing::info!("got no updates for this period");
}

229
src/db/mod.rs Normal file

@@ -0,0 +1,229 @@
use std::{
fs,
path::{Path, PathBuf},
};
use eyre::Context;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
pub use upload_id::*;
use crate::schemas::novel::{FullNovel, Novel};
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct State {
pub posted_until: Option<jiff::Zoned>,
}
#[derive(Debug, Clone)]
pub struct Db {
novels: PathBuf,
state_path: PathBuf,
state: State,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Entry {
pub info: PathBuf,
pub thumbnail: PathBuf,
pub screenshots: PathBuf,
pub files: PathBuf,
pub upload_queue: PathBuf,
}
impl Entry {
pub fn finish_upload(&self, id: UploadId, file_name: &str) -> eyre::Result<()> {
let src = self.upload_queue.join(id.to_file_name());
let dest = match id.kind {
UploadKind::Thumbnail => self.thumbnail.clone(),
UploadKind::Screenshot => self.screenshots.join(file_name),
UploadKind::File => self.files.join(file_name),
};
fs::rename(src, dest).wrap_err("failed to mark upload as finished")?;
Ok(())
}
pub fn start_upload(&self, slug: impl Into<String>, kind: UploadKind) -> (UploadId, PathBuf) {
let id = UploadId {
id: Uuid::new_v4(),
kind,
novel: slug.into(),
};
let path = self.upload_queue.join(id.to_file_name());
(id, path)
}
}
impl Entry {
pub fn read_info(&self) -> eyre::Result<Novel> {
let raw_info = fs::read_to_string(&self.info).wrap_err("failed to read info")?;
let info: Novel = toml::from_str(&raw_info).wrap_err("failed to deserialize TOML")?;
Ok(info)
}
pub fn get_full_info(&self) -> eyre::Result<FullNovel> {
let info = self.read_info()?;
Ok(FullNovel {
data: info,
upload_queue: self.upload_queue()?,
files: self.list_files()?,
screenshots: self.list_screenshots()?,
})
}
pub fn write_info(&self, info: &Novel) -> eyre::Result<()> {
let s = toml::to_string_pretty(info).wrap_err("failed to serialize novel")?;
fs::write(&self.info, &s)?;
Ok(())
}
pub fn screenshot(&self, name: &str) -> eyre::Result<PathBuf> {
// Note: screenshots live in their own directory, not in `files`.
let path = self.screenshots.join(name);
eyre::ensure!(path.exists(), "the screenshot does not exist");
Ok(path)
}
pub fn file(&self, name: &str) -> eyre::Result<PathBuf> {
let path = self.files.join(name);
eyre::ensure!(path.exists(), "the file does not exist");
Ok(path)
}
fn listdir(dir: &Path) -> eyre::Result<Vec<String>> {
let mut out = Vec::new();
for entry in fs::read_dir(dir)? {
let entry = entry?;
let path = entry.path();
let last = path.components().last().unwrap();
let last = last.as_os_str().to_string_lossy().to_string();
out.push(last);
}
Ok(out)
}
pub fn list_screenshots(&self) -> eyre::Result<Vec<String>> {
Self::listdir(&self.screenshots)
}
pub fn list_files(&self) -> eyre::Result<Vec<String>> {
Self::listdir(&self.files)
}
pub fn upload_queue(&self) -> eyre::Result<Vec<String>> {
Self::listdir(&self.upload_queue)
}
pub fn init(&self, info: &Novel) -> eyre::Result<()> {
if self.info.exists() || self.files.exists() {
eyre::bail!("this entry is already exists");
}
fs::create_dir_all(&self.screenshots).wrap_err("failed to create screenshots dir")?;
fs::create_dir_all(&self.files).wrap_err("failed to create files dir")?;
fs::create_dir_all(&self.upload_queue).wrap_err("failed to create upload queue")?;
self.write_info(info)?;
Ok(())
}
pub fn exists(&self) -> bool {
self.info.exists()
}
}
fn create_dir(p: impl AsRef<Path>) -> eyre::Result<()> {
let p = p.as_ref();
if p.exists() {
return Ok(());
}
tracing::info!("creating directory {}", p.display());
fs::create_dir_all(p)?;
Ok(())
}
impl Db {
pub fn state(&self) -> &State {
&self.state
}
pub fn update_state<O>(&mut self, f: impl FnOnce(&mut State) -> O) -> O {
let out = f(&mut self.state);
let serialized = toml::to_string_pretty(&self.state).unwrap();
fs::write(&self.state_path, serialized).unwrap();
out
}
pub fn from_root(root: impl AsRef<Path>) -> eyre::Result<Self> {
let root = root.as_ref();
create_dir(root)?;
let novels = root.join("novels");
let state_path = root.join("state.toml");
if !state_path.exists() {
fs::write(
&state_path,
toml::to_string_pretty(&State::default()).unwrap(),
)
.wrap_err("failed to create state")?;
}
create_dir(&novels)?;
let raw_state = fs::read_to_string(&state_path).wrap_err("failed to read state")?;
let state = toml::from_str(&raw_state).wrap_err("failed to parse state")?;
Ok(Self {
novels,
state,
state_path,
})
}
}
impl Db {
pub fn enumerate(&self) -> Vec<(String, Entry)> {
let read = fs::read_dir(&self.novels).unwrap();
let mut entries = vec![];
for entry in read.map(|e| e.unwrap().path()) {
let slug = entry.file_stem().unwrap().to_str().unwrap();
entries.push((slug.to_owned(), self.entry(slug)));
}
entries
}
pub fn entry(&self, slug: &str) -> Entry {
let root = self.novels.join(slug);
let thumbnail = root.join("thumbnail");
let screenshots = root.join("screenshots");
let upload_queue = root.join("upload_queue");
let info = root.join("info.toml");
let files = root.join("files");
Entry {
info,
files,
upload_queue,
thumbnail,
screenshots,
}
}
}
mod upload_id;

94
src/db/upload_id.rs Normal file

@@ -0,0 +1,94 @@
use std::{fmt, str::FromStr};
use eyre::OptionExt;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum UploadKind {
Thumbnail,
Screenshot,
File,
}
impl UploadKind {
pub const fn as_str(self) -> &'static str {
match self {
Self::Thumbnail => "t",
Self::Screenshot => "s",
Self::File => "f",
}
}
}
impl FromStr for UploadKind {
type Err = eyre::Report;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s {
"t" => Self::Thumbnail,
"s" => Self::Screenshot,
"f" => Self::File,
_ => eyre::bail!("wrong upload kind"),
})
}
}
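/// An in-flight upload. Rendered as `<novel>/<uuid><kind>` in the API
/// (see the `Display`/`FromStr` impls below) and as `<uuid>-<kind>` on
/// disk (see `to_file_name`).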
#[derive(Debug)]
pub struct UploadId {
pub novel: String,
pub id: Uuid,
pub kind: UploadKind,
}
impl UploadId {
pub fn to_file_name(&self) -> String {
format!("{}-{}", self.id, self.kind.as_str())
}
}
impl FromStr for UploadId {
type Err = eyre::Report;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let Some((novel, id_and_kind)) = s.rsplit_once('/') else {
eyre::bail!("invalid upload id")
};
if id_and_kind.is_empty() {
eyre::bail!("invalid upload id");
}
let (id, kind) = id_and_kind.split_at(id_and_kind.len().checked_sub(1).ok_or_eyre("empty upload id")?);
Ok(Self {
novel: novel.to_owned(),
kind: kind.parse()?,
id: id.parse()?,
})
}
}
impl fmt::Display for UploadId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}/{}{}", self.novel, self.id, self.kind.as_str())
}
}
impl Serialize for UploadId {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(&self.to_string())
}
}
impl<'de> Deserialize<'de> for UploadId {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
// Deserialize into an owned String: borrowing a `&str` fails for
// inputs that need unescaping.
let s = String::deserialize(deserializer)?;
s.parse().map_err(serde::de::Error::custom)
}
}

30
src/http.rs Normal file

@@ -0,0 +1,30 @@
use axum::{
extract::Request,
middleware::Next,
response::{IntoResponse, Response},
};
use crate::state::{Router, State};
fn deny() -> Response {
crate::result::Err::from(eyre::eyre!("nope")).into_response()
}
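// Every request must present the shared secret in the `x-secret`
// header; anything else gets a generic denial.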
async fn validate_secret(state: State, request: Request, next: Next) -> Response {
let Some(val) = request.headers().get("x-secret") else {
return deny();
};
if val != state.secret.as_str() {
return deny();
}
next.run(request).await
}
pub fn make(state: State) -> Router {
Router::new()
.nest("/novels", novels::make())
.layer(axum::middleware::from_fn_with_state(state, validate_secret))
}
mod novels;

12
src/http/novels.rs Normal file

@@ -0,0 +1,12 @@
use crate::state::Router;
pub fn make() -> Router {
Router::new()
.nest("/:slug", specific::make())
.merge(list::make())
.nest("/pull_next", pull_next::make())
}
mod list;
mod pull_next;
mod specific;

50
src/http/novels/list.rs Normal file

@@ -0,0 +1,50 @@
use std::collections::HashMap;
use axum::routing::post;
use serde::Serialize;
use crate::{
result::{ApiResult, FromJson},
schemas::novel::FullNovel,
state::{Router, State},
};
#[derive(Debug, serde::Deserialize)]
struct Args {
pub start_from: Option<jiff::Zoned>,
}
#[derive(Debug, Serialize, Default)]
struct Output {
pub novels: HashMap<String, FullNovel>,
pub errors: Vec<String>,
}
async fn list(state: State, FromJson(args): FromJson<Args>) -> ApiResult<Output> {
let mut out = Output::default();
let novels = state.app.read().enumerate_novels();
for (slug, entry) in novels {
let novel = match entry.get_full_info() {
Ok(n) => n,
Err(e) => {
out.errors.push(e.to_string());
continue;
}
};
if let Some(ref start_from) = args.start_from {
if &novel.data.post_at < start_from {
continue;
}
}
out.novels.insert(slug, novel);
}
Ok(out.into())
}
pub fn make() -> Router {
Router::new().route("/", post(list))
}

24
src/http/novels/pull_next.rs Normal file

@@ -0,0 +1,24 @@
use std::time::Duration;
use crate::{
result::ApiResult,
state::{Router, State},
};
use axum::routing::get;
use tokio::time;
async fn pull_next(state: State) -> ApiResult<Option<String>> {
let rx = state.app.write().subs.subscribe();
let Ok(Ok(r)) = time::timeout(Duration::from_secs(12 * 60 * 60), rx).await else {
tracing::debug!("timed out for subscription");
return Ok(None.into());
};
Ok(Some(r).into())
}
pub fn make() -> Router {
Router::new().route("/", get(pull_next))
}

43
src/http/novels/specific/enqueue.rs Normal file

@@ -0,0 +1,43 @@
use axum::{
extract::Path,
routing::{patch, post},
};
use crate::{
result::{ApiResult, FromJson},
schemas::{novel::Novel, sanity::Slug},
state::{Router, State},
};
async fn update(
Path(slug): Path<Slug>,
state: State,
FromJson(novel): FromJson<Novel>,
) -> ApiResult<bool> {
let mut app = state.app.write();
let entry = app.novel(&slug.0)?;
entry.write_info(&novel)?;
// hack, I just don't care, updates are
// infrequent.
app.reload_queue()?;
Ok(true.into())
}
async fn enqueue(
Path(slug): Path<Slug>,
state: State,
FromJson(novel): FromJson<Novel>,
) -> ApiResult<bool> {
let mut app = state.app.write();
app.add_novel(slug.0, novel)?;
Ok(true.into())
}
pub fn make() -> Router {
Router::new()
.route("/", post(enqueue))
.route("/", patch(update))
}

18
src/http/novels/specific/get.rs Normal file

@@ -0,0 +1,18 @@
use axum::{extract::Path, routing::get};
use crate::{
result::ApiResult,
schemas::{novel::FullNovel, sanity::Slug},
state::{Router, State},
};
async fn get_novel(Path(slug): Path<Slug>, state: State) -> ApiResult<FullNovel> {
let entry = state.app.read().novel(&slug.0)?;
let novel = entry.get_full_info()?;
Ok(novel.into())
}
pub fn make() -> Router {
Router::new().route("/", get(get_novel))
}

13
src/http/novels/specific/mod.rs Normal file

@@ -0,0 +1,13 @@
use crate::state::Router;
pub fn make() -> Router {
// TODO: Validation here.
Router::new()
.merge(enqueue::make())
.merge(get::make())
.nest("/upload", upload::make())
}
mod enqueue;
mod get;
mod upload;

120
src/http/novels/specific/upload.rs Normal file

@@ -0,0 +1,120 @@
use std::{fs, io::Write};
use axum::{
extract::{Multipart, Path},
routing::{delete, post},
};
use eyre::Context;
use crate::{
db::{UploadId, UploadKind},
result::{ApiResult, FromJson},
schemas::sanity::{FileName, Slug},
state::{Router, State},
};
async fn make_upload(
slug: String,
kind: UploadKind,
state: State,
mut multipart: Multipart,
) -> ApiResult<UploadId> {
let (id, dest) = {
let app = state.app.read();
app.begin_upload(&slug, kind)?
};
let Some(mut field) = multipart
.next_field()
.await
.wrap_err("failed to get field")?
else {
return Err(eyre::eyre!("empty multipart").into());
};
let mut file = fs::File::options()
.read(true)
.write(true)
.create(true)
.open(dest)
.wrap_err("failed to open dest file")?;
while let Some(chunk) = field.chunk().await.wrap_err("failed to read chunk")? {
file.write_all(&chunk).wrap_err("failed to write chunk")?;
}
Ok(id.into())
}
async fn up_screenshot(
Path(slug): Path<String>,
state: State,
multipart: Multipart,
) -> ApiResult<UploadId> {
make_upload(slug, UploadKind::Screenshot, state, multipart).await
}
async fn up_file(
Path(slug): Path<String>,
state: State,
multipart: Multipart,
) -> ApiResult<UploadId> {
make_upload(slug, UploadKind::File, state, multipart).await
}
async fn up_thumbnail(
Path(slug): Path<String>,
state: State,
multipart: Multipart,
) -> ApiResult<UploadId> {
make_upload(slug, UploadKind::Thumbnail, state, multipart).await
}
#[derive(Debug, serde::Deserialize)]
struct Finish {
pub id: UploadId,
pub file_name: FileName,
}
async fn finish(
Path(slug): Path<Slug>,
state: State,
FromJson(args): FromJson<Finish>,
) -> ApiResult<bool> {
let app = state.app.read();
app.finish_upload(&slug.0, args.id, &args.file_name.0)?;
Ok(true.into())
}
#[derive(Debug, serde::Deserialize)]
struct DeleteFile {
pub name: FileName,
pub kind: UploadKind,
}
async fn delete_file(
Path(slug): Path<String>,
state: State,
FromJson(args): FromJson<DeleteFile>,
) -> ApiResult<bool> {
let app = state.app.read();
let novel = app.novel(&slug)?;
let path = match args.kind {
UploadKind::Thumbnail => novel.thumbnail.clone(),
UploadKind::Screenshot => novel.screenshot(&args.name.0)?,
UploadKind::File => novel.file(&args.name.0)?,
};
fs::remove_file(path).wrap_err("failed to delete")?;
Ok(true.into())
}
pub fn make() -> Router {
Router::new()
.route("/screenshot", post(up_screenshot))
.route("/file", post(up_file))
.route("/thumbnail", post(up_thumbnail))
.route("/finish", post(finish))
.route("/", delete(delete_file))
}

17
src/lib.rs Normal file

@@ -0,0 +1,17 @@
pub mod http;
pub mod db;
pub mod schemas;
pub mod app;
pub mod post_queue;
pub mod result;
pub mod daemon;
pub mod notifies;
pub mod subs;
pub mod state;
pub mod cfg;

16
src/notifies.rs Normal file

@@ -0,0 +1,16 @@
use std::sync::Arc;
use tokio::sync::Notify;
#[derive(Debug)]
pub struct Notifies {
pub update: Notify,
pub new_subscriber: Notify,
}
pub fn make() -> Arc<Notifies> {
Arc::new(Notifies {
update: Notify::new(),
new_subscriber: Notify::new(),
})
}

105
src/post_queue.rs Normal file

@@ -0,0 +1,105 @@
use std::{
cmp::{Ordering, Reverse},
ops::Deref,
};
use serde::Serialize;
#[derive(Debug, Clone, Serialize)]
pub struct Item {
pub novel: String,
pub post_at: jiff::Zoned,
}
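/// A borrowed view of the queue's closest item; `try_pop` removes it
/// only once its `post_at` is due.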
pub struct Closest<'a> {
queue: &'a mut Queue,
}
impl<'a> Closest<'a> {
pub fn try_pop(self, now: &jiff::Zoned) -> Option<Item> {
let last = self.queue.entries.pop().unwrap();
if now >= &last.post_at {
Some(last)
} else {
self.queue.entries.push(last);
None
}
}
}
impl<'a> Deref for Closest<'a> {
type Target = Item;
fn deref(&self) -> &Self::Target {
let len = self.queue.entries.len() - 1;
&self.queue.entries[len]
}
}
fn cmp_items(lhs: &Item, rhs: &Item) -> Ordering {
Reverse(&lhs.post_at).cmp(&Reverse(&rhs.post_at))
}
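/// Post queue ordered by `post_at`: entries are kept sorted descending,
/// so the closest (earliest) item is always last and can be inspected
/// or popped in O(1).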
#[derive(Debug, Default)]
pub struct Queue {
entries: Vec<Item>,
}
impl FromIterator<Item> for Queue {
fn from_iter<T: IntoIterator<Item = Item>>(iter: T) -> Self {
let mut entries: Vec<Item> = iter.into_iter().collect();
entries.sort_unstable_by(cmp_items);
#[cfg(debug_assertions)]
if entries.len() >= 2 {
let first = &entries[0].post_at;
let second = &entries[1].post_at;
assert!(first >= second);
}
Self { entries }
}
}
impl Queue {
pub fn len(&self) -> usize {
self.entries.len()
}
pub fn closest(&self) -> Option<&Item> {
self.entries.last()
}
pub fn closest_mut(&mut self) -> Option<Closest<'_>> {
if self.entries.is_empty() {
None
} else {
Some(Closest { queue: self })
}
}
pub fn add(&mut self, item: Item) {
// binary_search_by returns Ok(i) if an entry with an equal post_at
// already exists and Err(i) with the sorted insertion point
// otherwise; either way, i is the index to insert at.
let idx = self
.entries
.binary_search_by(|probe| cmp_items(probe, &item))
.unwrap_or_else(|idx| idx);
self.entries.insert(idx, item);
}
pub const fn new() -> Self {
Self {
entries: Vec::new(),
}
}
}

65
src/result.rs Normal file

@@ -0,0 +1,65 @@
use axum::{body::Body, extract::FromRequest, response::IntoResponse};
use serde::{Deserialize, Serialize};
pub type ApiResult<T> = Result<Ok<T>, Err>;
#[derive(Debug, Clone)]
pub struct FromJson<T>(pub T);
#[axum::async_trait]
impl<T: for<'de> Deserialize<'de>, S: Send + Sync> FromRequest<S> for FromJson<T> {
type Rejection = Err;
async fn from_request(
request: axum::http::Request<Body>,
s: &S,
) -> Result<Self, Self::Rejection> {
let js = axum::Json::from_request(request, s)
.await
.map_err(|rej| eyre::eyre!("failed to parse json: {}", rej.to_string()))
.inspect_err(|e| tracing::error!(error = %e, "got error"))?;
Ok(Self(js.0))
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Ok<T> {
pub ok: T,
}
impl<T> From<T> for Ok<T> {
fn from(value: T) -> Self {
Self { ok: value }
}
}
impl<T: Serialize> IntoResponse for Ok<T> {
fn into_response(self) -> axum::response::Response {
axum::Json(self).into_response()
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Err {
pub error: ApiError,
}
impl From<eyre::Report> for Err {
fn from(value: eyre::Report) -> Self {
Self {
error: ApiError::Custom(format!("{value:?}")),
}
}
}
impl IntoResponse for Err {
fn into_response(self) -> axum::response::Response {
axum::Json(self).into_response()
}
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ApiError {
Custom(String),
}

4
src/schemas/mod.rs Normal file

@@ -0,0 +1,4 @@
pub mod novel;
pub mod platform;
pub mod sanity;

44
src/schemas/novel.rs Normal file

@@ -0,0 +1,44 @@
use std::num::NonZeroU8;
use serde::{Deserialize, Serialize};
use super::platform::Platform;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Download {
pub file_name: String,
pub platform: Platform,
}
#[derive(Debug, Clone, Serialize)]
pub struct StoredNovel {
#[serde(flatten)]
pub novel: Novel,
pub modified_at: jiff::Zoned,
pub created_at: jiff::Zoned,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FullNovel {
pub data: Novel,
pub upload_queue: Vec<String>,
pub files: Vec<String>,
pub screenshots: Vec<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Novel {
pub title: String,
pub description: String,
pub vndb: Option<url::Url>,
pub hours_to_read: Option<NonZeroU8>,
pub tags: Vec<String>,
pub genres: Vec<String>,
pub tg_post: Option<url::Url>,
pub post_at: jiff::Zoned,
}

11
src/schemas/platform.rs Normal file

@@ -0,0 +1,11 @@
use serde::{Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(untagged, rename_all = "snake_case")]
pub enum Platform {
Android,
Linux,
Windows,
Ios,
Other(String),
}

145
src/schemas/sanity.rs Normal file

@@ -0,0 +1,145 @@
use serde::{Deserialize, Serialize};
use std::path::Component;
use std::path::Path;
use std::path::PathBuf;
// Copypasta from <https://github.com/rust-lang/rfcs/issues/2208>.
fn normalize(p: &Path) -> PathBuf {
let mut stack: Vec<Component> = vec![];
// We assume .components() removes redundant consecutive path separators.
// Note that .components() also does some normalization of '.' on its own anyways.
// This '.' normalization happens to be compatible with the approach below.
for component in p.components() {
match component {
// Drop CurDir components, do not even push onto the stack.
Component::CurDir => {}
// For ParentDir components, we need to use the contents of the stack.
Component::ParentDir => {
// Look at the top element of stack, if any.
let top = stack.last().cloned();
match top {
// A component is on the stack, need more pattern matching.
Some(c) => {
match c {
// Push the ParentDir on the stack.
Component::Prefix(_) => {
stack.push(component);
}
// The parent of a RootDir is itself, so drop the ParentDir (no-op).
Component::RootDir => {}
// A CurDir should never be found on the stack, since they are dropped when seen.
Component::CurDir => {
unreachable!();
}
// If a ParentDir is found, it must be due to it piling up at the start of a path.
// Push the new ParentDir onto the stack.
Component::ParentDir => {
stack.push(component);
}
// If a Normal is found, pop it off.
Component::Normal(_) => {
let _ = stack.pop();
}
}
}
// Stack is empty, so path is empty, just push.
None => {
stack.push(component);
}
}
}
// All others, simply push onto the stack.
_ => {
stack.push(component);
}
}
}
// If an empty PathBuf would be returned, instead return CurDir ('.').
if stack.is_empty() {
return PathBuf::from(Component::CurDir.as_os_str());
}
let mut norm_path = PathBuf::new();
for item in &stack {
norm_path.push(item.as_os_str());
}
norm_path
}
#[derive(Debug, Clone)]
pub struct Slug(pub String);
impl Serialize for Slug {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.0.serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Slug {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
ensure_slug(&s).map_err(serde::de::Error::custom)?;
Ok(Self(s))
}
}
pub fn ensure_slug(text: &str) -> eyre::Result<()> {
fn is_slug(s: &str) -> bool {
!s.is_empty()
&& s.chars()
.all(|c| c.is_ascii_alphabetic() || c.is_ascii_digit() || matches!(c, '-' | '_'))
}
eyre::ensure!(is_slug(text), "not a slug");
Ok(())
}
#[derive(Debug, Clone)]
pub struct FileName(pub String);
impl<'de> Deserialize<'de> for FileName {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
let s = PathBuf::from(s);
let res = normalize(&s);
let res = res
.file_name()
.ok_or_else(|| serde::de::Error::custom("file name has no final component"))?
.to_str()
.ok_or_else(|| serde::de::Error::custom("file name is not valid UTF-8"))?;
Ok(Self(res.to_owned()))
}
}
impl Serialize for FileName {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.0.serialize(serializer)
}
}

37
src/state.rs Normal file

@@ -0,0 +1,37 @@
use std::{ops::Deref, sync::Arc};
use crate::{app::App, cfg};
use axum::extract::FromRequestParts;
use parking_lot::RwLock;
pub type Router = axum::Router<State>;
#[derive(Debug)]
pub struct LockedState {
pub app: RwLock<App>,
pub cfg: cfg::Root,
pub secret: String,
}
#[derive(Debug, Clone, FromRequestParts)]
#[from_request(via(axum::extract::State))]
pub struct State(Arc<LockedState>);
impl State {
pub fn new(app: App, cfg: cfg::Root) -> eyre::Result<Self> {
Ok(Self(Arc::new(LockedState {
app: RwLock::new(app),
secret: cfg.app.secret.into_string()?,
cfg,
})))
}
}
impl Deref for State {
type Target = LockedState;
fn deref(&self) -> &Self::Target {
&self.0
}
}

36
src/subs.rs Normal file

@@ -0,0 +1,36 @@
use tokio::sync::oneshot;
use std::sync::Arc;
use crate::notifies::Notifies;
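/// One-shot subscribers waiting for the next posted novel. The senders
/// are drained via `consume` when an update fires, so `pull_next` has
/// to re-subscribe on every request.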
#[derive(Debug)]
pub struct Subs {
queue: Vec<oneshot::Sender<String>>,
notifies: Arc<Notifies>,
}
impl Subs {
pub const fn new(notifies: Arc<Notifies>) -> Self {
Self {
queue: Vec::new(),
notifies,
}
}
pub fn has_listeners(&self) -> bool {
!self.queue.is_empty()
}
pub fn consume(&mut self) -> Vec<oneshot::Sender<String>> {
std::mem::take(&mut self.queue)
}
pub fn subscribe(&mut self) -> oneshot::Receiver<String> {
let (tx, rx) = oneshot::channel();
self.queue.push(tx);
self.notifies.new_subscriber.notify_waiters();
rx
}
}