diff --git a/Dockerfile b/Dockerfile
index 2fa4889..75d638d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -8,7 +8,7 @@ WORKDIR lonk
COPY ./Cargo.lock ./Cargo.lock
COPY ./Cargo.toml ./Cargo.toml
-RUN cargo build --release
+RUN cargo build
RUN src/*.rs
# Compile the source
diff --git a/data/config.json b/data/config.json
index 91acba0..8567be3 100644
--- a/data/config.json
+++ b/data/config.json
@@ -1,12 +1,12 @@
{
"db": {
- "address": "redis://127.0.0.1"
+ "address": "redis://db"
},
"slug_rules": {
"length": 5,
"chars": "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-"
},
"serve_rules": {
- "Dir": "/etc/lonk/served"
+ "Dir": "/data/served"
}
}
diff --git a/data/served/index.html b/data/served/index.html
new file mode 100644
index 0000000..8660be7
--- /dev/null
+++ b/data/served/index.html
@@ -0,0 +1,13 @@
+
+
+
+
+
+ Lonk
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/data/served/main.css b/data/served/main.css
new file mode 100644
index 0000000..e69de29
diff --git a/docker-compose.yml b/docker-compose.yml
index f4142da..3e1a7ca 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -3,8 +3,8 @@ services:
lonk:
build: .
environment:
- - PROFILE: release
- LONK_CONFIG: /data/config.json
+ - PROFILE: debug
volumes:
- ./data:/data
redis:
diff --git a/src/main.rs b/src/main.rs
index 53ef568..4fc65bd 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,8 +1,8 @@
use argh::FromArgs;
use figment::{providers::Format, Figment};
use serde::{Deserialize, Serialize};
-use std::{collections::BTreeSet, path::PathBuf, str::FromStr, sync::Arc};
-use tokio::sync::mpsc::UnboundedSender;
+use std::{collections::BTreeSet, path::PathBuf, str::FromStr, sync::Arc, io::BufRead};
+use tokio::sync;
use validators::prelude::*;
use warp::{filters::BoxedFilter, hyper::StatusCode, Filter};
@@ -15,6 +15,16 @@ macro_rules! unwrap_or_unwrap_err {
};
}
+macro_rules! clone_to_move {
+    ($x:ident) => {
+        let $x = $x.clone();
+    };
+    ($x:ident, $($y:ident),+) => {
+        clone_to_move!($x);
+        clone_to_move!($($y),+);
+    };
+}
+
#[derive(Serialize, Deserialize, Debug, Validator, Clone)]
#[validator(domain(ipv4(Allow), local(Allow), at_least_two_labels(Allow), port(Allow)))]
struct Url {
@@ -25,12 +35,14 @@ struct Url {
#[derive(Deserialize, Serialize, Debug, Clone)]
struct DbConfig {
pub address: String,
+ pub worker_threads: usize,
}
impl Default for DbConfig {
fn default() -> Self {
Self {
address: "redis://127.0.0.1".to_string(),
+ worker_threads: 4,
}
}
}
@@ -102,17 +114,40 @@ impl FromStr for Base64WithoutPaddingUrl {
#[derive(Debug)]
struct SlugDatabase {
-    tx: UnboundedSender<SlugDbMessage>,
+    tx: sync::mpsc::UnboundedSender<SlugDbMessage>,
}
-#[derive(Debug)]
+#[derive(Clone, Debug)]
enum SlugDbMessage {
Add(Slug, Url),
}
impl SlugDatabase {
- fn from_client(client: redis::Client) -> Self {
- todo!()
+ fn from_client(client: redis::Client, worker_threads: usize) -> Self {
+        let (tx, rx) = sync::mpsc::unbounded_channel::<SlugDbMessage>();
+
+    // I want a FIFO queue consumed by multiple workers sharing one receiver behind an async mutex.
+ // I'm not sure this is the best way to implement this.
+ // (Alternatively: is there a better architecture?)
+ let rx = Arc::new(sync::Mutex::new(rx));
+
+ for _ in 0..worker_threads {
+ let mut connection = client.get_connection().expect("Could not open connection to Redis server.");
+ clone_to_move!(rx);
+ tokio::spawn(async move {
+ while let Some(msg) = {(*rx.lock().await).recv().await} {
+ match msg {
+ SlugDbMessage::Add(slug, url) => {
+ todo!()
+ },
+ }
+ }
+ });
+ }
+
+ SlugDatabase {
+ tx,
+ }
}
fn insert_slug(&self, slug: Slug, url: Url) -> Result<(), ()> {
@@ -128,7 +163,7 @@ struct SlugFactory {
slug_chars: BTreeSet<char>,
}
-#[derive(Debug)]
+#[derive(Clone, Debug)]
struct Slug(String);
enum InvalidSlug {
@@ -199,7 +234,7 @@ async fn serve() {
let db = {
let client = redis::Client::open(config.db.address).expect("Error opening Redis database.");
//let conn = Connection::open(config.db_location).expect("Could not open database.");
- Arc::new(SlugDatabase::from_client(client))
+ Arc::new(SlugDatabase::from_client(client, config.db.worker_threads))
};
// GET /