Update: added download and user authentication

zastian@mrthoddata.com
2025-05-09 16:48:29 +01:00
parent 9b208edfde
commit 1c123f5a45
10 changed files with 456 additions and 68 deletions

View File

@@ -1,19 +1,19 @@
#!/usr/bin/env bash
set -e
-if [[ ! -d "/home/mrfluffy/bitBeam" ]]; then
+if [[ ! -d "/home/work/Documents/rust/bitBeam" ]]; then
echo "Cannot find source directory; Did you move it?"
-echo "(Looking for "/home/mrfluffy/bitBeam")"
+echo "(Looking for "/home/work/Documents/rust/bitBeam")"
echo 'Cannot force reload with this script - use "direnv reload" manually and then try again'
exit 1
fi
# rebuild the cache forcefully
-_nix_direnv_force_reload=1 direnv exec "/home/mrfluffy/bitBeam" true
+_nix_direnv_force_reload=1 direnv exec "/home/work/Documents/rust/bitBeam" true
# Update the mtime for .envrc.
# This will cause direnv to reload again - but without re-building.
-touch "/home/mrfluffy/bitBeam/.envrc"
+touch "/home/work/Documents/rust/bitBeam/.envrc"
# Also update the timestamp of whatever profile_rc we have.
# This makes sure that we know we are up to date.
-touch -r "/home/mrfluffy/bitBeam/.envrc" "/home/mrfluffy/bitBeam/.direnv"/*.rc
+touch -r "/home/work/Documents/rust/bitBeam/.envrc" "/home/work/Documents/rust/bitBeam/.direnv"/*.rc

View File

@@ -1 +1 @@
-flake-profile-1-link
+flake-profile-4-link

View File

@@ -1 +0,0 @@
-/nix/store/sc88ikf5zh532nisyr5v9h6f6q6fay54-nix-shell-env

View File

@@ -0,0 +1 @@
+/nix/store/xvayrqcb8vdiyns5k3j05ia9aflzjrxi-nix-shell-env

Cargo.lock generated
View File

@@ -175,6 +175,7 @@ dependencies = [
"log", "log",
"rand 0.9.1", "rand 0.9.1",
"serde", "serde",
"serde_json",
"sqlx", "sqlx",
"tokio", "tokio",
"uuid", "uuid",

View File

@@ -9,9 +9,10 @@ bytes = "1.10"
chrono = {version = "0.4", features = ["serde"]}
extract = "0.1"
fern = "0.7.1"
-log = {version = "0.4.27", feature = "std"}
+log = {version = "0.4", feature = "std"}
rand = "0.9"
serde = {version = "1.0", features = ["derive"]}
+serde_json = "1.0.140"
sqlx = { version = "0.8", features = [
"runtime-tokio", # pick exactly one runtime
"tls-rustls", # pick exactly one TLS backend

View File

@@ -79,6 +79,7 @@
RUST_SRC_PATH = "${pkgs.rust-bin.stable.latest.default}/lib/rustlib/src/rust/library";
BITBEAM_DATABASE_URL = "sqlite://./bitbeam.sqlite";
BITBEAM_DB_TYPE = "sqlite";
+DATABASE_URL = "sqlite://./bitbeam.sqlite";
};
}
);

View File

@@ -1,27 +1,35 @@
use axum::{
body::Bytes,
-extract::ConnectInfo,
+extract::{ConnectInfo, Path},
http::{HeaderMap, StatusCode},
response::{IntoResponse, Response},
Extension, Json,
};
use chrono::Utc;
+use log::{error, info, warn};
use rand::Rng;
use sqlx::AnyPool;
-use std::path::Path;
+use std::path::Path as PathBuf;
use tokio::fs;
use uuid::Uuid;
-use log::{info, warn, error};
-use std::net::SocketAddr;
use crate::data;
+use std::net::SocketAddr;
+use serde_json::json;
/// Handler to return all files as JSON
/// This function retrieves all files from the database
/// and returns them as a JSON response.
/// It also logs the IP address of the client making the request.
+/// example request: curl -X GET http://localhost:3000/all_files
+/// requires no parameters
+/// returns a JSON array of files
+/// TODO: add user authentication
-pub async fn all_files(Extension(pool): Extension<AnyPool>, ConnectInfo(addr): ConnectInfo<SocketAddr>) -> impl IntoResponse {
+pub async fn all_files(
+Extension(pool): Extension<AnyPool>,
+ConnectInfo(addr): ConnectInfo<SocketAddr>,
+) -> impl IntoResponse {
//log the IP address of the client and the call
let ip = addr.ip().to_string();
info!("Received an all_files request from IP: {}", ip);
@@ -40,7 +48,7 @@ pub async fn all_files(Extension(pool): Extension<AnyPool>, ConnectInfo(addr): C
Ok(files) => {
info!("DB select all success");
(StatusCode::OK, Json(files)).into_response()
-},
+}
Err(e) => {
warn!("DB select all error: {}", e);
(
@@ -58,21 +66,67 @@ pub async fn all_files(Extension(pool): Extension<AnyPool>, ConnectInfo(addr): C
/// saves it to the server's file system,
/// and stores the file metadata in the database.
/// It also logs the IP address of the client making the request.
+/// example request: curl -X POST -H "key: <key>" -H "file_name: <file_name>" -H "content-type: <content_type>" -H "download_limit: <download_limit>" --data-binary @<file_path> http://localhost:3000/upload
+/// requires the following headers:
+/// - key: the key of the user (not optional)
+/// - file_name: the name of the file (optional)
+/// - content-type: the content type of the file (optional)
+/// - download_limit: the download limit of the file (optional)
-pub async fn upload(Extension(pool): Extension<AnyPool>,
+pub async fn upload(
+Extension(pool): Extension<AnyPool>,
ConnectInfo(addr): ConnectInfo<SocketAddr>,
-Extension(config): Extension<data::Config> ,
+Extension(config): Extension<data::Config>,
headers: HeaderMap,
body: Bytes,
) -> Response {
//log the IP address of the client and the call
let ip = addr.ip().to_string();
info!("Received update from IP: {}", ip);
//get the key from the headers
let key = match headers.get("key") {
Some(hv) => hv.to_str().unwrap_or("unknown").to_string(),
None => {
return (
axum::http::StatusCode::BAD_REQUEST,
"Key header not supplied",
)
.into_response();
}
};
//check if the user exists
let owner = sqlx::query_as::<_, data::user>(
r#"
SELECT *
FROM users
WHERE key = ?
"#,
)
.bind(&key)
.fetch_one(&pool)
.await;
let owner = match owner {
Ok(user) => {
info!("User found in DB: {}", key);
user.username
}
Err(e) => {
error!("DB select error {}: {} Most likely because the Key is not valid", key, e);
return (
axum::http::StatusCode::INTERNAL_SERVER_ERROR,
"Your key is not valid",
)
.into_response();
}
};
// gets the content type from the headers
let content_type = headers
.get("content-type")
.and_then(|hv| hv.to_str().ok())
-.unwrap_or("application/octet-stream")
+.unwrap_or("unknown")
.to_string();
// gets the download limit from the headers
let download_limit = headers
@@ -80,6 +134,12 @@ pub async fn upload(Extension(pool): Extension<AnyPool>,
.and_then(|hv| hv.to_str().ok()) // Option<&str>
.and_then(|s| s.parse::<i32>().ok()) // Option<u32>
.unwrap_or(1); // u32
//get filename from the headers
let file_name = headers
.get("file_name")
.and_then(|hv| hv.to_str().ok())
.unwrap_or("unknown")
.to_string();
//generate a random UUID for the file ID
let id = {
// Fallback to random UUID if body is too small
@@ -87,9 +147,9 @@ pub async fn upload(Extension(pool): Extension<AnyPool>,
Uuid::from_u128(rng.random::<u128>()).to_string()
};
//create the directory if it doesn't exist
-let dir = Path::new(&config.data_path);
+let dir = PathBuf::new(&config.data_path);
if let Err(e) = fs::create_dir_all(dir).await {
-warn!("could not make dir at {} error: {}", &config.data_path ,e);
+warn!("could not make dir at {} error: {}", &config.data_path, e);
return (
axum::http::StatusCode::INTERNAL_SERVER_ERROR,
"Directory creation error",
@@ -98,20 +158,8 @@ pub async fn upload(Extension(pool): Extension<AnyPool>,
}
//create the file path
// the file path is the directory + the file ID + file type if file type is not application/x-executable
-if content_type == "application/x-executable" {
-info!("File type is application/x-executable");
-} else {
info!("File type is {}", content_type);
-}
-let file_path = dir.join(
-if content_type == "application/x-executable" {
-format!("{}",id)
-} else {
-format!("{}.{}",
-id,
-content_type.split('/').last().unwrap_or("bin"))
-},
-);
+let file_path = dir.join(&id);
if let Err(e) = fs::write(&file_path, &body).await {
warn!("write error {}: {}", id, e);
@@ -127,11 +175,17 @@ pub async fn upload(Extension(pool): Extension<AnyPool>,
let download_count = 0;
let download_url = match config.use_tls {
true => format!("https://{}/download/{}", config.base_url, id),
false => format!("http://{}/download/{}", config.base_url, id),
};
if let Err(e) = sqlx::query(
r#"
INSERT INTO files
-(id, content_type, upload_time, download_limit, download_count, file_size)
-VALUES (?, ?, ?, ?, ?, ?)
+(id, content_type, upload_time, download_limit, download_count, file_size, download_url, file_name, owner)
+VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
"#,
)
.bind(&id)
@@ -140,6 +194,9 @@ pub async fn upload(Extension(pool): Extension<AnyPool>,
.bind(download_limit)
.bind(download_count)
.bind(file_size as i64)
.bind(&download_url)
.bind(&file_name)
.bind(&owner)
.execute(&pool)
.await
{
@@ -151,13 +208,273 @@ pub async fn upload(Extension(pool): Extension<AnyPool>,
.into_response();
}
let uploaded_file = data::File {
id,
+file_name,
content_type,
upload_time,
download_limit,
download_count,
file_size,
+download_url,
+owner,
};
Json(uploaded_file).into_response()
}
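
For reference, a minimal client-side sketch of the new key-based upload flow described in the handler's doc comment. This is not part of the commit: it assumes the reqwest and tokio crates, and the key, file name, and URL are placeholder values.

```rust
// Hypothetical client for POST /upload; reqwest/tokio are assumed, not part of this commit.
use reqwest::Client;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder key obtained from /user/register, placeholder local file.
    let key = "your-registration-key";
    let bytes = std::fs::read("./example.pdf")?;

    let resp = Client::new()
        .post("http://localhost:3000/upload")
        .header("key", key)                        // required by the handler
        .header("file_name", "example.pdf")        // optional metadata
        .header("content-type", "application/pdf") // optional
        .header("download_limit", "3")             // optional, defaults to 1
        .body(bytes)
        .send()
        .await?;

    // The handler responds with the stored file metadata as JSON,
    // including the generated download_url.
    println!("{}", resp.text().await?);
    Ok(())
}
```
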
/// This is the file download handler
/// This function handles the file download process.
/// It retrieves the file metadata from the database
/// and returns the file as a response.
/// It also logs the IP address of the client making the request.
/// example request: curl -X GET http://localhost:3000/download/<uuid>
/// requires the following path parameter:
/// - uuid: the UUID of the file (not optional)
pub async fn download_file(
Path(uuid): Path<String>, // the file UUID, taken from the request path
Extension(pool): Extension<AnyPool>,
ConnectInfo(addr): ConnectInfo<SocketAddr>,
Extension(config): Extension<data::Config>,
// Note: no request body; a GET handler shouldn't take one
) -> Response {
// Get UUID directly from path
info!("Download request for UUID: {}", uuid);
// Log the IP address of the client and the call
let ip = addr.ip().to_string();
info!("Received download request for {} from IP: {}", uuid, ip);
// find file by uuid in the config.data_path
let file_path = PathBuf::new(&config.data_path).join(&uuid);
if !file_path.exists() {
error!("File not found: {}", file_path.display());
return (
axum::http::StatusCode::NOT_FOUND,
"File not found",
)
.into_response();
}
// Check if the file exists in the database
let file = sqlx::query_as::<_, data::File>(
r#"
SELECT *
FROM files
WHERE id = ?
"#,
)
.bind(&uuid)
.fetch_one(&pool)
.await;
let file = match file {
Ok(file) => {
info!("File found in DB: {}", uuid);
file
}
Err(e) => {
error!("DB select error {}: {}", uuid, e);
return (
axum::http::StatusCode::INTERNAL_SERVER_ERROR,
"Database select error",
)
.into_response();
}
};
//update download count
if let Err(e) = sqlx::query(
r#"
UPDATE files
SET download_count = download_count + 1
WHERE id = ?
"#,
)
.bind(&uuid)
.execute(&pool)
.await
{
error!("DB update error {}: {}", uuid, e);
return (
axum::http::StatusCode::INTERNAL_SERVER_ERROR,
"Database update error",
)
.into_response();
}
info!("Update Download Count Sucess for UUID: {}", uuid);
//read the file into bytes for the response body
let file_bytes = match fs::read(&file_path).await {
Ok(file) => file,
Err(e) => {
error!("File read error {}: {}", uuid, e);
return (
axum::http::StatusCode::INTERNAL_SERVER_ERROR,
"File read error",
)
.into_response();
}
};
//if download count is greater or equal to download limit delete the file and remove it from the database
if (file.download_count) >= file.download_limit {
if let Err(e) = fs::remove_file(&file_path).await {
error!("File delete error {}: {}", uuid, e);
return (
axum::http::StatusCode::INTERNAL_SERVER_ERROR,
"File delete error",
)
.into_response();
}
if let Err(e) = sqlx::query(
r#"
DELETE FROM files
WHERE id = ?
"#,
)
.bind(&uuid)
.execute(&pool)
.await
{
error!("DB delete error {}: {}", uuid, e);
return (
axum::http::StatusCode::INTERNAL_SERVER_ERROR,
"Database delete error",
)
.into_response();
}
info!("File deleted from DB because max download limit was reached: {}", uuid);
}
// return the file as a response
return (
axum::http::StatusCode::OK,
axum::response::IntoResponse::into_response(
axum::response::Response::builder()
.header("Content-Disposition", format!("attachment; filename=\"{}\"", uuid))
.header("Content-Type", format!("{}", &file.content_type ))
.header("Content-Length", file.file_size)
.header("filename", file.file_name)
.body(axum::body::Body::from(file_bytes))
.unwrap(),
),
)
.into_response()
}
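
A matching client sketch for the new GET /download/{uuid} route; again reqwest and tokio are assumed and the UUID is a placeholder, not something taken from this commit.

```rust
// Hypothetical client for GET /download/{uuid}; reqwest/tokio assumed.
use reqwest::Client;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder UUID returned by a previous /upload call.
    let uuid = "00000000-0000-0000-0000-000000000000";
    let url = format!("http://localhost:3000/download/{}", uuid);

    let resp = Client::new().get(&url).send().await?;

    // The handler sets Content-Type, Content-Disposition, and a "filename"
    // header before streaming the raw bytes; save them under that name.
    // Note: once download_count reaches download_limit the server deletes
    // the file, so later requests for the same UUID will fail.
    let name = resp
        .headers()
        .get("filename")
        .and_then(|v| v.to_str().ok())
        .unwrap_or("download.bin")
        .to_string();
    let bytes = resp.bytes().await?;
    std::fs::write(&name, &bytes)?;
    Ok(())
}
```
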
/// Handler to register a new user
/// This function registers a new user.
/// It receives the user data in the request headers,
/// saves it to the database,
/// and returns the user data as a JSON response.
/// It also logs the IP address of the client making the request.
/// example request: curl -X POST -H "username: <username>" -H "password: <password>" http://localhost:3000/register
/// requires the following headers:
/// - username: the username of the user (not optional)
/// - password: the password of the user (not optional)
pub async fn register_user(
Extension(pool): Extension<AnyPool>,
ConnectInfo(addr): ConnectInfo<SocketAddr>,
Extension(config): Extension<data::Config>,
headers: HeaderMap,
body: Bytes,
) -> Response {
//log the IP address of the client and the call
let ip = addr.ip().to_string();
info!("Received update from IP: {}", ip);
//check if registration is allowed
if !config.allow_register {
return (
axum::http::StatusCode::FORBIDDEN,
"Registration is not allowed",
)
.into_response();
}
// gets the username from the headers; return an error if the header is not supplied
let username = match headers.get("username") {
Some(hv) => hv.to_str().unwrap_or("unknown").to_string(),
None => {
return (
axum::http::StatusCode::BAD_REQUEST,
"Username header not supplied",
)
.into_response();
}
};
let password = match headers.get("password") {
Some(hv) => hv.to_str().unwrap_or("unknown").to_string(),
None => {
return (
axum::http::StatusCode::BAD_REQUEST,
"Password header not supplied",
)
.into_response();
}
};
//generate a random UUID for the user key
let key = {
// Random UUID, same scheme as the file IDs above
let mut rng = rand::rng();
Uuid::from_u128(rng.random::<u128>()).to_string()
};
// check if the user already exists
let user = sqlx::query_as::<_, data::user>(
r#"
SELECT *
FROM users
WHERE username = ?
"#,
)
.bind(&username)
.fetch_one(&pool)
.await;
match user {
Ok(_) => {
info!("User already exists: {}", username);
return (
axum::http::StatusCode::BAD_REQUEST,
"User already exists",
)
.into_response();
}
Err(e) => {
warn!("DB select error {}: {}", username, e);
}
}
//add the user to the database
if let Err(e) = sqlx::query(
r#"
INSERT INTO users
(key, username, password)
VALUES (?, ?, ?)
"#,
)
.bind(&key)
.bind(&username)
.bind(&password)
.execute(&pool)
.await
{
error!("DB insert error {}: {}", key, e);
return (
axum::http::StatusCode::INTERNAL_SERVER_ERROR,
"Database insert error",
)
.into_response();
}
info!("User registered: {}", username);
//return the user as a response
let registered_user = json!({
"key": key,
"username": username,
});
Json(registered_user)
.into_response()
}
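
Finally, a sketch of registering against the new /user/register route (reqwest and tokio assumed, placeholder credentials). The key in the JSON response is what the /upload handler later expects in its key header.

```rust
// Hypothetical client for POST /user/register; reqwest/tokio assumed.
use reqwest::Client;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let resp = Client::new()
        .post("http://localhost:3000/user/register")
        .header("username", "alice")                // placeholder credentials
        .header("password", "correct-horse-battery-staple")
        .send()
        .await?;

    // On success the handler returns {"key": "...", "username": "..."}.
    println!("{}", resp.text().await?);
    Ok(())
}
```
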

View File

@@ -13,11 +13,14 @@ use sqlx::FromRow;
#[derive(FromRow, Serialize)]
pub struct File {
pub id: String,
+pub file_name: String,
pub content_type: String,
pub upload_time: i64,
pub download_limit: i32,
pub download_count: i32,
pub file_size: i64,
+pub download_url: String,
+pub owner: String,
}
/// This struct is used to represent the configuration settings for the application.
@@ -34,4 +37,14 @@ pub struct Config {
pub listener_addr: String,
pub log_level: String,
pub log_location: String,
+pub use_tls: bool,
+pub base_url: String,
+pub allow_register: bool,
+}
+#[derive(FromRow, Serialize)]
+pub struct user {
+pub key: String,
+pub username: String,
+pub password: String,
}
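
Since File derives Serialize and is returned directly via Json, the /upload and /all_files responses take the shape sketched below. This is illustration only: the values are made up, and it assumes the serde_json crate added in Cargo.toml above.

```rust
// Sketch of the JSON shape of a stored file, mirroring the File struct above.
// All field values here are invented for illustration.
fn main() {
    let example = serde_json::json!({
        "id": "00000000-0000-0000-0000-000000000000",
        "file_name": "example.pdf",
        "content_type": "application/pdf",
        "upload_time": 1746800000_i64,
        "download_limit": 3,
        "download_count": 0,
        "file_size": 1024_i64,
        "download_url": "http://localhost:3000/download/00000000-0000-0000-0000-000000000000",
        "owner": "alice"
    });
    println!("{}", serde_json::to_string_pretty(&example).unwrap());
}
```
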

View File

@@ -1,11 +1,11 @@
use axum::{
extract::DefaultBodyLimit,
-//response::IntoResponse,
routing::{get, post},
+response::IntoResponse,
Extension, Router,
};
+use log::{debug, error, info, warn};
use sqlx::{any::AnyPoolOptions, migrate::MigrateDatabase, AnyPool, Sqlite};
-use log::{info, warn, error, debug};
use std::path::Path;
use tokio::fs;
@@ -31,19 +31,50 @@ async fn main() {
sqlx::any::install_default_drivers();
// Load the configuration from environment variables
let config = data::Config {
-db_type: std::env::var("BITBEAM_DB_TYPE").unwrap_or_else(|_| "postgres".to_string()),
+db_type: std::env::var("BITBEAM_DB_TYPE").unwrap_or_else(|_| "sqlite".to_string()),
-database_url: match std::env::var("BITBEAM_DB_TYPE").unwrap().as_str() {
-"postgres" => std::env::var("BITBEAM_DATABASE_URL")
-.expect("BITBEAM_DATABASE_URL must be set for Postgres"),
-"sqlite" => std::env::var("BITBEAM_DATABASE_URL")
-.expect("BITBEAM_DATABASE_URL must be set for SQLite"),
-other => panic!("Unsupported BITBEAM_DB_TYPE: {}", other),
+// Determine the correct database URL
+database_url: match std::env::var("BITBEAM_DB_TYPE")
+.unwrap_or_else(|_| "sqlite".to_string())
+.as_str()
+{
+"postgres" => {
+// For Postgres, BITBEAM_DATABASE_URL must be set
+std::env::var("BITBEAM_DATABASE_URL")
+.expect("BITBEAM_DATABASE_URL must be set for Postgres")
+}
+"sqlite" => {
+// For SQLite, use BITBEAM_DATABASE_URL if set, otherwise default
+std::env::var("BITBEAM_DATABASE_URL")
+.unwrap_or_else(|_| "sqlite://./bitbeam.sqlite".to_string())
+}
+other => {
+panic!("Unsupported BITBEAM_DB_TYPE: {}", other);
+}
},
-data_path: std::env::var("BITBEAM_DATA_PATH").unwrap_or_else(|_| "./media_store".to_string()),
+data_path: std::env::var("BITBEAM_DATA_PATH")
+.unwrap_or_else(|_| "./media_store".to_string()),
port: std::env::var("BITBEAM_PORT").unwrap_or_else(|_| "3000".to_string()),
listener_addr: std::env::var("BITBEAM_ADDR").unwrap_or_else(|_| "127.0.0.1".to_string()),
log_level: std::env::var("BITBEAM_LOG_LEVEL").unwrap_or_else(|_| "info".to_string()),
-log_location: std::env::var("BITBEAM_LOG_LOCATION").unwrap_or_else(|_| "./bitbeam.log".to_string()),
+log_location: std::env::var("BITBEAM_LOG_LOCATION")
+.unwrap_or_else(|_| "./bitbeam.log".to_string()),
+use_tls: std::env::var("BITBEAM_USE_TLS")
+.unwrap_or_else(|_| "false".to_string())
+.parse()
+.unwrap_or(false),
+base_url: std::env::var("BITBEAM_BASE_URL").unwrap_or_else(|_| {
+format!(
+"localhost:{}",
+std::env::var("BITBEAM_PORT").unwrap_or_else(|_| "3000".to_string())
+)
+.to_string()
+}),
+allow_register: std::env::var("BITBEAM_ALLOW_REGISTER")
+.unwrap_or_else(|_| "true".to_string())
+.parse()
+.unwrap_or(true),
};
// Setting up the logging system
// The log level is set based on the environment variable BITBEAM_LOG_LEVEL
@@ -73,7 +104,7 @@ async fn main() {
Err(error) => {
error!("Error creating database: {}", error);
panic!("error: {}", error)
-},
+}
}
} else {
info!("Database already exists");
@@ -94,11 +125,29 @@ async fn main() {
r#" r#"
CREATE TABLE IF NOT EXISTS files ( CREATE TABLE IF NOT EXISTS files (
id TEXT PRIMARY KEY, id TEXT PRIMARY KEY,
file_name TEXT NOT NULL,
content_type TEXT NOT NULL, content_type TEXT NOT NULL,
upload_time BIGINT NOT NULL, upload_time BIGINT NOT NULL,
download_limit INTEGER NOT NULL, download_limit INTEGER NOT NULL,
download_count INTEGER NOT NULL, download_count INTEGER NOT NULL,
file_size BIGINT NOT NULL file_size BIGINT NOT NULL,
download_url TEXT NOT NULL,
owner TEXT NOT NULL
);
"#,
)
.execute(&pool)
.await
{
info!("DB created");
};
// create the user table
if let Err(_e) = sqlx::query(
r#"
CREATE TABLE IF NOT EXISTS users (
key TEXT PRIMARY KEY,
username TEXT NOT NULL,
password TEXT NOT NULL
); );
"#, "#,
) )
@@ -107,11 +156,10 @@ async fn main() {
{
info!("DB created");
};
//create the directory if it doesn't exist
let dir = Path::new(&config.data_path);
if let Err(e) = fs::create_dir_all(dir).await {
-warn!("could not make dir at {} error: {}", &config.data_path ,e);
+warn!("could not make dir at {} error: {}", &config.data_path, e);
}
//let file_path = dir.join(&id); //let file_path = dir.join(&id);
@@ -122,6 +170,8 @@ async fn main() {
.route("/", get(|| async { "Hello, World!" })) .route("/", get(|| async { "Hello, World!" }))
.route("/upload", post(api::upload)) .route("/upload", post(api::upload))
.route("/all_files", get(api::all_files)) .route("/all_files", get(api::all_files))
.route("/download/{uuid}", get(api::download_file))
.route("/user/register", post(api::register_user))
.layer(DefaultBodyLimit::max(100 * 1024 * 1024)) .layer(DefaultBodyLimit::max(100 * 1024 * 1024))
.layer(Extension(pool)) .layer(Extension(pool))
.layer(Extension(config.clone())) .layer(Extension(config.clone()))
@@ -130,10 +180,15 @@ async fn main() {
// The web server is started using the Axum framework
// The server listens on the address and port specified in the configuration
axum::serve(
-match tokio::net::TcpListener::bind(format!("{}:{}",&config.listener_addr,&config.port)).await {
+match tokio::net::TcpListener::bind(format!("{}:{}", &config.listener_addr, &config.port))
+.await
+{
Ok(listener) => listener,
Err(e) => {
-error!("Error binding to address {}:{} : {}",&config.listener_addr,&config.port, e);
+error!(
+"Error binding to address {}:{} : {}",
+&config.listener_addr, &config.port, e
+);
return;
}
},
@@ -149,7 +204,10 @@ async fn main() {
/// It formats the log messages to include the date, time, log level, target, and message.
/// It also sets the log level based on the provided level filter.
/// It takes the log file path and log level as parameters.
-fn init_logging(log_file_path: &str, level: log::LevelFilter) -> Result<(), Box<dyn std::error::Error>> {
+fn init_logging(
+log_file_path: &str,
+level: log::LevelFilter,
+) -> Result<(), Box<dyn std::error::Error>> {
// Build a Dispatch for stdout
let stdout_dispatch = fern::Dispatch::new()
.format(|out, message, record| {
@@ -188,6 +246,3 @@ fn init_logging(log_file_path: &str, level: log::LevelFilter) -> Result<(), Box<
Ok(())
}