diff --git a/.direnv/bin/nix-direnv-reload b/.direnv/bin/nix-direnv-reload index f34801b..ac0ccf8 100755 --- a/.direnv/bin/nix-direnv-reload +++ b/.direnv/bin/nix-direnv-reload @@ -1,19 +1,19 @@ #!/usr/bin/env bash set -e -if [[ ! -d "/home/mrfluffy/bitBeam" ]]; then +if [[ ! -d "/home/work/Documents/rust/bitBeam" ]]; then echo "Cannot find source directory; Did you move it?" - echo "(Looking for "/home/mrfluffy/bitBeam")" + echo "(Looking for "/home/work/Documents/rust/bitBeam")" echo 'Cannot force reload with this script - use "direnv reload" manually and then try again' exit 1 fi # rebuild the cache forcefully -_nix_direnv_force_reload=1 direnv exec "/home/mrfluffy/bitBeam" true +_nix_direnv_force_reload=1 direnv exec "/home/work/Documents/rust/bitBeam" true # Update the mtime for .envrc. # This will cause direnv to reload again - but without re-building. -touch "/home/mrfluffy/bitBeam/.envrc" +touch "/home/work/Documents/rust/bitBeam/.envrc" # Also update the timestamp of whatever profile_rc we have. # This makes sure that we know we are up to date. 
-touch -r "/home/mrfluffy/bitBeam/.envrc" "/home/mrfluffy/bitBeam/.direnv"/*.rc +touch -r "/home/work/Documents/rust/bitBeam/.envrc" "/home/work/Documents/rust/bitBeam/.direnv"/*.rc diff --git a/.direnv/flake-profile b/.direnv/flake-profile index 0c05709..e289079 120000 --- a/.direnv/flake-profile +++ b/.direnv/flake-profile @@ -1 +1 @@ -flake-profile-1-link \ No newline at end of file +flake-profile-4-link \ No newline at end of file diff --git a/.direnv/flake-profile-1-link b/.direnv/flake-profile-1-link deleted file mode 120000 index d2b9fa4..0000000 --- a/.direnv/flake-profile-1-link +++ /dev/null @@ -1 +0,0 @@ -/nix/store/sc88ikf5zh532nisyr5v9h6f6q6fay54-nix-shell-env \ No newline at end of file diff --git a/.direnv/flake-profile-4-link b/.direnv/flake-profile-4-link new file mode 120000 index 0000000..7572a67 --- /dev/null +++ b/.direnv/flake-profile-4-link @@ -0,0 +1 @@ +/nix/store/xvayrqcb8vdiyns5k3j05ia9aflzjrxi-nix-shell-env \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 2ae1a95..bdf6a08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -175,6 +175,7 @@ dependencies = [ "log", "rand 0.9.1", "serde", + "serde_json", "sqlx", "tokio", "uuid", diff --git a/Cargo.toml b/Cargo.toml index 326a2c4..8965579 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,9 +9,10 @@ bytes = "1.10" chrono = {version = "0.4", features = ["serde"]} extract = "0.1" fern = "0.7.1" -log = {version = "0.4.27", feature = "std"} +log = {version = "0.4", features = ["std"]} rand = "0.9" serde = {version = "1.0", features = ["derive"]} +serde_json = "1.0.140" sqlx = { version = "0.8", features = [ "runtime-tokio", # pick exactly one runtime "tls-rustls", # pick exactly one TLS backend diff --git a/flake.nix b/flake.nix index a874bf1..4d4d30a 100644 --- a/flake.nix +++ b/flake.nix @@ -79,6 +79,7 @@ RUST_SRC_PATH = "${pkgs.rust-bin.stable.latest.default}/lib/rustlib/src/rust/library"; BITBEAM_DATABASE_URL = "sqlite://./bitbeam.sqlite"; BITBEAM_DB_TYPE = "sqlite"; + DATABASE_URL = 
"sqlite://./bitbeam.sqlite"; }; } ); diff --git a/src/api.rs b/src/api.rs index 245a953..982ff49 100644 --- a/src/api.rs +++ b/src/api.rs @@ -1,27 +1,35 @@ use axum::{ body::Bytes, - extract::ConnectInfo, + extract::{ConnectInfo, Path}, http::{HeaderMap, StatusCode}, response::{IntoResponse, Response}, Extension, Json, }; + use chrono::Utc; +use log::{error, info, warn}; use rand::Rng; use sqlx::AnyPool; -use std::path::Path; +use std::path::Path as PathBuf; use tokio::fs; use uuid::Uuid; -use log::{info, warn, error}; -use std::net::SocketAddr; use crate::data; - +use std::net::SocketAddr; +use serde_json::json; /// Handler to return all files as JSON /// This function retrieves all files from the database /// and returns them as a JSON response. /// It also logs the IP address of the client making the request. -pub async fn all_files(Extension(pool): Extension, ConnectInfo(addr): ConnectInfo) -> impl IntoResponse { +/// example request: curl -X GET http://localhost:3000/all_files +/// requires no parameters +/// returns a JSON array of files +/// TODO: add user authentication +pub async fn all_files( + Extension(pool): Extension, + ConnectInfo(addr): ConnectInfo, +) -> impl IntoResponse { //log the IP address of the client and the call let ip = addr.ip().to_string(); info!("Received an all_files request from IP: {}", ip); @@ -40,7 +48,7 @@ pub async fn all_files(Extension(pool): Extension, ConnectInfo(addr): C Ok(files) => { info!("DB select all success"); (StatusCode::OK, Json(files)).into_response() - }, + } Err(e) => { warn!("DB select all error: {}", e); ( @@ -58,21 +66,67 @@ pub async fn all_files(Extension(pool): Extension, ConnectInfo(addr): C /// saves it to the server's file system, /// and stores the file metadata in the database. /// It also logs the IP address of the client making the request. 
-pub async fn upload(Extension(pool): Extension, - ConnectInfo(addr): ConnectInfo, - Extension(config): Extension , - headers: HeaderMap, - body: Bytes, - ) -> Response { +/// example request: curl -X POST -H "key: " -H "file_name: " -H "content-type: " -H "download_limit: " --data-binary @ http://localhost:3000/upload +/// requires the following headers: +/// - key: the key of the user (not optional) +/// - file_name: the name of the file (optional) +/// - content-type: the content type of the file (optional) +/// - download_limit: the download limit of the file (optional) +pub async fn upload( + Extension(pool): Extension, + ConnectInfo(addr): ConnectInfo, + Extension(config): Extension, + headers: HeaderMap, + body: Bytes, +) -> Response { //log the IP address of the client and the call let ip = addr.ip().to_string(); info!("Received update from IP: {}", ip); + + //get the key from the headers + let key = match headers.get("key") { + Some(hv) => hv.to_str().unwrap_or("unknown").to_string(), + None => { + return ( + axum::http::StatusCode::BAD_REQUEST, + "Key header not supplied", + ) + .into_response(); + } + }; + + //check if the user exists + let owner = sqlx::query_as::<_, data::user>( + r#" + SELECT * + FROM users + WHERE key = ? 
+ "#, + ) + .bind(&key) + .fetch_one(&pool) + .await; + let owner = match owner { + Ok(user) => { + info!("User found in DB: {}", key); + user.username + } + Err(e) => { + error!("DB select error {}: {} Most likely because the Key is not valid", key, e); + return ( + axum::http::StatusCode::INTERNAL_SERVER_ERROR, + "Your key is not valid", + ) + .into_response(); + } + }; + // gets the content type from the headers let content_type = headers .get("content-type") .and_then(|hv| hv.to_str().ok()) - .unwrap_or("application/octet-stream") + .unwrap_or("unknown") .to_string(); // gets the download limit from the headers let download_limit = headers @@ -80,6 +134,12 @@ pub async fn upload(Extension(pool): Extension, .and_then(|hv| hv.to_str().ok()) // Option<&str> .and_then(|s| s.parse::().ok()) // Option .unwrap_or(1); // u32 + //get filename from the headers + let file_name = headers + .get("file_name") + .and_then(|hv| hv.to_str().ok()) + .unwrap_or("unknown") + .to_string(); //generate a random UUID for the file ID let id = { // Fallback to random UUID if body is too small @@ -87,9 +147,9 @@ pub async fn upload(Extension(pool): Extension, Uuid::from_u128(rng.random::()).to_string() }; //create the directory if it doesn't exist - let dir = Path::new(&config.data_path); + let dir = PathBuf::new(&config.data_path); if let Err(e) = fs::create_dir_all(dir).await { - warn!("could not make dir at {} error: {}", &config.data_path ,e); + warn!("could not make dir at {} error: {}", &config.data_path, e); return ( axum::http::StatusCode::INTERNAL_SERVER_ERROR, "Directory creation error", @@ -98,20 +158,8 @@ pub async fn upload(Extension(pool): Extension, } //create the file path // the file path is the directory + the file ID + file type if file type is not application/x-executable - if content_type == "application/x-executable" { - info!("File type is application/x-executable"); - } else { - info!("File type is {}", content_type); - } - let file_path = dir.join( - if 
content_type == "application/x-executable" { - format!("{}",id) - } else { - format!("{}.{}", - id, - content_type.split('/').last().unwrap_or("bin")) - }, - ); + info!("File type is {}", content_type); + let file_path = dir.join(&id); if let Err(e) = fs::write(&file_path, &body).await { warn!("write error {}: {}", id, e); @@ -127,11 +175,17 @@ pub async fn upload(Extension(pool): Extension, let download_count = 0; + let download_url = match config.use_tls { + true => format!("https://{}/download/{}", config.base_url, id), + false => format!("http://{}/download/{}", config.base_url, id), + }; + + if let Err(e) = sqlx::query( r#" INSERT INTO files - (id, content_type, upload_time, download_limit, download_count, file_size) - VALUES (?, ?, ?, ?, ?, ?) + (id, content_type, upload_time, download_limit, download_count, file_size, download_url, file_name, owner) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) "#, ) .bind(&id) @@ -140,6 +194,9 @@ pub async fn upload(Extension(pool): Extension, .bind(download_limit) .bind(download_count) .bind(file_size as i64) + .bind(&download_url) + .bind(&file_name) + .bind(&owner) .execute(&pool) .await { @@ -151,13 +208,273 @@ pub async fn upload(Extension(pool): Extension, .into_response(); } + let uploaded_file = data::File { id, + file_name, content_type, upload_time, download_limit, download_count, file_size, + download_url, + owner, }; Json(uploaded_file).into_response() } + +/// This is The file Download handler +/// This function handles the file download process. +/// It retrieves the file metadata from the database +/// and returns the file as a response. +/// It also logs the IP address of the client making the request. 
+/// example request: curl -X GET http://localhost:3000/download/ +/// requires the following path parameter: +/// - uuid: the UUID of the file (not optional) +pub async fn download_file( + Path(uuid): Path, // Add this extractor + Extension(pool): Extension, + ConnectInfo(addr): ConnectInfo, + Extension(config): Extension, + // Remove body: Bytes, // <-- GET handler shouldn't have a body +) -> Response { + + // Get UUID directly from path + info!("Download request for UUID: {}", uuid); + // Log the IP address of the client and the call + let ip = addr.ip().to_string(); + info!("Received download request for {} from IP: {}", uuid, ip); + + // find file by uuid in the config.data_path + let file_path = PathBuf::new(&config.data_path).join(&uuid); + + if !file_path.exists() { + error!("File not found: {}", file_path.display()); + return ( + axum::http::StatusCode::NOT_FOUND, + "File not found", + ) + .into_response(); + } + // Check if the file exists in the database + let file = sqlx::query_as::<_, data::File>( + r#" + SELECT * + FROM files + WHERE id = ? + "#, + ) + .bind(&uuid) + .fetch_one(&pool) + .await; + let file = match file { + Ok(file) => { + info!("File found in DB: {}", uuid); + file + } + Err(e) => { + error!("DB select error {}: {}", uuid, e); + return ( + axum::http::StatusCode::INTERNAL_SERVER_ERROR, + "Database select error", + ) + .into_response(); + } + }; + + //update download count + if let Err(e) = sqlx::query( + r#" + UPDATE files + SET download_count = download_count + 1 + WHERE id = ? 
+ "#, + ) + .bind(&uuid) + .execute(&pool) + + .await + { + error!("DB update error {}: {}", uuid, e); + return ( + axum::http::StatusCode::INTERNAL_SERVER_ERROR, + "Database update error", + ) + .into_response(); + } + info!("Update Download Count Success for UUID: {}", uuid); + + //read the file from disk into bytes for the response body + let file_bytes = match fs::read(&file_path).await { + Ok(file) => file, + Err(e) => { + error!("File read error {}: {}", uuid, e); + return ( + axum::http::StatusCode::INTERNAL_SERVER_ERROR, + "File read error", + ) + .into_response(); + } + }; + + //if download count is greater or equal to download limit delete the file and remove it from the database + if (file.download_count) >= file.download_limit { + if let Err(e) = fs::remove_file(&file_path).await { + error!("File delete error {}: {}", uuid, e); + return ( + axum::http::StatusCode::INTERNAL_SERVER_ERROR, + "File delete error", + ) + .into_response(); + } + if let Err(e) = sqlx::query( + r#" + DELETE FROM files + WHERE id = ? + "#, + ) + .bind(&uuid) + .execute(&pool) + .await + { + error!("DB delete error {}: {}", uuid, e); + return ( + axum::http::StatusCode::INTERNAL_SERVER_ERROR, + "Database delete error", + ) + .into_response(); + } + info!("File deleted from DB because max download limit was reached: {}", uuid); + } + + // return the file as a response + return ( + axum::http::StatusCode::OK, + axum::response::IntoResponse::into_response( + axum::response::Response::builder() + .header("Content-Disposition", format!("attachment; filename=\"{}\"", uuid)) + .header("Content-Type", format!("{}", &file.content_type )) + .header("Content-Length", file.file_size) + .header("filename", file.file_name) + .body(axum::body::Body::from(file_bytes)) + .unwrap(), + ), + ) + .into_response() +} + +/// Handler to register a new user +/// This function registers a new user. +/// It receives the user data in the request headers, +/// saves it to the database, +/// and returns the user data as a JSON response. 
+/// It also logs the IP address of the client making the request. +/// example request: curl -X POST -H "username: " -H "password: " http://localhost:3000/register +/// requires the following headers: +/// - username: the username of the user (not optional) +/// - password: the password of the user (not optional) +pub async fn register_user ( + Extension(pool): Extension, + ConnectInfo(addr): ConnectInfo, + Extension(config): Extension, + headers: HeaderMap, + body: Bytes, +) -> Response { + //log the IP address of the client and the call + let ip = addr.ip().to_string(); + info!("Received update from IP: {}", ip); + + //check if registration is allowed + if !config.allow_register { + return ( + axum::http::StatusCode::FORBIDDEN, + "Registration is not allowed", + ) + .into_response(); + } + + // gets the username from the headers; returns an error if the header is not supplied + let username = match headers .get("username") { + Some(hv) => hv.to_str().unwrap_or("unknown").to_string(), + None => { + return ( + axum::http::StatusCode::BAD_REQUEST, + "Username header not supplied", + ) + .into_response(); + } + }; + let password = match headers .get("password") { + Some(hv) => hv.to_str().unwrap_or("unknown").to_string(), + None => { + return ( + axum::http::StatusCode::BAD_REQUEST, + "Password header not supplied", + ) + .into_response(); + } + }; + + //generate a random UUID for the user key + let key = { + // Fallback to random UUID if body is too small + let mut rng = rand::rng(); + Uuid::from_u128(rng.random::()).to_string() + }; + + // check if the user already exists + let user = sqlx::query_as::<_, data::user>( + r#" + SELECT * + FROM users + WHERE username = ? 
+ "#, + ) + .bind(&username) + .fetch_one(&pool) + .await; + match user { + Ok(_) => { + info!("User already exists: {}", username); + return ( + axum::http::StatusCode::BAD_REQUEST, + "User already exists", + ) + .into_response(); + } + Err(e) => { + warn!("DB select error {}: {}", username, e); + } + } + + //add the user to the database + if let Err(e) = sqlx::query( + r#" + INSERT INTO users + (key, username, password) + VALUES (?, ?, ?) + "#, + ) + .bind(&key) + .bind(&username) + .bind(&password) + .execute(&pool) + .await + { + error!("DB insert error {}: {}", key, e); + return ( + axum::http::StatusCode::INTERNAL_SERVER_ERROR, + "Database insert error", + ) + .into_response(); + } + info!("User registered: {}", username); + + //return the user as a response + let registered_user = json!({ + "key": key, + "username": username, + }); + Json(registered_user) + .into_response() +} diff --git a/src/data.rs b/src/data.rs index 37929b4..c6ee3e2 100644 --- a/src/data.rs +++ b/src/data.rs @@ -13,11 +13,14 @@ use sqlx::FromRow; #[derive(FromRow, Serialize)] pub struct File { pub id: String, + pub file_name: String, pub content_type: String, pub upload_time: i64, pub download_limit: i32, pub download_count: i32, pub file_size: i64, + pub download_url: String, + pub owner: String, } /// This struct is used to represent the configuration settings for the application. 
@@ -34,4 +37,14 @@ pub struct Config { pub listener_addr: String, pub log_level: String, pub log_location: String, + pub use_tls: bool, + pub base_url: String, + pub allow_register: bool, +} + +#[derive(FromRow, Serialize)] +pub struct user { + pub key: String, + pub username: String, + pub password: String, } diff --git a/src/main.rs b/src/main.rs index aecb7b9..10da837 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,11 +1,11 @@ use axum::{ extract::DefaultBodyLimit, + //response::IntoResponse, routing::{get, post}, - response::IntoResponse, Extension, Router, }; +use log::{debug, error, info, warn}; use sqlx::{any::AnyPoolOptions, migrate::MigrateDatabase, AnyPool, Sqlite}; -use log::{info, warn, error, debug}; use std::path::Path; use tokio::fs; @@ -31,28 +31,59 @@ async fn main() { sqlx::any::install_default_drivers(); // Load the configuration from environment variables let config = data::Config { - db_type: std::env::var("BITBEAM_DB_TYPE").unwrap_or_else(|_| "postgres".to_string()), - database_url: match std::env::var("BITBEAM_DB_TYPE").unwrap().as_str() { - "postgres" => std::env::var("BITBEAM_DATABASE_URL") - .expect("BITBEAM_DATABASE_URL must be set for Postgres"), - "sqlite" => std::env::var("BITBEAM_DATABASE_URL") - .expect("BITBEAM_DATABASE_URL must be set for SQLite"), - other => panic!("Unsupported BITBEAM_DB_TYPE: {}", other), + db_type: std::env::var("BITBEAM_DB_TYPE").unwrap_or_else(|_| "sqlite".to_string()), + // Determine the correct database URL + database_url: match std::env::var("BITBEAM_DB_TYPE") + .unwrap_or_else(|_| "sqlite".to_string()) + .as_str() + { + "postgres" => { + // For Postgres, BITBEAM_DATABASE_URL must be set + std::env::var("BITBEAM_DATABASE_URL") + .expect("BITBEAM_DATABASE_URL must be set for Postgres") + } + + "sqlite" => { + // For SQLite, use BITBEAM_DATABASE_URL if set, otherwise default + std::env::var("BITBEAM_DATABASE_URL") + .unwrap_or_else(|_| "sqlite://./bitbeam.sqlite".to_string()) + } + + other => { + 
panic!("Unsupported BITBEAM_DB_TYPE: {}", other); + } }, - data_path: std::env::var("BITBEAM_DATA_PATH").unwrap_or_else(|_| "./media_store".to_string()), + data_path: std::env::var("BITBEAM_DATA_PATH") + .unwrap_or_else(|_| "./media_store".to_string()), port: std::env::var("BITBEAM_PORT").unwrap_or_else(|_| "3000".to_string()), listener_addr: std::env::var("BITBEAM_ADDR").unwrap_or_else(|_| "127.0.0.1".to_string()), log_level: std::env::var("BITBEAM_LOG_LEVEL").unwrap_or_else(|_| "info".to_string()), - log_location: std::env::var("BITBEAM_LOG_LOCATION").unwrap_or_else(|_| "./bitbeam.log".to_string()), + log_location: std::env::var("BITBEAM_LOG_LOCATION") + .unwrap_or_else(|_| "./bitbeam.log".to_string()), + use_tls: std::env::var("BITBEAM_USE_TLS") + .unwrap_or_else(|_| "false".to_string()) + .parse() + .unwrap_or(false), + base_url: std::env::var("BITBEAM_BASE_URL").unwrap_or_else(|_| { + format!( + "localhost:{}", + std::env::var("BITBEAM_PORT").unwrap_or_else(|_| "3000".to_string()) + ) + .to_string() + }), + allow_register: std::env::var("BITBEAM_ALLOW_REGISTER") + .unwrap_or_else(|_| "true".to_string()) + .parse() + .unwrap_or(true), }; // Setting up the logging system // The log level is set based on the environment variable BITBEAM_LOG_LEVEL let level = match config.log_level.as_str() { "debug" => log::LevelFilter::Debug, - "info" => log::LevelFilter::Info, - "warn" => log::LevelFilter::Warn, + "info" => log::LevelFilter::Info, + "warn" => log::LevelFilter::Warn, "error" => log::LevelFilter::Error, - _ => log::LevelFilter::Info, + _ => log::LevelFilter::Info, }; // Initialize the logging system let log_path = &config.log_location; @@ -73,7 +104,7 @@ async fn main() { Err(error) => { error!("Error creating database: {}", error); panic!("error: {}", error) - }, + } } } else { info!("Database already exists"); @@ -94,11 +125,14 @@ async fn main() { r#" CREATE TABLE IF NOT EXISTS files ( id TEXT PRIMARY KEY, + file_name TEXT NOT NULL, content_type TEXT NOT NULL, 
upload_time BIGINT NOT NULL, download_limit INTEGER NOT NULL, download_count INTEGER NOT NULL, - file_size BIGINT NOT NULL + file_size BIGINT NOT NULL, + download_url TEXT NOT NULL, + owner TEXT NOT NULL ); "#, ) @@ -107,11 +141,25 @@ async fn main() { { info!("DB created"); }; - + // create the user table + if let Err(_e) = sqlx::query( + r#" + CREATE TABLE IF NOT EXISTS users ( + key TEXT PRIMARY KEY, + username TEXT NOT NULL, + password TEXT NOT NULL + ); + "#, + ) + .execute(&pool) + .await + { + info!("DB created"); + }; //create the directory if it doesn't exist let dir = Path::new(&config.data_path); if let Err(e) = fs::create_dir_all(dir).await { - warn!("could not make dir at {} error: {}", &config.data_path ,e); + warn!("could not make dir at {} error: {}", &config.data_path, e); } //let file_path = dir.join(&id); @@ -122,6 +170,8 @@ async fn main() { .route("/", get(|| async { "Hello, World!" })) .route("/upload", post(api::upload)) .route("/all_files", get(api::all_files)) + .route("/download/{uuid}", get(api::download_file)) + .route("/user/register", post(api::register_user)) .layer(DefaultBodyLimit::max(100 * 1024 * 1024)) .layer(Extension(pool)) .layer(Extension(config.clone())) @@ -130,10 +180,15 @@ async fn main() { // The web server is started using the Axum framework // The server listens on the address and port specified in the configuration axum::serve( - match tokio::net::TcpListener::bind(format!("{}:{}",&config.listener_addr,&config.port)).await { + match tokio::net::TcpListener::bind(format!("{}:{}", &config.listener_addr, &config.port)) + .await + { Ok(listener) => listener, Err(e) => { - error!("Error binding to address {}:{} : {}",&config.listener_addr,&config.port, e); + error!( + "Error binding to address {}:{} : {}", + &config.listener_addr, &config.port, e + ); return; } }, @@ -149,16 +204,19 @@ async fn main() { /// It formats the log messages to include the date, time, log level, target, and message. 
/// It also sets the log level based on the provided level filter. /// It takes the log file path and log level as parameters. -fn init_logging(log_file_path: &str, level: log::LevelFilter) -> Result<(), Box> { +fn init_logging( + log_file_path: &str, + level: log::LevelFilter, +) -> Result<(), Box> { // Build a Dispatch for stdout let stdout_dispatch = fern::Dispatch::new() .format(|out, message, record| { out.finish(format_args!( "[{date}][{lvl}][{target}] {msg}", date = chrono::Local::now().format("%Y-%m-%d %H:%M:%S"), - lvl = record.level(), + lvl = record.level(), target = record.target(), - msg = message, + msg = message, )) }) .level(level) @@ -170,9 +228,9 @@ fn init_logging(log_file_path: &str, level: log::LevelFilter) -> Result<(), Box< out.finish(format_args!( "[{date}][{lvl}][{target}] {msg}", date = chrono::Local::now().format("%Y-%m-%d %H:%M:%S"), - lvl = record.level(), + lvl = record.level(), target = record.target(), - msg = message, + msg = message, )) }) .level(level) @@ -188,6 +246,3 @@ fn init_logging(log_file_path: &str, level: log::LevelFilter) -> Result<(), Box< Ok(()) } - - -