diff --git a/.direnv/bin/nix-direnv-reload b/.direnv/bin/nix-direnv-reload
index ac0ccf8..f34801b 100755
--- a/.direnv/bin/nix-direnv-reload
+++ b/.direnv/bin/nix-direnv-reload
@@ -1,19 +1,19 @@
 #!/usr/bin/env bash
 set -e
-if [[ ! -d "/home/work/Documents/rust/bitBeam" ]]; then
+if [[ ! -d "/home/mrfluffy/bitBeam" ]]; then
   echo "Cannot find source directory; Did you move it?"
-  echo "(Looking for "/home/work/Documents/rust/bitBeam")"
+  echo "(Looking for "/home/mrfluffy/bitBeam")"
   echo 'Cannot force reload with this script - use "direnv reload" manually and then try again'
   exit 1
 fi

 # rebuild the cache forcefully
-_nix_direnv_force_reload=1 direnv exec "/home/work/Documents/rust/bitBeam" true
+_nix_direnv_force_reload=1 direnv exec "/home/mrfluffy/bitBeam" true

 # Update the mtime for .envrc.
 # This will cause direnv to reload again - but without re-building.
-touch "/home/work/Documents/rust/bitBeam/.envrc"
+touch "/home/mrfluffy/bitBeam/.envrc"

 # Also update the timestamp of whatever profile_rc we have.
 # This makes sure that we know we are up to date.
-touch -r "/home/work/Documents/rust/bitBeam/.envrc" "/home/work/Documents/rust/bitBeam/.direnv"/*.rc
+touch -r "/home/mrfluffy/bitBeam/.envrc" "/home/mrfluffy/bitBeam/.direnv"/*.rc
diff --git a/.direnv/flake-inputs/004vk3n8jlx9vw4nzy1f0d2dhsy7wz52-source b/.direnv/flake-inputs/004vk3n8jlx9vw4nzy1f0d2dhsy7wz52-source
new file mode 120000
index 0000000..fd2b06d
--- /dev/null
+++ b/.direnv/flake-inputs/004vk3n8jlx9vw4nzy1f0d2dhsy7wz52-source
@@ -0,0 +1 @@
+/nix/store/004vk3n8jlx9vw4nzy1f0d2dhsy7wz52-source
\ No newline at end of file
diff --git a/.direnv/flake-inputs/52hxk3ygip5xv1jrjymnn4yh9rqikj91-source b/.direnv/flake-inputs/52hxk3ygip5xv1jrjymnn4yh9rqikj91-source
deleted file mode 120000
index d76f941..0000000
--- a/.direnv/flake-inputs/52hxk3ygip5xv1jrjymnn4yh9rqikj91-source
+++ /dev/null
@@ -1 +0,0 @@
-/nix/store/52hxk3ygip5xv1jrjymnn4yh9rqikj91-source
\ No newline at end of file
diff --git a/.direnv/flake-profile-1-link b/.direnv/flake-profile-1-link
index 34e0cb8..d2b9fa4 120000
--- a/.direnv/flake-profile-1-link
+++ b/.direnv/flake-profile-1-link
@@ -1 +1 @@
-/nix/store/vdijc3indsq6j6xbridfqjib4pkg6vhs-nix-shell-env
\ No newline at end of file
+/nix/store/sc88ikf5zh532nisyr5v9h6f6q6fay54-nix-shell-env
\ No newline at end of file
diff --git a/.direnv/flake-profile-a5d5b61aa8a61b7d9d765e1daf971a9a578f1cfa b/.direnv/flake-profile-a5d5b61aa8a61b7d9d765e1daf971a9a578f1cfa
index 34e0cb8..d2b9fa4 120000
--- a/.direnv/flake-profile-a5d5b61aa8a61b7d9d765e1daf971a9a578f1cfa
+++ b/.direnv/flake-profile-a5d5b61aa8a61b7d9d765e1daf971a9a578f1cfa
@@ -1 +1 @@
-/nix/store/vdijc3indsq6j6xbridfqjib4pkg6vhs-nix-shell-env
\ No newline at end of file
+/nix/store/sc88ikf5zh532nisyr5v9h6f6q6fay54-nix-shell-env
\ No newline at end of file
diff --git a/.direnv/flake-profile-a5d5b61aa8a61b7d9d765e1daf971a9a578f1cfa.rc b/.direnv/flake-profile-a5d5b61aa8a61b7d9d765e1daf971a9a578f1cfa.rc
index dd22a39..d214ede 100644
--- a/.direnv/flake-profile-a5d5b61aa8a61b7d9d765e1daf971a9a578f1cfa.rc
+++ b/.direnv/flake-profile-a5d5b61aa8a61b7d9d765e1daf971a9a578f1cfa.rc
@@ -12,6 +12,10 @@ export AS
 AS_FOR_BUILD='as'
 export AS_FOR_BUILD
 BASH='/nix/store/xg75pc4yyfd5n2fimhb98ps910q5lm5n-bash-5.2p37/bin/bash'
+BITBEAM_DATABASE_URL='sqlite://./bitbeam.sqlite'
+export BITBEAM_DATABASE_URL
+BITBEAM_DB_TYPE='sqlite'
+export BITBEAM_DB_TYPE
 CC='gcc'
 export CC
 CC_FOR_BUILD='gcc'
@@ -43,7 +47,7 @@ NIX_BINTOOLS_WRAPPER_TARGET_BUILD_x86_64_unknown_linux_gnu='1'
 export NIX_BINTOOLS_WRAPPER_TARGET_BUILD_x86_64_unknown_linux_gnu
 NIX_BINTOOLS_WRAPPER_TARGET_HOST_x86_64_unknown_linux_gnu='1'
 export NIX_BINTOOLS_WRAPPER_TARGET_HOST_x86_64_unknown_linux_gnu
-NIX_BUILD_CORES='12'
+NIX_BUILD_CORES='16'
 export NIX_BUILD_CORES
 NIX_CC='/nix/store/dc6bahp3f5af2rxz3pal9m3kp4vx4rpy-gcc-wrapper-14.2.1.20250322'
 export NIX_CC
@@ -53,7 +57,7 @@ NIX_CC_WRAPPER_TARGET_BUILD_x86_64_unknown_linux_gnu='1'
 export NIX_CC_WRAPPER_TARGET_BUILD_x86_64_unknown_linux_gnu
 NIX_CC_WRAPPER_TARGET_HOST_x86_64_unknown_linux_gnu='1'
 export NIX_CC_WRAPPER_TARGET_HOST_x86_64_unknown_linux_gnu
-NIX_CFLAGS_COMPILE=' -frandom-seed=vdijc3inds -isystem /nix/store/jv45xs1p8v9mcychfgkv6vxridcn532h-openssl-3.4.1-dev/include -isystem /nix/store/jv45xs1p8v9mcychfgkv6vxridcn532h-openssl-3.4.1-dev/include'
+NIX_CFLAGS_COMPILE=' -frandom-seed=sc88ikf5zh -isystem /nix/store/jv45xs1p8v9mcychfgkv6vxridcn532h-openssl-3.4.1-dev/include -isystem /nix/store/jv45xs1p8v9mcychfgkv6vxridcn532h-openssl-3.4.1-dev/include'
 export NIX_CFLAGS_COMPILE
 NIX_CFLAGS_COMPILE_FOR_BUILD=' -isystem /nix/store/jv45xs1p8v9mcychfgkv6vxridcn532h-openssl-3.4.1-dev/include -isystem /nix/store/jv45xs1p8v9mcychfgkv6vxridcn532h-openssl-3.4.1-dev/include -isystem /nix/store/jv45xs1p8v9mcychfgkv6vxridcn532h-openssl-3.4.1-dev/include'
 export NIX_CFLAGS_COMPILE_FOR_BUILD
@@ -61,7 +65,7 @@ NIX_ENFORCE_NO_NATIVE='1'
 export NIX_ENFORCE_NO_NATIVE
 NIX_HARDENING_ENABLE='bindnow format fortify fortify3 pic relro stackclashprotection stackprotector strictoverflow zerocallusedregs'
 export NIX_HARDENING_ENABLE
-NIX_LDFLAGS='-rpath /home/work/Documents/rust/bitBeam/outputs/out/lib -L/nix/store/jn59p00df0j13ad5jn0q0irfpq6azvby-rust-default-1.86.0/lib -L/nix/store/xy8x4g472i5n1bh24c5ixhbnk6qlm9vz-openssl-3.4.1/lib -L/nix/store/jn59p00df0j13ad5jn0q0irfpq6azvby-rust-default-1.86.0/lib -L/nix/store/xy8x4g472i5n1bh24c5ixhbnk6qlm9vz-openssl-3.4.1/lib'
+NIX_LDFLAGS='-rpath /home/mrfluffy/bitBeam/outputs/out/lib -L/nix/store/jn59p00df0j13ad5jn0q0irfpq6azvby-rust-default-1.86.0/lib -L/nix/store/xy8x4g472i5n1bh24c5ixhbnk6qlm9vz-openssl-3.4.1/lib -L/nix/store/jn59p00df0j13ad5jn0q0irfpq6azvby-rust-default-1.86.0/lib -L/nix/store/xy8x4g472i5n1bh24c5ixhbnk6qlm9vz-openssl-3.4.1/lib'
 export NIX_LDFLAGS
 NIX_LDFLAGS_FOR_BUILD=' -L/nix/store/jn59p00df0j13ad5jn0q0irfpq6azvby-rust-default-1.86.0/lib -L/nix/store/xy8x4g472i5n1bh24c5ixhbnk6qlm9vz-openssl-3.4.1/lib -L/nix/store/jn59p00df0j13ad5jn0q0irfpq6azvby-rust-default-1.86.0/lib -L/nix/store/xy8x4g472i5n1bh24c5ixhbnk6qlm9vz-openssl-3.4.1/lib -L/nix/store/jn59p00df0j13ad5jn0q0irfpq6azvby-rust-default-1.86.0/lib -L/nix/store/xy8x4g472i5n1bh24c5ixhbnk6qlm9vz-openssl-3.4.1/lib'
 export NIX_LDFLAGS_FOR_BUILD
@@ -173,7 +177,7 @@ declare -a envHostHostHooks=('ccWrapper_addCVars' 'bintoolsWrapper_addLDVars' 'pkgConfigWrapper_addPkgConfigPath' )
 declare -a envHostTargetHooks=('ccWrapper_addCVars' 'bintoolsWrapper_addLDVars' 'pkgConfigWrapper_addPkgConfigPath' )
 declare -a envTargetTargetHooks=()
 declare -a fixupOutputHooks=('if [ -z "${dontPatchELF-}" ]; then patchELF "$prefix"; fi' 'if [[ -z "${noAuditTmpdir-}" && -e "$prefix" ]]; then auditTmpdir "$prefix"; fi' 'if [ -z "${dontGzipMan-}" ]; then compressManPages "$prefix"; fi' '_moveLib64' '_moveSbin' '_moveSystemdUserUnits' 'patchShebangsAuto' '_pruneLibtoolFiles' '_doStrip' )
-guess='12'
+guess='16'
 initialPath='/nix/store/cg09nslw3w6afyynjw484b86d47ic1cb-coreutils-9.7 /nix/store/frspb25x6v43fwv6b0wna1fm5nsqcp0b-findutils-4.10.0 /nix/store/8cs5vjkbwf2vicgms4km5k1kgbznhwip-diffutils-3.12 /nix/store/1h8gf327cgid0jgjygrj31amp63mn7a7-gnused-4.9 /nix/store/2wni3gbcf6fqwlfb2h9sv7jvqlpf1ylq-gnugrep-3.11 /nix/store/f8x04xqd2cs274k0hgfzsrms6sby2fgx-gawk-5.3.2 /nix/store/5jmcn57x2j9mkdr3j947cbja2hpxmhfn-gnutar-1.35 /nix/store/04z4rhjadrnd0w3ib2sl42pa3xjgpf7p-gzip-1.14 /nix/store/sysih19x8xx8l473d3qnr760hy758lkq-bzip2-1.0.8-bin /nix/store/agn71jakv0a9669k3zx5g9aqm2sl9z77-gnumake-4.4.1 /nix/store/xg75pc4yyfd5n2fimhb98ps910q5lm5n-bash-5.2p37 /nix/store/qd20g193gch8bj4h0h44wr97mw0bhkmf-patch-2.7.6 /nix/store/fi495i6cz40rq1axig930jzyw7ln0zhm-xz-5.8.1-bin /nix/store/hb9v0qx9vk0420z5grlnv2y5wcf6dp6i-file-5.46'
 mesonFlags=''
 export mesonFlags
@@ -181,7 +185,7 @@ name='nix-shell-env'
 export name
 nativeBuildInputs='/nix/store/jn59p00df0j13ad5jn0q0irfpq6azvby-rust-default-1.86.0 /nix/store/jv45xs1p8v9mcychfgkv6vxridcn532h-openssl-3.4.1-dev /nix/store/nbph466agczbny52jzk143ydcp2x14q4-pkg-config-wrapper-0.29.2 /nix/store/ml99slpq5d1yv9827m99cbdkn430kg4c-cargo-deny-0.18.2 /nix/store/dsq2icwvw7qwas4jawzywqrc04hf14ab-cargo-edit-0.13.3 /nix/store/fqdx4vdaa09fxa8cm4dfww0jh1mkz4y9-cargo-watch-8.5.3 /nix/store/xgp1b532vvisxvc4527b13gaw46s4dik-rust-analyzer-2025-05-05'
 export nativeBuildInputs
-out='/home/work/Documents/rust/bitBeam/outputs/out'
+out='/home/mrfluffy/bitBeam/outputs/out'
 export out
 outputBin='out'
 outputDev='out'
@@ -212,7 +216,7 @@ preConfigurePhases=' updateAutotoolsGnuConfigScriptsPhase'
 declare -a preFixupHooks=('_moveToShare' '_multioutDocs' '_multioutDevs' )
 preferLocalBuild='1'
 export preferLocalBuild
-prefix='/home/work/Documents/rust/bitBeam/outputs/out'
+prefix='/home/mrfluffy/bitBeam/outputs/out'
 declare -a propagatedBuildDepFiles=('propagated-build-build-deps' 'propagated-native-build-inputs' 'propagated-build-target-deps' )
 propagatedBuildInputs=''
 export propagatedBuildInputs
diff --git a/.gitignore b/.gitignore
index ea8c4bf..c97e767 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,5 @@
 /target
+/media_store
+*.log
+*.splite
+*.sqlite*
diff --git a/Cargo.lock b/Cargo.lock
index 2a9aee0..2ae1a95 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -171,6 +171,8 @@ dependencies = [
  "bytes",
  "chrono",
  "extract",
+ "fern",
+ "log",
  "rand 0.9.1",
  "serde",
  "sqlx",
@@ -397,7 +399,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e"
 dependencies = [
  "libc",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
 ]

 [[package]]
@@ -448,6 +450,15 @@ version = "2.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"

+[[package]]
+name = "fern"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4316185f709b23713e41e3195f90edef7fb00c3ed4adc79769cf09cc762a3b29"
+dependencies = [
+ "log",
+]
+
 [[package]]
 name = "flume"
 version = "0.11.1"
@@ -1355,7 +1366,7 @@ dependencies = [
  "errno",
  "libc",
  "linux-raw-sys",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
 ]

 [[package]]
@@ -1820,7 +1831,7 @@ dependencies = [
  "getrandom 0.3.2",
  "once_cell",
  "rustix",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
 ]

 [[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 8a3de7c..326a2c4 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -8,6 +8,8 @@ axum = "0.8"
 bytes = "1.10"
 chrono = {version = "0.4", features = ["serde"]}
 extract = "0.1"
+fern = "0.7.1"
+log = {version = "0.4.27", features = ["std"]}
 rand = "0.9"
 serde = {version = "1.0", features = ["derive"]}
 sqlx = { version = "0.8", features = [
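fern is a backend for the `log` facade: `log` supplies the `info!`/`warn!`/`error!` macros the new code uses, and fern decides where those records go. A minimal sketch of the wiring, assuming nothing beyond these two crates (the project's full setup is `init_logging` in src/main.rs further down):

    // Minimal fern wiring: everything at INFO and above goes to stdout.
    // The project's real init_logging (below) adds a format and a file sink.
    fn main() -> Result<(), fern::InitError> {
        fern::Dispatch::new()
            .level(log::LevelFilter::Info)
            .chain(std::io::stdout())
            .apply()?;
        log::info!("logger is up");
        Ok(())
    }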
diff --git a/bitbeem.sqlite b/bitbeem.sqlite
deleted file mode 100644
index 1c43073..0000000
Binary files a/bitbeem.sqlite and /dev/null differ
diff --git a/flake.nix b/flake.nix
index b32676f..a874bf1 100644
--- a/flake.nix
+++ b/flake.nix
@@ -10,50 +10,77 @@
     };
   };

-  outputs = inputs @ { self, nixpkgs, rust-overlay, ... }:
+  outputs =
+    inputs@{
+      self,
+      nixpkgs,
+      rust-overlay,
+      ...
+    }:
     let
-      supportedSystems = [ "x86_64-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" ];
-      forEachSupportedSystem = f: nixpkgs.lib.genAttrs supportedSystems (system: f {
-        pkgs = import nixpkgs {
-          inherit system;
-          overlays = [ rust-overlay.overlays.default ];
-        };
-      });
+      supportedSystems = [
+        "x86_64-linux"
+        "aarch64-linux"
+        "x86_64-darwin"
+        "aarch64-darwin"
+      ];
+      forEachSupportedSystem =
+        f:
+        nixpkgs.lib.genAttrs supportedSystems (
+          system:
+          f {
+            pkgs = import nixpkgs {
+              inherit system;
+              overlays = [ rust-overlay.overlays.default ];
+            };
+          }
+        );
     in
     {
       # Define the package (your Rust binary)
-      packages = forEachSupportedSystem ({ pkgs }: {
-        default = pkgs.rustPlatform.buildRustPackage {
-          name = "bitBeam";
-          src = ./.;
+      packages = forEachSupportedSystem (
+        { pkgs }:
+        {
+          default = pkgs.rustPlatform.buildRustPackage {
+            name = "bitBeam";
+            src = ./.;

-          # Specify dependencies (replace with your project's actual dependencies)
-          buildInputs = [ pkgs.openssl pkgs.pkg-config ];
+            # Specify dependencies (replace with your project's actual dependencies)
+            buildInputs = [
+              pkgs.openssl
+              pkgs.pkg-config
+            ];

-          # Generate this with `cargo generate-lockfile` if you don't have it
-          cargoLock = {
-            lockFile = ./Cargo.lock;
+            # Generate this with `cargo generate-lockfile` if you don't have it
+            cargoLock = {
+              lockFile = ./Cargo.lock;
+            };
+
+            # Optional: Override the Rust version if needed
+            nativeBuildInputs = [ pkgs.rust-bin.stable.latest.default ];
           };
-
-          # Optional: Override the Rust version if needed
-          nativeBuildInputs = [ pkgs.rust-bin.stable.latest.default ];
-        };
-      });
+        }
+      );

       # Development environment (existing setup)
-      devShells = forEachSupportedSystem ({ pkgs }: {
-        default = pkgs.mkShell {
-          packages = with pkgs; [
-            rust-bin.stable.latest.default
-            openssl
-            pkg-config
-            cargo-deny
-            cargo-edit
-            cargo-watch
-            rust-analyzer
-          ];
-          RUST_SRC_PATH = "${pkgs.rust-bin.stable.latest.default}/lib/rustlib/src/rust/library";
-        };
-      });
+      devShells = forEachSupportedSystem (
+        { pkgs }:
+        {
+          default = pkgs.mkShell {
+            packages = with pkgs; [
+              rust-bin.stable.latest.default
+              openssl
+              pkg-config
+              cargo-deny
+              cargo-edit
+              cargo-watch
+              rust-analyzer
+            ];
+            RUST_SRC_PATH = "${pkgs.rust-bin.stable.latest.default}/lib/rustlib/src/rust/library";
+            BITBEAM_DATABASE_URL = "sqlite://./bitbeam.sqlite";
+            BITBEAM_DB_TYPE = "sqlite";
+          };
+        }
+      );
     };
 }
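The dev shell now exports the same BITBEAM_DATABASE_URL and BITBEAM_DB_TYPE defaults that showed up in the cached direnv profile above, so `nix develop` (or direnv) lands in a shell where the server runs against a local sqlite file with no extra setup. A sketch of how these variables are consumed on the Rust side, mirroring the Config loading in src/main.rs further down:

    // Sketch: the devShell variables are ordinary environment variables.
    fn main() {
        let db_type = std::env::var("BITBEAM_DB_TYPE")
            .unwrap_or_else(|_| "postgres".to_string()); // "sqlite" inside `nix develop`
        let database_url = std::env::var("BITBEAM_DATABASE_URL")
            // illustrative fallback; the real main.rs panics if the URL is unset
            .unwrap_or_else(|_| "sqlite://./bitbeam.sqlite".to_string());
        println!("db_type={db_type} database_url={database_url}");
    }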
diff --git a/media_store/00000020-6674-7970-6d70-343200000000 b/media_store/00000020-6674-7970-6d70-343200000000
deleted file mode 100644
index 83fe200..0000000
Binary files a/media_store/00000020-6674-7970-6d70-343200000000 and /dev/null differ
diff --git a/media_store/3c8a4be4-de49-b70c-c5c3-31897fdf6405 b/media_store/3c8a4be4-de49-b70c-c5c3-31897fdf6405
deleted file mode 100644
index 83fe200..0000000
Binary files a/media_store/3c8a4be4-de49-b70c-c5c3-31897fdf6405 and /dev/null differ
diff --git a/media_store/4181489c-6c9a-ccf2-3bed-78f02ad7378c b/media_store/4181489c-6c9a-ccf2-3bed-78f02ad7378c
deleted file mode 100644
index 83fe200..0000000
Binary files a/media_store/4181489c-6c9a-ccf2-3bed-78f02ad7378c and /dev/null differ
diff --git a/media_store/8bdff898-b87c-54ec-8c4d-72258bd00e75 b/media_store/8bdff898-b87c-54ec-8c4d-72258bd00e75
deleted file mode 100644
index 83fe200..0000000
Binary files a/media_store/8bdff898-b87c-54ec-8c4d-72258bd00e75 and /dev/null differ
diff --git a/media_store/b242b5c7-49fc-fa8a-f3cd-8902e91db1a3 b/media_store/b242b5c7-49fc-fa8a-f3cd-8902e91db1a3
deleted file mode 100644
index 83fe200..0000000
Binary files a/media_store/b242b5c7-49fc-fa8a-f3cd-8902e91db1a3 and /dev/null differ
diff --git a/media_store/fdc57869-11d0-db1e-6ff1-e3318c787f60 b/media_store/fdc57869-11d0-db1e-6ff1-e3318c787f60
deleted file mode 100644
index 83fe200..0000000
Binary files a/media_store/fdc57869-11d0-db1e-6ff1-e3318c787f60 and /dev/null differ
diff --git a/src/api.rs b/src/api.rs
new file mode 100644
index 0000000..245a953
--- /dev/null
+++ b/src/api.rs
@@ -0,0 +1,163 @@
+use axum::{
+    body::Bytes,
+    extract::ConnectInfo,
+    http::{HeaderMap, StatusCode},
+    response::{IntoResponse, Response},
+    Extension, Json,
+};
+use chrono::Utc;
+use rand::Rng;
+use sqlx::AnyPool;
+use std::path::Path;
+use tokio::fs;
+use uuid::Uuid;
+use log::{info, warn, error};
+
+use std::net::SocketAddr;
+use crate::data;
+
+/// Handler to return all files as JSON.
+/// Retrieves all files from the database and returns them as a JSON
+/// response, logging the IP address of the client making the request.
+pub async fn all_files(
+    Extension(pool): Extension<AnyPool>,
+    ConnectInfo(addr): ConnectInfo<SocketAddr>,
+) -> impl IntoResponse {
+    // log the IP address of the client and the call
+    let ip = addr.ip().to_string();
+    info!("Received an all_files request from IP: {}", ip);
+    // run the query, map the rows to the File struct, and return the
+    // result as JSON on success or an error message otherwise
+    match sqlx::query_as::<_, data::File>(
+        r#"
+        SELECT *
+        FROM files
+        "#,
+    )
+    .fetch_all(&pool)
+    .await
+    {
+        Ok(files) => {
+            info!("DB select all success");
+            (StatusCode::OK, Json(files)).into_response()
+        }
+        Err(e) => {
+            warn!("DB select all error: {}", e);
+            (
+                StatusCode::INTERNAL_SERVER_ERROR,
+                "Database select all error",
+            )
+                .into_response()
+        }
+    }
+}
+
+/// Handler to upload a file.
+/// Receives the file data in the request body, saves it to the
+/// server's file system, and stores the file metadata in the database,
+/// logging the IP address of the client making the request.
+pub async fn upload(
+    Extension(pool): Extension<AnyPool>,
+    ConnectInfo(addr): ConnectInfo<SocketAddr>,
+    Extension(config): Extension<data::Config>,
+    headers: HeaderMap,
+    body: Bytes,
+) -> Response {
+    // log the IP address of the client and the call
+    let ip = addr.ip().to_string();
+    info!("Received upload from IP: {}", ip);
+
+    // get the content type from the headers
+    let content_type = headers
+        .get("content-type")
+        .and_then(|hv| hv.to_str().ok())
+        .unwrap_or("application/octet-stream")
+        .to_string();
+    // get the download limit from the headers
+    let download_limit = headers
+        .get("download_limit")               // Option<&HeaderValue>
+        .and_then(|hv| hv.to_str().ok())     // Option<&str>
+        .and_then(|s| s.parse::<i32>().ok()) // Option<i32>
+        .unwrap_or(1);                       // i32
+    // generate a random UUID for the file ID
+    let id = {
+        let mut rng = rand::rng();
+        Uuid::from_u128(rng.random::<u128>()).to_string()
+    };
+    // create the media directory if it doesn't exist
+    let dir = Path::new(&config.data_path);
+    if let Err(e) = fs::create_dir_all(dir).await {
+        warn!("could not make dir at {}, error: {}", &config.data_path, e);
+        return (
+            axum::http::StatusCode::INTERNAL_SERVER_ERROR,
+            "Directory creation error",
+        )
+            .into_response();
+    }
+    // build the file path: the directory plus the file ID, with a
+    // type-derived extension unless the file is application/x-executable
+    if content_type == "application/x-executable" {
+        info!("File type is application/x-executable");
+    } else {
+        info!("File type is {}", content_type);
+    }
+    let file_path = dir.join(if content_type == "application/x-executable" {
+        format!("{}", id)
+    } else {
+        format!("{}.{}", id, content_type.split('/').last().unwrap_or("bin"))
+    });
+
+    if let Err(e) = fs::write(&file_path, &body).await {
+        warn!("write error {}: {}", id, e);
+        return (
+            axum::http::StatusCode::INTERNAL_SERVER_ERROR,
+            "File write error",
+        )
+            .into_response();
+    }
+    let file_size = body.len() as i64;
+
+    let upload_time = Utc::now().timestamp(); // i64
+
+    let download_count = 0;
+
+    if let Err(e) = sqlx::query(
+        r#"
+        INSERT INTO files
+        (id, content_type, upload_time, download_limit, download_count, file_size)
+        VALUES (?, ?, ?, ?, ?, ?)
+        "#,
+    )
+    .bind(&id)
+    .bind(&content_type)
+    .bind(&upload_time)
+    .bind(download_limit)
+    .bind(download_count)
+    .bind(file_size)
+    .execute(&pool)
+    .await
+    {
+        error!("DB insert error {}: {}", id, e);
+        return (
+            axum::http::StatusCode::INTERNAL_SERVER_ERROR,
+            "Database insert error",
+        )
+            .into_response();
+    }
+
+    let uploaded_file = data::File {
+        id,
+        content_type,
+        upload_time,
+        download_limit,
+        download_count,
+        file_size,
+    };
+    Json(uploaded_file).into_response()
+}
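For reference, a hypothetical client for the `upload` handler above: the request body is the raw file bytes, `content-type` determines the stored file's extension, and the optional `download_limit` header is parsed as an i32 (defaulting to 1). reqwest is not a dependency of this project; the sketch only shows the expected request shape:

    // Hypothetical client sketch (reqwest is NOT a project dependency).
    #[tokio::main]
    async fn main() -> Result<(), reqwest::Error> {
        let bytes = std::fs::read("demo.png").expect("could not read demo.png");
        let resp = reqwest::Client::new()
            .post("http://127.0.0.1:3000/upload")
            .header("content-type", "image/png") // becomes the stored extension (.png)
            .header("download_limit", "3")       // parsed as i32, defaults to 1
            .body(bytes)
            .send()
            .await?;
        // The handler replies with the inserted File row as JSON.
        println!("{}", resp.text().await?);
        Ok(())
    }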
diff --git a/src/data.rs b/src/data.rs
new file mode 100644
index 0000000..37929b4
--- /dev/null
+++ b/src/data.rs
@@ -0,0 +1,37 @@
+use serde::Serialize;
+use sqlx::FromRow;
+
+/// A file record in the database: the file's ID, content type,
+/// upload time, download limit, download count, and file size.
+/// Derives `FromRow` from `sqlx` so it can be built from a database
+/// row, and `Serialize` from `serde` so it can be serialized to JSON.
+#[derive(FromRow, Serialize)]
+pub struct File {
+    pub id: String,
+    pub content_type: String,
+    pub upload_time: i64,
+    pub download_limit: i32,
+    pub download_count: i32,
+    pub file_size: i64,
+}
+
+/// The application's configuration settings: database connection,
+/// data path, server address and port, and logging settings.
+/// Derives `Clone` so it can be shared with the handlers as an Axum
+/// extension.
+#[derive(Clone)]
+pub struct Config {
+    pub db_type: String,
+    pub database_url: String,
+    pub data_path: String,
+    pub port: String,
+    pub listener_addr: String,
+    pub log_level: String,
+    pub log_location: String,
+}
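Because `File` derives `Serialize`, the JSON bodies returned by `all_files` and `upload` mirror the struct's fields one-to-one; a single row serializes roughly like this (values are illustrative):

    {
      "id": "3c8a4be4-de49-b70c-c5c3-31897fdf6405",
      "content_type": "image/png",
      "upload_time": 1746400000,
      "download_limit": 1,
      "download_count": 0,
      "file_size": 4096
    }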
diff --git a/src/main.rs b/src/main.rs
index bd9ac3f..aecb7b9 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,49 +1,67 @@
 use axum::{
-    body::Bytes, extract::DefaultBodyLimit,
-    http::{HeaderMap, StatusCode},
-    response::{IntoResponse, Response}, routing::{get, post},
-    Extension, Json, Router,
+    extract::DefaultBodyLimit,
+    routing::{get, post},
+    response::IntoResponse,
+    Extension, Router,
 };
-use chrono::{DateTime, Utc};
-use rand::Rng;
-use serde::Serialize;
-use sqlx::{any::AnyPoolOptions, migrate::MigrateDatabase, AnyPool, Encode, FromRow, Sqlite};
+use sqlx::{any::AnyPoolOptions, migrate::MigrateDatabase, AnyPool, Sqlite};
+use log::{info, warn, error, debug};
+
 use std::path::Path;
 use tokio::fs;
-use uuid::Uuid;
-
-#[derive(FromRow, Serialize)]
-struct File {
-    id: String,
-    content_type: String,
-    upload_time: i64,
-    download_limit: i32,
-    download_count: i32,
-    file_size: i64,
-}
-
-struct Config {
-    db_type: String,
-    database_url: String,
-}
+use std::net::SocketAddr;
+mod api;
+mod data;
+
+/// Application entry point: loads the configuration from environment
+/// variables, initializes the logging system (fern), sets up the
+/// database connection (SQLx), and serves the HTTP API (Axum) on the
+/// Tokio runtime.
 #[tokio::main]
 async fn main() {
     sqlx::any::install_default_drivers();
-    // Read and normalize DB type and connection URL
-    let config = Config {
-        db_type: std::env::var("BITBEEM_DB_TYPE").unwrap_or_else(|_| "postgres".to_string()),
-        database_url: match std::env::var("BITBEEM_DB_TYPE").unwrap().as_str() {
-            "postgres" => std::env::var("BITBEEM_DATABASE_URL")
-                .expect("BITBEEM_DATABASE_URL must be set for Postgres"),
-            "sqlite" => std::env::var("BITBEEM_DATABASE_URL")
-                .expect("BITBEEM_DATABASE_URL must be set for SQLite"),
-            other => panic!("Unsupported BITBEEM_DB_TYPE: {}", other),
+    // Load the configuration from environment variables
+    let config = data::Config {
+        db_type: std::env::var("BITBEAM_DB_TYPE").unwrap_or_else(|_| "postgres".to_string()),
+        database_url: match std::env::var("BITBEAM_DB_TYPE").unwrap_or_else(|_| "postgres".to_string()).as_str() {
+            "postgres" => std::env::var("BITBEAM_DATABASE_URL")
+                .expect("BITBEAM_DATABASE_URL must be set for Postgres"),
+            "sqlite" => std::env::var("BITBEAM_DATABASE_URL")
+                .expect("BITBEAM_DATABASE_URL must be set for SQLite"),
+            other => panic!("Unsupported BITBEAM_DB_TYPE: {}", other),
         },
+        data_path: std::env::var("BITBEAM_DATA_PATH").unwrap_or_else(|_| "./media_store".to_string()),
+        port: std::env::var("BITBEAM_PORT").unwrap_or_else(|_| "3000".to_string()),
+        listener_addr: std::env::var("BITBEAM_ADDR").unwrap_or_else(|_| "127.0.0.1".to_string()),
+        log_level: std::env::var("BITBEAM_LOG_LEVEL").unwrap_or_else(|_| "info".to_string()),
+        log_location: std::env::var("BITBEAM_LOG_LOCATION").unwrap_or_else(|_| "./bitbeam.log".to_string()),
     };
+    // Set up the logging system;
+    // the log level comes from the environment variable BITBEAM_LOG_LEVEL
+    let level = match config.log_level.as_str() {
+        "debug" => log::LevelFilter::Debug,
+        "info" => log::LevelFilter::Info,
+        "warn" => log::LevelFilter::Warn,
+        "error" => log::LevelFilter::Error,
+        _ => log::LevelFilter::Info,
+    };
+    // Initialize the logging system
+    let log_path = &config.log_location;
+    let _logs = init_logging(log_path, level);
+    info!("done loading config");
+    // Create the sqlite database file if it doesn't exist
+    // (only applies when the db type is sqlite)
     if config.db_type == "sqlite" {
         if !Sqlite::database_exists(&config.database_url)
             .await
@@ -51,22 +69,27 @@ async fn main() {
         {
             println!("Creating database {}", config.database_url);
             match Sqlite::create_database(&config.database_url).await {
-                Ok(_) => println!("Create db success"),
-                Err(error) => panic!("error: {}", error),
+                Ok(_) => info!("Create db success"),
+                Err(error) => {
+                    error!("Error creating database: {}", error);
+                    panic!("error: {}", error)
+                },
             }
         } else {
-            println!("Database already exists");
+            info!("Database already exists");
         }
     }

-    // Create a generic AnyPool
+    // Create the database connection pool (AnyPool)
+    // using the database URL from the configuration
     let pool: AnyPool = AnyPoolOptions::new()
         .max_connections(5)
         .connect(&config.database_url)
         .await
         .expect("could not connect to database");

-    // Migration SQL
+    // Set up the database schema,
+    // creating the table if it doesn't exist
     if let Err(_e) = sqlx::query(
         r#"
         CREATE TABLE IF NOT EXISTS files (
@@ -82,120 +105,89 @@
     .execute(&pool)
     .await
     {
-        eprintln!("DB created");
+        error!("could not create files table: {}", _e);
     };
+    // create the media directory if it doesn't exist
+    let dir = Path::new(&config.data_path);
+    if let Err(e) = fs::create_dir_all(dir).await {
+        warn!("could not make dir at {}, error: {}", &config.data_path, e);
+    }

+    // Set up the web server (Axum) and its routes
     let app = Router::new()
         .route("/", get(|| async { "Hello, World!" }))
-        .route("/upload", post(upload))
-        .route("/all_files", get(all_files))
+        .route("/upload", post(api::upload))
+        .route("/all_files", get(api::all_files))
         .layer(DefaultBodyLimit::max(100 * 1024 * 1024))
-        .layer(Extension(pool));
+        .layer(Extension(pool))
+        .layer(Extension(config.clone()))
+        .into_make_service_with_connect_info::<SocketAddr>();

+    // Start the server on the address and port from the configuration
     axum::serve(
-        tokio::net::TcpListener::bind("0.0.0.0:3000").await.unwrap(),
+        match tokio::net::TcpListener::bind(format!("{}:{}", &config.listener_addr, &config.port)).await {
+            Ok(listener) => listener,
+            Err(e) => {
+                error!("Error binding to address {}:{}: {}", &config.listener_addr, &config.port, e);
+                return;
+            }
+        },
         app,
     )
     .await
     .unwrap();
 }

-/// Handler to return all files as JSON
-async fn all_files(Extension(pool): Extension<AnyPool>) -> impl IntoResponse {
-    // Run the query and map each row into a File
-    match sqlx::query_as::<_, File>(
-        r#"
-        SELECT *
-        FROM files
-        "#,
-    )
-    .fetch_all(&pool)
-    .await
-    {
-        Ok(files) => (StatusCode::OK, Json(files)).into_response(),
-        Err(e) => {
-            eprintln!("DB select all error: {}", e);
-            (
-                StatusCode::INTERNAL_SERVER_ERROR,
-                "Database select all error",
-            )
-                .into_response()
-        }
-    }
-}
+/// Initialize the logging system: builds a fern logger that writes to
+/// both stdout and a log file at the given level filter, formatting
+/// each message with the date, time, log level, target, and message.
+fn init_logging(
+    log_file_path: &str,
+    level: log::LevelFilter,
+) -> Result<(), Box<dyn std::error::Error>> {
+    // Build a Dispatch for stdout
+    let stdout_dispatch = fern::Dispatch::new()
+        .format(|out, message, record| {
+            out.finish(format_args!(
+                "[{date}][{lvl}][{target}] {msg}",
+                date = chrono::Local::now().format("%Y-%m-%d %H:%M:%S"),
+                lvl = record.level(),
+                target = record.target(),
+                msg = message,
+            ))
+        })
+        .level(level)
+        .chain(std::io::stdout());
+
+    // Build a Dispatch for the log file
+    let file_dispatch = fern::Dispatch::new()
+        .format(|out, message, record| {
+            out.finish(format_args!(
+                "[{date}][{lvl}][{target}] {msg}",
+                date = chrono::Local::now().format("%Y-%m-%d %H:%M:%S"),
+                lvl = record.level(),
+                target = record.target(),
+                msg = message,
+            ))
+        })
+        .level(level)
+        .chain(fern::log_file(log_file_path)?);
+
+    // Combine the stdout and file dispatches and apply them,
+    // so the logger writes to both sinks
+    fern::Dispatch::new()
+        .chain(stdout_dispatch)
+        .chain(file_dispatch)
+        .apply()?;
+
+    Ok(())
+}

-async fn upload(Extension(pool): Extension<AnyPool>, headers: HeaderMap, body: Bytes) -> Response {
-    let content_type = headers
-        .get("content-type")
-        .and_then(|hv| hv.to_str().ok())
-        .unwrap_or("application/octet-stream")
-        .to_string();
-    let id = {
-        // Fallback to random UUID if body is too small
-        let mut rng = rand::rng();
-        Uuid::from_u128(rng.random::<u128>()).to_string()
-    };
-    let dir = Path::new("./media_store");
-    if let Err(e) = fs::create_dir_all(dir).await {
-        eprintln!("mkdir error: {}", e);
-        return (
-            axum::http::StatusCode::INTERNAL_SERVER_ERROR,
-            "Directory creation error",
-        )
-            .into_response();
-    }
-    let file_path = dir.join(&id);
-    if let Err(e) = fs::write(&file_path, &body).await {
-        eprintln!("write error {}: {}", id, e);
-        return (
-            axum::http::StatusCode::INTERNAL_SERVER_ERROR,
-            "File write error",
-        )
-            .into_response();
-    }
-    let file_size = body.len() as i64;
-
-    let upload_time = Utc::now().timestamp(); // i64
-    let download_limit = headers
-        .get("download_limit") // Option<&HeaderValue>
-        .and_then(|hv| hv.to_str().ok()) // Option<&str>
-        .and_then(|s| s.parse::<u32>().ok()) // Option<u32>
-        .unwrap_or(2); // u32
-    let download_count = 0;
-
-    let download_count = 0;
-
-    if let Err(e) = sqlx::query(
-        r#"
-        INSERT INTO files
-        (id, content_type, upload_time, download_limit, download_count, file_size)
-        VALUES (?, ?, ?, ?, ?, ?)
-        "#,
-    )
-    .bind(&id)
-    .bind(&content_type)
-    .bind(&upload_time)
-    .bind(download_limit)
-    .bind(download_count)
-    .bind(file_size as i64)
-    .execute(&pool)
-    .await
-    {
-        eprintln!("DB insert error {}: {}", id, e);
-        return (
-            axum::http::StatusCode::INTERNAL_SERVER_ERROR,
-            "Database insert error",
-        )
-            .into_response();
-    }
-
-    let uploaded_file = File {
-        id,
-        content_type,
-        upload_time,
-        download_limit,
-        download_count,
-        file_size,
-    };
-    Json(uploaded_file).into_response()
-}
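A usage sketch for `init_logging` above: main() currently discards its Result in `_logs`, so a setup failure (for example an unwritable BITBEAM_LOG_LOCATION) leaves the process with no logger installed and the log macros become no-ops. The `run` wrapper here is hypothetical; it only shows how checking the Result surfaces the failure:

    // Hypothetical wrapper: initialize logging first and surface failures.
    fn run() {
        if let Err(e) = init_logging("./bitbeam.log", log::LevelFilter::Info) {
            eprintln!("logging setup failed: {e}");
        }
        log::info!("bitBeam starting");
    }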