Rewrite app: entries → projects with S3 file storage
All checks were successful
ci/woodpecker/push/build Pipeline was successful

Replace flat text entries with project-based structure.
Each project has name, local/corp fields, content textarea,
and file upload (up to 100MB) stored in MinIO S3.
New API: CRUD projects + file download + content copy.
Frontend: two views (project list + project page).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Mikhail Kilin
2026-03-18 13:50:28 +03:00
parent d23043a489
commit 570e0ca643
11 changed files with 1586 additions and 201 deletions

956
backend/Cargo.lock generated

File diff suppressed because it is too large. Load Diff

View File

@@ -4,10 +4,12 @@ version = "0.1.0"
edition = "2024"

[dependencies]
# "multipart" feature is required by the project file-upload handler.
axum = { version = "0.8", features = ["multipart"] }
sqlx = { version = "0.8", features = ["runtime-tokio", "postgres", "chrono"] }
tokio = { version = "1", features = ["full"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
tower-http = { version = "0.6", features = ["cors"] }
chrono = { version = "0.4", features = ["serde"] }
# S3-compatible object storage (MinIO) for uploaded project files.
rust-s3 = "0.35"
uuid = { version = "1", features = ["v4"] }

View File

@@ -1,14 +1,25 @@
use sqlx::PgPool;
pub async fn init_db(pool: &PgPool) {
sqlx::query("DROP TABLE IF EXISTS entries")
.execute(pool)
.await
.expect("Failed to drop entries table");
sqlx::query(
"CREATE TABLE IF NOT EXISTS entries (
"CREATE TABLE IF NOT EXISTS projects (
id SERIAL PRIMARY KEY,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
content TEXT NOT NULL
name TEXT NOT NULL,
local TEXT NOT NULL DEFAULT '',
corp TEXT NOT NULL DEFAULT '',
content TEXT NOT NULL DEFAULT '',
file_name TEXT,
file_key TEXT,
file_size BIGINT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
)",
)
.execute(pool)
.await
.expect("Failed to create entries table");
.expect("Failed to create projects table");
}

View File

@@ -1,7 +1,13 @@
mod db;
mod routes;
mod s3;

// Only `get` is imported from `routing`: the other HTTP methods are
// chained onto the MethodRouter (`get(...).put(...).delete(...)`).
use axum::{
    Router,
    extract::DefaultBodyLimit,
    routing::get,
};
use routes::AppState;
use sqlx::postgres::PgPoolOptions;
use tower_http::cors::CorsLayer;
@@ -18,12 +24,18 @@ async fn main() {
db::init_db(&pool).await;
let bucket = s3::init_bucket().await;
let state = AppState { pool, bucket };
let app = Router::new()
.route("/api/entries", get(routes::get_entries).post(routes::create_entry))
.route("/api/entries/{id}", delete(routes::delete_entry))
.route("/api/entries/{id}/content", get(routes::get_entry_content))
.route("/api/projects", get(routes::list_projects).post(routes::create_project))
.route("/api/projects/{id}", get(routes::get_project).put(routes::update_project).delete(routes::delete_project))
.route("/api/projects/{id}/file", get(routes::get_project_file))
.route("/api/projects/{id}/content", get(routes::get_project_content))
.layer(DefaultBodyLimit::max(110 * 1024 * 1024))
.layer(CorsLayer::permissive())
.with_state(pool);
.with_state(state);
let listener = tokio::net::TcpListener::bind("0.0.0.0:3000")
.await

View File

@@ -1,75 +1,202 @@
use axum::{
Json,
extract::{Path, State},
http::StatusCode,
body::Body,
extract::{Multipart, Path, State},
http::{StatusCode, header},
response::{IntoResponse, Response},
};
use chrono::{DateTime, Utc};
use s3::Bucket;
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
#[derive(Serialize, sqlx::FromRow)]
pub struct Entry {
pub id: i32,
pub created_at: Option<DateTime<Utc>>,
pub content: String,
#[derive(Clone)]
pub struct AppState {
pub pool: PgPool,
pub bucket: Box<Bucket>,
}
#[derive(Serialize, sqlx::FromRow)]
pub struct EntryMeta {
pub struct Project {
pub id: i32,
pub name: String,
pub local: String,
pub corp: String,
pub content: String,
pub file_name: Option<String>,
pub file_key: Option<String>,
pub file_size: Option<i64>,
pub created_at: Option<DateTime<Utc>>,
}
#[derive(Serialize, sqlx::FromRow)]
pub struct ProjectMeta {
pub id: i32,
pub name: String,
pub created_at: Option<DateTime<Utc>>,
}
#[derive(Deserialize)]
pub struct CreateEntry {
pub content: String,
pub struct CreateProject {
pub name: String,
}
pub async fn get_entries(State(pool): State<PgPool>) -> Result<Json<Vec<EntryMeta>>, StatusCode> {
let entries = sqlx::query_as::<_, EntryMeta>("SELECT id, created_at FROM entries ORDER BY created_at DESC")
.fetch_all(&pool)
.await
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
pub async fn list_projects(
State(state): State<AppState>,
) -> Result<Json<Vec<ProjectMeta>>, StatusCode> {
let projects = sqlx::query_as::<_, ProjectMeta>(
"SELECT id, name, created_at FROM projects ORDER BY created_at DESC",
)
.fetch_all(&state.pool)
.await
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
Ok(Json(entries))
Ok(Json(projects))
}
pub async fn get_entry_content(
State(pool): State<PgPool>,
pub async fn create_project(
State(state): State<AppState>,
Json(payload): Json<CreateProject>,
) -> Result<(StatusCode, Json<ProjectMeta>), StatusCode> {
let project = sqlx::query_as::<_, ProjectMeta>(
"INSERT INTO projects (name) VALUES ($1) RETURNING id, name, created_at",
)
.bind(&payload.name)
.fetch_one(&state.pool)
.await
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
Ok((StatusCode::CREATED, Json(project)))
}
pub async fn get_project(
State(state): State<AppState>,
Path(id): Path<i32>,
) -> Result<String, StatusCode> {
let row: (String,) = sqlx::query_as("SELECT content FROM entries WHERE id = $1")
) -> Result<Json<Project>, StatusCode> {
let project = sqlx::query_as::<_, Project>("SELECT * FROM projects WHERE id = $1")
.bind(id)
.fetch_optional(&pool)
.fetch_optional(&state.pool)
.await
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?
.ok_or(StatusCode::NOT_FOUND)?;
Ok(row.0)
Ok(Json(project))
}
pub async fn create_entry(
State(pool): State<PgPool>,
Json(payload): Json<CreateEntry>,
) -> Result<(StatusCode, Json<Entry>), StatusCode> {
let entry = sqlx::query_as::<_, Entry>(
"INSERT INTO entries (content) VALUES ($1) RETURNING id, created_at, content",
pub async fn update_project(
State(state): State<AppState>,
Path(id): Path<i32>,
mut multipart: Multipart,
) -> Result<StatusCode, StatusCode> {
let mut local: Option<String> = None;
let mut corp: Option<String> = None;
let mut content: Option<String> = None;
let mut file_data: Option<(String, Vec<u8>)> = None;
while let Some(field) = multipart
.next_field()
.await
.map_err(|_| StatusCode::BAD_REQUEST)?
{
let name = field.name().unwrap_or("").to_string();
match name.as_str() {
"local" => {
local = Some(field.text().await.map_err(|_| StatusCode::BAD_REQUEST)?);
}
"corp" => {
corp = Some(field.text().await.map_err(|_| StatusCode::BAD_REQUEST)?);
}
"content" => {
content = Some(field.text().await.map_err(|_| StatusCode::BAD_REQUEST)?);
}
"file" => {
let file_name = field
.file_name()
.unwrap_or("unknown")
.to_string();
if file_name.is_empty() || file_name == "unknown" {
// Skip empty file fields
let _ = field.bytes().await;
continue;
}
let data = field.bytes().await.map_err(|_| StatusCode::BAD_REQUEST)?;
if !data.is_empty() {
file_data = Some((file_name, data.to_vec()));
}
}
_ => {}
}
}
// Update text fields
sqlx::query(
"UPDATE projects SET local = COALESCE($1, local), corp = COALESCE($2, corp), content = COALESCE($3, content) WHERE id = $4",
)
.bind(&payload.content)
.fetch_one(&pool)
.bind(&local)
.bind(&corp)
.bind(&content)
.bind(id)
.execute(&state.pool)
.await
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
Ok((StatusCode::CREATED, Json(entry)))
// Handle file upload
if let Some((file_name, data)) = file_data {
// Delete old file if exists
let old_key: Option<(Option<String>,)> =
sqlx::query_as("SELECT file_key FROM projects WHERE id = $1")
.bind(id)
.fetch_optional(&state.pool)
.await
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
if let Some((Some(old_key),)) = old_key {
let _ = state.bucket.delete_object(&old_key).await;
}
let file_key = format!("projects/{}/{}", id, uuid::Uuid::new_v4());
let file_size = data.len() as i64;
state
.bucket
.put_object(&file_key, &data)
.await
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
sqlx::query(
"UPDATE projects SET file_name = $1, file_key = $2, file_size = $3 WHERE id = $4",
)
.bind(&file_name)
.bind(&file_key)
.bind(file_size)
.bind(id)
.execute(&state.pool)
.await
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
}
Ok(StatusCode::OK)
}
pub async fn delete_entry(
State(pool): State<PgPool>,
pub async fn delete_project(
State(state): State<AppState>,
Path(id): Path<i32>,
) -> StatusCode {
let result = sqlx::query("DELETE FROM entries WHERE id = $1")
// Delete file from S3 if exists
let row: Option<(Option<String>,)> =
sqlx::query_as("SELECT file_key FROM projects WHERE id = $1")
.bind(id)
.fetch_optional(&state.pool)
.await
.unwrap_or(None);
if let Some((Some(file_key),)) = row {
let _ = state.bucket.delete_object(&file_key).await;
}
let result = sqlx::query("DELETE FROM projects WHERE id = $1")
.bind(id)
.execute(&pool)
.execute(&state.pool)
.await;
match result {
@@ -78,3 +205,50 @@ pub async fn delete_entry(
Err(_) => StatusCode::INTERNAL_SERVER_ERROR,
}
}
/// `GET /api/projects/{id}/file` — download the project's attached file.
///
/// Responds 404 if the project does not exist or has no file. The payload
/// is served as `application/octet-stream` with an attachment disposition.
pub async fn get_project_file(
    State(state): State<AppState>,
    Path(id): Path<i32>,
) -> Result<Response, StatusCode> {
    let project = sqlx::query_as::<_, Project>("SELECT * FROM projects WHERE id = $1")
        .bind(id)
        .fetch_optional(&state.pool)
        .await
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?
        .ok_or(StatusCode::NOT_FOUND)?;
    let file_key = project.file_key.ok_or(StatusCode::NOT_FOUND)?;
    let file_name = project.file_name.unwrap_or_else(|| "file".to_string());

    // Fix: quotes or control characters in the stored file name produce an
    // invalid Content-Disposition value, and the previous `.unwrap()` on the
    // builder would panic the handler. Sanitize the name and map the error.
    let safe_name: String = file_name
        .chars()
        .map(|c| if c == '"' || c.is_control() { '_' } else { c })
        .collect();

    // NOTE(review): this buffers the entire object (uploads may be ~100 MB)
    // in memory; consider streaming from S3 if memory pressure shows up.
    let response_data = state
        .bucket
        .get_object(&file_key)
        .await
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
    let body = Body::from(response_data.to_vec());

    Response::builder()
        .header(header::CONTENT_TYPE, "application/octet-stream")
        .header(
            header::CONTENT_DISPOSITION,
            format!("attachment; filename=\"{}\"", safe_name),
        )
        .body(body)
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
}
/// `GET /api/projects/{id}/content` — return the raw content text
/// (used by the frontend's copy-to-clipboard action), 404 if absent.
pub async fn get_project_content(
    State(state): State<AppState>,
    Path(id): Path<i32>,
) -> Result<String, StatusCode> {
    let maybe_row: Option<(String,)> =
        sqlx::query_as("SELECT content FROM projects WHERE id = $1")
            .bind(id)
            .fetch_optional(&state.pool)
            .await
            .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
    match maybe_row {
        Some((content,)) => Ok(content),
        None => Err(StatusCode::NOT_FOUND),
    }
}

35
backend/src/s3.rs Normal file
View File

@@ -0,0 +1,35 @@
use s3::creds::Credentials;
use s3::{Bucket, Region};

/// Build the S3 bucket handle used for project file storage.
///
/// Configuration is read from environment variables with MinIO-friendly
/// defaults: `S3_ENDPOINT`, `S3_ACCESS_KEY`, `S3_SECRET_KEY`, `S3_BUCKET`,
/// `S3_REGION`. Bucket creation is attempted first and failures are only
/// logged, since the common cause is that the bucket already exists.
///
/// # Panics
/// Panics if the credentials or the bucket handle cannot be constructed.
pub async fn init_bucket() -> Box<Bucket> {
    // Small helper: env var with a fallback default.
    fn env_or(key: &str, default: &str) -> String {
        std::env::var(key).unwrap_or_else(|_| default.into())
    }

    let endpoint = env_or("S3_ENDPOINT", "http://localhost:9000");
    let access_key = env_or("S3_ACCESS_KEY", "minioadmin");
    let secret_key = env_or("S3_SECRET_KEY", "minioadmin");
    let bucket_name = env_or("S3_BUCKET", "bbb");
    let region = Region::Custom {
        region: env_or("S3_REGION", "us-east-1"),
        endpoint,
    };

    let credentials = Credentials::new(Some(&access_key), Some(&secret_key), None, None, None)
        .expect("Failed to create S3 credentials");

    // Best-effort bucket creation (ignore error if already exists).
    match Bucket::create_with_path_style(
        &bucket_name,
        region.clone(),
        credentials.clone(),
        s3::BucketConfiguration::default(),
    )
    .await
    {
        Ok(_) => println!("Created S3 bucket: {}", bucket_name),
        Err(e) => println!("Bucket may already exist: {}", e),
    }

    // Path-style addressing is required for MinIO endpoints.
    Bucket::new(&bucket_name, region, credentials)
        .expect("Failed to create S3 bucket handle")
        .with_path_style()
}