diff --git a/.changepacks/changepack_log_T3zG0KC3ZHXyofRMxILja.json b/.changepacks/changepack_log_T3zG0KC3ZHXyofRMxILja.json new file mode 100644 index 0000000..478c79d --- /dev/null +++ b/.changepacks/changepack_log_T3zG0KC3ZHXyofRMxILja.json @@ -0,0 +1 @@ +{"changes":{"Cargo.toml":"Patch"},"note":"Optimize","date":"2026-02-27T13:50:39.128469700Z"} \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 6bfa39a..eb8ae8b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3237,7 +3237,7 @@ checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "vespera" -version = "0.1.37" +version = "0.1.38" dependencies = [ "axum", "axum-extra", @@ -3253,7 +3253,7 @@ dependencies = [ [[package]] name = "vespera_core" -version = "0.1.37" +version = "0.1.38" dependencies = [ "rstest", "serde", @@ -3262,7 +3262,7 @@ dependencies = [ [[package]] name = "vespera_macro" -version = "0.1.37" +version = "0.1.38" dependencies = [ "insta", "proc-macro2", diff --git a/crates/vespera_macro/src/collector.rs b/crates/vespera_macro/src/collector.rs index 47e9116..75b0dd1 100644 --- a/crates/vespera_macro/src/collector.rs +++ b/crates/vespera_macro/src/collector.rs @@ -22,10 +22,11 @@ pub fn collect_metadata( folder_name: &str, ) -> MacroResult<(CollectedMetadata, HashMap)> { let mut metadata = CollectedMetadata::new(); - let mut file_asts = HashMap::new(); let files = collect_files(folder_path).map_err(|e| err_call_site(format!("vespera! macro: failed to scan route folder '{}': {}. Verify the folder exists and is readable.", folder_path.display(), e)))?; + let mut file_asts = HashMap::with_capacity(files.len()); + for file in files { if file.extension().is_none_or(|e| e != "rs") { continue; @@ -42,8 +43,9 @@ pub fn collect_metadata( let file_ast = syn::parse_file(&content).map_err(|e| err_call_site(format!("vespera! macro: syntax error in '{}': {}. 
Fix the Rust syntax errors in this file.", file.display(), e)))?; // Store file AST for downstream reuse (keyed by display path to match RouteMetadata.file_path) - let file_path_key = file.display().to_string(); - file_asts.insert(file_path_key, file_ast.clone()); + let file_path = file.display().to_string(); + file_asts.insert(file_path.clone(), file_ast); + let file_ast = &file_asts[&file_path]; // Get module path let segments = file @@ -64,8 +66,6 @@ pub fn collect_metadata( format!("{}::{}", folder_name, segments.join("::")) }; - let file_path = file.display().to_string(); - // Pre-compute base path once per file (avoids repeated segments.join per route) let base_path = format!("/{}", segments.join("/")); diff --git a/crates/vespera_macro/src/file_utils.rs b/crates/vespera_macro/src/file_utils.rs index 8594dda..381ca9f 100644 --- a/crates/vespera_macro/src/file_utils.rs +++ b/crates/vespera_macro/src/file_utils.rs @@ -3,12 +3,6 @@ use std::{ path::{Path, PathBuf}, }; -/// Read and parse a Rust source file, returning None on error (silent). -pub fn try_read_and_parse_file(path: &Path) -> Option { - let content = std::fs::read_to_string(path).ok()?; - syn::parse_file(&content).ok() -} - /// Read and parse a Rust source file, printing warnings on error. 
#[allow(clippy::similar_names)] pub fn read_and_parse_file_warn(path: &Path, context: &str) -> Option { diff --git a/crates/vespera_macro/src/lib.rs b/crates/vespera_macro/src/lib.rs index ece5fba..3f7039c 100644 --- a/crates/vespera_macro/src/lib.rs +++ b/crates/vespera_macro/src/lib.rs @@ -48,7 +48,7 @@ mod http; mod metadata; mod method; mod openapi_generator; -mod parse_utils; + mod parser; mod route; mod route_impl; diff --git a/crates/vespera_macro/src/openapi_generator.rs b/crates/vespera_macro/src/openapi_generator.rs index a476c1f..04c8501 100644 --- a/crates/vespera_macro/src/openapi_generator.rs +++ b/crates/vespera_macro/src/openapi_generator.rs @@ -14,7 +14,7 @@ use crate::{ metadata::CollectedMetadata, parser::{ build_operation_from_function, extract_default, extract_field_rename, extract_rename_all, - parse_enum_to_schema, parse_struct_to_schema, rename_field, strip_raw_prefix, + parse_enum_to_schema, parse_struct_to_schema, rename_field, strip_raw_prefix_owned, }, schema_macro::type_utils::get_type_default as utils_get_type_default, }; @@ -103,13 +103,12 @@ pub fn generate_openapi_doc_with_metadata( fn build_schema_lookups( metadata: &CollectedMetadata, ) -> (HashSet, HashMap) { - let mut known_schema_names = HashSet::new(); - let mut struct_definitions = HashMap::new(); + let mut known_schema_names = HashSet::with_capacity(metadata.structs.len()); + let mut struct_definitions = HashMap::with_capacity(metadata.structs.len()); for struct_meta in &metadata.structs { - let schema_name = struct_meta.name.clone(); - known_schema_names.insert(schema_name); struct_definitions.insert(struct_meta.name.clone(), struct_meta.definition.clone()); + known_schema_names.insert(struct_meta.name.clone()); } (known_schema_names, struct_definitions) @@ -139,7 +138,7 @@ fn build_file_cache(metadata: &CollectedMetadata) -> HashMap /// Enables O(1) lookup of which file contains a given struct definition, /// replacing the previous O(routes × file_read) linear scan. 
fn build_struct_file_index(file_cache: &HashMap) -> HashMap { - let mut index = HashMap::new(); + let mut index = HashMap::with_capacity(file_cache.len() * 4); for (path, ast) in file_cache { for item in &ast.items { if let syn::Item::Struct(s) = item { @@ -232,47 +231,63 @@ fn build_path_items( let mut paths = BTreeMap::new(); let mut all_tags = BTreeSet::new(); + // Pre-build function name index for O(1) lookup instead of O(items) per route + let fn_index: HashMap<&str, HashMap> = file_cache + .iter() + .map(|(path, ast)| { + let fns: HashMap = ast + .items + .iter() + .filter_map(|item| { + if let syn::Item::Fn(fn_item) = item { + Some((fn_item.sig.ident.to_string(), fn_item)) + } else { + None + } + }) + .collect(); + (path.as_str(), fns) + }) + .collect(); + for route_meta in &metadata.routes { - let Some(file_ast) = file_cache.get(&route_meta.file_path) else { + let Some(fns) = fn_index.get(route_meta.file_path.as_str()) else { continue; }; - for item in &file_ast.items { - if let syn::Item::Fn(fn_item) = item - && fn_item.sig.ident == route_meta.function_name - { - let Ok(method) = HttpMethod::try_from(route_meta.method.as_str()) else { - eprintln!( - "vespera: skipping route '{}' — unknown HTTP method '{}'", - route_meta.path, route_meta.method - ); - continue; - }; + let Some(fn_item) = fns.get(&route_meta.function_name) else { + continue; + }; - if let Some(tags) = &route_meta.tags { - for tag in tags { - all_tags.insert(tag.clone()); - } - } + let Ok(method) = HttpMethod::try_from(route_meta.method.as_str()) else { + eprintln!( + "vespera: skipping route '{}' — unknown HTTP method '{}'", + route_meta.path, route_meta.method + ); + continue; + }; - let mut operation = build_operation_from_function( - &fn_item.sig, - &route_meta.path, - known_schema_names, - struct_definitions, - route_meta.error_status.as_deref(), - route_meta.tags.as_deref(), - ); - operation.description.clone_from(&route_meta.description); - - let path_item = paths - 
.entry(route_meta.path.clone()) - .or_insert_with(PathItem::default); - - path_item.set_operation(method, operation); - break; + if let Some(tags) = &route_meta.tags { + for tag in tags { + all_tags.insert(tag.clone()); } } + + let mut operation = build_operation_from_function( + &fn_item.sig, + &route_meta.path, + known_schema_names, + struct_definitions, + route_meta.error_status.as_deref(), + route_meta.tags.as_deref(), + ); + operation.description.clone_from(&route_meta.description); + + let path_item = paths + .entry(route_meta.path.clone()) + .or_insert_with(PathItem::default); + + path_item.set_operation(method, operation); } (paths, all_tags) @@ -321,7 +336,7 @@ fn process_default_functions( for field in &fields_named.named { let rust_field_name = field.ident.as_ref().map_or_else( || "unknown".to_string(), - |i| strip_raw_prefix(&i.to_string()).to_string(), + |i| strip_raw_prefix_owned(i.to_string()), ); let field_name = extract_field_rename(&field.attrs) .unwrap_or_else(|| rename_field(&rust_field_name, struct_rename_all.as_deref())); @@ -1663,4 +1678,30 @@ pub fn create_users() -> String { panic!("Expected inline schema with default"); } } + + #[test] + fn test_generate_openapi_route_function_not_in_ast() { + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + let route_content = "pub fn get_items() -> String { \"items\".to_string() }\n"; + let route_file = create_temp_file(&temp_dir, "users.rs", route_content); + + let mut metadata = CollectedMetadata::new(); + metadata.routes.push(RouteMetadata { + method: "GET".to_string(), + path: "/users".to_string(), + function_name: "get_users".to_string(), + module_path: "test::users".to_string(), + file_path: route_file.to_string_lossy().to_string(), + signature: "fn get_users() -> String".to_string(), + error_status: None, + tags: None, + description: None, + }); + + let doc = generate_openapi_doc_with_metadata(None, None, None, &metadata, None); + assert!( + doc.paths.is_empty(), + "Route with 
non-matching function should be skipped" + ); + } } diff --git a/crates/vespera_macro/src/parse_utils.rs b/crates/vespera_macro/src/parse_utils.rs deleted file mode 100644 index 6bfc9c8..0000000 --- a/crates/vespera_macro/src/parse_utils.rs +++ /dev/null @@ -1,473 +0,0 @@ -//! Parsing utilities for proc-macro input. -//! -//! Provides reusable helpers for parsing common patterns in proc-macro inputs, -//! including lookahead-based parsing, key-value pairs, and bracket-delimited lists. -//! -//! These utilities are available for future refactoring of existing parsing code in `args.rs` -//! and `router_codegen.rs`. They extract the most common lookahead-based patterns. - -#![allow(dead_code)] - -use proc_macro2::Delimiter; -use syn::parse::discouraged::AnyDelimiter; -use syn::{Ident, LitStr, Token, parse::ParseStream}; - -/// Parse a comma-separated list with optional trailing comma. -/// -/// Automatically handles the lookahead and comma parsing loop. -/// The provided parser function is called for each item. -/// -/// # Example -/// ```ignore -/// let items: Vec = parse_comma_list(input, |input| { -/// input.parse::().map(|lit| lit.value()) -/// })?; -/// ``` -pub fn parse_comma_list(input: ParseStream, mut parser: F) -> syn::Result> -where - F: FnMut(ParseStream) -> syn::Result, -{ - let mut items = Vec::new(); - - while !input.is_empty() { - items.push(parser(input)?); - - if input.peek(Token![,]) { - input.parse::()?; - } else { - break; - } - } - - Ok(items) -} - -/// Parse a bracket-delimited comma-separated list. 
-/// -/// # Example -/// ```ignore -/// let items: Vec = parse_bracketed_list(input, |input| { -/// input.parse::().map(|lit| lit.value()) -/// })?; -/// ``` -pub fn parse_bracketed_list(input: ParseStream, parser: F) -> syn::Result> -where - F: Fn(ParseStream) -> syn::Result, -{ - let (delim, _span, content) = input.parse_any_delimiter()?; - if delim != Delimiter::Bracket { - return Err(content.error("expected brackets")); - } - parse_comma_list(&content, parser) -} - -/// Parse identifier-based key-value pairs. -/// -/// Looks for patterns like `key = value`, where the key is an identifier. -/// Returns the key as a string and leaves the `=` token consumed. -/// -/// # Returns -/// - `Some((key, true))` if we found an identifier that could be a key -/// - `None` if end of input or unexpected token type -/// -/// # Example -/// ```ignore -/// if let Some((key, _)) = try_parse_key(input)? { -/// match key.as_str() { -/// "title" => { input.parse::()?; title = Some(input.parse()?); } -/// "version" => { input.parse::()?; version = Some(input.parse()?); } -/// _ => return Err(syn::Error::new(...)) -/// } -/// } -/// ``` -pub fn try_parse_key(input: ParseStream) -> syn::Result> { - let lookahead = input.lookahead1(); - - if lookahead.peek(Ident) { - let ident: Ident = input.parse()?; - Ok(Some(ident.to_string())) - } else if lookahead.peek(LitStr) { - Ok(None) - } else { - Err(lookahead.error()) - } -} - -/// Parse a list of identifier-keyed key-value pairs. -/// -/// Expects comma-separated key=value pairs where keys are identifiers. -/// Each iteration calls the handler with the key, and the handler is responsible -/// for consuming the `=` token and parsing the value. 
-/// -/// # Example -/// ```ignore -/// let mut title = None; -/// let mut version = None; -/// -/// parse_key_value_list(input, |key, input| { -/// match key.as_str() { -/// "title" => { -/// input.parse::()?; -/// title = Some(input.parse()?); -/// } -/// "version" => { -/// input.parse::()?; -/// version = Some(input.parse()?); -/// } -/// _ => return Err(syn::Error::new(...)) -/// } -/// Ok(()) -/// })?; -/// ``` -pub fn parse_key_value_list(input: ParseStream, mut handler: F) -> syn::Result<()> -where - F: FnMut(String, ParseStream) -> syn::Result<()>, -{ - while !input.is_empty() { - let lookahead = input.lookahead1(); - - if lookahead.peek(Ident) { - let ident: Ident = input.parse()?; - let key = ident.to_string(); - handler(key, input)?; - - // Check if there's a comma - if input.peek(Token![,]) { - input.parse::()?; - } else { - break; - } - } else if lookahead.peek(LitStr) { - // Allow string as a special case (e.g., for backward compatibility) - break; - } else { - return Err(lookahead.error()); - } - } - - Ok(()) -} - -/// Check if next token is a comma and consume it if present. -/// -/// Returns `true` if comma was found and consumed, `false` otherwise. 
-pub fn try_consume_comma(input: ParseStream) -> bool { - if input.peek(Token![,]) { - let _ = input.parse::(); - true - } else { - false - } -} - -#[cfg(test)] -mod tests { - use syn::parse::Parser; - - use super::*; - - #[test] - fn test_parse_comma_list_single() { - // Test basic parsing capability - parse a list of 3 strings - let parser = |input: ParseStream| { - parse_comma_list(input, |input| { - input.parse::().map(|lit| lit.value()) - }) - }; - - let tokens = quote::quote!("a", "b", "c"); - let result = parser.parse2(tokens); - assert!(result.is_ok()); - let items: Vec = result.unwrap(); - assert_eq!(items, vec!["a", "b", "c"]); - } - - #[test] - fn test_parse_comma_list_with_trailing_comma() { - let parser = |input: ParseStream| { - parse_comma_list(input, |input| { - input.parse::().map(|lit| lit.value()) - }) - }; - - let tokens = quote::quote!("x", "y",); - let result = parser.parse2(tokens); - assert!(result.is_ok()); - let items: Vec = result.unwrap(); - assert_eq!(items, vec!["x", "y"]); - } - - #[test] - fn test_parse_bracketed_list_strings() { - let parser = |input: ParseStream| { - parse_bracketed_list(input, |input| { - input.parse::().map(|lit| lit.value()) - }) - }; - - let tokens = quote::quote!(["a", "b", "c"]); - let result = parser.parse2(tokens); - assert!(result.is_ok()); - let items: Vec = result.unwrap(); - assert_eq!(items, vec!["a", "b", "c"]); - } - - #[test] - fn test_try_parse_key_ident() { - let parser = |input: ParseStream| try_parse_key(input); - - let tokens = quote::quote!(title); - let result = parser.parse2(tokens); - assert!(result.is_ok()); - let key = result.unwrap(); - assert_eq!(key, Some("title".to_string())); - } - - #[test] - fn test_try_consume_comma_logic() { - // Test the comma consumption logic by parsing and manually checking - let parser = |input: ParseStream| { - let has_comma = try_consume_comma(input); - Ok(has_comma) - }; - - let tokens = quote::quote!(,); - let result = parser.parse2(tokens); - 
assert!(result.is_ok()); - assert!(result.unwrap()); - } - - #[test] - fn test_try_parse_key_litstr() { - // When input is a LitStr, try_parse_key returns Ok(None) without consuming - let parser = |input: ParseStream| { - let result = try_parse_key(input)?; - // LitStr remains unconsumed, parse it to clear the buffer - let _: LitStr = input.parse()?; - Ok(result) - }; - - let tokens = quote::quote!("some_string"); - let result = parser.parse2(tokens); - assert!(result.is_ok()); - assert_eq!(result.unwrap(), None); - } - - #[test] - fn test_try_parse_key_invalid_token() { - // When input is neither Ident nor LitStr, try_parse_key returns error - let parser = |input: ParseStream| try_parse_key(input); - - // Use a number literal which is neither Ident nor LitStr - let tokens = quote::quote!(42); - let result = parser.parse2(tokens); - assert!(result.is_err()); - } - - #[test] - fn test_try_consume_comma_no_comma() { - // When there's no comma, try_consume_comma returns false without consuming - let parser = |input: ParseStream| { - let has_comma = try_consume_comma(input); - // Token remains unconsumed, parse it to clear the buffer - let _: Ident = input.parse()?; - Ok(has_comma) - }; - - // Some token that's not a comma - let tokens = quote::quote!(foo); - let result = parser.parse2(tokens); - assert!(result.is_ok()); - assert!(!result.unwrap()); - } - - #[test] - fn test_parse_key_value_handler() { - let parser = |input: ParseStream| { - let mut title = None; - let mut version = None; - - parse_key_value_list(input, |key, input| { - match key.as_str() { - "title" => { - input.parse::()?; - title = Some(input.parse::()?.value()); - } - "version" => { - input.parse::()?; - version = Some(input.parse::()?.value()); - } - _ => { - return Err(syn::Error::new( - proc_macro2::Span::call_site(), - "unknown key", - )); - } - } - Ok(()) - })?; - - Ok((title, version)) - }; - - let tokens = quote::quote!(title = "Test", version = "1.0"); - let result = parser.parse2(tokens); 
- assert!(result.is_ok()); - let (title, version) = result.unwrap(); - assert_eq!(title, Some("Test".to_string())); - assert_eq!(version, Some("1.0".to_string())); - } - - #[test] - fn test_parse_key_value_list_litstr_breaks() { - // When a LitStr is encountered in parse_key_value_list, it breaks (doesn't error) - let parser = |input: ParseStream| { - let mut keys = Vec::new(); - parse_key_value_list(input, |key, input| { - input.parse::()?; - let _: LitStr = input.parse()?; - keys.push(key); - Ok(()) - })?; - // The remaining LitStr is left unconsumed, parse it to clear the buffer - let _: LitStr = input.parse()?; - Ok(keys) - }; - - // "remaining" is a LitStr at the end, should break without error - let tokens = quote::quote!(title = "Test", "remaining"); - let result = parser.parse2(tokens); - assert!(result.is_ok()); - assert_eq!(result.unwrap(), vec!["title"]); - } - - #[test] - fn test_parse_key_value_list_invalid_token() { - // When an invalid token (not Ident or LitStr) is encountered, returns error - let parser = |input: ParseStream| { - parse_key_value_list(input, |_key, input| { - input.parse::()?; - let _: LitStr = input.parse()?; - Ok(()) - }) - }; - - // 42 is neither Ident nor LitStr, should error - let tokens = quote::quote!(42); - let result = parser.parse2(tokens); - assert!(result.is_err()); - } - - #[test] - fn test_parse_bracketed_list_empty() { - // Test parse_bracketed_list with empty brackets - let parser = |input: ParseStream| { - parse_bracketed_list(input, |input| { - input.parse::().map(|lit| lit.value()) - }) - }; - - let tokens = quote::quote!([]); - let result = parser.parse2(tokens); - assert!(result.is_ok()); - let items: Vec = result.unwrap(); - assert!(items.is_empty()); - } - - #[test] - fn test_parse_bracketed_list_single_item() { - // Test parse_bracketed_list with single item - let parser = |input: ParseStream| { - parse_bracketed_list(input, |input| { - input.parse::().map(|lit| lit.value()) - }) - }; - - let tokens = 
quote::quote!(["single"]); - let result = parser.parse2(tokens); - assert!(result.is_ok()); - let items: Vec = result.unwrap(); - assert_eq!(items, vec!["single"]); - } - - #[test] - fn test_parse_bracketed_list_with_trailing_comma() { - // Test parse_bracketed_list with trailing comma - let parser = |input: ParseStream| { - parse_bracketed_list(input, |input| { - input.parse::().map(|lit| lit.value()) - }) - }; - - let tokens = quote::quote!(["a", "b",]); - let result = parser.parse2(tokens); - assert!(result.is_ok()); - let items: Vec = result.unwrap(); - assert_eq!(items, vec!["a", "b"]); - } - - #[test] - fn test_parse_bracketed_list_integers() { - // Test parse_bracketed_list with integer literals - use syn::LitInt; - let parser = |input: ParseStream| { - parse_bracketed_list(input, |input| { - input - .parse::() - .and_then(|lit| lit.base10_parse::()) - }) - }; - - let tokens = quote::quote!([1, 2, 3]); - let result = parser.parse2(tokens); - assert!(result.is_ok()); - let items: Vec = result.unwrap(); - assert_eq!(items, vec![1, 2, 3]); - } - - #[test] - fn test_parse_bracketed_list_wrong_delimiter_parens() { - // Test parse_bracketed_list with parentheses instead of brackets - should error - let parser = |input: ParseStream| { - parse_bracketed_list(input, |input| { - input.parse::().map(|lit| lit.value()) - }) - }; - - let tokens = quote::quote!(("a", "b")); - let result = parser.parse2(tokens); - assert!(result.is_err()); - let err = result.unwrap_err(); - assert!(err.to_string().contains("expected brackets")); - } - - #[test] - fn test_parse_bracketed_list_wrong_delimiter_braces() { - // Test parse_bracketed_list with braces instead of brackets - should error - let parser = |input: ParseStream| { - parse_bracketed_list(input, |input| { - input.parse::().map(|lit| lit.value()) - }) - }; - - let tokens = quote::quote!({"a", "b"}); - let result = parser.parse2(tokens); - assert!(result.is_err()); - let err = result.unwrap_err(); - 
assert!(err.to_string().contains("expected brackets")); - } - - #[test] - fn test_parse_bracketed_list_no_delimiter() { - // Test parse_bracketed_list with no delimiter at all - should error - let parser = |input: ParseStream| { - parse_bracketed_list(input, |input| { - input.parse::().map(|lit| lit.value()) - }) - }; - - let tokens = quote::quote!("just_a_string"); - let result = parser.parse2(tokens); - assert!(result.is_err()); - } -} diff --git a/crates/vespera_macro/src/parser/mod.rs b/crates/vespera_macro/src/parser/mod.rs index 87cfe3b..20eb38d 100644 --- a/crates/vespera_macro/src/parser/mod.rs +++ b/crates/vespera_macro/src/parser/mod.rs @@ -9,5 +9,5 @@ pub use operation::build_operation_from_function; pub use schema::{ extract_default, extract_field_rename, extract_rename_all, extract_skip, extract_skip_serializing_if, parse_enum_to_schema, parse_struct_to_schema, - parse_type_to_schema_ref, rename_field, strip_raw_prefix, + parse_type_to_schema_ref, rename_field, strip_raw_prefix_owned, }; diff --git a/crates/vespera_macro/src/parser/operation.rs b/crates/vespera_macro/src/parser/operation.rs index 58d9ba2..fbf57cb 100644 --- a/crates/vespera_macro/src/parser/operation.rs +++ b/crates/vespera_macro/src/parser/operation.rs @@ -1,3 +1,4 @@ +use std::cell::OnceCell; use std::collections::{BTreeMap, HashSet}; use syn::{FnArg, PatType, Type}; @@ -23,6 +24,7 @@ pub fn build_operation_from_function( let mut parameters = Vec::new(); let mut request_body = None; let mut path_extractor_type: Option = None; + let string_type: OnceCell = OnceCell::new(); // First pass: find Path extractor and extract its type for input in &sig.inputs { @@ -72,7 +74,8 @@ pub fn build_operation_from_function( description: None, required: Some(true), schema: Some(parse_type_to_schema_ref_with_schemas( - &syn::parse_str::("String").unwrap(), + string_type + .get_or_init(|| syn::parse_str::("String").unwrap()), known_schemas, struct_definitions, )), @@ -122,7 +125,7 @@ pub fn 
build_operation_from_function( description: None, required: Some(true), schema: Some(parse_type_to_schema_ref_with_schemas( - &syn::parse_str::("String").unwrap(), + string_type.get_or_init(|| syn::parse_str::("String").unwrap()), known_schemas, struct_definitions, )), diff --git a/crates/vespera_macro/src/parser/schema/enum_schema.rs b/crates/vespera_macro/src/parser/schema/enum_schema.rs index 008bbb8..84751dc 100644 --- a/crates/vespera_macro/src/parser/schema/enum_schema.rs +++ b/crates/vespera_macro/src/parser/schema/enum_schema.rs @@ -22,7 +22,7 @@ use vespera_core::schema::{Discriminator, Schema, SchemaRef, SchemaType}; use super::{ serde_attrs::{ SerdeEnumRepr, extract_doc_comment, extract_enum_repr, extract_field_rename, - extract_rename_all, rename_field, strip_raw_prefix, + extract_rename_all, rename_field, strip_raw_prefix_owned, }, type_schema::parse_type_to_schema_ref, }; @@ -107,10 +107,10 @@ fn parse_unit_enum_to_schema( description: Option, rename_all: Option<&str>, ) -> Schema { - let mut enum_values = Vec::new(); + let mut enum_values = Vec::with_capacity(enum_item.variants.len()); for variant in &enum_item.variants { - let variant_name = strip_raw_prefix(&variant.ident.to_string()).to_string(); + let variant_name = strip_raw_prefix_owned(variant.ident.to_string()); // Check for variant-level rename attribute first (takes precedence) let enum_value = extract_field_rename(&variant.attrs) @@ -133,7 +133,7 @@ fn parse_unit_enum_to_schema( /// Get the variant key (name after rename transformations) fn get_variant_key(variant: &syn::Variant, rename_all: Option<&str>) -> String { - let variant_name = strip_raw_prefix(&variant.ident.to_string()).to_string(); + let variant_name = strip_raw_prefix_owned(variant.ident.to_string()); extract_field_rename(&variant.attrs).unwrap_or_else(|| rename_field(&variant_name, rename_all)) } @@ -147,13 +147,13 @@ fn build_struct_variant_properties( struct_definitions: &HashMap, ) -> (BTreeMap, Vec) { let mut 
variant_properties = BTreeMap::new(); - let mut variant_required = Vec::new(); + let mut variant_required = Vec::with_capacity(fields_named.named.len()); let variant_rename_all = extract_rename_all(variant_attrs); for field in &fields_named.named { let rust_field_name = field.ident.as_ref().map_or_else( || "unknown".to_string(), - |i| strip_raw_prefix(&i.to_string()).to_string(), + |i| strip_raw_prefix_owned(i.to_string()), ); // Check for field-level rename attribute first (takes precedence) @@ -231,7 +231,7 @@ fn build_variant_data_schema( )) } else { // Multiple fields tuple variant - array with prefixItems - let mut tuple_item_schemas = Vec::new(); + let mut tuple_item_schemas = Vec::with_capacity(fields_unnamed.unnamed.len()); for field in &fields_unnamed.unnamed { let field_schema = parse_type_to_schema_ref(&field.ty, known_schemas, struct_definitions); @@ -283,7 +283,7 @@ fn parse_externally_tagged_enum( known_schemas: &HashSet, struct_definitions: &HashMap, ) -> Schema { - let mut one_of_schemas = Vec::new(); + let mut one_of_schemas = Vec::with_capacity(enum_item.variants.len()); for variant in &enum_item.variants { let variant_key = get_variant_key(variant, rename_all); @@ -305,7 +305,7 @@ fn parse_externally_tagged_enum( parse_type_to_schema_ref(inner_type, known_schemas, struct_definitions) } else { // Multiple fields - array with prefixItems - let mut tuple_item_schemas = Vec::new(); + let mut tuple_item_schemas = Vec::with_capacity(fields_unnamed.unnamed.len()); for field in &fields_unnamed.unnamed { let field_schema = parse_type_to_schema_ref(&field.ty, known_schemas, struct_definitions); @@ -396,7 +396,9 @@ fn parse_internally_tagged_enum( known_schemas: &HashSet, struct_definitions: &HashMap, ) -> Schema { - let mut one_of_schemas = Vec::new(); + let mut one_of_schemas = Vec::with_capacity(enum_item.variants.len()); + + let tag_string = tag.to_string(); for variant in &enum_item.variants { let variant_key = get_variant_key(variant, rename_all); @@ 
-407,7 +409,7 @@ fn parse_internally_tagged_enum( // Unit variant: {"tag": "VariantName"} let mut properties = BTreeMap::new(); properties.insert( - tag.to_string(), + tag_string.clone(), SchemaRef::Inline(Box::new(Schema { r#enum: Some(vec![serde_json::Value::String(variant_key.clone())]), ..Schema::string() @@ -417,7 +419,7 @@ fn parse_internally_tagged_enum( Schema { description: variant_description, properties: Some(properties), - required: Some(vec![tag.to_string()]), + required: Some(vec![tag_string.clone()]), ..Schema::object() } } @@ -433,13 +435,13 @@ fn parse_internally_tagged_enum( // Add the tag field properties.insert( - tag.to_string(), + tag_string.clone(), SchemaRef::Inline(Box::new(Schema { r#enum: Some(vec![serde_json::Value::String(variant_key.clone())]), ..Schema::string() })), ); - required.insert(0, tag.to_string()); + required.insert(0, tag_string.clone()); Schema { description: variant_description, @@ -467,7 +469,7 @@ fn parse_internally_tagged_enum( Some(one_of_schemas) }, discriminator: Some(Discriminator { - property_name: tag.to_string(), + property_name: tag_string, mapping: None, // Mapping not needed for inline schemas }), ..Default::default() @@ -485,18 +487,21 @@ fn parse_adjacently_tagged_enum( known_schemas: &HashSet, struct_definitions: &HashMap, ) -> Schema { - let mut one_of_schemas = Vec::new(); + let mut one_of_schemas = Vec::with_capacity(enum_item.variants.len()); + + let tag_string = tag.to_string(); + let content_string = content.to_string(); for variant in &enum_item.variants { let variant_key = get_variant_key(variant, rename_all); let variant_description = extract_doc_comment(&variant.attrs); let mut properties = BTreeMap::new(); - let mut required = vec![tag.to_string()]; + let mut required = vec![tag_string.clone()]; // Add the tag field properties.insert( - tag.to_string(), + tag_string.clone(), SchemaRef::Inline(Box::new(Schema { r#enum: Some(vec![serde_json::Value::String(variant_key.clone())]), ..Schema::string() 
@@ -507,8 +512,8 @@ fn parse_adjacently_tagged_enum( if let Some(data_schema) = build_variant_data_schema(variant, rename_all, known_schemas, struct_definitions) { - properties.insert(content.to_string(), data_schema); - required.push(content.to_string()); + properties.insert(content_string.clone(), data_schema); + required.push(content_string.clone()); } let variant_schema = Schema { @@ -530,7 +535,7 @@ fn parse_adjacently_tagged_enum( Some(one_of_schemas) }, discriminator: Some(Discriminator { - property_name: tag.to_string(), + property_name: tag_string, mapping: None, }), ..Default::default() @@ -546,7 +551,7 @@ fn parse_untagged_enum( known_schemas: &HashSet, struct_definitions: &HashMap, ) -> Schema { - let mut one_of_schemas = Vec::new(); + let mut one_of_schemas = Vec::with_capacity(enum_item.variants.len()); for variant in &enum_item.variants { let variant_description = extract_doc_comment(&variant.attrs); @@ -579,7 +584,7 @@ fn parse_untagged_enum( schema } else { // Multiple fields - array with prefixItems - let mut tuple_item_schemas = Vec::new(); + let mut tuple_item_schemas = Vec::with_capacity(fields_unnamed.unnamed.len()); for field in &fields_unnamed.unnamed { let field_schema = parse_type_to_schema_ref(&field.ty, known_schemas, struct_definitions); diff --git a/crates/vespera_macro/src/parser/schema/mod.rs b/crates/vespera_macro/src/parser/schema/mod.rs index 7c7c1ee..6dd9daa 100644 --- a/crates/vespera_macro/src/parser/schema/mod.rs +++ b/crates/vespera_macro/src/parser/schema/mod.rs @@ -39,7 +39,7 @@ mod type_schema; pub use enum_schema::parse_enum_to_schema; pub use serde_attrs::{ extract_default, extract_field_rename, extract_rename_all, extract_skip, - extract_skip_serializing_if, rename_field, strip_raw_prefix, + extract_skip_serializing_if, rename_field, strip_raw_prefix_owned, }; pub use struct_schema::parse_struct_to_schema; pub use type_schema::parse_type_to_schema_ref; diff --git a/crates/vespera_macro/src/parser/schema/serde_attrs.rs 
b/crates/vespera_macro/src/parser/schema/serde_attrs.rs index d986145..8e41ab1 100644 --- a/crates/vespera_macro/src/parser/schema/serde_attrs.rs +++ b/crates/vespera_macro/src/parser/schema/serde_attrs.rs @@ -30,21 +30,18 @@ pub fn extract_doc_comment(attrs: &[syn::Attribute]) -> Option { } } -/// Strips the `r#` prefix from raw identifiers. -/// E.g., `r#type` becomes `type`. -pub fn strip_raw_prefix(ident: &str) -> &str { - ident.strip_prefix("r#").unwrap_or(ident) +/// Strips the `r#` prefix from raw identifiers, returning an owned `String`. +/// For the 99% case (no `r#` prefix), returns the input directly with zero extra allocation. +#[allow(clippy::option_if_let_else)] // clippy suggestion doesn't compile: borrow-move conflict +pub fn strip_raw_prefix_owned(ident: String) -> String { + if let Some(stripped) = ident.strip_prefix("r#") { + stripped.to_string() + } else { + ident + } } -/// Capitalizes the first character of a string. -/// Returns empty string if input is empty. -/// E.g., `user` -> `User`, `USER` -> `USER`, `` -> `` -pub fn capitalize_first(s: &str) -> String { - let mut chars = s.chars(); - chars.next().map_or_else(String::new, |first| { - first.to_uppercase().chain(chars).collect() - }) -} +pub use crate::schema_macro::type_utils::capitalize_first; /// Extract a Schema name from a `SeaORM` Entity type path. 
/// @@ -441,10 +438,10 @@ pub fn rename_field(field_name: &str, rename_all: Option<&str>) -> String { result.push(ch); } else { // Still in first word, lowercase it - result.push(ch.to_lowercase().next().unwrap_or(ch)); + result.push(ch.to_ascii_lowercase()); } } else if capitalize_next { - result.push(ch.to_uppercase().next().unwrap_or(ch)); + result.push(ch.to_ascii_uppercase()); capitalize_next = false; } else { result.push(ch); @@ -459,7 +456,7 @@ pub fn rename_field(field_name: &str, rename_all: Option<&str>) -> String { if ch.is_uppercase() && i > 0 { result.push('_'); } - result.push(ch.to_lowercase().next().unwrap_or(ch)); + result.push(ch.to_ascii_lowercase()); } result } @@ -471,7 +468,7 @@ pub fn rename_field(field_name: &str, rename_all: Option<&str>) -> String { if i > 0 && !result.ends_with('-') { result.push('-'); } - result.push(ch.to_lowercase().next().unwrap_or(ch)); + result.push(ch.to_ascii_lowercase()); } else if ch == '_' { result.push('-'); } else { @@ -488,7 +485,7 @@ pub fn rename_field(field_name: &str, rename_all: Option<&str>) -> String { if ch == '_' { capitalize_next = true; } else if capitalize_next { - result.push(ch.to_uppercase().next().unwrap_or(ch)); + result.push(ch.to_ascii_uppercase()); capitalize_next = false; } else { result.push(ch); @@ -518,7 +515,7 @@ pub fn rename_field(field_name: &str, rename_all: Option<&str>) -> String { snake_case.push('_'); } if ch != '_' && ch != '-' { - snake_case.push(ch.to_lowercase().next().unwrap_or(ch)); + snake_case.push(ch.to_ascii_lowercase()); } else if ch == '_' { snake_case.push('_'); } @@ -540,7 +537,7 @@ pub fn rename_field(field_name: &str, rename_all: Option<&str>) -> String { if ch == '_' { kebab_case.push('-'); } else if ch != '-' { - kebab_case.push(ch.to_lowercase().next().unwrap_or(ch)); + kebab_case.push(ch.to_ascii_lowercase()); } else { kebab_case.push('-'); } @@ -1112,24 +1109,13 @@ mod tests { assert_eq!(result, "test_name"); } - /// Test strip_raw_prefix function + /// 
Test strip_raw_prefix_owned function #[test] - fn test_strip_raw_prefix() { - assert_eq!(strip_raw_prefix("r#type"), "type"); - assert_eq!(strip_raw_prefix("r#match"), "match"); - assert_eq!(strip_raw_prefix("normal"), "normal"); - assert_eq!(strip_raw_prefix("r#"), ""); - } - - #[rstest] - #[case("", "")] - #[case("a", "A")] - #[case("user", "User")] - #[case("User", "User")] - #[case("USER", "USER")] - #[case("user_name", "User_name")] - fn test_capitalize_first(#[case] input: &str, #[case] expected: &str) { - assert_eq!(capitalize_first(input), expected); + fn test_strip_raw_prefix_owned() { + assert_eq!(strip_raw_prefix_owned("r#type".to_string()), "type"); + assert_eq!(strip_raw_prefix_owned("r#match".to_string()), "match"); + assert_eq!(strip_raw_prefix_owned("normal".to_string()), "normal"); + assert_eq!(strip_raw_prefix_owned("r#".to_string()), ""); } // Tests using programmatically created attributes diff --git a/crates/vespera_macro/src/parser/schema/struct_schema.rs b/crates/vespera_macro/src/parser/schema/struct_schema.rs index cc2db87..2bee2c6 100644 --- a/crates/vespera_macro/src/parser/schema/struct_schema.rs +++ b/crates/vespera_macro/src/parser/schema/struct_schema.rs @@ -11,7 +11,7 @@ use vespera_core::schema::{Schema, SchemaRef, SchemaType}; use super::{ serde_attrs::{ extract_doc_comment, extract_field_rename, extract_flatten, extract_rename_all, - extract_skip, rename_field, strip_raw_prefix, + extract_skip, rename_field, strip_raw_prefix_owned, }, type_schema::parse_type_to_schema_ref, }; @@ -35,7 +35,7 @@ pub fn parse_struct_to_schema( struct_definitions: &HashMap, ) -> Schema { let mut properties = BTreeMap::new(); - let mut required = Vec::new(); + let mut required = Vec::with_capacity(8); let mut flattened_refs: Vec = Vec::new(); // Extract struct-level doc comment for schema description @@ -66,7 +66,7 @@ pub fn parse_struct_to_schema( let rust_field_name = field.ident.as_ref().map_or_else( || "unknown".to_string(), - |i| 
strip_raw_prefix(&i.to_string()).to_string(), + |i| strip_raw_prefix_owned(i.to_string()), ); // Check for field-level rename attribute first (takes precedence) diff --git a/crates/vespera_macro/src/router_codegen.rs b/crates/vespera_macro/src/router_codegen.rs index 4f38030..8370113 100644 --- a/crates/vespera_macro/src/router_codegen.rs +++ b/crates/vespera_macro/src/router_codegen.rs @@ -432,8 +432,9 @@ impl Parse for ExportAppInput { #[allow(clippy::too_many_lines)] pub fn generate_router_code( metadata: &CollectedMetadata, - docs_info: Option<(String, String)>, - redoc_info: Option<(String, String)>, + docs_url: Option<&str>, + redoc_url: Option<&str>, + spec_tokens: Option, merge_apps: &[syn::Path], ) -> proc_macro2::TokenStream { let mut router_nests = Vec::new(); @@ -476,93 +477,119 @@ pub fn generate_router_code( // Check if we need to merge specs at runtime let has_merge = !merge_apps.is_empty(); - if let Some((docs_url, spec)) = docs_info { + // Generate merge code once, reuse in both docs_url and redoc_url routes + let merge_spec_code: Vec<_> = merge_apps + .iter() + .map(|app_path| { + quote! { + if let Ok(other) = vespera::serde_json::from_str::(#app_path::OPENAPI_SPEC) { + merged.merge(other); + } + } + }) + .collect(); + + if let Some(docs_url) = docs_url { let method_path = http_method_to_token_stream(HttpMethod::Get); if has_merge { - // Generate code that merges specs at runtime using OnceLock - let merge_spec_code: Vec<_> = merge_apps - .iter() - .map(|app_path| { - quote! 
{ - if let Ok(other) = vespera::serde_json::from_str::(#app_path::OPENAPI_SPEC) { - merged.merge(other); - } - } - }) - .collect(); - router_nests.push(quote!( .route(#docs_url, #method_path(|| async { static MERGED_SPEC: std::sync::OnceLock = std::sync::OnceLock::new(); let spec = MERGED_SPEC.get_or_init(|| { - let base_spec = #spec; - let mut merged: vespera::OpenApi = vespera::serde_json::from_str(base_spec).unwrap(); + let mut merged: vespera::OpenApi = vespera::serde_json::from_str(__VESPERA_SPEC).unwrap(); #(#merge_spec_code)* vespera::serde_json::to_string(&merged).unwrap() }); - let html = format!( - r#"Swagger UI
"#, - spec - ); - vespera::axum::response::Html(html) + static HTML: std::sync::OnceLock = std::sync::OnceLock::new(); + let html = HTML.get_or_init(|| { + format!( + r#"Swagger UI
"#, + spec + ) + }); + vespera::axum::response::Html(html.as_str()) })) )); } else { - let html = format!( - r#"Swagger UI
"# - ); - router_nests.push(quote!( - .route(#docs_url, #method_path(|| async { vespera::axum::response::Html(#html) })) + .route(#docs_url, #method_path(|| async { + static HTML: std::sync::OnceLock = std::sync::OnceLock::new(); + let html = HTML.get_or_init(|| { + format!( + r#"Swagger UI
"#, + __VESPERA_SPEC + ) + }); + vespera::axum::response::Html(html.as_str()) + })) )); } } - if let Some((redoc_url, spec)) = redoc_info { + if let Some(redoc_url) = redoc_url { let method_path = http_method_to_token_stream(HttpMethod::Get); if has_merge { - // Generate code that merges specs at runtime using OnceLock - let merge_spec_code: Vec<_> = merge_apps - .iter() - .map(|app_path| { - quote! { - if let Ok(other) = vespera::serde_json::from_str::(#app_path::OPENAPI_SPEC) { - merged.merge(other); - } - } - }) - .collect(); - router_nests.push(quote!( .route(#redoc_url, #method_path(|| async { static MERGED_SPEC: std::sync::OnceLock = std::sync::OnceLock::new(); let spec = MERGED_SPEC.get_or_init(|| { - let base_spec = #spec; - let mut merged: vespera::OpenApi = vespera::serde_json::from_str(base_spec).unwrap(); + let mut merged: vespera::OpenApi = vespera::serde_json::from_str(__VESPERA_SPEC).unwrap(); #(#merge_spec_code)* vespera::serde_json::to_string(&merged).unwrap() }); - let html = format!( - r#"ReDoc
"#, - spec - ); - vespera::axum::response::Html(html) + static HTML: std::sync::OnceLock = std::sync::OnceLock::new(); + let html = HTML.get_or_init(|| { + format!( + r#"ReDoc
"#, + spec + ) + }); + vespera::axum::response::Html(html.as_str()) })) )); } else { - let html = format!( - r#"ReDoc
"# - ); - router_nests.push(quote!( - .route(#redoc_url, #method_path(|| async { vespera::axum::response::Html(#html) })) + .route(#redoc_url, #method_path(|| async { + static HTML: std::sync::OnceLock = std::sync::OnceLock::new(); + let html = HTML.get_or_init(|| { + format!( + r#"ReDoc
"#, + __VESPERA_SPEC + ) + }); + vespera::axum::response::Html(html.as_str()) + })) )); } } - if merge_apps.is_empty() { + let needs_spec_const = spec_tokens.is_some() && (docs_url.is_some() || redoc_url.is_some()); + + if needs_spec_const { + let spec_expr = spec_tokens.unwrap(); + if merge_apps.is_empty() { + quote! { + { + const __VESPERA_SPEC: &str = #spec_expr; + vespera::axum::Router::new() + #( #router_nests )* + } + } + } else { + quote! { + { + const __VESPERA_SPEC: &str = #spec_expr; + vespera::VesperaRouter::new( + vespera::axum::Router::new() + #( #router_nests )*, + vec![#( #merge_apps::router ),*] + ) + } + } + } + } else if merge_apps.is_empty() { quote! { vespera::axum::Router::new() #( #router_nests )* @@ -609,6 +636,7 @@ mod tests { &collect_metadata(temp_dir.path(), folder_name).unwrap().0, None, None, + None, &[], ); let code = result.to_string(); @@ -765,6 +793,7 @@ pub fn get_users() -> String { &collect_metadata(temp_dir.path(), folder_name).unwrap().0, None, None, + None, &[], ); let code = result.to_string(); @@ -844,6 +873,7 @@ pub fn update_user() -> String { &collect_metadata(temp_dir.path(), folder_name).unwrap().0, None, None, + None, &[], ); let code = result.to_string(); @@ -898,6 +928,7 @@ pub fn create_users() -> String { &collect_metadata(temp_dir.path(), folder_name).unwrap().0, None, None, + None, &[], ); let code = result.to_string(); @@ -944,6 +975,7 @@ pub fn index() -> String { &collect_metadata(temp_dir.path(), folder_name).unwrap().0, None, None, + None, &[], ); let code = result.to_string(); @@ -981,6 +1013,7 @@ pub fn get_users() -> String { &collect_metadata(temp_dir.path(), folder_name).unwrap().0, None, None, + None, &[], ); let code = result.to_string(); @@ -1177,38 +1210,60 @@ pub fn get_users() -> String { #[test] fn test_generate_router_code_with_docs() { let metadata = CollectedMetadata::new(); - let docs_info = Some(("/docs".to_string(), r#"{"openapi":"3.1.0"}"#.to_string())); + let spec = 
r#"{"openapi":"3.1.0"}"#; - let result = generate_router_code(&metadata, docs_info, None, &[]); + let result = generate_router_code( + &metadata, + Some("/docs"), + None, + Some(quote::quote!(#spec)), + &[], + ); let code = result.to_string(); assert!(code.contains("/docs")); assert!(code.contains("swagger-ui")); + assert!(code.contains("__VESPERA_SPEC")); + assert!(code.contains("OnceLock")); } #[test] fn test_generate_router_code_with_redoc() { let metadata = CollectedMetadata::new(); - let redoc_info = Some(("/redoc".to_string(), r#"{"openapi":"3.1.0"}"#.to_string())); + let spec = r#"{"openapi":"3.1.0"}"#; - let result = generate_router_code(&metadata, None, redoc_info, &[]); + let result = generate_router_code( + &metadata, + None, + Some("/redoc"), + Some(quote::quote!(#spec)), + &[], + ); let code = result.to_string(); assert!(code.contains("/redoc")); assert!(code.contains("redoc")); + assert!(code.contains("__VESPERA_SPEC")); + assert!(code.contains("OnceLock")); } #[test] fn test_generate_router_code_with_both_docs() { let metadata = CollectedMetadata::new(); - let docs_info = Some(("/docs".to_string(), r#"{"openapi":"3.1.0"}"#.to_string())); - let redoc_info = Some(("/redoc".to_string(), r#"{"openapi":"3.1.0"}"#.to_string())); + let spec = r#"{"openapi":"3.1.0"}"#; - let result = generate_router_code(&metadata, docs_info, redoc_info, &[]); + let result = generate_router_code( + &metadata, + Some("/docs"), + Some("/redoc"), + Some(quote::quote!(#spec)), + &[], + ); let code = result.to_string(); assert!(code.contains("/docs")); assert!(code.contains("/redoc")); + assert!(code.contains("__VESPERA_SPEC")); } #[test] @@ -1280,7 +1335,7 @@ pub fn get_users() -> String { description: None, }); - let result = generate_router_code(&metadata, None, None, &[]); + let result = generate_router_code(&metadata, None, None, None, &[]); let code = result.to_string(); // Router should be generated but without any route calls @@ -1325,7 +1380,7 @@ pub fn get_users() -> 
String { description: None, }); - let result = generate_router_code(&metadata, None, None, &[]); + let result = generate_router_code(&metadata, None, None, None, &[]); let code = result.to_string(); // Valid route should be present @@ -1472,7 +1527,7 @@ pub fn get_users() -> String { let metadata = CollectedMetadata::new(); let merge_apps: Vec = vec![syn::parse_quote!(third::ThirdApp)]; - let result = generate_router_code(&metadata, None, None, &merge_apps); + let result = generate_router_code(&metadata, None, None, None, &merge_apps); let code = result.to_string(); // Should use VesperaRouter instead of plain Router @@ -1489,10 +1544,16 @@ pub fn get_users() -> String { #[test] fn test_generate_router_code_with_docs_and_merge() { let metadata = CollectedMetadata::new(); - let docs_info = Some(("/docs".to_string(), r#"{"openapi":"3.1.0"}"#.to_string())); + let spec = r#"{"openapi":"3.1.0"}"#; let merge_apps: Vec = vec![syn::parse_quote!(app::MyApp)]; - let result = generate_router_code(&metadata, docs_info, None, &merge_apps); + let result = generate_router_code( + &metadata, + Some("/docs"), + None, + Some(quote::quote!(#spec)), + &merge_apps, + ); let code = result.to_string(); // Should have merge code for docs @@ -1514,10 +1575,16 @@ pub fn get_users() -> String { #[test] fn test_generate_router_code_with_redoc_and_merge() { let metadata = CollectedMetadata::new(); - let redoc_info = Some(("/redoc".to_string(), r#"{"openapi":"3.1.0"}"#.to_string())); + let spec = r#"{"openapi":"3.1.0"}"#; let merge_apps: Vec = vec![syn::parse_quote!(other::OtherApp)]; - let result = generate_router_code(&metadata, None, redoc_info, &merge_apps); + let result = generate_router_code( + &metadata, + None, + Some("/redoc"), + Some(quote::quote!(#spec)), + &merge_apps, + ); let code = result.to_string(); // Should have merge code for redoc @@ -1531,20 +1598,31 @@ pub fn get_users() -> String { #[test] fn test_generate_router_code_with_both_docs_and_merge() { let metadata = 
CollectedMetadata::new(); - let docs_info = Some(("/docs".to_string(), r#"{"openapi":"3.1.0"}"#.to_string())); - let redoc_info = Some(("/redoc".to_string(), r#"{"openapi":"3.1.0"}"#.to_string())); + let spec = r#"{"openapi":"3.1.0"}"#; let merge_apps: Vec = vec![syn::parse_quote!(merged::App)]; - let result = generate_router_code(&metadata, docs_info, redoc_info, &merge_apps); + let result = generate_router_code( + &metadata, + Some("/docs"), + Some("/redoc"), + Some(quote::quote!(#spec)), + &merge_apps, + ); let code = result.to_string(); // Both docs should have merge code - // Count MERGED_SPEC occurrences - should be at least 2 (static declarations for docs and redoc) + // Count MERGED_SPEC occurrences - should appear in docs and redoc handlers let merged_spec_count = code.matches("MERGED_SPEC").count(); assert!( merged_spec_count >= 2, "Should have at least 2 MERGED_SPEC for docs and redoc, got: {merged_spec_count}" ); + // __VESPERA_SPEC should appear exactly once (the const declaration) + let vespera_spec_count = code.matches("__VESPERA_SPEC").count(); + assert!( + vespera_spec_count >= 1, + "Should have __VESPERA_SPEC const, got: {vespera_spec_count}" + ); // Both docs_url and redoc_url should be present assert!( code.contains("/docs") && code.contains("/redoc"), @@ -1560,7 +1638,7 @@ pub fn get_users() -> String { syn::parse_quote!(second::App), ]; - let result = generate_router_code(&metadata, None, None, &merge_apps); + let result = generate_router_code(&metadata, None, None, None, &merge_apps); let code = result.to_string(); // Should reference both apps diff --git a/crates/vespera_macro/src/schema_macro/circular.rs b/crates/vespera_macro/src/schema_macro/circular.rs index d9dbe68..2d6f94b 100644 --- a/crates/vespera_macro/src/schema_macro/circular.rs +++ b/crates/vespera_macro/src/schema_macro/circular.rs @@ -5,6 +5,7 @@ use std::collections::HashMap; +use super::type_utils::normalize_token_str; use proc_macro2::TokenStream; use quote::quote; @@ -19,6 
+20,7 @@ use crate::parser::extract_skip; /// Produced by [`analyze_circular_refs()`] which parses a definition string once /// and extracts all three pieces of information that would otherwise require /// three separate parse calls. +#[derive(Clone)] pub struct CircularAnalysis { /// Field names that would create circular references. pub circular_fields: Vec, @@ -37,7 +39,7 @@ pub struct CircularAnalysis { /// Parses the definition string once and extracts all circular reference /// information in a single field walk. pub fn analyze_circular_refs(source_module_path: &[String], definition: &str) -> CircularAnalysis { - let Ok(parsed) = syn::parse_str::(definition) else { + let Ok(parsed) = super::file_cache::parse_struct_cached(definition) else { return CircularAnalysis { circular_fields: Vec::new(), has_fk_relations: false, @@ -61,11 +63,18 @@ pub fn analyze_circular_refs(source_module_path: &[String], definition: &str) -> let mut has_fk = false; let mut circular_field_required = HashMap::new(); + // Pre-build field name → &Field index for O(1) FK column lookup + // instead of O(N) linear search per FK relation + let field_by_name: HashMap = fields_named + .named + .iter() + .filter_map(|f| f.ident.as_ref().map(|id| (id.to_string(), f))) + .collect(); for field in &fields_named.named { // FieldsNamed guarantees all fields have identifiers let field_ident = field.ident.as_ref().expect("named field has ident"); let field_name = field_ident.to_string(); - let ty_str = quote!(#field.ty).to_string().replace(' ', ""); + let ty_str = normalize_token_str("e!(#field.ty)); // --- has_fk_relations logic --- if ty_str.contains("HasOne<") || ty_str.contains("BelongsTo<") { @@ -73,12 +82,8 @@ pub fn analyze_circular_refs(source_module_path: &[String], definition: &str) -> // --- is_circular_relation_required logic (for ALL FK fields) --- let required = extract_belongs_to_from_field(&field.attrs).is_some_and(|fk| { - fields_named - .named - .iter() - .find(|f| { - 
f.ident.as_ref().map(std::string::ToString::to_string) == Some(fk.clone()) - }) + field_by_name + .get(&fk) .is_some_and(|f| !is_option_type(&f.ty)) }); circular_field_required.insert(field_name.clone(), required); @@ -118,7 +123,7 @@ pub fn generate_default_for_relation_field( field_attrs: &[syn::Attribute], all_fields: &syn::FieldsNamed, ) -> TokenStream { - let ty_str = quote!(#ty).to_string().replace(' ', ""); + let ty_str = normalize_token_str("e!(#ty)); // Check the SeaORM relation type if ty_str.contains("HasMany<") { @@ -165,7 +170,7 @@ pub fn generate_inline_struct_construction( var_name: &str, ) -> TokenStream { // Parse the related schema definition - let Ok(parsed) = syn::parse_str::(related_schema_def) else { + let Ok(parsed) = super::file_cache::parse_struct_cached(related_schema_def) else { // Fallback to From::from if parsing fails let var_ident = syn::Ident::new(var_name, proc_macro2::Span::call_site()); return quote! { <#schema_path as From<_>>::from(#var_ident) }; @@ -233,7 +238,7 @@ pub fn generate_inline_type_construction( var_name: &str, ) -> TokenStream { // Parse the related model definition - let Ok(parsed) = syn::parse_str::(related_model_def) else { + let Ok(parsed) = super::file_cache::parse_struct_cached(related_model_def) else { // Fallback to Default if parsing fails return quote! 
{ Default::default() }; }; diff --git a/crates/vespera_macro/src/schema_macro/codegen.rs b/crates/vespera_macro/src/schema_macro/codegen.rs index feb0520..29a7b83 100644 --- a/crates/vespera_macro/src/schema_macro/codegen.rs +++ b/crates/vespera_macro/src/schema_macro/codegen.rs @@ -13,7 +13,8 @@ use crate::{ metadata::StructMetadata, parser::{ extract_default, extract_field_rename, extract_rename_all, extract_skip, - extract_skip_serializing_if, parse_type_to_schema_ref, rename_field, strip_raw_prefix, + extract_skip_serializing_if, parse_type_to_schema_ref, rename_field, + strip_raw_prefix_owned, }, }; @@ -46,7 +47,7 @@ pub fn generate_filtered_schema( let rust_field_name = field.ident.as_ref().map_or_else( || "unknown".to_string(), - |i| strip_raw_prefix(&i.to_string()).to_string(), + |i| strip_raw_prefix_owned(i.to_string()), ); // Apply rename @@ -108,7 +109,7 @@ pub fn generate_filtered_schema( schema_type: Some(vespera::schema::SchemaType::Object), properties: if properties.is_empty() { None } else { Some(properties) }, required: #required_tokens, - ..vespera::schema::Schema::new(vespera::schema::SchemaType::Object) + ..vespera::schema::Schema::default() } } } @@ -132,46 +133,51 @@ pub fn schema_ref_to_tokens(schema_ref: &SchemaRef) -> TokenStream { } } -/// Convert Schema to `TokenStream` for code generation -#[allow(clippy::option_if_let_else)] +/// Convert Schema to `TokenStream` for code generation. +/// +/// Only emits non-None fields, using `..Default::default()` for the rest. +/// This reduces generated code volume by ~70% for typical schemas +/// (e.g., a String field: 3 tokens instead of 10). pub fn schema_to_tokens(schema: &Schema) -> TokenStream { - let schema_type_tokens = match &schema.schema_type { - Some(SchemaType::String) => quote! { Some(vespera::schema::SchemaType::String) }, - Some(SchemaType::Number) => quote! { Some(vespera::schema::SchemaType::Number) }, - Some(SchemaType::Integer) => quote! 
{ Some(vespera::schema::SchemaType::Integer) }, - Some(SchemaType::Boolean) => quote! { Some(vespera::schema::SchemaType::Boolean) }, - Some(SchemaType::Array) => quote! { Some(vespera::schema::SchemaType::Array) }, - Some(SchemaType::Object) => quote! { Some(vespera::schema::SchemaType::Object) }, - Some(SchemaType::Null) => quote! { Some(vespera::schema::SchemaType::Null) }, - None => quote! { None }, - }; + let mut fields: Vec = Vec::new(); + + // schema_type + if let Some(st) = &schema.schema_type { + let st_tokens = match st { + SchemaType::String => quote! { vespera::schema::SchemaType::String }, + SchemaType::Number => quote! { vespera::schema::SchemaType::Number }, + SchemaType::Integer => quote! { vespera::schema::SchemaType::Integer }, + SchemaType::Boolean => quote! { vespera::schema::SchemaType::Boolean }, + SchemaType::Array => quote! { vespera::schema::SchemaType::Array }, + SchemaType::Object => quote! { vespera::schema::SchemaType::Object }, + SchemaType::Null => quote! { vespera::schema::SchemaType::Null }, + }; + fields.push(quote! { schema_type: Some(#st_tokens) }); + } - let format_tokens = if let Some(f) = &schema.format { - quote! { Some(#f.to_string()) } - } else { - quote! { None } - }; + // ref_path + if let Some(rp) = &schema.ref_path { + fields.push(quote! { ref_path: Some(#rp.to_string()) }); + } - let nullable_tokens = match schema.nullable { - Some(true) => quote! { Some(true) }, - Some(false) => quote! { Some(false) }, - None => quote! { None }, - }; + // format + if let Some(f) = &schema.format { + fields.push(quote! { format: Some(#f.to_string()) }); + } - let ref_path_tokens = if let Some(rp) = &schema.ref_path { - quote! { Some(#rp.to_string()) } - } else { - quote! { None } - }; + // nullable + if let Some(n) = schema.nullable { + fields.push(quote! 
{ nullable: Some(#n) }); + } - let items_tokens = if let Some(items) = &schema.items { + // items + if let Some(items) = &schema.items { let inner = schema_ref_to_tokens(items); - quote! { Some(Box::new(#inner)) } - } else { - quote! { None } - }; + fields.push(quote! { items: Some(Box::new(#inner)) }); + } - let properties_tokens = if let Some(props) = &schema.properties { + // properties + if let Some(props) = &schema.properties { let entries: Vec<_> = props .iter() .map(|(k, v)| { @@ -179,48 +185,35 @@ pub fn schema_to_tokens(schema: &Schema) -> TokenStream { quote! { (#k.to_string(), #v_tokens) } }) .collect(); - quote! { - Some({ + fields.push(quote! { + properties: Some({ let mut map = std::collections::BTreeMap::new(); #(map.insert(#entries.0, #entries.1);)* map }) - } - } else { - quote! { None } - }; + }); + } - let required_tokens = if let Some(req) = &schema.required { + // required + if let Some(req) = &schema.required { let req_strs: Vec<_> = req.iter().map(std::string::String::as_str).collect(); - quote! { Some(vec![#(#req_strs.to_string()),*]) } - } else { - quote! { None } - }; + fields.push(quote! { required: Some(vec![#(#req_strs.to_string()),*]) }); + } - let minimum_tokens = if let Some(min) = schema.minimum { - quote! { Some(#min) } - } else { - quote! { None } - }; + // minimum + if let Some(min) = schema.minimum { + fields.push(quote! { minimum: Some(#min) }); + } - let maximum_tokens = if let Some(max) = schema.maximum { - quote! { Some(#max) } - } else { - quote! { None } - }; + // maximum + if let Some(max) = schema.maximum { + fields.push(quote! { maximum: Some(#max) }); + } quote! 
{ vespera::schema::Schema { - ref_path: #ref_path_tokens, - schema_type: #schema_type_tokens, - format: #format_tokens, - nullable: #nullable_tokens, - items: #items_tokens, - properties: #properties_tokens, - required: #required_tokens, - minimum: #minimum_tokens, - maximum: #maximum_tokens, - ..vespera::schema::Schema::new(vespera::schema::SchemaType::Object) + #(#fields,)* + ..vespera::schema::Schema::default() } } } @@ -369,7 +362,10 @@ mod tests { }; let tokens = schema_to_tokens(&schema); let output = tokens.to_string(); - assert!(output.contains("schema_type : None")); + // With conditional emission, schema_type is omitted when None + // (..Default::default() provides None) + assert!(!output.contains("schema_type")); + assert!(output.contains("default")); } #[test] diff --git a/crates/vespera_macro/src/schema_macro/file_cache.rs b/crates/vespera_macro/src/schema_macro/file_cache.rs new file mode 100644 index 0000000..11ef1a6 --- /dev/null +++ b/crates/vespera_macro/src/schema_macro/file_cache.rs @@ -0,0 +1,514 @@ +//! Thread-local cache for file lookups to avoid redundant I/O and parsing. +//! +//! Within a single compilation, multiple `schema_type!` invocations may search +//! for structs in the same files. This module caches: +//! - The list of `.rs` files per source directory +//! - File contents with mtime-based invalidation +//! - Struct name → candidate file paths (cheap text-based pre-filter) +//! +//! Uses `thread_local!` because `syn::File` (and proc-macro types within it) +//! are not `Send`/`Sync`, and proc-macros run single-threaded anyway. +//! The mtime check handles rust-analyzer's proc-macro server, which may persist +//! across file edits. + +use std::cell::RefCell; +use std::collections::HashMap; +use std::path::{Path, PathBuf}; +use std::time::SystemTime; + +use super::circular::CircularAnalysis; +use super::file_lookup::collect_rs_files_recursive; +use crate::metadata::StructMetadata; + +/// Internal cache state. 
+struct FileCache { + /// Cached `.rs` file lists per source directory. + file_lists: HashMap>, + + /// Cached file contents: file path → (mtime, content string). + /// Mtime is checked to invalidate stale entries in long-lived processes. + file_contents: HashMap, + + /// Struct name candidate index: (src_dir, struct_name) → files containing that name. + /// Built from cheap `String::contains` search, not full parsing. + struct_candidates: HashMap<(PathBuf, String), Vec>, + + // NOTE: We intentionally do NOT cache parsed `syn::ItemStruct` here. + // `syn` types contain `proc_macro::Span` handles that are tied to a specific + // macro invocation context. Caching them across invocations causes + // "use-after-free in `proc_macro` handle" panics. + + // --- Profiling counters (zero-cost when VESPERA_PROFILE is not set) --- + /// Number of file content reads from disk (cache miss). + file_disk_reads: usize, + /// Number of file content cache hits. + content_cache_hits: usize, + /// Number of struct definitions parsed via syn::parse_str. + struct_parses: usize, + /// Number of full-file AST parses via syn::parse_file. + ast_parses: usize, + + // --- Phase 4 caches --- + /// Cached circular reference analysis results: (module_path, definition) → analysis. + circular_analysis: HashMap<(String, String), CircularAnalysis>, + /// Cached struct lookups by schema path: path_str → Option. + /// `None` values are cached (negative cache) to avoid repeated failed lookups. + struct_lookup: HashMap>, + /// Cached FK column lookups: (schema_path, via_rel) → Option. + fk_column_lookup: HashMap<(String, String), Option>, + /// Cached module path extraction from schema paths: path_str → Vec. + module_path_cache: HashMap>, + /// Cached CARGO_MANIFEST_DIR value to avoid repeated syscalls. + /// Within a single compilation, this never changes. 
+ manifest_dir: Option, + + // --- Phase 4 profiling counters --- + circular_cache_hits: usize, + struct_lookup_cache_hits: usize, + fk_column_cache_hits: usize, + module_path_cache_hits: usize, +} + +thread_local! { + static FILE_CACHE: RefCell = RefCell::new(FileCache { + file_lists: HashMap::with_capacity(4), + file_contents: HashMap::with_capacity(32), + struct_candidates: HashMap::with_capacity(32), + file_disk_reads: 0, + content_cache_hits: 0, + struct_parses: 0, + ast_parses: 0, + circular_analysis: HashMap::with_capacity(16), + struct_lookup: HashMap::with_capacity(32), + fk_column_lookup: HashMap::with_capacity(16), + module_path_cache: HashMap::with_capacity(32), + manifest_dir: None, + circular_cache_hits: 0, + struct_lookup_cache_hits: 0, + fk_column_cache_hits: 0, + module_path_cache_hits: 0, + }); +} + +/// Get `CARGO_MANIFEST_DIR` from cache, or read from env and cache. +/// +/// Within a single compilation, this value never changes. Caching avoids +/// repeated syscalls (previously 20+ calls per `schema_type!` expansion). +pub fn get_manifest_dir() -> Option { + FILE_CACHE.with(|cache| { + let mut cache = cache.borrow_mut(); + if let Some(ref dir) = cache.manifest_dir { + return Some(dir.clone()); + } + let dir = std::env::var("CARGO_MANIFEST_DIR").ok(); + cache.manifest_dir.clone_from(&dir); + dir + }) +} + +/// Get candidate files that likely contain `struct_name`, using cache when available. +/// +/// Performs a cheap text-based search (`String::contains`) on file contents. +/// False positives are acceptable (struct name in comments/strings), but false +/// negatives are not. Results are cached per `(src_dir, struct_name)` pair. 
+pub fn get_struct_candidates(src_dir: &Path, struct_name: &str) -> Vec { + FILE_CACHE.with(|cache| { + let mut cache = cache.borrow_mut(); + let key = (src_dir.to_path_buf(), struct_name.to_string()); + + if let Some(candidates) = cache.struct_candidates.get(&key) { + return candidates.clone(); + } + + // Ensure file list is cached + let files = if let Some(files) = cache.file_lists.get(src_dir) { + files.clone() + } else { + let mut files = Vec::new(); + collect_rs_files_recursive(src_dir, &mut files); + cache + .file_lists + .insert(src_dir.to_path_buf(), files.clone()); + files + }; + + // Filter using cheap text search, caching file contents along the way + let candidates: Vec = files + .into_iter() + .filter(|path| { + let content = get_file_content_inner(&mut cache, path); + content.is_some_and(|c| c.contains(struct_name)) + }) + .collect(); + + cache.struct_candidates.insert(key, candidates.clone()); + candidates + }) +} + +/// Get a parsed `syn::File` for the given path, using cached file content. +/// +/// File content is cached with mtime-based invalidation. Parsing always runs +/// (syn types aren't Send), but I/O is avoided on cache hits. +/// Returns `None` if the file cannot be read or parsed. +pub fn get_parsed_ast(path: &Path) -> Option { + FILE_CACHE.with(|cache| { + let mut cache = cache.borrow_mut(); + let content = get_file_content_inner(&mut cache, path)?; + cache.ast_parses += 1; + syn::parse_file(&content).ok() + }) +} + +/// Internal helper: get file content from cache or read from disk. +/// Checks mtime for invalidation. 
+fn get_file_content_inner(cache: &mut FileCache, path: &Path) -> Option<String> {
+    let current_mtime = std::fs::metadata(path).ok().and_then(|m| m.modified().ok());
+
+    if let Some(mtime) = current_mtime
+        && let Some((cached_mtime, content)) = cache.file_contents.get(path)
+        && *cached_mtime == mtime
+    {
+        cache.content_cache_hits += 1;
+        return Some(content.clone());
+    }
+
+    // Cache miss or stale — read and cache
+    let content = std::fs::read_to_string(path).ok()?;
+    cache.file_disk_reads += 1;
+
+    if let Some(mtime) = current_mtime {
+        cache
+            .file_contents
+            .insert(path.to_path_buf(), (mtime, content.clone()));
+    }
+
+    Some(content)
+}
+
+/// Parse a struct definition string via `syn::parse_str`.
+///
+/// NOTE: Results are NOT cached across calls. `syn::ItemStruct` contains
+/// `proc_macro::Span` handles that are tied to a specific macro invocation
+/// context — caching them causes "use-after-free" panics in the proc_macro bridge.
+/// File I/O caching (via `get_parsed_ast`) is the primary performance win;
+/// definition string parsing is fast (microseconds per struct).
+pub fn parse_struct_cached(definition: &str) -> Result<syn::ItemStruct, syn::Error> {
+    FILE_CACHE.with(|cache| {
+        let mut cache = cache.borrow_mut();
+        cache.struct_parses += 1;
+        syn::parse_str(definition)
+    })
+}
+
+/// Get or compute circular reference analysis, with caching.
+///
+/// The cache key is `(source_module_path_joined, definition)` since the same
+/// model definition analyzed from the same module context always produces
+/// the same result.
+pub fn get_circular_analysis(source_module_path: &[String], definition: &str) -> CircularAnalysis {
+    let key = (source_module_path.join("::"), definition.to_string());
+
+    // 1. Check cache — borrow dropped at end of closure
+    let cached = FILE_CACHE.with(|cache| cache.borrow().circular_analysis.get(&key).cloned());
+    if let Some(result) = cached {
+        FILE_CACHE.with(|cache| cache.borrow_mut().circular_cache_hits += 1);
+        return result;
+    }
+
+    // 2. 
Compute — this re-enters FILE_CACHE via parse_struct_cached (safe: our borrow is dropped)
+    let result = super::circular::analyze_circular_refs(source_module_path, definition);
+
+    // 3. Store — new borrow
+    FILE_CACHE.with(|cache| {
+        cache
+            .borrow_mut()
+            .circular_analysis
+            .insert(key, result.clone());
+    });
+
+    result
+}
+
+/// Get or compute struct lookup by schema path, with caching.
+///
+/// Wraps `find_struct_from_schema_path` with a `HashMap<String, Option<StructMetadata>>`
+/// cache. `None` values are cached too (negative cache) to avoid repeated failed lookups.
+pub fn get_struct_from_schema_path(path_str: &str) -> Option<StructMetadata> {
+    // 1. Check cache — borrow dropped at end of closure
+    let cached = FILE_CACHE.with(|cache| cache.borrow().struct_lookup.get(path_str).cloned());
+    if let Some(result) = cached {
+        FILE_CACHE.with(|cache| cache.borrow_mut().struct_lookup_cache_hits += 1);
+        return result;
+    }
+
+    // 2. Compute — this re-enters FILE_CACHE via get_parsed_ast (safe: our borrow is dropped)
+    let result = super::file_lookup::find_struct_from_schema_path(path_str);
+
+    // 3. Store — new borrow
+    FILE_CACHE.with(|cache| {
+        cache
+            .borrow_mut()
+            .struct_lookup
+            .insert(path_str.to_string(), result.clone());
+    });
+
+    result
+}
+
+/// Get or compute FK column lookup, with caching.
+///
+/// Wraps `find_fk_column_from_target_entity` with a `HashMap<(String, String), Option<String>>`
+/// cache. Negative results (`None`) are cached to avoid repeated file lookups.
+pub fn get_fk_column(schema_path: &str, via_rel: &str) -> Option<String> {
+    let key = (schema_path.to_string(), via_rel.to_string());
+
+    // 1. Check cache — borrow dropped at end of closure
+    let cached = FILE_CACHE.with(|cache| cache.borrow().fk_column_lookup.get(&key).cloned());
+    if let Some(result) = cached {
+        FILE_CACHE.with(|cache| cache.borrow_mut().fk_column_cache_hits += 1);
+        return result;
+    }
+
+    // 2. 
Compute — this re-enters FILE_CACHE via get_parsed_ast (safe: our borrow is dropped)
+    let result = super::file_lookup::find_fk_column_from_target_entity(schema_path, via_rel);
+
+    // 3. Store — new borrow
+    FILE_CACHE.with(|cache| {
+        cache
+            .borrow_mut()
+            .fk_column_lookup
+            .insert(key, result.clone());
+    });
+
+    result
+}
+
+/// Get or compute module path from schema path, with caching.
+///
+/// Wraps `extract_module_path_from_schema_path` logic with a `HashMap<String, Vec<String>>`
+/// cache. The `schema_path` TokenStream is stringified once for both cache key and computation,
+/// avoiding the double `.to_string()` that would occur when calling the uncached function.
+pub fn get_module_path_from_schema_path(schema_path: &proc_macro2::TokenStream) -> Vec<String> {
+    let path_str = schema_path.to_string();
+
+    // 1. Check cache — borrow dropped at end of closure
+    let cached = FILE_CACHE.with(|cache| cache.borrow().module_path_cache.get(&path_str).cloned());
+    if let Some(result) = cached {
+        FILE_CACHE.with(|cache| cache.borrow_mut().module_path_cache_hits += 1);
+        return result;
+    }
+
+    // 2. Compute from the string directly (avoids double to_string())
+    let segments: Vec<&str> = path_str
+        .split("::")
+        .map(str::trim)
+        .filter(|s| !s.is_empty())
+        .collect();
+
+    let result = if segments.len() > 1 {
+        segments[..segments.len() - 1]
+            .iter()
+            .map(ToString::to_string)
+            .collect()
+    } else {
+        vec![]
+    };
+
+    // 3. Store — new borrow
+    FILE_CACHE.with(|cache| {
+        cache
+            .borrow_mut()
+            .module_path_cache
+            .insert(path_str, result.clone());
+    });
+
+    result
+}
+
+/// Print profiling summary to stderr if `VESPERA_PROFILE` env var is set.
+///
+/// Call this at the end of macro execution to output cache statistics.
+/// Silent by default — only outputs when `VESPERA_PROFILE=1`.
+pub fn print_profile_summary() { + if std::env::var("VESPERA_PROFILE").is_err() { + return; + } + + FILE_CACHE.with(|cache| { + let cache = cache.borrow(); + eprintln!("[vespera-profile] File cache stats:"); + eprintln!( + " file I/O: {} disk reads, {} cache hits", + cache.file_disk_reads, cache.content_cache_hits + ); + eprintln!(" struct parses: {}", cache.struct_parses); + eprintln!(" AST parses: {}", cache.ast_parses); + eprintln!( + " cache entries: {} file lists, {} file contents, {} struct candidates", + cache.file_lists.len(), + cache.file_contents.len(), + cache.struct_candidates.len() + ); + eprintln!( + " circular analysis: {} cache hits, {} entries", + cache.circular_cache_hits, + cache.circular_analysis.len() + ); + eprintln!( + " struct lookup: {} cache hits, {} entries", + cache.struct_lookup_cache_hits, + cache.struct_lookup.len() + ); + eprintln!( + " FK column lookup: {} cache hits, {} entries", + cache.fk_column_cache_hits, + cache.fk_column_lookup.len() + ); + eprintln!( + " module path: {} cache hits, {} entries", + cache.module_path_cache_hits, + cache.module_path_cache.len() + ); + }); +} + +#[cfg(test)] +mod tests { + use std::path::Path; + + use tempfile::TempDir; + + use super::*; + + #[test] + fn test_get_struct_candidates_filters_correctly() { + let temp_dir = TempDir::new().unwrap(); + let src_dir = temp_dir.path(); + + std::fs::write( + src_dir.join("has_model.rs"), + "pub struct Model { pub id: i32 }", + ) + .unwrap(); + std::fs::write( + src_dir.join("no_model.rs"), + "pub struct Other { pub x: i32 }", + ) + .unwrap(); + + let candidates = get_struct_candidates(src_dir, "Model"); + assert_eq!(candidates.len(), 1); + assert!(candidates[0].ends_with("has_model.rs")); + } + + #[test] + fn test_get_parsed_ast_returns_valid_ast() { + let temp_dir = TempDir::new().unwrap(); + let file_path = temp_dir.path().join("test.rs"); + std::fs::write(&file_path, "pub struct Foo { pub x: i32 }").unwrap(); + + let ast = get_parsed_ast(&file_path); + 
assert!(ast.is_some()); + assert!(!ast.unwrap().items.is_empty()); + } + + #[test] + fn test_get_parsed_ast_caches_content() { + let temp_dir = TempDir::new().unwrap(); + let file_path = temp_dir.path().join("cached.rs"); + std::fs::write(&file_path, "pub struct Bar;").unwrap(); + + let ast1 = get_parsed_ast(&file_path); + let ast2 = get_parsed_ast(&file_path); + assert!(ast1.is_some()); + assert!(ast2.is_some()); + } + + #[test] + fn test_get_parsed_ast_returns_none_for_invalid() { + let result = get_parsed_ast(Path::new("/nonexistent/path.rs")); + assert!(result.is_none()); + } + + #[test] + fn test_get_parsed_ast_returns_none_for_unparseable() { + let temp_dir = TempDir::new().unwrap(); + let file_path = temp_dir.path().join("broken.rs"); + std::fs::write(&file_path, "this is not valid rust {{{{").unwrap(); + + let result = get_parsed_ast(&file_path); + assert!(result.is_none()); + } + + #[test] + fn test_get_struct_candidates_caches_result() { + let temp_dir = TempDir::new().unwrap(); + let src_dir = temp_dir.path(); + + std::fs::write(src_dir.join("file.rs"), "pub struct Target { pub id: i32 }").unwrap(); + + let c1 = get_struct_candidates(src_dir, "Target"); + let c2 = get_struct_candidates(src_dir, "Target"); + assert_eq!(c1, c2, "Cached candidates should be identical"); + } + + #[test] + fn test_get_struct_candidates_file_list_cache_hit() { + let temp_dir = TempDir::new().unwrap(); + let src_dir = temp_dir.path(); + + std::fs::write( + src_dir.join("file_a.rs"), + "pub struct Alpha { pub id: i32 }", + ) + .unwrap(); + std::fs::write( + src_dir.join("file_b.rs"), + "pub struct Beta { pub name: String }", + ) + .unwrap(); + + // First call: populates file_lists cache for src_dir + let result1 = get_struct_candidates(src_dir, "Alpha"); + assert_eq!(result1.len(), 1); + + // Second call: same src_dir, different struct_name + // struct_candidates cache MISS (different key), but file_lists cache HIT → line 125 + let result2 = get_struct_candidates(src_dir, 
"Beta"); + assert_eq!(result2.len(), 1); + } + + #[test] + fn test_get_fk_column_cache_hit() { + // First call: computes and caches result (None since path doesn't exist) + let result1 = get_fk_column("nonexistent::path::Schema", "SomeRelation"); + // Second call: hits cache → lines 259-260 + let result2 = get_fk_column("nonexistent::path::Schema", "SomeRelation"); + assert_eq!(result1, result2); + } + + #[serial_test::serial] + #[test] + fn test_print_profile_summary_with_profile_env() { + // Set VESPERA_PROFILE to enable profiling output + unsafe { std::env::set_var("VESPERA_PROFILE", "1") }; + + // This should print profile summary to stderr (lines 311-321) + print_profile_summary(); + + // Clean up + unsafe { std::env::remove_var("VESPERA_PROFILE") }; + // Test passes if no panic — output goes to stderr + } + + #[serial_test::serial] + #[test] + fn test_print_profile_summary_without_profile_env() { + // Ensure VESPERA_PROFILE is not set + unsafe { std::env::remove_var("VESPERA_PROFILE") }; + + // Should early-return at line 308 without printing anything + print_profile_summary(); + } +} diff --git a/crates/vespera_macro/src/schema_macro/file_lookup.rs b/crates/vespera_macro/src/schema_macro/file_lookup.rs index e1f8665..9acd12b 100644 --- a/crates/vespera_macro/src/schema_macro/file_lookup.rs +++ b/crates/vespera_macro/src/schema_macro/file_lookup.rs @@ -6,7 +6,21 @@ use std::path::Path; use syn::Type; -use crate::{file_utils::try_read_and_parse_file, metadata::StructMetadata}; +use crate::metadata::StructMetadata; +use std::path::PathBuf; + +/// Build candidate file paths from module segments. +/// +/// Given a source directory and module segments (e.g., `["models", "memo"]`), +/// returns both `{src_dir}/models/memo.rs` and `{src_dir}/models/memo/mod.rs`. 
+#[inline] +fn candidate_file_paths(src_dir: &Path, module_segments: &[&str]) -> [PathBuf; 2] { + let joined = module_segments.join("/"); + [ + src_dir.join(format!("{joined}.rs")), + src_dir.join(format!("{joined}/mod.rs")), + ] +} /// Try to find a struct definition from a module path by reading source files. /// @@ -38,8 +52,8 @@ pub fn find_struct_from_path( ty: &Type, schema_name_hint: Option<&str>, ) -> Option<(StructMetadata, Vec)> { - // Get CARGO_MANIFEST_DIR to locate src folder - let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").ok()?; + // Get CARGO_MANIFEST_DIR to locate src folder (cached to avoid repeated syscalls) + let manifest_dir = super::file_cache::get_manifest_dir()?; let src_dir = Path::new(&manifest_dir).join("src"); // Extract path segments from the type @@ -80,17 +94,14 @@ pub fn find_struct_from_path( let type_module_path: Vec = segments[..segments.len() - 1].to_vec(); // Try different file path patterns - let file_paths = vec![ - src_dir.join(format!("{}.rs", module_segments.join("/"))), - src_dir.join(format!("{}/mod.rs", module_segments.join("/"))), - ]; + let file_paths = candidate_file_paths(&src_dir, &module_segments); for file_path in file_paths { if !file_path.exists() { continue; } - let file_ast = try_read_and_parse_file(&file_path)?; + let file_ast = super::file_cache::get_parsed_ast(&file_path)?; // Look for the struct in the file for item in &file_ast.items { @@ -134,19 +145,87 @@ pub fn find_struct_by_name_in_all_files( struct_name: &str, schema_name_hint: Option<&str>, ) -> Option<(StructMetadata, Vec)> { - // Collect all .rs files recursively - let mut rs_files = Vec::new(); - collect_rs_files_recursive(src_dir, &mut rs_files); + // Use cached struct-candidate index: files already filtered by text search + let mut rs_files = super::file_cache::get_struct_candidates(src_dir, struct_name); + + // Pre-compute hint prefix once (used in fast path and fallback disambiguation) + let prefix_normalized = 
schema_name_hint.map(derive_hint_prefix); + + // FAST PATH: If schema_name_hint is provided, try matching files first. + // This avoids parsing ALL files for the common same-file pattern: + // schema_type!(Schema from Model, name = "UserSchema") in user.rs + if let Some(prefix_normalized) = &prefix_normalized { + // Partition files: candidate files (filename matches hint prefix) vs rest + let (candidates, rest): (Vec<_>, Vec<_>) = rs_files.into_iter().partition(|path| { + path.file_stem() + .and_then(|s| s.to_str()) + .is_some_and(|name| { + let norm = normalize_name(name); + norm == *prefix_normalized || norm.contains(prefix_normalized.as_str()) + }) + }); + + // Parse only candidate files first + let mut found_in_candidates: Vec<(std::path::PathBuf, StructMetadata)> = Vec::new(); + for file_path in &candidates { + let Some(file_ast) = super::file_cache::get_parsed_ast(file_path) else { + continue; + }; + for item in &file_ast.items { + if let syn::Item::Struct(struct_item) = item + && struct_item.ident == struct_name + { + found_in_candidates.push(( + file_path.clone(), + StructMetadata::new_model( + struct_name.to_string(), + quote::quote!(#struct_item).to_string(), + ), + )); + } + } + } - // Store: (file_path, struct_metadata) + // If exactly one match in candidates, return immediately (fast path hit!) 
+ if found_in_candidates.len() == 1 { + let (path, metadata) = found_in_candidates.remove(0); + let module_path = file_path_to_module_path(&path, src_dir); + return Some((metadata, module_path)); + } + + // If candidates found multiple, try disambiguation by exact filename match + if found_in_candidates.len() > 1 { + let exact_match: Vec<_> = found_in_candidates + .iter() + .filter(|(path, _)| { + path.file_stem() + .and_then(|s| s.to_str()) + .is_some_and(|name| normalize_name(name) == *prefix_normalized) + }) + .collect(); + + if exact_match.len() == 1 { + let (path, metadata) = exact_match[0]; + let module_path = file_path_to_module_path(path, src_dir); + return Some((metadata.clone(), module_path)); + } + + // Still ambiguous among candidates + return None; + } + + // No match in candidates — fall through to scan remaining files + rs_files = rest; + } + + // FULL SCAN: Parse all remaining files (or all files if no hint) let mut found_structs: Vec<(std::path::PathBuf, StructMetadata)> = Vec::new(); for file_path in rs_files { - let Some(file_ast) = try_read_and_parse_file(&file_path) else { + let Some(file_ast) = super::file_cache::get_parsed_ast(&file_path) else { continue; }; - // Look for the struct in the file for item in &file_ast.items { if let syn::Item::Struct(struct_item) = item && struct_item.ident == struct_name @@ -163,71 +242,42 @@ pub fn find_struct_by_name_in_all_files( } match found_structs.len() { - 0 => None, 1 => { let (path, metadata) = found_structs.remove(0); let module_path = file_path_to_module_path(&path, src_dir); Some((metadata, module_path)) } - _ => { - // Multiple structs with same name - try to disambiguate using schema_name_hint - if let Some(hint) = schema_name_hint { - // Extract prefix from schema name (e.g., "UserSchema" -> "user", "MemoSchema" -> "memo") - let hint_lower = hint.to_lowercase(); - let prefix = hint_lower - .strip_suffix("schema") - .or_else(|| hint_lower.strip_suffix("response")) - .or_else(|| 
hint_lower.strip_suffix("request")) - .unwrap_or(&hint_lower); - - // Normalize prefix: remove underscores for comparison - // This allows "AdminUserSchema" (prefix "adminuser") to match "admin_user.rs" - let prefix_normalized = prefix.replace('_', ""); - - // First, try exact filename match (normalized) - // e.g., "admin_user.rs" normalized to "adminuser" matches prefix "adminuser" - let exact_match: Vec<_> = found_structs - .iter() - .filter(|(path, _)| { - path.file_stem() - .and_then(|s| s.to_str()) - .is_some_and(|name| { - name.to_lowercase().replace('_', "") == prefix_normalized - }) - }) - .collect(); - - if exact_match.len() == 1 { - let (path, metadata) = exact_match[0]; - let module_path = file_path_to_module_path(path, src_dir); - return Some((metadata.clone(), module_path)); - } + _ => None, + } +} - // Fallback: Find files whose normalized name contains the prefix - let matching: Vec<_> = found_structs - .into_iter() - .filter(|(path, _)| { - path.file_stem() - .and_then(|s| s.to_str()) - .is_some_and(|name| { - name.to_lowercase() - .replace('_', "") - .contains(&prefix_normalized) - }) - }) - .collect(); - - if matching.len() == 1 { - let (path, metadata) = matching.into_iter().next().unwrap(); - let module_path = file_path_to_module_path(&path, src_dir); - return Some((metadata, module_path)); - } - } +/// Derive a normalized prefix from a schema name hint for file matching. +/// +/// Strips common suffixes ("Schema", "Response", "Request") and normalizes +/// by removing underscores and lowercasing. 
+/// +/// # Examples +/// - "UserSchema" → "user" +/// - "MemoResponse" → "memo" +/// - "AdminUserSchema" → "adminuser" +fn derive_hint_prefix(hint: &str) -> String { + let hint_lower = hint.to_lowercase(); + let prefix = hint_lower + .strip_suffix("schema") + .or_else(|| hint_lower.strip_suffix("response")) + .or_else(|| hint_lower.strip_suffix("request")) + .unwrap_or(&hint_lower); + normalize_name(prefix) +} - // Still ambiguous - None - } - } +/// Normalize a name by lowercasing and removing underscores in a single pass. +/// Replaces the two-allocation `s.to_lowercase().replace('_', "")` pattern. +#[inline] +fn normalize_name(s: &str) -> String { + s.chars() + .filter(|&c| c != '_') + .map(|c| c.to_ascii_lowercase()) + .collect() } /// Recursively collect all `.rs` files in a directory. @@ -283,8 +333,8 @@ pub fn file_path_to_module_path(file_path: &Path, src_dir: &Path) -> Vec /// /// Similar to `find_struct_from_path` but takes a string path instead of `syn::Type`. pub fn find_struct_from_schema_path(path_str: &str) -> Option { - // Get CARGO_MANIFEST_DIR to locate src folder - let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").ok()?; + // Get CARGO_MANIFEST_DIR to locate src folder (cached to avoid repeated syscalls) + let manifest_dir = super::file_cache::get_manifest_dir()?; let src_dir = Path::new(&manifest_dir).join("src"); // Parse the path string into segments @@ -310,17 +360,14 @@ pub fn find_struct_from_schema_path(path_str: &str) -> Option { } // Try different file path patterns - let file_paths = vec![ - src_dir.join(format!("{}.rs", module_segments.join("/"))), - src_dir.join(format!("{}/mod.rs", module_segments.join("/"))), - ]; + let file_paths = candidate_file_paths(&src_dir, &module_segments); for file_path in file_paths { if !file_path.exists() { continue; } - let file_ast = try_read_and_parse_file(&file_path)?; + let file_ast = super::file_cache::get_parsed_ast(&file_path)?; // Look for the struct in the file for item in &file_ast.items 
{ @@ -354,8 +401,8 @@ pub fn find_fk_column_from_target_entity( ) -> Option { use crate::schema_macro::seaorm::{extract_belongs_to_from_field, extract_relation_enum}; - // Get CARGO_MANIFEST_DIR to locate src folder - let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").ok()?; + // Get CARGO_MANIFEST_DIR to locate src folder (cached to avoid repeated syscalls) + let manifest_dir = super::file_cache::get_manifest_dir()?; let src_dir = Path::new(&manifest_dir).join("src"); // Parse the schema path to get file path @@ -377,17 +424,14 @@ pub fn find_fk_column_from_target_entity( } // Try different file path patterns - let file_paths = vec![ - src_dir.join(format!("{}.rs", module_segments.join("/"))), - src_dir.join(format!("{}/mod.rs", module_segments.join("/"))), - ]; + let file_paths = candidate_file_paths(&src_dir, &module_segments); for file_path in file_paths { if !file_path.exists() { continue; } - let file_ast = try_read_and_parse_file(&file_path)?; + let file_ast = super::file_cache::get_parsed_ast(&file_path)?; // Look for Model struct in the file for item in &file_ast.items { @@ -415,8 +459,8 @@ pub fn find_fk_column_from_target_entity( /// Converts "`crate::models::user::Schema`" -> finds Model in src/models/user.rs #[allow(clippy::too_many_lines)] pub fn find_model_from_schema_path(schema_path_str: &str) -> Option { - // Get CARGO_MANIFEST_DIR to locate src folder - let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").ok()?; + // Get CARGO_MANIFEST_DIR to locate src folder (cached to avoid repeated syscalls) + let manifest_dir = super::file_cache::get_manifest_dir()?; let src_dir = Path::new(&manifest_dir).join("src"); // Parse the path string and convert Schema path to module path @@ -443,17 +487,14 @@ pub fn find_model_from_schema_path(schema_path_str: &str) -> Option continue to next candidate + let temp_dir = TempDir::new().unwrap(); + let src_dir = temp_dir.path(); + + // user.rs matches hint prefix "user" (candidate), contains "Model" text, but 
won't parse + std::fs::write( + src_dir.join("user.rs"), + "pub struct Model {{{{ broken syntax", + ) + .unwrap(); + + // valid.rs contains Model and parses fine (goes to rest since filename doesn't match prefix) + std::fs::write(src_dir.join("valid.rs"), "pub struct Model { pub id: i32 }").unwrap(); + + let result = find_struct_by_name_in_all_files(src_dir, "Model", Some("UserSchema")); + + assert!( + result.is_some(), + "Should find Model in valid.rs after skipping unparseable candidate user.rs" + ); + } + + #[test] + #[serial] + fn test_find_struct_exact_filename_disambiguation() { + // Tests lines 168-170: multiple candidates found, exact filename match disambiguates + let temp_dir = TempDir::new().unwrap(); + let src_dir = temp_dir.path(); + + // user.rs: exact match (normalize_name("user") == prefix "user") + std::fs::write(src_dir.join("user.rs"), "pub struct Model { pub id: i32 }").unwrap(); + + // user_extended.rs: contains-match only (normalize_name("user_extended") = "userextended" != "user") + std::fs::write( + src_dir.join("user_extended.rs"), + "pub struct Model { pub name: String }", + ) + .unwrap(); + + let result = find_struct_by_name_in_all_files(src_dir, "Model", Some("UserSchema")); + + assert!(result.is_some(), "Should resolve via exact filename match"); + let (metadata, _) = result.unwrap(); + assert!( + metadata.definition.contains("id"), + "Should return user.rs Model (with id field)" + ); + } + + #[test] + #[serial] + fn test_find_struct_no_match_in_candidates_falls_to_rest() { + // Tests line 189: candidates have no struct match -> rs_files = rest -> full scan finds it + let temp_dir = TempDir::new().unwrap(); + let src_dir = temp_dir.path(); + + // user.rs is a candidate (filename matches "user" prefix) but has no struct Model + // Must contain "Model" text for get_struct_candidates to include it + std::fs::write( + src_dir.join("user.rs"), + "pub struct Other { pub x: i32 } // Model ref", + ) + .unwrap(); + + // data.rs is in rest 
(filename "data" doesn't contain "user"), has struct Model + std::fs::write(src_dir.join("data.rs"), "pub struct Model { pub id: i32 }").unwrap(); + + let result = find_struct_by_name_in_all_files(src_dir, "Model", Some("UserSchema")); + + assert!( + result.is_some(), + "Should find Model in data.rs after candidates had no match" + ); + } + + #[test] + #[serial] + fn test_find_struct_full_scan_unparseable_file() { + // Tests line 197: full-scan file fails to parse -> continue to next file + let temp_dir = TempDir::new().unwrap(); + let src_dir = temp_dir.path(); + + // user.rs is candidate but no struct Model + std::fs::write( + src_dir.join("user.rs"), + "pub struct Other { pub x: i32 } // Model", + ) + .unwrap(); + + // broken.rs is rest, contains "Model" text but won't parse + std::fs::write(src_dir.join("broken.rs"), "Model unparseable {{{{{").unwrap(); + + // valid.rs is rest, has struct Model + std::fs::write(src_dir.join("valid.rs"), "pub struct Model { pub id: i32 }").unwrap(); + + let result = find_struct_by_name_in_all_files(src_dir, "Model", Some("UserSchema")); + + assert!( + result.is_some(), + "Should find Model in valid.rs after skipping unparseable broken.rs in rest" + ); + } } diff --git a/crates/vespera_macro/src/schema_macro/from_model.rs b/crates/vespera_macro/src/schema_macro/from_model.rs index 734aae8..96c104f 100644 --- a/crates/vespera_macro/src/schema_macro/from_model.rs +++ b/crates/vespera_macro/src/schema_macro/from_model.rs @@ -4,16 +4,14 @@ use std::collections::HashMap; +use super::type_utils::normalize_token_str; use proc_macro2::TokenStream; use quote::quote; use syn::Type; use super::{ - circular::{ - analyze_circular_refs, generate_inline_struct_construction, - generate_inline_type_construction, - }, - file_lookup::{find_fk_column_from_target_entity, find_struct_from_schema_path}, + circular::{generate_inline_struct_construction, generate_inline_type_construction}, + file_cache::{get_circular_analysis, get_fk_column, 
get_struct_from_schema_path}, seaorm::RelationFieldInfo, type_utils::snake_to_pascal_case, }; @@ -26,28 +24,17 @@ pub fn build_entity_path_from_schema_path( schema_path: &TokenStream, _source_module_path: &[String], ) -> TokenStream { - // Parse the schema path to extract segments + // Parse the schema path, replace "Schema" with "Entity", and build Idents in one pass let path_str = schema_path.to_string(); - let segments: Vec<&str> = path_str.split("::").map(str::trim).collect(); - - // Replace "Schema" with "Entity" in the last segment - let entity_segments: Vec = segments - .iter() + let path_idents: Vec = path_str + .split("::") .map(|s| { - if *s == "Schema" { - "Entity".to_string() - } else { - s.to_string() - } + let s = s.trim(); + let name = if s == "Schema" { "Entity" } else { s }; + syn::Ident::new(name, proc_macro2::Span::call_site()) }) .collect(); - // Build the path tokens - let path_idents: Vec = entity_segments - .iter() - .map(|s| syn::Ident::new(s, proc_macro2::Span::call_site())) - .collect(); - quote! 
{ #(#path_idents)::* } } @@ -142,49 +129,23 @@ pub fn generate_from_model_with_relations( } } "HasMany" => { - // HasMany with relation_enum: use FK-based query on target entity - // HasMany without relation_enum: use standard find_related - if let Some(ref via_rel_value) = rel.via_rel { - // Look up the FK column from the target entity - let schema_path_str = rel.schema_path.to_string().replace(' ', ""); - if let Some(fk_col_name) = find_fk_column_from_target_entity(&schema_path_str, via_rel_value) { - // Convert snake_case FK column to PascalCase for Column enum - let fk_col_pascal = snake_to_pascal_case(&fk_col_name); - let fk_col_ident = syn::Ident::new(&fk_col_pascal, proc_macro2::Span::call_site()); - - // Build the Column path: entity_path without ::Entity, then ::Column::FkCol - // e.g., crate::models::notification::Entity -> crate::models::notification::Column::TargetUserId - let entity_path_str = entity_path.to_string().replace(' ', ""); - let column_path_str = entity_path_str.replace(":: Entity", ":: Column"); - let column_path_idents: Vec = column_path_str.split("::").map(str::trim).filter(|s| !s.is_empty()).map(|s| syn::Ident::new(s, proc_macro2::Span::call_site())).collect(); - - quote! { - let #field_name = #(#column_path_idents)::*::#fk_col_ident - .into_column() - .eq(model.id.clone()) - .into_condition(); - let #field_name = #entity_path::find() - .filter(#field_name) - .all(db) - .await?; - } - } else { - // FK column not found - fall back to empty vec with warning comment - quote! 
{ - // WARNING: Could not find FK column for relation_enum, using empty vec - let #field_name: Vec<_> = vec![]; - } - } - } else if let Some(via_rel_value) = &rel.relation_enum { - // Has relation_enum but no via_rel - try using relation_enum as via_rel - let schema_path_str = rel.schema_path.to_string().replace(' ', ""); - if let Some(fk_col_name) = find_fk_column_from_target_entity(&schema_path_str, via_rel_value) { + // Try via_rel first, fall back to relation_enum as FK source + let fk_rel_source = rel.via_rel.as_ref().or(rel.relation_enum.as_ref()); + if let Some(via_rel_value) = fk_rel_source { + let schema_path_str = normalize_token_str(&rel.schema_path); + if let Some(fk_col_name) = get_fk_column(&schema_path_str, via_rel_value) { let fk_col_pascal = snake_to_pascal_case(&fk_col_name); let fk_col_ident = syn::Ident::new(&fk_col_pascal, proc_macro2::Span::call_site()); - let entity_path_str = entity_path.to_string().replace(' ', ""); + let entity_path_str = normalize_token_str(&entity_path); let column_path_str = entity_path_str.replace(":: Entity", ":: Column"); - let column_path_idents: Vec = column_path_str.split("::").map(str::trim).filter(|s| !s.is_empty()).map(|s| syn::Ident::new(s, proc_macro2::Span::call_site())).collect(); + let column_path_idents: Vec = column_path_str + .split("::") + .filter_map(|s| { + let trimmed = s.trim(); + if trimmed.is_empty() { None } else { Some(syn::Ident::new(trimmed, proc_macro2::Span::call_site())) } + }) + .collect(); quote! { let #field_name = #(#column_path_idents)::*::#fk_col_ident @@ -197,9 +158,8 @@ pub fn generate_from_model_with_relations( .await?; } } else { - // FK column not found - fall back to empty vec quote! 
{ - // WARNING: Could not find FK column for relation_enum, using empty vec + // WARNING: Could not find FK column for relation, using empty vec let #field_name: Vec<_> = vec![]; } } @@ -226,12 +186,12 @@ pub fn generate_from_model_with_relations( if rel.inline_type_info.is_some() { return false; } - let schema_path_str = rel.schema_path.to_string().replace(' ', ""); + let schema_path_str = normalize_token_str(&rel.schema_path); let model_path_str = schema_path_str.replace("::Schema", "::Model"); - let related_model = find_struct_from_schema_path(&model_path_str); + let related_model = get_struct_from_schema_path(&model_path_str); if let Some(ref model) = related_model { - let analysis = analyze_circular_refs(source_module_path, &model.definition); + let analysis = get_circular_analysis(source_module_path, &model.definition); // Check if any circular field is a required relation analysis.circular_fields.iter().any(|cf| { analysis @@ -276,6 +236,12 @@ pub fn generate_from_model_with_relations( vec![] }; + // Pre-build relation lookup for O(1) access in field assignments loop + let relation_by_name: HashMap<&syn::Ident, &RelationFieldInfo> = relation_fields + .iter() + .map(|rel| (&rel.field_name, rel)) + .collect(); + // Build field assignments // For relation fields, check for circular references and use inline construction if needed let field_assignments: Vec = field_mappings @@ -283,26 +249,26 @@ pub fn generate_from_model_with_relations( .map(|(new_ident, source_ident, wrapped, is_relation)| { if *is_relation { // Find the relation info for this field - if let Some(rel) = relation_fields.iter().find(|r| &r.field_name == source_ident) { + if let Some(rel) = relation_by_name.get(source_ident) { let schema_path = &rel.schema_path; // Try to find the related MODEL definition to check for circular refs // The schema_path is like "crate::models::user::Schema", but the actual // struct is "Model" in the same module. 
We need to look up the Model // to see if it has relations pointing back to us. - let schema_path_str = schema_path.to_string().replace(' ', ""); + let schema_path_str = normalize_token_str(schema_path); // Convert schema path to model path: Schema -> Model let model_path_str = schema_path_str.replace("::Schema", "::Model"); // Try to find the related Model definition from file - let related_model_from_file = find_struct_from_schema_path(&model_path_str); + let related_model_from_file = get_struct_from_schema_path(&model_path_str); // Get the definition string let related_def_str = related_model_from_file.as_ref().map_or("", |s| s.definition.as_str()); // Analyze circular references, FK relations, and FK optionality in ONE pass - let analysis = analyze_circular_refs(source_module_path, related_def_str); + let analysis = get_circular_analysis(source_module_path, related_def_str); let circular_fields = &analysis.circular_fields; let has_circular = !circular_fields.is_empty(); diff --git a/crates/vespera_macro/src/schema_macro/inline_types.rs b/crates/vespera_macro/src/schema_macro/inline_types.rs index 2bc0690..542b9e9 100644 --- a/crates/vespera_macro/src/schema_macro/inline_types.rs +++ b/crates/vespera_macro/src/schema_macro/inline_types.rs @@ -7,12 +7,10 @@ use proc_macro2::TokenStream; use quote::quote; use super::{ - circular::analyze_circular_refs, + file_cache::{get_circular_analysis, get_module_path_from_schema_path}, file_lookup::find_model_from_schema_path, seaorm::{RelationFieldInfo, convert_type_with_chrono}, - type_utils::{ - extract_module_path_from_schema_path, is_seaorm_relation_type, snake_to_pascal_case, - }, + type_utils::{is_seaorm_relation_type, snake_to_pascal_case}, }; use crate::parser::{extract_rename_all, extract_skip}; @@ -69,12 +67,12 @@ pub fn generate_inline_relation_type_from_def( model_def: &str, ) -> Option { // Parse the model struct - let parsed_model: syn::ItemStruct = syn::parse_str(model_def).ok()?; + let parsed_model: 
syn::ItemStruct = super::file_cache::parse_struct_cached(model_def).ok()?; // IMPORTANT: Use the TARGET model's module path for type resolution, not the parent's. // This ensures enum types like `AuthProvider` are resolved to `crate::models::user::AuthProvider` // instead of incorrectly using the parent module path. - let target_module_path = extract_module_path_from_schema_path(&rel_info.schema_path); + let target_module_path = get_module_path_from_schema_path(&rel_info.schema_path); let effective_module_path = if target_module_path.is_empty() { source_module_path } else { @@ -82,7 +80,7 @@ pub fn generate_inline_relation_type_from_def( }; // Detect circular fields - let circular_fields = analyze_circular_refs(source_module_path, model_def).circular_fields; + let circular_fields = get_circular_analysis(source_module_path, model_def).circular_fields; // If no circular fields, no need for inline type if circular_fields.is_empty() { @@ -106,7 +104,7 @@ pub fn generate_inline_relation_type_from_def( ); // Collect fields, excluding circular ones and relation types - let mut fields = Vec::new(); + let mut fields = Vec::with_capacity(8); if let syn::Fields::Named(fields_named) = &parsed_model.fields { for field in &fields_named.named { let field_ident = field.ident.as_ref()?; @@ -191,12 +189,12 @@ pub fn generate_inline_relation_type_no_relations_from_def( model_def: &str, ) -> Option { // Parse the model struct - let parsed_model: syn::ItemStruct = syn::parse_str(model_def).ok()?; + let parsed_model: syn::ItemStruct = super::file_cache::parse_struct_cached(model_def).ok()?; // IMPORTANT: Use the TARGET model's module path for type resolution, not the parent's. // This ensures enum types like `StoryStatus` are resolved to `crate::models::story::StoryStatus` // instead of incorrectly using the parent module path. 
- let target_module_path = extract_module_path_from_schema_path(&rel_info.schema_path); + let target_module_path = get_module_path_from_schema_path(&rel_info.schema_path); let effective_module_path = if target_module_path.is_empty() { source_module_path } else { @@ -219,7 +217,7 @@ pub fn generate_inline_relation_type_no_relations_from_def( ); // Collect fields, excluding ALL relation types - let mut fields = Vec::new(); + let mut fields = Vec::with_capacity(8); if let syn::Fields::Named(fields_named) = &parsed_model.fields { for field in &fields_named.named { let field_ident = field.ident.as_ref()?; diff --git a/crates/vespera_macro/src/schema_macro/mod.rs b/crates/vespera_macro/src/schema_macro/mod.rs index 8271e86..9cb8ddd 100644 --- a/crates/vespera_macro/src/schema_macro/mod.rs +++ b/crates/vespera_macro/src/schema_macro/mod.rs @@ -6,6 +6,7 @@ mod circular; mod codegen; +mod file_cache; mod file_lookup; mod from_model; mod inline_types; @@ -15,6 +16,8 @@ mod transformation; pub mod type_utils; mod validation; +pub use file_cache::print_profile_summary; + use std::collections::{HashMap, HashSet}; use codegen::generate_filtered_schema; @@ -48,7 +51,7 @@ use validation::{ use crate::{ metadata::StructMetadata, - parser::{extract_default, extract_field_rename, strip_raw_prefix}, + parser::{extract_default, extract_field_rename, strip_raw_prefix_owned}, }; /// Generate schema code from a struct with optional field filtering @@ -63,18 +66,19 @@ pub fn generate_schema_code( let struct_def = schema_storage.get(&type_name).ok_or_else(|| syn::Error::new_spanned(&input.ty, format!("type `{type_name}` not found. 
Make sure it has #[derive(Schema)] before this macro invocation")))?; // Parse the struct definition - let parsed_struct: syn::ItemStruct = syn::parse_str(&struct_def.definition).map_err(|e| { - syn::Error::new_spanned( - &input.ty, - format!("failed to parse struct definition for `{type_name}`: {e}"), - ) - })?; + let parsed_struct: syn::ItemStruct = file_cache::parse_struct_cached(&struct_def.definition) + .map_err(|e| { + syn::Error::new_spanned( + &input.ty, + format!("failed to parse struct definition for `{type_name}`: {e}"), + ) + })?; // Build omit set - let omit_set: HashSet = input.omit.clone().unwrap_or_default().into_iter().collect(); + let omit_set: HashSet = input.omit.iter().flatten().cloned().collect(); // Build pick set - let pick_set: HashSet = input.pick.clone().unwrap_or_default().into_iter().collect(); + let pick_set: HashSet = input.pick.iter().flatten().cloned().collect(); // Generate schema with filtering let schema_tokens = @@ -153,12 +157,13 @@ pub fn generate_schema_type_code( }; // Parse the struct definition - let parsed_struct: syn::ItemStruct = syn::parse_str(&struct_def.definition).map_err(|e| { - syn::Error::new_spanned( - &input.source_type, - format!("failed to parse struct definition for `{source_type_name}`: {e}"), - ) - })?; + let parsed_struct: syn::ItemStruct = file_cache::parse_struct_cached(&struct_def.definition) + .map_err(|e| { + syn::Error::new_spanned( + &input.source_type, + format!("failed to parse struct definition for `{source_type_name}`: {e}"), + ) + })?; // Extract all field names from source struct for validation // Include relation fields since they can be converted to Schema types @@ -195,10 +200,10 @@ pub fn generate_schema_type_code( )?; // Build filter sets and rename map - let omit_set = build_omit_set(input.omit.clone()); - let pick_set = build_pick_set(input.pick.clone()); + let omit_set = build_omit_set(input.omit.as_ref()); + let pick_set = build_pick_set(input.pick.as_ref()); let (partial_all, 
partial_set) = build_partial_config(&input.partial); - let rename_map = build_rename_map(input.rename.clone()); + let rename_map = build_rename_map(input.rename.as_ref()); // Extract serde attributes from source struct, excluding rename_all (we'll handle it separately) let serde_attrs_without_rename_all = @@ -230,7 +235,7 @@ pub fn generate_schema_type_code( for field in &fields_named.named { let rust_field_name = field.ident.as_ref().map_or_else( || "unknown".to_string(), - |i| strip_raw_prefix(&i.to_string()).to_string(), + |i| strip_raw_prefix_owned(i.to_string()), ); // Apply omit/pick filters diff --git a/crates/vespera_macro/src/schema_macro/seaorm.rs b/crates/vespera_macro/src/schema_macro/seaorm.rs index c6ec772..f41da13 100644 --- a/crates/vespera_macro/src/schema_macro/seaorm.rs +++ b/crates/vespera_macro/src/schema_macro/seaorm.rs @@ -259,8 +259,11 @@ pub fn extract_sea_orm_default_value(attrs: &[syn::Attribute]) -> Option } // If quoted string, strip quotes and return inner value - if raw_value.starts_with('"') && raw_value.ends_with('"') && raw_value.len() >= 2 { - return Some(raw_value[1..raw_value.len() - 1].to_string()); + if let Some(inner) = raw_value + .strip_prefix('"') + .and_then(|s| s.strip_suffix('"')) + { + return Some(inner.to_string()); } // Numeric, bool, or other literal — return as-is return Some(raw_value.to_string()); @@ -367,7 +370,8 @@ pub fn convert_relation_type_to_schema_with_info( let absolute_segments: Vec = if !segments.is_empty() && segments[0] == "super" { let super_count = segments.iter().take_while(|s| *s == "super").count(); let parent_path_len = source_module_path.len().saturating_sub(super_count); - let mut abs = source_module_path[..parent_path_len].to_vec(); + let mut abs = Vec::with_capacity(parent_path_len + segments.len() - super_count); + abs.extend_from_slice(&source_module_path[..parent_path_len]); for seg in segments.iter().skip(super_count) { if seg == "Entity" { abs.push("Schema".to_string()); @@ -389,7 
+393,8 @@ pub fn convert_relation_type_to_schema_with_info( .collect() } else { let parent_path_len = source_module_path.len().saturating_sub(1); - let mut abs = source_module_path[..parent_path_len].to_vec(); + let mut abs = Vec::with_capacity(parent_path_len + segments.len()); + abs.extend_from_slice(&source_module_path[..parent_path_len]); for seg in &segments { if seg == "Entity" { abs.push("Schema".to_string()); diff --git a/crates/vespera_macro/src/schema_macro/transformation.rs b/crates/vespera_macro/src/schema_macro/transformation.rs index 77f2236..ce2dce6 100644 --- a/crates/vespera_macro/src/schema_macro/transformation.rs +++ b/crates/vespera_macro/src/schema_macro/transformation.rs @@ -36,14 +36,14 @@ use std::collections::{HashMap, HashSet}; use super::input::PartialMode; use crate::parser::extract_rename_all; -/// Builds the omit set from input. -pub fn build_omit_set(omit: Option>) -> HashSet { - omit.unwrap_or_default().into_iter().collect() +/// Builds the omit set from input without cloning the source Vec. +pub fn build_omit_set(omit: Option<&Vec>) -> HashSet { + omit.into_iter().flatten().cloned().collect() } -/// Builds the pick set from input. -pub fn build_pick_set(pick: Option>) -> HashSet { - pick.unwrap_or_default().into_iter().collect() +/// Builds the pick set from input without cloning the source Vec. +pub fn build_pick_set(pick: Option<&Vec>) -> HashSet { + pick.into_iter().flatten().cloned().collect() } /// Builds the partial set based on partial mode. @@ -61,9 +61,9 @@ pub fn build_partial_config(partial: &Option) -> (bool, HashSet>) -> HashMap { - rename.unwrap_or_default().into_iter().collect() +/// Builds the rename map from input without cloning the source Vec. +pub fn build_rename_map(rename: Option<&Vec<(String, String)>>) -> HashMap { + rename.into_iter().flatten().cloned().collect() } /// Extracts serde attributes from a struct, excluding `rename_all`. 
@@ -190,7 +190,7 @@ mod tests { #[test] fn test_build_omit_set() { let omit = Some(vec!["password".to_string(), "secret".to_string()]); - let set = build_omit_set(omit); + let set = build_omit_set(omit.as_ref()); assert!(set.contains("password")); assert!(set.contains("secret")); @@ -206,7 +206,7 @@ mod tests { #[test] fn test_build_pick_set() { let pick = Some(vec!["id".to_string(), "name".to_string()]); - let set = build_pick_set(pick); + let set = build_pick_set(pick.as_ref()); assert!(set.contains("id")); assert!(set.contains("name")); @@ -249,7 +249,7 @@ mod tests { ("id".to_string(), "user_id".to_string()), ("name".to_string(), "full_name".to_string()), ]); - let map = build_rename_map(rename); + let map = build_rename_map(rename.as_ref()); assert_eq!(map.get("id"), Some(&"user_id".to_string())); assert_eq!(map.get("name"), Some(&"full_name".to_string())); diff --git a/crates/vespera_macro/src/schema_macro/type_utils.rs b/crates/vespera_macro/src/schema_macro/type_utils.rs index 4a7c9ee..fb9e045 100644 --- a/crates/vespera_macro/src/schema_macro/type_utils.rs +++ b/crates/vespera_macro/src/schema_macro/type_utils.rs @@ -7,6 +7,20 @@ use quote::quote; use serde_json; use syn::Type; +/// Normalize a `TokenStream` or `Type` to a compact string by removing spaces. +/// +/// This replaces the common `.to_string().replace(' ', "")` pattern used throughout +/// the codebase to produce deterministic path strings for comparison and cache keys. +#[inline] +pub fn normalize_token_str(displayable: &impl std::fmt::Display) -> String { + let s = displayable.to_string(); + if s.contains(' ') { + s.replace(' ', "") + } else { + s + } +} + /// Extract type name from a Type pub fn extract_type_name(ty: &Type) -> Result { match ty { @@ -170,30 +184,6 @@ pub fn resolve_type_to_absolute_path(ty: &Type, source_module_path: &[String]) - quote! { #(#path_idents)::* :: #type_ident #args } } -/// Extract module path from a schema path `TokenStream`. 
-/// -/// The `schema_path` is something like `crate::models::user::Schema`. -/// This returns `["crate", "models", "user"]` (excluding the final type name). -pub fn extract_module_path_from_schema_path(schema_path: &proc_macro2::TokenStream) -> Vec { - let path_str = schema_path.to_string(); - // Parse segments: "crate :: models :: user :: Schema" -> ["crate", "models", "user", "Schema"] - let segments: Vec<&str> = path_str - .split("::") - .map(str::trim) - .filter(|s| !s.is_empty()) - .collect(); - - // Return all but the last segment (which is "Schema" or "Entity") - if segments.len() > 1 { - segments[..segments.len() - 1] - .iter() - .map(std::string::ToString::to_string) - .collect() - } else { - vec![] - } -} - /// Extract the module path from a type (excluding the type name itself). /// e.g., `crate::models::memo::Model` -> `["crate", "models", "memo"]` pub fn extract_module_path(ty: &Type) -> Vec { @@ -708,47 +698,4 @@ mod tests { let ty: syn::Type = syn::parse_str("Vec>").unwrap(); assert!(is_primitive_like(&ty)); } - - // Tests for extract_module_path_from_schema_path - - #[rstest] - #[case("crate :: models :: user :: Schema", vec!["crate", "models", "user"])] - #[case("crate :: models :: nested :: deep :: Model", vec!["crate", "models", "nested", "deep"])] - #[case("super :: user :: Entity", vec!["super", "user"])] - #[case("super :: Model", vec!["super"])] - #[case("Schema", vec![])] - #[case("Model", vec![])] - fn test_extract_module_path_from_schema_path( - #[case] path_str: &str, - #[case] expected: Vec<&str>, - ) { - let tokens: proc_macro2::TokenStream = path_str.parse().unwrap(); - let result = extract_module_path_from_schema_path(&tokens); - let expected: Vec = expected - .into_iter() - .map(std::string::ToString::to_string) - .collect(); - assert_eq!(result, expected); - } - - #[test] - fn test_extract_module_path_from_schema_path_empty() { - let tokens = proc_macro2::TokenStream::new(); - let result = 
extract_module_path_from_schema_path(&tokens); - assert!(result.is_empty()); - } - - #[test] - fn test_extract_module_path_from_schema_path_with_generics() { - // Even with generics, should extract module path correctly - let tokens: proc_macro2::TokenStream = - "crate :: models :: user :: Schema < T >".parse().unwrap(); - let result = extract_module_path_from_schema_path(&tokens); - // Note: The current implementation splits by "::" which may include generics in last segment - // This test documents current behavior - assert!(!result.is_empty()); - assert_eq!(result[0], "crate"); - assert_eq!(result[1], "models"); - assert_eq!(result[2], "user"); - } } diff --git a/crates/vespera_macro/src/schema_macro/validation.rs b/crates/vespera_macro/src/schema_macro/validation.rs index 7c4a166..550017b 100644 --- a/crates/vespera_macro/src/schema_macro/validation.rs +++ b/crates/vespera_macro/src/schema_macro/validation.rs @@ -141,14 +141,14 @@ pub fn validate_partial_fields( /// /// Returns an empty set for tuple or unit structs. 
pub fn extract_source_field_names(parsed_struct: &syn::ItemStruct) -> HashSet { - use crate::parser::strip_raw_prefix; + use crate::parser::strip_raw_prefix_owned; if let syn::Fields::Named(fields_named) = &parsed_struct.fields { fields_named .named .iter() .filter_map(|f| f.ident.as_ref()) - .map(|i| strip_raw_prefix(&i.to_string()).to_string()) + .map(|i| strip_raw_prefix_owned(i.to_string())) .collect() } else { HashSet::new() diff --git a/crates/vespera_macro/src/vespera_impl.rs b/crates/vespera_macro/src/vespera_impl.rs index 9446215..136f71c 100644 --- a/crates/vespera_macro/src/vespera_impl.rs +++ b/crates/vespera_macro/src/vespera_impl.rs @@ -39,7 +39,7 @@ use crate::{ }; /// Docs info tuple type alias for cleaner signatures -pub type DocsInfo = (Option<(String, String)>, Option<(String, String)>); +pub type DocsInfo = (Option, Option, Option); /// Generate `OpenAPI` JSON and write to files, returning docs info pub fn generate_and_write_openapi( @@ -49,7 +49,7 @@ pub fn generate_and_write_openapi( ) -> MacroResult { if input.openapi_file_names.is_empty() && input.docs_url.is_none() && input.redoc_url.is_none() { - return Ok((None, None)); + return Ok((None, None, None)); } let mut openapi_doc = generate_openapi_doc_with_metadata( @@ -84,23 +84,26 @@ pub fn generate_and_write_openapi( } } - let json_str = serde_json::to_string_pretty(&openapi_doc).map_err(|e| err_call_site(format!("OpenAPI generation: failed to serialize document to JSON. Error: {e}. Check that all schema types are serializable.")))?; - - for openapi_file_name in &input.openapi_file_names { - let file_path = Path::new(openapi_file_name); - if let Some(parent) = file_path.parent() { - std::fs::create_dir_all(parent).map_err(|e| err_call_site(format!("OpenAPI output: failed to create directory '{}'. Error: {}. 
Ensure the path is valid and writable.", parent.display(), e)))?; + // Pretty-print for user-visible files + if !input.openapi_file_names.is_empty() { + let json_pretty = serde_json::to_string_pretty(&openapi_doc).map_err(|e| err_call_site(format!("OpenAPI generation: failed to serialize document to JSON. Error: {e}. Check that all schema types are serializable.")))?; + for openapi_file_name in &input.openapi_file_names { + let file_path = Path::new(openapi_file_name); + if let Some(parent) = file_path.parent() { + std::fs::create_dir_all(parent).map_err(|e| err_call_site(format!("OpenAPI output: failed to create directory '{}'. Error: {}. Ensure the path is valid and writable.", parent.display(), e)))?; + } + std::fs::write(file_path, &json_pretty).map_err(|e| err_call_site(format!("OpenAPI output: failed to write file '{openapi_file_name}'. Error: {e}. Ensure the file path is writable.")))?; } - std::fs::write(file_path, &json_str).map_err(|e| err_call_site(format!("OpenAPI output: failed to write file '{openapi_file_name}'. Error: {e}. Ensure the file path is writable.")))?; } - let docs_info = input - .docs_url - .as_ref() - .map(|url| (url.clone(), json_str.clone())); - let redoc_info = input.redoc_url.as_ref().map(|url| (url.clone(), json_str)); + // Compact JSON for embedding (smaller binary, faster downstream compilation) + let spec_json = if input.docs_url.is_some() || input.redoc_url.is_some() { + Some(serde_json::to_string(&openapi_doc).map_err(|e| err_call_site(format!("OpenAPI generation: failed to serialize document to JSON. Error: {e}. Check that all schema types are serializable.")))?) 
+ } else { + None + }; - Ok((docs_info, redoc_info)) + Ok((input.docs_url.clone(), input.redoc_url.clone(), spec_json)) } /// Find the folder path for route scanning @@ -157,6 +160,12 @@ pub fn process_vespera_macro( processed: &ProcessedVesperaInput, schema_storage: &HashMap, ) -> syn::Result { + let profile_start = if std::env::var("VESPERA_PROFILE").is_ok() { + Some(std::time::Instant::now()) + } else { + None + }; + let folder_path = find_folder_path(&processed.folder_name)?; if !folder_path.exists() { return Err(syn::Error::new( @@ -171,14 +180,59 @@ pub fn process_vespera_macro( let (mut metadata, file_asts) = collect_metadata(&folder_path, &processed.folder_name).map_err(|e| syn::Error::new(Span::call_site(), format!("vespera! macro: failed to scan route folder '{}'. Error: {}. Check that all .rs files have valid Rust syntax.", processed.folder_name, e)))?; metadata.structs.extend(schema_storage.values().cloned()); - let (docs_info, redoc_info) = generate_and_write_openapi(processed, &metadata, file_asts)?; + let (docs_url, redoc_url, spec_json) = + generate_and_write_openapi(processed, &metadata, file_asts)?; + + let spec_tokens = match spec_json { + Some(json) => { + let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap_or_default(); + let manifest_path = Path::new(&manifest_dir); + let target_dir = find_target_dir(manifest_path); + let vespera_dir = target_dir.join("vespera"); + std::fs::create_dir_all(&vespera_dir).map_err(|e| { + syn::Error::new( + Span::call_site(), + format!( + "vespera! macro: failed to create directory '{}': {}", + vespera_dir.display(), + e + ), + ) + })?; + let spec_file = vespera_dir.join("vespera_spec.json"); + std::fs::write(&spec_file, &json).map_err(|e| { + syn::Error::new( + Span::call_site(), + format!( + "vespera! macro: failed to write spec file '{}': {}", + spec_file.display(), + e + ), + ) + })?; + let path_str = spec_file.display().to_string().replace('\\', "/"); + Some(quote::quote! 
{ include_str!(#path_str) }) + } + None => None, + }; - Ok(generate_router_code( + let result = Ok(generate_router_code( &metadata, - docs_info, - redoc_info, + docs_url.as_deref(), + redoc_url.as_deref(), + spec_tokens, &processed.merge, - )) + )); + + if let Some(start) = profile_start { + eprintln!( + "[vespera-profile] vespera! macro total: {:?}", + start.elapsed() + ); + crate::schema_macro::print_profile_summary(); + } + + result } /// Process `export_app` macro - extracted for testability @@ -188,12 +242,18 @@ pub fn process_export_app( schema_storage: &HashMap, manifest_dir: &str, ) -> syn::Result { + let profile_start = if std::env::var("VESPERA_PROFILE").is_ok() { + Some(std::time::Instant::now()) + } else { + None + }; + let folder_path = find_folder_path(folder_name)?; if !folder_path.exists() { return Err(syn::Error::new( Span::call_site(), format!( - "export_app! macro: route folder '{folder_name}' not found. Create src/{folder_name} or specify a different folder with `dir = \"your_folder\"`." + "export_app! macro: route folder '{folder_name}' not found. Create src/{folder_name} or specify a different folder with `dir = \"your_folder\"`.", ), )); } @@ -214,17 +274,18 @@ pub fn process_export_app( std::fs::create_dir_all(&vespera_dir).map_err(|e| syn::Error::new(Span::call_site(), format!("export_app! macro: failed to create build cache directory '{}'. Error: {}. Ensure the target directory is writable.", vespera_dir.display(), e)))?; let spec_file = vespera_dir.join(format!("{name_str}.openapi.json")); std::fs::write(&spec_file, &spec_json).map_err(|e| syn::Error::new(Span::call_site(), format!("export_app! macro: failed to write OpenAPI spec file '{}'. Error: {}. 
Ensure the file path is writable.", spec_file.display(), e)))?; + let spec_path_str = spec_file.display().to_string().replace('\\', "/"); // Generate router code (without docs routes, no merge) - let router_code = generate_router_code(&metadata, None, None, &[]); + let router_code = generate_router_code(&metadata, None, None, None, &[]); - Ok(quote! { + let result = Ok(quote! { /// Auto-generated vespera app struct pub struct #name; impl #name { /// OpenAPI specification as JSON string - pub const OPENAPI_SPEC: &'static str = #spec_json; + pub const OPENAPI_SPEC: &'static str = include_str!(#spec_path_str); /// Create the router for this app. /// Returns `Router<()>` which can be merged into any other router. @@ -232,7 +293,17 @@ pub fn process_export_app( #router_code } } - }) + }); + + if let Some(start) = profile_start { + eprintln!( + "[vespera-profile] export_app! macro total: {:?}", + start.elapsed() + ); + crate::schema_macro::print_profile_summary(); + } + + result } #[cfg(test)] @@ -269,9 +340,10 @@ mod tests { let metadata = CollectedMetadata::new(); let result = generate_and_write_openapi(&processed, &metadata, HashMap::new()); assert!(result.is_ok()); - let (docs_info, redoc_info) = result.unwrap(); - assert!(docs_info.is_none()); - assert!(redoc_info.is_none()); + let (docs_url, redoc_url, spec_json) = result.unwrap(); + assert!(docs_url.is_none()); + assert!(redoc_url.is_none()); + assert!(spec_json.is_none()); } #[test] @@ -289,13 +361,14 @@ mod tests { let metadata = CollectedMetadata::new(); let result = generate_and_write_openapi(&processed, &metadata, HashMap::new()); assert!(result.is_ok()); - let (docs_info, redoc_info) = result.unwrap(); - assert!(docs_info.is_some()); - let (url, json) = docs_info.unwrap(); - assert_eq!(url, "/docs"); + let (docs_url, redoc_url, spec_json) = result.unwrap(); + assert!(docs_url.is_some()); + assert_eq!(docs_url.unwrap(), "/docs"); + assert!(spec_json.is_some()); + let json = spec_json.unwrap(); 
assert!(json.contains("\"openapi\"")); assert!(json.contains("Test API")); - assert!(redoc_info.is_none()); + assert!(redoc_url.is_none()); } #[test] @@ -313,11 +386,11 @@ mod tests { let metadata = CollectedMetadata::new(); let result = generate_and_write_openapi(&processed, &metadata, HashMap::new()); assert!(result.is_ok()); - let (docs_info, redoc_info) = result.unwrap(); - assert!(docs_info.is_none()); - assert!(redoc_info.is_some()); - let (url, _) = redoc_info.unwrap(); - assert_eq!(url, "/redoc"); + let (docs_url, redoc_url, spec_json) = result.unwrap(); + assert!(docs_url.is_none()); + assert!(redoc_url.is_some()); + assert_eq!(redoc_url.unwrap(), "/redoc"); + assert!(spec_json.is_some()); } #[test] @@ -335,9 +408,10 @@ mod tests { let metadata = CollectedMetadata::new(); let result = generate_and_write_openapi(&processed, &metadata, HashMap::new()); assert!(result.is_ok()); - let (docs_info, redoc_info) = result.unwrap(); - assert!(docs_info.is_some()); - assert!(redoc_info.is_some()); + let (docs_url, redoc_url, spec_json) = result.unwrap(); + assert!(docs_url.is_some()); + assert!(redoc_url.is_some()); + assert!(spec_json.is_some()); } #[test] @@ -853,4 +927,92 @@ mod tests { let err = result.unwrap_err().to_string(); assert!(err.contains("failed to write OpenAPI spec file")); } + #[test] + fn test_process_vespera_macro_no_openapi_output() { + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + create_temp_file(&temp_dir, "empty.rs", "// empty route file\n"); + + let processed = ProcessedVesperaInput { + folder_name: temp_dir.path().to_string_lossy().to_string(), + openapi_file_names: vec![], + title: None, + version: None, + docs_url: None, + redoc_url: None, + servers: None, + merge: vec![], + }; + + let result = process_vespera_macro(&processed, &HashMap::new()); + assert!( + result.is_ok(), + "Should succeed with no openapi output configured" + ); + } + + #[test] + #[serial_test::serial] + fn 
test_process_vespera_macro_with_profiling() { + let old_profile = std::env::var("VESPERA_PROFILE").ok(); + unsafe { std::env::set_var("VESPERA_PROFILE", "1") }; + + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + create_temp_file(&temp_dir, "empty.rs", "// empty\n"); + + let processed = ProcessedVesperaInput { + folder_name: temp_dir.path().to_string_lossy().to_string(), + openapi_file_names: vec![], + title: None, + version: None, + docs_url: None, + redoc_url: None, + servers: None, + merge: vec![], + }; + + let result = process_vespera_macro(&processed, &HashMap::new()); + + // Restore + unsafe { + if let Some(val) = old_profile { + std::env::set_var("VESPERA_PROFILE", val); + } else { + std::env::remove_var("VESPERA_PROFILE"); + } + }; + + assert!(result.is_ok()); + } + + #[test] + #[serial_test::serial] + fn test_process_export_app_with_profiling() { + let old_profile = std::env::var("VESPERA_PROFILE").ok(); + unsafe { std::env::set_var("VESPERA_PROFILE", "1") }; + + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + create_temp_file(&temp_dir, "empty.rs", "// empty\n"); + + let name: syn::Ident = syn::parse_quote!(TestProfileApp); + let folder_path = temp_dir.path().to_string_lossy().to_string(); + + let result = process_export_app( + &name, + &folder_path, + &HashMap::new(), + &temp_dir.path().to_string_lossy(), + ); + + // Restore + unsafe { + if let Some(val) = old_profile { + std::env::set_var("VESPERA_PROFILE", val); + } else { + std::env::remove_var("VESPERA_PROFILE"); + } + }; + + // Exercise the code path + let _ = result; + } } diff --git a/examples/axum-example/src/routes/form.rs b/examples/axum-example/src/routes/form.rs index 46b550f..39948f2 100644 --- a/examples/axum-example/src/routes/form.rs +++ b/examples/axum-example/src/routes/form.rs @@ -21,7 +21,6 @@ pub struct SubscribeResponse { pub is_subscribed: bool, } -#[allow(dead_code)] #[derive(Deserialize, Schema)] pub struct ContactFormRequest { pub name: 
String, @@ -55,7 +54,11 @@ pub async fn subscribe(Form(input): Form) -> Json) -> Json { Json(ContactFormResponse { success: true, - ticket_id: format!("TICKET-{}", input.name.len() + input.message.len()), + ticket_id: format!( + "TICKET-{}-{}", + input.name.len() + input.message.len(), + input.email.len() + input.subject.as_deref().map_or(0, str::len) + ), }) } diff --git a/examples/axum-example/src/routes/users.rs b/examples/axum-example/src/routes/users.rs index 07c3ef9..5ae8551 100644 --- a/examples/axum-example/src/routes/users.rs +++ b/examples/axum-example/src/routes/users.rs @@ -130,15 +130,12 @@ pub async fn create_user_with_meta( pub struct SkipResponse { pub name: String, #[serde(skip)] - #[allow(dead_code)] pub email: String, #[serde(skip, skip_serializing_if = "Option::is_none")] - #[allow(dead_code)] pub email2: Option, #[serde(rename = "email3", skip)] - #[allow(dead_code)] pub email3: Option, #[serde(rename = "email4", skip_serializing_if = "Option::is_none")] @@ -151,7 +148,6 @@ pub struct SkipResponse { pub email6: String, #[serde(rename = "email7", skip)] - #[allow(dead_code)] pub email7: String, #[serde(rename = "num", default)] @@ -176,7 +172,7 @@ fn default_value() -> String { #[vespera::route(get, path = "/skip-response")] pub async fn skip_response() -> Json { - Json(SkipResponse { + let response = SkipResponse { name: "John Doe".to_string(), email: "john.doe@example.com".to_string(), email2: Some("john.doe2@example.com".to_string()), @@ -210,5 +206,13 @@ pub async fn skip_response() -> Json { name: "John Doe".to_string(), }, )])), - }) + }; + // Read skip fields to validate they're populated correctly + let _ = ( + &response.email, + &response.email2, + &response.email3, + &response.email7, + ); + Json(response) }