diff --git a/Cargo.lock b/Cargo.lock index 4771c8a1..875d4d9f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2599,6 +2599,7 @@ dependencies = [ "biome_deserialize_macros 0.6.0", "bpaf", "indexmap 2.7.0", + "oxc_resolver", "pgt_analyse", "pgt_analyser", "pgt_console", @@ -2631,6 +2632,7 @@ dependencies = [ "backtrace", "bpaf", "enumflags2", + "oxc_resolver", "pgt_console", "pgt_diagnostics_categories", "pgt_diagnostics_macros", @@ -2676,6 +2678,7 @@ dependencies = [ "crossbeam", "directories", "enumflags2", + "oxc_resolver", "parking_lot", "pgt_diagnostics", "rayon", @@ -2912,6 +2915,7 @@ dependencies = [ "schemars", "serde", "serde_json", + "slotmap", "sqlx", "strum", "tempfile", @@ -3701,6 +3705,16 @@ dependencies = [ "autocfg", ] +[[package]] +name = "slotmap" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbff4acf519f630b3a3ddcfaea6c06b42174d9a44bc70c620e9ed1649d58b82a" +dependencies = [ + "serde", + "version_check", +] + [[package]] name = "smallvec" version = "1.13.2" diff --git a/Cargo.toml b/Cargo.toml index aaaa9035..fe00d7ca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,6 +28,7 @@ enumflags2 = "0.7.11" ignore = "0.4.23" indexmap = { version = "2.6.0", features = ["serde"] } insta = "1.31.0" +oxc_resolver = "1.12.0" pg_query = "6.1.0" proc-macro2 = "1.0.66" quote = "1.0.33" @@ -38,6 +39,7 @@ schemars = { version = "0.8.22", features = ["indexmap2", "small serde = "1.0.195" serde_json = "1.0.114" similar = "2.6.0" +slotmap = "1.0.7" smallvec = { version = "1.13.2", features = ["union", "const_new", "serde"] } strum = { version = "0.27.1", features = ["derive"] } # this will use tokio if available, otherwise async-std diff --git a/crates/pgt_cli/src/commands/mod.rs b/crates/pgt_cli/src/commands/mod.rs index ebd16e3d..19dc56ad 100644 --- a/crates/pgt_cli/src/commands/mod.rs +++ b/crates/pgt_cli/src/commands/mod.rs @@ -9,9 +9,9 @@ use bpaf::Bpaf; use pgt_configuration::{PartialConfiguration, partial_configuration}; use pgt_console::Console; use pgt_fs::FileSystem; +use pgt_workspace::PartialConfigurationExt; use pgt_workspace::configuration::{LoadedConfiguration, load_configuration}; -use pgt_workspace::settings::PartialConfigurationExt; -use pgt_workspace::workspace::UpdateSettingsParams; +use pgt_workspace::workspace::{RegisterProjectFolderParams, UpdateSettingsParams}; use pgt_workspace::{DynRef, Workspace, WorkspaceError}; use std::ffi::OsString; use std::path::PathBuf; @@ -301,6 +301,10 @@ pub(crate) trait CommandRunner: Sized { let (vcs_base_path, gitignore_matches) = configuration.retrieve_gitignore_matches(fs, vcs_base_path.as_deref())?; let paths = self.get_files_to_process(fs, &configuration)?; + workspace.register_project_folder(RegisterProjectFolderParams { + path: fs.working_directory(), + set_as_current_workspace: true, + })?; workspace.update_settings(UpdateSettingsParams { workspace_directory: fs.working_directory(), diff --git a/crates/pgt_cli/src/diagnostics.rs b/crates/pgt_cli/src/diagnostics.rs index d24d02e9..20d32113 100644 --- a/crates/pgt_cli/src/diagnostics.rs +++ b/crates/pgt_cli/src/diagnostics.rs @@ -455,7 +455,7 @@ mod test { fn termination_diagnostic_size() { assert_eq!( std::mem::size_of::<CliDiagnostic>(), - 80, + 96, "you successfully decreased the size of the diagnostic!"
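The new `slotmap` dependency above backs the `WorkspaceData` map introduced later in this diff. A minimal sketch of the property the workspace relies on, assuming slotmap 1.x (`DemoKey` and the paths here are illustrative, not part of the change):

```rust
use slotmap::{DenseSlotMap, new_key_type};

// Generate a typed key, the same way `ProjectKey` is generated below.
new_key_type! { pub struct DemoKey; }

fn main() {
    let mut projects: DenseSlotMap<DemoKey, &str> = DenseSlotMap::with_key();
    let one = projects.insert("/workspace/test_one");
    let two = projects.insert("/workspace/test_two");
    projects.remove(one);
    // Keys are stable: `two` still resolves after an unrelated removal,
    // which is why project keys can be handed out and kept by callers.
    assert_eq!(projects.get(two), Some(&"/workspace/test_two"));
}
```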
) } diff --git a/crates/pgt_configuration/Cargo.toml b/crates/pgt_configuration/Cargo.toml index 61da458b..3bd685fa 100644 --- a/crates/pgt_configuration/Cargo.toml +++ b/crates/pgt_configuration/Cargo.toml @@ -16,6 +16,7 @@ biome_deserialize = { workspace = true, features = ["schema"] } biome_deserialize_macros = { workspace = true } bpaf = { workspace = true } indexmap = { workspace = true } +oxc_resolver = { workspace = true } pgt_analyse = { workspace = true } pgt_analyser = { workspace = true } pgt_console = { workspace = true } diff --git a/crates/pgt_configuration/src/diagnostics.rs b/crates/pgt_configuration/src/diagnostics.rs index dc835ed7..79fd7714 100644 --- a/crates/pgt_configuration/src/diagnostics.rs +++ b/crates/pgt_configuration/src/diagnostics.rs @@ -1,5 +1,7 @@ use pgt_console::fmt::Display; use pgt_console::{MarkupBuf, markup}; +use pgt_diagnostics::adapters::ResolveError; + use pgt_diagnostics::{Advices, Diagnostic, Error, LogCategory, MessageAndDescription, Visit}; use serde::{Deserialize, Serialize}; use std::fmt::{Debug, Formatter}; @@ -21,6 +23,12 @@ pub enum ConfigurationDiagnostic { /// Thrown when the pattern inside the `ignore` field errors InvalidIgnorePattern(InvalidIgnorePattern), + + /// Thrown when there's something wrong with the files specified inside `"extends"` + CantLoadExtendFile(CantLoadExtendFile), + + /// Thrown when a configuration file can't be resolved from `node_modules` + CantResolve(CantResolve), } impl ConfigurationDiagnostic { @@ -72,6 +80,18 @@ impl ConfigurationDiagnostic { message: MessageAndDescription::from(markup! {{message}}.to_owned()), }) } + + pub fn cant_resolve(path: impl Display, source: oxc_resolver::ResolveError) -> Self { + Self::CantResolve(CantResolve { + message: MessageAndDescription::from( + markup! { + "Failed to resolve the configuration from "{{path}} + } + .to_owned(), + ), + source: Some(Error::from(ResolveError::from(source))), + }) + } } impl Debug for ConfigurationDiagnostic { @@ -168,3 +188,36 @@ pub struct CantResolve { #[source] source: Option<Error>, } + +#[derive(Debug, Serialize, Deserialize, Diagnostic)] +#[diagnostic( category = "configuration", severity = Error, )] +pub struct CantLoadExtendFile { + #[location(resource)] + file_path: String, + #[message] + #[description] + message: MessageAndDescription, + + #[verbose_advice] + verbose_advice: ConfigurationAdvices, +} + +impl CantLoadExtendFile { + pub fn new(file_path: impl Into<String>, message: impl Display) -> Self { + Self { + file_path: file_path.into(), + message: MessageAndDescription::from(markup! {{message}}.to_owned()), + verbose_advice: ConfigurationAdvices::default(), + } + } + + pub fn with_verbose_advice(mut self, message: impl Display) -> Self { + self.verbose_advice + .messages + .push(markup! {{message}}.to_owned()); + self + } +} diff --git a/crates/pgt_configuration/src/lib.rs b/crates/pgt_configuration/src/lib.rs index fcf0b5c6..b862dce4 100644 --- a/crates/pgt_configuration/src/lib.rs +++ b/crates/pgt_configuration/src/lib.rs @@ -22,6 +22,7 @@ pub use analyser::{ RulePlainConfiguration, RuleSelector, RuleWithFixOptions, RuleWithOptions, Rules, partial_linter_configuration, }; +use biome_deserialize::StringSet; use biome_deserialize_macros::{Merge, Partial}; use bpaf::Bpaf; use database::{ @@ -50,6 +51,10 @@ pub struct Configuration { #[partial(bpaf(hide))] pub schema: String, + /// A list of paths to other JSON files, used to extend the current configuration.
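+ /// For example (illustrative), a project file can inherit a shared root config with `"extends": ["../postgrestools.jsonc"]`; relative paths resolve against the extending file's directory, while bare specifiers go through `node_modules`.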
+ #[partial(bpaf(hide))] + pub extends: StringSet, + /// The configuration of the VCS integration #[partial(type, bpaf(external(partial_vcs_configuration), optional, hide_usage))] pub vcs: VcsConfiguration, @@ -85,6 +90,7 @@ impl PartialConfiguration { pub fn init() -> Self { Self { schema: Some(format!("https://pgtools.dev/schemas/{VERSION}/schema.json")), + extends: Some(StringSet::default()), files: Some(PartialFilesConfiguration { ignore: Some(Default::default()), ..Default::default() diff --git a/crates/pgt_diagnostics/Cargo.toml b/crates/pgt_diagnostics/Cargo.toml index 190b25f0..06c6f8dc 100644 --- a/crates/pgt_diagnostics/Cargo.toml +++ b/crates/pgt_diagnostics/Cargo.toml @@ -15,6 +15,7 @@ version = "0.0.0" backtrace = "0.3.74" bpaf = { workspace = true } enumflags2 = { workspace = true } +oxc_resolver = { workspace = true } pgt_console = { workspace = true, features = ["serde"] } pgt_diagnostics_categories = { workspace = true, features = ["serde"] } pgt_diagnostics_macros = { workspace = true } diff --git a/crates/pgt_diagnostics/src/adapters.rs b/crates/pgt_diagnostics/src/adapters.rs index ca627d3b..5c3dcdd5 100644 --- a/crates/pgt_diagnostics/src/adapters.rs +++ b/crates/pgt_diagnostics/src/adapters.rs @@ -134,3 +134,28 @@ impl Diagnostic for SerdeJsonError { fmt.write_markup(markup!({ AsConsoleDisplay(&self.error) })) } } + +#[derive(Debug)] +pub struct ResolveError { + error: oxc_resolver::ResolveError, +} + +impl From<oxc_resolver::ResolveError> for ResolveError { + fn from(error: oxc_resolver::ResolveError) -> Self { + Self { error } + } +} + +impl Diagnostic for ResolveError { + fn category(&self) -> Option<&'static Category> { + Some(category!("internalError/io")) + } + + fn description(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(fmt, "{}", self.error) + } + + fn message(&self, fmt: &mut fmt::Formatter<'_>) -> io::Result<()> { + fmt.write_markup(markup!({ AsConsoleDisplay(&self.error) })) + } +} diff --git a/crates/pgt_fs/Cargo.toml b/crates/pgt_fs/Cargo.toml index 1e4a7b4f..40478934 100644 --- a/crates/pgt_fs/Cargo.toml +++ b/crates/pgt_fs/Cargo.toml @@ -15,6 +15,7 @@ version = "0.0.0" crossbeam = { workspace = true } directories = "5.0.1" enumflags2 = { workspace = true } +oxc_resolver = { workspace = true } parking_lot = { version = "0.12.3", features = ["arc_lock"] } pgt_diagnostics = { workspace = true } rayon = { workspace = true } diff --git a/crates/pgt_fs/src/fs.rs b/crates/pgt_fs/src/fs.rs index b73aef6e..2bfd2e51 100644 --- a/crates/pgt_fs/src/fs.rs +++ b/crates/pgt_fs/src/fs.rs @@ -1,6 +1,7 @@ use crate::{PathInterner, PgTPath}; pub use memory::{ErrorEntry, MemoryFileSystem}; pub use os::OsFileSystem; +use oxc_resolver::{Resolution, ResolveError}; use pgt_diagnostics::{Advices, Diagnostic, LogCategory, Visit, console}; use pgt_diagnostics::{Error, Severity}; use serde::{Deserialize, Serialize}; @@ -164,6 +165,12 @@ pub trait FileSystem: Send + Sync + RefUnwindSafe { fn get_changed_files(&self, base: &str) -> io::Result<Vec<String>>; fn get_staged_files(&self) -> io::Result<Vec<String>>; + + fn resolve_configuration( + &self, + specifier: &str, + path: &Path, + ) -> Result<Resolution, ResolveError>; } /// Result of the auto search @@ -355,6 +362,14 @@ where fn get_staged_files(&self) -> io::Result<Vec<String>> { T::get_staged_files(self) } + + fn resolve_configuration( + &self, + specifier: &str, + path: &Path, + ) -> Result<Resolution, ResolveError> { + T::resolve_configuration(self, specifier, path) + } } #[derive(Debug, Diagnostic, Deserialize, Serialize)] diff --git a/crates/pgt_fs/src/fs/memory.rs b/crates/pgt_fs/src/fs/memory.rs index
baffe0ab..a744e575 100644 --- a/crates/pgt_fs/src/fs/memory.rs +++ b/crates/pgt_fs/src/fs/memory.rs @@ -1,3 +1,4 @@ +use oxc_resolver::{Resolution, ResolveError}; use rustc_hash::FxHashMap; use std::collections::hash_map::{Entry, IntoIter}; use std::io; @@ -227,6 +228,15 @@ impl FileSystem for MemoryFileSystem { Ok(cb()) } + + fn resolve_configuration( + &self, + _specifier: &str, + _path: &Path, + ) -> Result<Resolution, ResolveError> { + // not needed for the memory file system + todo!() + } } struct MemoryFile { diff --git a/crates/pgt_fs/src/fs/os.rs b/crates/pgt_fs/src/fs/os.rs index a2e40695..5033f296 100644 --- a/crates/pgt_fs/src/fs/os.rs +++ b/crates/pgt_fs/src/fs/os.rs @@ -5,9 +5,11 @@ use crate::{ FileSystem, PgTPath, fs::{TraversalContext, TraversalScope}, }; +use oxc_resolver::{Resolution, ResolveError, ResolveOptions, Resolver}; use pgt_diagnostics::{DiagnosticExt, Error, Severity, adapters::IoError}; use rayon::{Scope, scope}; use std::fs::{DirEntry, FileType}; +use std::panic::AssertUnwindSafe; use std::process::Command; use std::{ env, fs, @@ -21,12 +23,18 @@ const MAX_SYMLINK_DEPTH: u8 = 3; /// Implementation of [FileSystem] that directly calls through to the underlying OS pub struct OsFileSystem { pub working_directory: Option<PathBuf>, + pub configuration_resolver: AssertUnwindSafe<Resolver>, } impl OsFileSystem { pub fn new(working_directory: PathBuf) -> Self { Self { working_directory: Some(working_directory), + configuration_resolver: AssertUnwindSafe(Resolver::new(ResolveOptions { + condition_names: vec!["node".to_string(), "import".to_string()], + extensions: vec![".json".to_string(), ".jsonc".to_string()], + ..ResolveOptions::default() + })), } } } @@ -35,6 +43,11 @@ impl Default for OsFileSystem { fn default() -> Self { Self { working_directory: env::current_dir().ok(), + configuration_resolver: AssertUnwindSafe(Resolver::new(ResolveOptions { + condition_names: vec!["node".to_string(), "import".to_string()], + extensions: vec![".json".to_string(), ".jsonc".to_string()], + ..ResolveOptions::default() + })), } } } @@ -116,6 +129,14 @@ impl FileSystem for OsFileSystem { .map(|l| l.to_string()) .collect()) } + + fn resolve_configuration( + &self, + specifier: &str, + path: &Path, + ) -> Result<Resolution, ResolveError> { + self.configuration_resolver.resolve(path, specifier) + } } struct OsFile { @@ -387,8 +408,6 @@ fn follow_symlink( path: &Path, ctx: &dyn TraversalContext, ) -> Result<(PathBuf, FileType), SymlinkExpansionError> { - tracing::info!("Translating symlink: {path:?}"); - let target_path = fs::read_link(path).map_err(|err| { ctx.push_diagnostic(IoError::from(err).with_file_path(path.to_string_lossy().to_string())); SymlinkExpansionError diff --git a/crates/pgt_lsp/src/server.rs b/crates/pgt_lsp/src/server.rs index 4c05c0e4..6420c511 100644 --- a/crates/pgt_lsp/src/server.rs +++ b/crates/pgt_lsp/src/server.rs @@ -1,10 +1,13 @@ use crate::capabilities::server_capabilities; use crate::handlers; -use crate::session::{CapabilitySet, CapabilityStatus, Session, SessionHandle, SessionKey}; +use crate::session::{ + CapabilitySet, CapabilityStatus, ClientInformation, Session, SessionHandle, SessionKey, +}; use crate::utils::{into_lsp_error, panic_to_lsp_error}; use futures::FutureExt; use futures::future::ready; use pgt_fs::{ConfigName, FileSystem, OsFileSystem}; +use pgt_workspace::workspace::{RegisterProjectFolderParams, UnregisterProjectFolderParams}; use pgt_workspace::{DynRef, Workspace, workspace}; use rustc_hash::FxHashMap; use serde_json::json; @@ -107,6 +110,10 @@ impl LanguageServer for LSPServer { self.session.initialize(
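+ // Hand the session the client's capabilities together with its self-reported name and version (mapped into ClientInformation just below).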
params.capabilities, + params.client_info.map(|client_info| ClientInformation { + name: client_info.name, + version: client_info.version, + }), params.root_uri, params.workspace_folders, ); @@ -217,6 +224,47 @@ impl LanguageServer for LSPServer { .ok(); } + async fn did_change_workspace_folders(&self, params: DidChangeWorkspaceFoldersParams) { + for removed in &params.event.removed { + if let Ok(project_path) = self.session.file_path(&removed.uri) { + let result = self + .session + .workspace + .unregister_project_folder(UnregisterProjectFolderParams { path: project_path }) + .map_err(into_lsp_error); + + if let Err(err) = result { + error!("Failed to remove project from the workspace: {}", err); + self.session + .client + .log_message(MessageType::ERROR, err) + .await; + } + } + } + + for added in &params.event.added { + if let Ok(project_path) = self.session.file_path(&added.uri) { + let result = self + .session + .workspace + .register_project_folder(RegisterProjectFolderParams { + path: Some(project_path.to_path_buf()), + set_as_current_workspace: true, + }) + .map_err(into_lsp_error); + + if let Err(err) = result { + error!("Failed to add project to the workspace: {}", err); + self.session + .client + .log_message(MessageType::ERROR, err) + .await; + } + } + } + } + #[tracing::instrument(level = "trace", skip_all)] async fn completion(&self, params: CompletionParams) -> LspResult<Option<CompletionResponse>> { match handlers::completions::get_completions(&self.session, params) { @@ -398,6 +446,8 @@ impl ServerFactory { workspace_method!(builder, close_file); workspace_method!(builder, pull_diagnostics); workspace_method!(builder, get_completions); + workspace_method!(builder, register_project_folder); + workspace_method!(builder, unregister_project_folder); let (service, socket) = builder.finish(); ServerConnection { socket, service } diff --git a/crates/pgt_lsp/src/session.rs b/crates/pgt_lsp/src/session.rs index 7ccf2bab..fd5af2da 100644 --- a/crates/pgt_lsp/src/session.rs +++ b/crates/pgt_lsp/src/session.rs @@ -10,11 +10,11 @@ use pgt_analyse::RuleCategoriesBuilder; use pgt_configuration::{ConfigurationPathHint, PartialConfiguration}; use pgt_diagnostics::{DiagnosticExt, Error}; use pgt_fs::{FileSystem, PgTPath}; +use pgt_workspace::PartialConfigurationExt; use pgt_workspace::Workspace; use pgt_workspace::configuration::{LoadedConfiguration, load_configuration}; use pgt_workspace::features; -use pgt_workspace::settings::PartialConfigurationExt; -use pgt_workspace::workspace::UpdateSettingsParams; +use pgt_workspace::workspace::{RegisterProjectFolderParams, UpdateSettingsParams}; use pgt_workspace::{DynRef, WorkspaceError}; use rustc_hash::FxHashMap; use serde_json::Value; @@ -31,6 +31,14 @@ use tower_lsp::lsp_types::{MessageType, Registration}; use tower_lsp::lsp_types::{Unregistration, WorkspaceFolder}; use tracing::{error, info}; +pub(crate) struct ClientInformation { + /// The name of the client + pub(crate) name: String, + + /// The version of the client + pub(crate) version: Option<String>, +} + /// Key, uniquely identifying a LSP session.
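For reference, a hedged sketch of the payload the new `did_change_workspace_folders` handler consumes, using the types re-exported through `tower_lsp::lsp_types` (the folder names and URIs are illustrative):

```rust
use tower_lsp::lsp_types::{
    DidChangeWorkspaceFoldersParams, Url, WorkspaceFolder, WorkspaceFoldersChangeEvent,
};

fn main() {
    // One notification can carry both additions and removals; the handler
    // above unregisters the removed folders first, then registers the added ones.
    let params = DidChangeWorkspaceFoldersParams {
        event: WorkspaceFoldersChangeEvent {
            added: vec![WorkspaceFolder {
                uri: Url::parse("file:///workspace/test_two").unwrap(),
                name: "test_two".to_string(),
            }],
            removed: vec![WorkspaceFolder {
                uri: Url::parse("file:///workspace/test_one").unwrap(),
                name: "test_one".to_string(),
            }],
        },
    };
    assert_eq!(params.event.added.len(), 1);
    assert_eq!(params.event.removed.len(), 1);
}
```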
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug)] pub(crate) struct SessionKey(pub u64); @@ -68,6 +76,7 @@ pub(crate) struct Session { struct InitializeParams { /// The capabilities provided by the client as part of [`lsp_types::InitializeParams`] client_capabilities: lsp_types::ClientCapabilities, + client_information: Option<ClientInformation>, root_uri: Option<Url>, #[allow(unused)] workspace_folders: Option<Vec<WorkspaceFolder>>, @@ -164,11 +173,13 @@ impl Session { pub(crate) fn initialize( &self, client_capabilities: lsp_types::ClientCapabilities, + client_information: Option<ClientInformation>, root_uri: Option<Url>, workspace_folders: Option<Vec<WorkspaceFolder>>, ) { let result = self.initialize_params.set(InitializeParams { client_capabilities, + client_information, root_uri, workspace_folders, }); @@ -446,6 +457,8 @@ impl Session { info!("Configuration loaded successfully from disk."); info!("Update workspace settings."); + let fs = &self.fs; + if let Some(ws_configuration) = extra_config { fs_configuration.merge_with(ws_configuration); } @@ -455,6 +468,31 @@ impl Session { match result { Ok((vcs_base_path, gitignore_matches)) => { + let register_result = + if let ConfigurationPathHint::FromWorkspace(path) = &base_path { + // We don't need the key + self.workspace + .register_project_folder(RegisterProjectFolderParams { + path: Some(path.clone()), + // This is naive, but we don't know if the user has a file already open or not, so we register every project as the current one. + // The correct one is actually set when the LSP calls `textDocument/didOpen` + set_as_current_workspace: true, + }) + .err() + } else { + self.workspace + .register_project_folder(RegisterProjectFolderParams { + path: fs.working_directory(), + set_as_current_workspace: true, + }) + .err() + }; + if let Some(error) = register_result { + error!("Failed to register the project folder: {}", error); + self.client.log_message(MessageType::ERROR, &error).await; + return ConfigurationStatus::Error; + } + let result = self.workspace.update_settings(UpdateSettingsParams { workspace_directory: self.fs.working_directory(), configuration: fs_configuration, diff --git a/crates/pgt_lsp/tests/server.rs b/crates/pgt_lsp/tests/server.rs index 19b65b06..438c7298 100644 --- a/crates/pgt_lsp/tests/server.rs +++ b/crates/pgt_lsp/tests/server.rs @@ -3,6 +3,7 @@ use anyhow::Error; use anyhow::Result; use anyhow::bail; use biome_deserialize::Merge; +use biome_deserialize::StringSet; use futures::Sink; use futures::SinkExt; use futures::Stream; @@ -40,6 +41,7 @@ use tower_lsp::lsp_types::Position; use tower_lsp::lsp_types::Range; use tower_lsp::lsp_types::TextDocumentPositionParams; use tower_lsp::lsp_types::WorkDoneProgressParams; +use tower_lsp::lsp_types::WorkspaceFolder; use tower_lsp::lsp_types::{ ClientCapabilities, DidChangeConfigurationParams, DidChangeTextDocumentParams, DidCloseTextDocumentParams, DidOpenTextDocumentParams, InitializeResult, InitializedParams, @@ -164,6 +166,42 @@ impl Server { Ok(()) } + /// It creates two workspaces, one at folder `test_one` and the other in `test_two`.
+ /// + /// Hence, the two roots will be `/workspace/test_one` and `/workspace/test_two` + #[allow(deprecated)] + async fn initialize_workspaces(&mut self) -> Result<()> { + let _res: InitializeResult = self + .request( + "initialize", + "_init", + InitializeParams { + process_id: None, + root_path: None, + root_uri: Some(url!("/")), + initialization_options: None, + capabilities: ClientCapabilities::default(), + trace: None, + workspace_folders: Some(vec![ + WorkspaceFolder { + name: "test_one".to_string(), + uri: url!("test_one"), + }, + WorkspaceFolder { + name: "test_two".to_string(), + uri: url!("test_two"), + }, + ]), + client_info: None, + locale: None, + }, + ) + .await? + .context("initialize returned None")?; + + Ok(()) + } + /// Basic implementation of the `initialized` notification for tests async fn initialized(&mut self) -> Result<()> { self.notify("initialized", InitializedParams {}).await @@ -204,13 +242,18 @@ impl Server { } /// Opens a document with given contents and given name. The name must contain the extension too - async fn open_named_document(&mut self, text: impl Display, document_name: Url) -> Result<()> { + async fn open_named_document( + &mut self, + text: impl Display, + document_name: Url, + language: impl Display, + ) -> Result<()> { self.notify( "textDocument/didOpen", DidOpenTextDocumentParams { text_document: TextDocumentItem { uri: document_name, - language_id: String::from("sql"), + language_id: language.to_string(), version: 0, text: text.to_string(), }, @@ -230,24 +273,31 @@ impl Server { .await } - async fn change_document( + async fn change_named_document( &mut self, + uri: Url, version: i32, content_changes: Vec<TextDocumentContentChangeEvent>, ) -> Result<()> { self.notify( "textDocument/didChange", DidChangeTextDocumentParams { - text_document: VersionedTextDocumentIdentifier { - uri: url!("document.sql"), - version, - }, + text_document: VersionedTextDocumentIdentifier { uri, version }, content_changes, }, ) .await } + async fn change_document( + &mut self, + version: i32, + content_changes: Vec<TextDocumentContentChangeEvent>, + ) -> Result<()> { + self.change_named_document(url!("document.sql"), version, content_changes) + .await + } + #[allow(unused)] async fn close_document(&mut self) -> Result<()> { self.notify( @@ -831,7 +881,7 @@ async fn test_execute_statement(test_db: PgPool) -> Result<()> { let doc_url = url!("test.sql"); server - .open_named_document(doc_content.to_string(), doc_url.clone()) + .open_named_document(doc_content.to_string(), doc_url.clone(), "sql") .await?; let code_actions_response = server @@ -1107,3 +1157,388 @@ async fn test_issue_303(test_db: PgPool) -> Result<()> { Ok(()) } + +#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] +async fn multiple_projects(test_db: PgPool) -> Result<()> { + let factory = ServerFactory::default(); + let mut fs = MemoryFileSystem::default(); + + let setup = r#" + create table public.users ( + id serial primary key, + name varchar(255) not null + ); + "#; + + test_db + .execute(setup) + .await + .expect("Failed to setup test database"); + + // Setup configurations + // - test_one with db connection + let mut conf_with_db = PartialConfiguration::init(); + conf_with_db.merge_with(PartialConfiguration { + db: Some(PartialDatabaseConfiguration { + database: Some( + test_db + .connect_options() + .get_database() + .unwrap() + .to_string(), + ), + ..Default::default() + }), + ..Default::default() + }); + fs.insert( + url!("test_one/postgrestools.jsonc").to_file_path().unwrap(), + serde_json::to_string_pretty(&conf_with_db).unwrap(), + ); + + // -
test_two without db connection + let mut conf_without_db = PartialConfiguration::init(); + conf_without_db.merge_with(PartialConfiguration { + db: Some(PartialDatabaseConfiguration { + disable_connection: Some(true), + ..Default::default() + }), + ..Default::default() + }); + fs.insert( + url!("test_two/postgrestools.jsonc").to_file_path().unwrap(), + serde_json::to_string_pretty(&conf_without_db).unwrap(), + ); + + let (service, client) = factory + .create_with_fs(None, DynRef::Owned(Box::new(fs))) + .into_inner(); + + let (stream, sink) = client.split(); + let mut server = Server::new(service); + + let (sender, _) = channel(CHANNEL_BUFFER_SIZE); + let reader = tokio::spawn(client_handler(stream, sink, sender)); + + server.initialize_workspaces().await?; + server.initialized().await?; + + server.load_configuration().await?; + + // do the same change in both workspaces and request completions in both workspaces + + server + .open_named_document( + "select from public.users;\n", + url!("test_one/document.sql"), + "sql", + ) + .await?; + + server + .change_named_document( + url!("test_one/document.sql"), + 3, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 0, + character: 7, + }, + end: Position { + line: 0, + character: 7, + }, + }), + range_length: Some(0), + text: " ".to_string(), + }], + ) + .await?; + + let res_ws_one = server + .get_completion(CompletionParams { + work_done_progress_params: WorkDoneProgressParams::default(), + partial_result_params: PartialResultParams::default(), + context: None, + text_document_position: TextDocumentPositionParams { + text_document: TextDocumentIdentifier { + uri: url!("test_one/document.sql"), + }, + position: Position { + line: 0, + character: 8, + }, + }, + }) + .await? + .unwrap(); + + server + .open_named_document( + "select from public.users;\n", + url!("test_two/document.sql"), + "sql", + ) + .await?; + + server + .change_named_document( + url!("test_two/document.sql"), + 3, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 0, + character: 7, + }, + end: Position { + line: 0, + character: 7, + }, + }), + range_length: Some(0), + text: " ".to_string(), + }], + ) + .await?; + + let res_ws_two = server + .get_completion(CompletionParams { + work_done_progress_params: WorkDoneProgressParams::default(), + partial_result_params: PartialResultParams::default(), + context: None, + text_document_position: TextDocumentPositionParams { + text_document: TextDocumentIdentifier { + uri: url!("test_two/document.sql"), + }, + position: Position { + line: 0, + character: 8, + }, + }, + }) + .await? 
+ .unwrap(); + + // only the first one has a db connection and should return completion items + assert!(!match res_ws_one { + CompletionResponse::Array(a) => a.is_empty(), + CompletionResponse::List(l) => l.items.is_empty(), + }); + assert!(match res_ws_two { + CompletionResponse::Array(a) => a.is_empty(), + CompletionResponse::List(l) => l.items.is_empty(), + }); + + server.shutdown().await?; + reader.abort(); + + Ok(()) +} + +#[sqlx::test(migrator = "pgt_test_utils::MIGRATIONS")] +async fn extends_config(test_db: PgPool) -> Result<()> { + let factory = ServerFactory::default(); + let mut fs = MemoryFileSystem::default(); + + let setup = r#" + create table public.extends_config_test ( + id serial primary key, + name varchar(255) not null + ); + "#; + + test_db + .execute(setup) + .await + .expect("Failed to setup test database"); + + // shared config with default db connection + let conf_with_db = PartialConfiguration::init(); + fs.insert( + url!("postgrestools.jsonc").to_file_path().unwrap(), + serde_json::to_string_pretty(&conf_with_db).unwrap(), + ); + + let relative_path = if cfg!(windows) { + "..\\postgrestools.jsonc" + } else { + "../postgrestools.jsonc" + }; + + // test_one extends the shared config but sets our test db + let mut conf_with_db = PartialConfiguration::init(); + conf_with_db.merge_with(PartialConfiguration { + extends: Some(StringSet::from_iter([relative_path.to_string()])), + db: Some(PartialDatabaseConfiguration { + database: Some( + test_db + .connect_options() + .get_database() + .unwrap() + .to_string(), + ), + ..Default::default() + }), + ..Default::default() + }); + + fs.insert( + url!("test_one/postgrestools.jsonc").to_file_path().unwrap(), + serde_json::to_string_pretty(&conf_with_db).unwrap(), + ); + + // test_two extends it but keeps the default one + let mut conf_without_db = PartialConfiguration::init(); + conf_without_db.merge_with(PartialConfiguration { + extends: Some(StringSet::from_iter([relative_path.to_string()])), + ..Default::default() + }); + fs.insert( + url!("test_two/postgrestools.jsonc").to_file_path().unwrap(), + serde_json::to_string_pretty(&conf_without_db).unwrap(), + ); + + let (service, client) = factory + .create_with_fs(None, DynRef::Owned(Box::new(fs))) + .into_inner(); + + let (stream, sink) = client.split(); + let mut server = Server::new(service); + + let (sender, _) = channel(CHANNEL_BUFFER_SIZE); + let reader = tokio::spawn(client_handler(stream, sink, sender)); + + server.initialize_workspaces().await?; + server.initialized().await?; + + server.load_configuration().await?; + + server + .open_named_document( + "select from public.extends_config_test;\n", + url!("test_one/document.sql"), + "sql", + ) + .await?; + + server + .change_named_document( + url!("test_one/document.sql"), + 3, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 0, + character: 7, + }, + end: Position { + line: 0, + character: 7, + }, + }), + range_length: Some(0), + text: " ".to_string(), + }], + ) + .await?; + + let res_ws_one = server + .get_completion(CompletionParams { + work_done_progress_params: WorkDoneProgressParams::default(), + partial_result_params: PartialResultParams::default(), + context: None, + text_document_position: TextDocumentPositionParams { + text_document: TextDocumentIdentifier { + uri: url!("test_one/document.sql"), + }, + position: Position { + line: 0, + character: 8, + }, + }, + }) + .await? 
+ .unwrap(); + + server + .open_named_document( + "select from public.users;\n", + url!("test_two/document.sql"), + "sql", + ) + .await?; + + server + .change_named_document( + url!("test_two/document.sql"), + 3, + vec![TextDocumentContentChangeEvent { + range: Some(Range { + start: Position { + line: 0, + character: 7, + }, + end: Position { + line: 0, + character: 7, + }, + }), + range_length: Some(0), + text: " ".to_string(), + }], + ) + .await?; + + let res_ws_two = server + .get_completion(CompletionParams { + work_done_progress_params: WorkDoneProgressParams::default(), + partial_result_params: PartialResultParams::default(), + context: None, + text_document_position: TextDocumentPositionParams { + text_document: TextDocumentIdentifier { + uri: url!("test_two/document.sql"), + }, + position: Position { + line: 0, + character: 8, + }, + }, + }) + .await? + .unwrap(); + + let items_one = match res_ws_one { + CompletionResponse::Array(ref a) => a, + CompletionResponse::List(ref l) => &l.items, + }; + + // test one should have our test db connection and should return the completion items for the `extends_config_test` table + assert!(items_one.iter().any(|item| { + item.label_details.clone().is_some_and(|details| { + details + .description + .is_some_and(|desc| desc.contains("public.extends_config_test")) + }) + })); + + let items_two = match res_ws_two { + CompletionResponse::Array(ref a) => a, + CompletionResponse::List(ref l) => &l.items, + }; + + // test two should not have a db connection and should not return the completion items for the `extends_config_test` table + assert!(!items_two.iter().any(|item| { + item.label_details.clone().is_some_and(|details| { + details + .description + .is_some_and(|desc| desc.contains("public.extends_config_test")) + }) + })); + + server.shutdown().await?; + reader.abort(); + + Ok(()) +} diff --git a/crates/pgt_workspace/Cargo.toml b/crates/pgt_workspace/Cargo.toml index 5f598b2d..bfa413e3 100644 --- a/crates/pgt_workspace/Cargo.toml +++ b/crates/pgt_workspace/Cargo.toml @@ -35,6 +35,7 @@ rustc-hash = { workspace = true } schemars = { workspace = true, optional = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true, features = ["raw_value"] } +slotmap = { workspace = true, features = ["serde"] } sqlx.workspace = true strum = { workspace = true } tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } diff --git a/crates/pgt_workspace/src/configuration.rs b/crates/pgt_workspace/src/configuration.rs index 88c04eec..1baebcc7 100644 --- a/crates/pgt_workspace/src/configuration.rs +++ b/crates/pgt_workspace/src/configuration.rs @@ -1,14 +1,17 @@ use std::{ + ffi::OsStr, io::ErrorKind, ops::Deref, path::{Path, PathBuf}, }; +use biome_deserialize::Merge; use pgt_analyse::AnalyserRules; use pgt_configuration::{ ConfigurationDiagnostic, ConfigurationPathHint, ConfigurationPayload, PartialConfiguration, - VERSION, push_to_analyser_rules, + VERSION, diagnostics::CantLoadExtendFile, push_to_analyser_rules, }; +use pgt_console::markup; use pgt_fs::{AutoSearchResult, ConfigName, FileSystem, OpenOptions}; use crate::{DynRef, WorkspaceError, settings::Settings}; @@ -28,34 +31,41 @@ pub struct LoadedConfiguration { } impl LoadedConfiguration { - /// Return the path of the **directory** where the configuration is - pub fn directory_path(&self) -> Option<&Path> { - self.directory_path.as_deref() - } - - /// Return the path of the **file** where the configuration is - pub fn file_path(&self) -> Option<&Path> { - 
self.file_path.as_deref() - } -} - -impl From<Option<ConfigurationPayload>> for LoadedConfiguration { - fn from(value: Option<ConfigurationPayload>) -> Self { + fn try_from_payload( + value: Option<ConfigurationPayload>, + fs: &DynRef<'_, dyn FileSystem>, + ) -> Result<Self, WorkspaceError> { let Some(value) = value else { - return LoadedConfiguration::default(); + return Ok(LoadedConfiguration::default()); }; let ConfigurationPayload { + external_resolution_base_path, configuration_file_path, - deserialized: partial_configuration, - .. + deserialized: mut partial_configuration, } = value; - LoadedConfiguration { + partial_configuration.apply_extends( + fs, + &configuration_file_path, + &external_resolution_base_path, + )?; + + Ok(Self { configuration: partial_configuration, directory_path: configuration_file_path.parent().map(PathBuf::from), file_path: Some(configuration_file_path), - } + }) + } + + /// Return the path of the **directory** where the configuration is + pub fn directory_path(&self) -> Option<&Path> { + self.directory_path.as_deref() + } + + /// Return the path of the **file** where the configuration is + pub fn file_path(&self) -> Option<&Path> { + self.file_path.as_deref() } } @@ -65,7 +75,7 @@ pub fn load_configuration( config_path: ConfigurationPathHint, ) -> Result<LoadedConfiguration, WorkspaceError> { let config = load_config(fs, config_path)?; - Ok(LoadedConfiguration::from(config)) + LoadedConfiguration::try_from_payload(config, fs) } /// - [Result]: if an error occurred while loading the configuration file. @@ -120,7 +130,7 @@ fn load_config( ConfigurationPathHint::None => file_system.working_directory().unwrap_or_default(), }; - // We first search for `postgrestools.jsonc` + // We first search for `postgrestools.jsonc` files if let Some(auto_search_result) = file_system.auto_search( &configuration_directory, ConfigName::file_names().as_slice(), @@ -265,10 +275,276 @@ pub fn strip_jsonc_comments(jsonc_input: &str) -> String { json_output } +pub trait PartialConfigurationExt { + fn apply_extends( + &mut self, + fs: &DynRef<'_, dyn FileSystem>, + file_path: &Path, + external_resolution_base_path: &Path, + ) -> Result<(), WorkspaceError>; + + fn deserialize_extends( + &mut self, + fs: &DynRef<'_, dyn FileSystem>, + relative_resolution_base_path: &Path, + external_resolution_base_path: &Path, + ) -> Result<Vec<PartialConfiguration>, WorkspaceError>; + + fn retrieve_gitignore_matches( + &self, + file_system: &DynRef<'_, dyn FileSystem>, + vcs_base_path: Option<&Path>, + ) -> Result<(Option<PathBuf>, Vec<String>), WorkspaceError>; +} + +impl PartialConfigurationExt for PartialConfiguration { + /// Mutates the configuration so that any fields that have not been configured explicitly are + /// filled in with their values from configs listed in the `extends` field. + /// + /// The `extends` configs are applied from left to right. + /// + /// If a configuration can't be resolved from the file system, the operation will fail. + fn apply_extends( + &mut self, + fs: &DynRef<'_, dyn FileSystem>, + file_path: &Path, + external_resolution_base_path: &Path, + ) -> Result<(), WorkspaceError> { + let configurations = self.deserialize_extends( + fs, + file_path.parent().expect("file path should have a parent"), + external_resolution_base_path, + )?; + + let extended_configuration = configurations.into_iter().reduce( + |mut previous_configuration, current_configuration| { + previous_configuration.merge_with(current_configuration); + previous_configuration + }, + ); + if let Some(mut extended_configuration) = extended_configuration { + // We swap them to avoid having to clone `self.configuration` to merge it.
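+ // e.g. (illustrative): self = { db: test_db }, extended chain = { db: shared_db, vcs: git }; after the swap and merge-back, self = { db: test_db, vcs: git }, so explicitly set values win.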
+ std::mem::swap(self, &mut extended_configuration); + self.merge_with(extended_configuration) + } + + Ok(()) + } + + /// It attempts to deserialize all the configuration files that were specified in the `extends` property + fn deserialize_extends( + &mut self, + fs: &DynRef<'_, dyn FileSystem>, + relative_resolution_base_path: &Path, + external_resolution_base_path: &Path, + ) -> Result<Vec<PartialConfiguration>, WorkspaceError> { + let Some(extends) = &self.extends else { + return Ok(Vec::new()); + }; + + let mut deserialized_configurations = vec![]; + for extend_entry in extends.iter() { + let extend_entry_as_path = Path::new(extend_entry); + + let extend_configuration_file_path = if extend_entry_as_path.starts_with(".") + || matches!( + extend_entry_as_path + .extension() + .map(OsStr::as_encoded_bytes), + Some(b"jsonc") + ) { + // Normalize the path to handle relative segments like "../" + normalize_path(&relative_resolution_base_path.join(extend_entry)) + } else { + fs.resolve_configuration(extend_entry.as_str(), external_resolution_base_path) + .map_err(|error| { + ConfigurationDiagnostic::cant_resolve( + external_resolution_base_path.display().to_string(), + error, + ) + })? + .into_path_buf() + }; + + let mut file = fs + .open_with_options( + extend_configuration_file_path.as_path(), + OpenOptions::default().read(true), + ) + .map_err(|err| { + CantLoadExtendFile::new( + extend_configuration_file_path.display().to_string(), + err.to_string(), + ) + .with_verbose_advice(markup! { + "Postgres Tools tried to load the configuration file \""{ + extend_configuration_file_path.display().to_string() + }"\" in \"extends\" using \""{ + external_resolution_base_path.display().to_string() + }"\" as the base path." + }) + })?; + + let mut content = String::new(); + file.read_to_string(&mut content).map_err(|err| { + CantLoadExtendFile::new(extend_configuration_file_path.display().to_string(), err.to_string()).with_verbose_advice( + markup!{ + "It's possible that the file was created with a different user/group. Make sure you have the rights to read the file." + } + ) + + })?; + + let deserialized = serde_json::from_str::<PartialConfiguration>(&content) + .map_err(ConfigurationDiagnostic::new_deserialization_error)?; + deserialized_configurations.push(deserialized) + } + Ok(deserialized_configurations) + } + + /// This function checks if the VCS integration is enabled, and if so, it will attempt to resolve the + /// VCS root directory and the `.gitignore` file.
+ /// + /// ## Returns + /// + /// A tuple with the VCS root folder and the contents of the `.gitignore` file + fn retrieve_gitignore_matches( + &self, + file_system: &DynRef<'_, dyn FileSystem>, + vcs_base_path: Option<&Path>, + ) -> Result<(Option<PathBuf>, Vec<String>), WorkspaceError> { + let Some(vcs) = &self.vcs else { + return Ok((None, vec![])); + }; + if vcs.is_enabled() { + let vcs_base_path = match (vcs_base_path, &vcs.root) { + (Some(vcs_base_path), Some(root)) => vcs_base_path.join(root), + (None, Some(root)) => PathBuf::from(root), + (Some(vcs_base_path), None) => PathBuf::from(vcs_base_path), + (None, None) => return Err(WorkspaceError::vcs_disabled()), + }; + if let Some(client_kind) = &vcs.client_kind { + if !vcs.ignore_file_disabled() { + let result = file_system + .auto_search(&vcs_base_path, &[client_kind.ignore_file()], false) + .map_err(WorkspaceError::from)?; + + if let Some(result) = result { + return Ok(( + result.file_path.parent().map(PathBuf::from), + result + .content + .lines() + .map(String::from) + .collect::<Vec<String>>(), + )); + } + } + } + } + Ok((None, vec![])) + } +} + +/// Normalizes a path, resolving '..' and '.' segments without requiring the path to exist +fn normalize_path(path: &Path) -> PathBuf { + let mut components = Vec::new(); + let mut prefix_component = None; + let mut is_absolute = false; + + for component in path.components() { + match component { + std::path::Component::Prefix(_prefix) => { + prefix_component = Some(component); + components.clear(); + } + std::path::Component::RootDir => { + is_absolute = true; + components.clear(); + } + std::path::Component::ParentDir => { + if !components.is_empty() { + components.pop(); + } else if !is_absolute && prefix_component.is_none() { + // Only keep parent dir if we're not absolute and have no prefix + components.push(component.as_os_str()); + } + } + std::path::Component::Normal(c) => { + components.push(c); + } + std::path::Component::CurDir => { + // Skip current directory components + } + } + } + + let mut result = PathBuf::new(); + + // Add prefix component (like C: on Windows) + if let Some(prefix) = prefix_component { + result.push(prefix.as_os_str()); + } + + // Add root directory if path is absolute + if is_absolute { + result.push(std::path::Component::RootDir.as_os_str()); + } + + // Add normalized components + for component in components { + result.push(component); + } + + // Handle edge cases + if result.as_os_str().is_empty() { + if prefix_component.is_some() || is_absolute { + // This shouldn't happen with proper input, but fallback to original path's root + return path + .ancestors() + .last() + .unwrap_or(Path::new("")) + .to_path_buf(); + } else { + return PathBuf::from("."); + } + } + + result } #[cfg(test)] mod tests { use super::*; + #[test] + fn test_normalize_path_windows_drive() { + if cfg!(windows) { + let path = Path::new(r"z:\workspace\test_one\..\postgrestools.jsonc"); + let normalized = normalize_path(path); + assert_eq!( + normalized, + PathBuf::from(r"z:\workspace\postgrestools.jsonc") + ); + } + } + + #[test] + fn test_normalize_path_relative() { + let path = Path::new("workspace/test_one/../postgrestools.jsonc"); + let normalized = normalize_path(path); + assert_eq!(normalized, PathBuf::from("workspace/postgrestools.jsonc")); + } + + #[test] + fn test_normalize_path_multiple_parent_dirs() { + if cfg!(windows) { + let path = Path::new(r"c:\a\b\c\..\..\d"); + let normalized = normalize_path(path); + assert_eq!(normalized, PathBuf::from(r"c:\a\d")); + } + } + #[test] fn
test_strip_jsonc_comments_line_comments() { let input = r#"{ diff --git a/crates/pgt_workspace/src/diagnostics.rs b/crates/pgt_workspace/src/diagnostics.rs index 9ba02a1a..5020cc62 100644 --- a/crates/pgt_workspace/src/diagnostics.rs +++ b/crates/pgt_workspace/src/diagnostics.rs @@ -1,4 +1,5 @@ use pgt_configuration::ConfigurationDiagnostic; +use pgt_configuration::diagnostics::CantLoadExtendFile; use pgt_console::fmt::Bytes; use pgt_console::markup; use pgt_diagnostics::{ @@ -354,3 +355,9 @@ impl Diagnostic for FileTooLarge { ) } } + +impl From<CantLoadExtendFile> for WorkspaceError { + fn from(value: CantLoadExtendFile) -> Self { + WorkspaceError::Configuration(ConfigurationDiagnostic::CantLoadExtendFile(value)) + } +} diff --git a/crates/pgt_workspace/src/lib.rs b/crates/pgt_workspace/src/lib.rs index 99fe063f..df8b0ba7 100644 --- a/crates/pgt_workspace/src/lib.rs +++ b/crates/pgt_workspace/src/lib.rs @@ -14,6 +14,7 @@ pub mod workspace; #[cfg(feature = "schema")] pub mod workspace_types; +pub use crate::configuration::PartialConfigurationExt; pub use crate::diagnostics::{TransportError, WorkspaceError}; pub use crate::workspace::Workspace; diff --git a/crates/pgt_workspace/src/settings.rs b/crates/pgt_workspace/src/settings.rs index f9275aa9..08854493 100644 --- a/crates/pgt_workspace/src/settings.rs +++ b/crates/pgt_workspace/src/settings.rs @@ -8,6 +8,7 @@ use std::{ sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}, time::Duration, }; +use tracing::trace; use ignore::gitignore::{Gitignore, GitignoreBuilder}; use pgt_configuration::{ @@ -17,9 +18,185 @@ use pgt_configuration::{ files::FilesConfiguration, migrations::{MigrationsConfiguration, PartialMigrationsConfiguration}, }; -use pgt_fs::FileSystem; +use pgt_fs::PgTPath; -use crate::{DynRef, WorkspaceError, matcher::Matcher}; +use crate::{ + WorkspaceError, + matcher::Matcher, + workspace::{ProjectKey, WorkspaceData}, +}; + +#[derive(Debug, Default)] +/// The information tracked for each project +pub struct ProjectData { + /// The root path of the project. This path should be **absolute**. + path: PgTPath, + /// The settings of the project, usually inferred from the configuration file e.g. `postgrestools.jsonc`. + settings: Settings, +} + +#[derive(Debug, Default)] +/// Type that manages different projects inside the workspace. +pub struct WorkspaceSettings { + /// The data of the projects + data: WorkspaceData<ProjectData>, + /// The ID of the current project.
+ current_project: ProjectKey, +} + +impl WorkspaceSettings { + pub fn get_current_project_key(&self) -> ProjectKey { + self.current_project + } + + pub fn get_current_project_path(&self) -> Option<&PgTPath> { + trace!("Current key {:?}", self.current_project); + self.data + .get(self.current_project) + .as_ref() + .map(|d| &d.path) + } + + pub fn get_current_project_data_mut(&mut self) -> &mut ProjectData { + self.data + .get_mut(self.current_project) + .expect("Current project not configured") + } + + /// Retrieves the settings of the current workspace folder + pub fn get_current_settings(&self) -> Option<&Settings> { + trace!("Current key {:?}", self.current_project); + let data = self.data.get(self.current_project); + if let Some(data) = data { + Some(&data.settings) + } else { + None + } + } + + /// Retrieves a mutable reference of the settings of the current project + pub fn get_current_settings_mut(&mut self) -> &mut Settings { + &mut self + .data + .get_mut(self.current_project) + .expect("You must have at least one workspace.") + .settings + } + + /// Register the current project using its unique key + pub fn register_current_project(&mut self, key: ProjectKey) { + self.current_project = key; + } + + /// Insert a new project using its folder. Use [WorkspaceSettings::get_current_settings_mut] to retrieve + /// a mutable reference to its [Settings] and manipulate them. + pub fn insert_project(&mut self, workspace_path: impl Into<PathBuf>) -> ProjectKey { + let path = PgTPath::new(workspace_path.into()); + trace!("Insert workspace folder: {:?}", path); + self.data.insert(ProjectData { + path, + settings: Settings::default(), + }) + } + + /// Remove a project using its folder. + pub fn remove_project(&mut self, workspace_path: &Path) { + let keys_to_remove = { + let mut data = vec![]; + let iter = self.data.iter(); + + for (key, path_to_settings) in iter { + if path_to_settings.path.as_path() == workspace_path { + data.push(key) + } + } + + data + }; + + for key in keys_to_remove { + self.data.remove(key) + } + } + + /// Checks if the current path belongs to a registered project. + /// + /// If there's a match, and the match **isn't** the current project, it returns the new key. + pub fn path_belongs_to_current_workspace(&self, path: &PgTPath) -> Option<ProjectKey> { + if self.data.is_empty() { + return None; + } + trace!("Current key: {:?}", self.current_project); + let iter = self.data.iter(); + for (key, path_to_settings) in iter { + trace!( + "Workspace path {:?}, file path {:?}", + path_to_settings.path, path + ); + trace!("Iter key: {:?}", key); + if key == self.current_project { + continue; + } + if path.strip_prefix(path_to_settings.path.as_path()).is_ok() { + trace!("Update workspace to {:?}", key); + return Some(key); + } + } + None + } + + /// Marks the project identified by the given key as the current one.
+ pub fn set_current_project(&mut self, new_key: ProjectKey) { + self.current_project = new_key; + } +} + +#[derive(Debug)] +pub struct WorkspaceSettingsHandle<'a> { + inner: RwLockReadGuard<'a, WorkspaceSettings>, +} + +impl<'a> WorkspaceSettingsHandle<'a> { + pub(crate) fn new(settings: &'a RwLock<WorkspaceSettings>) -> Self { + Self { + inner: settings.read().unwrap(), + } + } + + pub(crate) fn settings(&self) -> Option<&Settings> { + self.inner.get_current_settings() + } + + pub(crate) fn path(&self) -> Option<&PgTPath> { + self.inner.get_current_project_path() + } +} + +impl AsRef<WorkspaceSettings> for WorkspaceSettingsHandle<'_> { + fn as_ref(&self) -> &WorkspaceSettings { + &self.inner + } +} + +pub struct WorkspaceSettingsHandleMut<'a> { + inner: RwLockWriteGuard<'a, WorkspaceSettings>, +} + +impl<'a> WorkspaceSettingsHandleMut<'a> { + pub(crate) fn new(settings: &'a RwLock<WorkspaceSettings>) -> Self { + Self { + inner: settings.write().unwrap(), + } + } +} + +impl AsMut<WorkspaceSettings> for WorkspaceSettingsHandleMut<'_> { + fn as_mut(&mut self) -> &mut WorkspaceSettings { + &mut self.inner + } +} /// Global settings for the entire workspace #[derive(Debug, Default)] @@ -397,59 +574,6 @@ impl Default for FilesSettings { } } -pub trait PartialConfigurationExt { - fn retrieve_gitignore_matches( - &self, - file_system: &DynRef<'_, dyn FileSystem>, - vcs_base_path: Option<&Path>, - ) -> Result<(Option<PathBuf>, Vec<String>), WorkspaceError>; -} - -impl PartialConfigurationExt for PartialConfiguration { - /// This function checks if the VCS integration is enabled, and if so, it will attempts to resolve the - /// VCS root directory and the `.gitignore` file. - /// - /// ## Returns - /// - /// A tuple with VCS root folder and the contents of the `.gitignore` file - fn retrieve_gitignore_matches( - &self, - file_system: &DynRef<'_, dyn FileSystem>, - vcs_base_path: Option<&Path>, - ) -> Result<(Option<PathBuf>, Vec<String>), WorkspaceError> { - let Some(vcs) = &self.vcs else { - return Ok((None, vec![])); - }; - if vcs.is_enabled() { - let vcs_base_path = match (vcs_base_path, &vcs.root) { - (Some(vcs_base_path), Some(root)) => vcs_base_path.join(root), - (None, Some(root)) => PathBuf::from(root), - (Some(vcs_base_path), None) => PathBuf::from(vcs_base_path), - (None, None) => return Err(WorkspaceError::vcs_disabled()), - }; - if let Some(client_kind) = &vcs.client_kind { - if !vcs.ignore_file_disabled() { - let result = file_system - .auto_search(&vcs_base_path, &[client_kind.ignore_file()], false) - .map_err(WorkspaceError::from)?; - - if let Some(result) = result { - return Ok(( - result.file_path.parent().map(PathBuf::from), - result - .content - .lines() - .map(String::from) - .collect::<Vec<String>>(), - )); - } - } - } - } - Ok((None, vec![])) - } -} - #[cfg(test)] mod tests { use biome_deserialize::StringSet; diff --git a/crates/pgt_workspace/src/workspace.rs b/crates/pgt_workspace/src/workspace.rs index 873dd83e..61d60a49 100644 --- a/crates/pgt_workspace/src/workspace.rs +++ b/crates/pgt_workspace/src/workspace.rs @@ -5,7 +5,10 @@ use pgt_analyse::RuleCategories; use pgt_configuration::{PartialConfiguration, RuleSelector}; use pgt_fs::PgTPath; use pgt_text_size::TextRange; +#[cfg(feature = "schema")] +use schemars::{JsonSchema, SchemaGenerator, schema::Schema}; use serde::{Deserialize, Serialize}; +use slotmap::{DenseSlotMap, new_key_type}; use crate::{ WorkspaceError, @@ -92,6 +95,21 @@ pub struct ServerInfo { pub version: Option<String>, } +#[derive(Debug, serde::Serialize, serde::Deserialize)] +#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] +#[serde(rename_all = "camelCase")] +pub
struct RegisterProjectFolderParams { + pub path: Option<PathBuf>, + pub set_as_current_workspace: bool, +} + +#[derive(Debug, serde::Serialize, serde::Deserialize)] +#[cfg_attr(feature = "schema", derive(schemars::JsonSchema))] +#[serde(rename_all = "camelCase")] +pub struct UnregisterProjectFolderParams { + pub path: PgTPath, +} + pub trait Workspace: Send + Sync + RefUnwindSafe { /// Retrieves the list of diagnostics associated to a file fn pull_diagnostics( &self, params: PullDiagnosticsParams, @@ -110,6 +128,18 @@ pub trait Workspace: Send + Sync + RefUnwindSafe { params: GetCompletionsParams, ) -> Result<CompletionsResult, WorkspaceError>; + /// Register a possible workspace project folder. Returns the key of said project. Use this key when you want to switch to different projects. + fn register_project_folder( + &self, + params: RegisterProjectFolderParams, + ) -> Result<ProjectKey, WorkspaceError>; + + /// Unregister a workspace project folder. The settings that belong to that project are deleted. + fn unregister_project_folder( + &self, + params: UnregisterProjectFolderParams, + ) -> Result<(), WorkspaceError>; + /// Update the global settings for this workspace fn update_settings(&self, params: UpdateSettingsParams) -> Result<(), WorkspaceError>; @@ -222,3 +252,76 @@ impl Drop for FileGuard<'_, W> { .ok(); } } + +new_key_type! { + pub struct ProjectKey; +} + +#[cfg(feature = "schema")] +impl JsonSchema for ProjectKey { + fn schema_name() -> String { + "ProjectKey".to_string() + } + + fn json_schema(generator: &mut SchemaGenerator) -> Schema { + <String>::json_schema(generator) + } +} + +#[derive(Debug, Default)] +pub struct WorkspaceData<V> { + /// [DenseSlotMap] is the slowest type in insertion/removal, but the fastest in iteration + /// + /// Users wouldn't change workspace folders very often, so fast iteration matters more than insertion/removal speed. + paths: DenseSlotMap<ProjectKey, V>, +} + +impl<V> WorkspaceData<V> { + /// Inserts an item + pub fn insert(&mut self, item: V) -> ProjectKey { + self.paths.insert(item) + } + + /// Removes an item + pub fn remove(&mut self, key: ProjectKey) { + self.paths.remove(key); + } + + /// Get a reference of the value + pub fn get(&self, key: ProjectKey) -> Option<&V> { + self.paths.get(key) + } + + /// Get a mutable reference of the value + pub fn get_mut(&mut self, key: ProjectKey) -> Option<&mut V> { + self.paths.get_mut(key) + } + + pub fn is_empty(&self) -> bool { + self.paths.is_empty() + } + + pub fn iter(&self) -> WorkspaceDataIterator<'_, V> { + WorkspaceDataIterator::new(self) + } +} + +pub struct WorkspaceDataIterator<'a, V> { + iterator: slotmap::dense::Iter<'a, ProjectKey, V>, } + +impl<'a, V> WorkspaceDataIterator<'a, V> { + fn new(data: &'a WorkspaceData<V>) -> Self { + Self { + iterator: data.paths.iter(), + } + } +} + +impl<'a, V> Iterator for WorkspaceDataIterator<'a, V> { + type Item = (ProjectKey, &'a V); + + fn next(&mut self) -> Option<Self::Item> { + self.iterator.next() + } } diff --git a/crates/pgt_workspace/src/workspace/client.rs b/crates/pgt_workspace/src/workspace/client.rs index d727fff6..2bd21513 100644 --- a/crates/pgt_workspace/src/workspace/client.rs +++ b/crates/pgt_workspace/src/workspace/client.rs @@ -7,7 +7,10 @@ use std::{ sync::atomic::{AtomicU64, Ordering}, }; -use super::{CloseFileParams, GetFileContentParams, IsPathIgnoredParams, OpenFileParams}; +use super::{ + CloseFileParams, GetFileContentParams, IsPathIgnoredParams, OpenFileParams, ProjectKey, + RegisterProjectFolderParams, UnregisterProjectFolderParams, +}; pub struct WorkspaceClient<T> { transport: T, @@ -103,6 +106,20 @@ where self.request("pgt/execute_statement", params) } + fn register_project_folder( + &self, + params: RegisterProjectFolderParams,
) -> Result<ProjectKey, WorkspaceError> { + self.request("pgt/register_project_folder", params) + } + + fn unregister_project_folder( + &self, + params: UnregisterProjectFolderParams, + ) -> Result<(), WorkspaceError> { + self.request("pgt/unregister_project_folder", params) + } + fn open_file(&self, params: OpenFileParams) -> Result<(), WorkspaceError> { self.request("pgt/open_file", params) } diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs index 82e79e10..67a3713c 100644 --- a/crates/pgt_workspace/src/workspace/server.rs +++ b/crates/pgt_workspace/src/workspace/server.rs @@ -1,14 +1,14 @@ use std::{ fs, panic::RefUnwindSafe, - path::Path, + path::{Path, PathBuf}, sync::{Arc, RwLock}, }; use analyser::AnalyserVisitorBuilder; use async_helper::run_async; +use connection_manager::ConnectionManager; use dashmap::DashMap; -use db_connection::DbConnection; use document::Document; use futures::{StreamExt, stream}; use parsed_document::{ @@ -23,8 +23,8 @@ use pgt_diagnostics::{ use pgt_fs::{ConfigName, PgTPath}; use pgt_typecheck::{IdentifierType, TypecheckParams, TypedIdentifier}; use schema_cache_manager::SchemaCacheManager; -use sqlx::Executor; -use tracing::info; +use sqlx::{Executor, PgPool}; +use tracing::{debug, info}; use crate::{ WorkspaceError, @@ -37,11 +37,12 @@ use crate::{ completions::{CompletionsResult, GetCompletionsParams, get_statement_for_completions}, diagnostics::{PullDiagnosticsParams, PullDiagnosticsResult}, }, - settings::{Settings, SettingsHandle, SettingsHandleMut}, + settings::{WorkspaceSettings, WorkspaceSettingsHandle, WorkspaceSettingsHandleMut}, }; use super::{ - GetFileContentParams, IsPathIgnoredParams, OpenFileParams, ServerInfo, UpdateSettingsParams, + GetFileContentParams, IsPathIgnoredParams, OpenFileParams, ProjectKey, + RegisterProjectFolderParams, ServerInfo, UnregisterProjectFolderParams, UpdateSettingsParams, Workspace, }; @@ -51,7 +52,8 @@ mod analyser; mod annotation; mod async_helper; mod change; -mod db_connection; +mod connection_key; +mod connection_manager; pub(crate) mod document; mod migration; pub(crate) mod parsed_document; @@ -63,14 +65,14 @@ mod tree_sitter; pub(super) struct WorkspaceServer { /// global settings object for this workspace - settings: RwLock<Settings>, + settings: RwLock<WorkspaceSettings>, /// Stores the schema cache for this workspace schema_cache: SchemaCacheManager, parsed_documents: DashMap<PgTPath, ParsedDocument>, - connection: RwLock<DbConnection>, + connection: ConnectionManager, } /// The `Workspace` object is long-lived, so we want it to be able to cross @@ -91,23 +93,60 @@ impl WorkspaceServer { Self { settings: RwLock::default(), parsed_documents: DashMap::default(), - schema_cache: SchemaCacheManager::default(), - connection: RwLock::default(), + schema_cache: SchemaCacheManager::new(), + connection: ConnectionManager::new(), } } /// Provides a reference to the current settings - fn settings(&self) -> SettingsHandle { - SettingsHandle::new(&self.settings) + fn workspaces(&self) -> WorkspaceSettingsHandle { + WorkspaceSettingsHandle::new(&self.settings) } - fn settings_mut(&self) -> SettingsHandleMut { - SettingsHandleMut::new(&self.settings) + fn workspaces_mut(&self) -> WorkspaceSettingsHandleMut { + WorkspaceSettingsHandleMut::new(&self.settings) + } + + fn get_current_connection(&self) -> Option<PgPool> { + let settings = self.workspaces(); + let settings = settings.settings()?; + self.connection.get_pool(&settings.db) + } + + /// Register a new project in the current workspace + fn register_project(&self, path: PathBuf) -> ProjectKey { + let mut
diff --git a/crates/pgt_workspace/src/workspace/server.rs b/crates/pgt_workspace/src/workspace/server.rs
index 82e79e10..67a3713c 100644
--- a/crates/pgt_workspace/src/workspace/server.rs
+++ b/crates/pgt_workspace/src/workspace/server.rs
@@ -1,14 +1,14 @@
 use std::{
     fs,
     panic::RefUnwindSafe,
-    path::Path,
+    path::{Path, PathBuf},
     sync::{Arc, RwLock},
 };
 
 use analyser::AnalyserVisitorBuilder;
 use async_helper::run_async;
+use connection_manager::ConnectionManager;
 use dashmap::DashMap;
-use db_connection::DbConnection;
 use document::Document;
 use futures::{StreamExt, stream};
 use parsed_document::{
@@ -23,8 +23,8 @@ use pgt_diagnostics::{
 use pgt_fs::{ConfigName, PgTPath};
 use pgt_typecheck::{IdentifierType, TypecheckParams, TypedIdentifier};
 use schema_cache_manager::SchemaCacheManager;
-use sqlx::Executor;
-use tracing::info;
+use sqlx::{Executor, PgPool};
+use tracing::{debug, info};
 
 use crate::{
     WorkspaceError,
@@ -37,11 +37,12 @@ use crate::{
         completions::{CompletionsResult, GetCompletionsParams, get_statement_for_completions},
         diagnostics::{PullDiagnosticsParams, PullDiagnosticsResult},
     },
-    settings::{Settings, SettingsHandle, SettingsHandleMut},
+    settings::{WorkspaceSettings, WorkspaceSettingsHandle, WorkspaceSettingsHandleMut},
 };
 
 use super::{
-    GetFileContentParams, IsPathIgnoredParams, OpenFileParams, ServerInfo, UpdateSettingsParams,
+    GetFileContentParams, IsPathIgnoredParams, OpenFileParams, ProjectKey,
+    RegisterProjectFolderParams, ServerInfo, UnregisterProjectFolderParams, UpdateSettingsParams,
     Workspace,
 };
 
@@ -51,7 +52,8 @@ mod analyser;
 mod annotation;
 mod async_helper;
 mod change;
-mod db_connection;
+mod connection_key;
+mod connection_manager;
 pub(crate) mod document;
 mod migration;
 pub(crate) mod parsed_document;
@@ -63,14 +65,14 @@ mod tree_sitter;
 
 pub(super) struct WorkspaceServer {
     /// global settings object for this workspace
-    settings: RwLock<Settings>,
+    settings: RwLock<WorkspaceSettings>,
 
     /// Stores the schema cache for this workspace
     schema_cache: SchemaCacheManager,
 
     parsed_documents: DashMap<PgTPath, ParsedDocument>,
 
-    connection: RwLock<DbConnection>,
+    connection: ConnectionManager,
 }
 
 /// The `Workspace` object is long-lived, so we want it to be able to cross
@@ -91,23 +93,60 @@ impl WorkspaceServer {
         Self {
             settings: RwLock::default(),
             parsed_documents: DashMap::default(),
-            schema_cache: SchemaCacheManager::default(),
-            connection: RwLock::default(),
+            schema_cache: SchemaCacheManager::new(),
+            connection: ConnectionManager::new(),
         }
     }
 
     /// Provides a reference to the current settings
-    fn settings(&self) -> SettingsHandle {
-        SettingsHandle::new(&self.settings)
+    fn workspaces(&self) -> WorkspaceSettingsHandle {
+        WorkspaceSettingsHandle::new(&self.settings)
     }
 
-    fn settings_mut(&self) -> SettingsHandleMut {
-        SettingsHandleMut::new(&self.settings)
+    fn workspaces_mut(&self) -> WorkspaceSettingsHandleMut {
+        WorkspaceSettingsHandleMut::new(&self.settings)
+    }
+
+    fn get_current_connection(&self) -> Option<PgPool> {
+        let settings = self.workspaces();
+        let settings = settings.settings()?;
+        self.connection.get_pool(&settings.db)
+    }
+
+    /// Register a new project in the current workspace
+    fn register_project(&self, path: PathBuf) -> ProjectKey {
+        let mut workspace = self.workspaces_mut();
+        let workspace_mut = workspace.as_mut();
+        workspace_mut.insert_project(path.clone())
+    }
+
+    /// Retrieves the current project path
+    fn get_current_project_path(&self) -> Option<PgTPath> {
+        self.workspaces().path().cloned()
+    }
+
+    /// Sets the current project of the current workspace
+    fn set_current_project(&self, project_key: ProjectKey) {
+        let mut workspace = self.workspaces_mut();
+        let workspace_mut = workspace.as_mut();
+        workspace_mut.set_current_project(project_key);
+    }
+
+    /// Checks whether the current path belongs to the current project.
+    ///
+    /// If there's a match, and the match **isn't** the current project, it returns the new key.
+    fn path_belongs_to_current_workspace(&self, path: &PgTPath) -> Option<ProjectKey> {
+        let workspaces = self.workspaces();
+        workspaces.as_ref().path_belongs_to_current_workspace(path)
     }
 
     fn is_ignored_by_migration_config(&self, path: &Path) -> bool {
-        let set = self.settings();
-        set.as_ref()
+        let settings = self.workspaces();
+        let settings = settings.settings();
+        let Some(settings) = settings else {
+            return false;
+        };
+        settings
             .migrations
             .as_ref()
             .and_then(|migration_settings| {
@@ -131,8 +170,12 @@ impl WorkspaceServer {
 
     /// Check whether a file is ignored in the top-level config `files.ignore`/`files.include`
     fn is_ignored_by_top_level_config(&self, path: &Path) -> bool {
-        let set = self.settings();
-        let settings = set.as_ref();
+        let settings = self.workspaces();
+        let settings = settings.settings();
+        let Some(settings) = settings else {
+            return false;
+        };
+
         let is_included = settings.files.included_files.is_empty()
             || is_dir(path)
             || settings.files.included_files.matches_path(path);
@@ -155,6 +198,48 @@ impl WorkspaceServer {
 }
 
 impl Workspace for WorkspaceServer {
+    fn register_project_folder(
+        &self,
+        params: RegisterProjectFolderParams,
+    ) -> Result<ProjectKey, WorkspaceError> {
+        let current_project_path = self.get_current_project_path();
+        debug!(
+            "Compare the current project with the new one {:?} {:?} {:?}",
+            current_project_path,
+            params.path.as_ref(),
+            current_project_path.as_deref() != params.path.as_ref()
+        );
+
+        let is_new_path = match (current_project_path.as_deref(), params.path.as_ref()) {
+            (Some(current_project_path), Some(params_path)) => current_project_path != params_path,
+            (Some(_), None) => {
+                // If the current project is set, but no path is provided, we assume it's a new project
+                true
+            }
+            _ => true,
+        };
+
+        if is_new_path {
+            let path = params.path.unwrap_or_default();
+            let key = self.register_project(path.clone());
+            if params.set_as_current_workspace {
+                self.set_current_project(key);
+            }
+            Ok(key)
+        } else {
+            Ok(self.workspaces().as_ref().get_current_project_key())
+        }
+    }
+
+    fn unregister_project_folder(
+        &self,
+        params: UnregisterProjectFolderParams,
+    ) -> Result<(), WorkspaceError> {
+        let mut workspace = self.workspaces_mut();
+        workspace.as_mut().remove_project(params.path.as_path());
+        Ok(())
+    }
+
     /// Update the global settings for this workspace
     ///
     /// ## Panics
@@ -162,24 +247,17 @@ impl Workspace for WorkspaceServer {
     /// by another thread having previously panicked while holding the lock
    #[tracing::instrument(level = "trace", skip(self), err)]
     fn update_settings(&self, params: UpdateSettingsParams) -> Result<(), WorkspaceError> {
-        tracing::info!("Updating settings in workspace");
-
-        self.settings_mut().as_mut().merge_with_configuration(
-            params.configuration,
-            params.workspace_directory,
-            params.vcs_base_path,
-            params.gitignore_matches.as_slice(),
-        )?;
-
-        tracing::info!("Updated settings in workspace");
-        tracing::debug!("Updated settings are {:#?}", self.settings());
-
-        self.connection
-            .write()
-            .unwrap()
-            .set_conn_settings(&self.settings().as_ref().db);
-
-        tracing::info!("Updated Db connection settings");
+        let mut workspace = self.workspaces_mut();
+
+        workspace
+            .as_mut()
+            .get_current_settings_mut()
+            .merge_with_configuration(
+                params.configuration,
+                params.workspace_directory,
+                params.vcs_base_path,
+                params.gitignore_matches.as_slice(),
+            )?;
 
         Ok(())
     }
@@ -193,6 +271,10 @@ impl Workspace for WorkspaceServer {
             ParsedDocument::new(params.path.clone(), params.content, params.version)
         });
 
+        if let Some(project_key) = self.path_belongs_to_current_workspace(&params.path) {
+            self.set_current_project(project_key);
+        }
+
         Ok(())
     }
 
@@ -250,15 +332,13 @@ impl Workspace for WorkspaceServer {
             .get(&params.path)
             .ok_or(WorkspaceError::not_found())?;
 
-        let settings = self
-            .settings
-            .read()
-            .expect("Unable to read settings for Code Actions");
+        let settings = self.workspaces();
+        let settings = settings.settings();
 
-        let disabled_reason: Option<String> = if settings.db.allow_statement_executions {
-            None
-        } else {
-            Some("Statement execution not allowed against database.".into())
+        let disabled_reason = match settings {
+            Some(settings) if settings.db.allow_statement_executions => None,
+            Some(_) => Some("Statement execution is disabled in the settings.".into()),
+            None => Some("Statement execution not allowed against database.".into()),
         };
 
         let actions = parser
@@ -310,15 +390,13 @@ impl Workspace for WorkspaceServer {
             });
         };
 
-        let conn = self.connection.read().unwrap();
-        let pool = match conn.get_pool() {
-            Some(p) => p,
-            None => {
-                return Ok(ExecuteStatementResult {
-                    message: "Not connected to database.".into(),
-                });
-            }
-        };
+        let pool = self.get_current_connection();
+        if pool.is_none() {
+            return Ok(ExecuteStatementResult {
+                message: "No database connection available.".into(),
+            });
+        }
+        let pool = pool.unwrap();
 
         let result = run_async(async move { pool.execute(sqlx::query(&content)).await })??;
 
@@ -334,16 +412,29 @@ impl Workspace for WorkspaceServer {
         &self,
         params: PullDiagnosticsParams,
     ) -> Result<PullDiagnosticsResult, WorkspaceError> {
-        let settings = self.settings();
+        let settings = self.workspaces();
+
+        let settings = match settings.settings() {
+            Some(settings) => settings,
+            None => {
+                // return an empty result if no settings are available
+                // we might want to return an error here in the future
+                return Ok(PullDiagnosticsResult {
+                    diagnostics: Vec::new(),
+                    errors: 0,
+                    skipped_diagnostics: 0,
+                });
+            }
+        };
 
         // create analyser for this run
         // first, collect enabled and disabled rules from the workspace settings
-        let (enabled_rules, disabled_rules) = AnalyserVisitorBuilder::new(settings.as_ref())
+        let (enabled_rules, disabled_rules) = AnalyserVisitorBuilder::new(settings)
             .with_linter_rules(&params.only, &params.skip)
             .finish();
         // then, build a map that contains all options
         let options = AnalyserOptions {
-            rules: to_analyser_rules(settings.as_ref()),
+            rules: to_analyser_rules(settings),
         };
         // next, build the analysis filter which will be used to match rules
         let filter = AnalysisFilter {
@@ -364,15 +455,9 @@ impl Workspace for WorkspaceServer {
 
         let mut diagnostics: Vec<SDiagnostic> = parser.document_diagnostics().to_vec();
 
-        if let Some(pool) = self
-            .connection
-            .read()
-            .expect("DbConnection RwLock panicked")
-            .get_pool()
-        {
+        if let Some(pool) = self.get_current_connection() {
             let path_clone = params.path.clone();
             let schema_cache = self.schema_cache.load(pool.clone())?;
-            let schema_cache_arc = schema_cache.get_arc();
             let input = parser.iter(AsyncDiagnosticsMapper).collect::<Vec<_>>();
             // sorry for the ugly code :(
             let async_results = run_async(async move {
@@ -380,7 +465,7 @@ impl Workspace for WorkspaceServer {
                     .map(|(_id, range, content, ast, cst, sign)| {
                         let pool = pool.clone();
                         let path = path_clone.clone();
-                        let schema_cache = Arc::clone(&schema_cache_arc);
+                        let schema_cache = Arc::clone(&schema_cache);
                         async move {
                             if let Some(ast) = ast {
                                 pgt_typecheck::check_sql(TypecheckParams {
@@ -461,7 +546,6 @@ impl Workspace for WorkspaceServer {
                     || d.severity(),
                     |category| {
                         settings
-                            .as_ref()
                            .get_severity_from_rule_code(category)
                            .unwrap_or(Severity::Warning)
                     },
@@ -503,13 +587,12 @@ impl Workspace for WorkspaceServer {
             .get(&params.path)
             .ok_or(WorkspaceError::not_found())?;
 
-        let pool = match self.connection.read().unwrap().get_pool() {
-            Some(pool) => pool,
-            None => {
-                tracing::debug!("No connection to database. Skipping completions.");
-                return Ok(CompletionsResult::default());
-            }
-        };
+        let pool = self.get_current_connection();
+        if pool.is_none() {
+            tracing::debug!("No database connection available. Skipping completions.");
+            return Ok(CompletionsResult::default());
+        }
+        let pool = pool.unwrap();
 
         let schema_cache = self.schema_cache.load(pool)?;
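The key decision in `register_project_folder` above is whether the incoming path counts as a new project: a registration is "new" unless both sides carry a path and the paths match. A standalone sketch of that rule with the paths made explicit (illustrative values only, not part of the patch):

```rust
use std::path::{Path, PathBuf};

fn is_new_path(current: Option<&Path>, incoming: Option<&PathBuf>) -> bool {
    match (current, incoming) {
        // Same folder on both sides: reuse the existing project key.
        (Some(current), Some(incoming)) => current != incoming.as_path(),
        // A currently-set project with no incoming path is treated as new,
        // and so is every other combination.
        _ => true,
    }
}

fn main() {
    let a = PathBuf::from("/repo/a");
    let b = PathBuf::from("/repo/b");
    assert!(!is_new_path(Some(a.as_path()), Some(&a))); // same folder: reuse key
    assert!(is_new_path(Some(a.as_path()), Some(&b))); // different folder: register
    assert!(is_new_path(Some(a.as_path()), None)); // no path given: treated as new
    assert!(is_new_path(None, Some(&a))); // nothing registered yet
}
```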
tracing::info!("Updated settings in workspace"); - tracing::debug!("Updated settings are {:#?}", self.settings()); - - self.connection - .write() - .unwrap() - .set_conn_settings(&self.settings().as_ref().db); - - tracing::info!("Updated Db connection settings"); + let mut workspace = self.workspaces_mut(); + + workspace + .as_mut() + .get_current_settings_mut() + .merge_with_configuration( + params.configuration, + params.workspace_directory, + params.vcs_base_path, + params.gitignore_matches.as_slice(), + )?; Ok(()) } @@ -193,6 +271,10 @@ impl Workspace for WorkspaceServer { ParsedDocument::new(params.path.clone(), params.content, params.version) }); + if let Some(project_key) = self.path_belongs_to_current_workspace(¶ms.path) { + self.set_current_project(project_key); + } + Ok(()) } @@ -250,15 +332,13 @@ impl Workspace for WorkspaceServer { .get(¶ms.path) .ok_or(WorkspaceError::not_found())?; - let settings = self - .settings - .read() - .expect("Unable to read settings for Code Actions"); + let settings = self.workspaces(); + let settings = settings.settings(); - let disabled_reason: Option = if settings.db.allow_statement_executions { - None - } else { - Some("Statement execution not allowed against database.".into()) + let disabled_reason = match settings { + Some(settings) if settings.db.allow_statement_executions => None, + Some(_) => Some("Statement execution is disabled in the settings.".into()), + None => Some("Statement execution not allowed against database.".into()), }; let actions = parser @@ -310,15 +390,13 @@ impl Workspace for WorkspaceServer { }); }; - let conn = self.connection.read().unwrap(); - let pool = match conn.get_pool() { - Some(p) => p, - None => { - return Ok(ExecuteStatementResult { - message: "Not connected to database.".into(), - }); - } - }; + let pool = self.get_current_connection(); + if pool.is_none() { + return Ok(ExecuteStatementResult { + message: "No database connection available.".into(), + }); + } + let pool = pool.unwrap(); let result = run_async(async move { pool.execute(sqlx::query(&content)).await })??; @@ -334,16 +412,29 @@ impl Workspace for WorkspaceServer { &self, params: PullDiagnosticsParams, ) -> Result { - let settings = self.settings(); + let settings = self.workspaces(); + + let settings = match settings.settings() { + Some(settings) => settings, + None => { + // return an empty result if no settings are available + // we might want to return an error here in the future + return Ok(PullDiagnosticsResult { + diagnostics: Vec::new(), + errors: 0, + skipped_diagnostics: 0, + }); + } + }; // create analyser for this run // first, collect enabled and disabled rules from the workspace settings - let (enabled_rules, disabled_rules) = AnalyserVisitorBuilder::new(settings.as_ref()) + let (enabled_rules, disabled_rules) = AnalyserVisitorBuilder::new(settings) .with_linter_rules(¶ms.only, ¶ms.skip) .finish(); // then, build a map that contains all options let options = AnalyserOptions { - rules: to_analyser_rules(settings.as_ref()), + rules: to_analyser_rules(settings), }; // next, build the analysis filter which will be used to match rules let filter = AnalysisFilter { @@ -364,15 +455,9 @@ impl Workspace for WorkspaceServer { let mut diagnostics: Vec = parser.document_diagnostics().to_vec(); - if let Some(pool) = self - .connection - .read() - .expect("DbConnection RwLock panicked") - .get_pool() - { + if let Some(pool) = self.get_current_connection() { let path_clone = params.path.clone(); let schema_cache = 
diff --git a/crates/pgt_workspace/src/workspace/server/connection_manager.rs b/crates/pgt_workspace/src/workspace/server/connection_manager.rs
new file mode 100644
index 00000000..d21988f0
--- /dev/null
+++ b/crates/pgt_workspace/src/workspace/server/connection_manager.rs
@@ -0,0 +1,95 @@
+use std::time::{Duration, Instant};
+
+use dashmap::DashMap;
+use sqlx::{PgPool, Postgres, pool::PoolOptions, postgres::PgConnectOptions};
+
+use crate::settings::DatabaseSettings;
+
+use super::connection_key::ConnectionKey;
+
+/// Cached connection pool with last access time
+struct CachedPool {
+    pool: PgPool,
+    last_accessed: Instant,
+    idle_timeout: Duration,
+}
+
+#[derive(Default)]
+pub struct ConnectionManager {
+    pools: DashMap<ConnectionKey, CachedPool>,
+}
+
+impl ConnectionManager {
+    pub fn new() -> Self {
+        Self {
+            pools: DashMap::new(),
+        }
+    }
+
+    /// Get a connection pool for the given database settings.
+    /// If a pool already exists for these settings, it will be returned.
+    /// If not, a new pool will be created if connections are enabled.
+    /// Will also clean up idle connections that haven't been accessed for a while.
+    pub(crate) fn get_pool(&self, settings: &DatabaseSettings) -> Option<PgPool> {
+        let key = ConnectionKey::from(settings);
+
+        // Cleanup idle connections first
+        self.cleanup_idle_pools(&key);
+
+        if !settings.enable_connection {
+            tracing::info!("Database connection disabled.");
+            return None;
+        }
+
+        // If we have a cached pool, update its last_accessed time and return it
+        if let Some(mut cached_pool) = self.pools.get_mut(&key) {
+            cached_pool.last_accessed = Instant::now();
+            return Some(cached_pool.pool.clone());
+        }
+
+        // Create a new pool
+        let config = PgConnectOptions::new()
+            .host(&settings.host)
+            .port(settings.port)
+            .username(&settings.username)
+            .password(&settings.password)
+            .database(&settings.database);
+
+        let timeout = settings.conn_timeout_secs;
+
+        let pool = PoolOptions::<Postgres>::new()
+            .acquire_timeout(timeout)
+            .acquire_slow_threshold(Duration::from_secs(2))
+            .connect_lazy_with(config);
+
+        let cached_pool = CachedPool {
+            pool: pool.clone(),
+            last_accessed: Instant::now(),
+            // TODO: add this to the db settings, for now default to five minutes
+            idle_timeout: Duration::from_secs(60 * 5),
+        };
+
+        self.pools.insert(key, cached_pool);
+
+        Some(pool)
+    }
+
+    /// Remove pools that haven't been accessed for longer than the idle timeout
+    fn cleanup_idle_pools(&self, ignore_key: &ConnectionKey) {
+        let now = Instant::now();
+
+        // Use retain to keep only non-idle connections
+        self.pools.retain(|key, cached_pool| {
+            let idle_duration = now.duration_since(cached_pool.last_accessed);
+            if idle_duration > cached_pool.idle_timeout && key != ignore_key {
+                tracing::debug!(
+                    "Removing idle database connection (idle for {:?})",
+                    idle_duration
+                );
+                false
+            } else {
+                true
+            }
+        });
+    }
+}
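The eviction rule in `cleanup_idle_pools` has two parts: a pool is dropped only when it has been idle past its timeout, and the key currently being requested is always spared. A dependency-free sketch of exactly that rule, using a plain `HashMap` instead of `DashMap` (not part of the patch):

```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};

struct Cached {
    last_accessed: Instant,
    idle_timeout: Duration,
}

fn cleanup(pools: &mut HashMap<String, Cached>, ignore_key: &str) {
    let now = Instant::now();
    pools.retain(|key, cached| {
        let idle = now.duration_since(cached.last_accessed);
        // Keep the entry unless it is past its timeout AND not the key in use.
        !(idle > cached.idle_timeout && key != ignore_key)
    });
}

fn main() {
    let mut pools = HashMap::new();
    pools.insert(
        "stale".to_string(),
        Cached {
            last_accessed: Instant::now(),
            idle_timeout: Duration::ZERO, // evict as soon as any time has passed
        },
    );
    std::thread::sleep(Duration::from_millis(5));
    cleanup(&mut pools, "other");
    assert!(pools.is_empty());

    // The key currently being requested is never evicted, however stale:
    pools.insert(
        "active".to_string(),
        Cached {
            last_accessed: Instant::now(),
            idle_timeout: Duration::ZERO,
        },
    );
    std::thread::sleep(Duration::from_millis(5));
    cleanup(&mut pools, "active");
    assert_eq!(pools.len(), 1);
}
```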
diff --git a/crates/pgt_workspace/src/workspace/server/db_connection.rs b/crates/pgt_workspace/src/workspace/server/db_connection.rs
deleted file mode 100644
index d002c0a2..00000000
--- a/crates/pgt_workspace/src/workspace/server/db_connection.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-use std::time::Duration;
-
-use sqlx::{PgPool, Postgres, pool::PoolOptions, postgres::PgConnectOptions};
-
-use crate::settings::DatabaseSettings;
-
-#[derive(Default)]
-pub struct DbConnection {
-    pool: Option<PgPool>,
-}
-
-impl DbConnection {
-    /// There might be no pool available if the user decides to skip db checks.
-    pub(crate) fn get_pool(&self) -> Option<PgPool> {
-        self.pool.clone()
-    }
-
-    pub(crate) fn set_conn_settings(&mut self, settings: &DatabaseSettings) {
-        if !settings.enable_connection {
-            tracing::info!("Database connection disabled.");
-            return;
-        }
-
-        let config = PgConnectOptions::new()
-            .host(&settings.host)
-            .port(settings.port)
-            .username(&settings.username)
-            .password(&settings.password)
-            .database(&settings.database);
-
-        let timeout = settings.conn_timeout_secs;
-
-        let pool = PoolOptions::<Postgres>::new()
-            .acquire_timeout(timeout)
-            .acquire_slow_threshold(Duration::from_secs(2))
-            .connect_lazy_with(config);
-
-        self.pool = Some(pool);
-    }
-}
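Both the deleted `DbConnection` and its `ConnectionManager` replacement build pools with sqlx's `connect_lazy_with`, so constructing a pool never touches the database; the first query does. That is what makes it cheap for the manager to keep one pool per `ConnectionKey`. A minimal sketch of the construction step (illustrative connection options, assuming the sqlx API as used in this patch):

```rust
use sqlx::{Postgres, pool::PoolOptions, postgres::PgConnectOptions};
use std::time::Duration;

fn main() {
    let config = PgConnectOptions::new()
        .host("localhost")
        .port(5432)
        .username("postgres")
        .database("app");

    // No I/O happens here; the pool connects lazily on first acquire.
    let _pool = PoolOptions::<Postgres>::new()
        .acquire_timeout(Duration::from_secs(10))
        .acquire_slow_threshold(Duration::from_secs(2))
        .connect_lazy_with(config);
}
```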
diff --git a/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs b/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs
index 03cd6ded..b42dfc34 100644
--- a/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs
+++ b/crates/pgt_workspace/src/workspace/server/schema_cache_manager.rs
@@ -1,97 +1,47 @@
-use std::sync::{Arc, RwLock, RwLockReadGuard};
+use std::sync::Arc;
 
+use dashmap::DashMap;
 use pgt_schema_cache::SchemaCache;
 use sqlx::PgPool;
 
 use crate::WorkspaceError;
 
-use super::async_helper::run_async;
-
-pub(crate) struct SchemaCacheHandle<'a> {
-    inner: RwLockReadGuard<'a, SchemaCacheManagerInner>,
-}
-
-impl<'a> SchemaCacheHandle<'a> {
-    pub(crate) fn new(cache: &'a RwLock<SchemaCacheManagerInner>) -> Self {
-        Self {
-            inner: cache.read().unwrap(),
-        }
-    }
-
-    pub(crate) fn wrap(inner: RwLockReadGuard<'a, SchemaCacheManagerInner>) -> Self {
-        Self { inner }
-    }
-
-    pub fn get_arc(&self) -> Arc<SchemaCache> {
-        Arc::clone(&self.inner.cache)
-    }
-}
-
-impl AsRef<SchemaCache> for SchemaCacheHandle<'_> {
-    fn as_ref(&self) -> &SchemaCache {
-        &self.inner.cache
-    }
-}
-
-#[derive(Default)]
-pub(crate) struct SchemaCacheManagerInner {
-    cache: Arc<SchemaCache>,
-    conn_str: String,
-}
+use super::{async_helper::run_async, connection_key::ConnectionKey};
 
 #[derive(Default)]
 pub struct SchemaCacheManager {
-    inner: RwLock<SchemaCacheManagerInner>,
+    schemas: DashMap<ConnectionKey, Arc<SchemaCache>>,
 }
 
 impl SchemaCacheManager {
-    pub fn load(&self, pool: PgPool) -> Result<SchemaCacheHandle, WorkspaceError> {
-        let new_conn_str = pool_to_conn_str(&pool);
-
-        {
-            // return early if the connection string is the same
-            let inner = self.inner.read().unwrap();
-            if new_conn_str == inner.conn_str {
-                tracing::info!("Same connection string, no updates.");
-                return Ok(SchemaCacheHandle::wrap(inner));
-            }
+    pub fn new() -> Self {
+        Self {
+            schemas: DashMap::new(),
         }
+    }
 
-        let maybe_refreshed = run_async(async move { SchemaCache::load(&pool).await })?;
-        let refreshed = maybe_refreshed?;
-
-        {
-            // write lock must be dropped before we return the reference below, hence the block
-            let mut inner = self.inner.write().unwrap();
+    pub fn load(&self, pool: PgPool) -> Result<Arc<SchemaCache>, WorkspaceError> {
+        let key: ConnectionKey = (&pool).into();
 
-            // Double-check that we still need to refresh (another thread might have done it)
-            if new_conn_str != inner.conn_str {
-                inner.cache = Arc::new(refreshed);
-                inner.conn_str = new_conn_str;
-                tracing::info!("Refreshed connection.");
-            }
+        if let Some(cache) = self.schemas.get(&key) {
+            return Ok(Arc::clone(&*cache));
         }
 
-        Ok(SchemaCacheHandle::new(&self.inner))
-    }
-}
-
-fn pool_to_conn_str(pool: &PgPool) -> String {
-    let conn = pool.connect_options();
-
-    match conn.get_database() {
-        None => format!(
-            "postgres://{}:@{}:{}",
-            conn.get_username(),
-            conn.get_host(),
-            conn.get_port()
-        ),
-        Some(db) => format!(
-            "postgres://{}:@{}:{}/{}",
-            conn.get_username(),
-            conn.get_host(),
-            conn.get_port(),
-            db
-        ),
+        let schema_cache = self
+            .schemas
+            .entry(key)
+            .or_try_insert_with::<WorkspaceError>(|| {
+                // This closure will only be called once per key if multiple threads
+                // try to access the same key simultaneously
+                let pool_clone = pool.clone();
+                let schema_cache =
+                    Arc::new(run_async(
+                        async move { SchemaCache::load(&pool_clone).await },
+                    )??);
+
+                Ok(schema_cache)
+            })?;
+
+        Ok(Arc::clone(&schema_cache))
     }
 }
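The new `SchemaCacheManager::load` replaces the hand-rolled double-checked `RwLock` with DashMap's entry API: a fast-path read, then `or_try_insert_with`, which runs the expensive load at most once per key even under concurrent access. A sketch of the same pattern with `String` standing in for `ConnectionKey` and `Vec<u8>` for `SchemaCache` (illustrative types, not part of the patch):

```rust
use dashmap::DashMap;
use std::sync::Arc;

fn load(cache: &DashMap<String, Arc<Vec<u8>>>, key: String) -> Result<Arc<Vec<u8>>, String> {
    // Fast path: a read-only hit avoids taking the entry's write lock.
    if let Some(hit) = cache.get(&key) {
        return Ok(Arc::clone(&*hit));
    }
    let entry = cache
        .entry(key)
        .or_try_insert_with::<String>(|| {
            // Runs at most once per key; the entry holds the shard lock
            // while the value is being produced.
            Ok(Arc::new(b"schema".to_vec()))
        })?;
    Ok(Arc::clone(&entry))
}

fn main() {
    let cache = DashMap::new();
    let first = load(&cache, "db-a".into()).unwrap();
    let second = load(&cache, "db-a".into()).unwrap();
    assert!(Arc::ptr_eq(&first, &second)); // same cached Arc both times
}
```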
diff --git a/crates/pgt_workspace/src/workspace_types.rs b/crates/pgt_workspace/src/workspace_types.rs
index 02215e79..b902fad6 100644
--- a/crates/pgt_workspace/src/workspace_types.rs
+++ b/crates/pgt_workspace/src/workspace_types.rs
@@ -457,9 +457,10 @@ macro_rules! workspace_method {
 }
 
 /// Returns a list of signature for all the methods in the [Workspace] trait
-pub fn methods() -> [WorkspaceMethod; 8] {
+pub fn methods() -> [WorkspaceMethod; 9] {
     [
         workspace_method!(is_path_ignored),
+        workspace_method!(register_project_folder),
         workspace_method!(get_file_content),
         workspace_method!(pull_diagnostics),
         workspace_method!(get_completions),
diff --git a/docs/schemas/0.0.0/schema.json b/docs/schemas/0.0.0/schema.json
index faba3b5c..8c478d0a 100644
--- a/docs/schemas/0.0.0/schema.json
+++ b/docs/schemas/0.0.0/schema.json
@@ -22,6 +22,17 @@
         }
       ]
     },
+    "extends": {
+      "description": "A list of paths to other JSON files, used to extend the current configuration.",
+      "anyOf": [
+        {
+          "$ref": "#/definitions/StringSet"
+        },
+        {
+          "type": "null"
+        }
+      ]
+    },
     "files": {
       "description": "The configuration of the filesystem",
       "anyOf": [
diff --git a/docs/schemas/latest/schema.json b/docs/schemas/latest/schema.json
index faba3b5c..8c478d0a 100644
--- a/docs/schemas/latest/schema.json
+++ b/docs/schemas/latest/schema.json
@@ -22,6 +22,17 @@
         }
       ]
     },
+    "extends": {
+      "description": "A list of paths to other JSON files, used to extend the current configuration.",
+      "anyOf": [
+        {
+          "$ref": "#/definitions/StringSet"
+        },
+        {
+          "type": "null"
+        }
+      ]
+    },
     "files": {
       "description": "The configuration of the filesystem",
       "anyOf": [
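Per the schema additions above, the new `extends` field accepts either a `StringSet` (an array of strings) or `null`. A minimal deserialization sketch, with `Vec<String>` standing in for `StringSet` (illustrative types, not part of the patch):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct PartialConfiguration {
    // Option<Vec<String>> covers both the missing-field and explicit-null cases.
    extends: Option<Vec<String>>,
}

fn main() -> Result<(), serde_json::Error> {
    let with_extends: PartialConfiguration =
        serde_json::from_str(r#"{ "extends": ["./shared/postgrestools.jsonc"] }"#)?;
    assert_eq!(
        with_extends.extends.as_deref(),
        Some(["./shared/postgrestools.jsonc".to_string()].as_slice())
    );

    let without: PartialConfiguration = serde_json::from_str(r#"{ "extends": null }"#)?;
    assert!(without.extends.is_none());
    Ok(())
}
```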
diff --git a/justfile b/justfile
index a55207ae..c868a122 100644
--- a/justfile
+++ b/justfile
@@ -6,6 +6,7 @@ alias r := ready
 alias l := lint
 alias t := test
 alias rg := reset-git
+alias qm := quick-modify
 
 # Installs the tools needed to develop
 install-tools:
@@ -132,10 +133,22 @@ merge-main:
     git fetch origin main:main
     git merge main
 
+quick-create branch commit:
+    git checkout -b {{branch}}
+    git add -A
+    git commit -m "{{commit}}"
+    git push
+    gh pr create --fill
+
+quick-modify:
+    just format
+    git add -A
+    git commit -m "progress"
+    git push
 
 # Make sure to set your PGT_LOG_PATH in your shell profile.
 # You can use the PGT_LOG_LEVEL to set your log level.
 # We recommend installing `bunyan` (npm i -g bunyan) and piping the output through it for color-coding:
 # just show-logs | bunyan
 show-logs:
-    tail -f $(ls $PGT_LOG_PATH/server.log.* | sort -t- -k2,2 -k3,3 -k4,4 | tail -n 1)
\ No newline at end of file
+    tail -f $(ls $PGT_LOG_PATH/server.log.* | sort -t- -k2,2 -k3,3 -k4,4 | tail -n 1)
diff --git a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts
index a35dad81..aaa5a42a 100644
--- a/packages/@postgrestools/backend-jsonrpc/src/workspace.ts
+++ b/packages/@postgrestools/backend-jsonrpc/src/workspace.ts
@@ -19,6 +19,11 @@ export type FileKind = FileKind2[];
 /**
  * The priority of the file
  */
 export type FileKind2 = "Config" | "Ignore" | "Inspectable" | "Handleable";
+export interface RegisterProjectFolderParams {
+	path?: string;
+	setAsCurrentWorkspace: boolean;
+}
+export type ProjectKey = string;
 export interface GetFileContentParams {
 	path: PgTPath;
 }
@@ -92,7 +97,7 @@ export type DiagnosticTags = DiagnosticTag[];
 /**
  * Serializable representation of a [Diagnostic](super::Diagnostic) advice
 
-See the [Visitor] trait for additional documentation on all the supported advice types. 
+See the [Visitor] trait for additional documentation on all the supported advice types.
  */
 export type Advice =
 	| { log: [LogCategory, MarkupBuf] }
@@ -185,6 +190,7 @@ export interface CompletionsResult {
 export interface CompletionItem {
 	completion_text?: CompletionText;
 	description: string;
+	detail?: string;
 	kind: CompletionItemKind;
 	label: string;
 	preselected: boolean;
@@ -196,16 +202,22 @@ export interface CompletionItem {
 /**
  * The text that the editor should fill in. If `None`, the `label` should be used. Tables, for example, might have different completion_texts:
 
-label: "users", description: "Schema: auth", completion_text: "auth.users". 
+label: "users", description: "Schema: auth", completion_text: "auth.users".
  */
 export interface CompletionText {
+	is_snippet: boolean;
 	/**
 	 * A `range` is required because some editors replace the current token, others naively insert the text. Having a range where start == end makes it an insertion.
 	 */
 	range: TextRange;
 	text: string;
 }
-export type CompletionItemKind = "table" | "function" | "column" | "schema";
+export type CompletionItemKind =
+	| "table"
+	| "function"
+	| "column"
+	| "schema"
+	| "policy";
 export interface UpdateSettingsParams {
 	configuration: PartialConfiguration;
 	gitignore_matches: string[];
@@ -224,6 +236,10 @@ export interface PartialConfiguration {
 	 * The configuration of the database connection
 	 */
 	db?: PartialDatabaseConfiguration;
+	/**
+	 * A list of paths to other JSON files, used to extend the current configuration.
+	 */
+	extends?: StringSet;
 	/**
 	 * The configuration of the filesystem
 	 */
@@ -271,6 +287,7 @@ export interface PartialDatabaseConfiguration {
 	 */
 	username?: string;
 }
+export type StringSet = string[];
 /**
  * The configuration of the filesystem
 */
@@ -338,7 +355,7 @@ export interface PartialVcsConfiguration {
 	/**
 	 * The folder where we should check for VCS files. By default, we will use the same folder where `postgrestools.jsonc` was found.
 
-If we can't find the configuration, it will attempt to use the current working directory. If no current working directory can be found, we won't use the VCS integration, and a diagnostic will be emitted 
+If we can't find the configuration, it will attempt to use the current working directory. If no current working directory can be found, we won't use the VCS integration, and a diagnostic will be emitted
 	 */
 	root?: string;
 	/**
@@ -346,7 +363,6 @@ If we can't find the configuration, it will attempt to use the current working d
 	 */
 	useIgnoreFile?: boolean;
 }
-export type StringSet = string[];
 export interface Rules {
 	/**
 	 * It enables ALL rules. The rules that belong to `nursery` won't be enabled.
 	 */
@@ -425,6 +441,9 @@ export interface CloseFileParams {
 export type Configuration = PartialConfiguration;
 export interface Workspace {
 	isPathIgnored(params: IsPathIgnoredParams): Promise<boolean>;
+	registerProjectFolder(
+		params: RegisterProjectFolderParams,
+	): Promise<ProjectKey>;
 	getFileContent(params: GetFileContentParams): Promise<string>;
 	pullDiagnostics(
 		params: PullDiagnosticsParams,
 	): Promise<PullDiagnosticsResult>;
@@ -441,6 +460,9 @@ export function createWorkspace(transport: Transport): Workspace {
 		isPathIgnored(params) {
 			return transport.request("pgt/is_path_ignored", params);
 		},
+		registerProjectFolder(params) {
+			return transport.request("pgt/register_project_folder", params);
+		},
 		getFileContent(params) {
 			return transport.request("pgt/get_file_content", params);
 		},
diff --git a/packages/@postgrestools/backend-jsonrpc/tests/workspace.test.mjs b/packages/@postgrestools/backend-jsonrpc/tests/workspace.test.mjs
index c83d5e44..c35904c4 100644
--- a/packages/@postgrestools/backend-jsonrpc/tests/workspace.test.mjs
+++ b/packages/@postgrestools/backend-jsonrpc/tests/workspace.test.mjs
@@ -2,7 +2,7 @@ import { resolve } from "node:path";
 import { fileURLToPath } from "node:url";
 import { describe, expect, it } from "vitest";
 
-import { createWorkspaceWithBinary } from "../dist";
+import { createWorkspaceWithBinary } from "../src";
 
 describe("Workspace API", () => {
 	it("should process remote requests", async () => {
@@ -14,6 +14,9 @@ describe("Workspace API", () => {
 		);
 
 		const workspace = await createWorkspaceWithBinary(command);
+		workspace.registerProjectFolder({
+			setAsCurrentWorkspace: true,
+		});
 		await workspace.openFile({
 			path: {
 				path: "test.sql",