From 600fb6f533c4d1ec9aa4589791e58df4e0694256 Mon Sep 17 00:00:00 2001 From: Silas Marvin <19626586+SilasMarvin@users.noreply.github.com> Date: Sun, 11 Aug 2024 13:52:15 -0700 Subject: [PATCH] Add custom log file and improve logging --- .gitignore | 1 + Cargo.lock | 21 +++--- crates/lsp-ai/Cargo.toml | 3 +- crates/lsp-ai/src/main.rs | 65 +++++++++++++++---- .../lsp-ai/src/memory_backends/file_store.rs | 4 +- crates/lsp-ai/src/splitters/tree_sitter.rs | 8 +-- .../src/transformer_backends/anthropic.rs | 23 ++++--- .../lsp-ai/src/transformer_backends/gemini.rs | 17 +++-- .../transformer_backends/llama_cpp/model.rs | 9 ++- .../src/transformer_backends/mistral_fim.rs | 29 +++++---- .../lsp-ai/src/transformer_backends/ollama.rs | 46 ++++++++----- .../src/transformer_backends/open_ai/mod.rs | 54 ++++++++------- 12 files changed, 182 insertions(+), 98 deletions(-) diff --git a/.gitignore b/.gitignore index ec779b4..9c42cda 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ out dist lsp-ai.log .vsix +lsp-ai-chat.md diff --git a/Cargo.lock b/Cargo.lock index 1c4821e..bafd204 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "anstyle-parse" @@ -342,9 +342,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.2" +version = "4.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b230ab84b0ffdf890d5a10abdbc8b83ae1c4918275daea1ab8801f71536b2651" +checksum = "c937d4061031a6d0c8da4b9a4f98a172fc2976dfb1c19213a9cf7d0d3c837e36" dependencies = [ "clap_builder", "clap_derive", @@ -352,9 +352,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.2" +version = "4.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" +checksum = "85379ba512b21a328adf887e85f7742d12e96eb31f3ef077df4ffc26b506ffed" dependencies = [ "anstream", "anstyle", @@ -364,11 +364,11 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.0" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" +checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.52", @@ -1566,12 +1566,13 @@ dependencies = [ [[package]] name = "lsp-ai" -version = "0.4.0" +version = "0.4.1" dependencies = [ "anyhow", "assert_cmd", "async-trait", "cc", + "clap", "directories", "futures", "fxhash", diff --git a/crates/lsp-ai/Cargo.toml b/crates/lsp-ai/Cargo.toml index 1ea7146..3dc7377 100644 --- a/crates/lsp-ai/Cargo.toml +++ b/crates/lsp-ai/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lsp-ai" -version = "0.4.1" +version = "0.5.0" description.workspace = true repository.workspace = true @@ -42,6 +42,7 @@ md5 = "0.7.0" fxhash = "0.2.1" ordered-float = "4.2.1" futures = "0.3" +clap = { version = "4.5.14", features = ["derive"] } [build-dependencies] cc="1" diff --git a/crates/lsp-ai/src/main.rs b/crates/lsp-ai/src/main.rs index 564fb17..e7419eb 100644 --- a/crates/lsp-ai/src/main.rs +++ b/crates/lsp-ai/src/main.rs @@ -1,17 +1,21 @@ use anyhow::Result; - +use 
clap::Parser;
+use directories::BaseDirs;
 use lsp_server::{Connection, ExtractError, Message, Notification, Request, RequestId};
 use lsp_types::{
     request::{CodeActionRequest, CodeActionResolveRequest, Completion},
     CodeActionOptions, CompletionOptions, DidChangeTextDocumentParams, DidOpenTextDocumentParams,
     RenameFilesParams, ServerCapabilities, TextDocumentSyncKind,
 };
+use std::sync::Mutex;
 use std::{
     collections::HashMap,
+    fs,
+    path::Path,
     sync::{mpsc, Arc},
     thread,
 };
-use tracing::error;
+use tracing::{error, info};
 use tracing_subscriber::{EnvFilter, FmtSubscriber};
 
 mod config;
@@ -54,19 +58,58 @@ where
     req.extract(R::METHOD)
 }
 
+// LSP-AI parameters
+#[derive(Parser)]
+#[command(version)]
+struct Args {
+    // Whether to use a custom log file
+    #[arg(long, default_value_t = false)]
+    use_seperate_log_file: bool,
+}
+
+fn create_log_file(base_path: &Path) -> anyhow::Result<fs::File> {
+    let dir_path = base_path.join("lsp-ai");
+    fs::create_dir_all(&dir_path)?;
+    let file_path = dir_path.join("lsp-ai.log");
+    Ok(fs::File::create(file_path)?)
+}
+
 // Builds a tracing subscriber from the `LSP_AI_LOG` environment variable
 // If the variables value is malformed or missing, sets the default log level to ERROR
-fn init_logger() {
-    FmtSubscriber::builder()
-        .with_writer(std::io::stderr)
-        .with_ansi(false)
-        .without_time()
-        .with_env_filter(EnvFilter::from_env("LSP_AI_LOG"))
-        .init();
+fn init_logger(args: &Args) {
+    let builder = FmtSubscriber::builder().with_env_filter(EnvFilter::from_env("LSP_AI_LOG"));
+    let base_dirs = BaseDirs::new();
+
+    if args.use_seperate_log_file && base_dirs.is_some() {
+        let base_dirs = base_dirs.unwrap();
+        let cache_dir = base_dirs.cache_dir();
+        // Linux: /home/alice/.cache
+        // Windows: C:\Users\Alice\AppData\Local
+        // macOS: /Users/Alice/Library/Caches
+        match create_log_file(&cache_dir) {
+            Ok(log_file) => builder.with_writer(Mutex::new(log_file)).init(),
+            Err(e) => {
+                eprintln!("creating log file: {e:?} - falling back to stderr");
+                builder
+                    .with_writer(std::io::stderr)
+                    .without_time()
+                    .with_ansi(false)
+                    .init()
+            }
+        }
+    } else {
+        builder
+            .with_writer(std::io::stderr)
+            .without_time()
+            .with_ansi(false)
+            .init()
+    }
 }
 
 fn main() -> Result<()> {
-    init_logger();
+    let args = Args::parse();
+    init_logger(&args);
+    info!("lsp-ai logger initialized starting server");
     let (connection, io_threads) = Connection::stdio();
 
     let server_capabilities = serde_json::to_value(ServerCapabilities {
@@ -181,7 +224,7 @@ fn main_loop(connection: Connection, args: serde_json::Value) -> Result<()> {
                     Err(err) => error!("{err:?}"),
                 }
             } else {
-                error!("lsp-ai currently only supports textDocument/completion, textDocument/generation and textDocument/generationStream")
+                error!("Unsupported command - see the wiki for a list of supported commands")
             }
         }
         Message::Notification(not) => {
diff --git a/crates/lsp-ai/src/memory_backends/file_store.rs b/crates/lsp-ai/src/memory_backends/file_store.rs
index c631012..65aaad4 100644
--- a/crates/lsp-ai/src/memory_backends/file_store.rs
+++ b/crates/lsp-ai/src/memory_backends/file_store.rs
@@ -101,9 +101,7 @@ impl FileStore {
         match parse_tree(uri, &contents, None) {
             Ok(tree) => Some(tree),
             Err(e) => {
-                error!(
-                    "Failed to parse tree for {uri} with error {e}, falling back to no tree"
-                );
+                warn!("Failed to parse tree for {uri} with error {e}, falling back to no tree");
                 None
             }
         }
diff --git a/crates/lsp-ai/src/splitters/tree_sitter.rs b/crates/lsp-ai/src/splitters/tree_sitter.rs
index 7cda4d8..10dbaf2 100644
--- a/crates/lsp-ai/src/splitters/tree_sitter.rs
+++ b/crates/lsp-ai/src/splitters/tree_sitter.rs
@@ -1,5 +1,5 @@
 use splitter_tree_sitter::TreeSitterCodeSplitter;
-use tracing::error;
+use tracing::warn;
 use tree_sitter::Tree;
 
 use crate::{config, memory_backends::file_store::File, utils::parse_tree};
@@ -43,7 +43,7 @@ impl Splitter for TreeSitter {
         match self.split_tree(tree, file.rope().to_string().as_bytes()) {
             Ok(chunks) => chunks,
             Err(e) => {
-                error!(
+                warn!(
                     "Failed to parse tree for file with error: {e:?}. Falling back to default splitter.",
                 );
                 self.text_splitter.split(file)
@@ -59,14 +59,14 @@ impl Splitter for TreeSitter {
             Ok(tree) => match self.split_tree(&tree, contents.as_bytes()) {
                 Ok(chunks) => chunks,
                 Err(e) => {
-                    error!(
+                    warn!(
                         "Failed to parse tree for file: {uri} with error: {e:?}. Falling back to default splitter.",
                     );
                     self.text_splitter.split_file_contents(uri, contents)
                 }
             },
             Err(e) => {
-                error!(
+                warn!(
                     "Failed to parse tree for file {uri} with error: {e:?}. Falling back to default splitter.",
                 );
                 self.text_splitter.split_file_contents(uri, contents)
diff --git a/crates/lsp-ai/src/transformer_backends/anthropic.rs b/crates/lsp-ai/src/transformer_backends/anthropic.rs
index 603411c..e7ea0dd 100644
--- a/crates/lsp-ai/src/transformer_backends/anthropic.rs
+++ b/crates/lsp-ai/src/transformer_backends/anthropic.rs
@@ -3,7 +3,7 @@ use std::collections::HashMap;
 use anyhow::Context;
 use serde::Deserialize;
 use serde_json::{json, Value};
-use tracing::instrument;
+use tracing::{info, instrument};
 
 use crate::{
     config::{self, ChatMessage},
@@ -80,6 +80,18 @@ impl Anthropic {
                 "Please set `auth_token_env_var_name` or `auth_token` to use an Anthropic"
             );
         };
+        let params = json!({
+            "model": self.config.model,
+            "system": system_prompt,
+            "max_tokens": params.max_tokens,
+            "top_p": params.top_p,
+            "temperature": params.temperature,
+            "messages": messages
+        });
+        info!(
+            "Calling Anthropic compatible API with parameters:\n{}",
+            serde_json::to_string_pretty(&params).unwrap()
+        );
         let res: AnthropicChatResponse = client
             .post(
                 self.config
@@ -91,14 +103,7 @@
             .header("anthropic-version", "2023-06-01")
             .header("Content-Type", "application/json")
             .header("Accept", "application/json")
-            .json(&json!({
-                "model": self.config.model,
-                "system": system_prompt,
-                "max_tokens": params.max_tokens,
-                "top_p": params.top_p,
-                "temperature": params.temperature,
-                "messages": messages
-            }))
+            .json(&params)
             .send()
             .await?
            .json()
diff --git a/crates/lsp-ai/src/transformer_backends/gemini.rs b/crates/lsp-ai/src/transformer_backends/gemini.rs
index a4b14ad..db1c550 100644
--- a/crates/lsp-ai/src/transformer_backends/gemini.rs
+++ b/crates/lsp-ai/src/transformer_backends/gemini.rs
@@ -1,7 +1,7 @@
 use anyhow::Context;
 use serde::{Deserialize, Serialize};
 use serde_json::{json, Value};
-use tracing::instrument;
+use tracing::{info, instrument};
 
 use super::TransformerBackend;
 use crate::{
@@ -110,6 +110,15 @@ impl Gemini {
     ) -> anyhow::Result<String> {
         let client = reqwest::Client::new();
         let token = self.get_token()?;
+        let params = json!({
+            "contents": messages,
+            "systemInstruction": params.system_instruction,
+            "generationConfig": params.generation_config,
+        });
+        info!(
+            "Calling Gemini compatible chat API with parameters:\n{}",
+            serde_json::to_string_pretty(&params).unwrap()
+        );
         let res: serde_json::Value = client
             .post(
                 self.configuration
@@ -122,11 +131,7 @@
                     + token.as_ref(),
             )
             .header("Content-Type", "application/json")
-            .json(&json!({
-                "contents": messages,
-                "systemInstruction": params.system_instruction,
-                "generationConfig": params.generation_config,
-            }))
+            .json(&params)
             .send()
             .await?
             .json()
diff --git a/crates/lsp-ai/src/transformer_backends/llama_cpp/model.rs b/crates/lsp-ai/src/transformer_backends/llama_cpp/model.rs
index 1a16e25..79e87a5 100644
--- a/crates/lsp-ai/src/transformer_backends/llama_cpp/model.rs
+++ b/crates/lsp-ai/src/transformer_backends/llama_cpp/model.rs
@@ -9,7 +9,7 @@ use llama_cpp_2::{
 };
 use once_cell::sync::Lazy;
 use std::{num::NonZeroU32, path::PathBuf, time::Duration};
-use tracing::{debug, info, instrument};
+use tracing::{info, instrument};
 
 use crate::config::{self, ChatMessage};
 
@@ -29,7 +29,10 @@ impl Model {
         let model_params = LlamaModelParams::default().with_n_gpu_layers(config.n_gpu_layers);
 
         // Load the model
-        debug!("Loading model at path: {:?}", model_path);
+        info!(
+            "Loading llama.cpp compatible model at path: {:?}",
+            model_path
+        );
         let model = LlamaModel::load_from_file(&BACKEND, model_path, &model_params)?;
 
         Ok(Model {
@@ -40,6 +43,8 @@ impl Model {
 
     #[instrument(skip(self))]
     pub fn complete(&self, prompt: &str, params: LLaMACPPRunParams) -> anyhow::Result<String> {
+        info!("Completing with llama.cpp with prompt:\n{prompt}");
+
         // initialize the context
         let ctx_params = LlamaContextParams::default().with_n_ctx(Some(self.n_ctx));
 
diff --git a/crates/lsp-ai/src/transformer_backends/mistral_fim.rs b/crates/lsp-ai/src/transformer_backends/mistral_fim.rs
index e6f6de3..fe22644 100644
--- a/crates/lsp-ai/src/transformer_backends/mistral_fim.rs
+++ b/crates/lsp-ai/src/transformer_backends/mistral_fim.rs
@@ -1,7 +1,7 @@
 use anyhow::Context;
 use serde::Deserialize;
 use serde_json::{json, Value};
-use tracing::instrument;
+use tracing::{info, instrument};
 
 use super::{open_ai::OpenAIChatResponse, TransformerBackend};
 use crate::{
@@ -67,6 +67,21 @@ impl MistralFIM {
     ) -> anyhow::Result<String> {
         let client = reqwest::Client::new();
         let token = self.get_token()?;
+        let params = json!({
+            "prompt": prompt.prompt,
+            "suffix": prompt.suffix,
+            "model": self.config.model,
+            "max_tokens": params.max_tokens,
+            "top_p": params.top_p,
+            "temperature": params.temperature,
+            "min_tokens": params.min_tokens,
+            "random_seed": params.random_seed,
+            "stop": params.stop
+        });
+        info!(
+            "Calling Mistral compatible FIM API with parameters:\n{}",
+            serde_json::to_string_pretty(&params).unwrap()
+        );
         let res: OpenAIChatResponse = client
             .post(
                 self.config
@@ -77,17 +92,7 @@
            .bearer_auth(token)
            .header("Content-Type", "application/json")
            .header("Accept", "application/json")
-            .json(&json!({
-                "prompt": prompt.prompt,
-                "suffix": prompt.suffix,
-                "model": self.config.model,
-                "max_tokens": params.max_tokens,
-                "top_p": params.top_p,
-                "temperature": params.temperature,
-                "min_tokens": params.min_tokens,
-                "random_seed": params.random_seed,
-                "stop": params.stop
-            }))
+            .json(&params)
             .send()
             .await?
             .json()
diff --git a/crates/lsp-ai/src/transformer_backends/ollama.rs b/crates/lsp-ai/src/transformer_backends/ollama.rs
index 1662473..d003e2b 100644
--- a/crates/lsp-ai/src/transformer_backends/ollama.rs
+++ b/crates/lsp-ai/src/transformer_backends/ollama.rs
@@ -1,7 +1,7 @@
 use serde::{Deserialize, Serialize};
 use serde_json::{json, Value};
 use std::collections::HashMap;
-use tracing::instrument;
+use tracing::{info, instrument};
 
 use crate::{
     config::{self, ChatMessage, FIM},
@@ -66,6 +66,18 @@ impl Ollama {
         params: OllamaRunParams,
     ) -> anyhow::Result<String> {
         let client = reqwest::Client::new();
+        let params = json!({
+            "model": self.configuration.model,
+            "prompt": prompt,
+            "options": params.options,
+            "keep_alive": params.keep_alive,
+            "raw": true,
+            "stream": false
+        });
+        info!(
+            "Calling Ollama compatible completion API with parameters:\n{}",
+            serde_json::to_string_pretty(&params).unwrap()
+        );
         let res: OllamaCompletionsResponse = client
             .post(
                 self.configuration
@@ -75,14 +87,7 @@
             )
             .header("Content-Type", "application/json")
             .header("Accept", "application/json")
-            .json(&json!({
-                "model": self.configuration.model,
-                "prompt": prompt,
-                "options": params.options,
-                "keep_alive": params.keep_alive,
-                "raw": true,
-                "stream": false
-            }))
+            .json(&params)
             .send()
             .await?
             .json()
@@ -105,6 +110,19 @@ impl Ollama {
         params: OllamaRunParams,
     ) -> anyhow::Result<String> {
         let client = reqwest::Client::new();
+        let params = json!({
+            "model": self.configuration.model,
+            "system": params.system,
+            "template": params.template,
+            "messages": messages,
+            "options": params.options,
+            "keep_alive": params.keep_alive,
+            "stream": false
+        });
+        info!(
+            "Calling Ollama compatible chat API with parameters:\n{}",
+            serde_json::to_string_pretty(&params).unwrap()
+        );
         let res: OllamaChatResponse = client
             .post(
                 self.configuration
@@ -114,15 +132,7 @@
             )
             .header("Content-Type", "application/json")
             .header("Accept", "application/json")
-            .json(&json!({
-                "model": self.configuration.model,
-                "system": params.system,
-                "template": params.template,
-                "messages": messages,
-                "options": params.options,
-                "keep_alive": params.keep_alive,
-                "stream": false
-            }))
+            .json(&params)
             .send()
             .await?
            .json()
diff --git a/crates/lsp-ai/src/transformer_backends/open_ai/mod.rs b/crates/lsp-ai/src/transformer_backends/open_ai/mod.rs
index d5f68e5..a23669c 100644
--- a/crates/lsp-ai/src/transformer_backends/open_ai/mod.rs
+++ b/crates/lsp-ai/src/transformer_backends/open_ai/mod.rs
@@ -3,7 +3,7 @@ use std::collections::HashMap;
 use anyhow::Context;
 use serde::{Deserialize, Serialize};
 use serde_json::{json, Value};
-use tracing::instrument;
+use tracing::{info, instrument};
 
 use crate::{
     config::{self, ChatMessage, FIM},
@@ -116,6 +116,21 @@ impl OpenAI {
     ) -> anyhow::Result<String> {
         let client = reqwest::Client::new();
         let token = self.get_token()?;
+        let params = json!({
+            "model": self.configuration.model,
+            "max_tokens": params.max_tokens,
+            "n": 1,
+            "top_p": params.top_p,
+            "presence_penalty": params.presence_penalty,
+            "frequency_penalty": params.frequency_penalty,
+            "temperature": params.temperature,
+            "echo": false,
+            "prompt": prompt
+        });
+        info!(
+            "Calling OpenAI compatible completions API with parameters:\n{}",
+            serde_json::to_string_pretty(&params).unwrap()
+        );
         let res: OpenAICompletionsResponse = client
             .post(
                 self.configuration
@@ -126,17 +141,7 @@
             .bearer_auth(token)
             .header("Content-Type", "application/json")
             .header("Accept", "application/json")
-            .json(&json!({
-                "model": self.configuration.model,
-                "max_tokens": params.max_tokens,
-                "n": 1,
-                "top_p": params.top_p,
-                "presence_penalty": params.presence_penalty,
-                "frequency_penalty": params.frequency_penalty,
-                "temperature": params.temperature,
-                "echo": false,
-                "prompt": prompt
-            }))
+            .json(&params)
             .send().await?
             .json().await?;
         if let Some(error) = res.error {
@@ -158,6 +163,20 @@ impl OpenAI {
     ) -> anyhow::Result<String> {
         let client = reqwest::Client::new();
         let token = self.get_token()?;
+        let params = json!({
+            "model": self.configuration.model,
+            "max_tokens": params.max_tokens,
+            "n": 1,
+            "top_p": params.top_p,
+            "presence_penalty": params.presence_penalty,
+            "frequency_penalty": params.frequency_penalty,
+            "temperature": params.temperature,
+            "messages": messages
+        });
+        info!(
+            "Calling OpenAI compatible chat API with parameters:\n{}",
+            serde_json::to_string_pretty(&params).unwrap()
+        );
         let res: OpenAIChatResponse = client
             .post(
                 self.configuration
@@ -168,16 +187,7 @@
             .bearer_auth(token)
             .header("Content-Type", "application/json")
             .header("Accept", "application/json")
-            .json(&json!({
-                "model": self.configuration.model,
-                "max_tokens": params.max_tokens,
-                "n": 1,
-                "top_p": params.top_p,
-                "presence_penalty": params.presence_penalty,
-                "frequency_penalty": params.frequency_penalty,
-                "temperature": params.temperature,
-                "messages": messages
-            }))
+            .json(&params)
             .send()
             .await?
             .json()
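
Note for reviewers: the sketch below is not part of the patch. It is a minimal, self-contained reproduction of the logging setup that the new `init_logger` performs, so the behavior can be tried outside the LSP server. It assumes roughly the same dependencies as the crate above (`anyhow`, `directories`, `tracing`, and `tracing-subscriber` with the `env-filter` feature); the example binary invocation and log message are made up for illustration.

```rust
// Standalone sketch (not part of this patch) of the logging setup used above.
// Assumed Cargo dependencies: anyhow = "1", directories = "5", tracing = "0.1",
// tracing-subscriber = { version = "0.3", features = ["env-filter"] }.
use std::{fs, path::Path, sync::Mutex};

use directories::BaseDirs;
use tracing_subscriber::{EnvFilter, FmtSubscriber};

/// Create `<base>/lsp-ai/lsp-ai.log`, creating the directory if needed.
fn create_log_file(base_path: &Path) -> anyhow::Result<fs::File> {
    let dir_path = base_path.join("lsp-ai");
    fs::create_dir_all(&dir_path)?;
    Ok(fs::File::create(dir_path.join("lsp-ai.log"))?)
}

fn main() -> anyhow::Result<()> {
    // The filter comes from LSP_AI_LOG, e.g. `LSP_AI_LOG=info ./this-binary`.
    let builder = FmtSubscriber::builder().with_env_filter(EnvFilter::from_env("LSP_AI_LOG"));

    // Try the platform cache directory first, mirroring init_logger in main.rs.
    match BaseDirs::new().map(|dirs| create_log_file(dirs.cache_dir())) {
        // A Mutex<File> implements tracing-subscriber's MakeWriter, so the
        // subscriber can write every event to the log file.
        Some(Ok(file)) => builder.with_writer(Mutex::new(file)).init(),
        // No base directories, or the file could not be created: fall back to
        // stderr without colors or timestamps, as the patch does.
        _ => builder
            .with_writer(std::io::stderr)
            .without_time()
            .with_ansi(false)
            .init(),
    }

    tracing::info!("logger initialized");
    Ok(())
}
```

As in the patch, wrapping the file in a `Mutex` is what lets a plain `fs::File` satisfy `tracing-subscriber`'s `MakeWriter` trait from multiple threads, and the stderr fallback keeps logging working when no cache directory is available.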