diff --git a/candle-book/src/simplified.rs b/candle-book/src/simplified.rs
index 6101591dbc..6f04479110 100644
--- a/candle-book/src/simplified.rs
+++ b/candle-book/src/simplified.rs
@@ -123,7 +123,7 @@ async fn simplified() -> anyhow::Result<()> {
                 break;
             },
             Err(e) => {
-                println!("Error: {}", e);
+                println!("Error: {e}");
                 continue;
             }
         }
@@ -142,8 +142,8 @@ async fn simplified() -> anyhow::Result<()> {
         .argmax(D::Minus1)?
         .to_dtype(DType::F32)?
         .get(0).map(|x| x.to_scalar::())??;
-    println!("real_life_votes: {:?}", real_world_votes);
-    println!("neural_network_prediction_result: {:?}", result);
+    println!("real_life_votes: {real_world_votes:?}");
+    println!("neural_network_prediction_result: {result:?}");

     Ok(())
diff --git a/candle-core/benches/benchmarks/mod.rs b/candle-core/benches/benchmarks/mod.rs
index 579c5f3f0b..211b237bad 100644
--- a/candle-core/benches/benchmarks/mod.rs
+++ b/candle-core/benches/benchmarks/mod.rs
@@ -22,13 +22,13 @@ impl BenchDevice for Device {
                 #[cfg(feature = "cuda")]
                 return Ok(device.synchronize()?);
                 #[cfg(not(feature = "cuda"))]
-                panic!("Cuda device without cuda feature enabled: {:?}", device)
+                panic!("Cuda device without cuda feature enabled: {device:?}")
            }
            Device::Metal(device) => {
                #[cfg(feature = "metal")]
                return Ok(device.wait_until_completed()?);
                #[cfg(not(feature = "metal"))]
-                panic!("Metal device without metal feature enabled: {:?}", device)
+                panic!("Metal device without metal feature enabled: {device:?}")
            }
        }
    }
diff --git a/candle-core/benches/benchmarks/qmatmul.rs b/candle-core/benches/benchmarks/qmatmul.rs
index 4d34588b36..bd1d815fc4 100644
--- a/candle-core/benches/benchmarks/qmatmul.rs
+++ b/candle-core/benches/benchmarks/qmatmul.rs
@@ -31,7 +31,7 @@ fn run_bench(c: &mut Criterion, device: &Device, dtype: GgmlDType) {

     let flops = b * m * n * k;

-    let mut group = c.benchmark_group(device.bench_name(format!("qmatmul_{:?}", dtype)));
+    let mut group = c.benchmark_group(device.bench_name(format!("qmatmul_{dtype:?}")));
     group.sample_size(200);
     group.throughput(Throughput::Bytes(flops as u64));
     group.bench_function("iter", move |b| {
diff --git a/candle-core/benches/benchmarks/unary.rs b/candle-core/benches/benchmarks/unary.rs
index 9efd75093d..616ee1609d 100644
--- a/candle-core/benches/benchmarks/unary.rs
+++ b/candle-core/benches/benchmarks/unary.rs
@@ -40,7 +40,7 @@ fn criterion_benchmark(c: &mut Criterion) {
     let handler = BenchDeviceHandler::new().unwrap();
     for device in handler.devices {
         for dtype in [DType::F32, DType::BF16, DType::F16] {
-            let name = format!("sqrt_{:?}", dtype);
+            let name = format!("sqrt_{dtype:?}");
             run_unary_benchmark(c, &device, dtype, &name);
         }
     }
diff --git a/candle-core/src/display.rs b/candle-core/src/display.rs
index 7e6e3cf8f1..f69c25a9f1 100644
--- a/candle-core/src/display.rs
+++ b/candle-core/src/display.rs
@@ -12,10 +12,10 @@ impl Tensor {
         let device_str = match self.device().location() {
             crate::DeviceLocation::Cpu => "".to_owned(),
             crate::DeviceLocation::Cuda { gpu_id } => {
-                format!(", cuda:{}", gpu_id)
+                format!(", cuda:{gpu_id}")
             }
             crate::DeviceLocation::Metal { gpu_id } => {
-                format!(", metal:{}", gpu_id)
+                format!(", metal:{gpu_id}")
             }
         };
@@ -502,10 +502,10 @@ impl std::fmt::Display for Tensor {
         let device_str = match self.device().location() {
             crate::DeviceLocation::Cpu => "".to_owned(),
             crate::DeviceLocation::Cuda { gpu_id } => {
-                format!(", cuda:{}", gpu_id)
+                format!(", cuda:{gpu_id}")
             }
             crate::DeviceLocation::Metal { gpu_id } => {
-                format!(", metal:{}", gpu_id)
+                format!(", metal:{gpu_id}")
             }
         };
diff --git a/candle-core/tests/quantized_tests.rs b/candle-core/tests/quantized_tests.rs
index 8011333cae..309ae18835 100644
--- a/candle-core/tests/quantized_tests.rs
+++ b/candle-core/tests/quantized_tests.rs
@@ -378,12 +378,7 @@ fn compare_with_error(values: &[f32], expected: &[f32], tolerance: f32) {

         assert!(
             difference < tolerance,
-            "Error at index {}: value = {}, expected = {}. Difference = {} exceeds tolerance = {}.",
-            i,
-            value,
-            expected_value,
-            difference,
-            tolerance
+            "Error at index {i}: value = {value}, expected = {expected_value}. Difference = {difference} exceeds tolerance = {tolerance}."
         );
     }
 }
diff --git a/candle-examples/examples/clip/main.rs b/candle-examples/examples/clip/main.rs
index 273edb6a0a..e38249ce41 100644
--- a/candle-examples/examples/clip/main.rs
+++ b/candle-examples/examples/clip/main.rs
@@ -95,7 +95,7 @@ pub fn main() -> anyhow::Result<()> {
     let (_logits_per_text, logits_per_image) = model.forward(&images, &input_ids)?;
     let softmax_image = softmax(&logits_per_image, 1)?;
     let softmax_image_vec = softmax_image.flatten_all()?.to_vec1::()?;
-    println!("softmax_image_vec: {:?}", softmax_image_vec);
+    println!("softmax_image_vec: {softmax_image_vec:?}");
     let probability_vec = softmax_image_vec
         .iter()
         .map(|v| v * 100.0)
@@ -105,7 +105,7 @@ pub fn main() -> anyhow::Result<()> {
         let start = i * probability_per_image;
         let end = start + probability_per_image;
         let prob = &probability_vec[start..end];
-        println!("\n\nResults for image: {}\n", img);
+        println!("\n\nResults for image: {img}\n");
         for (i, p) in prob.iter().enumerate() {
             println!("Probability: {:.4}% Text: {} ", p, vec_seq[i]);
         }
diff --git a/candle-examples/examples/codegeex4-9b/main.rs b/candle-examples/examples/codegeex4-9b/main.rs
index a83d20ca3b..9ddafe0aaf 100644
--- a/candle-examples/examples/codegeex4-9b/main.rs
+++ b/candle-examples/examples/codegeex4-9b/main.rs
@@ -70,7 +70,7 @@ impl TextGeneration {

         let start_gen = std::time::Instant::now();
         println!("\n start_gen");
-        println!("samplelen {}", sample_len);
+        println!("samplelen {sample_len}");
         let mut count = 0;
         let mut result = vec![];
         for index in 0..sample_len {
@@ -102,10 +102,7 @@ impl TextGeneration {
                 .decode(&[next_token], true)
                 .expect("Token error");
             if self.verbose_prompt {
-                println!(
-                    "[Count: {}] [Raw Token: {}] [Decode Token: {}]",
-                    count, next_token, token
-                );
+                println!("[Count: {count}] [Raw Token: {next_token}] [Decode Token: {token}]");
             }
             result.push(token);
             std::io::stdout().flush()?;
diff --git a/candle-examples/examples/efficientvit/main.rs b/candle-examples/examples/efficientvit/main.rs
index efbf813c52..8d65968a6e 100644
--- a/candle-examples/examples/efficientvit/main.rs
+++ b/candle-examples/examples/efficientvit/main.rs
@@ -30,7 +30,7 @@ impl Which {
             Self::M4 => "m4",
             Self::M5 => "m5",
         };
-        format!("timm/efficientvit_{}.r224_in1k", name)
+        format!("timm/efficientvit_{name}.r224_in1k")
     }

     fn config(&self) -> efficientvit::Config {
diff --git a/candle-examples/examples/fastvit/main.rs b/candle-examples/examples/fastvit/main.rs
index 520fd0aed3..a5c9d1c39d 100644
--- a/candle-examples/examples/fastvit/main.rs
+++ b/candle-examples/examples/fastvit/main.rs
@@ -32,7 +32,7 @@ impl Which {
             Self::SA36 => "sa36",
             Self::MA36 => "ma36",
         };
-        format!("timm/fastvit_{}.apple_in1k", name)
+        format!("timm/fastvit_{name}.apple_in1k")
     }

     fn config(&self) -> fastvit::Config {
diff --git a/candle-examples/examples/glm4/main.rs b/candle-examples/examples/glm4/main.rs
index 55a27f349e..4070cebf24 100644
--- a/candle-examples/examples/glm4/main.rs
+++ b/candle-examples/examples/glm4/main.rs
@@ -107,10 +107,7 @@ impl TextGeneration {
                 .decode(&[next_token], true)
                 .expect("Token error");
             if self.verbose_prompt {
-                println!(
-                    "[Count: {}] [Raw Token: {}] [Decode Token: {}]",
-                    count, next_token, token
-                );
+                println!("[Count: {count}] [Raw Token: {next_token}] [Decode Token: {token}]");
             }
             result.push(token);
             std::io::stdout().flush()?;
diff --git a/candle-examples/examples/hiera/main.rs b/candle-examples/examples/hiera/main.rs
index 55bb1d54e1..06a95c2ad2 100644
--- a/candle-examples/examples/hiera/main.rs
+++ b/candle-examples/examples/hiera/main.rs
@@ -30,7 +30,7 @@ impl Which {
             Self::Large => "large",
             Self::Huge => "huge",
         };
-        format!("timm/hiera_{}_224.mae_in1k_ft_in1k", name)
+        format!("timm/hiera_{name}_224.mae_in1k_ft_in1k")
     }

     fn config(&self) -> hiera::Config {
diff --git a/candle-examples/examples/llava/main.rs b/candle-examples/examples/llava/main.rs
index cb8093002f..b18ca4cb84 100644
--- a/candle-examples/examples/llava/main.rs
+++ b/candle-examples/examples/llava/main.rs
@@ -206,10 +206,8 @@ fn main() -> Result<()> {
     let llava: LLaVA = LLaVA::load(vb, &llava_config, clip_vision_config)?;

     println!("generating conv template");
-    let image_token_se = format!(
-        "{}{}{}",
-        DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_END_TOKEN
-    );
+    let image_token_se =
+        format!("{DEFAULT_IM_START_TOKEN}{DEFAULT_IMAGE_TOKEN}{DEFAULT_IM_END_TOKEN}");
     let qs = if args.prompt.contains(IMAGE_PLACEHOLDER) {
         if llava_config.mm_use_im_start_end {
             args.prompt.replace(IMAGE_PLACEHOLDER, &image_token_se)
diff --git a/candle-examples/examples/mamba-minimal/main.rs b/candle-examples/examples/mamba-minimal/main.rs
index 5e8968c039..2c8c53b300 100644
--- a/candle-examples/examples/mamba-minimal/main.rs
+++ b/candle-examples/examples/mamba-minimal/main.rs
@@ -123,7 +123,7 @@ enum Which {

 impl std::fmt::Display for Which {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{:?}", self)
+        write!(f, "{self:?}")
     }
 }
diff --git a/candle-examples/examples/mamba/main.rs b/candle-examples/examples/mamba/main.rs
index b8c8bb70f6..5caf2e9fad 100644
--- a/candle-examples/examples/mamba/main.rs
+++ b/candle-examples/examples/mamba/main.rs
@@ -135,7 +135,7 @@ enum Which {

 impl std::fmt::Display for Which {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{:?}", self)
+        write!(f, "{self:?}")
     }
 }
diff --git a/candle-examples/examples/mobileclip/main.rs b/candle-examples/examples/mobileclip/main.rs
index d9615c43b8..68d6bb32ab 100644
--- a/candle-examples/examples/mobileclip/main.rs
+++ b/candle-examples/examples/mobileclip/main.rs
@@ -25,7 +25,7 @@ impl Which {
             Self::S1 => "S1",
             Self::S2 => "S2",
         };
-        format!("apple/MobileCLIP-{}-OpenCLIP", name)
+        format!("apple/MobileCLIP-{name}-OpenCLIP")
     }

     fn config(&self) -> mobileclip::MobileClipConfig {
@@ -107,7 +107,7 @@ pub fn main() -> anyhow::Result<()> {
     let (_logits_per_text, logits_per_image) = model.forward(&images, &input_ids)?;
     let softmax_image = softmax(&logits_per_image, 1)?;
     let softmax_image_vec = softmax_image.flatten_all()?.to_vec1::()?;
-    println!("softmax_image_vec: {:?}", softmax_image_vec);
+    println!("softmax_image_vec: {softmax_image_vec:?}");
     let probability_vec = softmax_image_vec
         .iter()
         .map(|v| v * 100.0)
@@ -118,7 +118,7 @@ pub fn main() -> anyhow::Result<()> {
         let start = i * probability_per_image;
         let end = start + probability_per_image;
         let prob = &probability_vec[start..end];
-        println!("\n\nResults for image: {}\n", img);
+        println!("\n\nResults for image: {img}\n");
         for (i, p) in prob.iter().enumerate() {
             println!("Probability: {:.4}% Text: {}", p, vec_seq[i]);
diff --git a/candle-examples/examples/mobilenetv4/main.rs b/candle-examples/examples/mobilenetv4/main.rs
index c31b91e6e4..b71b9ef61c 100644
--- a/candle-examples/examples/mobilenetv4/main.rs
+++ b/candle-examples/examples/mobilenetv4/main.rs
@@ -28,7 +28,7 @@ impl Which {
             Self::Large => "conv_large.e600_r384",
             Self::HybridLarge => "hybrid_large.ix_e600_r384",
         };
-        format!("timm/mobilenetv4_{}_in1k", name)
+        format!("timm/mobilenetv4_{name}_in1k")
     }

     fn resolution(&self) -> u32 {
diff --git a/candle-examples/examples/mobileone/main.rs b/candle-examples/examples/mobileone/main.rs
index 76533fe3d5..7e0b0d448b 100644
--- a/candle-examples/examples/mobileone/main.rs
+++ b/candle-examples/examples/mobileone/main.rs
@@ -28,7 +28,7 @@ impl Which {
             Self::S3 => "s3",
             Self::S4 => "s4",
         };
-        format!("timm/mobileone_{}.apple_in1k", name)
+        format!("timm/mobileone_{name}.apple_in1k")
     }

     fn config(&self) -> mobileone::Config {
diff --git a/candle-examples/examples/moondream/main.rs b/candle-examples/examples/moondream/main.rs
index 6e09988885..7058acb44a 100644
--- a/candle-examples/examples/moondream/main.rs
+++ b/candle-examples/examples/moondream/main.rs
@@ -106,7 +106,7 @@ impl TextGeneration {
                 }
             };
             load_t = start_gen.elapsed();
-            println!("load_t: {:?}", load_t);
+            println!("load_t: {load_t:?}");
             logits
         };
         let logits = logits.squeeze(0)?.to_dtype(DType::F32)?;
diff --git a/candle-examples/examples/paligemma/main.rs b/candle-examples/examples/paligemma/main.rs
index 9ce5011bc2..2412f17531 100644
--- a/candle-examples/examples/paligemma/main.rs
+++ b/candle-examples/examples/paligemma/main.rs
@@ -253,7 +253,7 @@ fn main() -> Result<()> {
         .to_device(&device)?
         .to_dtype(dtype)?
         .unsqueeze(0)?;
-    println!("loaded image with shape {:?}", image);
+    println!("loaded image with shape {image:?}");
     let start = std::time::Instant::now();
     let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? };
     let model = Model::new(&config, vb)?;
diff --git a/candle-examples/examples/pixtral/main.rs b/candle-examples/examples/pixtral/main.rs
index 79f438686f..4697eefe26 100644
--- a/candle-examples/examples/pixtral/main.rs
+++ b/candle-examples/examples/pixtral/main.rs
@@ -295,7 +295,7 @@ fn main() -> Result<()> {
         )?
     };
     let image = image.to_device(&device)?.unsqueeze(0)?;
-    println!("loaded image with shape {:?}", image);
+    println!("loaded image with shape {image:?}");
     let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? };
     if args.vision_only {
diff --git a/candle-examples/examples/quantized-phi/main.rs b/candle-examples/examples/quantized-phi/main.rs
index f567ce2d36..31df68c789 100644
--- a/candle-examples/examples/quantized-phi/main.rs
+++ b/candle-examples/examples/quantized-phi/main.rs
@@ -144,7 +144,7 @@ impl Args {

 fn format_size(size_in_bytes: usize) -> String {
     if size_in_bytes < 1_000 {
-        format!("{}B", size_in_bytes)
+        format!("{size_in_bytes}B")
     } else if size_in_bytes < 1_000_000 {
         format!("{:.2}KB", size_in_bytes as f64 / 1e3)
     } else if size_in_bytes < 1_000_000_000 {
diff --git a/candle-examples/examples/quantized-qwen2-instruct/main.rs b/candle-examples/examples/quantized-qwen2-instruct/main.rs
index 1bd230e0e0..7c293c35a8 100644
--- a/candle-examples/examples/quantized-qwen2-instruct/main.rs
+++ b/candle-examples/examples/quantized-qwen2-instruct/main.rs
@@ -151,7 +151,7 @@ impl Args {

 fn format_size(size_in_bytes: usize) -> String {
     if size_in_bytes < 1_000 {
-        format!("{}B", size_in_bytes)
+        format!("{size_in_bytes}B")
     } else if size_in_bytes < 1_000_000 {
         format!("{:.2}KB", size_in_bytes as f64 / 1e3)
     } else if size_in_bytes < 1_000_000_000 {
@@ -212,10 +212,7 @@ fn main() -> anyhow::Result<()> {
     let tokenizer = args.tokenizer()?;
     let mut tos = TokenOutputStream::new(tokenizer);
     let prompt_str = args.prompt.unwrap_or_else(|| DEFAULT_PROMPT.to_string());
-    let prompt_str = format!(
-        "<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n",
-        prompt_str
-    );
+    let prompt_str = format!("<|im_start|>user\n{prompt_str}<|im_end|>\n<|im_start|>assistant\n");
     print!("formatted instruct prompt: {}", &prompt_str);
     let tokens = tos
         .tokenizer()
diff --git a/candle-examples/examples/quantized/main.rs b/candle-examples/examples/quantized/main.rs
index d91701ff8b..9292add43f 100644
--- a/candle-examples/examples/quantized/main.rs
+++ b/candle-examples/examples/quantized/main.rs
@@ -364,7 +364,7 @@ impl Args {

 fn format_size(size_in_bytes: usize) -> String {
     if size_in_bytes < 1_000 {
-        format!("{}B", size_in_bytes)
+        format!("{size_in_bytes}B")
     } else if size_in_bytes < 1_000_000 {
         format!("{:.2}KB", size_in_bytes as f64 / 1e3)
     } else if size_in_bytes < 1_000_000_000 {
diff --git a/candle-examples/examples/repvgg/main.rs b/candle-examples/examples/repvgg/main.rs
index 7cc90ba16b..5b3521243b 100644
--- a/candle-examples/examples/repvgg/main.rs
+++ b/candle-examples/examples/repvgg/main.rs
@@ -38,7 +38,7 @@ impl Which {
             Self::B2G4 => "b2g4",
             Self::B3G4 => "b3g4",
         };
-        format!("timm/repvgg_{}.rvgg_in1k", name)
+        format!("timm/repvgg_{name}.rvgg_in1k")
     }

     fn config(&self) -> repvgg::Config {
diff --git a/candle-examples/examples/rwkv/main.rs b/candle-examples/examples/rwkv/main.rs
index 8fb2c0d41f..aa5a406cb0 100644
--- a/candle-examples/examples/rwkv/main.rs
+++ b/candle-examples/examples/rwkv/main.rs
@@ -134,7 +134,7 @@ enum Which {

 impl std::fmt::Display for Which {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{:?}", self)
+        write!(f, "{self:?}")
     }
 }
diff --git a/candle-examples/examples/segformer/main.rs b/candle-examples/examples/segformer/main.rs
index 16db62fc01..152f5b8d45 100644
--- a/candle-examples/examples/segformer/main.rs
+++ b/candle-examples/examples/segformer/main.rs
@@ -57,16 +57,16 @@ enum Commands {
 }

 fn get_vb_and_config(model_name: String, device: &Device) -> anyhow::Result<(VarBuilder, Config)> {
-    println!("loading model {} via huggingface hub", model_name);
+    println!("loading model {model_name} via huggingface hub");
     let api = hf_hub::api::sync::Api::new()?;
     let api = api.model(model_name.clone());
     let model_file = api.get("model.safetensors")?;
-    println!("model {} downloaded and loaded", model_name);
+    println!("model {model_name} downloaded and loaded");
     let vb =
         unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], candle::DType::F32, device)? };
     let config = std::fs::read_to_string(api.get("config.json")?)?;
     let config: Config = serde_json::from_str(&config)?;
-    println!("{:?}", config);
+    println!("{config:?}");
     Ok((vb, config))
 }
@@ -138,7 +138,7 @@ fn classification_task(args: ClassificationArgs, device: &Device) -> anyhow::Res
         classification.to_vec1::()?
     );
     let label_id = classification.argmax(0)?.to_scalar::()?;
-    let label_id = format!("{}", label_id);
+    let label_id = format!("{label_id}");
     println!("label: {}", config.id2label[&label_id]);
     Ok(())
 }
diff --git a/candle-examples/examples/siglip/main.rs b/candle-examples/examples/siglip/main.rs
index be953c8764..28056931a9 100644
--- a/candle-examples/examples/siglip/main.rs
+++ b/candle-examples/examples/siglip/main.rs
@@ -89,7 +89,7 @@ pub fn main() -> anyhow::Result<()> {
     let (_logits_per_text, logits_per_image) = model.forward(&images, &input_ids)?;
     let softmax_image = softmax(&logits_per_image, 1)?;
     let softmax_image_vec = softmax_image.flatten_all()?.to_vec1::()?;
-    println!("softmax_image_vec: {:?}", softmax_image_vec);
+    println!("softmax_image_vec: {softmax_image_vec:?}");
     let probability_vec = softmax_image_vec
         .iter()
         .map(|v| v * 100.0)
@@ -99,7 +99,7 @@ pub fn main() -> anyhow::Result<()> {
         let start = i * probability_per_image;
         let end = start + probability_per_image;
         let prob = &probability_vec[start..end];
-        println!("\n\nResults for image: {}\n", img);
+        println!("\n\nResults for image: {img}\n");
         for (i, p) in prob.iter().enumerate() {
             println!("Probability: {:.4}% Text: {} ", p, vec_seq[i]);
         }
diff --git a/candle-examples/examples/splade/main.rs b/candle-examples/examples/splade/main.rs
index aa4c60ac41..738b624b7f 100644
--- a/candle-examples/examples/splade/main.rs
+++ b/candle-examples/examples/splade/main.rs
@@ -73,7 +73,7 @@ fn main() -> Result<()> {
             Err(_) => match repo.get("pytorch_model.bin") {
                 Ok(pytorch_model) => pytorch_model,
                 Err(e) => {
-                    return Err(anyhow::Error::msg(format!("Model weights not found. The weights should either be a `model.safetensors` or `pytorch_model.bin` file. Error: {}", e)));
+                    return Err(anyhow::Error::msg(format!("Model weights not found. The weights should either be a `model.safetensors` or `pytorch_model.bin` file. Error: {e}")));
                 }
             },
         },
diff --git a/candle-examples/examples/trocr/main.rs b/candle-examples/examples/trocr/main.rs
index f857295c78..63ee3c1bef 100644
--- a/candle-examples/examples/trocr/main.rs
+++ b/candle-examples/examples/trocr/main.rs
@@ -93,7 +93,7 @@ pub fn main() -> anyhow::Result<()> {
                 .get("model.safetensors")?
             }
         };
-        println!("model: {:?}", model);
+        println!("model: {model:?}");
         unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? }
     };
diff --git a/candle-nn/benches/benchmarks/mod.rs b/candle-nn/benches/benchmarks/mod.rs
index 30a6ab6a2b..93244a9d3f 100644
--- a/candle-nn/benches/benchmarks/mod.rs
+++ b/candle-nn/benches/benchmarks/mod.rs
@@ -17,13 +17,13 @@ impl BenchDevice for Device {
                 #[cfg(feature = "cuda")]
                 return Ok(device.synchronize()?);
                 #[cfg(not(feature = "cuda"))]
-                panic!("Cuda device without cuda feature enabled: {:?}", device)
+                panic!("Cuda device without cuda feature enabled: {device:?}")
            }
            Device::Metal(device) => {
                #[cfg(feature = "metal")]
                return Ok(device.wait_until_completed()?);
                #[cfg(not(feature = "metal"))]
-                panic!("Metal device without metal feature enabled: {:?}", device)
+                panic!("Metal device without metal feature enabled: {device:?}")
            }
        }
    }
diff --git a/candle-pyo3/src/lib.rs b/candle-pyo3/src/lib.rs
index 722b5e3ace..38464e7f15 100644
--- a/candle-pyo3/src/lib.rs
+++ b/candle-pyo3/src/lib.rs
@@ -517,9 +517,7 @@ impl PyTensor {
             // Check that the index is in range
             if actual_index < 0 || actual_index >= dims[current_dim] as isize {
                 return Err(PyValueError::new_err(format!(
-                    "index out of range for dimension '{i}' with indexer '{value}'",
-                    i = current_dim,
-                    value = index
+                    "index out of range for dimension '{current_dim}' with indexer '{index}'"
                 )));
             }
             Ok(actual_index as usize)
@@ -579,8 +577,7 @@ impl PyTensor {
                 Ok((Indexer::Expand, current_dim))
             } else {
                 Err(PyTypeError::new_err(format!(
-                    "unsupported indexer {}",
-                    py_indexer
+                    "unsupported indexer {py_indexer}"
                 )))
             }
         }
@@ -1422,8 +1419,7 @@ fn save_gguf(path: &str, tensors: PyObject, metadata: PyObject, py: Python<'_>)
                 gguf_file::Value::Array(x)
             } else {
                 return Err(PyErr::new::(format!(
-                    "unsupported type {:?}",
-                    v
+                    "unsupported type {v:?}"
                 )));
             };
             Ok(v)
diff --git a/candle-pyo3/src/shape.rs b/candle-pyo3/src/shape.rs
index b9bc67899d..4218d86186 100644
--- a/candle-pyo3/src/shape.rs
+++ b/candle-pyo3/src/shape.rs
@@ -56,8 +56,7 @@ impl<'source> pyo3::FromPyObject<'source> for PyShapeWithHole {
         let any_invalid_dimensions = dims.iter().any(|&x| x < -1 || x == 0);
         if negative_ones > 1 || any_invalid_dimensions {
             return Err(PyErr::new::(format!(
-                "Invalid dimension in shape: {:?}",
-                dims
+                "Invalid dimension in shape: {dims:?}"
             )));
         }

@@ -89,8 +88,7 @@ impl PyShapeWithHole {
                 new_dims.push(elements);
             } else {
                 return Err(PyErr::new::(format!(
-                    "Invalid dimension in shape: {}",
-                    dim
+                    "Invalid dimension in shape: {dim}"
                 )));
             }
         }
diff --git a/candle-transformers/src/models/chinese_clip/mod.rs b/candle-transformers/src/models/chinese_clip/mod.rs
index 88472f0b88..c5ddb5c2ef 100644
--- a/candle-transformers/src/models/chinese_clip/mod.rs
+++ b/candle-transformers/src/models/chinese_clip/mod.rs
@@ -30,7 +30,7 @@ impl From for Activation {
             "gelu" => Activation::Gelu,
             "gelu_new" => Activation::GeluNew,
             "relu" => Activation::Relu,
-            _ => panic!("Invalid activation function: {}", value),
+            _ => panic!("Invalid activation function: {value}"),
         }
     }
 }
diff --git a/candle-transformers/src/models/mmdit/model.rs b/candle-transformers/src/models/mmdit/model.rs
index 864b662377..2d249092d2 100644
--- a/candle-transformers/src/models/mmdit/model.rs
+++ b/candle-transformers/src/models/mmdit/model.rs
@@ -145,7 +145,7 @@ impl MMDiTCore {
                 hidden_size,
                 num_heads,
                 use_flash_attn,
-                vb.pp(format!("joint_blocks.{}", i)),
+                vb.pp(format!("joint_blocks.{i}")),
             )?);
         }

diff --git a/candle-transformers/src/models/moondream.rs b/candle-transformers/src/models/moondream.rs
index cde59d43d6..731b023643 100644
--- a/candle-transformers/src/models/moondream.rs
+++ b/candle-transformers/src/models/moondream.rs
@@ -167,7 +167,7 @@ impl VisionTransformer {
         let blocks = (0..cfg.num_blocks)
             .map(|i| {
                 VitBlock::new(
-                    vb.pp(format!("blocks.{}", i)),
+                    vb.pp(format!("blocks.{i}")),
                     cfg.embed_dim,
                     cfg.num_heads,
                     cfg,
diff --git a/candle-transformers/src/models/quantized_moondream.rs b/candle-transformers/src/models/quantized_moondream.rs
index 1b125d9306..5293ed3d8f 100644
--- a/candle-transformers/src/models/quantized_moondream.rs
+++ b/candle-transformers/src/models/quantized_moondream.rs
@@ -119,7 +119,7 @@ impl VisionTransformer {
         let blocks = (0..cfg.num_blocks)
             .map(|i| {
                 VitBlock::new(
-                    vb.pp(format!("blocks.{}", i)),
+                    vb.pp(format!("blocks.{i}")),
                     cfg.embed_dim,
                     cfg.num_heads,
                     cfg,
diff --git a/candle-transformers/src/models/segformer.rs b/candle-transformers/src/models/segformer.rs
index 260ceb3a84..6dfa1e8dd8 100644
--- a/candle-transformers/src/models/segformer.rs
+++ b/candle-transformers/src/models/segformer.rs
@@ -404,7 +404,7 @@ impl SegformerEncoder {
                 stride,
                 num_channels,
                 hidden_size,
-                vb.pp(format!("patch_embeddings.{}", i)),
+                vb.pp(format!("patch_embeddings.{i}")),
             )?);
             let mut layers = Vec::with_capacity(config.depths[i]);
             for j in 0..config.depths[i] {
@@ -417,14 +417,14 @@ impl SegformerEncoder {
                     num_attention_heads,
                     sequence_reduction_ratio,
                     mlp_ratio,
-                    vb.pp(format!("block.{}.{}", i, j)),
+                    vb.pp(format!("block.{i}.{j}")),
                 )?);
             }
             blocks.push(layers);
             layer_norms.push(layer_norm(
                 hidden_size,
                 config.layer_norm_eps,
-                vb.pp(format!("layer_norm.{}", i)),
+                vb.pp(format!("layer_norm.{i}")),
             )?);
         }
         Ok(Self {
@@ -507,7 +507,7 @@ impl SegformerDecodeHead {
             linear_c.push(SegformerMLP::new(
                 config,
                 hidden_size,
-                vb.pp(format!("linear_c.{}", i)),
+                vb.pp(format!("linear_c.{i}")),
             )?);
         }
         let linear_fuse = conv2d_no_bias(
diff --git a/candle-wasm-examples/moondream/src/bin/m.rs b/candle-wasm-examples/moondream/src/bin/m.rs
index 27cda1e788..0a924c5b0e 100644
--- a/candle-wasm-examples/moondream/src/bin/m.rs
+++ b/candle-wasm-examples/moondream/src/bin/m.rs
@@ -120,7 +120,7 @@ impl Model {
         } = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?;

         let device = Device::Cpu;
-        let prompt = format!("\n\nQuestion: {0}\n\nAnswer:", prompt);
+        let prompt = format!("\n\nQuestion: {prompt}\n\nAnswer:");
         match &mut self.model {
             SelectedModel::Moondream(m) => m.text_model.clear_kv_cache(),
             SelectedModel::Quantized(m) => m.text_model.clear_kv_cache(),
diff --git a/candle-wasm-examples/segment-anything/src/bin/m.rs b/candle-wasm-examples/segment-anything/src/bin/m.rs
index 38e9fe3b6e..5164bb9ab1 100644
--- a/candle-wasm-examples/segment-anything/src/bin/m.rs
+++ b/candle-wasm-examples/segment-anything/src/bin/m.rs
@@ -81,14 +81,12 @@ impl Model {
         for &(x, y, _bool) in &transformed_points {
             if !(0.0..=1.0).contains(&x) {
                 return Err(JsError::new(&format!(
-                    "x has to be between 0 and 1, got {}",
-                    x
+                    "x has to be between 0 and 1, got {x}"
                 )));
             }
             if !(0.0..=1.0).contains(&y) {
                 return Err(JsError::new(&format!(
-                    "y has to be between 0 and 1, got {}",
-                    y
+                    "y has to be between 0 and 1, got {y}"
                 )));
             }
         }
diff --git a/candle-wasm-examples/whisper/src/app.rs b/candle-wasm-examples/whisper/src/app.rs
index a2c0ddabcb..03eae9382d 100644
--- a/candle-wasm-examples/whisper/src/app.rs
+++ b/candle-wasm-examples/whisper/src/app.rs
@@ -184,7 +184,7 @@ impl Component for App {
             Ok(WorkerOutput::Decoded(segments)) => {
                 self.status = match dt {
                     None => "decoding succeeded!".to_string(),
-                    Some(dt) => format!("decoding succeeded in {:.2}s", dt),
+                    Some(dt) => format!("decoding succeeded in {dt:.2}s"),
                 };
                 self.segments = segments;
             }
diff --git a/candle-wasm-examples/yolo/src/app.rs b/candle-wasm-examples/yolo/src/app.rs
index 61253fb5a8..40445da696 100644
--- a/candle-wasm-examples/yolo/src/app.rs
+++ b/candle-wasm-examples/yolo/src/app.rs
@@ -204,7 +204,7 @@ impl Component for App {
                 });
                 self.status = match dt {
                     None => "processing succeeded!".to_string(),
-                    Some(dt) => format!("processing succeeded in {:.2}s", dt,),
+                    Some(dt) => format!("processing succeeded in {dt:.2}s",),
                 };
                 self.current_decode = None;
                 if let Err(err) = draw_bboxes(bboxes) {
diff --git a/tensor-tools/src/main.rs b/tensor-tools/src/main.rs
index 0bda36d524..00af187057 100644
--- a/tensor-tools/src/main.rs
+++ b/tensor-tools/src/main.rs
@@ -352,7 +352,7 @@ fn run_ls(
             tensor_info.dtype,
         );
         if verbose {
-            println!(" {:?}", tensor_info);
+            println!(" {tensor_info:?}");
         }
     }
 }
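
Note: every hunk in this patch makes the same mechanical change: positional `format!`/`println!`/`panic!` arguments are replaced by inlined format arguments (identifiers captured directly in the format string), the idiom flagged by Clippy's `uninlined_format_args` lint. The following standalone sketch (not part of the patch; the variable names are illustrative) shows the before/after forms and that formatting specifiers such as `:?` and `:.2` carry over unchanged:

    fn main() {
        let device = "cuda:0";
        let dt = 1.2345_f64;

        // Before: arguments passed positionally after the format string.
        let old = format!("ran on {:?} in {:.2}s", device, dt);

        // After: identifiers captured inside the braces; specifiers keep working.
        let new = format!("ran on {device:?} in {dt:.2}s");

        assert_eq!(old, new);
        println!("{new}");
    }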