|
2 | 2 |
|
3 | 3 | use std::ffi::OsStr; |
4 | 4 | use std::fs::File; |
| 5 | +use std::io::BufReader; |
| 6 | +use std::path::Path; |
5 | 7 | use std::path::PathBuf; |
6 | 8 |
|
7 | 9 | use annotate_snippets::Level; |
8 | 10 | use anyhow::Context as _; |
| 11 | +use cargo_util::paths; |
| 12 | +use indexmap::IndexMap; |
| 13 | +use indexmap::map::Entry; |
| 14 | +use itertools::Itertools as _; |
| 15 | +use tempfile::TempDir; |
9 | 16 |
|
10 | 17 | use crate::CargoResult; |
11 | 18 | use crate::GlobalContext; |
12 | 19 | use crate::core::Workspace; |
| 20 | +use crate::core::compiler::CompileMode; |
| 21 | +use crate::core::compiler::timings::CompilationSection; |
| 22 | +use crate::core::compiler::timings::UnitData; |
| 23 | +use crate::core::compiler::timings::report::RenderContext; |
| 24 | +use crate::core::compiler::timings::report::aggregate_sections; |
| 25 | +use crate::core::compiler::timings::report::compute_concurrency; |
| 26 | +use crate::core::compiler::timings::report::round_to_centisecond; |
| 27 | +use crate::core::compiler::timings::report::write_html; |
13 | 28 | use crate::util::BuildLogger; |
14 | 29 | use crate::util::important_paths::find_root_manifest_for_wd; |
| 30 | +use crate::util::log_message::LogMessage; |
15 | 31 | use crate::util::logger::RunId; |
| 32 | +use crate::util::style; |
16 | 33 |
|
/// Options for `cargo report timings`.
pub struct ReportTimingsOptions<'gctx> {
    // Explicit log file to analyze; presumably overrides automatic
    // discovery when set — confirm against `select_log_file`.
    pub log_file_path: Option<PathBuf>,
    // Specific run to report on; presumably the latest run is chosen
    // when `None` — confirm against `select_log_file`.
    pub id: Option<RunId>,
    pub gctx: &'gctx GlobalContext,
}
22 | 39 |
|
/// Collects sections data for later post-processing through [`aggregate_sections`].
struct UnitEntry {
    // Per-unit timing data that ends up in the rendered report.
    data: UnitData,
    // Compilation sections keyed by section name, in the order they
    // first appear in the log.
    sections: IndexMap<String, CompilationSection>,
    // Time (relative to the unit's start) at which the rmeta was
    // produced, if the log recorded a `UnitRmetaFinished` message.
    rmeta_time: Option<f64>,
}
| 46 | + |
23 | 47 | pub fn report_timings(gctx: &GlobalContext, opts: ReportTimingsOptions<'_>) -> CargoResult<()> { |
24 | 48 | let ws = find_root_manifest_for_wd(gctx.cwd()) |
25 | 49 | .ok() |
26 | 50 | .and_then(|manifest_path| Workspace::new(&manifest_path, gctx).ok()); |
27 | | - let Some((log, _run_id)) = select_log_file(gctx, ws.as_ref(), &opts)? else { |
| 51 | + let Some((log, run_id)) = select_log_file(gctx, ws.as_ref(), &opts)? else { |
28 | 52 | let report = [Level::INFO |
29 | 53 | .secondary_title("no build log files found") |
30 | 54 | .element(Level::NOTE.message("enable `-Z build-analysis` to generate logs"))]; |
31 | 55 | gctx.shell().print_report(&report, false)?; |
32 | 56 | return Ok(()); |
33 | 57 | }; |
34 | 58 |
|
35 | | - let _f = File::open(&log) |
| 59 | + let ctx = prepare_context(&log, &run_id) |
36 | 60 | .with_context(|| format!("failed to analyze log at `{}`", log.display()))?; |
37 | 61 |
|
| 62 | + // If we are in a workspace, |
| 63 | + // put timing reports in <target-dir>/cargo-timings/` as usual for easy access. |
| 64 | + // Otherwise in a temporary directory |
| 65 | + let reports_dir = if let Some(ws) = ws { |
| 66 | + let target_dir = ws.target_dir(); |
| 67 | + let target_dir = target_dir.as_path_unlocked(); |
| 68 | + paths::create_dir_all_excluded_from_backups_atomic(target_dir)?; |
| 69 | + let timings_dir = target_dir.join("cargo-timings"); |
| 70 | + paths::create_dir_all(&timings_dir)?; |
| 71 | + timings_dir |
| 72 | + } else { |
| 73 | + TempDir::with_prefix("cargo-timings-")?.keep() |
| 74 | + }; |
| 75 | + |
| 76 | + let timing_path = reports_dir.join(format!("cargo-timing-{run_id}.html")); |
| 77 | + |
| 78 | + let mut out_file = std::fs::OpenOptions::new() |
| 79 | + .write(true) |
| 80 | + .create(true) |
| 81 | + .truncate(true) |
| 82 | + .open(&timing_path) |
| 83 | + .with_context(|| format!("failed to open `{}`", timing_path.display()))?; |
| 84 | + |
| 85 | + write_html(ctx, &mut out_file)?; |
| 86 | + |
| 87 | + let mut shell = gctx.shell(); |
| 88 | + let link = shell.err_file_hyperlink(&timing_path); |
| 89 | + let msg = format!("report saved to {link}{}{link:#}", timing_path.display()); |
| 90 | + shell.status_with_color("Timing", msg, &style::NOTE)?; |
| 91 | + |
38 | 92 | Ok(()) |
39 | 93 | } |
40 | 94 |
|
@@ -99,3 +153,228 @@ fn select_log_file( |
99 | 153 |
|
100 | 154 | Ok(item.map(|(entry, run_id)| (entry.into_path(), run_id))) |
101 | 155 | } |
| 156 | + |
| 157 | +fn prepare_context(log: &Path, run_id: &RunId) -> CargoResult<RenderContext<'static>> { |
| 158 | + let reader = BufReader::new(File::open(&log)?); |
| 159 | + |
| 160 | + let mut ctx = RenderContext { |
| 161 | + start_str: run_id.timestamp().to_string(), |
| 162 | + root_units: Default::default(), |
| 163 | + profile: Default::default(), |
| 164 | + total_fresh: Default::default(), |
| 165 | + total_dirty: Default::default(), |
| 166 | + unit_data: Default::default(), |
| 167 | + concurrency: Default::default(), |
| 168 | + cpu_usage: Default::default(), |
| 169 | + rustc_version: Default::default(), |
| 170 | + host: Default::default(), |
| 171 | + requested_targets: Default::default(), |
| 172 | + jobs: 0, |
| 173 | + error: &None, |
| 174 | + }; |
| 175 | + let mut units: IndexMap<_, UnitEntry> = IndexMap::new(); |
| 176 | + |
| 177 | + for (log_index, result) in serde_json::Deserializer::from_reader(reader) |
| 178 | + .into_iter::<LogMessage>() |
| 179 | + .enumerate() |
| 180 | + { |
| 181 | + let msg = match result { |
| 182 | + Ok(msg) => msg, |
| 183 | + Err(e) => { |
| 184 | + tracing::warn!("failed to parse log message at index {log_index}: {e}"); |
| 185 | + continue; |
| 186 | + } |
| 187 | + }; |
| 188 | + |
| 189 | + match msg { |
| 190 | + LogMessage::BuildStarted { |
| 191 | + cwd: _, |
| 192 | + host, |
| 193 | + jobs, |
| 194 | + profile, |
| 195 | + rustc_version, |
| 196 | + rustc_version_verbose: _, |
| 197 | + target_dir: _, |
| 198 | + workspace_root: _, |
| 199 | + } => { |
| 200 | + ctx.host = host; |
| 201 | + ctx.jobs = jobs; |
| 202 | + ctx.profile = profile; |
| 203 | + ctx.rustc_version = rustc_version; |
| 204 | + } |
| 205 | + LogMessage::UnitStarted { |
| 206 | + package_id, |
| 207 | + target, |
| 208 | + mode, |
| 209 | + index, |
| 210 | + elapsed, |
| 211 | + } => { |
| 212 | + let version = package_id |
| 213 | + .version() |
| 214 | + .map(|v| v.to_string()) |
| 215 | + .unwrap_or_else(|| "N/A".into()); |
| 216 | + |
| 217 | + // This is pretty similar to how the current `core::compiler::timings` |
| 218 | + // renders `core::manifest::Target`. However, our target is |
| 219 | + // a simplified type so we cannot reuse the same logic here. |
| 220 | + let mut target_str = if target.kind == "lib" && mode == CompileMode::Build { |
| 221 | + // Special case for brevity, since most dependencies hit this path. |
| 222 | + "".to_string() |
| 223 | + } else if target.kind == "build-script" { |
| 224 | + " build-script".to_string() |
| 225 | + } else { |
| 226 | + format!(r#" {} "{}""#, target.name, target.kind) |
| 227 | + }; |
| 228 | + |
| 229 | + match mode { |
| 230 | + CompileMode::Test => target_str.push_str(" (test)"), |
| 231 | + CompileMode::Build => {} |
| 232 | + CompileMode::Check { test: true } => target_str.push_str(" (check-test)"), |
| 233 | + CompileMode::Check { test: false } => target_str.push_str(" (check)"), |
| 234 | + CompileMode::Doc { .. } => target_str.push_str(" (doc)"), |
| 235 | + CompileMode::Doctest => target_str.push_str(" (doc test)"), |
| 236 | + CompileMode::Docscrape => target_str.push_str(" (doc scrape)"), |
| 237 | + CompileMode::RunCustomBuild => target_str.push_str(" (run)"), |
| 238 | + } |
| 239 | + |
| 240 | + let mode_str = if mode.is_run_custom_build() { |
| 241 | + "run-custom-build" |
| 242 | + } else { |
| 243 | + "todo" |
| 244 | + }; |
| 245 | + |
| 246 | + let data = UnitData { |
| 247 | + i: index, |
| 248 | + name: package_id.name().to_string(), |
| 249 | + version, |
| 250 | + mode: mode_str.to_owned(), |
| 251 | + target: target_str, |
| 252 | + features: Vec::new(), |
| 253 | + start: elapsed, |
| 254 | + duration: 0.0, |
| 255 | + unblocked_units: Vec::new(), |
| 256 | + unblocked_rmeta_units: Vec::new(), |
| 257 | + sections: None, |
| 258 | + }; |
| 259 | + |
| 260 | + units.insert( |
| 261 | + index, |
| 262 | + UnitEntry { |
| 263 | + data, |
| 264 | + sections: IndexMap::new(), |
| 265 | + rmeta_time: None, |
| 266 | + }, |
| 267 | + ); |
| 268 | + } |
| 269 | + LogMessage::UnitRmetaFinished { |
| 270 | + index, |
| 271 | + elapsed, |
| 272 | + unblocked, |
| 273 | + } => match units.entry(index) { |
| 274 | + Entry::Occupied(mut e) => { |
| 275 | + let elapsed = f64::max(elapsed - e.get().data.start, 0.0); |
| 276 | + e.get_mut().data.unblocked_rmeta_units = unblocked; |
| 277 | + e.get_mut().data.duration = elapsed; |
| 278 | + e.get_mut().rmeta_time = Some(elapsed); |
| 279 | + } |
| 280 | + Entry::Vacant(_) => { |
| 281 | + tracing::warn!( |
| 282 | + "section `frontend` ended, but unit {index} has no start recorded" |
| 283 | + ) |
| 284 | + } |
| 285 | + }, |
| 286 | + LogMessage::UnitSectionStarted { |
| 287 | + index, |
| 288 | + elapsed, |
| 289 | + section, |
| 290 | + } => match units.entry(index) { |
| 291 | + Entry::Occupied(mut e) => { |
| 292 | + let elapsed = f64::max(elapsed - e.get().data.start, 0.0); |
| 293 | + if e.get_mut() |
| 294 | + .sections |
| 295 | + .insert( |
| 296 | + section.clone(), |
| 297 | + CompilationSection { |
| 298 | + start: elapsed, |
| 299 | + end: None, |
| 300 | + }, |
| 301 | + ) |
| 302 | + .is_some() |
| 303 | + { |
| 304 | + tracing::warn!( |
| 305 | + "section `{section}` for unit {index} started more than once", |
| 306 | + ); |
| 307 | + } |
| 308 | + } |
| 309 | + Entry::Vacant(_) => { |
| 310 | + tracing::warn!( |
| 311 | + "section `{section}` started, but unit {index} has no start recorded" |
| 312 | + ) |
| 313 | + } |
| 314 | + }, |
| 315 | + LogMessage::UnitSectionFinished { |
| 316 | + index, |
| 317 | + elapsed, |
| 318 | + section, |
| 319 | + } => match units.entry(index) { |
| 320 | + Entry::Occupied(mut e) => { |
| 321 | + let elapsed = f64::max(elapsed - e.get().data.start, 0.0); |
| 322 | + if let Some(section) = e.get_mut().sections.get_mut(§ion) { |
| 323 | + section.end = Some(elapsed); |
| 324 | + } else { |
| 325 | + tracing::warn!( |
| 326 | + "section `{section}` for unit {index} ended, but section `{section}` has no start recorded" |
| 327 | + ); |
| 328 | + } |
| 329 | + } |
| 330 | + Entry::Vacant(_) => { |
| 331 | + tracing::warn!( |
| 332 | + "section `{section}` ended, but unit {index} has no start recorded" |
| 333 | + ) |
| 334 | + } |
| 335 | + }, |
| 336 | + LogMessage::UnitFinished { |
| 337 | + index, |
| 338 | + elapsed, |
| 339 | + unblocked, |
| 340 | + } => match units.entry(index) { |
| 341 | + Entry::Occupied(mut e) => { |
| 342 | + let elapsed = f64::max(elapsed - e.get().data.start, 0.0); |
| 343 | + e.get_mut().data.duration = elapsed; |
| 344 | + e.get_mut().data.unblocked_units = unblocked; |
| 345 | + } |
| 346 | + Entry::Vacant(_) => { |
| 347 | + tracing::warn!("unit {index} ended, but it has no start recorded"); |
| 348 | + } |
| 349 | + }, |
| 350 | + _ => {} // skip non-timing logs |
| 351 | + } |
| 352 | + } |
| 353 | + |
| 354 | + let unit_data: Vec<_> = units |
| 355 | + .into_values() |
| 356 | + .map( |
| 357 | + |UnitEntry { |
| 358 | + mut data, |
| 359 | + sections, |
| 360 | + rmeta_time, |
| 361 | + }| { |
| 362 | + // Post-processing for compilation sections we've collected so far. |
| 363 | + data.sections = aggregate_sections(sections, data.duration, rmeta_time); |
| 364 | + data.start = round_to_centisecond(data.start); |
| 365 | + data.duration = round_to_centisecond(data.duration); |
| 366 | + data |
| 367 | + }, |
| 368 | + ) |
| 369 | + .sorted_unstable_by(|a, b| a.start.partial_cmp(&b.start).unwrap()) |
| 370 | + .collect(); |
| 371 | + |
| 372 | + if unit_data.is_empty() { |
| 373 | + anyhow::bail!("no timing data found in log"); |
| 374 | + } |
| 375 | + |
| 376 | + ctx.unit_data = unit_data; |
| 377 | + ctx.concurrency = compute_concurrency(&ctx.unit_data); |
| 378 | + |
| 379 | + Ok(ctx) |
| 380 | +} |
0 commit comments