// run_benchmark.rs
// Copyright 2019-2022 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use anyhow::{Context, Result};
use std::{
    collections::{HashMap, HashSet},
    env,
    path::Path,
    process::{Command, Stdio},
};

mod utils;
  12. /// The list of the examples of the benchmark name and binary relative path
  13. fn get_all_benchmarks() -> Vec<(String, String)> {
  14. vec![
  15. (
  16. "tauri_hello_world".into(),
  17. format!(
  18. "tests/target/{}/release/bench_helloworld",
  19. utils::get_target()
  20. ),
  21. ),
  22. (
  23. "tauri_cpu_intensive".into(),
  24. format!(
  25. "tests/target/{}/release/bench_cpu_intensive",
  26. utils::get_target()
  27. ),
  28. ),
  29. (
  30. "tauri_3mb_transfer".into(),
  31. format!(
  32. "tests/target/{}/release/bench_files_transfer",
  33. utils::get_target()
  34. ),
  35. ),
  36. ]
  37. }
/// Runs each benchmark example under `strace -c -f` (Linux only) and records
/// the per-example thread count and total syscall count into `new_data`.
///
/// Errors are propagated for temp-file creation, process spawning/waiting,
/// and reading the strace summary back.
fn run_strace_benchmarks(new_data: &mut utils::BenchResult) -> Result<()> {
    use std::io::Read;

    let mut thread_count = HashMap::<String, u64>::new();
    let mut syscall_count = HashMap::<String, u64>::new();

    for (name, example_exe) in get_all_benchmarks() {
        // strace writes its `-c` summary to the `-o` file; use a temp file we
        // can read back after the benchmarked process exits.
        let mut file = tempfile::NamedTempFile::new()?;

        Command::new("strace")
            .args(&[
                "-c",
                "-f",
                "-o",
                file.path().to_str().unwrap(),
                utils::bench_root_path().join(example_exe).to_str().unwrap(),
            ])
            .stdout(Stdio::inherit())
            .spawn()?
            .wait()?;

        let mut output = String::new();
        file.as_file_mut().read_to_string(&mut output)?;

        let strace_result = utils::parse_strace_output(&output);
        // `clone` calls + 1 (the initial thread) approximates the thread
        // count; a binary that never clones still counts as one thread.
        let clone = strace_result.get("clone").map(|d| d.calls).unwrap_or(0) + 1;
        // NOTE(review): assumes parse_strace_output always emits a "total"
        // row — confirm against utils::parse_strace_output.
        let total = strace_result.get("total").unwrap().calls;
        thread_count.insert(name.to_string(), clone);
        syscall_count.insert(name.to_string(), total);
    }

    new_data.thread_count = thread_count;
    new_data.syscall_count = syscall_count;
    Ok(())
}
  67. fn run_max_mem_benchmark() -> Result<HashMap<String, u64>> {
  68. let mut results = HashMap::<String, u64>::new();
  69. for (name, example_exe) in get_all_benchmarks() {
  70. let benchmark_file = utils::target_dir().join(format!("mprof{}_.dat", name));
  71. let benchmark_file = benchmark_file.to_str().unwrap();
  72. let proc = Command::new("mprof")
  73. .args(&[
  74. "run",
  75. "-C",
  76. "-o",
  77. benchmark_file,
  78. utils::bench_root_path().join(example_exe).to_str().unwrap(),
  79. ])
  80. .stdout(Stdio::null())
  81. .stderr(Stdio::piped())
  82. .spawn()?;
  83. let proc_result = proc.wait_with_output()?;
  84. println!("{:?}", proc_result);
  85. results.insert(
  86. name.to_string(),
  87. utils::parse_max_mem(&benchmark_file).unwrap(),
  88. );
  89. }
  90. Ok(results)
  91. }
  92. fn rlib_size(target_dir: &std::path::Path, prefix: &str) -> u64 {
  93. let mut size = 0;
  94. let mut seen = std::collections::HashSet::new();
  95. for entry in std::fs::read_dir(target_dir.join("deps")).unwrap() {
  96. let entry = entry.unwrap();
  97. let os_str = entry.file_name();
  98. let name = os_str.to_str().unwrap();
  99. if name.starts_with(prefix) && name.ends_with(".rlib") {
  100. let start = name.split('-').next().unwrap().to_string();
  101. if seen.contains(&start) {
  102. println!("skip {}", name);
  103. } else {
  104. seen.insert(start);
  105. size += entry.metadata().unwrap().len();
  106. println!("check size {} {}", name, size);
  107. }
  108. }
  109. }
  110. assert!(size > 0);
  111. size
  112. }
  113. fn get_binary_sizes(target_dir: &Path) -> Result<HashMap<String, u64>> {
  114. let mut sizes = HashMap::<String, u64>::new();
  115. let wry_size = rlib_size(&target_dir, "libwry");
  116. println!("wry {} bytes", wry_size);
  117. sizes.insert("wry_rlib".to_string(), wry_size);
  118. // add size for all EXEC_TIME_BENCHMARKS
  119. for (name, example_exe) in get_all_benchmarks() {
  120. let meta = std::fs::metadata(example_exe).unwrap();
  121. sizes.insert(name.to_string(), meta.len());
  122. }
  123. Ok(sizes)
  124. }
/// (target OS, target triples) pairs used by `cargo_deps` to count the
/// dependency tree size per platform; each OS maps to every triple we track
/// for it.
const TARGETS: &[(&str, &[&str])] = &[
    (
        "Windows",
        &[
            "x86_64-pc-windows-gnu",
            "i686-pc-windows-gnu",
            "i686-pc-windows-msvc",
            "x86_64-pc-windows-msvc",
        ],
    ),
    (
        "Linux",
        &[
            "x86_64-unknown-linux-gnu",
            "i686-unknown-linux-gnu",
            "aarch64-unknown-linux-gnu",
        ],
    ),
    ("macOS", &["x86_64-apple-darwin", "aarch64-apple-darwin"]),
];
  146. fn cargo_deps() -> HashMap<String, usize> {
  147. let mut results = HashMap::new();
  148. for (os, targets) in TARGETS {
  149. for target in *targets {
  150. let mut cmd = Command::new("cargo");
  151. cmd.arg("tree");
  152. cmd.arg("--no-dedupe");
  153. cmd.args(&["--edges", "normal"]);
  154. cmd.args(&["--prefix", "none"]);
  155. cmd.args(&["--target", target]);
  156. cmd.current_dir(&utils::tauri_root_path());
  157. let full_deps = cmd.output().expect("failed to run cargo tree").stdout;
  158. let full_deps = String::from_utf8(full_deps).expect("cargo tree output not utf-8");
  159. let count = full_deps.lines().collect::<HashSet<_>>().len() - 1; // output includes wry itself
  160. // set the count to the highest count seen for this OS
  161. let existing = results.entry(os.to_string()).or_default();
  162. *existing = count.max(*existing);
  163. assert!(count > 10); // sanity check
  164. }
  165. }
  166. results
  167. }
/// The hyperfine statistics we keep for each benchmark.
const RESULT_KEYS: &[&str] = &["mean", "stddev", "user", "system", "min", "max"];

/// Runs all benchmark examples under `hyperfine` and returns, per benchmark
/// name, the timing statistics listed in `RESULT_KEYS` taken from
/// hyperfine's JSON export.
///
/// Returns an error if the exported JSON cannot be read.
fn run_exec_time(target_dir: &Path) -> Result<HashMap<String, HashMap<String, f64>>> {
    let benchmark_file = target_dir.join("hyperfine_results.json");
    let benchmark_file = benchmark_file.to_str().unwrap();

    // Base hyperfine invocation; each benchmark binary is appended below as
    // one command to measure.
    let mut command = [
        "hyperfine",
        "--export-json",
        benchmark_file,
        "--show-output",
        "--warmup",
        "3",
    ]
    .iter()
    .map(|s| s.to_string())
    .collect::<Vec<_>>();

    for (_, example_exe) in get_all_benchmarks() {
        command.push(
            utils::bench_root_path()
                .join(example_exe)
                .to_str()
                .unwrap()
                .to_string(),
        );
    }

    utils::run(&command.iter().map(|s| s.as_ref()).collect::<Vec<_>>());

    let mut results = HashMap::<String, HashMap<String, f64>>::new();
    let hyperfine_results = utils::read_json(benchmark_file)?;
    // hyperfine reports its results in the same order the commands were
    // given, so zipping with `get_all_benchmarks()` pairs each JSON result
    // with its benchmark name.
    for ((name, _), data) in get_all_benchmarks().iter().zip(
        hyperfine_results
            .as_object()
            .unwrap()
            .get("results")
            .unwrap()
            .as_array()
            .unwrap(),
    ) {
        let data = data.as_object().unwrap().clone();
        results.insert(
            name.to_string(),
            // Keep only the statistics we publish, converted to f64.
            data
                .into_iter()
                .filter(|(key, _)| RESULT_KEYS.contains(&key.as_str()))
                .map(|(key, val)| (key, val.as_f64().unwrap()))
                .collect(),
        );
    }

    Ok(results)
}
/// Entry point: runs every benchmark suite, assembles a `BenchResult`, prints
/// it as JSON between marker lines, and writes it to `<target>/bench.json`.
fn main() -> Result<()> {
    // download big files if not present
    let json_3mb = utils::home_path().join(".tauri_3mb.json");
    if !json_3mb.exists() {
        utils::download_file(
            "https://github.com/lemarier/tauri-test/releases/download/v2.0.0/json_3mb.json",
            json_3mb,
        );
    }

    println!("Starting tauri benchmark");

    let target_dir = utils::target_dir();
    // Benchmark binaries are referenced by paths relative to the bench root.
    env::set_current_dir(&utils::bench_root_path())?;

    // UTC timestamp for this run, e.g. 2022-01-31T12:00:00Z.
    let format =
        time::format_description::parse("[year]-[month]-[day]T[hour]:[minute]:[second]Z").unwrap();
    let now = time::OffsetDateTime::now_utc();

    let mut new_data = utils::BenchResult {
        created_at: format!("{}", now.format(&format).unwrap()),
        // Current git commit, so results can be correlated with the source.
        sha1: utils::run_collect(&["git", "rev-parse", "HEAD"])
            .0
            .trim()
            .to_string(),
        exec_time: run_exec_time(&target_dir)?,
        binary_size: get_binary_sizes(&target_dir)?,
        cargo_deps: cargo_deps(),
        ..Default::default()
    };

    // strace and mprof are only available on Linux.
    if cfg!(target_os = "linux") {
        run_strace_benchmarks(&mut new_data)?;
        new_data.max_memory = run_max_mem_benchmark()?;
    }

    println!("===== <BENCHMARK RESULTS>");
    serde_json::to_writer_pretty(std::io::stdout(), &new_data)?;
    println!("\n===== </BENCHMARK RESULTS>");

    if let Some(filename) = target_dir.join("bench.json").to_str() {
        utils::write_json(filename, &serde_json::to_value(&new_data)?)?;
    } else {
        eprintln!("Cannot write bench.json, path is invalid");
    }

    Ok(())
}