/build/source/nativelink-worker/src/running_actions_manager.rs
Line | Count | Source |
1 | | // Copyright 2024 The NativeLink Authors. All rights reserved. |
2 | | // |
3 | | // Licensed under the Functional Source License, Version 1.1, Apache 2.0 Future License (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // See LICENSE file for details |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | |
15 | | use core::cmp::min; |
16 | | use core::convert::Into; |
17 | | use core::fmt::Debug; |
18 | | use core::pin::Pin; |
19 | | use core::sync::atomic::{AtomicBool, Ordering}; |
20 | | use core::time::Duration; |
21 | | use std::borrow::Cow; |
22 | | use std::collections::vec_deque::VecDeque; |
23 | | use std::collections::{HashMap, HashSet}; |
24 | | use std::env; |
25 | | use std::ffi::{OsStr, OsString}; |
26 | | #[cfg(target_family = "unix")] |
27 | | use std::fs::Permissions; |
28 | | #[cfg(target_family = "unix")] |
29 | | use std::os::unix::fs::{MetadataExt, PermissionsExt}; |
30 | | use std::path::{Path, PathBuf}; |
31 | | use std::process::Stdio; |
32 | | use std::sync::{Arc, Weak}; |
33 | | use std::time::SystemTime; |
34 | | |
35 | | use bytes::{Bytes, BytesMut}; |
36 | | use filetime::{FileTime, set_file_mtime}; |
37 | | use formatx::Template; |
38 | | use futures::future::{ |
39 | | BoxFuture, Future, FutureExt, TryFutureExt, try_join, try_join_all, try_join3, |
40 | | }; |
41 | | use futures::stream::{FuturesUnordered, StreamExt, TryStreamExt}; |
42 | | use nativelink_config::cas_server::{ |
43 | | EnvironmentSource, UploadActionResultConfig, UploadCacheResultsStrategy, |
44 | | }; |
45 | | use nativelink_error::{Code, Error, ResultExt, make_err, make_input_err}; |
46 | | use nativelink_metric::MetricsComponent; |
47 | | use nativelink_proto::build::bazel::remote::execution::v2::{ |
48 | | Action, ActionResult as ProtoActionResult, Command as ProtoCommand, |
49 | | Directory as ProtoDirectory, Directory, DirectoryNode, ExecuteResponse, FileNode, SymlinkNode, |
50 | | Tree as ProtoTree, UpdateActionResultRequest, |
51 | | }; |
52 | | use nativelink_proto::com::github::trace_machina::nativelink::remote_execution::{ |
53 | | HistoricalExecuteResponse, StartExecute, |
54 | | }; |
55 | | use nativelink_store::ac_utils::{ |
56 | | ESTIMATED_DIGEST_SIZE, compute_buf_digest, get_and_decode_digest, serialize_and_upload_message, |
57 | | }; |
58 | | use nativelink_store::cas_utils::is_zero_digest; |
59 | | use nativelink_store::fast_slow_store::FastSlowStore; |
60 | | use nativelink_store::filesystem_store::{FileEntry, FilesystemStore}; |
61 | | use nativelink_store::grpc_store::GrpcStore; |
62 | | use nativelink_util::action_messages::{ |
63 | | ActionInfo, ActionResult, DirectoryInfo, ExecutionMetadata, FileInfo, NameOrPath, OperationId, |
64 | | SymlinkInfo, to_execute_response, |
65 | | }; |
66 | | use nativelink_util::common::{DigestInfo, fs}; |
67 | | use nativelink_util::digest_hasher::{DigestHasher, DigestHasherFunc}; |
68 | | use nativelink_util::metrics_utils::{AsyncCounterWrapper, CounterWithTime}; |
69 | | use nativelink_util::store_trait::{Store, StoreLike, UploadSizeInfo}; |
70 | | use nativelink_util::{background_spawn, spawn, spawn_blocking}; |
71 | | use parking_lot::Mutex; |
72 | | use prost::Message; |
73 | | use relative_path::RelativePath; |
74 | | use scopeguard::{ScopeGuard, guard}; |
75 | | use serde::Deserialize; |
76 | | use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt}; |
77 | | use tokio::process; |
78 | | use tokio::sync::{Notify, oneshot, watch}; |
79 | | use tokio::time::Instant; |
80 | | use tokio_stream::wrappers::ReadDirStream; |
81 | | use tonic::Request; |
82 | | use tracing::{debug, error, info, trace, warn}; |
83 | | use uuid::Uuid; |
84 | | |
85 | | /// For simplicity we use a fixed exit code for cases when our program is terminated |
86 | | /// due to a signal. |
87 | | const EXIT_CODE_FOR_SIGNAL: i32 = 9; |
88 | | |
89 | | /// Default strategy for uploading historical results. |
90 | | /// Note: If this value changes the config documentation |
91 | | /// should reflect it. |
92 | | const DEFAULT_HISTORICAL_RESULTS_STRATEGY: UploadCacheResultsStrategy = |
93 | | UploadCacheResultsStrategy::FailuresOnly; |
94 | | |
95 | | /// Valid string reasons for a failure. |
96 | | /// Note: If these change, the documentation should be updated. |
97 | | #[derive(Debug, Deserialize)] |
98 | | #[serde(rename_all = "snake_case")] |
99 | | enum SideChannelFailureReason { |
100 | | /// Task should be considered timed out. |
101 | | Timeout, |
102 | | } |
103 | | |
104 | | /// This represents the json data that can be passed from the running process |
105 | | /// to the parent via the `SideChannelFile`. See: |
106 | | /// `config::EnvironmentSource::sidechannelfile` for more details. |
107 | | /// Note: Any fields added here must be added to the documentation. |
108 | | #[derive(Debug, Deserialize, Default)] |
109 | | struct SideChannelInfo { |
110 | | /// If the task should be considered a failure and why. |
111 | | failure: Option<SideChannelFailureReason>, |
112 | | } |
113 | | |
114 | | /// Aggressively download the digests of files and make a local folder from it. This function |
115 | | /// will spawn unbounded number of futures to try and get these downloaded. The store itself |
116 | | /// should be rate limited if spawning too many requests at once is an issue. |
117 | | /// We require the `FilesystemStore` to be the `fast` store of `FastSlowStore`. This is for |
118 | | /// efficiency reasons. We will request the `FastSlowStore` to populate the entry then we will |
119 | | /// assume the `FilesystemStore` has the file available immediately after and hardlink the file |
120 | | /// to a new location. |
121 | | // Sadly we cannot use `async fn` here because the rust compiler cannot determine the auto traits |
122 | | // of the future. So we need to force this function to return a dynamic future instead. |
123 | | // see: https://github.com/rust-lang/rust/issues/78649 |
124 | 25 | pub fn download_to_directory<'a>( |
125 | 25 | cas_store: &'a FastSlowStore, |
126 | 25 | filesystem_store: Pin<&'a FilesystemStore>, |
127 | 25 | digest: &'a DigestInfo, |
128 | 25 | current_directory: &'a str, |
129 | 25 | ) -> BoxFuture<'a, Result<(), Error>> { |
130 | 25 | async move { |
131 | 25 | let directory = get_and_decode_digest::<ProtoDirectory>(cas_store, digest.into()) |
132 | 25 | .await |
133 | 25 | .err_tip(|| "Converting digest to Directory")?0 ; |
134 | 25 | let mut futures = FuturesUnordered::new(); |
135 | | |
136 | 29 | for file4 in directory.files { |
137 | 4 | let digest: DigestInfo = file |
138 | 4 | .digest |
139 | 4 | .err_tip(|| "Expected Digest to exist in Directory::file::digest")?0 |
140 | 4 | .try_into() |
141 | 4 | .err_tip(|| "In Directory::file::digest")?0 ; |
142 | 4 | let dest = format!("{}/{}", current_directory, file.name); |
143 | 4 | let (mtime, mut unix_mode) = match file.node_properties { |
144 | 1 | Some(properties) => (properties.mtime, properties.unix_mode), |
145 | 3 | None => (None, None), |
146 | | }; |
147 | | #[cfg_attr(target_family = "windows", allow(unused_assignments))] |
148 | 4 | if file.is_executable { Branch (148:16): [True: 1, False: 3]
Branch (148:16): [Folded - Ignored]
|
149 | 1 | unix_mode = Some(unix_mode.unwrap_or(0o444) | 0o111); |
150 | 3 | } |
151 | 4 | futures.push( |
152 | 4 | cas_store |
153 | 4 | .populate_fast_store(digest.into()) |
154 | 4 | .and_then(move |()| async move { |
155 | 4 | if is_zero_digest(digest) { Branch (155:28): [True: 0, False: 4]
Branch (155:28): [Folded - Ignored]
|
156 | 0 | let mut file_slot = fs::create_file(&dest).await?; |
157 | 0 | file_slot.write_all(&[]).await?; |
158 | | } |
159 | | else { |
160 | 4 | let file_entry = filesystem_store |
161 | 4 | .get_file_entry_for_digest(&digest) |
162 | 4 | .await |
163 | 4 | .err_tip(|| "During hard link")?0 ; |
164 | | // TODO: add a test for #2051: deadlock with large number of files |
165 | 8 | let src_path4 = file_entry4 .get_file_path_locked4 (|src| async move {4 Ok(PathBuf::from(src))4 }).await4 ?0 ; |
166 | 4 | fs::hard_link(&src_path, &dest) |
167 | 4 | .await |
168 | 4 | .map_err(|e| {0 |
169 | 0 | if e.code == Code::NotFound { Branch (169:40): [True: 0, False: 0]
Branch (169:40): [Folded - Ignored]
|
170 | 0 | make_err!( |
171 | 0 | Code::Internal, |
172 | | "Could not make hardlink, file was likely evicted from cache. {e:?} : {dest}\n\ |
173 | | This error often occurs when the filesystem store's max_bytes is too small for your workload.\n\ |
174 | | To fix this issue:\n\ |
175 | | 1. Increase the 'max_bytes' value in your filesystem store configuration\n\ |
176 | | 2. Example: Change 'max_bytes: 10000000000' to 'max_bytes: 50000000000' (or higher)\n\ |
177 | | 3. The setting is typically found in your nativelink.json config under:\n\ |
178 | | stores -> [your_filesystem_store] -> filesystem -> eviction_policy -> max_bytes\n\ |
179 | | 4. Restart NativeLink after making the change\n\n\ |
180 | | If this error persists after increasing max_bytes several times, please report at:\n\ |
181 | | https://github.com/TraceMachina/nativelink/issues\n\ |
182 | | Include your config file and both server and client logs to help us assist you." |
183 | | ) |
184 | | } else { |
185 | 0 | make_err!(Code::Internal, "Could not make hardlink, {e:?} : {dest}") |
186 | | } |
187 | 0 | })?; |
188 | | } |
189 | | #[cfg(target_family = "unix")] |
190 | 4 | if let Some(unix_mode1 ) = unix_mode { Branch (190:32): [True: 1, False: 3]
Branch (190:32): [Folded - Ignored]
|
191 | 1 | fs::set_permissions(&dest, Permissions::from_mode(unix_mode)) |
192 | 1 | .await |
193 | 1 | .err_tip(|| {0 |
194 | 0 | format!( |
195 | 0 | "Could not set unix mode in download_to_directory {dest}" |
196 | | ) |
197 | 0 | })?; |
198 | 3 | } |
199 | 4 | if let Some(mtime1 ) = mtime { Branch (199:32): [True: 1, False: 3]
Branch (199:32): [Folded - Ignored]
|
200 | 1 | spawn_blocking!("download_to_directory_set_mtime", move || { |
201 | 1 | set_file_mtime( |
202 | 1 | &dest, |
203 | 1 | FileTime::from_unix_time(mtime.seconds, mtime.nanos as u32), |
204 | | ) |
205 | 1 | .err_tip(|| {0 |
206 | 0 | format!("Failed to set mtime in download_to_directory {dest}") |
207 | 0 | }) |
208 | 1 | }) |
209 | 1 | .await |
210 | 1 | .err_tip( |
211 | | || "Failed to launch spawn_blocking in download_to_directory", |
212 | 0 | )??; |
213 | 3 | } |
214 | 4 | Ok(()) |
215 | 8 | }) |
216 | 4 | .map_err(move |e| e0 .append0 (format!0 ("for digest {digest}"0 ))) |
217 | 4 | .boxed(), |
218 | | ); |
219 | | } |
220 | | |
221 | 32 | for directory7 in directory.directories { |
222 | 7 | let digest: DigestInfo = directory |
223 | 7 | .digest |
224 | 7 | .err_tip(|| "Expected Digest to exist in Directory::directories::digest")?0 |
225 | 7 | .try_into() |
226 | 7 | .err_tip(|| "In Directory::file::digest")?0 ; |
227 | 7 | let new_directory_path = format!("{}/{}", current_directory, directory.name); |
228 | 7 | futures.push( |
229 | 7 | async move { |
230 | 7 | fs::create_dir(&new_directory_path) |
231 | 7 | .await |
232 | 7 | .err_tip(|| format!("Could not create directory {new_directory_path}"0 ))?0 ; |
233 | 7 | download_to_directory( |
234 | 7 | cas_store, |
235 | 7 | filesystem_store, |
236 | 7 | &digest, |
237 | 7 | &new_directory_path, |
238 | 7 | ) |
239 | 7 | .await |
240 | 7 | .err_tip(|| format!("in download_to_directory : {new_directory_path}"0 ))?0 ; |
241 | 7 | Ok(()) |
242 | 7 | } |
243 | 7 | .boxed(), |
244 | | ); |
245 | | } |
246 | | |
247 | | #[cfg(target_family = "unix")] |
248 | 26 | for symlink_node1 in directory.symlinks { |
249 | 1 | let dest = format!("{}/{}", current_directory, symlink_node.name); |
250 | 1 | futures.push( |
251 | 1 | async move { |
252 | 1 | fs::symlink(&symlink_node.target, &dest).await.err_tip(|| {0 |
253 | 0 | format!( |
254 | 0 | "Could not create symlink {} -> {}", |
255 | | symlink_node.target, dest |
256 | | ) |
257 | 0 | })?; |
258 | 1 | Ok(()) |
259 | 1 | } |
260 | 1 | .boxed(), |
261 | | ); |
262 | | } |
263 | | |
264 | 37 | while futures.try_next().await?0 .is_some() {}12 Branch (264:15): [True: 12, False: 25]
Branch (264:15): [Folded - Ignored]
|
265 | 25 | Ok(()) |
266 | 25 | } |
267 | 25 | .boxed() |
268 | 25 | } |
269 | | |
270 | | /// Prepares action inputs by first trying the directory cache (if available), |
271 | | /// then falling back to traditional `download_to_directory`. |
272 | | /// |
273 | | /// This provides a significant performance improvement for repeated builds |
274 | | /// with the same input directories. |
275 | 15 | pub async fn prepare_action_inputs( |
276 | 15 | directory_cache: &Option<Arc<crate::directory_cache::DirectoryCache>>, |
277 | 15 | cas_store: &FastSlowStore, |
278 | 15 | filesystem_store: Pin<&FilesystemStore>, |
279 | 15 | digest: &DigestInfo, |
280 | 15 | work_directory: &str, |
281 | 15 | ) -> Result<(), Error> { |
282 | | // Try cache first if available |
283 | 15 | if let Some(cache0 ) = directory_cache { Branch (283:12): [Folded - Ignored]
Branch (283:12): [Folded - Ignored]
Branch (283:12): [True: 0, False: 15]
|
284 | 0 | match cache |
285 | 0 | .get_or_create(*digest, Path::new(work_directory)) |
286 | 0 | .await |
287 | | { |
288 | 0 | Ok(cache_hit) => { |
289 | 0 | trace!( |
290 | | ?digest, |
291 | 0 | work_directory, cache_hit, "Successfully prepared inputs via directory cache" |
292 | | ); |
293 | 0 | return Ok(()); |
294 | | } |
295 | 0 | Err(e) => { |
296 | 0 | warn!( |
297 | | ?digest, |
298 | | ?e, |
299 | 0 | "Directory cache failed, falling back to traditional download" |
300 | | ); |
301 | | // Fall through to traditional path |
302 | | } |
303 | | } |
304 | 15 | } |
305 | | |
306 | | // Traditional path (cache disabled or failed) |
307 | 15 | download_to_directory(cas_store, filesystem_store, digest, work_directory).await |
308 | 15 | } |
309 | | |
310 | | #[cfg(target_family = "windows")] |
311 | | fn is_executable(_metadata: &std::fs::Metadata, full_path: &impl AsRef<Path>) -> bool { |
312 | | static EXECUTABLE_EXTENSIONS: &[&str] = &["exe", "bat", "com"]; |
313 | | EXECUTABLE_EXTENSIONS |
314 | | .iter() |
315 | | .any(|ext| full_path.as_ref().extension().map_or(false, |v| v == *ext)) |
316 | | } |
317 | | |
318 | | #[cfg(target_family = "unix")] |
319 | 7 | fn is_executable(metadata: &std::fs::Metadata, _full_path: &impl AsRef<Path>) -> bool { |
320 | 7 | (metadata.mode() & 0o111) != 0 |
321 | 7 | } |
322 | | |
323 | | type DigestUploader = Arc<tokio::sync::OnceCell<()>>; |
324 | | |
325 | 7 | async fn upload_file( |
326 | 7 | cas_store: Pin<&impl StoreLike>, |
327 | 7 | full_path: impl AsRef<Path> + Debug + Send + Sync, |
328 | 7 | hasher: DigestHasherFunc, |
329 | 7 | metadata: std::fs::Metadata, |
330 | 7 | digest_uploaders: Arc<Mutex<HashMap<DigestInfo, DigestUploader>>>, |
331 | 7 | ) -> Result<FileInfo, Error> { |
332 | 7 | let is_executable = is_executable(&metadata, &full_path); |
333 | 7 | let file_size = metadata.len(); |
334 | 7 | let file = fs::open_file(&full_path, 0, u64::MAX) |
335 | 7 | .await |
336 | 7 | .err_tip(|| format!("Could not open file {full_path:?}"0 ))?0 ; |
337 | | |
338 | 7 | let (digest, mut file) = hasher |
339 | 7 | .hasher() |
340 | 7 | .digest_for_file(&full_path, file.into_inner(), Some(file_size)) |
341 | 7 | .await |
342 | 7 | .err_tip(|| format!("Failed to hash file in digest_for_file failed for {full_path:?}"0 ))?0 ; |
343 | | |
344 | 7 | let digest_uploader = match digest_uploaders.lock().entry(digest) { |
345 | 0 | std::collections::hash_map::Entry::Occupied(occupied_entry) => occupied_entry.get().clone(), |
346 | 7 | std::collections::hash_map::Entry::Vacant(vacant_entry) => vacant_entry |
347 | 7 | .insert(Arc::new(tokio::sync::OnceCell::new())) |
348 | 7 | .clone(), |
349 | | }; |
350 | | |
351 | | // Only upload a file with a given hash once. The file may exist multiple |
352 | | // times in the output with different names. |
353 | 7 | digest_uploader |
354 | 14 | .get_or_try_init7 (async || { |
355 | | // Only upload if the digest doesn't already exist, this should be |
356 | | // a much cheaper operation than an upload. |
357 | 7 | let cas_store = cas_store.as_store_driver_pin(); |
358 | 7 | let store_key: nativelink_util::store_trait::StoreKey<'_> = digest.into(); |
359 | 7 | let has_start = std::time::Instant::now(); |
360 | 7 | if cas_store Branch (360:16): [Folded - Ignored]
Branch (360:16): [Folded - Ignored]
Branch (360:16): [True: 1, False: 2]
Branch (360:16): [True: 1, False: 3]
|
361 | 7 | .has(store_key.borrow()) |
362 | 7 | .await |
363 | 7 | .is_ok_and(|result| result.is_some()) |
364 | | { |
365 | 2 | trace!( |
366 | | ?digest, |
367 | 2 | has_elapsed_ms = has_start.elapsed().as_millis(), |
368 | 2 | "upload_file: digest already exists in CAS, skipping upload", |
369 | | ); |
370 | 2 | return Ok(()); |
371 | 5 | } |
372 | 5 | trace!( |
373 | | ?digest, |
374 | 5 | has_elapsed_ms = has_start.elapsed().as_millis(), |
375 | 5 | file_size = digest.size_bytes(), |
376 | 5 | "upload_file: digest not in CAS, starting upload", |
377 | | ); |
378 | | |
379 | 5 | file.rewind().await.err_tip(|| "Could not rewind file")?0 ; |
380 | | |
381 | | // Note: For unknown reasons we appear to be hitting: |
382 | | // https://github.com/rust-lang/rust/issues/92096 |
383 | | // or a similar issue if we try to use the non-store driver function, so we |
384 | | // are using the store driver function here. |
385 | 5 | let store_key_for_upload = store_key.clone(); |
386 | 5 | let file_upload_start = std::time::Instant::now(); |
387 | 5 | let upload_result = cas_store |
388 | 5 | .update_with_whole_file( |
389 | 5 | store_key_for_upload, |
390 | 5 | full_path.as_ref().into(), |
391 | 5 | file, |
392 | 5 | UploadSizeInfo::ExactSize(digest.size_bytes()), |
393 | 5 | ) |
394 | 5 | .await |
395 | 5 | .map(|_slot| ()); |
396 | 5 | trace!( |
397 | | ?digest, |
398 | 5 | upload_elapsed_ms = file_upload_start.elapsed().as_millis(), |
399 | 5 | success = upload_result.is_ok(), |
400 | 5 | "upload_file: update_with_whole_file completed", |
401 | | ); |
402 | | |
403 | 5 | match upload_result { |
404 | 5 | Ok(()) => Ok(()), |
405 | 0 | Err(err) => { |
406 | | // Output uploads run concurrently and may overlap (e.g. a file is listed |
407 | | // both as an output file and inside an output directory). When another |
408 | | // upload has already moved the file into CAS, this update can fail with |
409 | | // NotFound even though the digest is now present. Per the RE spec, missing |
410 | | // outputs should be ignored, so treat this as success if the digest exists. |
411 | 0 | if err.code == Code::NotFound Branch (411:24): [Folded - Ignored]
Branch (411:24): [Folded - Ignored]
Branch (411:24): [True: 0, False: 0]
Branch (411:24): [True: 0, False: 0]
|
412 | 0 | && cas_store Branch (412:28): [Folded - Ignored]
Branch (412:28): [Folded - Ignored]
Branch (412:28): [True: 0, False: 0]
Branch (412:28): [True: 0, False: 0]
|
413 | 0 | .has(store_key.borrow()) |
414 | 0 | .await |
415 | 0 | .is_ok_and(|result| result.is_some()) |
416 | | { |
417 | 0 | Ok(()) |
418 | | } else { |
419 | 0 | Err(err) |
420 | | } |
421 | | } |
422 | | } |
423 | 14 | }) |
424 | 7 | .await |
425 | 7 | .err_tip(|| format!("for {full_path:?}"0 ))?0 ; |
426 | | |
427 | 7 | let name = full_path |
428 | 7 | .as_ref() |
429 | 7 | .file_name() |
430 | 7 | .err_tip(|| format!("Expected file_name to exist on {full_path:?}"0 ))?0 |
431 | 7 | .to_str() |
432 | 7 | .err_tip(|| {0 |
433 | 0 | make_err!( |
434 | 0 | Code::Internal, |
435 | | "Could not convert {:?} to string", |
436 | | full_path |
437 | | ) |
438 | 0 | })? |
439 | 7 | .to_string(); |
440 | | |
441 | 7 | Ok(FileInfo { |
442 | 7 | name_or_path: NameOrPath::Name(name), |
443 | 7 | digest, |
444 | 7 | is_executable, |
445 | 7 | }) |
446 | 7 | } |
447 | | |
448 | 2 | async fn upload_symlink( |
449 | 2 | full_path: impl AsRef<Path> + Debug, |
450 | 2 | full_work_directory_path: impl AsRef<Path>, |
451 | 2 | ) -> Result<SymlinkInfo, Error> { |
452 | 2 | let full_target_path = fs::read_link(full_path.as_ref()) |
453 | 2 | .await |
454 | 2 | .err_tip(|| format!("Could not get read_link path of {full_path:?}"0 ))?0 ; |
455 | | |
456 | | // Detect if our symlink is inside our work directory, if it is find the |
457 | | // relative path otherwise use the absolute path. |
458 | 2 | let target = if full_target_path.starts_with(full_work_directory_path.as_ref()) { Branch (458:21): [Folded - Ignored]
Branch (458:21): [Folded - Ignored]
Branch (458:21): [True: 0, False: 1]
Branch (458:21): [True: 0, False: 1]
|
459 | 0 | let full_target_path = RelativePath::from_path(&full_target_path) |
460 | 0 | .map_err(|v| make_err!(Code::Internal, "Could not convert {} to RelativePath", v))?; |
461 | 0 | RelativePath::from_path(full_work_directory_path.as_ref()) |
462 | 0 | .map_err(|v| make_err!(Code::Internal, "Could not convert {} to RelativePath", v))? |
463 | 0 | .relative(full_target_path) |
464 | 0 | .normalize() |
465 | 0 | .into_string() |
466 | | } else { |
467 | 2 | full_target_path |
468 | 2 | .to_str() |
469 | 2 | .err_tip(|| {0 |
470 | 0 | make_err!( |
471 | 0 | Code::Internal, |
472 | | "Could not convert '{:?}' to string", |
473 | | full_target_path |
474 | | ) |
475 | 0 | })? |
476 | 2 | .to_string() |
477 | | }; |
478 | | |
479 | 2 | let name = full_path |
480 | 2 | .as_ref() |
481 | 2 | .file_name() |
482 | 2 | .err_tip(|| format!("Expected file_name to exist on {full_path:?}"0 ))?0 |
483 | 2 | .to_str() |
484 | 2 | .err_tip(|| {0 |
485 | 0 | make_err!( |
486 | 0 | Code::Internal, |
487 | | "Could not convert {:?} to string", |
488 | | full_path |
489 | | ) |
490 | 0 | })? |
491 | 2 | .to_string(); |
492 | | |
493 | 2 | Ok(SymlinkInfo { |
494 | 2 | name_or_path: NameOrPath::Name(name), |
495 | 2 | target, |
496 | 2 | }) |
497 | 2 | } |
498 | | |
499 | 3 | fn upload_directory<'a, P: AsRef<Path> + Debug + Send + Sync + Clone + 'a>( |
500 | 3 | cas_store: Pin<&'a impl StoreLike>, |
501 | 3 | full_dir_path: P, |
502 | 3 | full_work_directory: &'a str, |
503 | 3 | hasher: DigestHasherFunc, |
504 | 3 | digest_uploaders: Arc<Mutex<HashMap<DigestInfo, DigestUploader>>>, |
505 | 3 | ) -> BoxFuture<'a, Result<(Directory, VecDeque<ProtoDirectory>), Error>> { |
506 | 3 | Box::pin(async move { |
507 | 3 | let file_futures = FuturesUnordered::new(); |
508 | 3 | let dir_futures = FuturesUnordered::new(); |
509 | 3 | let symlink_futures = FuturesUnordered::new(); |
510 | | { |
511 | 3 | let (_permit, dir_handle) = fs::read_dir(&full_dir_path) |
512 | 3 | .await |
513 | 3 | .err_tip(|| format!("Error reading dir for reading {full_dir_path:?}"0 ))?0 |
514 | 3 | .into_inner(); |
515 | 3 | let mut dir_stream = ReadDirStream::new(dir_handle); |
516 | | // Note: Try very hard to not leave file descriptors open. Try to keep them as short |
517 | | // lived as possible. This is why we iterate the directory and then build a bunch of |
518 | | // futures with all the work we are wanting to do then execute it. It allows us to |
519 | | // close the directory iterator file descriptor, then open the child files/folders. |
520 | 8 | while let Some(entry_result5 ) = dir_stream.next().await { Branch (520:23): [Folded - Ignored]
Branch (520:23): [Folded - Ignored]
Branch (520:23): [True: 1, False: 1]
Branch (520:23): [True: 4, False: 2]
|
521 | 5 | let entry = entry_result.err_tip(|| "Error while iterating directory")?0 ; |
522 | 5 | let file_type = entry |
523 | 5 | .file_type() |
524 | 5 | .await |
525 | 5 | .err_tip(|| format!("Error running file_type() on {entry:?}"0 ))?0 ; |
526 | 5 | let full_path = full_dir_path.as_ref().join(entry.path()); |
527 | 5 | if file_type.is_dir() { Branch (527:20): [Folded - Ignored]
Branch (527:20): [Folded - Ignored]
Branch (527:20): [True: 0, False: 1]
Branch (527:20): [True: 1, False: 3]
|
528 | 1 | let full_dir_path = full_dir_path.clone(); |
529 | 1 | dir_futures.push( |
530 | 1 | upload_directory( |
531 | 1 | cas_store, |
532 | 1 | full_path.clone(), |
533 | 1 | full_work_directory, |
534 | 1 | hasher, |
535 | 1 | digest_uploaders.clone(), |
536 | | ) |
537 | 1 | .and_then(|(dir, all_dirs)| async move { |
538 | 1 | let directory_name = full_path |
539 | 1 | .file_name() |
540 | 1 | .err_tip(|| {0 |
541 | 0 | format!("Expected file_name to exist on {full_dir_path:?}") |
542 | 0 | })? |
543 | 1 | .to_str() |
544 | 1 | .err_tip(|| {0 |
545 | 0 | make_err!( |
546 | 0 | Code::Internal, |
547 | | "Could not convert {:?} to string", |
548 | | full_dir_path |
549 | | ) |
550 | 0 | })? |
551 | 1 | .to_string(); |
552 | | |
553 | 1 | let digest = |
554 | 1 | serialize_and_upload_message(&dir, cas_store, &mut hasher.hasher()) |
555 | 1 | .await |
556 | 1 | .err_tip(|| format!("for {}"0 , full_path.display()0 ))?0 ; |
557 | | |
558 | 1 | Result::<(DirectoryNode, VecDeque<Directory>), Error>::Ok(( |
559 | 1 | DirectoryNode { |
560 | 1 | name: directory_name, |
561 | 1 | digest: Some(digest.into()), |
562 | 1 | }, |
563 | 1 | all_dirs, |
564 | 1 | )) |
565 | 2 | }) |
566 | 1 | .boxed(), |
567 | | ); |
568 | 4 | } else if file_type.is_file() { Branch (568:27): [Folded - Ignored]
Branch (568:27): [Folded - Ignored]
Branch (568:27): [True: 0, False: 1]
Branch (568:27): [True: 3, False: 0]
|
569 | 3 | let digest_uploaders = digest_uploaders.clone(); |
570 | 3 | file_futures.push(async move { |
571 | 3 | let metadata = fs::metadata(&full_path) |
572 | 3 | .await |
573 | 3 | .err_tip(|| format!("Could not open file {}"0 , full_path.display()0 ))?0 ; |
574 | 3 | upload_file(cas_store, &full_path, hasher, metadata, digest_uploaders) |
575 | 3 | .map_ok(TryInto::try_into) |
576 | 3 | .await?0 |
577 | 3 | }); |
578 | 1 | } else if file_type.is_symlink() { Branch (578:27): [Folded - Ignored]
Branch (578:27): [Folded - Ignored]
Branch (578:27): [True: 1, False: 0]
Branch (578:27): [True: 0, False: 0]
|
579 | 1 | symlink_futures.push( |
580 | 1 | upload_symlink(full_path, &full_work_directory) |
581 | 1 | .map(|symlink| symlink?0 .try_into()), |
582 | | ); |
583 | 0 | } |
584 | | } |
585 | | } |
586 | | |
587 | 3 | let (mut file_nodes, dir_entries, mut symlinks) = try_join3( |
588 | 3 | file_futures.try_collect::<Vec<FileNode>>(), |
589 | 3 | dir_futures.try_collect::<Vec<(DirectoryNode, VecDeque<Directory>)>>(), |
590 | 3 | symlink_futures.try_collect::<Vec<SymlinkNode>>(), |
591 | 3 | ) |
592 | 3 | .await?0 ; |
593 | | |
594 | 3 | let mut directory_nodes = Vec::with_capacity(dir_entries.len()); |
595 | | // For efficiency we use a deque because it allows cheap concat of Vecs. |
596 | | // We make the assumption here that when performance is important it is because |
597 | | // our directory is quite large. This allows us to cheaply merge large amounts of |
598 | | // directories into one VecDeque. Then after we are done we need to collapse it |
599 | | // down into a single Vec. |
600 | 3 | let mut all_child_directories = VecDeque::with_capacity(dir_entries.len()); |
601 | 4 | for (directory_node1 , mut recursive_child_directories1 ) in dir_entries { |
602 | 1 | directory_nodes.push(directory_node); |
603 | 1 | all_child_directories.append(&mut recursive_child_directories); |
604 | 1 | } |
605 | | |
606 | 3 | file_nodes.sort_unstable_by(|a, b| a.name1 .cmp1 (&b.name1 )); |
607 | 3 | directory_nodes.sort_unstable_by(|a, b| a.name0 .cmp0 (&b.name0 )); |
608 | 3 | symlinks.sort_unstable_by(|a, b| a.name0 .cmp0 (&b.name0 )); |
609 | | |
610 | 3 | let directory = Directory { |
611 | 3 | files: file_nodes, |
612 | 3 | directories: directory_nodes, |
613 | 3 | symlinks, |
614 | 3 | node_properties: None, // We don't support file properties. |
615 | 3 | }; |
616 | 3 | all_child_directories.push_back(directory.clone()); |
617 | | |
618 | 3 | Ok((directory, all_child_directories)) |
619 | 3 | }) |
620 | 3 | } |
621 | | |
622 | 0 | async fn process_side_channel_file( |
623 | 0 | side_channel_file: Cow<'_, OsStr>, |
624 | 0 | args: &[&OsStr], |
625 | 0 | timeout: Duration, |
626 | 0 | ) -> Result<Option<Error>, Error> { |
627 | 0 | let mut json_contents = String::new(); |
628 | | { |
629 | | // Note: Scoping `file_slot` allows the file_slot semaphore to be released faster. |
630 | 0 | let mut file_slot = match fs::open_file(side_channel_file, 0, u64::MAX).await { |
631 | 0 | Ok(file_slot) => file_slot, |
632 | 0 | Err(e) => { |
633 | 0 | if e.code != Code::NotFound { Branch (633:20): [Folded - Ignored]
Branch (633:20): [Folded - Ignored]
Branch (633:20): [True: 0, False: 0]
|
634 | 0 | return Err(e).err_tip(|| "Error opening side channel file"); |
635 | 0 | } |
636 | | // Note: If file does not exist, it's ok. Users are not required to create this file. |
637 | 0 | return Ok(None); |
638 | | } |
639 | | }; |
640 | 0 | file_slot |
641 | 0 | .read_to_string(&mut json_contents) |
642 | 0 | .await |
643 | 0 | .err_tip(|| "Error reading side channel file")?; |
644 | | } |
645 | | |
646 | 0 | let side_channel_info: SideChannelInfo = |
647 | 0 | serde_json5::from_str(&json_contents).map_err(|e| { |
648 | 0 | make_input_err!( |
649 | | "Could not convert contents of side channel file (json) to SideChannelInfo : {e:?}" |
650 | | ) |
651 | 0 | })?; |
652 | 0 | Ok(side_channel_info.failure.map(|failure| match failure { |
653 | 0 | SideChannelFailureReason::Timeout => Error::new( |
654 | 0 | Code::DeadlineExceeded, |
655 | 0 | format!( |
656 | 0 | "Command '{}' timed out after {} seconds", |
657 | 0 | args.join(OsStr::new(" ")).to_string_lossy(), |
658 | 0 | timeout.as_secs_f32() |
659 | | ), |
660 | | ), |
661 | 0 | })) |
662 | 0 | } |
663 | | |
664 | 17 | async fn do_cleanup( |
665 | 17 | running_actions_manager: &Arc<RunningActionsManagerImpl>, |
666 | 17 | operation_id: &OperationId, |
667 | 17 | action_directory: &str, |
668 | 17 | ) -> Result<(), Error> { |
669 | | // Mark this operation as being cleaned up |
670 | 17 | let Some(_cleaning_guard) = running_actions_manager.perform_cleanup(operation_id.clone()) Branch (670:9): [True: 0, False: 0]
Branch (670:9): [Folded - Ignored]
Branch (670:9): [True: 17, False: 0]
|
671 | | else { |
672 | | // Cleanup is already happening elsewhere. |
673 | 0 | return Ok(()); |
674 | | }; |
675 | | |
676 | 17 | debug!("Worker cleaning up"); |
677 | | // Note: We need to be careful to keep trying to cleanup even if one of the steps fails. |
678 | 17 | let remove_dir_result = fs::remove_dir_all(action_directory) |
679 | 17 | .await |
680 | 17 | .err_tip(|| format!("Could not remove working directory {action_directory}"0 )); |
681 | | |
682 | 17 | if let Err(err0 ) = running_actions_manager.cleanup_action(operation_id) { Branch (682:12): [True: 0, False: 0]
Branch (682:12): [Folded - Ignored]
Branch (682:12): [True: 0, False: 17]
|
683 | 0 | error!(%operation_id, ?err, "Error cleaning up action"); |
684 | 0 | Result::<(), Error>::Err(err).merge(remove_dir_result) |
685 | 17 | } else if let Err(err0 ) = remove_dir_result { Branch (685:19): [True: 0, False: 0]
Branch (685:19): [Folded - Ignored]
Branch (685:19): [True: 0, False: 17]
|
686 | 0 | error!(%operation_id, ?err, "Error removing working directory"); |
687 | 0 | Err(err) |
688 | | } else { |
689 | 17 | Ok(()) |
690 | | } |
691 | 17 | } |
692 | | |
/// Lifecycle interface for a single action executing on a worker.
///
/// Implementations are driven through the phases in order:
/// `prepare_action` -> `execute` -> `upload_results` -> `cleanup` ->
/// `get_finished_result`. Each phase consumes and returns the shared
/// `Arc<Self>` handle so the phases can be chained as futures.
pub trait RunningAction: Sync + Send + Sized + Unpin + 'static {
    /// Returns the action id of the action.
    fn get_operation_id(&self) -> &OperationId;

    /// Anything that needs to execute before the action is actually executed should happen here.
    fn prepare_action(self: Arc<Self>) -> impl Future<Output = Result<Arc<Self>, Error>> + Send;

    /// Actually perform the execution of the action.
    fn execute(self: Arc<Self>) -> impl Future<Output = Result<Arc<Self>, Error>> + Send;

    /// Any uploading, processing or analyzing of the results should happen here.
    fn upload_results(self: Arc<Self>) -> impl Future<Output = Result<Arc<Self>, Error>> + Send;

    /// Cleanup any residual files, handles or other junk resulting from running the action.
    fn cleanup(self: Arc<Self>) -> impl Future<Output = Result<Arc<Self>, Error>> + Send;

    /// Returns the final result. As a general rule this action should be thought of as
    /// a consumption of `self`, meaning once a return happens here the lifetime of `Self`
    /// is over and any action performed on it after this call is undefined behavior.
    fn get_finished_result(
        self: Arc<Self>,
    ) -> impl Future<Output = Result<ActionResult, Error>> + Send;

    /// Returns the work directory of the action.
    fn get_work_directory(&self) -> &String;
}
719 | | |
/// Captured output of the finished child process, recorded by the execute
/// phase and consumed by the upload-results phase.
#[derive(Debug)]
struct RunningActionImplExecutionResult {
    /// Everything the child process wrote to stdout.
    stdout: Bytes,
    /// Everything the child process wrote to stderr.
    stderr: Bytes,
    /// Process exit code; a sentinel value is stored when the process
    /// terminated without an exit code (e.g. killed by a signal).
    exit_code: i32,
}
726 | | |
/// Mutable, lock-protected state a `RunningActionImpl` carries through its
/// lifecycle phases (prepare -> execute -> upload results).
#[derive(Debug)]
struct RunningActionImplState {
    /// Decoded `Command` proto; populated during prepare and re-stored after execute.
    command_proto: Option<ProtoCommand>,
    // TODO(palfrey) Kill is not implemented yet, but is instrumented.
    // However, it is used if the worker disconnects to destroy current jobs.
    kill_channel_tx: Option<oneshot::Sender<()>>,
    /// Receiver half, taken by `execute()`; a message here kills the child process.
    kill_channel_rx: Option<oneshot::Receiver<()>>,
    /// Output of the finished process, set by `execute()`.
    execution_result: Option<RunningActionImplExecutionResult>,
    /// Final assembled result, set by the upload-results phase.
    action_result: Option<ActionResult>,
    /// Phase timestamps, stamped as the action moves through its lifecycle.
    execution_metadata: ExecutionMetadata,
    // If there was an internal error, this will be set.
    // This should NOT be set if everything was fine, but the process had a
    // non-zero exit code. Instead this should be used for internal errors
    // that prevented the action from running, upload failures, timeouts, exc...
    // but we have (or could have) the action results (like stderr/stdout).
    error: Option<Error>,
}
744 | | |
/// A single action being executed on this worker, owned and driven by a
/// `RunningActionsManagerImpl`.
#[derive(Debug)]
pub struct RunningActionImpl {
    /// Identifier of the operation this action belongs to.
    operation_id: OperationId,
    /// Root scratch directory allocated to this action.
    action_directory: String,
    /// `<action_directory>/work` — the directory the command executes in.
    work_directory: String,
    /// Immutable description of the action to run.
    action_info: ActionInfo,
    /// Effective timeout used when supervising the child process.
    timeout: Duration,
    /// Back-reference to the manager that owns this action.
    running_actions_manager: Arc<RunningActionsManagerImpl>,
    /// Lifecycle state shared across the action's phases.
    state: Mutex<RunningActionImplState>,
    /// Whether the manager still holds an entry for this action
    /// (set in `new`; presumably consulted on Drop — Drop impl not shown here).
    has_manager_entry: AtomicBool,
    /// Set to `false` once the work directory has been created and thus
    /// requires cleanup; starts `true` because nothing exists on disk yet.
    did_cleanup: AtomicBool,
}
757 | | |
758 | | impl RunningActionImpl { |
759 | 18 | pub fn new( |
760 | 18 | execution_metadata: ExecutionMetadata, |
761 | 18 | operation_id: OperationId, |
762 | 18 | action_directory: String, |
763 | 18 | action_info: ActionInfo, |
764 | 18 | timeout: Duration, |
765 | 18 | running_actions_manager: Arc<RunningActionsManagerImpl>, |
766 | 18 | ) -> Self { |
767 | 18 | let work_directory = format!("{}/{}", action_directory, "work"); |
768 | 18 | let (kill_channel_tx, kill_channel_rx) = oneshot::channel(); |
769 | 18 | Self { |
770 | 18 | operation_id, |
771 | 18 | action_directory, |
772 | 18 | work_directory, |
773 | 18 | action_info, |
774 | 18 | timeout, |
775 | 18 | running_actions_manager, |
776 | 18 | state: Mutex::new(RunningActionImplState { |
777 | 18 | command_proto: None, |
778 | 18 | kill_channel_rx: Some(kill_channel_rx), |
779 | 18 | kill_channel_tx: Some(kill_channel_tx), |
780 | 18 | execution_result: None, |
781 | 18 | action_result: None, |
782 | 18 | execution_metadata, |
783 | 18 | error: None, |
784 | 18 | }), |
785 | 18 | // Always need to ensure that we're removed from the manager on Drop. |
786 | 18 | has_manager_entry: AtomicBool::new(true), |
787 | 18 | // Only needs to be cleaned up after a prepare_action call, set there. |
788 | 18 | did_cleanup: AtomicBool::new(true), |
789 | 18 | } |
790 | 18 | } |
791 | | |
    /// Shorthand accessor for the metrics object shared by the owning manager.
    #[allow(
        clippy::missing_const_for_fn,
        reason = "False positive on stable, but not on nightly"
    )]
    fn metrics(&self) -> &Arc<Metrics> {
        &self.running_actions_manager.metrics
    }
799 | | |
800 | | /// Prepares any actions needed to execute this action. This action will do the following: |
801 | | /// |
802 | | /// * Download any files needed to execute the action |
803 | | /// * Build a folder with all files needed to execute the action. |
804 | | /// |
805 | | /// This function will aggressively download and spawn potentially thousands of futures. It is |
806 | | /// up to the stores to rate limit if needed. |
807 | 15 | async fn inner_prepare_action(self: Arc<Self>) -> Result<Arc<Self>, Error>0 { |
808 | 15 | { |
809 | 15 | let mut state = self.state.lock(); |
810 | 15 | state.execution_metadata.input_fetch_start_timestamp = |
811 | 15 | (self.running_actions_manager.callbacks.now_fn)(); |
812 | 15 | } |
813 | 15 | let command = { |
814 | | // Download and build out our input files/folders. Also fetch and decode our Command. |
815 | 15 | let command_fut = self.metrics().get_proto_command_from_store.wrap(async { |
816 | 15 | get_and_decode_digest::<ProtoCommand>( |
817 | 15 | self.running_actions_manager.cas_store.as_ref(), |
818 | 15 | self.action_info.command_digest.into(), |
819 | 15 | ) |
820 | 15 | .await |
821 | 15 | .err_tip(|| "Converting command_digest to Command") |
822 | 15 | }); |
823 | 15 | let filesystem_store_pin = |
824 | 15 | Pin::new(self.running_actions_manager.filesystem_store.as_ref()); |
825 | 15 | let (command, ()) = try_join(command_fut, async { |
826 | 15 | fs::create_dir(&self.work_directory) |
827 | 15 | .await |
828 | 15 | .err_tip(|| format!("Error creating work directory {}"0 , self.work_directory0 ))?0 ; |
829 | | // Now the work directory has been created, we have to clean up. |
830 | 15 | self.did_cleanup.store(false, Ordering::Release); |
831 | | // Download the input files/folder and place them into the temp directory. |
832 | | // Use directory cache if available for better performance. |
833 | 15 | self.metrics() |
834 | 15 | .download_to_directory |
835 | 15 | .wrap(prepare_action_inputs( |
836 | 15 | &self.running_actions_manager.directory_cache, |
837 | 15 | &self.running_actions_manager.cas_store, |
838 | 15 | filesystem_store_pin, |
839 | 15 | &self.action_info.input_root_digest, |
840 | 15 | &self.work_directory, |
841 | 15 | )) |
842 | 15 | .await |
843 | 15 | }) |
844 | 15 | .await?0 ; |
845 | 15 | command |
846 | | }; |
847 | | { |
848 | | // Create all directories needed for our output paths. This is required by the bazel spec. |
849 | 15 | let prepare_output_directories = |output_file| {9 |
850 | 9 | let full_output_path = if command.working_directory.is_empty() { Branch (850:43): [Folded - Ignored]
Branch (850:43): [Folded - Ignored]
Branch (850:43): [True: 1, False: 8]
|
851 | 1 | format!("{}/{}", self.work_directory, output_file) |
852 | | } else { |
853 | 8 | format!( |
854 | 8 | "{}/{}/{}", |
855 | 8 | self.work_directory, command.working_directory, output_file |
856 | | ) |
857 | | }; |
858 | 9 | async move { |
859 | 9 | let full_parent_path = Path::new(&full_output_path) |
860 | 9 | .parent() |
861 | 9 | .err_tip(|| format!("Parent path for {full_output_path} has no parent"0 ))?0 ; |
862 | 9 | fs::create_dir_all(full_parent_path).await.err_tip(|| {0 |
863 | 0 | format!( |
864 | 0 | "Error creating output directory {} (file)", |
865 | 0 | full_parent_path.display() |
866 | | ) |
867 | 0 | })?; |
868 | 9 | Result::<(), Error>::Ok(()) |
869 | 9 | } |
870 | 9 | }; |
871 | 15 | self.metrics() |
872 | 15 | .prepare_output_files |
873 | 15 | .wrap(try_join_all( |
874 | 15 | command.output_files.iter().map(prepare_output_directories), |
875 | 15 | )) |
876 | 15 | .await?0 ; |
877 | 15 | self.metrics() |
878 | 15 | .prepare_output_paths |
879 | 15 | .wrap(try_join_all( |
880 | 15 | command.output_paths.iter().map(prepare_output_directories), |
881 | 15 | )) |
882 | 15 | .await?0 ; |
883 | | } |
884 | 15 | debug!(?command, "Worker received command"); |
885 | 15 | { |
886 | 15 | let mut state = self.state.lock(); |
887 | 15 | state.command_proto = Some(command); |
888 | 15 | state.execution_metadata.input_fetch_completed_timestamp = |
889 | 15 | (self.running_actions_manager.callbacks.now_fn)(); |
890 | 15 | } |
891 | 15 | Ok(self) |
892 | 15 | } |
893 | | |
    /// Spawns the command decoded during `inner_prepare_action` and supervises
    /// it until one of three events occurs:
    ///
    /// * the process exits on its own — stdout/stderr are collected and the
    ///   result is stored in `state.execution_result`;
    /// * the action timeout elapses — the process is killed and a
    ///   `DeadlineExceeded` error is merged into `state.error`;
    /// * the scheduler fires the kill channel — the process is killed and an
    ///   `Aborted` error is merged into `state.error`.
    async fn inner_execute(self: Arc<Self>) -> Result<Arc<Self>, Error> {
        let (command_proto, mut kill_channel_rx) = {
            let mut state = self.state.lock();
            state.execution_metadata.execution_start_timestamp =
                (self.running_actions_manager.callbacks.now_fn)();
            (
                state
                    .command_proto
                    .take()
                    .err_tip(|| "Expected state to have command_proto in execute()")?,
                state
                    .kill_channel_rx
                    .take()
                    .err_tip(|| "Expected state to have kill_channel_rx in execute()")?
                    // This is important as we may be killed at any point.
                    .fuse(),
            )
        };
        if command_proto.arguments.is_empty() {
            return Err(make_input_err!("No arguments provided in Command proto"));
        }
        // If an entrypoint wrapper is configured it becomes argv[0] and the
        // command's own arguments are appended after it.
        let args: Vec<&OsStr> = if let Some(entrypoint) = &self
            .running_actions_manager
            .execution_configuration
            .entrypoint
        {
            core::iter::once(entrypoint.as_ref())
                .chain(command_proto.arguments.iter().map(AsRef::as_ref))
                .collect()
        } else {
            command_proto.arguments.iter().map(AsRef::as_ref).collect()
        };
        // TODO(palfrey): This should probably be in debug, but currently
        //                that's too busy and we often rely on this to
        //                figure out toolchain misconfiguration issues.
        //                De-bloat the `debug` level by using the `trace`
        //                level more effectively and adjust this.
        info!(?args, "Executing command",);
        let mut command_builder = process::Command::new(args[0]);
        command_builder
            .args(&args[1..])
            .kill_on_drop(true)
            .stdin(Stdio::null())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .current_dir(format!(
                "{}/{}",
                self.work_directory, command_proto.working_directory
            ))
            .env_clear();

        // A zero action timeout means no explicit timeout was requested; fall
        // back to the manager-wide maximum.
        let requested_timeout = if self.action_info.timeout.is_zero() {
            self.running_actions_manager.max_action_timeout
        } else {
            self.action_info.timeout
        };

        let mut maybe_side_channel_file: Option<Cow<'_, OsStr>> = None;
        if let Some(additional_environment) = &self
            .running_actions_manager
            .execution_configuration
            .additional_environment
        {
            for (name, source) in additional_environment {
                let value = match source {
                    EnvironmentSource::Property(property) => self
                        .action_info
                        .platform_properties
                        .get(property)
                        .map_or_else(|| Cow::Borrowed(""), |v| Cow::Borrowed(v.as_str())),
                    EnvironmentSource::Value(value) => Cow::Borrowed(value.as_str()),
                    EnvironmentSource::FromEnvironment => {
                        Cow::Owned(env::var(name).unwrap_or_default())
                    }
                    EnvironmentSource::TimeoutMillis => {
                        Cow::Owned(requested_timeout.as_millis().to_string())
                    }
                    EnvironmentSource::SideChannelFile => {
                        // Generate a unique path and remember it so the file can
                        // be processed after the command completes.
                        let file_cow =
                            format!("{}/{}", self.action_directory, Uuid::new_v4().simple());
                        maybe_side_channel_file = Some(Cow::Owned(file_cow.clone().into()));
                        Cow::Owned(file_cow)
                    }
                    EnvironmentSource::ActionDirectory => {
                        Cow::Borrowed(self.action_directory.as_str())
                    }
                };
                command_builder.env(name, value.as_ref());
            }
        }

        #[cfg(target_family = "unix")]
        let envs = &command_proto.environment_variables;
        // If SystemRoot is not set on windows we set it to default. Failing to do
        // this causes all commands to fail.
        #[cfg(target_family = "windows")]
        let envs = {
            let mut envs = command_proto.environment_variables.clone();
            if !envs.iter().any(|v| v.name.to_uppercase() == "SYSTEMROOT") {
                envs.push(
                    nativelink_proto::build::bazel::remote::execution::v2::command::EnvironmentVariable {
                        name: "SystemRoot".to_string(),
                        value: "C:\\Windows".to_string(),
                    },
                );
            }
            if !envs.iter().any(|v| v.name.to_uppercase() == "PATH") {
                envs.push(
                    nativelink_proto::build::bazel::remote::execution::v2::command::EnvironmentVariable {
                        name: "PATH".to_string(),
                        value: "C:\\Windows\\System32".to_string(),
                    },
                );
            }
            envs
        };
        for environment_variable in envs {
            command_builder.env(&environment_variable.name, &environment_variable.value);
        }

        let mut child_process = command_builder
            .spawn()
            .err_tip(|| format!("Could not execute command {args:?}"))?;
        let mut stdout_reader = child_process
            .stdout
            .take()
            .err_tip(|| "Expected stdout to exist on command this should never happen")?;
        let mut stderr_reader = child_process
            .stderr
            .take()
            .err_tip(|| "Expected stderr to exist on command this should never happen")?;

        // Safety net: if this future is dropped before the child was reaped,
        // kill the child from a background task instead of leaking it.
        let mut child_process_guard = guard(child_process, |mut child_process| {
            let result: Result<Option<std::process::ExitStatus>, std::io::Error> =
                child_process.try_wait();
            match result {
                Ok(res) if res.is_some() => {
                    // The child already exited, probably a timeout or kill operation
                }
                result => {
                    error!(
                        ?result,
                        "Child process was not cleaned up before dropping the call to execute(), killing in background spawn."
                    );
                    background_spawn!("running_actions_manager_kill_child_process", async move {
                        child_process.kill().await
                    });
                }
            }
        });

        // Drain stdout/stderr on dedicated tasks so the child never blocks on
        // a full pipe while we wait on it below.
        let all_stdout_fut = spawn!("stdout_reader", async move {
            let mut all_stdout = BytesMut::new();
            loop {
                let sz = stdout_reader
                    .read_buf(&mut all_stdout)
                    .await
                    .err_tip(|| "Error reading stdout stream")?;
                if sz == 0 {
                    break; // EOF.
                }
            }
            Result::<Bytes, Error>::Ok(all_stdout.freeze())
        });
        let all_stderr_fut = spawn!("stderr_reader", async move {
            let mut all_stderr = BytesMut::new();
            loop {
                let sz = stderr_reader
                    .read_buf(&mut all_stderr)
                    .await
                    .err_tip(|| "Error reading stderr stream")?;
                if sz == 0 {
                    break; // EOF.
                }
            }
            Result::<Bytes, Error>::Ok(all_stderr.freeze())
        });
        let mut killed_action = false;

        let timer = self.metrics().child_process.begin_timer();
        let mut sleep_fut = (self.running_actions_manager.callbacks.sleep_fn)(self.timeout).fuse();
        loop {
            tokio::select! {
                () = &mut sleep_fut => {
                    self.running_actions_manager.metrics.task_timeouts.inc();
                    killed_action = true;
                    if let Err(err) = child_process_guard.kill().await {
                        error!(
                            ?err,
                            "Could not kill process in RunningActionsManager for action timeout",
                        );
                    }
                    {
                        let joined_command = args.join(OsStr::new(" "));
                        let command = joined_command.to_string_lossy();
                        // NOTE(review): `sleep_fut` above waits for `self.timeout`, but this
                        // log and the error message report `self.action_info.timeout`, which
                        // may be zero when the manager's `max_action_timeout` was applied —
                        // confirm the reported duration is intended.
                        info!(
                            seconds = self.action_info.timeout.as_secs_f32(),
                            %command,
                            "Command timed out"
                        );
                        let mut state = self.state.lock();
                        state.error = Error::merge_option(state.error.take(), Some(Error::new(
                            Code::DeadlineExceeded,
                            format!(
                                "Command '{}' timed out after {} seconds",
                                command,
                                self.action_info.timeout.as_secs_f32()
                            )
                        )));
                    }
                },
                maybe_exit_status = child_process_guard.wait() => {
                    // Defuse our guard so it does not try to cleanup and make senseless logs.
                    drop(ScopeGuard::<_, _>::into_inner(child_process_guard));
                    let exit_status = maybe_exit_status.err_tip(|| "Failed to collect exit code of process")?;
                    // TODO(palfrey) We should implement stderr/stdout streaming to client here.
                    // If we get killed before the stream is started, then these will lock up.
                    // TODO(palfrey) There is a significant bug here. If we kill the action and the action creates
                    // child processes, it can create zombies. See: https://github.com/tracemachina/nativelink/issues/225
                    let (stdout, stderr) = if killed_action {
                        drop(timer);
                        (Bytes::new(), Bytes::new())
                    } else {
                        timer.measure();
                        let (maybe_all_stdout, maybe_all_stderr) = tokio::join!(all_stdout_fut, all_stderr_fut);
                        (
                            maybe_all_stdout.err_tip(|| "Internal error reading from stdout of worker task")??,
                            maybe_all_stderr.err_tip(|| "Internal error reading from stderr of worker task")??
                        )
                    };

                    // A missing exit code means the process was terminated by a
                    // signal; a sentinel code is recorded instead.
                    let exit_code = exit_status.code().map_or(EXIT_CODE_FOR_SIGNAL, |exit_code| {
                        if exit_code == 0 {
                            self.metrics().child_process_success_error_code.inc();
                        } else {
                            self.metrics().child_process_failure_error_code.inc();
                        }
                        exit_code
                    });

                    info!(?args, "Command complete");

                    let maybe_error_override = if let Some(side_channel_file) = maybe_side_channel_file {
                        process_side_channel_file(side_channel_file.clone(), &args, requested_timeout).await
                        .err_tip(|| format!("Error processing side channel file: {}", side_channel_file.display()))?
                    } else {
                        None
                    };
                    {
                        let mut state = self.state.lock();
                        state.error = Error::merge_option(state.error.take(), maybe_error_override);

                        state.command_proto = Some(command_proto);
                        state.execution_result = Some(RunningActionImplExecutionResult{
                            stdout,
                            stderr,
                            exit_code,
                        });
                        state.execution_metadata.execution_completed_timestamp = (self.running_actions_manager.callbacks.now_fn)();
                    }
                    return Ok(self);
                },
                _ = &mut kill_channel_rx => {
                    killed_action = true;
                    if let Err(err) = child_process_guard.kill().await {
                        error!(
                            operation_id = ?self.operation_id,
                            ?err,
                            "Could not kill process",
                        );
                    }
                    {
                        let mut state = self.state.lock();
                        state.error = Error::merge_option(state.error.take(), Some(Error::new(
                            Code::Aborted,
                            format!(
                                "Command '{}' was killed by scheduler",
                                args.join(OsStr::new(" ")).to_string_lossy()
                            )
                        )));
                    }
                },
            }
        }
        // Unreachable.
    }
1180 | | |
1181 | 11 | async fn inner_upload_results(self: Arc<Self>) -> Result<Arc<Self>, Error> { |
1182 | | enum OutputType { |
1183 | | None, |
1184 | | File(FileInfo), |
1185 | | Directory(DirectoryInfo), |
1186 | | FileSymlink(SymlinkInfo), |
1187 | | DirectorySymlink(SymlinkInfo), |
1188 | | } |
1189 | | |
1190 | 11 | let upload_start = std::time::Instant::now(); |
1191 | 11 | debug!( |
1192 | 11 | operation_id = ?self.operation_id, |
1193 | 11 | "Worker uploading results - starting", |
1194 | | ); |
1195 | 11 | let (mut command_proto, execution_result, mut execution_metadata) = { |
1196 | 11 | let mut state = self.state.lock(); |
1197 | 11 | state.execution_metadata.output_upload_start_timestamp = |
1198 | 11 | (self.running_actions_manager.callbacks.now_fn)(); |
1199 | | ( |
1200 | 11 | state |
1201 | 11 | .command_proto |
1202 | 11 | .take() |
1203 | 11 | .err_tip(|| "Expected state to have command_proto in execute()")?0 , |
1204 | 11 | state |
1205 | 11 | .execution_result |
1206 | 11 | .take() |
1207 | 11 | .err_tip(|| "Execution result does not exist at upload_results stage")?0 , |
1208 | 11 | state.execution_metadata.clone(), |
1209 | | ) |
1210 | | }; |
1211 | 11 | let cas_store = self.running_actions_manager.cas_store.as_ref(); |
1212 | 11 | let hasher = self.action_info.unique_qualifier.digest_function(); |
1213 | | |
1214 | 11 | let mut output_path_futures = FuturesUnordered::new(); |
1215 | 11 | let mut output_paths = command_proto.output_paths; |
1216 | 11 | if output_paths.is_empty() { Branch (1216:12): [Folded - Ignored]
Branch (1216:12): [Folded - Ignored]
Branch (1216:12): [True: 6, False: 5]
|
1217 | 6 | output_paths |
1218 | 6 | .reserve(command_proto.output_files.len() + command_proto.output_directories.len()); |
1219 | 6 | output_paths.append(&mut command_proto.output_files); |
1220 | 6 | output_paths.append(&mut command_proto.output_directories); |
1221 | 6 | }5 |
1222 | 11 | let digest_uploaders = Arc::new(Mutex::new(HashMap::new())); |
1223 | 18 | for entry7 in output_paths { |
1224 | 7 | let full_path = OsString::from(if command_proto.working_directory.is_empty() { Branch (1224:47): [Folded - Ignored]
Branch (1224:47): [Folded - Ignored]
Branch (1224:47): [True: 0, False: 7]
|
1225 | 0 | format!("{}/{}", self.work_directory, entry) |
1226 | | } else { |
1227 | 7 | format!( |
1228 | 7 | "{}/{}/{}", |
1229 | 7 | self.work_directory, command_proto.working_directory, entry |
1230 | | ) |
1231 | | }); |
1232 | 7 | let work_directory = &self.work_directory; |
1233 | 7 | let digest_uploaders = digest_uploaders.clone(); |
1234 | 7 | output_path_futures.push(async move { |
1235 | 3 | let metadata = { |
1236 | 7 | let metadata = match fs::symlink_metadata(&full_path).await { |
1237 | 7 | Ok(file) => file, |
1238 | 0 | Err(e) => { |
1239 | 0 | if e.code == Code::NotFound { Branch (1239:32): [Folded - Ignored]
Branch (1239:32): [Folded - Ignored]
Branch (1239:32): [True: 0, False: 0]
|
1240 | | // In the event our output does not exist, according to the bazel remote |
1241 | | // execution spec, we simply ignore it continue. |
1242 | 0 | return Result::<OutputType, Error>::Ok(OutputType::None); |
1243 | 0 | } |
1244 | 0 | return Err(e).err_tip(|| { |
1245 | 0 | format!("Could not open file {}", full_path.display()) |
1246 | 0 | }); |
1247 | | } |
1248 | | }; |
1249 | | |
1250 | 7 | if metadata.is_file() { Branch (1250:24): [Folded - Ignored]
Branch (1250:24): [Folded - Ignored]
Branch (1250:24): [True: 4, False: 3]
|
1251 | | return Ok(OutputType::File( |
1252 | 4 | upload_file( |
1253 | 4 | cas_store.as_pin(), |
1254 | 4 | &full_path, |
1255 | 4 | hasher, |
1256 | 4 | metadata, |
1257 | 4 | digest_uploaders, |
1258 | 4 | ) |
1259 | 4 | .await |
1260 | 4 | .map(|mut file_info| { |
1261 | 4 | file_info.name_or_path = NameOrPath::Path(entry); |
1262 | 4 | file_info |
1263 | 4 | }) |
1264 | 4 | .err_tip(|| format!("Uploading file {}"0 , full_path.display()0 ))?0 , |
1265 | | )); |
1266 | 3 | } |
1267 | 3 | metadata |
1268 | | }; |
1269 | 3 | if metadata.is_dir() { Branch (1269:20): [Folded - Ignored]
Branch (1269:20): [Folded - Ignored]
Branch (1269:20): [True: 2, False: 1]
|
1270 | | Ok(OutputType::Directory( |
1271 | 2 | upload_directory( |
1272 | 2 | cas_store.as_pin(), |
1273 | 2 | &full_path, |
1274 | 2 | work_directory, |
1275 | 2 | hasher, |
1276 | 2 | digest_uploaders, |
1277 | | ) |
1278 | 2 | .and_then(|(root_dir, children)| async move { |
1279 | 2 | let tree = ProtoTree { |
1280 | 2 | root: Some(root_dir), |
1281 | 2 | children: children.into(), |
1282 | 2 | }; |
1283 | 2 | let tree_digest = serialize_and_upload_message( |
1284 | 2 | &tree, |
1285 | 2 | cas_store.as_pin(), |
1286 | 2 | &mut hasher.hasher(), |
1287 | 2 | ) |
1288 | 2 | .await |
1289 | 2 | .err_tip(|| format!("While processing {entry}"0 ))?0 ; |
1290 | 2 | Ok(DirectoryInfo { |
1291 | 2 | path: entry, |
1292 | 2 | tree_digest, |
1293 | 2 | }) |
1294 | 4 | }) |
1295 | 2 | .await |
1296 | 2 | .err_tip(|| format!("Uploading directory {}"0 , full_path.display()0 ))?0 , |
1297 | | )) |
1298 | 1 | } else if metadata.is_symlink() { Branch (1298:27): [Folded - Ignored]
Branch (1298:27): [Folded - Ignored]
Branch (1298:27): [True: 1, False: 0]
|
1299 | 1 | let output_symlink = upload_symlink(&full_path, work_directory) |
1300 | 1 | .await |
1301 | 1 | .map(|mut symlink_info| { |
1302 | 1 | symlink_info.name_or_path = NameOrPath::Path(entry); |
1303 | 1 | symlink_info |
1304 | 1 | }) |
1305 | 1 | .err_tip(|| format!("Uploading symlink {}"0 , full_path.display()0 ))?0 ; |
1306 | 1 | match fs::metadata(&full_path).await { |
1307 | 1 | Ok(metadata) => { |
1308 | 1 | if metadata.is_dir() { Branch (1308:32): [Folded - Ignored]
Branch (1308:32): [Folded - Ignored]
Branch (1308:32): [True: 0, False: 1]
|
1309 | 0 | Ok(OutputType::DirectorySymlink(output_symlink)) |
1310 | | } else { |
1311 | | // Note: If it's anything but directory we put it as a file symlink. |
1312 | 1 | Ok(OutputType::FileSymlink(output_symlink)) |
1313 | | } |
1314 | | } |
1315 | 0 | Err(e) => { |
1316 | 0 | if e.code != Code::NotFound { Branch (1316:32): [Folded - Ignored]
Branch (1316:32): [Folded - Ignored]
Branch (1316:32): [True: 0, False: 0]
|
1317 | 0 | return Err(e).err_tip(|| { |
1318 | 0 | format!( |
1319 | 0 | "While querying target symlink metadata for {}", |
1320 | 0 | full_path.display() |
1321 | | ) |
1322 | 0 | }); |
1323 | 0 | } |
1324 | | // If the file doesn't exist, we consider it a file. Even though the |
1325 | | // file doesn't exist we still need to populate an entry. |
1326 | 0 | Ok(OutputType::FileSymlink(output_symlink)) |
1327 | | } |
1328 | | } |
1329 | | } else { |
1330 | 0 | Err(make_err!( |
1331 | 0 | Code::Internal, |
1332 | 0 | "{full_path:?} was not a file, folder or symlink. Must be one.", |
1333 | 0 | )) |
1334 | | } |
1335 | 7 | }); |
1336 | | } |
1337 | 11 | let mut output_files = vec![]; |
1338 | 11 | let mut output_folders = vec![]; |
1339 | 11 | let mut output_directory_symlinks = vec![]; |
1340 | 11 | let mut output_file_symlinks = vec![]; |
1341 | | |
1342 | 11 | if execution_result.exit_code != 0 { Branch (1342:12): [Folded - Ignored]
Branch (1342:12): [Folded - Ignored]
Branch (1342:12): [True: 5, False: 6]
|
1343 | 5 | let stdout = core::str::from_utf8(&execution_result.stdout).unwrap_or("<no-utf8>"); |
1344 | 5 | let stderr = core::str::from_utf8(&execution_result.stderr).unwrap_or("<no-utf8>"); |
1345 | 5 | error!( |
1346 | | exit_code = ?execution_result.exit_code, |
1347 | 5 | stdout = ?stdout[..min(stdout.len(), 1000)], |
1348 | 5 | stderr = ?stderr[..min(stderr.len(), 1000)], |
1349 | 5 | "Command returned non-zero exit code", |
1350 | | ); |
1351 | 6 | } |
1352 | | |
1353 | 11 | let stdout_digest_fut = self.metrics().upload_stdout.wrap(async { |
1354 | 11 | let start = std::time::Instant::now(); |
1355 | 11 | let data = execution_result.stdout; |
1356 | 11 | let data_len = data.len(); |
1357 | 11 | let digest = compute_buf_digest(&data, &mut hasher.hasher()); |
1358 | 11 | cas_store |
1359 | 11 | .update_oneshot(digest, data) |
1360 | 11 | .await |
1361 | 11 | .err_tip(|| "Uploading stdout")?0 ; |
1362 | 11 | debug!( |
1363 | | ?digest, |
1364 | | data_len, |
1365 | 11 | elapsed_ms = start.elapsed().as_millis(), |
1366 | 11 | "upload_results: stdout upload completed", |
1367 | | ); |
1368 | 11 | Result::<DigestInfo, Error>::Ok(digest) |
1369 | 11 | }); |
1370 | 11 | let stderr_digest_fut = self.metrics().upload_stderr.wrap(async { |
1371 | 11 | let start = std::time::Instant::now(); |
1372 | 11 | let data = execution_result.stderr; |
1373 | 11 | let data_len = data.len(); |
1374 | 11 | let digest = compute_buf_digest(&data, &mut hasher.hasher()); |
1375 | 11 | cas_store |
1376 | 11 | .update_oneshot(digest, data) |
1377 | 11 | .await |
1378 | 11 | .err_tip(|| "Uploading stderr")?0 ; |
1379 | 11 | debug!( |
1380 | | ?digest, |
1381 | | data_len, |
1382 | 11 | elapsed_ms = start.elapsed().as_millis(), |
1383 | 11 | "upload_results: stderr upload completed", |
1384 | | ); |
1385 | 11 | Result::<DigestInfo, Error>::Ok(digest) |
1386 | 11 | }); |
1387 | | |
1388 | 11 | debug!( |
1389 | 11 | operation_id = ?self.operation_id, |
1390 | 11 | num_output_paths = output_path_futures.len(), |
1391 | 11 | "upload_results: starting stdout/stderr/output_paths uploads", |
1392 | | ); |
1393 | 11 | let join_start = std::time::Instant::now(); |
1394 | 11 | let upload_result = futures::try_join!(stdout_digest_fut, stderr_digest_fut, async { |
1395 | 18 | while let Some(output_type7 ) = output_path_futures.try_next().await?0 { Branch (1395:23): [Folded - Ignored]
Branch (1395:23): [Folded - Ignored]
Branch (1395:23): [True: 7, False: 11]
|
1396 | 7 | match output_type { |
1397 | 4 | OutputType::File(output_file) => output_files.push(output_file), |
1398 | 2 | OutputType::Directory(output_folder) => output_folders.push(output_folder), |
1399 | 1 | OutputType::FileSymlink(output_symlink) => { |
1400 | 1 | output_file_symlinks.push(output_symlink); |
1401 | 1 | } |
1402 | 0 | OutputType::DirectorySymlink(output_symlink) => { |
1403 | 0 | output_directory_symlinks.push(output_symlink); |
1404 | 0 | } |
1405 | 0 | OutputType::None => { /* Safe to ignore */ } |
1406 | | } |
1407 | | } |
1408 | 11 | Ok(()) |
1409 | 11 | }); |
1410 | 11 | drop(output_path_futures); |
1411 | 11 | debug!( |
1412 | 11 | operation_id = ?self.operation_id, |
1413 | 11 | elapsed_ms = join_start.elapsed().as_millis(), |
1414 | 11 | success = upload_result.is_ok(), |
1415 | 11 | "upload_results: all uploads completed", |
1416 | | ); |
1417 | 11 | let (stdout_digest, stderr_digest) = match upload_result { |
1418 | 11 | Ok((stdout_digest, stderr_digest, ())) => (stdout_digest, stderr_digest), |
1419 | 0 | Err(e) => return Err(e).err_tip(|| "Error while uploading results"), |
1420 | | }; |
1421 | | |
1422 | 11 | execution_metadata.output_upload_completed_timestamp = |
1423 | 11 | (self.running_actions_manager.callbacks.now_fn)(); |
1424 | 11 | output_files.sort_unstable_by(|a, b| a.name_or_path0 .cmp0 (&b.name_or_path0 )); |
1425 | 11 | output_folders.sort_unstable_by(|a, b| a.path0 .cmp0 (&b.path0 )); |
1426 | 11 | output_file_symlinks.sort_unstable_by(|a, b| a.name_or_path0 .cmp0 (&b.name_or_path0 )); |
1427 | 11 | output_directory_symlinks.sort_unstable_by(|a, b| a.name_or_path0 .cmp0 (&b.name_or_path0 )); |
1428 | 11 | let num_output_files = output_files.len(); |
1429 | 11 | let num_output_folders = output_folders.len(); |
1430 | 11 | { |
1431 | 11 | let mut state = self.state.lock(); |
1432 | 11 | execution_metadata.worker_completed_timestamp = |
1433 | 11 | (self.running_actions_manager.callbacks.now_fn)(); |
1434 | 11 | state.action_result = Some(ActionResult { |
1435 | 11 | output_files, |
1436 | 11 | output_folders, |
1437 | 11 | output_directory_symlinks, |
1438 | 11 | output_file_symlinks, |
1439 | 11 | exit_code: execution_result.exit_code, |
1440 | 11 | stdout_digest, |
1441 | 11 | stderr_digest, |
1442 | 11 | execution_metadata, |
1443 | 11 | server_logs: HashMap::default(), // TODO(palfrey) Not implemented. |
1444 | 11 | error: state.error.clone(), |
1445 | 11 | message: String::new(), // Will be filled in on cache_action_result if needed. |
1446 | 11 | }); |
1447 | 11 | } |
1448 | 11 | debug!( |
1449 | 11 | operation_id = ?self.operation_id, |
1450 | 11 | total_elapsed_ms = upload_start.elapsed().as_millis(), |
1451 | | num_output_files, |
1452 | | num_output_folders, |
1453 | 11 | "upload_results: inner_upload_results completed successfully", |
1454 | | ); |
1455 | 11 | Ok(self) |
1456 | 11 | } |
1457 | | |
1458 | 11 | async fn inner_get_finished_result(self: Arc<Self>) -> Result<ActionResult, Error>0 { |
1459 | 11 | let mut state = self.state.lock(); |
1460 | 11 | state |
1461 | 11 | .action_result |
1462 | 11 | .take() |
1463 | 11 | .err_tip(|| "Expected action_result to exist in get_finished_result") |
1464 | 11 | } |
1465 | | } |
1466 | | |
impl Drop for RunningActionImpl {
    fn drop(&mut self) {
        // Fast path: `cleanup()` already removed the on-disk action
        // directory. We may still own a map entry in the manager if the
        // caller dropped us before that entry was removed, so release it.
        if self.did_cleanup.load(Ordering::Acquire) {
            if self.has_manager_entry.load(Ordering::Acquire) {
                // `cleanup_action` returns a value we don't need; `drop` makes
                // the intentional discard explicit.
                drop(
                    self.running_actions_manager
                        .cleanup_action(&self.operation_id),
                );
            }
            return;
        }
        // Contract violation: callers are required to run `cleanup()` before
        // dropping a RunningActionImpl. Log loudly and attempt a best-effort
        // cleanup on a background task (Drop cannot await).
        let operation_id = self.operation_id.clone();
        error!(
            %operation_id,
            "RunningActionImpl did not cleanup. This is a violation of the requirements, will attempt to do it in the background."
        );
        let running_actions_manager = self.running_actions_manager.clone();
        let action_directory = self.action_directory.clone();
        background_spawn!("running_action_impl_drop", async move {
            let Err(err) =
                do_cleanup(&running_actions_manager, &operation_id, &action_directory).await
            else {
                return;
            };
            error!(
                %operation_id,
                ?action_directory,
                ?err,
                "Error cleaning up action"
            );
        });
    }
}
1500 | | |
1501 | | impl RunningAction for RunningActionImpl { |
1502 | 0 | fn get_operation_id(&self) -> &OperationId { |
1503 | 0 | &self.operation_id |
1504 | 0 | } |
1505 | | |
1506 | 15 | async fn prepare_action(self: Arc<Self>) -> Result<Arc<Self>, Error>0 { |
1507 | 15 | let res = self |
1508 | 15 | .metrics() |
1509 | 15 | .clone() |
1510 | 15 | .prepare_action |
1511 | 15 | .wrap(Self::inner_prepare_action(self)) |
1512 | 15 | .await; |
1513 | 15 | if let Err(ref e0 ) = res { Branch (1513:16): [Folded - Ignored]
Branch (1513:16): [Folded - Ignored]
Branch (1513:16): [True: 0, False: 15]
|
1514 | 0 | warn!(?e, "Error during prepare_action"); |
1515 | 15 | } |
1516 | 15 | res |
1517 | 15 | } |
1518 | | |
1519 | 13 | async fn execute(self: Arc<Self>) -> Result<Arc<Self>, Error>0 { |
1520 | 13 | let res = self |
1521 | 13 | .metrics() |
1522 | 13 | .clone() |
1523 | 13 | .execute |
1524 | 13 | .wrap(Self::inner_execute(self)) |
1525 | 13 | .await; |
1526 | 13 | if let Err(ref e0 ) = res { Branch (1526:16): [Folded - Ignored]
Branch (1526:16): [Folded - Ignored]
Branch (1526:16): [True: 0, False: 13]
|
1527 | 0 | warn!(?e, "Error during prepare_action"); |
1528 | 13 | } |
1529 | 13 | res |
1530 | 13 | } |
1531 | | |
1532 | 11 | async fn upload_results(self: Arc<Self>) -> Result<Arc<Self>, Error>0 { |
1533 | 11 | let upload_timeout = self.running_actions_manager.max_upload_timeout; |
1534 | 11 | let operation_id = self.operation_id.clone(); |
1535 | 11 | info!( |
1536 | | ?operation_id, |
1537 | 11 | upload_timeout_s = upload_timeout.as_secs(), |
1538 | 11 | "upload_results: starting with timeout", |
1539 | | ); |
1540 | 11 | let metrics = self.metrics().clone(); |
1541 | 11 | let upload_fut = metrics |
1542 | 11 | .upload_results |
1543 | 11 | .wrap(Self::inner_upload_results(self)); |
1544 | | |
1545 | 11 | let stall_warn_fut = async {10 |
1546 | 10 | let mut elapsed_secs = 0u64; |
1547 | | loop { |
1548 | 10 | tokio::time::sleep(Duration::from_secs(60)).await; |
1549 | 0 | elapsed_secs += 60; |
1550 | 0 | warn!( |
1551 | | ?operation_id, |
1552 | | elapsed_s = elapsed_secs, |
1553 | 0 | timeout_s = upload_timeout.as_secs(), |
1554 | 0 | "upload_results: still in progress — possible stall", |
1555 | | ); |
1556 | | } |
1557 | | }; |
1558 | | |
1559 | 11 | let res = tokio::time::timeout(upload_timeout, async { |
1560 | 11 | tokio::pin!(upload_fut); |
1561 | 11 | tokio::pin!(stall_warn_fut); |
1562 | 11 | tokio::select! { |
1563 | 11 | result = &mut upload_fut => result, |
1564 | 11 | () = &mut stall_warn_fut => unreachable!0 (), |
1565 | | } |
1566 | 11 | }) |
1567 | 11 | .await |
1568 | 11 | .map_err(|_| {0 |
1569 | 0 | make_err!( |
1570 | 0 | Code::DeadlineExceeded, |
1571 | | "Upload results timed out after {}s for operation {:?}", |
1572 | 0 | upload_timeout.as_secs(), |
1573 | | operation_id, |
1574 | | ) |
1575 | 0 | })?; |
1576 | 11 | if let Err(ref e0 ) = res { Branch (1576:16): [Folded - Ignored]
Branch (1576:16): [Folded - Ignored]
Branch (1576:16): [True: 0, False: 11]
|
1577 | 0 | warn!(?operation_id, ?e, "Error during upload_results"); |
1578 | 11 | } |
1579 | 11 | res |
1580 | 11 | } |
1581 | | |
1582 | 17 | async fn cleanup(self: Arc<Self>) -> Result<Arc<Self>, Error> { |
1583 | 17 | let res = self |
1584 | 17 | .metrics() |
1585 | 17 | .clone() |
1586 | 17 | .cleanup |
1587 | 17 | .wrap(async move { |
1588 | 17 | let result = do_cleanup( |
1589 | 17 | &self.running_actions_manager, |
1590 | 17 | &self.operation_id, |
1591 | 17 | &self.action_directory, |
1592 | 17 | ) |
1593 | 17 | .await; |
1594 | 17 | self.has_manager_entry.store(false, Ordering::Release); |
1595 | 17 | self.did_cleanup.store(true, Ordering::Release); |
1596 | 17 | result.map(move |()| self) |
1597 | 17 | }) |
1598 | 17 | .await; |
1599 | 17 | if let Err(ref e0 ) = res { Branch (1599:16): [Folded - Ignored]
Branch (1599:16): [Folded - Ignored]
Branch (1599:16): [True: 0, False: 17]
|
1600 | 0 | warn!(?e, "Error during cleanup"); |
1601 | 17 | } |
1602 | 17 | res |
1603 | 17 | } |
1604 | | |
1605 | 11 | async fn get_finished_result(self: Arc<Self>) -> Result<ActionResult, Error> { |
1606 | 11 | self.metrics() |
1607 | 11 | .clone() |
1608 | 11 | .get_finished_result |
1609 | 11 | .wrap(Self::inner_get_finished_result(self)) |
1610 | 11 | .await |
1611 | 11 | } |
1612 | | |
1613 | 0 | fn get_work_directory(&self) -> &String { |
1614 | 0 | &self.work_directory |
1615 | 0 | } |
1616 | | } |
1617 | | |
/// Interface for a manager that owns and drives the lifecycle of actions
/// running on this worker: creation, result caching, and termination.
pub trait RunningActionsManager: Sync + Send + Sized + Unpin + 'static {
    /// Concrete running-action type produced by this manager.
    type RunningAction: RunningAction;

    /// Creates a new action from `start_execute` and registers it as running
    /// under this manager.
    fn create_and_add_action(
        self: &Arc<Self>,
        worker_id: String,
        start_execute: StartExecute,
    ) -> impl Future<Output = Result<Arc<Self::RunningAction>, Error>> + Send;

    /// Caches `action_result` under `action_digest` according to the
    /// configured strategies. May mutate `action_result` (e.g. its `message`
    /// field) as part of the upload.
    fn cache_action_result(
        &self,
        action_digest: DigestInfo,
        action_result: &mut ActionResult,
        hasher: DigestHasherFunc,
    ) -> impl Future<Output = Result<(), Error>> + Send;

    /// Kills every action currently managed by this instance.
    fn kill_all(&self) -> impl Future<Output = ()> + Send;

    /// Kills the single action identified by `operation_id`.
    fn kill_operation(
        &self,
        operation_id: &OperationId,
    ) -> impl Future<Output = Result<(), Error>> + Send;

    /// Metrics collected by this manager.
    fn metrics(&self) -> &Arc<Metrics>;
}
1643 | | |
/// A function to get the current system time, used to allow mocking for tests
type NowFn = fn() -> SystemTime;
/// A function that asynchronously sleeps for a given duration; injectable so
/// tests can control time.
type SleepFn = fn(Duration) -> BoxFuture<'static, ()>;

/// Functions that may be injected for testing purposes, during standard control
/// flows these are specified by the new function.
#[derive(Clone, Copy)]
pub struct Callbacks {
    /// A function that gets the current time.
    pub now_fn: NowFn,
    /// A function that sleeps for a given Duration.
    pub sleep_fn: SleepFn,
}
1657 | | |
impl Debug for Callbacks {
    // Function pointers have no useful Debug representation, so only the
    // struct name is emitted (with the non-exhaustive `..` marker).
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("Callbacks").finish_non_exhaustive()
    }
}
1663 | | |
/// The set of additional information for executing an action over and above
/// those given in the `ActionInfo` passed to the worker. This allows
/// modification of the action for execution on this particular worker. This
/// may be used to run the action with a particular set of additional
/// environment variables, or perhaps configure it to execute within a
/// container.
///
/// The `Default` instance applies no overrides.
#[derive(Debug, Default)]
pub struct ExecutionConfiguration {
    /// If set, will be executed instead of the first argument passed in the
    /// `ActionInfo` with all of the arguments in the `ActionInfo` passed as
    /// arguments to this command.
    pub entrypoint: Option<String>,
    /// The only environment variables that will be specified when the command
    /// executes other than those in the `ActionInfo`. On Windows, `SystemRoot`
    /// and PATH are also assigned (see `inner_execute`).
    pub additional_environment: Option<HashMap<String, EnvironmentSource>>,
}
1681 | | |
/// Uploads finished action results to the action cache and/or the historical
/// results store, as dictated by the configured strategies.
#[derive(Debug)]
struct UploadActionResults {
    /// Strategy controlling when results are written to the action cache.
    upload_ac_results_strategy: UploadCacheResultsStrategy,
    /// Strategy controlling when `HistoricalExecuteResponse`s are uploaded.
    upload_historical_results_strategy: UploadCacheResultsStrategy,
    /// Action-cache store; may only be `None` when the AC strategy is `Never`
    /// (validated at construction).
    ac_store: Option<Store>,
    /// Store receiving serialized historical execute responses.
    historical_store: Store,
    /// Template rendered into the `ExecuteResponse` message on success.
    success_message_template: Template,
    /// Template rendered into the `ExecuteResponse` message on failure.
    failure_message_template: Template,
}
1691 | | |
1692 | | impl UploadActionResults { |
1693 | 26 | fn new( |
1694 | 26 | config: &UploadActionResultConfig, |
1695 | 26 | ac_store: Option<Store>, |
1696 | 26 | historical_store: Store, |
1697 | 26 | ) -> Result<Self, Error> { |
1698 | 26 | let upload_historical_results_strategy = config |
1699 | 26 | .upload_historical_results_strategy |
1700 | 26 | .unwrap_or(DEFAULT_HISTORICAL_RESULTS_STRATEGY); |
1701 | 8 | if !matches!( Branch (1701:12): [True: 8, False: 18]
Branch (1701:12): [Folded - Ignored]
|
1702 | 26 | config.upload_ac_results_strategy, |
1703 | | UploadCacheResultsStrategy::Never |
1704 | 8 | ) && ac_store.is_none() Branch (1704:14): [True: 0, False: 8]
Branch (1704:14): [Folded - Ignored]
|
1705 | | { |
1706 | 0 | return Err(make_input_err!( |
1707 | 0 | "upload_ac_results_strategy is set, but no ac_store is configured" |
1708 | 0 | )); |
1709 | 26 | } |
1710 | | Ok(Self { |
1711 | 26 | upload_ac_results_strategy: config.upload_ac_results_strategy, |
1712 | 26 | upload_historical_results_strategy, |
1713 | 26 | ac_store, |
1714 | 26 | historical_store, |
1715 | 26 | success_message_template: Template::new(&config.success_message_template).map_err( |
1716 | 0 | |e| { |
1717 | 0 | make_input_err!( |
1718 | | "Could not convert success_message_template to rust template: {} : {e:?}", |
1719 | | config.success_message_template |
1720 | | ) |
1721 | 0 | }, |
1722 | 0 | )?, |
1723 | 26 | failure_message_template: Template::new(&config.failure_message_template).map_err( |
1724 | 0 | |e| { |
1725 | 0 | make_input_err!( |
1726 | | "Could not convert failure_message_template to rust template: {} : {e:?}", |
1727 | | config.success_message_template |
1728 | | ) |
1729 | 0 | }, |
1730 | 0 | )?, |
1731 | | }) |
1732 | 26 | } |
1733 | | |
1734 | 0 | const fn should_cache_result( |
1735 | 0 | strategy: UploadCacheResultsStrategy, |
1736 | 0 | action_result: &ActionResult, |
1737 | 0 | treat_infra_error_as_failure: bool, |
1738 | 0 | ) -> bool { |
1739 | 0 | let did_fail = action_result.exit_code != 0 Branch (1739:24): [Folded - Ignored]
Branch (1739:24): [Folded - Ignored]
|
1740 | 0 | || (treat_infra_error_as_failure && action_result.error.is_some()); Branch (1740:17): [Folded - Ignored]
Branch (1740:17): [Folded - Ignored]
|
1741 | 0 | match strategy { |
1742 | 0 | UploadCacheResultsStrategy::SuccessOnly => !did_fail, |
1743 | 0 | UploadCacheResultsStrategy::Never => false, |
1744 | | // Never cache internal errors or timeouts. |
1745 | | UploadCacheResultsStrategy::Everything => { |
1746 | 0 | treat_infra_error_as_failure || action_result.error.is_none() Branch (1746:17): [Folded - Ignored]
Branch (1746:17): [Folded - Ignored]
|
1747 | | } |
1748 | 0 | UploadCacheResultsStrategy::FailuresOnly => did_fail, |
1749 | | } |
1750 | 0 | } |
1751 | | |
1752 | | /// Formats the message field in `ExecuteResponse` from the `success_message_template` |
1753 | | /// or `failure_message_template` config templates. |
1754 | 5 | fn format_execute_response_message( |
1755 | 5 | mut template_str: Template, |
1756 | 5 | action_digest_info: DigestInfo, |
1757 | 5 | maybe_historical_digest_info: Option<DigestInfo>, |
1758 | 5 | hasher: DigestHasherFunc, |
1759 | 5 | ) -> Result<String, Error> { |
1760 | 5 | template_str.replace( |
1761 | | "digest_function", |
1762 | 5 | hasher.proto_digest_func().as_str_name().to_lowercase(), |
1763 | | ); |
1764 | 5 | template_str.replace( |
1765 | | "action_digest_hash", |
1766 | 5 | action_digest_info.packed_hash().to_string(), |
1767 | | ); |
1768 | 5 | template_str.replace("action_digest_size", action_digest_info.size_bytes()); |
1769 | 5 | if let Some(historical_digest_info3 ) = maybe_historical_digest_info { Branch (1769:16): [True: 3, False: 2]
Branch (1769:16): [Folded - Ignored]
|
1770 | 3 | template_str.replace( |
1771 | 3 | "historical_results_hash", |
1772 | 3 | format!("{}", historical_digest_info.packed_hash()), |
1773 | 3 | ); |
1774 | 3 | template_str.replace( |
1775 | 3 | "historical_results_size", |
1776 | 3 | historical_digest_info.size_bytes(), |
1777 | 3 | ); |
1778 | 3 | } else { |
1779 | 2 | template_str.replace("historical_results_hash", ""); |
1780 | 2 | template_str.replace("historical_results_size", ""); |
1781 | 2 | } |
1782 | 5 | template_str |
1783 | 5 | .text() |
1784 | 5 | .map_err(|e| make_input_err!("Could not convert template to text: {e:?}")) |
1785 | 5 | } |
1786 | | |
1787 | 5 | async fn upload_ac_results( |
1788 | 5 | &self, |
1789 | 5 | action_digest: DigestInfo, |
1790 | 5 | action_result: ProtoActionResult, |
1791 | 5 | hasher: DigestHasherFunc, |
1792 | 5 | ) -> Result<(), Error> { |
1793 | 5 | let Some(ac_store) = self.ac_store.as_ref() else { Branch (1793:13): [Folded - Ignored]
Branch (1793:13): [Folded - Ignored]
Branch (1793:13): [True: 5, False: 0]
|
1794 | 0 | return Ok(()); |
1795 | | }; |
1796 | | // If we are a GrpcStore we shortcut here, as this is a special store. |
1797 | 5 | if let Some(grpc_store0 ) = ac_store.downcast_ref::<GrpcStore>(Some(action_digest.into())) { Branch (1797:16): [Folded - Ignored]
Branch (1797:16): [Folded - Ignored]
Branch (1797:16): [True: 0, False: 5]
|
1798 | 0 | let update_action_request = UpdateActionResultRequest { |
1799 | 0 | // This is populated by `update_action_result`. |
1800 | 0 | instance_name: String::new(), |
1801 | 0 | action_digest: Some(action_digest.into()), |
1802 | 0 | action_result: Some(action_result), |
1803 | 0 | results_cache_policy: None, |
1804 | 0 | digest_function: hasher.proto_digest_func().into(), |
1805 | 0 | }; |
1806 | 0 | return grpc_store |
1807 | 0 | .update_action_result(Request::new(update_action_request)) |
1808 | 0 | .await |
1809 | 0 | .map(|_| ()) |
1810 | 0 | .err_tip(|| "Caching ActionResult"); |
1811 | 5 | } |
1812 | | |
1813 | 5 | let mut store_data = BytesMut::with_capacity(ESTIMATED_DIGEST_SIZE); |
1814 | 5 | action_result |
1815 | 5 | .encode(&mut store_data) |
1816 | 5 | .err_tip(|| "Encoding ActionResult for caching")?0 ; |
1817 | | |
1818 | 5 | ac_store |
1819 | 5 | .update_oneshot(action_digest, store_data.split().freeze()) |
1820 | 5 | .await |
1821 | 5 | .err_tip(|| "Caching ActionResult") |
1822 | 5 | } |
1823 | | |
1824 | 3 | async fn upload_historical_results_with_message( |
1825 | 3 | &self, |
1826 | 3 | action_digest: DigestInfo, |
1827 | 3 | execute_response: ExecuteResponse, |
1828 | 3 | message_template: Template, |
1829 | 3 | hasher: DigestHasherFunc, |
1830 | 3 | ) -> Result<String, Error> { |
1831 | 3 | let historical_digest_info = serialize_and_upload_message( |
1832 | 3 | &HistoricalExecuteResponse { |
1833 | 3 | action_digest: Some(action_digest.into()), |
1834 | 3 | execute_response: Some(execute_response.clone()), |
1835 | 3 | }, |
1836 | 3 | self.historical_store.as_pin(), |
1837 | 3 | &mut hasher.hasher(), |
1838 | 3 | ) |
1839 | 3 | .await |
1840 | 3 | .err_tip(|| format!("Caching HistoricalExecuteResponse for digest: {action_digest}"0 ))?0 ; |
1841 | | |
1842 | 3 | Self::format_execute_response_message( |
1843 | 3 | message_template, |
1844 | 3 | action_digest, |
1845 | 3 | Some(historical_digest_info), |
1846 | 3 | hasher, |
1847 | | ) |
1848 | 3 | .err_tip(|| "Could not format message in upload_historical_results_with_message") |
1849 | 3 | } |
1850 | | |
1851 | 0 | async fn cache_action_result( |
1852 | 0 | &self, |
1853 | 0 | action_info: DigestInfo, |
1854 | 0 | action_result: &mut ActionResult, |
1855 | 0 | hasher: DigestHasherFunc, |
1856 | 6 | ) -> Result<(), Error> { |
1857 | 6 | let should_upload_historical_results = |
1858 | 6 | Self::should_cache_result(self.upload_historical_results_strategy, action_result, true); |
1859 | 6 | let should_upload_ac_results = |
1860 | 6 | Self::should_cache_result(self.upload_ac_results_strategy, action_result, false); |
1861 | | // Shortcut so we don't need to convert to proto if not needed. |
1862 | 6 | if !should_upload_ac_results && !should_upload_historical_results1 { Branch (1862:12): [Folded - Ignored]
Branch (1862:41): [Folded - Ignored]
Branch (1862:12): [Folded - Ignored]
Branch (1862:41): [Folded - Ignored]
Branch (1862:12): [True: 1, False: 5]
Branch (1862:41): [True: 1, False: 0]
|
1863 | 1 | return Ok(()); |
1864 | 5 | } |
1865 | | |
1866 | 5 | let mut execute_response = to_execute_response(action_result.clone()); |
1867 | | |
1868 | | // In theory exit code should always be != 0 if there's an error, but for safety we |
1869 | | // catch both. |
1870 | 5 | let message_template = if action_result.exit_code == 0 && action_result.error4 .is_none4 () { Branch (1870:35): [Folded - Ignored]
Branch (1870:67): [Folded - Ignored]
Branch (1870:35): [Folded - Ignored]
Branch (1870:67): [Folded - Ignored]
Branch (1870:35): [True: 4, False: 1]
Branch (1870:67): [True: 3, False: 1]
|
1871 | 3 | self.success_message_template.clone() |
1872 | | } else { |
1873 | 2 | self.failure_message_template.clone() |
1874 | | }; |
1875 | | |
1876 | 5 | let upload_historical_results_with_message_result = if should_upload_historical_results { Branch (1876:64): [Folded - Ignored]
Branch (1876:64): [Folded - Ignored]
Branch (1876:64): [True: 3, False: 2]
|
1877 | 3 | let maybe_message = self |
1878 | 3 | .upload_historical_results_with_message( |
1879 | 3 | action_info, |
1880 | 3 | execute_response.clone(), |
1881 | 3 | message_template, |
1882 | 3 | hasher, |
1883 | 3 | ) |
1884 | 3 | .await; |
1885 | 3 | match maybe_message { |
1886 | 3 | Ok(message) => { |
1887 | 3 | action_result.message.clone_from(&message); |
1888 | 3 | execute_response.message = message; |
1889 | 3 | Ok(()) |
1890 | | } |
1891 | 0 | Err(e) => Result::<(), Error>::Err(e), |
1892 | | } |
1893 | | } else { |
1894 | 2 | match Self::format_execute_response_message(message_template, action_info, None, hasher) |
1895 | | { |
1896 | 2 | Ok(message) => { |
1897 | 2 | action_result.message.clone_from(&message); |
1898 | 2 | execute_response.message = message; |
1899 | 2 | Ok(()) |
1900 | | } |
1901 | 0 | Err(e) => Err(e).err_tip(|| "Could not format message in cache_action_result"), |
1902 | | } |
1903 | | }; |
1904 | | |
1905 | | // Note: Done in this order because we assume most results will succeed and most configs will |
1906 | | // either always upload upload historical results or only upload on filure. In which case |
1907 | | // we can avoid an extra clone of the protos by doing this last with the above assumption. |
1908 | 5 | let ac_upload_results = if should_upload_ac_results { Branch (1908:36): [Folded - Ignored]
Branch (1908:36): [Folded - Ignored]
Branch (1908:36): [True: 5, False: 0]
|
1909 | 5 | self.upload_ac_results( |
1910 | 5 | action_info, |
1911 | 5 | execute_response |
1912 | 5 | .result |
1913 | 5 | .err_tip(|| "No result set in cache_action_result")?0 , |
1914 | 5 | hasher, |
1915 | | ) |
1916 | 5 | .await |
1917 | | } else { |
1918 | 0 | Ok(()) |
1919 | | }; |
1920 | 5 | upload_historical_results_with_message_result.merge(ac_upload_results) |
1921 | 6 | } |
1922 | | } |
1923 | | |
/// Construction arguments for `RunningActionsManagerImpl`.
#[derive(Debug)]
pub struct RunningActionsManagerArgs<'a> {
    /// Root directory under which per-operation action directories are made.
    pub root_action_directory: String,
    /// Worker-specific execution overrides (entrypoint, extra environment).
    pub execution_configuration: ExecutionConfiguration,
    /// CAS store; its fast store must be a `FilesystemStore` (checked at
    /// construction).
    pub cas_store: Arc<FastSlowStore>,
    /// Optional action-cache store used when uploading AC results.
    pub ac_store: Option<Store>,
    /// Store receiving serialized historical execute responses.
    pub historical_store: Store,
    /// Configuration describing when/how to upload action results.
    pub upload_action_result_config: &'a UploadActionResultConfig,
    /// Maximum action timeout accepted by this manager.
    pub max_action_timeout: Duration,
    /// Maximum time allowed for uploading results before timing out.
    pub max_upload_timeout: Duration,
    /// If true, action timeouts are handled outside this manager
    /// (NOTE(review): presumably by the scheduler — confirm with callers).
    pub timeout_handled_externally: bool,
    /// Optional directory cache for improving performance by caching
    /// reconstructed input directories.
    pub directory_cache: Option<Arc<crate::directory_cache::DirectoryCache>>,
}
1937 | | |
/// RAII guard that, on drop, removes its operation from the manager's
/// `cleaning_up_operations` set and notifies any waiters.
struct CleanupGuard {
    /// Weak reference so the guard never keeps the manager alive; if the
    /// manager is already gone at drop time there is nothing to undo.
    manager: Weak<RunningActionsManagerImpl>,
    /// Operation whose cleanup this guard tracks.
    operation_id: OperationId,
}
1942 | | |
impl Drop for CleanupGuard {
    fn drop(&mut self) {
        // Manager already dropped: no bookkeeping left to undo.
        let Some(manager) = self.manager.upgrade() else {
            return;
        };
        let mut cleaning = manager.cleaning_up_operations.lock();
        cleaning.remove(&self.operation_id);
        // Note: the lock is intentionally still held while notifying, so any
        // waiter woken here observes the removal once it re-acquires the lock.
        manager.cleanup_complete_notify.notify_waiters();
    }
}
1953 | | |
/// Holds state info about what is being executed and the interface for interacting
/// with actions while they are running.
#[derive(Debug)]
pub struct RunningActionsManagerImpl {
    /// Root directory under which per-operation action directories are made.
    root_action_directory: String,
    /// Worker-specific execution overrides (entrypoint, extra environment).
    execution_configuration: ExecutionConfiguration,
    /// CAS store used by the running actions.
    cas_store: Arc<FastSlowStore>,
    /// Fast store of `cas_store`, downcast to `FilesystemStore` at construction.
    filesystem_store: Arc<FilesystemStore>,
    /// Handles uploading results to the AC / historical stores.
    upload_action_results: UploadActionResults,
    /// Maximum action timeout accepted by this manager.
    max_action_timeout: Duration,
    /// Maximum time allowed for a result upload before timing out.
    max_upload_timeout: Duration,
    /// If true, action timeouts are handled outside this manager.
    timeout_handled_externally: bool,
    /// Actions currently running, keyed by operation; weak so dropped actions
    /// don't keep entries alive.
    running_actions: Mutex<HashMap<OperationId, Weak<RunningActionImpl>>>,
    // Note: We don't use Notify because we need to support a .wait_for()-like function, which
    // Notify does not support.
    action_done_tx: watch::Sender<()>,
    /// Injected time/sleep functions (mockable in tests).
    callbacks: Callbacks,
    /// Metrics collected across all actions of this manager.
    metrics: Arc<Metrics>,
    /// Track operations being cleaned up to avoid directory collisions during action retries.
    /// When an action fails and is retried on the same worker, we need to ensure the previous
    /// attempt's directory is fully cleaned up before creating a new one.
    /// See: <https://github.com/TraceMachina/nativelink/issues/1859>
    cleaning_up_operations: Mutex<HashSet<OperationId>>,
    /// Notify waiters when a cleanup operation completes. This is used in conjunction with
    /// `cleaning_up_operations` to coordinate directory cleanup and creation.
    cleanup_complete_notify: Arc<Notify>,
    /// Optional directory cache for improving performance by caching reconstructed
    /// input directories and using hardlinks.
    directory_cache: Option<Arc<crate::directory_cache::DirectoryCache>>,
}
1984 | | |
1985 | | impl RunningActionsManagerImpl { |
    /// Maximum time to wait for a cleanup operation to complete before timing out.
    /// TODO(marcussorealheis): Consider making cleanup wait timeout configurable in the future
    const MAX_WAIT: Duration = Duration::from_secs(30);
    /// Maximum backoff duration for exponential backoff when waiting for cleanup.
    /// The wait loop starts at 10ms and doubles up to this cap.
    const MAX_BACKOFF: Duration = Duration::from_millis(500);
    /// Constructs a manager with injected time/sleep callbacks (tests use this
    /// to control the clock); `new` supplies the real-time defaults.
    ///
    /// # Errors
    /// - The fast store of `args.cas_store` is not a `FilesystemStore`.
    /// - The upload-action-result configuration is invalid.
    pub fn new_with_callbacks(
        args: RunningActionsManagerArgs<'_>,
        callbacks: Callbacks,
    ) -> Result<Self, Error> {
        // Sadly because of some limitations of how Any works we need to clone more times than optimal.
        let filesystem_store = args
            .cas_store
            .fast_store()
            .downcast_ref::<FilesystemStore>(None)
            .err_tip(
                || "Expected FilesystemStore store for .fast_store() in RunningActionsManagerImpl",
            )?
            .get_arc()
            .err_tip(|| "FilesystemStore's internal Arc was lost")?;
        // Receiver side is created lazily by subscribers; only the sender is kept.
        let (action_done_tx, _) = watch::channel(());
        Ok(Self {
            root_action_directory: args.root_action_directory,
            execution_configuration: args.execution_configuration,
            cas_store: args.cas_store,
            filesystem_store,
            upload_action_results: UploadActionResults::new(
                args.upload_action_result_config,
                args.ac_store,
                args.historical_store,
            )
            .err_tip(|| "During RunningActionsManagerImpl construction")?,
            max_action_timeout: args.max_action_timeout,
            max_upload_timeout: args.max_upload_timeout,
            timeout_handled_externally: args.timeout_handled_externally,
            running_actions: Mutex::new(HashMap::new()),
            action_done_tx,
            callbacks,
            metrics: Arc::new(Metrics::default()),
            cleaning_up_operations: Mutex::new(HashSet::new()),
            cleanup_complete_notify: Arc::new(Notify::new()),
            directory_cache: args.directory_cache,
        })
    }
2029 | | |
2030 | 12 | pub fn new(args: RunningActionsManagerArgs<'_>) -> Result<Self, Error> { |
2031 | 12 | Self::new_with_callbacks( |
2032 | 12 | args, |
2033 | | Callbacks { |
2034 | 12 | now_fn: SystemTime::now, |
2035 | 2 | sleep_fn: |duration| Box::pin(tokio::time::sleep(duration)), |
2036 | | }, |
2037 | | ) |
2038 | 12 | } |
2039 | | |
    /// Fixes a race condition that occurs when an action fails to execute on a worker, and the same worker
    /// attempts to re-execute the same action before the physical cleanup (file is removed) completes.
    /// See this issue for additional details: <https://github.com/TraceMachina/nativelink/issues/1859>
    ///
    /// Blocks (with exponential backoff, up to `MAX_WAIT`) while the operation
    /// is in `cleaning_up_operations`; once clear, removes any stale directory
    /// left from a previous attempt.
    async fn wait_for_cleanup_if_needed(&self, operation_id: &OperationId) -> Result<(), Error> {
        let start = Instant::now();
        let mut backoff = Duration::from_millis(10);
        let mut has_waited = false;

        loop {
            // Scope the lock so it is released before any awaiting below.
            let should_wait = {
                let cleaning = self.cleaning_up_operations.lock();
                cleaning.contains(operation_id)
            };

            if !should_wait {
                let dir_path =
                    PathBuf::from(&self.root_action_directory).join(operation_id.to_string());

                if !dir_path.exists() {
                    return Ok(());
                }

                // Safety check: ensure we're only removing directories under root_action_directory
                let root_path = Path::new(&self.root_action_directory);
                let canonical_root = root_path.canonicalize().err_tip(|| {
                    format!(
                        "Failed to canonicalize root directory: {}",
                        self.root_action_directory
                    )
                })?;
                let canonical_dir = dir_path.canonicalize().err_tip(|| {
                    format!("Failed to canonicalize directory: {}", dir_path.display())
                })?;

                if !canonical_dir.starts_with(&canonical_root) {
                    return Err(make_err!(
                        Code::Internal,
                        "Attempted to remove directory outside of root_action_directory: {}",
                        dir_path.display()
                    ));
                }

                // Directory exists but not being cleaned - remove it
                warn!(
                    "Removing stale directory for {}: {}",
                    operation_id,
                    dir_path.display()
                );
                self.metrics.stale_removals.inc();

                // Try to remove the directory, with one retry on failure
                let remove_result = fs::remove_dir_all(&dir_path).await;
                if let Err(e) = remove_result {
                    // Retry once after a short delay in case the directory is temporarily locked
                    tokio::time::sleep(Duration::from_millis(100)).await;
                    fs::remove_dir_all(&dir_path).await.err_tip(|| {
                        format!(
                            "Failed to remove stale directory {} for retry of {} after retry (original error: {})",
                            dir_path.display(),
                            operation_id,
                            e
                        )
                    })?;
                }
                return Ok(());
            }

            if start.elapsed() > Self::MAX_WAIT {
                self.metrics.cleanup_wait_timeouts.inc();
                return Err(make_err!(
                    Code::DeadlineExceeded,
                    "Timeout waiting for previous operation cleanup: {} (waited {:?})",
                    operation_id,
                    start.elapsed()
                ));
            }

            // Count each waiting occurrence once, not once per loop iteration.
            if !has_waited {
                self.metrics.cleanup_waits.inc();
                has_waited = true;
            }

            trace!(
                "Waiting for cleanup of {} (elapsed: {:?}, backoff: {:?})",
                operation_id,
                start.elapsed(),
                backoff
            );

            // Wake on cleanup completion or after the backoff, whichever
            // comes first; backoff only grows when the sleep wins.
            tokio::select! {
                () = self.cleanup_complete_notify.notified() => {},
                () = tokio::time::sleep(backoff) => {
                    // Exponential backoff
                    backoff = (backoff * 2).min(Self::MAX_BACKOFF);
                },
            }
        }
    }
2138 | | |
2139 | 0 | fn make_action_directory<'a>( |
2140 | 0 | &'a self, |
2141 | 0 | operation_id: &'a OperationId, |
2142 | 0 | ) -> impl Future<Output = Result<String, Error>> + 'a { |
2143 | 19 | self.metrics.make_action_directory0 .wrap0 (async move { |
2144 | 19 | let action_directory = format!("{}/{}", self.root_action_directory, operation_id); |
2145 | 19 | fs::create_dir(&action_directory) |
2146 | 19 | .await |
2147 | 19 | .err_tip(|| format!("Error creating action directory {action_directory}"0 ))?0 ; |
2148 | 19 | Ok(action_directory) |
2149 | 19 | }) |
2150 | 0 | } |
2151 | | |
2152 | 19 | fn create_action_info( |
2153 | 19 | &self, |
2154 | 19 | start_execute: StartExecute, |
2155 | 19 | queued_timestamp: SystemTime, |
2156 | 19 | ) -> impl Future<Output = Result<ActionInfo, Error>> + '_ { |
2157 | 19 | self.metrics.create_action_info.wrap(async move { |
2158 | 19 | let execute_request = start_execute |
2159 | 19 | .execute_request |
2160 | 19 | .err_tip(|| "Expected execute_request to exist in StartExecute")?0 ; |
2161 | 19 | let action_digest: DigestInfo = execute_request |
2162 | 19 | .action_digest |
2163 | 19 | .clone() |
2164 | 19 | .err_tip(|| "Expected action_digest to exist on StartExecute")?0 |
2165 | 19 | .try_into()?0 ; |
2166 | 19 | let load_start_timestamp = (self.callbacks.now_fn)(); |
2167 | 19 | let action = |
2168 | 19 | get_and_decode_digest::<Action>(self.cas_store.as_ref(), action_digest.into()) |
2169 | 19 | .await |
2170 | 19 | .err_tip(|| "During start_action")?0 ; |
2171 | 19 | let action_info = ActionInfo::try_from_action_and_execute_request( |
2172 | 19 | execute_request, |
2173 | 19 | action, |
2174 | 19 | load_start_timestamp, |
2175 | 19 | queued_timestamp, |
2176 | | ) |
2177 | 19 | .err_tip(|| "Could not create ActionInfo in create_and_add_action()")?0 ; |
2178 | 19 | Ok(action_info) |
2179 | 19 | }) |
2180 | 19 | } |
2181 | | |
2182 | 18 | fn cleanup_action(&self, operation_id: &OperationId) -> Result<(), Error> { |
2183 | 18 | let mut running_actions = self.running_actions.lock(); |
2184 | 18 | let result = running_actions.remove(operation_id).err_tip(|| {0 |
2185 | 0 | format!("Expected operation id '{operation_id}' to exist in RunningActionsManagerImpl") |
2186 | 0 | }); |
2187 | | // No need to copy anything, we just are telling the receivers an event happened. |
2188 | 18 | self.action_done_tx.send_modify(|()| {}); |
2189 | 18 | result.map(|_| ()) |
2190 | 18 | } |
2191 | | |
2192 | | // Note: We do not capture metrics on this call, only `.kill_all()`. |
2193 | | // Important: When the future returns the process may still be running. |
2194 | 2 | async fn kill_operation(action: Arc<RunningActionImpl>)0 { |
2195 | 2 | warn!( |
2196 | 2 | operation_id = ?action.operation_id, |
2197 | 2 | "Sending kill to running operation", |
2198 | | ); |
2199 | 2 | let kill_channel_tx = { |
2200 | 2 | let mut action_state = action.state.lock(); |
2201 | 2 | action_state.kill_channel_tx.take() |
2202 | | }; |
2203 | 2 | if let Some(kill_channel_tx) = kill_channel_tx { Branch (2203:16): [Folded - Ignored]
Branch (2203:16): [Folded - Ignored]
Branch (2203:16): [True: 2, False: 0]
|
2204 | 2 | if kill_channel_tx.send(()).is_err() { Branch (2204:16): [Folded - Ignored]
Branch (2204:16): [Folded - Ignored]
Branch (2204:16): [True: 0, False: 2]
|
2205 | 0 | error!( |
2206 | 0 | operation_id = ?action.operation_id, |
2207 | 0 | "Error sending kill to running operation", |
2208 | | ); |
2209 | 2 | } |
2210 | 0 | } |
2211 | 2 | } |
2212 | | |
2213 | 17 | fn perform_cleanup(self: &Arc<Self>, operation_id: OperationId) -> Option<CleanupGuard> { |
2214 | 17 | let mut cleaning = self.cleaning_up_operations.lock(); |
2215 | 17 | cleaning |
2216 | 17 | .insert(operation_id.clone()) |
2217 | 17 | .then_some(CleanupGuard { |
2218 | 17 | manager: Arc::downgrade(self), |
2219 | 17 | operation_id, |
2220 | 17 | }) |
2221 | 17 | } |
2222 | | } |
2223 | | |
2224 | | impl RunningActionsManager for RunningActionsManagerImpl { |
2225 | | type RunningAction = RunningActionImpl; |
2226 | | |
2227 | 0 | async fn create_and_add_action( |
2228 | 0 | self: &Arc<Self>, |
2229 | 0 | worker_id: String, |
2230 | 0 | start_execute: StartExecute, |
2231 | 19 | ) -> Result<Arc<RunningActionImpl>, Error> { |
2232 | 19 | self.metrics |
2233 | 19 | .create_and_add_action |
2234 | 19 | .wrap(async move { |
2235 | 19 | let queued_timestamp = start_execute |
2236 | 19 | .queued_timestamp |
2237 | 19 | .and_then(|time| time13 .try_into13 ().ok13 ()) |
2238 | 19 | .unwrap_or(SystemTime::UNIX_EPOCH); |
2239 | 19 | let operation_id = start_execute |
2240 | 19 | .operation_id.as_str().into(); |
2241 | 19 | let action_info = self.create_action_info(start_execute, queued_timestamp).await?0 ; |
2242 | 19 | debug!( |
2243 | | ?action_info, |
2244 | 19 | "Worker received action", |
2245 | | ); |
2246 | | // Wait for any previous cleanup to complete before creating directory |
2247 | 19 | self.wait_for_cleanup_if_needed(&operation_id).await?0 ; |
2248 | 19 | let action_directory = self.make_action_directory(&operation_id).await?0 ; |
2249 | 19 | let execution_metadata = ExecutionMetadata { |
2250 | 19 | worker: worker_id, |
2251 | 19 | queued_timestamp: action_info.insert_timestamp, |
2252 | 19 | worker_start_timestamp: action_info.load_timestamp, |
2253 | 19 | worker_completed_timestamp: SystemTime::UNIX_EPOCH, |
2254 | 19 | input_fetch_start_timestamp: SystemTime::UNIX_EPOCH, |
2255 | 19 | input_fetch_completed_timestamp: SystemTime::UNIX_EPOCH, |
2256 | 19 | execution_start_timestamp: SystemTime::UNIX_EPOCH, |
2257 | 19 | execution_completed_timestamp: SystemTime::UNIX_EPOCH, |
2258 | 19 | output_upload_start_timestamp: SystemTime::UNIX_EPOCH, |
2259 | 19 | output_upload_completed_timestamp: SystemTime::UNIX_EPOCH, |
2260 | 19 | }; |
2261 | 19 | let timeout = if action_info.timeout.is_zero() || self.timeout_handled_externally3 { Branch (2261:34): [Folded - Ignored]
Branch (2261:67): [Folded - Ignored]
Branch (2261:34): [Folded - Ignored]
Branch (2261:67): [Folded - Ignored]
Branch (2261:34): [True: 16, False: 3]
Branch (2261:67): [True: 0, False: 3]
|
2262 | 16 | self.max_action_timeout |
2263 | | } else { |
2264 | 3 | action_info.timeout |
2265 | | }; |
2266 | 19 | if timeout > self.max_action_timeout { Branch (2266:20): [Folded - Ignored]
Branch (2266:20): [Folded - Ignored]
Branch (2266:20): [True: 1, False: 18]
|
2267 | 1 | return Err(make_err!( |
2268 | 1 | Code::InvalidArgument, |
2269 | 1 | "Action timeout of {} seconds is greater than the maximum allowed timeout of {} seconds", |
2270 | 1 | timeout.as_secs_f32(), |
2271 | 1 | self.max_action_timeout.as_secs_f32() |
2272 | 1 | )); |
2273 | 18 | } |
2274 | 18 | let running_action = Arc::new(RunningActionImpl::new( |
2275 | 18 | execution_metadata, |
2276 | 18 | operation_id.clone(), |
2277 | 18 | action_directory, |
2278 | 18 | action_info, |
2279 | 18 | timeout, |
2280 | 18 | self.clone(), |
2281 | | )); |
2282 | | { |
2283 | 18 | let mut running_actions = self.running_actions.lock(); |
2284 | | // Check if action already exists and is still alive |
2285 | 18 | if let Some(existing_weak0 ) = running_actions.get(&operation_id) { Branch (2285:28): [Folded - Ignored]
Branch (2285:28): [Folded - Ignored]
Branch (2285:28): [True: 0, False: 18]
|
2286 | 0 | if let Some(_existing_action) = existing_weak.upgrade() { Branch (2286:32): [Folded - Ignored]
Branch (2286:32): [Folded - Ignored]
Branch (2286:32): [True: 0, False: 0]
|
2287 | 0 | return Err(make_err!( |
2288 | 0 | Code::AlreadyExists, |
2289 | 0 | "Action with operation_id {} is already running", |
2290 | 0 | operation_id |
2291 | 0 | )); |
2292 | 0 | } |
2293 | 18 | } |
2294 | 18 | running_actions.insert(operation_id, Arc::downgrade(&running_action)); |
2295 | | } |
2296 | 18 | Ok(running_action) |
2297 | 19 | }) |
2298 | 19 | .await |
2299 | 19 | } |
2300 | | |
2301 | 6 | async fn cache_action_result( |
2302 | 6 | &self, |
2303 | 6 | action_info: DigestInfo, |
2304 | 6 | action_result: &mut ActionResult, |
2305 | 6 | hasher: DigestHasherFunc, |
2306 | 6 | ) -> Result<(), Error> { |
2307 | 6 | self.metrics |
2308 | 6 | .cache_action_result |
2309 | 6 | .wrap(self.upload_action_results.cache_action_result( |
2310 | 6 | action_info, |
2311 | 6 | action_result, |
2312 | 6 | hasher, |
2313 | 6 | )) |
2314 | 6 | .await |
2315 | 6 | } |
2316 | | |
2317 | 0 | async fn kill_operation(&self, operation_id: &OperationId) -> Result<(), Error> { |
2318 | 0 | let running_action = { |
2319 | 0 | let running_actions = self.running_actions.lock(); |
2320 | 0 | running_actions |
2321 | 0 | .get(operation_id) |
2322 | 0 | .and_then(Weak::upgrade) |
2323 | 0 | .ok_or_else(|| make_input_err!("Failed to get running action {operation_id}"))? |
2324 | | }; |
2325 | 0 | Self::kill_operation(running_action).await; |
2326 | 0 | Ok(()) |
2327 | 0 | } |
2328 | | |
2329 | | // Note: When the future returns the process should be fully killed and cleaned up. |
2330 | 2 | async fn kill_all(&self)0 { |
2331 | 2 | self.metrics |
2332 | 2 | .kill_all |
2333 | 2 | .wrap_no_capture_result(async move { |
2334 | 2 | let kill_operations: Vec<Arc<RunningActionImpl>> = { |
2335 | 2 | let running_actions = self.running_actions.lock(); |
2336 | 2 | running_actions |
2337 | 2 | .iter() |
2338 | 2 | .filter_map(|(_operation_id, action)| action.upgrade()) |
2339 | 2 | .collect() |
2340 | | }; |
2341 | 2 | let mut kill_futures: FuturesUnordered<_> = kill_operations |
2342 | 2 | .into_iter() |
2343 | 2 | .map(Self::kill_operation) |
2344 | 2 | .collect(); |
2345 | 4 | while kill_futures.next().await.is_some() {}2 Branch (2345:23): [Folded - Ignored]
Branch (2345:23): [Folded - Ignored]
Branch (2345:23): [True: 2, False: 2]
|
2346 | 2 | }) |
2347 | 2 | .await; |
2348 | | // Ignore error. If error happens it means there's no sender, which is not a problem. |
2349 | | // Note: Sanity check this API will always check current value then future values: |
2350 | | // https://play.rust-lang.org/?version=stable&edition=2021&gist=23103652cc1276a97e5f9938da87fdb2 |
2351 | 2 | drop( |
2352 | 2 | self.action_done_tx |
2353 | 2 | .subscribe() |
2354 | 4 | .wait_for2 (|()| self.running_actions.lock().is_empty()) |
2355 | 2 | .await, |
2356 | | ); |
2357 | 2 | } |
2358 | | |
2359 | | #[inline] |
2360 | 2 | fn metrics(&self) -> &Arc<Metrics> { |
2361 | 2 | &self.metrics |
2362 | 2 | } |
2363 | | } |
2364 | | |
/// Metrics recorded by `RunningActionsManagerImpl` and its running actions.
///
/// `AsyncCounterWrapper` fields wrap async operations (see the `.wrap(...)`
/// call sites in this file); `CounterWithTime` fields are plain event
/// counters. Each field's `#[metric(help = ...)]` attribute documents what it
/// measures.
#[derive(Debug, Default, MetricsComponent)]
pub struct Metrics {
    #[metric(help = "Stats about the create_and_add_action command.")]
    create_and_add_action: AsyncCounterWrapper,
    #[metric(help = "Stats about the cache_action_result command.")]
    cache_action_result: AsyncCounterWrapper,
    #[metric(help = "Stats about the kill_all command.")]
    kill_all: AsyncCounterWrapper,
    #[metric(help = "Stats about the create_action_info command.")]
    create_action_info: AsyncCounterWrapper,
    #[metric(help = "Stats about the make_work_directory command.")]
    make_action_directory: AsyncCounterWrapper,
    #[metric(help = "Stats about the prepare_action command.")]
    prepare_action: AsyncCounterWrapper,
    #[metric(help = "Stats about the execute command.")]
    execute: AsyncCounterWrapper,
    #[metric(help = "Stats about the upload_results command.")]
    upload_results: AsyncCounterWrapper,
    #[metric(help = "Stats about the cleanup command.")]
    cleanup: AsyncCounterWrapper,
    #[metric(help = "Stats about the get_finished_result command.")]
    get_finished_result: AsyncCounterWrapper,
    #[metric(help = "Number of times an action waited for cleanup to complete.")]
    cleanup_waits: CounterWithTime,
    #[metric(help = "Number of stale directories removed during action retries.")]
    stale_removals: CounterWithTime,
    #[metric(help = "Number of timeouts while waiting for cleanup to complete.")]
    cleanup_wait_timeouts: CounterWithTime,
    #[metric(help = "Stats about the get_proto_command_from_store command.")]
    get_proto_command_from_store: AsyncCounterWrapper,
    #[metric(help = "Stats about the download_to_directory command.")]
    download_to_directory: AsyncCounterWrapper,
    #[metric(help = "Stats about the prepare_output_files command.")]
    prepare_output_files: AsyncCounterWrapper,
    #[metric(help = "Stats about the prepare_output_paths command.")]
    prepare_output_paths: AsyncCounterWrapper,
    #[metric(help = "Stats about the child_process command.")]
    child_process: AsyncCounterWrapper,
    #[metric(help = "Stats about the child_process_success_error_code command.")]
    child_process_success_error_code: CounterWithTime,
    #[metric(help = "Stats about the child_process_failure_error_code command.")]
    child_process_failure_error_code: CounterWithTime,
    #[metric(help = "Total time spent uploading stdout.")]
    upload_stdout: AsyncCounterWrapper,
    #[metric(help = "Total time spent uploading stderr.")]
    upload_stderr: AsyncCounterWrapper,
    #[metric(help = "Total number of task timeouts.")]
    task_timeouts: CounterWithTime,
}