/build/source/nativelink-worker/src/local_worker.rs
Line | Count | Source |
1 | | // Copyright 2024 The NativeLink Authors. All rights reserved. |
2 | | // |
3 | | // Licensed under the Functional Source License, Version 1.1, Apache 2.0 Future License (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // See LICENSE file for details |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | |
15 | | use core::hash::BuildHasher; |
16 | | use core::pin::Pin; |
17 | | use core::str; |
18 | | use core::sync::atomic::{AtomicU64, Ordering}; |
19 | | use core::time::Duration; |
20 | | use std::borrow::Cow; |
21 | | use std::collections::HashMap; |
22 | | use std::env; |
23 | | use std::process::Stdio; |
24 | | use std::sync::{Arc, Weak}; |
25 | | |
26 | | use futures::future::BoxFuture; |
27 | | use futures::stream::FuturesUnordered; |
28 | | use futures::{Future, FutureExt, StreamExt, TryFutureExt, select}; |
29 | | use nativelink_config::cas_server::{EnvironmentSource, LocalWorkerConfig}; |
30 | | use nativelink_error::{Code, Error, ResultExt, make_err, make_input_err}; |
31 | | use nativelink_metric::{MetricsComponent, RootMetricsComponent}; |
32 | | use nativelink_proto::com::github::trace_machina::nativelink::remote_execution::update_for_worker::Update; |
33 | | use nativelink_proto::com::github::trace_machina::nativelink::remote_execution::worker_api_client::WorkerApiClient; |
34 | | use nativelink_proto::com::github::trace_machina::nativelink::remote_execution::{ |
35 | | ExecuteComplete, ExecuteResult, GoingAwayRequest, KeepAliveRequest, UpdateForWorker, |
36 | | execute_result, |
37 | | }; |
38 | | use nativelink_store::fast_slow_store::FastSlowStore; |
39 | | use nativelink_util::action_messages::{ActionResult, ActionStage, OperationId}; |
40 | | use nativelink_util::common::fs; |
41 | | use nativelink_util::digest_hasher::DigestHasherFunc; |
42 | | use nativelink_util::metrics_utils::{AsyncCounterWrapper, CounterWithTime}; |
43 | | use nativelink_util::shutdown_guard::ShutdownGuard; |
44 | | use nativelink_util::store_trait::Store; |
45 | | use nativelink_util::{spawn, tls_utils}; |
46 | | use opentelemetry::context::Context; |
47 | | use tokio::process; |
48 | | use tokio::sync::{broadcast, mpsc}; |
49 | | use tokio::time::sleep; |
50 | | use tokio_stream::wrappers::UnboundedReceiverStream; |
51 | | use tonic::Streaming; |
52 | | use tracing::{Level, debug, error, event, info, info_span, instrument, trace, warn}; |
53 | | |
54 | | use crate::running_actions_manager::{ |
55 | | ExecutionConfiguration, Metrics as RunningActionManagerMetrics, RunningAction, |
56 | | RunningActionsManager, RunningActionsManagerArgs, RunningActionsManagerImpl, |
57 | | }; |
58 | | use crate::worker_api_client_wrapper::{WorkerApiClientTrait, WorkerApiClientWrapper}; |
59 | | use crate::worker_utils::make_connect_worker_request; |
60 | | |
61 | | /// Amount of time to wait if we have actions in transit before we try to |
62 | | /// consider an error to have occurred. |
63 | | const ACTIONS_IN_TRANSIT_TIMEOUT_S: f32 = 10.; |
64 | | |
65 | | /// If we lose connection to the worker api server we will wait this many seconds |
66 | | /// before trying to connect. |
67 | | const CONNECTION_RETRY_DELAY_S: f32 = 0.5; |
68 | | |
69 | | /// Default endpoint timeout. If this value gets modified the documentation in |
70 | | /// `cas_server.rs` must also be updated. |
71 | | const DEFAULT_ENDPOINT_TIMEOUT_S: f32 = 5.; |
72 | | |
73 | | /// Default maximum amount of time a task is allowed to run for. |
74 | | /// If this value gets modified the documentation in `cas_server.rs` must also be updated. |
75 | | const DEFAULT_MAX_ACTION_TIMEOUT: Duration = Duration::from_secs(1200); // 20 mins. |
76 | | const DEFAULT_MAX_UPLOAD_TIMEOUT: Duration = Duration::from_secs(600); // 10 mins. |
77 | | |
78 | | struct LocalWorkerImpl<'a, T: WorkerApiClientTrait + 'static, U: RunningActionsManager> { |
79 | | config: &'a LocalWorkerConfig, |
80 | | // According to the tonic documentation it is a cheap operation to clone this. |
81 | | grpc_client: T, |
82 | | worker_id: String, |
83 | | running_actions_manager: Arc<U>, |
84 | | // Number of actions that have been received in `Update::StartAction`, but |
85 | | // not yet processed by running_actions_manager's spawn. This number should |
86 | | // always be zero if there are no actions running and no actions being waited |
87 | | // on by the scheduler. |
88 | | actions_in_transit: Arc<AtomicU64>, |
89 | | metrics: Arc<Metrics>, |
90 | | } |
91 | | |
92 | 5 | pub async fn preconditions_met<H: BuildHasher + Sync>( |
93 | 5 | precondition_script: Option<String>, |
94 | 5 | extra_envs: &HashMap<String, String, H>, |
95 | 5 | ) -> Result<(), Error> { |
96 | 5 | let Some(precondition_script2 ) = &precondition_script else { Branch (96:9): [True: 2, False: 3]
Branch (96:9): [Folded - Ignored]
Branch (96:9): [Folded - Ignored]
|
97 | | // No script means we are always ok to proceed. |
98 | 3 | return Ok(()); |
99 | | }; |
100 | | // TODO: Might want to pass some information about the command to the |
101 | | // script, but at this point it's not even been downloaded yet, |
102 | | // so that's not currently possible. Perhaps we'll move this in |
103 | | // future to pass useful information through? Or perhaps we'll |
104 | | // have a pre-condition and a pre-execute script instead, although |
105 | | // arguably entrypoint already gives us that. |
106 | | |
107 | 2 | let maybe_split_cmd = shlex::split(precondition_script); |
108 | 2 | let (command, args) = match &maybe_split_cmd { |
109 | 2 | Some(split_cmd) => (&split_cmd[0], &split_cmd[1..]), |
110 | | None => { |
111 | 0 | return Err(make_input_err!( |
112 | 0 | "Could not parse the value of precondition_script: '{}'", |
113 | 0 | precondition_script, |
114 | 0 | )); |
115 | | } |
116 | | }; |
117 | | |
118 | 2 | let precondition_process = process::Command::new(command) |
119 | 2 | .args(args) |
120 | 2 | .kill_on_drop(true) |
121 | 2 | .stdin(Stdio::null()) |
122 | 2 | .stdout(Stdio::piped()) |
123 | 2 | .stderr(Stdio::null()) |
124 | 2 | .env_clear() |
125 | 2 | .envs(extra_envs) |
126 | 2 | .spawn() |
127 | 2 | .err_tip(|| format!("Could not execute precondition command {precondition_script:?}"0 ))?0 ; |
128 | 2 | let output = precondition_process.wait_with_output().await?0 ; |
129 | 2 | let stdout = str::from_utf8(&output.stdout).unwrap_or(""); |
130 | 2 | trace!(status = %output.status, %stdout, "Preconditions script returned"); |
131 | 2 | if output.status.code() == Some(0) { Branch (131:8): [True: 1, False: 1]
Branch (131:8): [Folded - Ignored]
Branch (131:8): [Folded - Ignored]
|
132 | 1 | Ok(()) |
133 | | } else { |
134 | 1 | Err(make_err!( |
135 | 1 | Code::ResourceExhausted, |
136 | 1 | "Preconditions script returned status {} - {}", |
137 | 1 | output.status, |
138 | 1 | stdout |
139 | 1 | )) |
140 | | } |
141 | 5 | } |
142 | | |
143 | | impl<'a, T: WorkerApiClientTrait + 'static, U: RunningActionsManager> LocalWorkerImpl<'a, T, U> { |
144 | 5 | fn new( |
145 | 5 | config: &'a LocalWorkerConfig, |
146 | 5 | grpc_client: T, |
147 | 5 | worker_id: String, |
148 | 5 | running_actions_manager: Arc<U>, |
149 | 5 | metrics: Arc<Metrics>, |
150 | 5 | ) -> Self { |
151 | 5 | Self { |
152 | 5 | config, |
153 | 5 | grpc_client, |
154 | 5 | worker_id, |
155 | 5 | running_actions_manager, |
156 | 5 | // Number of actions that have been received in `Update::StartAction`, but |
157 | 5 | // not yet processed by running_actions_manager's spawn. This number should |
158 | 5 | // always be zero if there are no actions running and no actions being waited |
159 | 5 | // on by the scheduler. |
160 | 5 | actions_in_transit: Arc::new(AtomicU64::new(0)), |
161 | 5 | metrics, |
162 | 5 | } |
163 | 5 | } |
164 | | |
165 | | /// Starts a background spawn/thread that will send a message to the server every `timeout / 2`. |
166 | 5 | async fn start_keep_alive(&self) -> Result<(), Error> {4 |
167 | | // According to tonic's documentation this call should be cheap and is the same stream. |
168 | 4 | let mut grpc_client = self.grpc_client.clone(); |
169 | | |
170 | | loop { |
171 | 4 | let timeout = self |
172 | 4 | .config |
173 | 4 | .worker_api_endpoint |
174 | 4 | .timeout |
175 | 4 | .unwrap_or(DEFAULT_ENDPOINT_TIMEOUT_S); |
176 | | // We always send 2 keep alive requests per timeout. Http2 should manage most of our |
177 | | // timeout issues, this is a secondary check to ensure we can still send data. |
178 | 4 | sleep(Duration::from_secs_f32(timeout / 2.)).await; |
179 | 0 | if let Err(e) = grpc_client.keep_alive(KeepAliveRequest {}).await { Branch (179:20): [True: 0, False: 0]
Branch (179:20): [Folded - Ignored]
Branch (179:20): [Folded - Ignored]
|
180 | 0 | return Err(make_err!( |
181 | 0 | Code::Internal, |
182 | 0 | "Failed to send KeepAlive in LocalWorker : {:?}", |
183 | 0 | e |
184 | 0 | )); |
185 | 0 | } |
186 | | } |
187 | 0 | } |
188 | | |
189 | 5 | async fn run( |
190 | 5 | &self, |
191 | 5 | update_for_worker_stream: Streaming<UpdateForWorker>, |
192 | 5 | shutdown_rx: &mut broadcast::Receiver<ShutdownGuard>, |
193 | 5 | ) -> Result<(), Error> { |
194 | | // This big block of logic is designed to help simplify upstream components. Upstream |
195 | | // components can write standard futures that return a `Result<(), Error>` and this block |
196 | | // will forward the error up to the client and disconnect from the scheduler. |
197 | | // It is a common use case that an item sent through update_for_worker_stream will always |
198 | | // have a response but the response will be triggered through a callback to the scheduler. |
199 | | // This can be quite tricky to manage, so what we have done here is given access to a |
200 | | // `futures` variable which because this is in a single thread as well as a channel that you |
201 | | // send a future into that makes it into the `futures` variable. |
202 | | // This means that if you want to perform an action based on the result of the future |
203 | | // you use the `.map()` method and the new action will always come to live in this spawn, |
204 | | // giving mutable access to stuff in this struct. |
205 | | // NOTE: If you ever return from this function it will disconnect from the scheduler. |
206 | 5 | let mut futures = FuturesUnordered::new(); |
207 | 5 | futures.push(self.start_keep_alive().boxed()); |
208 | | |
209 | 5 | let (add_future_channel, add_future_rx) = mpsc::unbounded_channel(); |
210 | 5 | let mut add_future_rx = UnboundedReceiverStream::new(add_future_rx).fuse(); |
211 | | |
212 | 5 | let mut update_for_worker_stream = update_for_worker_stream.fuse(); |
213 | | // A notify which is triggered every time actions_in_flight is subtracted. |
214 | 5 | let actions_notify = Arc::new(tokio::sync::Notify::new()); |
215 | | // A counter of actions that are in-flight, this is similar to actions_in_transit but |
216 | | // includes the AC upload and notification to the scheduler. |
217 | 5 | let actions_in_flight = Arc::new(AtomicU64::new(0)); |
218 | | // Set to true when shutting down, this stops any new StartAction. |
219 | 5 | let mut shutting_down = false; |
220 | | |
221 | | loop { |
222 | 16 | select! { |
223 | 16 | maybe_update6 = update_for_worker_stream.next() => if !shutting_down6 || maybe_update0 .is_some0 () { Branch (223:70): [True: 6, False: 0]
Branch (223:88): [True: 0, False: 0]
Branch (223:70): [Folded - Ignored]
Branch (223:88): [Folded - Ignored]
Branch (223:70): [Folded - Ignored]
Branch (223:88): [Folded - Ignored]
|
224 | 6 | match maybe_update |
225 | 6 | .err_tip(|| "UpdateForWorker stream closed early")?1 |
226 | 5 | .err_tip(|| "Got error in UpdateForWorker stream")?0 |
227 | | .update |
228 | 5 | .err_tip(|| "Expected update to exist in UpdateForWorker")?0 |
229 | | { |
230 | | Update::ConnectionResult(_) => { |
231 | 0 | return Err(make_input_err!( |
232 | 0 | "Got ConnectionResult in LocalWorker::run which should never happen" |
233 | 0 | )); |
234 | | } |
235 | | // TODO(palfrey) We should possibly do something with this notification. |
236 | 0 | Update::Disconnect(()) => { |
237 | 0 | self.metrics.disconnects_received.inc(); |
238 | 0 | } |
239 | 0 | Update::KeepAlive(()) => { |
240 | 0 | self.metrics.keep_alives_received.inc(); |
241 | 0 | } |
242 | 1 | Update::KillOperationRequest(kill_operation_request) => { |
243 | 1 | let operation_id = OperationId::from(kill_operation_request.operation_id); |
244 | 1 | if let Err(err0 ) = self.running_actions_manager.kill_operation(&operation_id).await { Branch (244:36): [True: 0, False: 1]
Branch (244:36): [Folded - Ignored]
Branch (244:36): [Folded - Ignored]
|
245 | 0 | error!( |
246 | | %operation_id, |
247 | | ?err, |
248 | 0 | "Failed to send kill request for operation" |
249 | | ); |
250 | 1 | } |
251 | | } |
252 | 4 | Update::StartAction(start_execute) => { |
253 | | // Don't accept any new requests if we're shutting down. |
254 | 4 | if shutting_down { Branch (254:32): [True: 0, False: 4]
Branch (254:32): [Folded - Ignored]
Branch (254:32): [Folded - Ignored]
|
255 | 0 | if let Some(instance_name) = start_execute.execute_request.map(|request| request.instance_name) { Branch (255:40): [True: 0, False: 0]
Branch (255:40): [Folded - Ignored]
Branch (255:40): [Folded - Ignored]
|
256 | 0 | self.grpc_client.clone().execution_response( |
257 | 0 | ExecuteResult{ |
258 | 0 | instance_name, |
259 | 0 | operation_id: start_execute.operation_id, |
260 | 0 | result: Some(execute_result::Result::InternalError(make_err!(Code::ResourceExhausted, "Worker shutting down").into())), |
261 | 0 | } |
262 | 0 | ).await?; |
263 | 0 | } |
264 | 0 | continue; |
265 | 4 | } |
266 | | |
267 | 4 | self.metrics.start_actions_received.inc(); |
268 | | |
269 | 4 | let execute_request = start_execute.execute_request.as_ref(); |
270 | 4 | let operation_id = start_execute.operation_id.clone(); |
271 | 4 | let operation_id_to_log = operation_id.clone(); |
272 | 4 | let maybe_instance_name = execute_request.map(|v| v.instance_name.clone()); |
273 | 4 | let action_digest = execute_request.and_then(|v| v.action_digest.clone()); |
274 | 4 | let digest_hasher = execute_request |
275 | 4 | .ok_or_else(|| make_input_err!("Expected execute_request to be set")) |
276 | 4 | .and_then(|v| DigestHasherFunc::try_from(v.digest_function)) |
277 | 4 | .err_tip(|| "In LocalWorkerImpl::new()")?0 ; |
278 | | |
279 | 4 | let start_action_fut = { |
280 | 4 | let precondition_script_cfg = self.config.experimental_precondition_script.clone(); |
281 | 4 | let mut extra_envs: HashMap<String, String> = HashMap::new(); |
282 | 4 | if let Some(ref additional_environment0 ) = self.config.additional_environment { Branch (282:40): [True: 0, False: 4]
Branch (282:40): [Folded - Ignored]
Branch (282:40): [Folded - Ignored]
|
283 | 0 | for (name, source) in additional_environment { |
284 | 0 | let value = match source { |
285 | 0 | EnvironmentSource::Property(property) => start_execute |
286 | 0 | .platform.as_ref().and_then(|p|p.properties.iter().find(|pr| &pr.name == property)) |
287 | 0 | .map_or_else(|| Cow::Borrowed(""), |v| Cow::Borrowed(v.value.as_str())), |
288 | 0 | EnvironmentSource::Value(value) => Cow::Borrowed(value.as_str()), |
289 | 0 | EnvironmentSource::FromEnvironment => Cow::Owned(env::var(name).unwrap_or_default()), |
290 | 0 | other => { |
291 | 0 | debug!(?other, "Worker doesn't support this type of additional environment"); |
292 | 0 | continue; |
293 | | } |
294 | | }; |
295 | 0 | extra_envs.insert(name.clone(), value.into_owned()); |
296 | | } |
297 | 4 | } |
298 | 4 | let actions_in_transit = self.actions_in_transit.clone(); |
299 | 4 | let worker_id = self.worker_id.clone(); |
300 | 4 | let running_actions_manager = self.running_actions_manager.clone(); |
301 | 4 | let mut grpc_client = self.grpc_client.clone(); |
302 | 4 | let complete = ExecuteComplete { |
303 | 4 | operation_id: operation_id.clone(), |
304 | 4 | }; |
305 | 4 | self.metrics.clone().wrap(move |metrics| async move { |
306 | 4 | metrics.preconditions.wrap(preconditions_met(precondition_script_cfg, &extra_envs)) |
307 | 4 | .and_then(|()| running_actions_manager3 .create_and_add_action3 (worker_id3 , start_execute3 )) |
308 | 4 | .map(move |r| { |
309 | | // Now that we either failed or registered our action, we can |
310 | | // consider the action to no longer be in transit. |
311 | 4 | actions_in_transit.fetch_sub(1, Ordering::Release); |
312 | 4 | r |
313 | 4 | }) |
314 | 4 | .and_then(|action| {3 |
315 | 3 | debug!( |
316 | 3 | operation_id = %action.get_operation_id(), |
317 | 3 | "Received request to run action" |
318 | | ); |
319 | 3 | action |
320 | 3 | .clone() |
321 | 3 | .prepare_action() |
322 | 3 | .and_then(RunningAction::execute) |
323 | 3 | .and_then(|result| async move {2 |
324 | | // Notify that execution has completed so it can schedule a new action. |
325 | 2 | drop(grpc_client.execution_complete(complete).await); |
326 | 2 | Ok(result) |
327 | 4 | }) |
328 | 3 | .and_then(RunningAction::upload_results) |
329 | 3 | .and_then(RunningAction::get_finished_result) |
330 | | // Note: We need ensure we run cleanup even if one of the other steps fail. |
331 | 3 | .then(|result| async move {2 |
332 | 2 | if let Err(e0 ) = action.cleanup().await { Branch (332:56): [True: 0, False: 2]
Branch (332:56): [Folded - Ignored]
Branch (332:56): [Folded - Ignored]
|
333 | 0 | return Result::<ActionResult, Error>::Err(e).merge(result); |
334 | 2 | } |
335 | 2 | result |
336 | 4 | }) |
337 | 4 | }3 ).await |
338 | 7 | }) |
339 | | }; |
340 | | |
341 | 4 | let make_publish_future = { |
342 | 4 | let mut grpc_client = self.grpc_client.clone(); |
343 | | |
344 | 4 | let running_actions_manager = self.running_actions_manager.clone(); |
345 | 3 | move |res: Result<ActionResult, Error>| async move { |
346 | 3 | let instance_name = maybe_instance_name |
347 | 3 | .err_tip(|| "`instance_name` could not be resolved; this is likely an internal error in local_worker.")?0 ; |
348 | 3 | match res { |
349 | 2 | Ok(mut action_result) => { |
350 | | // Save in the action cache before notifying the scheduler that we've completed. |
351 | 2 | if let Some(digest_info) = action_digest.clone().and_then(|action_digest| action_digest.try_into().ok()) { Branch (351:52): [True: 2, False: 0]
Branch (351:52): [Folded - Ignored]
Branch (351:52): [Folded - Ignored]
|
352 | 2 | if let Err(err0 ) = running_actions_manager.cache_action_result(digest_info, &mut action_result, digest_hasher).await { Branch (352:56): [True: 0, False: 2]
Branch (352:56): [Folded - Ignored]
Branch (352:56): [Folded - Ignored]
|
353 | 0 | error!( |
354 | | ?err, |
355 | | ?action_digest, |
356 | 0 | "Error saving action in store", |
357 | | ); |
358 | 2 | } |
359 | 0 | } |
360 | 2 | let action_stage = ActionStage::Completed(action_result); |
361 | 2 | grpc_client.execution_response( |
362 | 2 | ExecuteResult{ |
363 | 2 | instance_name, |
364 | 2 | operation_id, |
365 | 2 | result: Some(execute_result::Result::ExecuteResponse(action_stage.into())), |
366 | 2 | } |
367 | 2 | ) |
368 | 2 | .await |
369 | 0 | .err_tip(|| "Error while calling execution_response")?; |
370 | | }, |
371 | 1 | Err(e) => { |
372 | 1 | grpc_client.execution_response(ExecuteResult{ |
373 | 1 | instance_name, |
374 | 1 | operation_id, |
375 | 1 | result: Some(execute_result::Result::InternalError(e.into())), |
376 | 1 | }).await.err_tip0 (|| "Error calling execution_response with error")?0 ; |
377 | | }, |
378 | | } |
379 | 0 | Ok(()) |
380 | 3 | } |
381 | | }; |
382 | | |
383 | 4 | self.actions_in_transit.fetch_add(1, Ordering::Release); |
384 | | |
385 | 4 | let add_future_channel = add_future_channel.clone(); |
386 | | |
387 | 4 | info_span!( |
388 | | "worker_start_action_ctx", |
389 | | operation_id = operation_id_to_log, |
390 | 4 | digest_function = %digest_hasher.to_string(), |
391 | 4 | ).in_scope(|| { |
392 | 4 | let _guard = Context::current_with_value(digest_hasher) |
393 | 4 | .attach(); |
394 | | |
395 | 4 | let actions_in_flight = actions_in_flight.clone(); |
396 | 4 | let actions_notify = actions_notify.clone(); |
397 | 4 | let actions_in_flight_fail = actions_in_flight.clone(); |
398 | 4 | let actions_notify_fail = actions_notify.clone(); |
399 | 4 | actions_in_flight.fetch_add(1, Ordering::Release); |
400 | | |
401 | 4 | futures.push( |
402 | 4 | spawn!("worker_start_action", start_action_fut).map(move |res| {3 |
403 | 3 | let res = res.err_tip(|| "Failed to launch spawn")?0 ; |
404 | 3 | if let Err(err1 ) = &res { Branch (404:48): [True: 1, False: 2]
Branch (404:48): [Folded - Ignored]
Branch (404:48): [Folded - Ignored]
|
405 | 1 | error!(?err, "Error executing action"); |
406 | 2 | } |
407 | 3 | add_future_channel |
408 | 3 | .send(make_publish_future(res).then(move |res| {0 |
409 | 0 | actions_in_flight.fetch_sub(1, Ordering::Release); |
410 | 0 | actions_notify.notify_one(); |
411 | 0 | core::future::ready(res) |
412 | 3 | }0 ).boxed()) |
413 | 3 | .map_err(|_| make_err!(Code::Internal0 , "LocalWorker could not send future"))?0 ; |
414 | 3 | Ok(()) |
415 | 3 | }) |
416 | 4 | .or_else(move |err| {0 |
417 | | // If the make_publish_future is not run we still need to notify. |
418 | 0 | actions_in_flight_fail.fetch_sub(1, Ordering::Release); |
419 | 0 | actions_notify_fail.notify_one(); |
420 | 0 | core::future::ready(Err(err)) |
421 | 0 | }) |
422 | 4 | .boxed() |
423 | | ); |
424 | 4 | }); |
425 | | } |
426 | | } |
427 | 0 | }, |
428 | 16 | res3 = add_future_rx.next() => { |
429 | 3 | let fut = res.err_tip(|| "New future stream receives should never be closed")?0 ; |
430 | 3 | futures.push(fut); |
431 | | }, |
432 | 16 | res3 = futures.next() => res3 .err_tip3 (|| "Keep-alive should always pending. Likely unable to send data to scheduler")?0 ?0 , |
433 | 16 | complete_msg0 = shutdown_rx.recv().fuse() => { |
434 | 0 | warn!("Worker loop received shutdown signal. Shutting down worker...",); |
435 | 0 | let mut grpc_client = self.grpc_client.clone(); |
436 | 0 | let shutdown_guard = complete_msg.map_err(|e| make_err!(Code::Internal, "Failed to receive shutdown message: {e:?}"))?; |
437 | 0 | let actions_in_flight = actions_in_flight.clone(); |
438 | 0 | let actions_notify = actions_notify.clone(); |
439 | 0 | let shutdown_future = async move { |
440 | | // Wait for in-flight operations to be fully completed. |
441 | 0 | while actions_in_flight.load(Ordering::Acquire) > 0 { Branch (441:31): [True: 0, False: 0]
Branch (441:31): [Folded - Ignored]
Branch (441:31): [Folded - Ignored]
|
442 | 0 | actions_notify.notified().await; |
443 | | } |
444 | | // Sending this message immediately evicts all jobs from |
445 | | // this worker, of which there should be none. |
446 | 0 | if let Err(e) = grpc_client.going_away(GoingAwayRequest {}).await { Branch (446:32): [True: 0, False: 0]
Branch (446:32): [Folded - Ignored]
Branch (446:32): [Folded - Ignored]
|
447 | 0 | error!("Failed to send GoingAwayRequest: {e}",); |
448 | 0 | return Err(e); |
449 | 0 | } |
450 | | // Allow shutdown to occur now. |
451 | 0 | drop(shutdown_guard); |
452 | 0 | Ok::<(), Error>(()) |
453 | 0 | }; |
454 | 0 | futures.push(shutdown_future.boxed()); |
455 | 0 | shutting_down = true; |
456 | | }, |
457 | | }; |
458 | | } |
459 | | // Unreachable. |
460 | 1 | } |
461 | | } |
462 | | |
463 | | type ConnectionFactory<T> = Box<dyn Fn() -> BoxFuture<'static, Result<T, Error>> + Send + Sync>; |
464 | | |
465 | | pub struct LocalWorker<T: WorkerApiClientTrait + 'static, U: RunningActionsManager> { |
466 | | config: Arc<LocalWorkerConfig>, |
467 | | running_actions_manager: Arc<U>, |
468 | | connection_factory: ConnectionFactory<T>, |
469 | | sleep_fn: Option<Box<dyn Fn(Duration) -> BoxFuture<'static, ()> + Send + Sync>>, |
470 | | metrics: Arc<Metrics>, |
471 | | } |
472 | | |
473 | | impl< |
474 | | T: WorkerApiClientTrait + core::fmt::Debug + 'static, |
475 | | U: RunningActionsManager + core::fmt::Debug, |
476 | | > core::fmt::Debug for LocalWorker<T, U> |
477 | | { |
478 | 0 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { |
479 | 0 | f.debug_struct("LocalWorker") |
480 | 0 | .field("config", &self.config) |
481 | 0 | .field("running_actions_manager", &self.running_actions_manager) |
482 | 0 | .field("metrics", &self.metrics) |
483 | 0 | .finish_non_exhaustive() |
484 | 0 | } |
485 | | } |
486 | | |
487 | | /// Creates a new `LocalWorker`. The `cas_store` must be an instance of |
488 | | /// `FastSlowStore` and will be checked at runtime. |
489 | 2 | pub async fn new_local_worker( |
490 | 2 | config: Arc<LocalWorkerConfig>, |
491 | 2 | cas_store: Store, |
492 | 2 | ac_store: Option<Store>, |
493 | 2 | historical_store: Store, |
494 | 2 | ) -> Result<LocalWorker<WorkerApiClientWrapper, RunningActionsManagerImpl>, Error> { |
495 | 2 | let fast_slow_store = cas_store |
496 | 2 | .downcast_ref::<FastSlowStore>(None) |
497 | 2 | .err_tip(|| "Expected store for LocalWorker's store to be a FastSlowStore")?0 |
498 | 2 | .get_arc() |
499 | 2 | .err_tip(|| "FastSlowStore's Arc doesn't exist")?0 ; |
500 | | |
501 | | // Log warning about CAS configuration for multi-worker setups |
502 | 2 | event!( |
503 | 2 | Level::INFO, |
504 | 2 | worker_name = %config.name, |
505 | 2 | "Starting worker '{}'. IMPORTANT: If running multiple workers, all workers \ |
506 | 2 | must share the same CAS storage path to avoid 'Object not found' errors.", |
507 | 2 | config.name |
508 | | ); |
509 | | |
510 | 2 | if let Ok(path1 ) = fs::canonicalize(&config.work_directory).await { Branch (510:12): [True: 1, False: 1]
Branch (510:12): [Folded - Ignored]
Branch (510:12): [Folded - Ignored]
|
511 | 1 | fs::remove_dir_all(&path).await.err_tip(|| {0 |
512 | 0 | format!( |
513 | 0 | "Could not remove work_directory '{}' in LocalWorker", |
514 | 0 | &path.as_path().to_str().unwrap_or("bad path") |
515 | | ) |
516 | 0 | })?; |
517 | 1 | } |
518 | | |
519 | 2 | fs::create_dir_all(&config.work_directory) |
520 | 2 | .await |
521 | 2 | .err_tip(|| format!("Could not make work_directory : {}"0 , config.work_directory0 ))?0 ; |
522 | 2 | let entrypoint = if config.entrypoint.is_empty() { Branch (522:25): [True: 2, False: 0]
Branch (522:25): [Folded - Ignored]
Branch (522:25): [Folded - Ignored]
|
523 | 2 | None |
524 | | } else { |
525 | 0 | Some(config.entrypoint.clone()) |
526 | | }; |
527 | 2 | let max_action_timeout = if config.max_action_timeout == 0 { Branch (527:33): [True: 2, False: 0]
Branch (527:33): [Folded - Ignored]
Branch (527:33): [Folded - Ignored]
|
528 | 2 | DEFAULT_MAX_ACTION_TIMEOUT |
529 | | } else { |
530 | 0 | Duration::from_secs(config.max_action_timeout as u64) |
531 | | }; |
532 | 2 | let max_upload_timeout = if config.max_upload_timeout == 0 { Branch (532:33): [True: 2, False: 0]
Branch (532:33): [Folded - Ignored]
Branch (532:33): [Folded - Ignored]
|
533 | 2 | DEFAULT_MAX_UPLOAD_TIMEOUT |
534 | | } else { |
535 | 0 | Duration::from_secs(config.max_upload_timeout as u64) |
536 | | }; |
537 | | |
538 | | // Initialize directory cache if configured |
539 | 2 | let directory_cache = if let Some(cache_config0 ) = &config.directory_cache { Branch (539:34): [True: 0, False: 2]
Branch (539:34): [Folded - Ignored]
Branch (539:34): [Folded - Ignored]
|
540 | | use std::path::PathBuf; |
541 | | |
542 | | use crate::directory_cache::{ |
543 | | DirectoryCache, DirectoryCacheConfig as WorkerDirCacheConfig, |
544 | | }; |
545 | | |
546 | 0 | let cache_root = if cache_config.cache_root.is_empty() { Branch (546:29): [True: 0, False: 0]
Branch (546:29): [Folded - Ignored]
Branch (546:29): [Folded - Ignored]
|
547 | 0 | PathBuf::from(&config.work_directory).parent().map_or_else( |
548 | 0 | || PathBuf::from("/tmp/nativelink_directory_cache"), |
549 | 0 | |p| p.join("directory_cache"), |
550 | | ) |
551 | | } else { |
552 | 0 | PathBuf::from(&cache_config.cache_root) |
553 | | }; |
554 | | |
555 | 0 | let worker_cache_config = WorkerDirCacheConfig { |
556 | 0 | max_entries: cache_config.max_entries, |
557 | 0 | max_size_bytes: cache_config.max_size_bytes, |
558 | 0 | cache_root, |
559 | 0 | }; |
560 | | |
561 | 0 | match DirectoryCache::new(worker_cache_config, Store::new(fast_slow_store.clone())).await { |
562 | 0 | Ok(cache) => { |
563 | 0 | tracing::info!("Directory cache initialized successfully"); |
564 | 0 | Some(Arc::new(cache)) |
565 | | } |
566 | 0 | Err(e) => { |
567 | 0 | tracing::warn!("Failed to initialize directory cache: {:?}", e); |
568 | 0 | None |
569 | | } |
570 | | } |
571 | | } else { |
572 | 2 | None |
573 | | }; |
574 | | |
575 | 2 | let running_actions_manager = |
576 | 2 | Arc::new(RunningActionsManagerImpl::new(RunningActionsManagerArgs { |
577 | 2 | root_action_directory: config.work_directory.clone(), |
578 | 2 | execution_configuration: ExecutionConfiguration { |
579 | 2 | entrypoint, |
580 | 2 | additional_environment: config.additional_environment.clone(), |
581 | 2 | }, |
582 | 2 | cas_store: fast_slow_store, |
583 | 2 | ac_store, |
584 | 2 | historical_store, |
585 | 2 | upload_action_result_config: &config.upload_action_result, |
586 | 2 | max_action_timeout, |
587 | 2 | max_upload_timeout, |
588 | 2 | timeout_handled_externally: config.timeout_handled_externally, |
589 | 2 | directory_cache, |
590 | 2 | })?0 ); |
591 | 2 | let local_worker = LocalWorker::new_with_connection_factory_and_actions_manager( |
592 | 2 | config.clone(), |
593 | 2 | running_actions_manager, |
594 | 2 | Box::new(move || {0 |
595 | 0 | let config = config.clone(); |
596 | 0 | Box::pin(async move { |
597 | 0 | let timeout = config |
598 | 0 | .worker_api_endpoint |
599 | 0 | .timeout |
600 | 0 | .unwrap_or(DEFAULT_ENDPOINT_TIMEOUT_S); |
601 | 0 | let timeout_duration = Duration::from_secs_f32(timeout); |
602 | 0 | let tls_config = |
603 | 0 | tls_utils::load_client_config(&config.worker_api_endpoint.tls_config) |
604 | 0 | .err_tip(|| "Parsing local worker TLS configuration")?; |
605 | 0 | let endpoint = |
606 | 0 | tls_utils::endpoint_from(&config.worker_api_endpoint.uri, tls_config) |
607 | 0 | .map_err(|e| make_input_err!("Invalid URI for worker endpoint : {e:?}"))? |
608 | 0 | .connect_timeout(timeout_duration) |
609 | 0 | .timeout(timeout_duration); |
610 | | |
611 | 0 | let transport = endpoint.connect().await.map_err(|e| { |
612 | 0 | make_err!( |
613 | 0 | Code::Internal, |
614 | | "Could not connect to endpoint {}: {e:?}", |
615 | 0 | config.worker_api_endpoint.uri |
616 | | ) |
617 | 0 | })?; |
618 | 0 | Ok(WorkerApiClient::new(transport).into()) |
619 | 0 | }) |
620 | 0 | }), |
621 | 2 | Box::new(move |d| Box::pin0 (sleep0 (d0 ))), |
622 | | ); |
623 | 2 | Ok(local_worker) |
624 | 2 | } |
625 | | |
impl<T: WorkerApiClientTrait + 'static, U: RunningActionsManager> LocalWorker<T, U> {
    /// Constructs a `LocalWorker` from pre-built parts.
    ///
    /// `connection_factory` creates the client used to talk to the scheduler,
    /// and `sleep_fn` supplies the delay primitive used for retry backoff —
    /// injecting it lets tests substitute a virtual clock.
    pub fn new_with_connection_factory_and_actions_manager(
        config: Arc<LocalWorkerConfig>,
        running_actions_manager: Arc<U>,
        connection_factory: ConnectionFactory<T>,
        sleep_fn: Box<dyn Fn(Duration) -> BoxFuture<'static, ()> + Send + Sync>,
    ) -> Self {
        // Hold only a weak reference to the manager's metrics so this struct
        // does not extend the manager's lifetime.
        let metrics = Arc::new(Metrics::new(Arc::downgrade(
            running_actions_manager.metrics(),
        )));
        Self {
            config,
            running_actions_manager,
            connection_factory,
            // Stored as Option so `run()` can take() ownership exactly once.
            sleep_fn: Some(sleep_fn),
            metrics,
        }
    }

    /// Returns the configured name of this worker.
    #[allow(
        clippy::missing_const_for_fn,
        reason = "False positive on stable, but not on nightly"
    )]
    pub fn name(&self) -> &String {
        &self.config.name
    }

    /// Registers this worker with the scheduler over `client`.
    ///
    /// Builds the extra environment-variable map from the config, sends a
    /// connect-worker request, then waits for the scheduler's first message,
    /// which must be a `ConnectionResult` carrying the assigned worker id.
    ///
    /// Returns the assigned worker id together with the stream of further
    /// `UpdateForWorker` messages, or an error if the handshake fails.
    async fn register_worker(
        &self,
        client: &mut T,
    ) -> Result<(String, Streaming<UpdateForWorker>), Error> {
        let mut extra_envs: HashMap<String, String> = HashMap::new();
        if let Some(ref additional_environment) = self.config.additional_environment {
            for (name, source) in additional_environment {
                let value = match source {
                    EnvironmentSource::Value(value) => Cow::Borrowed(value.as_str()),
                    // A missing environment variable resolves to "".
                    EnvironmentSource::FromEnvironment => {
                        Cow::Owned(env::var(name).unwrap_or_default())
                    }
                    // Other source kinds are not resolvable at registration
                    // time; log and skip them.
                    other => {
                        debug!(
                            ?other,
                            "Worker registration doesn't support this type of additional environment"
                        );
                        continue;
                    }
                };
                extra_envs.insert(name.clone(), value.into_owned());
            }
        }

        let connect_worker_request = make_connect_worker_request(
            self.config.name.clone(),
            &self.config.platform_properties,
            &extra_envs,
            self.config.max_inflight_tasks,
        )
        .await?;
        let mut update_for_worker_stream = client
            .connect_worker(connect_worker_request)
            .await
            .err_tip(|| "Could not call connect_worker() in worker")?
            .into_inner();

        let first_msg_update = update_for_worker_stream
            .next()
            .await
            .err_tip(|| "Got EOF expected UpdateForWorker")?
            .err_tip(|| "Got error when receiving UpdateForWorker")?
            .update;

        // The scheduler must identify us before any other traffic is valid.
        let worker_id = match first_msg_update {
            Some(Update::ConnectionResult(connection_result)) => connection_result.worker_id,
            other => {
                return Err(make_input_err!(
                    "Expected first response from scheduler to be a ConnectResult got : {:?}",
                    other
                ));
            }
        };
        Ok((worker_id, update_for_worker_stream))
    }

    /// Main entry point: connects to the scheduler, registers, and serves
    /// updates in an infinite reconnect loop.
    ///
    /// Only returns with an error if, after a disconnect, the in-transit
    /// action count fails to drain to zero within the allotted timeout;
    /// otherwise every failure is logged, backed off, and retried.
    #[instrument(skip(self), level = Level::INFO)]
    pub async fn run(
        mut self,
        mut shutdown_rx: broadcast::Receiver<ShutdownGuard>,
    ) -> Result<(), Error> {
        // `sleep_fn` is consumed here, so `run()` can only be invoked once
        // per worker instance.
        let sleep_fn = self
            .sleep_fn
            .take()
            .err_tip(|| "Could not unwrap sleep_fn in LocalWorker::run")?;
        let sleep_fn_pin = Pin::new(&sleep_fn);
        // Shared retry handler: log the error, then back off before retrying.
        let error_handler = Box::pin(move |err| async move {
            error!(?err, "Error");
            (sleep_fn_pin)(Duration::from_secs_f32(CONNECTION_RETRY_DELAY_S)).await;
        });

        loop {
            // First connect to our endpoint.
            let mut client = match (self.connection_factory)().await {
                Ok(client) => client,
                Err(e) => {
                    (error_handler)(e).await;
                    continue; // Try to connect again.
                }
            };

            // Next register our worker with the scheduler.
            let (inner, update_for_worker_stream) = match self.register_worker(&mut client).await {
                Err(e) => {
                    (error_handler)(e).await;
                    continue; // Try to connect again.
                }
                Ok((worker_id, update_for_worker_stream)) => (
                    LocalWorkerImpl::new(
                        &self.config,
                        client,
                        worker_id,
                        self.running_actions_manager.clone(),
                        self.metrics.clone(),
                    ),
                    update_for_worker_stream,
                ),
            };
            info!(
                worker_id = %inner.worker_id,
                "Worker registered with scheduler"
            );

            // Now listen for connections and run all other services.
            if let Err(err) = inner.run(update_for_worker_stream, &mut shutdown_rx).await {
                'no_more_actions: {
                    // Ensure there are no actions in transit before we try to kill
                    // all our actions.
                    const ITERATIONS: usize = 1_000;

                    const ERROR_MSG: &str = "Actions in transit did not reach zero before we disconnected from the scheduler";

                    // Poll the counter in small slices of the total timeout so
                    // we notice the drain as soon as it completes.
                    let sleep_duration = ACTIONS_IN_TRANSIT_TIMEOUT_S / ITERATIONS as f32;
                    for _ in 0..ITERATIONS {
                        if inner.actions_in_transit.load(Ordering::Acquire) == 0 {
                            break 'no_more_actions;
                        }
                        (sleep_fn_pin)(Duration::from_secs_f32(sleep_duration)).await;
                    }
                    // Timed out: bail out instead of killing actions that may
                    // still be mid-handoff with the scheduler.
                    error!(ERROR_MSG);
                    return Err(err.append(ERROR_MSG));
                }
                error!(?err, "Worker disconnected from scheduler");
                // Kill off any existing actions because if we re-connect, we'll
                // get some more and it might resource lock us.
                self.running_actions_manager.kill_all().await;

                (error_handler)(err).await; // Try to connect again.
            }
        }
        // Unreachable.
    }
}
786 | | |
/// Counters describing the activity of a `LocalWorker`, exported through the
/// metrics subsystem via the `MetricsComponent` derive.
#[derive(Debug, MetricsComponent)]
pub struct Metrics {
    #[metric(
        help = "Total number of actions sent to this worker to process. This does not mean it started them, it just means it received a request to execute it."
    )]
    start_actions_received: CounterWithTime,
    #[metric(help = "Total number of disconnects received from the scheduler.")]
    disconnects_received: CounterWithTime,
    #[metric(help = "Total number of keep-alives received from the scheduler.")]
    keep_alives_received: CounterWithTime,
    #[metric(
        help = "Stats about the calls to check if an action satisfies the config supplied script."
    )]
    preconditions: AsyncCounterWrapper,
    // Weak reference so that exporting metrics never keeps the running
    // actions manager alive past its normal lifetime.
    #[metric]
    #[allow(
        clippy::struct_field_names,
        reason = "TODO Fix this. Triggers on nightly"
    )]
    running_actions_manager_metrics: Weak<RunningActionManagerMetrics>,
}

// Marker impl: allows this metrics struct to be registered as a root node in
// the metrics tree.
impl RootMetricsComponent for Metrics {}
810 | | |
811 | | impl Metrics { |
812 | 8 | fn new(running_actions_manager_metrics: Weak<RunningActionManagerMetrics>) -> Self { |
813 | 8 | Self { |
814 | 8 | start_actions_received: CounterWithTime::default(), |
815 | 8 | disconnects_received: CounterWithTime::default(), |
816 | 8 | keep_alives_received: CounterWithTime::default(), |
817 | 8 | preconditions: AsyncCounterWrapper::default(), |
818 | 8 | running_actions_manager_metrics, |
819 | 8 | } |
820 | 8 | } |
821 | | } |
822 | | |
823 | | impl Metrics { |
824 | 4 | async fn wrap<U, T: Future<Output = U>, F: FnOnce(Arc<Self>) -> T>( |
825 | 4 | self: Arc<Self>, |
826 | 4 | fut: F, |
827 | 4 | ) -> U { |
828 | 4 | fut(self).await |
829 | 3 | } |
830 | | } |