Coverage Report

Created: 2025-12-17 22:46

/build/source/nativelink-scheduler/src/cache_lookup_scheduler.rs
Line | Count | Source
   1 |       | // Copyright 2024 The NativeLink Authors. All rights reserved.
   2 |       | //
   3 |       | // Licensed under the Functional Source License, Version 1.1, Apache 2.0 Future License (the "License");
   4 |       | // you may not use this file except in compliance with the License.
   5 |       | // You may obtain a copy of the License at
   6 |       | //
   7 |       | //    See LICENSE file for details
   8 |       | //
   9 |       | // Unless required by applicable law or agreed to in writing, software
  10 |       | // distributed under the License is distributed on an "AS IS" BASIS,
  11 |       | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12 |       | // See the License for the specific language governing permissions and
  13 |       | // limitations under the License.
  14 |       |
  15 |       | use std::collections::HashMap;
  16 |       | use std::sync::Arc;
  17 |       | use std::time::SystemTime;
  18 |       |
  19 |       | use async_trait::async_trait;
  20 |       | use nativelink_error::{Code, Error, ResultExt, make_err};
  21 |       | use nativelink_metric::{MetricsComponent, RootMetricsComponent};
  22 |       | use nativelink_proto::build::bazel::remote::execution::v2::{
  23 |       |     ActionResult as ProtoActionResult, GetActionResultRequest,
  24 |       | };
  25 |       | use nativelink_store::ac_utils::get_and_decode_digest;
  26 |       | use nativelink_store::grpc_store::GrpcStore;
  27 |       | use nativelink_util::action_messages::{
  28 |       |     ActionInfo, ActionStage, ActionState, ActionUniqueKey, ActionUniqueQualifier, OperationId,
  29 |       | };
  30 |       | use nativelink_util::background_spawn;
  31 |       | use nativelink_util::common::DigestInfo;
  32 |       | use nativelink_util::digest_hasher::DigestHasherFunc;
  33 |       | use nativelink_util::known_platform_property_provider::KnownPlatformPropertyProvider;
  34 |       | use nativelink_util::operation_state_manager::{
  35 |       |     ActionStateResult, ActionStateResultStream, ClientStateManager, OperationFilter,
  36 |       | };
  37 |       | use nativelink_util::origin_event::OriginMetadata;
  38 |       | use nativelink_util::store_trait::Store;
  39 |       | use opentelemetry::baggage::BaggageExt;
  40 |       | use opentelemetry::context::Context;
  41 |       | use opentelemetry_semantic_conventions::attribute::ENDUSER_ID;
  42 |       | use parking_lot::{Mutex, MutexGuard};
  43 |       | use scopeguard::guard;
  44 |       | use tokio::sync::oneshot;
  45 |       | use tonic::{Request, Response};
  46 |       | use tracing::error;
  47 |       |
  48 |       | /// Actions that are having their cache checked, or that failed the cache
  49 |       | /// lookup and are being forwarded upstream. Does not include the
  50 |       | /// `skip_cache_check` actions, which are forwarded directly.
  51 |       | type CheckActions = HashMap<
  52 |       |     ActionUniqueKey,
  53 |       |     Vec<(
  54 |       |         OperationId,
  55 |       |         oneshot::Sender<Result<Box<dyn ActionStateResult>, Error>>,
  56 |       |     )>,
  57 |       | >;
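
The map above implements request deduplication: the first caller for a given `ActionUniqueKey` owns the cache check, and every later caller for the same key just subscribes to its result. A minimal sketch of that pattern (illustrative names only; the real logic lives in `subscribe_to_existing_action` and `inner_add_action` below and is keyed by `ActionUniqueKey` rather than `String`):

    use std::collections::HashMap;
    use tokio::sync::oneshot;

    type Waiters<T> = HashMap<String, Vec<oneshot::Sender<T>>>;

    /// Returns `Some(receiver)` if a lookup for `key` is already in flight.
    /// `None` means the caller is first: it should insert an entry, perform
    /// the lookup itself, and drain the waiters once the result arrives.
    fn subscribe_if_in_flight<T>(map: &mut Waiters<T>, key: &str) -> Option<oneshot::Receiver<T>> {
        map.get_mut(key).map(|waiters| {
            let (tx, rx) = oneshot::channel();
            waiters.push(tx);
            rx
        })
    }
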
  58 |       |
  59 |       | #[derive(MetricsComponent)]
  60 |       | pub struct CacheLookupScheduler {
  61 |       |     /// A reference to the AC to find existing actions in.
  62 |       |     /// To prevent unintended issues, this store should probably be a `CompletenessCheckingStore`.
  63 |       |     #[metric(group = "ac_store")]
  64 |       |     ac_store: Store,
  65 |       |     /// The "real" scheduler to use to perform actions if they were not found
  66 |       |     /// in the action cache.
  67 |       |     #[metric(group = "action_scheduler")]
  68 |       |     action_scheduler: Arc<dyn ClientStateManager>,
  69 |       |     /// Actions that are currently performing a `CacheCheck`.
  70 |       |     inflight_cache_checks: Arc<Mutex<CheckActions>>,
  71 |       | }
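
As the field docs describe, the scheduler wraps an AC store and a "real" scheduler. A hedged wiring sketch, assuming the caller already holds a `Store` pointed at the AC and an inner `ClientStateManager`; it uses only `CacheLookupScheduler::new` and the `ClientStateManager` trait from this file (with the imports listed above in scope):

    async fn submit_via_cache_lookup(
        ac_store: Store,
        inner_scheduler: Arc<dyn ClientStateManager>,
        client_operation_id: OperationId,
        action_info: Arc<ActionInfo>,
    ) -> Result<Box<dyn ActionStateResult>, Error> {
        // Cacheable actions are checked against the AC first; uncacheable
        // (skip_cache_check) actions are forwarded straight to `inner_scheduler`.
        let scheduler = CacheLookupScheduler::new(ac_store, inner_scheduler)?;
        scheduler.add_action(client_operation_id, action_info).await
    }
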
  72 |       |
  73 |       | impl core::fmt::Debug for CacheLookupScheduler {
  74 |     0 |     fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
  75 |     0 |         f.debug_struct("CacheLookupScheduler")
  76 |     0 |             .field("ac_store", &self.ac_store)
  77 |     0 |             .finish_non_exhaustive()
  78 |     0 |     }
  79 |       | }
  80 |       |
  81 |     0 | async fn get_action_from_store(
  82 |     0 |     ac_store: &Store,
  83 |     0 |     action_digest: DigestInfo,
  84 |     0 |     instance_name: String,
  85 |     0 |     digest_function: DigestHasherFunc,
  86 |     0 | ) -> Result<ProtoActionResult, Error> {
  87 |       |     // If we are a GrpcStore we shortcut here, as this is a special store.
  88 |     0 |     if let Some(grpc_store) = ac_store.downcast_ref::<GrpcStore>(Some(action_digest.into())) {
     |       |   Branch (88:12): [True: 0, False: 0]
     |       |   Branch (88:12): [Folded - Ignored]
  89 |     0 |         let action_result_request = GetActionResultRequest {
  90 |     0 |             instance_name,
  91 |     0 |             action_digest: Some(action_digest.into()),
  92 |     0 |             inline_stdout: false,
  93 |     0 |             inline_stderr: false,
  94 |     0 |             inline_output_files: Vec::new(),
  95 |     0 |             digest_function: digest_function.proto_digest_func().into(),
  96 |     0 |         };
  97 |     0 |         grpc_store
  98 |     0 |             .get_action_result(Request::new(action_result_request))
  99 |     0 |             .await
 100 |     0 |             .map(Response::into_inner)
 101 |       |     } else {
 102 |     0 |         get_and_decode_digest::<ProtoActionResult>(ac_store, action_digest.into()).await
 103 |       |     }
 104 |     0 | }
 105 |       |
 106 |       | /// Future for when `ActionStateResults` are known.
 107 |       | type ActionStateResultOneshot = oneshot::Receiver<Result<Box<dyn ActionStateResult>, Error>>;
 108 |       |
 109 |     0 | fn subscribe_to_existing_action(
 110 |     0 |     inflight_cache_checks: &mut MutexGuard<CheckActions>,
 111 |     0 |     unique_qualifier: &ActionUniqueKey,
 112 |     0 |     client_operation_id: &OperationId,
 113 |     0 | ) -> Option<ActionStateResultOneshot> {
 114 |     0 |     inflight_cache_checks
 115 |     0 |         .get_mut(unique_qualifier)
 116 |     0 |         .map(|oneshots| {
 117 |     0 |             let (tx, rx) = oneshot::channel();
 118 |     0 |             oneshots.push((client_operation_id.clone(), tx));
 119 |     0 |             rx
 120 |     0 |         })
 121 |     0 | }
 122 |       |
 123 |       | struct CacheLookupActionStateResult {
 124 |       |     action_state: Arc<ActionState>,
 125 |       |     maybe_origin_metadata: Option<OriginMetadata>,
 126 |       |     change_called: bool,
 127 |       | }
 128 |       |
 129 |       | #[async_trait]
 130 |       | impl ActionStateResult for CacheLookupActionStateResult {
 131 |     0 |     async fn as_state(&self) -> Result<(Arc<ActionState>, Option<OriginMetadata>), Error> {
 132 |       |         Ok((
 133 |       |             self.action_state.clone(),
 134 |       |             self.maybe_origin_metadata.clone(),
 135 |       |         ))
 136 |     0 |     }
 137 |       |
 138 |     0 |     async fn changed(&mut self) -> Result<(Arc<ActionState>, Option<OriginMetadata>), Error> {
 139 |       |         if self.change_called {
 140 |       |             return Err(make_err!(
 141 |       |                 Code::Internal,
 142 |       |                 "CacheLookupActionStateResult::changed called twice"
 143 |       |             ));
 144 |       |         }
 145 |       |         self.change_called = true;
 146 |       |         Ok((
 147 |       |             self.action_state.clone(),
 148 |       |             self.maybe_origin_metadata.clone(),
 149 |       |         ))
 150 |     0 |     }
 151 |       |
 152 |     0 |     async fn as_action_info(&self) -> Result<(Arc<ActionInfo>, Option<OriginMetadata>), Error> {
 153 |       |         // TODO(palfrey) We should probably remove as_action_info()
 154 |       |         // or implement it properly.
 155 |       |         return Err(make_err!(
 156 |       |             Code::Unimplemented,
 157 |       |             "as_action_info not implemented for CacheLookupActionStateResult::as_action_info"
 158 |       |         ));
 159 |     0 |     }
 160 |       | }
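
This result type only ever carries a `CompletedFromCache` stage: `as_state()` returns the cached state, and `changed()` returns it once and errors on a second call. A small, hedged consumer sketch (assuming the imports at the top of this file; the function name is illustrative):

    async fn was_served_from_cache(result: &dyn ActionStateResult) -> Result<bool, Error> {
        // Cache hits are reported by this scheduler as ActionStage::CompletedFromCache.
        let (state, _maybe_origin_metadata) = result.as_state().await?;
        Ok(matches!(state.stage, ActionStage::CompletedFromCache(_)))
    }
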
 161 |       |
 162 |       | impl CacheLookupScheduler {
 163 |     2 |     pub fn new(
 164 |     2 |         ac_store: Store,
 165 |     2 |         action_scheduler: Arc<dyn ClientStateManager>,
 166 |     2 |     ) -> Result<Self, Error> {
 167 |     2 |         Ok(Self {
 168 |     2 |             ac_store,
 169 |     2 |             action_scheduler,
 170 |     2 |             inflight_cache_checks: Arc::default(),
 171 |     2 |         })
 172 |     2 |     }
 173 |       |
 174 |     1 |     async fn inner_add_action(
 175 |     1 |         &self,
 176 |     1 |         client_operation_id: OperationId,
 177 |     1 |         action_info: Arc<ActionInfo>,
 178 |     1 |     ) -> Result<Box<dyn ActionStateResult>, Error> {
 179 |     1 |         let unique_key = match &action_info.unique_qualifier {
 180 |     0 |             ActionUniqueQualifier::Cacheable(unique_key) => unique_key.clone(),
 181 |       |             ActionUniqueQualifier::Uncacheable(_) => {
 182 |       |                 // Cache lookup skipped, forward to the upstream.
 183 |     1 |                 return self
 184 |     1 |                     .action_scheduler
 185 |     1 |                     .add_action(client_operation_id, action_info)
 186 |     1 |                     .await;
 187 |       |             }
 188 |       |         };
 189 |       |
 190 |     0 |         let cache_check_result = {
 191 |       |             // Check this isn't a duplicate request first.
 192 |     0 |             let mut inflight_cache_checks = self.inflight_cache_checks.lock();
 193 |     0 |             subscribe_to_existing_action(
 194 |     0 |                 &mut inflight_cache_checks,
 195 |     0 |                 &unique_key,
 196 |     0 |                 &client_operation_id,
 197 |       |             )
 198 |     0 |             .ok_or_else(move || {
 199 |     0 |                 let (action_listener_tx, action_listener_rx) = oneshot::channel();
 200 |     0 |                 inflight_cache_checks.insert(
 201 |     0 |                     unique_key.clone(),
 202 |     0 |                     vec![(client_operation_id, action_listener_tx)],
 203 |     0 |                 );
 204 |       |                 // In the event we lose the reference to our `scope_guard`, it will remove
 205 |       |                 // the action from the inflight_cache_checks map.
 206 |     0 |                 let inflight_cache_checks = self.inflight_cache_checks.clone();
 207 |       |                 (
 208 |     0 |                     action_listener_rx,
 209 |     0 |                     guard((), move |()| {
 210 |     0 |                         inflight_cache_checks.lock().remove(&unique_key);
 211 |     0 |                     }),
 212 |       |                 )
 213 |     0 |             })
 214 |       |         };
 215 |     0 |         let (action_listener_rx, scope_guard) = match cache_check_result {
 216 |     0 |             Ok(action_listener_fut) => {
 217 |     0 |                 let action_listener = action_listener_fut.await.map_err(|_| {
 218 |     0 |                     make_err!(
 219 |     0 |                         Code::Internal,
 220 |       |                         "ActionStateResult tx hung up in CacheLookupScheduler::add_action"
 221 |       |                     )
 222 |     0 |                 })?;
 223 |     0 |                 return action_listener;
 224 |       |             }
 225 |     0 |             Err(client_tx_and_scope_guard) => client_tx_and_scope_guard,
 226 |       |         };
 227 |       |
 228 |     0 |         let ac_store = self.ac_store.clone();
 229 |     0 |         let action_scheduler = self.action_scheduler.clone();
 230 |     0 |         let inflight_cache_checks = self.inflight_cache_checks.clone();
 231 |       |         // We need this spawn because we are returning a stream and this spawn will populate the stream's data.
 232 |     0 |         background_spawn!("cache_lookup_scheduler_add_action", async move {
 233 |       |             // If our spawn ever dies, we will remove the action from the inflight_cache_checks map.
 234 |     0 |             let _scope_guard = scope_guard;
 235 |       |
 236 |     0 |             let unique_key = match &action_info.unique_qualifier {
 237 |     0 |                 ActionUniqueQualifier::Cacheable(unique_key) => unique_key,
 238 |     0 |                 ActionUniqueQualifier::Uncacheable(unique_key) => {
 239 |     0 |                     error!(
 240 |       |                         ?action_info,
 241 |     0 |                         "ActionInfo::unique_qualifier should be ActionUniqueQualifier::Cacheable()"
 242 |       |                     );
 243 |     0 |                     unique_key
 244 |       |                 }
 245 |       |             };
 246 |       |
 247 |       |             // Perform cache check.
 248 |     0 |             let instance_name = action_info.unique_qualifier.instance_name().clone();
 249 |     0 |             let maybe_action_result = get_action_from_store(
 250 |     0 |                 &ac_store,
 251 |     0 |                 action_info.unique_qualifier.digest(),
 252 |     0 |                 instance_name,
 253 |     0 |                 action_info.unique_qualifier.digest_function(),
 254 |     0 |             )
 255 |     0 |             .await;
 256 |     0 |             match maybe_action_result {
 257 |     0 |                 Ok(action_result) => {
 258 |     0 |                     let maybe_pending_txs = {
 259 |     0 |                         let mut inflight_cache_checks = inflight_cache_checks.lock();
 260 |       |                         // We are ready to resolve the in-flight actions. We remove the
 261 |       |                         // in-flight actions from the map.
 262 |     0 |                         inflight_cache_checks.remove(unique_key)
 263 |       |                     };
 264 |     0 |                     let Some(pending_txs) = maybe_pending_txs else {
     |       |   Branch (264:25): [True: 0, False: 0]
     |       |   Branch (264:25): [Folded - Ignored]
 265 |     0 |                         return; // Nobody is waiting for this action anymore.
 266 |       |                     };
 267 |     0 |                     let mut action_state = ActionState {
 268 |     0 |                         client_operation_id: OperationId::default(),
 269 |     0 |                         stage: ActionStage::CompletedFromCache(action_result),
 270 |     0 |                         action_digest: action_info.unique_qualifier.digest(),
 271 |     0 |                         last_transition_timestamp: SystemTime::now(),
 272 |     0 |                     };
 273 |       |
 274 |     0 |                     let ctx = Context::current();
 275 |     0 |                     let baggage = ctx.baggage();
 276 |       |
 277 |     0 |                     let maybe_origin_metadata = if baggage.is_empty() {
     |       |   Branch (277:52): [True: 0, False: 0]
     |       |   Branch (277:52): [Folded - Ignored]
 278 |     0 |                         None
 279 |       |                     } else {
 280 |       |                         Some(OriginMetadata {
 281 |     0 |                             identity: baggage
 282 |     0 |                                 .get(ENDUSER_ID)
 283 |     0 |                                 .map(|v| v.as_str().to_string())
 284 |     0 |                                 .unwrap_or_default(),
 285 |     0 |                             bazel_metadata: None, // TODO(palfrey): Implement conversion.
 286 |       |                         })
 287 |       |                     };
 288 |       |
 289 |     0 |                     for (client_operation_id, pending_tx) in pending_txs {
 290 |     0 |                         action_state.client_operation_id = client_operation_id;
 291 |     0 |                         // Ignore errors here, as the other end may have hung up.
 292 |     0 |                         drop(pending_tx.send(Ok(Box::new(CacheLookupActionStateResult {
 293 |     0 |                             action_state: Arc::new(action_state.clone()),
 294 |     0 |                             maybe_origin_metadata: maybe_origin_metadata.clone(),
 295 |     0 |                             change_called: false,
 296 |     0 |                         }))));
 297 |     0 |                     }
 298 |     0 |                     return;
 299 |       |                 }
 300 |     0 |                 Err(err) => {
 301 |       |                     // NotFound errors just mean we need to execute our action.
 302 |     0 |                     if err.code != Code::NotFound {
     |       |   Branch (302:24): [True: 0, False: 0]
     |       |   Branch (302:24): [Folded - Ignored]
 303 |     0 |                         let err = err.append("In CacheLookupScheduler::add_action");
 304 |     0 |                         let maybe_pending_txs = {
 305 |     0 |                             let mut inflight_cache_checks = inflight_cache_checks.lock();
 306 |       |                             // We are ready to resolve the in-flight actions. We remove the
 307 |       |                             // in-flight actions from the map.
 308 |     0 |                             inflight_cache_checks.remove(unique_key)
 309 |       |                         };
 310 |     0 |                         let Some(pending_txs) = maybe_pending_txs else {
     |       |   Branch (310:29): [True: 0, False: 0]
     |       |   Branch (310:29): [Folded - Ignored]
 311 |     0 |                             return; // Nobody is waiting for this action anymore.
 312 |       |                         };
 313 |     0 |                         for (_client_operation_id, pending_tx) in pending_txs {
 314 |     0 |                             // Ignore errors here, as the other end may have hung up.
 315 |     0 |                             drop(pending_tx.send(Err(err.clone())));
 316 |     0 |                         }
 317 |     0 |                         return;
 318 |     0 |                     }
 319 |       |                 }
 320 |       |             }
 321 |       |
 322 |     0 |             let maybe_pending_txs = {
 323 |     0 |                 let mut inflight_cache_checks = inflight_cache_checks.lock();
 324 |     0 |                 inflight_cache_checks.remove(unique_key)
 325 |       |             };
 326 |     0 |             let Some(pending_txs) = maybe_pending_txs else {
     |       |   Branch (326:17): [True: 0, False: 0]
     |       |   Branch (326:17): [Folded - Ignored]
 327 |     0 |                 return; // No one is waiting for this action anymore.
 328 |       |             };
 329 |       |
 330 |     0 |             for (client_operation_id, pending_tx) in pending_txs {
 331 |       |                 // Ignore errors here, as the other end may have hung up.
 332 |     0 |                 drop(
 333 |     0 |                     pending_tx.send(
 334 |     0 |                         action_scheduler
 335 |     0 |                             .add_action(client_operation_id, action_info.clone())
 336 |     0 |                             .await,
 337 |       |                     ),
 338 |       |                 );
 339 |       |             }
 340 |     0 |         });
 341 |     0 |         action_listener_rx
 342 |     0 |             .await
 343 |     0 |             .map_err(|_| {
 344 |     0 |                 make_err!(
 345 |     0 |                     Code::Internal,
 346 |       |                     "ActionStateResult tx hung up in CacheLookupScheduler::add_action"
 347 |       |                 )
 348 |     0 |             })?
 349 |     0 |             .err_tip(|| "In CacheLookupScheduler::add_action")
 350 |     1 |     }
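
The `scope_guard` in `inner_add_action` is what keeps the deduplication map consistent: it travels into the background task, and if that task returns early, is cancelled, or is dropped, the guard's closure removes the entry so a later request can retry the lookup. A minimal, self-contained sketch of the same idiom (illustrative names and types, not this file's API):

    use std::collections::HashMap;
    use std::sync::Arc;

    use parking_lot::Mutex;
    use scopeguard::guard;

    async fn lookup_with_cleanup(inflight: Arc<Mutex<HashMap<String, Vec<u64>>>>, key: String) {
        // Runs when this future completes *or* is dropped partway through, so a
        // lost task can never leave a stale entry behind in the in-flight map.
        let _cleanup = guard((), {
            let inflight = inflight.clone();
            let key = key.clone();
            move |()| {
                inflight.lock().remove(&key);
            }
        });
        // ... perform the cache lookup and notify the waiters stored under `key` ...
    }
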
 351 |       |
 352 |     1 |     async fn inner_filter_operations(
 353 |     1 |         &self,
 354 |     1 |         filter: OperationFilter,
 355 |     1 |     ) -> Result<ActionStateResultStream<'_>, Error> {
 356 |     1 |         self.action_scheduler
 357 |     1 |             .filter_operations(filter)
 358 |     1 |             .await
 359 |     1 |             .err_tip(|| "In CacheLookupScheduler::filter_operations")
 360 |     1 |     }
 361 |       | }
 362 |       |
 363 |       | #[async_trait]
 364 |       | impl ClientStateManager for CacheLookupScheduler {
 365 |       |     async fn add_action(
 366 |       |         &self,
 367 |       |         client_operation_id: OperationId,
 368 |       |         action_info: Arc<ActionInfo>,
 369 |     1 |     ) -> Result<Box<dyn ActionStateResult>, Error> {
 370 |       |         self.inner_add_action(client_operation_id, action_info)
 371 |       |             .await
 372 |     1 |     }
 373 |       |
 374 |       |     async fn filter_operations(
 375 |       |         &self,
 376 |       |         filter: OperationFilter,
 377 |     1 |     ) -> Result<ActionStateResultStream, Error> {
 378 |       |         self.inner_filter_operations(filter).await
 379 |     1 |     }
 380 |       |
 381 |     0 |     fn as_known_platform_property_provider(&self) -> Option<&dyn KnownPlatformPropertyProvider> {
 382 |     0 |         self.action_scheduler.as_known_platform_property_provider()
 383 |     0 |     }
 384 |       | }
 385 |       |
 386 |       | impl RootMetricsComponent for CacheLookupScheduler {}
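
The counts above show that only `new()` (count 2) and the uncacheable forward path (count 1) are exercised; `get_action_from_store`, the `CompletedFromCache` branch, and the error-handling paths all sit at zero. A hedged sketch of a test that would drive the cache-hit path; `ac_store_containing`, `make_cacheable_action_info`, and `MockScheduler` are hypothetical test helpers, not existing NativeLink APIs:

    #[tokio::test]
    async fn cache_hit_is_reported_as_completed_from_cache() -> Result<(), Error> {
        // Hypothetical helpers: an AC store pre-populated with an ActionResult at
        // the action's digest, an inner scheduler that should never be reached,
        // and a cacheable ActionInfo for that digest.
        let ac_store: Store = ac_store_containing(/* digest -> ActionResult */);
        let inner: Arc<dyn ClientStateManager> = Arc::new(MockScheduler::default());
        let action_info: Arc<ActionInfo> = make_cacheable_action_info();

        let scheduler = CacheLookupScheduler::new(ac_store, inner)?;
        let result = scheduler
            .add_action(OperationId::default(), action_info)
            .await?;

        // A hit should resolve without ever forwarding to the inner scheduler.
        let (state, _) = result.as_state().await?;
        assert!(matches!(state.stage, ActionStage::CompletedFromCache(_)));
        Ok(())
    }
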