/build/source/src/bin/nativelink.rs
// Copyright 2024 The NativeLink Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use core::net::SocketAddr;
use core::time::Duration;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;

use async_lock::Mutex as AsyncMutex;
use axum::Router;
use axum::http::Uri;
use clap::Parser;
use futures::FutureExt;
use futures::future::{BoxFuture, Either, OptionFuture, TryFutureExt, try_join_all};
use hyper::StatusCode;
use hyper_util::rt::tokio::TokioIo;
use hyper_util::server::conn::auto;
use hyper_util::service::TowerToHyperService;
use mimalloc::MiMalloc;
use nativelink_config::cas_server::{
    CasConfig, GlobalConfig, HttpCompressionAlgorithm, ListenerConfig, SchedulerConfig,
    ServerConfig, StoreConfig, WorkerConfig,
};
use nativelink_config::stores::ConfigDigestHashFunction;
use nativelink_error::{Code, Error, ResultExt, make_err, make_input_err};
use nativelink_scheduler::default_scheduler_factory::scheduler_factory;
use nativelink_service::ac_server::AcServer;
use nativelink_service::bep_server::BepServer;
use nativelink_service::bytestream_server::ByteStreamServer;
use nativelink_service::capabilities_server::CapabilitiesServer;
use nativelink_service::cas_server::CasServer;
use nativelink_service::execution_server::ExecutionServer;
use nativelink_service::fetch_server::FetchServer;
use nativelink_service::health_server::HealthServer;
use nativelink_service::push_server::PushServer;
use nativelink_service::worker_api_server::WorkerApiServer;
use nativelink_store::default_store_factory::store_factory;
use nativelink_store::store_manager::StoreManager;
use nativelink_util::common::fs::set_open_file_limit;
use nativelink_util::digest_hasher::{DigestHasherFunc, set_default_digest_hasher_func};
use nativelink_util::health_utils::HealthRegistryBuilder;
use nativelink_util::origin_event_publisher::OriginEventPublisher;
#[cfg(target_family = "unix")]
use nativelink_util::shutdown_guard::Priority;
use nativelink_util::shutdown_guard::ShutdownGuard;
use nativelink_util::store_trait::{
    DEFAULT_DIGEST_SIZE_HEALTH_CHECK_CFG, set_default_digest_size_health_check,
};
use nativelink_util::task::TaskExecutor;
use nativelink_util::telemetry::init_tracing;
use nativelink_util::{background_spawn, fs, spawn};
use nativelink_worker::local_worker::new_local_worker;
use rustls_pemfile::{certs as extract_certs, crls as extract_crls};
use tokio::net::TcpListener;
use tokio::select;
#[cfg(target_family = "unix")]
use tokio::signal::unix::{SignalKind, signal};
use tokio::sync::{broadcast, mpsc};
use tokio_rustls::TlsAcceptor;
use tokio_rustls::rustls::pki_types::CertificateDer;
use tokio_rustls::rustls::server::WebPkiClientVerifier;
use tokio_rustls::rustls::{RootCertStore, ServerConfig as TlsServerConfig};
use tonic::codec::CompressionEncoding;
use tonic::service::Routes;
use tracing::{error, error_span, info, trace_span, warn};

#[global_allocator]
static GLOBAL: MiMalloc = MiMalloc;

/// Note: This must be kept in sync with the documentation in `AdminConfig::path`.
const DEFAULT_ADMIN_API_PATH: &str = "/admin";

/// Note: This must be kept in sync with the documentation in `HealthConfig::path`.
const DEFAULT_HEALTH_STATUS_CHECK_PATH: &str = "/status";

/// Note: This must be kept in sync with the documentation in
/// `OriginEventsConfig::max_event_queue_size`.
const DEFAULT_MAX_QUEUE_EVENTS: usize = 0x0001_0000;

/// Broadcast channel capacity.
/// Note: The actual capacity may be greater than the provided capacity.
const BROADCAST_CAPACITY: usize = 1;

/// Backend for the Bazel remote execution / remote caching API.
#[derive(Parser, Debug)]
#[clap(
    author = "Trace Machina, Inc. <nativelink@tracemachina.com>",
    version,
    about,
    long_about = None
)]
struct Args {
    /// Config file to use.
    #[clap(value_parser)]
    config_file: String,
}

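// Invocation sketch (hypothetical path; the config is JSON5, parsed via
// `CasConfig::try_from_json5_file` in `get_config` below):
//
//   nativelink /etc/nativelink/config.json5

/// Extension trait over `tonic::service::Routes` that registers a service only
/// when it is enabled (`Some`) in the configuration, keeping the service
/// builder chain in `inner_main` linear instead of branching per service.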
trait RoutesExt {
    fn add_optional_service<S>(self, svc: Option<S>) -> Self
    where
        S: tower::Service<
                axum::http::Request<tonic::body::Body>,
                Error = core::convert::Infallible,
            > + tonic::server::NamedService
            + Clone
            + Send
            + Sync
            + 'static,
        S::Response: axum::response::IntoResponse,
        S::Future: Send + 'static;
}

impl RoutesExt for Routes {
    fn add_optional_service<S>(mut self, svc: Option<S>) -> Self
    where
        S: tower::Service<
                axum::http::Request<tonic::body::Body>,
                Error = core::convert::Infallible,
            > + tonic::server::NamedService
            + Clone
            + Send
            + Sync
            + 'static,
        S::Response: axum::response::IntoResponse,
        S::Future: Send + 'static,
    {
        if let Some(svc) = svc {
            self = self.add_service(svc);
        }
        self
    }
}

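/// Builds everything described by the parsed configuration: stores, schedulers,
/// the per-listener gRPC/HTTP services, and any local workers, then serves
/// until shutdown or a fatal error.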
async fn inner_main(
    cfg: CasConfig,
    shutdown_tx: broadcast::Sender<ShutdownGuard>,
) -> Result<(), Error> {
    const fn into_encoding(from: HttpCompressionAlgorithm) -> Option<CompressionEncoding> {
        match from {
            HttpCompressionAlgorithm::Gzip => Some(CompressionEncoding::Gzip),
            HttpCompressionAlgorithm::None => None,
        }
    }

    let health_registry_builder =
        Arc::new(AsyncMutex::new(HealthRegistryBuilder::new("nativelink")));

    let store_manager = Arc::new(StoreManager::new());
    {
        let mut health_registry_lock = health_registry_builder.lock().await;

        for StoreConfig { name, spec } in cfg.stores {
            let health_component_name = format!("stores/{name}");
            let mut health_register_store =
                health_registry_lock.sub_builder(&health_component_name);
            let store = store_factory(&spec, &store_manager, Some(&mut health_register_store))
                .await
                .err_tip(|| format!("Failed to create store '{name}'"))?;
            store_manager.add_store(&name, store);
        }
    }

    let mut root_futures: Vec<BoxFuture<Result<(), Error>>> = Vec::new();

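    // If origin events are enabled, start a publisher task that drains the
    // event channel into the configured store; the `tx` side is handed to the
    // schedulers created below.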
    let maybe_origin_event_tx = cfg
        .experimental_origin_events
        .as_ref()
        .map(|origin_events_cfg| {
            let mut max_queued_events = origin_events_cfg.max_event_queue_size;
            if max_queued_events == 0 {
                max_queued_events = DEFAULT_MAX_QUEUE_EVENTS;
            }
            let (tx, rx) = mpsc::channel(max_queued_events);
            let store_name = origin_events_cfg.publisher.store.as_str();
            let store = store_manager.get_store(store_name).err_tip(|| {
                format!("Could not get store {store_name} for origin event publisher")
            })?;

            root_futures.push(Box::pin(
                OriginEventPublisher::new(store, rx, shutdown_tx.clone())
                    .run()
                    .map(Ok),
            ));

            Ok::<_, Error>(tx)
        })
        .transpose()?;

    let mut action_schedulers = HashMap::new();
    let mut worker_schedulers = HashMap::new();
    for SchedulerConfig { name, spec } in cfg.schedulers.iter().flatten() {
        let (maybe_action_scheduler, maybe_worker_scheduler) =
            scheduler_factory(spec, &store_manager, maybe_origin_event_tx.as_ref())
                .err_tip(|| format!("Failed to create scheduler '{name}'"))?;
        if let Some(action_scheduler) = maybe_action_scheduler {
            action_schedulers.insert(name.clone(), action_scheduler.clone());
        }
        if let Some(worker_scheduler) = maybe_worker_scheduler {
            worker_schedulers.insert(name.clone(), worker_scheduler.clone());
        }
    }

    let server_cfgs: Vec<ServerConfig> = cfg.servers.into_iter().collect();

    for server_cfg in server_cfgs {
        let services = server_cfg
            .services
            .err_tip(|| "'services' must be configured")?;

        // Currently we only support HTTP as our socket type.
        let ListenerConfig::Http(http_config) = server_cfg.listener;

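        // Each gRPC service below is wired the same way: construct it from its
        // config, then apply the listener's send/accept compression settings.
        // The repetition is deliberate: each `into_service()` call returns a
        // distinct tonic service type, so the blocks cannot trivially share a
        // helper.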
        let tonic_services = Routes::builder()
            .routes()
            .add_optional_service(
                services
                    .ac
                    .map_or(Ok(None), |cfg| {
                        AcServer::new(&cfg, &store_manager).map(|v| {
                            let mut service = v.into_service();
                            let send_algo = &http_config.compression.send_compression_algorithm;
                            if let Some(encoding) =
                                into_encoding(send_algo.unwrap_or(HttpCompressionAlgorithm::None))
                            {
                                service = service.send_compressed(encoding);
                            }
                            for encoding in http_config
                                .compression
                                .accepted_compression_algorithms
                                .iter()
                                // Filter None values.
                                .filter_map(|from: &HttpCompressionAlgorithm| into_encoding(*from))
                            {
                                service = service.accept_compressed(encoding);
                            }
                            Some(service)
                        })
                    })
                    .err_tip(|| "Could not create AC service")?,
            )
            .add_optional_service(
                services
                    .cas
                    .map_or(Ok(None), |cfg| {
                        CasServer::new(&cfg, &store_manager).map(|v| {
                            let mut service = v.into_service();
                            let send_algo = &http_config.compression.send_compression_algorithm;
                            if let Some(encoding) =
                                into_encoding(send_algo.unwrap_or(HttpCompressionAlgorithm::None))
                            {
                                service = service.send_compressed(encoding);
                            }
                            for encoding in http_config
                                .compression
                                .accepted_compression_algorithms
                                .iter()
                                // Filter None values.
                                .filter_map(|from: &HttpCompressionAlgorithm| into_encoding(*from))
                            {
                                service = service.accept_compressed(encoding);
                            }
                            Some(service)
                        })
                    })
                    .err_tip(|| "Could not create CAS service")?,
            )
            .add_optional_service(
                services
                    .execution
                    .map_or(Ok(None), |cfg| {
                        ExecutionServer::new(&cfg, &action_schedulers, &store_manager).map(|v| {
                            let mut service = v.into_service();
                            let send_algo = &http_config.compression.send_compression_algorithm;
                            if let Some(encoding) =
                                into_encoding(send_algo.unwrap_or(HttpCompressionAlgorithm::None))
                            {
                                service = service.send_compressed(encoding);
                            }
                            for encoding in http_config
                                .compression
                                .accepted_compression_algorithms
                                .iter()
                                // Filter None values.
                                .filter_map(|from: &HttpCompressionAlgorithm| into_encoding(*from))
                            {
                                service = service.accept_compressed(encoding);
                            }
                            Some(service)
                        })
                    })
                    .err_tip(|| "Could not create Execution service")?,
            )
            .add_optional_service(
                services
                    .fetch
                    .map_or(Ok(None), |cfg| {
                        FetchServer::new(&cfg, &store_manager).map(|v| {
                            let mut service = v.into_service();
                            let send_algo = &http_config.compression.send_compression_algorithm;
                            if let Some(encoding) =
                                into_encoding(send_algo.unwrap_or(HttpCompressionAlgorithm::None))
                            {
                                service = service.send_compressed(encoding);
                            }
                            for encoding in http_config
                                .compression
                                .accepted_compression_algorithms
                                .iter()
                                // Filter None values.
                                .filter_map(|from: &HttpCompressionAlgorithm| into_encoding(*from))
                            {
                                service = service.accept_compressed(encoding);
                            }
                            Some(service)
                        })
                    })
                    .err_tip(|| "Could not create Fetch service")?,
            )
            .add_optional_service(
                services
                    .push
                    .map_or(Ok(None), |cfg| {
                        PushServer::new(&cfg, &store_manager).map(|v| {
                            let mut service = v.into_service();
                            let send_algo = &http_config.compression.send_compression_algorithm;
                            if let Some(encoding) =
                                into_encoding(send_algo.unwrap_or(HttpCompressionAlgorithm::None))
                            {
                                service = service.send_compressed(encoding);
                            }
                            for encoding in http_config
                                .compression
                                .accepted_compression_algorithms
                                .iter()
                                // Filter None values.
                                .filter_map(|from: &HttpCompressionAlgorithm| into_encoding(*from))
                            {
                                service = service.accept_compressed(encoding);
                            }
                            Some(service)
                        })
                    })
                    .err_tip(|| "Could not create Push service")?,
            )
            .add_optional_service(
                services
                    .bytestream
                    .map_or(Ok(None), |cfg| {
                        ByteStreamServer::new(&cfg, &store_manager).map(|v| {
                            let mut service = v.into_service();
                            let send_algo = &http_config.compression.send_compression_algorithm;
                            if let Some(encoding) =
                                into_encoding(send_algo.unwrap_or(HttpCompressionAlgorithm::None))
                            {
                                service = service.send_compressed(encoding);
                            }
                            for encoding in http_config
                                .compression
                                .accepted_compression_algorithms
                                .iter()
                                // Filter None values.
                                .filter_map(|from: &HttpCompressionAlgorithm| into_encoding(*from))
                            {
                                service = service.accept_compressed(encoding);
                            }
                            Some(service)
                        })
                    })
                    .err_tip(|| "Could not create ByteStream service")?,
            )
            .add_optional_service(
                OptionFuture::from(
                    services
                        .capabilities
                        .as_ref()
                        // Borrow checker fighting here...
                        .map(|_| {
                            CapabilitiesServer::new(
                                services.capabilities.as_ref().unwrap(),
                                &action_schedulers,
                            )
                        }),
                )
                .await
                .map_or(Ok::<Option<CapabilitiesServer>, Error>(None), |server| {
                    Ok(Some(server?))
                })
                .err_tip(|| "Could not create Capabilities service")?
                .map(|v| {
                    let mut service = v.into_service();
                    let send_algo = &http_config.compression.send_compression_algorithm;
                    if let Some(encoding) =
                        into_encoding(send_algo.unwrap_or(HttpCompressionAlgorithm::None))
                    {
                        service = service.send_compressed(encoding);
                    }
                    for encoding in http_config
                        .compression
                        .accepted_compression_algorithms
                        .iter()
                        // Filter None values.
                        .filter_map(|from: &HttpCompressionAlgorithm| into_encoding(*from))
                    {
                        service = service.accept_compressed(encoding);
                    }
                    service
                }),
            )
            .add_optional_service(
                services
                    .worker_api
                    .map_or(Ok(None), |cfg| {
                        WorkerApiServer::new(&cfg, &worker_schedulers).map(|v| {
                            let mut service = v.into_service();
                            let send_algo = &http_config.compression.send_compression_algorithm;
                            if let Some(encoding) =
                                into_encoding(send_algo.unwrap_or(HttpCompressionAlgorithm::None))
                            {
                                service = service.send_compressed(encoding);
                            }
                            for encoding in http_config
                                .compression
                                .accepted_compression_algorithms
                                .iter()
                                // Filter None values.
                                .filter_map(|from: &HttpCompressionAlgorithm| into_encoding(*from))
                            {
                                service = service.accept_compressed(encoding);
                            }
                            Some(service)
                        })
                    })
                    .err_tip(|| "Could not create WorkerApi service")?,
            )
            .add_optional_service(
                services
                    .experimental_bep
                    .map_or(Ok(None), |cfg| {
                        BepServer::new(&cfg, &store_manager).map(|v| {
                            let mut service = v.into_service();
                            let send_algo = &http_config.compression.send_compression_algorithm;
                            if let Some(encoding) =
                                into_encoding(send_algo.unwrap_or(HttpCompressionAlgorithm::None))
                            {
                                service = service.send_compressed(encoding);
                            }
                            for encoding in http_config
                                .compression
                                .accepted_compression_algorithms
                                .iter()
                                // Filter None values.
                                .filter_map(|from: &HttpCompressionAlgorithm| into_encoding(*from))
                            {
                                service = service.accept_compressed(encoding);
                            }
                            Some(service)
                        })
                    })
                    .err_tip(|| "Could not create BEP service")?,
            );

        let health_registry = health_registry_builder.lock().await.build();

        let mut svc =
            tonic_services
                .into_axum_router()
                .layer(nativelink_util::telemetry::OtlpLayer::new(
                    server_cfg.experimental_identity_header.required,
                ));

        if let Some(health_cfg) = services.health {
            let path = if health_cfg.path.is_empty() {
                DEFAULT_HEALTH_STATUS_CHECK_PATH
            } else {
                &health_cfg.path
            };
            svc = svc.route_service(path, HealthServer::new(health_registry));
        }

        if let Some(admin_config) = services.admin {
            let path = if admin_config.path.is_empty() {
                DEFAULT_ADMIN_API_PATH
            } else {
                &admin_config.path
            };
            let worker_schedulers = Arc::new(worker_schedulers.clone());
            svc = svc.nest_service(
                path,
                Router::new().route(
                    "/scheduler/{instance_name}/set_drain_worker/{worker_id}/{is_draining}",
                    axum::routing::post(
                        move |params: axum::extract::Path<(String, String, String)>| async move {
                            let (instance_name, worker_id, is_draining) = params.0;
                            (async move {
                                let is_draining = match is_draining.as_str() {
                                    "0" => false,
                                    "1" => true,
                                    _ => {
                                        return Err(make_err!(
                                            Code::Internal,
                                            "{} is neither 0 nor 1",
                                            is_draining
                                        ));
                                    }
                                };
                                worker_schedulers
                                    .get(&instance_name)
                                    .err_tip(|| {
                                        format!(
                                            "Cannot get an instance with the name of '{}'",
                                            &instance_name
                                        )
                                    })?
                                    .clone()
                                    .set_drain_worker(&worker_id.clone().into(), is_draining)
                                    .await?;
                                Ok::<_, Error>(format!("Draining worker {worker_id}"))
                            })
                            .await
                            .map_err(|e| {
                                Err::<String, _>((
                                    StatusCode::INTERNAL_SERVER_ERROR,
                                    format!("Error: {e:?}"),
                                ))
                            })
                        },
                    ),
                ),
            );
        }
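
        // Example request against the admin route above (sketch; host/port and
        // the `/admin` prefix depend on this server's listener and `admin.path`
        // config):
        //
        //   curl -X POST \
        //     http://localhost:50051/admin/scheduler/main_scheduler/set_drain_worker/worker-1/1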

        // This is the default service that executes if no other endpoint matches.
        svc = svc.fallback(|uri: Uri| async move {
            warn!("No route for {uri}");
            (StatusCode::NOT_FOUND, format!("No route for {uri}"))
        });

        // Configure our TLS acceptor if we have TLS configured.
        let maybe_tls_acceptor = http_config.tls.map_or(Ok(None), |tls_config| {
            fn read_cert(cert_file: &str) -> Result<Vec<CertificateDer<'static>>, Error> {
                let mut cert_reader = std::io::BufReader::new(
                    std::fs::File::open(cert_file)
                        .err_tip(|| format!("Could not open cert file {cert_file}"))?,
                );
                let certs = extract_certs(&mut cert_reader)
                    .collect::<Result<Vec<CertificateDer<'_>>, _>>()
                    .err_tip(|| format!("Could not extract certs from file {cert_file}"))?;
                Ok(certs)
            }
            let certs = read_cert(&tls_config.cert_file)?;
            let mut key_reader = std::io::BufReader::new(
                std::fs::File::open(&tls_config.key_file)
                    .err_tip(|| format!("Could not open key file {}", tls_config.key_file))?,
            );
            let key = match rustls_pemfile::read_one(&mut key_reader)
                .err_tip(|| format!("Could not extract key(s) from file {}", tls_config.key_file))?
            {
                Some(rustls_pemfile::Item::Pkcs8Key(key)) => key.into(),
                Some(rustls_pemfile::Item::Sec1Key(key)) => key.into(),
                Some(rustls_pemfile::Item::Pkcs1Key(key)) => key.into(),
                _ => {
                    return Err(make_err!(
                        Code::Internal,
                        "No keys found in file {}",
                        tls_config.key_file
                    ));
                }
            };
            if let Ok(Some(_)) = rustls_pemfile::read_one(&mut key_reader) {
                return Err(make_err!(
                    Code::InvalidArgument,
                    "Expected 1 key in file {}",
                    tls_config.key_file
                ));
            }
            let verifier = if let Some(client_ca_file) = &tls_config.client_ca_file {
                let mut client_auth_roots = RootCertStore::empty();
                for cert in read_cert(client_ca_file)? {
                    client_auth_roots.add(cert).map_err(|e| {
                        make_err!(Code::Internal, "Could not read client CA: {e:?}")
                    })?;
                }
                let crls = if let Some(client_crl_file) = &tls_config.client_crl_file {
                    let mut crl_reader = std::io::BufReader::new(
                        std::fs::File::open(client_crl_file)
                            .err_tip(|| format!("Could not open CRL file {client_crl_file}"))?,
                    );
                    extract_crls(&mut crl_reader)
                        .collect::<Result<_, _>>()
                        .err_tip(|| format!("Could not extract CRLs from file {client_crl_file}"))?
                } else {
                    Vec::new()
                };
                WebPkiClientVerifier::builder(Arc::new(client_auth_roots))
                    .with_crls(crls)
                    .build()
                    .map_err(|e| {
                        make_err!(
                            Code::Internal,
                            "Could not create WebPkiClientVerifier: {e:?}"
                        )
                    })?
            } else {
                WebPkiClientVerifier::no_client_auth()
            };
            let mut config = TlsServerConfig::builder()
                .with_client_cert_verifier(verifier)
                .with_single_cert(certs, key)
                .map_err(|e| {
                    make_err!(Code::Internal, "Could not create TlsServerConfig: {e:?}")
                })?;

            config.alpn_protocols.push("h2".into());
            Ok(Some(TlsAcceptor::from(Arc::new(config))))
        })?;
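
        // For local testing, the `tls.cert_file` / `tls.key_file` pair consumed
        // above can be generated with a stock OpenSSL invocation, e.g.
        //
        //   openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
        //     -subj "/CN=localhost" -keyout key.pem -out cert.pem
        //
        // (sketch; any PEM with a single PKCS#8, SEC1, or PKCS#1 key works, per
        // the match arms above).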

        let socket_addr = http_config
            .socket_address
            .parse::<SocketAddr>()
            .map_err(|e| {
                make_input_err!("Invalid address '{}' - {e:?}", http_config.socket_address)
            })?;
        let tcp_listener = TcpListener::bind(&socket_addr).await?;
        let mut http = auto::Builder::new(TaskExecutor::default());

        let http_config = &http_config.advanced_http;
        if let Some(value) = http_config.http2_keep_alive_interval {
            http.http2()
                .keep_alive_interval(Duration::from_secs(u64::from(value)));
        }

        if let Some(value) = http_config.experimental_http2_max_pending_accept_reset_streams {
            http.http2()
                .max_pending_accept_reset_streams(usize::try_from(value).err_tip(
                    || "Could not convert experimental_http2_max_pending_accept_reset_streams",
                )?);
        }
        if let Some(value) = http_config.experimental_http2_initial_stream_window_size {
            http.http2().initial_stream_window_size(value);
        }
        if let Some(value) = http_config.experimental_http2_initial_connection_window_size {
            http.http2().initial_connection_window_size(value);
        }
        if let Some(value) = http_config.experimental_http2_adaptive_window {
            http.http2().adaptive_window(value);
        }
        if let Some(value) = http_config.experimental_http2_max_frame_size {
            http.http2().max_frame_size(value);
        }
        if let Some(value) = http_config.experimental_http2_max_concurrent_streams {
            http.http2().max_concurrent_streams(value);
        }
        if let Some(value) = http_config.experimental_http2_keep_alive_timeout {
            http.http2()
                .keep_alive_timeout(Duration::from_secs(u64::from(value)));
        }
        if let Some(value) = http_config.experimental_http2_max_send_buf_size {
            http.http2().max_send_buf_size(
                usize::try_from(value).err_tip(|| "Could not convert http2_max_send_buf_size")?,
            );
        }
        if http_config.experimental_http2_enable_connect_protocol == Some(true) {
            http.http2().enable_connect_protocol();
        }
        if let Some(value) = http_config.experimental_http2_max_header_list_size {
            http.http2().max_header_list_size(value);
        }
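        // Every knob above maps directly onto hyper's HTTP/2 connection
        // builder; options left unset in the config keep hyper's defaults.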
        info!("Ready, listening on {socket_addr}");
        root_futures.push(Box::pin(async move {
            loop {
                select! {
                    accept_result = tcp_listener.accept() => {
                        match accept_result {
                            Ok((tcp_stream, remote_addr)) => {
                                info!(
                                    target: "nativelink::services",
                                    ?remote_addr,
                                    ?socket_addr,
                                    "Client connected"
                                );

                                let (http, svc, maybe_tls_acceptor) =
                                    (http.clone(), svc.clone(), maybe_tls_acceptor.clone());

                                background_spawn!(
                                    name: "http_connection",
                                    fut: error_span!(
                                        "http_connection",
                                        remote_addr = %remote_addr,
                                        socket_addr = %socket_addr,
                                    ).in_scope(|| async move {
                                        let serve_connection = if let Some(tls_acceptor) = maybe_tls_acceptor {
                                            match tls_acceptor.accept(tcp_stream).await {
                                                Ok(tls_stream) => Either::Left(http.serve_connection(
                                                    TokioIo::new(tls_stream),
                                                    TowerToHyperService::new(svc),
                                                )),
                                                Err(err) => {
                                                    error!(?err, "Failed to accept tls stream");
                                                    return;
                                                }
                                            }
                                        } else {
                                            Either::Right(http.serve_connection(
                                                TokioIo::new(tcp_stream),
                                                TowerToHyperService::new(svc),
                                            ))
                                        };

                                        if let Err(err) = serve_connection.await {
                                            error!(
                                                target: "nativelink::services",
                                                ?err,
                                                "Failed running service"
                                            );
                                        }
                                    }),
                                    target: "nativelink::services",
                                    ?remote_addr,
                                    ?socket_addr,
                                );
                            },
                            Err(err) => {
                                error!(?err, "Failed to accept tcp connection");
                            }
                        }
                    },
                }
            }
            // Unreachable
        }));
    }

    {
        // We start workers after our TcpListener is set up so that if a worker
        // connects to one of these services, the service is already accepting
        // connections.
        let worker_cfgs = cfg.workers.unwrap_or_default();
        let mut worker_names = HashSet::with_capacity(worker_cfgs.len());
        for (i, worker_cfg) in worker_cfgs.into_iter().enumerate() {
            let spawn_fut = match worker_cfg {
                WorkerConfig::Local(local_worker_cfg) => {
                    let fast_slow_store = store_manager
                        .get_store(&local_worker_cfg.cas_fast_slow_store)
                        .err_tip(|| {
                            format!(
                                "Failed to find store for cas_store_ref in worker config: {}",
                                local_worker_cfg.cas_fast_slow_store
                            )
                        })?;

                    let maybe_ac_store = if let Some(ac_store_ref) =
                        &local_worker_cfg.upload_action_result.ac_store
                    {
                        Some(store_manager.get_store(ac_store_ref).err_tip(|| {
                            format!("Failed to find store for ac_store in worker config: {ac_store_ref}")
                        })?)
                    } else {
                        None
                    };
                    // Note: Defaults to fast_slow_store if not specified. If this ever changes it
                    // must be updated in the config documentation for the
                    // `historical_results_store` field.
                    let historical_store = if let Some(cas_store_ref) = &local_worker_cfg
                        .upload_action_result
                        .historical_results_store
                    {
                        store_manager.get_store(cas_store_ref).err_tip(|| {
                            format!(
                                "Failed to find store for historical_results_store in worker config: {cas_store_ref}"
                            )
                        })?
                    } else {
                        fast_slow_store.clone()
                    };
                    let local_worker = new_local_worker(
                        Arc::new(local_worker_cfg),
                        fast_slow_store,
                        maybe_ac_store,
                        historical_store,
                    )
                    .await
                    .err_tip(|| "Could not make LocalWorker")?;

                    let name = if local_worker.name().is_empty() {
                        format!("worker_{i}")
                    } else {
                        local_worker.name().clone()
                    };

                    if worker_names.contains(&name) {
                        Err(make_input_err!(
                            "Duplicate worker name '{}' found in config",
                            name
                        ))?;
                    }
                    worker_names.insert(name.clone());
                    let shutdown_rx = shutdown_tx.subscribe();
                    let fut = trace_span!("worker_ctx", worker_name = %name)
                        .in_scope(|| local_worker.run(shutdown_rx));
                    spawn!("worker", fut, ?name)
                }
            };
            root_futures.push(Box::pin(spawn_fut.map_ok_or_else(|e| Err(e.into()), |v| v)));
        }
    }

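    // `try_join_all` resolves only when every root future (listeners, local
    // workers, and the optional origin-event publisher) completes; the first
    // error aborts the process.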
    if let Err(e) = try_join_all(root_futures).await {
        panic!("{e:?}");
    }

    Ok(())
}

fn get_config() -> Result<CasConfig, Error> {
    let args = Args::parse();
    CasConfig::try_from_json5_file(&args.config_file)
}
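
// Config sketch (hypothetical values; the full schema lives in
// `nativelink_config::cas_server::CasConfig`, and only the fields read in
// `inner_main` and `main` are listed here):
//
//   {
//     stores: [ /* named store specs */ ],
//     servers: [ /* listener + services */ ],
//     schedulers: [ /* optional */ ],
//     workers: [ /* optional */ ],
//     global: { /* optional */ },
//   }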

fn main() -> Result<(), Box<dyn core::error::Error>> {
    let mut cfg = get_config()?;

    let global_cfg = if let Some(global_cfg) = &mut cfg.global {
        if global_cfg.max_open_files == 0 {
            global_cfg.max_open_files = fs::DEFAULT_OPEN_FILE_LIMIT;
        }
        if global_cfg.default_digest_size_health_check == 0 {
            global_cfg.default_digest_size_health_check = DEFAULT_DIGEST_SIZE_HEALTH_CHECK_CFG;
        }

        *global_cfg
    } else {
        GlobalConfig {
            max_open_files: fs::DEFAULT_OPEN_FILE_LIMIT,
            default_digest_hash_function: None,
            default_digest_size_health_check: DEFAULT_DIGEST_SIZE_HEALTH_CHECK_CFG,
        }
    };
    set_open_file_limit(global_cfg.max_open_files);
    set_default_digest_hasher_func(DigestHasherFunc::from(
        global_cfg
            .default_digest_hash_function
            .unwrap_or(ConfigDigestHashFunction::Sha256),
    ))?;
    set_default_digest_size_health_check(global_cfg.default_digest_size_health_check)?;

    #[expect(clippy::disallowed_methods, reason = "starting main runtime")]
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()?;

    // The OTLP exporters need to run in a Tokio context.
    #[expect(clippy::disallowed_methods, reason = "tracing init on main runtime")]
    runtime.block_on(async { tokio::spawn(async { init_tracing() }).await? })?;

    // Initiates the shutdown process by broadcasting a `ShutdownGuard` to all
    // listeners. Each listener performs its cleanup and then drops its guard,
    // signaling completion. Once all guards are dropped, the process knows it
    // can safely terminate.
    let (shutdown_tx, _) = broadcast::channel::<ShutdownGuard>(BROADCAST_CAPACITY);
    #[cfg(target_family = "unix")]
    let shutdown_tx_clone = shutdown_tx.clone();
    #[cfg(target_family = "unix")]
    let mut shutdown_guard = ShutdownGuard::default();

    #[expect(clippy::disallowed_methods, reason = "signal handler on main runtime")]
    runtime.spawn(async move {
        tokio::signal::ctrl_c()
            .await
            .expect("Failed to listen to SIGINT");
        eprintln!("User terminated process via SIGINT");
        std::process::exit(130);
    });
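
    // Exit codes follow the 128+N signal convention: 130 = SIGINT (2),
    // 143 = SIGTERM (15).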

    #[cfg(target_family = "unix")]
    #[expect(clippy::disallowed_methods, reason = "signal handler on main runtime")]
    runtime.spawn(async move {
        signal(SignalKind::terminate())
            .expect("Failed to listen to SIGTERM")
            .recv()
            .await;
        warn!("Process terminated via SIGTERM");
        drop(shutdown_tx_clone.send(shutdown_guard.clone()));
        let () = shutdown_guard.wait_for(Priority::P0).await;
        warn!("Successfully shut down nativelink.");
        std::process::exit(143);
    });

    #[expect(clippy::disallowed_methods, reason = "waiting on everything to finish")]
    runtime
        .block_on(async {
            trace_span!("main")
                .in_scope(|| async { inner_main(cfg, shutdown_tx).await })
                .await
        })
        .err_tip(|| "main() function failed")?;
    Ok(())
}