/build/source/nativelink-config/src/cas_server.rs
Line | Count | Source |
1 | | // Copyright 2024 The NativeLink Authors. All rights reserved. |
2 | | // |
3 | | // Licensed under the Functional Source License, Version 1.1, Apache 2.0 Future License (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // See LICENSE file for details |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | |
15 | | use std::collections::HashMap; |
16 | | |
17 | | use nativelink_error::{Error, ResultExt}; |
18 | | use serde::{Deserialize, Serialize}; |
19 | | |
20 | | use crate::schedulers::SchedulerSpec; |
21 | | use crate::serde_utils::{ |
22 | | convert_data_size_with_shellexpand, convert_duration_with_shellexpand, |
23 | | convert_numeric_with_shellexpand, convert_optional_numeric_with_shellexpand, |
24 | | convert_optional_string_with_shellexpand, convert_string_with_shellexpand, |
25 | | convert_vec_string_with_shellexpand, |
26 | | }; |
27 | | use crate::stores::{ClientTlsConfig, ConfigDigestHashFunction, StoreRefName, StoreSpec}; |
28 | | |
29 | | /// Name of the scheduler. This type will be used when referencing a |
30 | | /// scheduler in the `CasConfig::schedulers` list. |
31 | | pub type SchedulerRefName = String; |
32 | | |
33 | | /// Used when the config references `instance_name` in the protocol. |
34 | | pub type InstanceName = String; |
35 | | |
36 | | #[derive(Debug, Default, Clone, PartialEq, Eq, Deserialize, Serialize)] |
37 | | pub struct WithInstanceName<T> { |
38 | | #[serde(default)] |
39 | | pub instance_name: InstanceName, |
40 | | #[serde(flatten)] |
41 | | pub config: T, |
42 | | } |
43 | | |
44 | | impl<T> core::ops::Deref for WithInstanceName<T> { |
45 | | type Target = T; |
46 | | |
47 | 80 | fn deref(&self) -> &Self::Target { |
48 | 80 | &self.config |
49 | 80 | } |
50 | | } |
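// Illustrative sketch (not from the source): because `config` is flattened,
// a `WithInstanceName<CasStoreConfig>` entry is written as a single JSON5
// object whose remaining fields belong to the inner config. The store name
// below is hypothetical:
//
//   { instance_name: "main", cas_store: "CAS_MAIN_STORE" }
//
// Omitting `instance_name` falls back to the default empty instance name.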
51 | | |
52 | | #[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] |
53 | | pub struct NamedConfig<Spec> { |
54 | | pub name: String, |
55 | | #[serde(flatten)] |
56 | | pub spec: Spec, |
57 | | } |
58 | | |
59 | | #[derive(Deserialize, Serialize, Debug, Default, Clone, Copy)] |
60 | | #[serde(rename_all = "snake_case")] |
61 | | pub enum HttpCompressionAlgorithm { |
62 | | /// No compression. |
63 | | #[default] |
64 | | None, |
65 | | |
66 | | /// Gzip compression. |
67 | | Gzip, |
68 | | } |
69 | | |
70 | | /// Note: Compressing data in the cloud rarely has a benefit, since most |
71 | | /// cloud providers have very high bandwidth backplanes. However, for |
72 | | /// clients not inside the data center, it might be a good idea to |
73 | | /// compress data to and from the cloud. This will however come at a high |
74 | | /// CPU and performance cost. If remote execution shares the same CAS/AC |
75 | | /// servers as the clients' remote cache, you can create multiple services |
76 | | /// with different compression settings that are served on different ports. |
77 | | /// Then configure the non-cloud clients to use one port and the cloud |
78 | | /// clients to use another. |
79 | | #[derive(Deserialize, Serialize, Debug, Default)] |
80 | | #[serde(deny_unknown_fields)] |
81 | | pub struct HttpCompressionConfig { |
82 | | /// The compression algorithm that the server will use when sending |
83 | | /// responses to clients. Enabling this will likely save a lot of |
84 | | /// data transfer, but will consume a lot of CPU and add a lot of |
85 | | /// latency. |
86 | | /// see: <https://github.com/tracemachina/nativelink/issues/109> |
87 | | /// |
88 | | /// Default: `HttpCompressionAlgorithm::None` |
89 | | pub send_compression_algorithm: Option<HttpCompressionAlgorithm>, |
90 | | |
91 | | /// The compression algorithm that the server will accept from clients. |
92 | | /// The server will broadcast the supported compression algorithms to |
93 | | /// clients and the client will choose which compression algorithm to |
94 | | /// use. Enabling this will likely save a lot of data transfer, but |
95 | | /// will consume a lot of CPU and add a lot of latency. |
96 | | /// see: <https://github.com/tracemachina/nativelink/issues/109> |
97 | | /// |
98 | | /// Default: {no supported compression} |
99 | | pub accepted_compression_algorithms: Vec<HttpCompressionAlgorithm>, |
100 | | } |
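// Illustrative sketch (hypothetical values): with the `snake_case` renaming on
// `HttpCompressionAlgorithm`, a compression section might look like:
//
//   compression: {
//     send_compression_algorithm: "gzip",
//     accepted_compression_algorithms: ["gzip"],
//   }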
101 | | |
102 | | #[derive(Deserialize, Serialize, Debug)] |
103 | | #[serde(deny_unknown_fields)] |
104 | | pub struct AcStoreConfig { |
105 | | /// The store name referenced in the `stores` map in the main config. |
106 | | /// The store name referenced here may be reused multiple times. |
107 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
108 | | pub ac_store: StoreRefName, |
109 | | |
110 | | /// Whether the Action Cache store may be written to. If this is set to true, |
111 | | /// it is only possible to read from the Action Cache. |
112 | | #[serde(default)] |
113 | | pub read_only: bool, |
114 | | } |
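// Illustrative sketch: an AC service entry referencing a hypothetical store
// name from the top-level `stores` list, exposed read-only:
//
//   { instance_name: "main", ac_store: "AC_MAIN_STORE", read_only: true }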
115 | | |
116 | | #[derive(Deserialize, Serialize, Debug)] |
117 | | #[serde(deny_unknown_fields)] |
118 | | pub struct CasStoreConfig { |
119 | | /// The store name referenced in the `stores` map in the main config. |
120 | | /// The store name referenced here may be reused multiple times. |
121 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
122 | | pub cas_store: StoreRefName, |
123 | | } |
124 | | |
125 | | #[derive(Deserialize, Serialize, Debug, Default)] |
126 | | #[serde(deny_unknown_fields)] |
127 | | pub struct CapabilitiesRemoteExecutionConfig { |
128 | | /// Scheduler used to configure the capabilities of remote execution. |
129 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
130 | | pub scheduler: SchedulerRefName, |
131 | | } |
132 | | |
133 | | #[derive(Deserialize, Serialize, Debug, Default)] |
134 | | #[serde(deny_unknown_fields)] |
135 | | pub struct CapabilitiesConfig { |
136 | | /// Configuration for remote execution capabilities. |
137 | | /// If not set the capabilities service will inform the client that remote |
138 | | /// execution is not supported. |
139 | | pub remote_execution: Option<CapabilitiesRemoteExecutionConfig>, |
140 | | } |
141 | | |
142 | | #[derive(Deserialize, Serialize, Debug)] |
143 | | #[serde(deny_unknown_fields)] |
144 | | pub struct ExecutionConfig { |
145 | | /// The store name referenced in the `stores` map in the main config. |
146 | | /// The store name referenced here may be reused multiple times. |
147 | | /// This value must be a CAS store reference. |
148 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
149 | | pub cas_store: StoreRefName, |
150 | | |
151 | | /// The scheduler name referenced in the `schedulers` map in the main config. |
152 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
153 | | pub scheduler: SchedulerRefName, |
154 | | } |
155 | | |
156 | | #[derive(Deserialize, Serialize, Debug, Clone)] |
157 | | #[serde(deny_unknown_fields)] |
158 | | pub struct FetchConfig { |
159 | | /// The store name referenced in the `stores` map in the main config. |
160 | | /// The store name referenced here may be reused multiple times. |
161 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
162 | | pub fetch_store: StoreRefName, |
163 | | } |
164 | | |
165 | | #[derive(Deserialize, Serialize, Debug, Clone)] |
166 | | #[serde(deny_unknown_fields)] |
167 | | pub struct PushConfig { |
168 | | /// The store name referenced in the `stores` map in the main config. |
169 | | /// The store name referenced here may be reused multiple times. |
170 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
171 | | pub push_store: StoreRefName, |
172 | | |
173 | | /// Whether the push store may be written to. If this is set to true, the |
174 | | /// Push service is read-only and push requests will be rejected. |
175 | | #[serde(default)] |
176 | | pub read_only: bool, |
177 | | } |
178 | | |
179 | | // From https://github.com/serde-rs/serde/issues/818#issuecomment-287438544 |
180 | 80 | fn default<T: Default + PartialEq>(t: &T) -> bool { |
181 | 80 | *t == Default::default() |
182 | 80 | } |
183 | | |
184 | | #[derive(Deserialize, Serialize, Debug, Default, PartialEq, Eq)] |
185 | | #[serde(deny_unknown_fields)] |
186 | | pub struct ByteStreamConfig { |
187 | | /// Name of the store in the "stores" configuration. |
188 | | pub cas_store: StoreRefName, |
189 | | |
190 | | /// Max number of bytes to send on each grpc stream chunk. |
191 | | /// According to <https://github.com/grpc/grpc.github.io/issues/371> |
192 | | /// 16KiB - 64KiB is optimal. |
193 | | /// |
194 | | /// |
195 | | /// Default: 64KiB |
196 | | #[serde( |
197 | | default, |
198 | | deserialize_with = "convert_data_size_with_shellexpand", |
199 | | skip_serializing_if = "default" |
200 | | )] |
201 | | pub max_bytes_per_stream: usize, |
202 | | |
203 | | /// In the event a client disconnects while uploading a blob, we will hold |
204 | | /// the internal stream open for this many seconds before closing it. |
205 | | /// This allows clients that disconnect to reconnect and continue uploading |
206 | | /// the same blob. |
207 | | /// |
208 | | /// Default: 10 (seconds) |
209 | | #[serde( |
210 | | default, |
211 | | deserialize_with = "convert_duration_with_shellexpand", |
212 | | skip_serializing_if = "default" |
213 | | )] |
214 | | pub persist_stream_on_disconnect_timeout: usize, |
215 | | } |
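// Illustrative sketch (hypothetical store name and sizes): a bytestream entry
// using the newer `WithInstanceName` form:
//
//   { instance_name: "main", cas_store: "CAS_MAIN_STORE",
//     max_bytes_per_stream: 65536, persist_stream_on_disconnect_timeout: 60 }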
216 | | |
217 | | // Older bytestream config. All fields are as per the newer docs, but this uses the |
218 | | // map-based `cas_stores` rather than the `WithInstanceName` approach. It should _not_ be |
219 | | // updated with newer fields and should eventually be dropped. |
220 | | #[derive(Deserialize, Serialize, Debug, Clone)] |
221 | | #[serde(deny_unknown_fields)] |
222 | | pub struct OldByteStreamConfig { |
223 | | pub cas_stores: HashMap<InstanceName, StoreRefName>, |
224 | | #[serde( |
225 | | default, |
226 | | deserialize_with = "convert_data_size_with_shellexpand", |
227 | | skip_serializing_if = "default" |
228 | | )] |
229 | | pub max_bytes_per_stream: usize, |
230 | | #[serde( |
231 | | default, |
232 | | deserialize_with = "convert_data_size_with_shellexpand", |
233 | | skip_serializing_if = "default" |
234 | | )] |
235 | | pub max_decoding_message_size: usize, |
236 | | #[serde( |
237 | | default, |
238 | | deserialize_with = "convert_duration_with_shellexpand", |
239 | | skip_serializing_if = "default" |
240 | | )] |
241 | | pub persist_stream_on_disconnect_timeout: usize, |
242 | | } |
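// Illustrative sketch of the legacy form described above, where `cas_stores`
// maps instance names to store names (names are hypothetical):
//
//   bytestream: {
//     cas_stores: { "main": "CAS_MAIN_STORE" },
//     max_bytes_per_stream: 65536,
//   }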
243 | | |
244 | | #[derive(Deserialize, Serialize, Debug)] |
245 | | #[serde(deny_unknown_fields)] |
246 | | pub struct WorkerApiConfig { |
247 | | /// The scheduler name referenced in the `schedulers` map in the main config. |
248 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
249 | | pub scheduler: SchedulerRefName, |
250 | | } |
251 | | |
252 | | #[derive(Deserialize, Serialize, Debug, Default)] |
253 | | #[serde(deny_unknown_fields)] |
254 | | pub struct AdminConfig { |
255 | | /// Path to register the admin API. If path is "/admin", and your |
256 | | /// domain is "example.com", you can reach the endpoint with: |
257 | | /// <http://example.com/admin>. |
258 | | /// |
259 | | /// Default: "/admin" |
260 | | #[serde(default)] |
261 | | pub path: String, |
262 | | } |
263 | | |
264 | | #[derive(Deserialize, Serialize, Debug, Default)] |
265 | | #[serde(deny_unknown_fields)] |
266 | | pub struct HealthConfig { |
267 | | /// Path to register the health status check. If path is "/status", and your |
268 | | /// domain is "example.com", you can reach the endpoint with: |
269 | | /// <http://example.com/status>. |
270 | | /// |
271 | | /// Default: "/status" |
272 | | #[serde(default)] |
273 | | pub path: String, |
274 | | |
275 | | /// Timeout on health checks, in seconds. Defaults to 5s. |
276 | | #[serde(default)] |
277 | | pub timeout_seconds: u64, |
278 | | } |
279 | | |
280 | | #[derive(Deserialize, Serialize, Debug)] |
281 | | pub struct BepConfig { |
282 | | /// The store to publish build events to. |
283 | | /// The store name referenced in the `stores` map in the main config. |
284 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
285 | | pub store: StoreRefName, |
286 | | } |
287 | | |
288 | | #[derive(Deserialize, Serialize, Clone, Debug, Default)] |
289 | | pub struct IdentityHeaderSpec { |
290 | | /// The name of the header to look for the identity in. |
291 | | /// Default: "x-identity" |
292 | | #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")] |
293 | | pub header_name: Option<String>, |
294 | | |
295 | | /// If the header is required to be set or fail the request. |
296 | | #[serde(default)] |
297 | | pub required: bool, |
298 | | } |
299 | | |
300 | | #[derive(Deserialize, Serialize, Clone, Debug)] |
301 | | pub struct OriginEventsPublisherSpec { |
302 | | /// The store to publish nativelink events to. |
303 | | /// The store name referenced in the `stores` map in the main config. |
304 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
305 | | pub store: StoreRefName, |
306 | | } |
307 | | |
308 | | #[derive(Deserialize, Serialize, Clone, Debug)] |
309 | | pub struct OriginEventsSpec { |
310 | | /// The publisher configuration for origin events. |
311 | | pub publisher: OriginEventsPublisherSpec, |
312 | | |
313 | | /// The maximum number of events to queue before applying back pressure. |
314 | | /// IMPORTANT: Backpressure causes all clients to slow down significantly. |
315 | | /// A value of zero uses the default. |
316 | | /// |
317 | | /// Default: 65536 (a value of zero falls back to this) |
318 | | #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")] |
319 | | pub max_event_queue_size: usize, |
320 | | } |
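// Illustrative sketch (hypothetical store name): an origin events section
// publishing to a store and keeping the default queue size:
//
//   experimental_origin_events: {
//     publisher: { store: "ORIGIN_EVENTS_STORE" },
//     max_event_queue_size: 0,
//   }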
321 | | |
322 | | #[derive(Deserialize, Serialize, Debug)] |
323 | | #[serde(deny_unknown_fields)] |
324 | | pub struct ServicesConfig { |
325 | | /// The Content Addressable Storage (CAS) backend config. |
326 | | /// The key is the `instance_name` used in the protocol and the |
327 | | /// value is the underlying CAS store config. |
328 | | #[serde( |
329 | | default, |
330 | | deserialize_with = "super::backcompat::opt_vec_with_instance_name" |
331 | | )] |
332 | | pub cas: Option<Vec<WithInstanceName<CasStoreConfig>>>, |
333 | | |
334 | | /// The Action Cache (AC) backend config. |
335 | | /// The key is the `instance_name` used in the protocol and the |
336 | | /// value is the underlying AC store config. |
337 | | #[serde( |
338 | | default, |
339 | | deserialize_with = "super::backcompat::opt_vec_with_instance_name" |
340 | | )] |
341 | | pub ac: Option<Vec<WithInstanceName<AcStoreConfig>>>, |
342 | | |
343 | | /// Capabilities service is required in order to use most of the |
344 | | /// bazel protocol. This service is used to provide the supported |
345 | | /// features and versions of this bazel GRPC service. |
346 | | #[serde( |
347 | | default, |
348 | | deserialize_with = "super::backcompat::opt_vec_with_instance_name" |
349 | | )] |
350 | | pub capabilities: Option<Vec<WithInstanceName<CapabilitiesConfig>>>, |
351 | | |
352 | | /// The remote execution service configuration. |
353 | | /// NOTE: This service is under development and is currently just a |
354 | | /// placeholder. |
355 | | #[serde( |
356 | | default, |
357 | | deserialize_with = "super::backcompat::opt_vec_with_instance_name" |
358 | | )] |
359 | | pub execution: Option<Vec<WithInstanceName<ExecutionConfig>>>, |
360 | | |
361 | | /// This is the service used to stream data to and from the CAS. |
362 | | /// Bazel's protocol strongly encourages users to use this streaming |
363 | | /// interface to interact with the CAS when the data is large. |
364 | | #[serde(default, deserialize_with = "super::backcompat::opt_bytestream")] |
365 | | pub bytestream: Option<Vec<WithInstanceName<ByteStreamConfig>>>, |
366 | | |
367 | | /// These two services collectively form the Remote Asset protocol, but it |
368 | | /// is defined as two separate services. |
369 | | #[serde( |
370 | | default, |
371 | | deserialize_with = "super::backcompat::opt_vec_with_instance_name" |
372 | | )] |
373 | | pub fetch: Option<Vec<WithInstanceName<FetchConfig>>>, |
374 | | |
375 | | #[serde( |
376 | | default, |
377 | | deserialize_with = "super::backcompat::opt_vec_with_instance_name" |
378 | | )] |
379 | | pub push: Option<Vec<WithInstanceName<PushConfig>>>, |
380 | | |
381 | | /// This is the service used for workers to connect and communicate |
382 | | /// through. |
383 | | /// NOTE: This service should be served on a different, non-public port. |
384 | | /// In other words, `worker_api` configuration should not have any other |
385 | | /// services that are served on the same port. Doing so is a security |
386 | | /// risk, as workers have a different permission set than a client |
387 | | /// that makes the remote execution/cache requests. |
388 | | pub worker_api: Option<WorkerApiConfig>, |
389 | | |
390 | | /// Experimental - Build Event Protocol (BEP) configuration. This is |
391 | | /// the service that will consume build events from the client and |
392 | | /// publish them to a store for processing by an external service. |
393 | | pub experimental_bep: Option<BepConfig>, |
394 | | |
395 | | /// This is the service for any administrative tasks. |
396 | | /// It provides a REST API endpoint for administrative purposes. |
397 | | pub admin: Option<AdminConfig>, |
398 | | |
399 | | /// This is the service for health status check. |
400 | | pub health: Option<HealthConfig>, |
401 | | } |
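// Illustrative sketch (hypothetical store names): a typical CAS + AC +
// capabilities + bytestream services block. The backcompat deserializers also
// appear to accept the older map-keyed-by-instance-name form:
//
//   services: {
//     cas: [{ instance_name: "main", cas_store: "CAS_MAIN_STORE" }],
//     ac: [{ instance_name: "main", ac_store: "AC_MAIN_STORE" }],
//     capabilities: [{ instance_name: "main" }],
//     bytestream: [{ instance_name: "main", cas_store: "CAS_MAIN_STORE" }],
//     health: {},
//   }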
402 | | |
403 | | #[derive(Deserialize, Serialize, Debug)] |
404 | | #[serde(deny_unknown_fields)] |
405 | | pub struct TlsConfig { |
406 | | /// Path to the certificate file. |
407 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
408 | | pub cert_file: String, |
409 | | |
410 | | /// Path to the private key file. |
411 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
412 | | pub key_file: String, |
413 | | |
414 | | /// Path to the certificate authority for mTLS, if client authentication is |
415 | | /// required for this endpoint. |
416 | | #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")] |
417 | | pub client_ca_file: Option<String>, |
418 | | |
419 | | /// Path to the certificate revocation list for mTLS, if client |
420 | | /// authentication is required for this endpoint. |
421 | | #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")] |
422 | | pub client_crl_file: Option<String>, |
423 | | } |
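// Illustrative sketch (hypothetical paths): a server-side TLS section, with
// the optional mTLS fields left out:
//
//   tls: {
//     cert_file: "/etc/nativelink/tls/server.pem",
//     key_file: "/etc/nativelink/tls/server.key",
//   }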
424 | | |
425 | | /// Advanced Http configurations. These generally should not be set. |
426 | | /// For documentation on what each of these does, see the hyper documentation: |
427 | | /// See: <https://docs.rs/hyper/latest/hyper/server/conn/struct.Http.html> |
428 | | /// |
429 | | /// Note: All of these default to hyper's default values unless otherwise |
430 | | /// specified. |
431 | | #[derive(Deserialize, Serialize, Debug, Default, Clone, Copy)] |
432 | | #[serde(deny_unknown_fields)] |
433 | | pub struct HttpServerConfig { |
434 | | /// Interval to send keep-alive pings via HTTP2. |
435 | | /// Note: This is in seconds. |
436 | | #[serde( |
437 | | default, |
438 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
439 | | )] |
440 | | pub http2_keep_alive_interval: Option<u32>, |
441 | | |
442 | | #[serde( |
443 | | default, |
444 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
445 | | )] |
446 | | pub experimental_http2_max_pending_accept_reset_streams: Option<u32>, |
447 | | |
448 | | #[serde( |
449 | | default, |
450 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
451 | | )] |
452 | | pub experimental_http2_initial_stream_window_size: Option<u32>, |
453 | | |
454 | | #[serde( |
455 | | default, |
456 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
457 | | )] |
458 | | pub experimental_http2_initial_connection_window_size: Option<u32>, |
459 | | |
460 | | #[serde(default)] |
461 | | pub experimental_http2_adaptive_window: Option<bool>, |
462 | | |
463 | | #[serde( |
464 | | default, |
465 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
466 | | )] |
467 | | pub experimental_http2_max_frame_size: Option<u32>, |
468 | | |
469 | | #[serde( |
470 | | default, |
471 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
472 | | )] |
473 | | pub experimental_http2_max_concurrent_streams: Option<u32>, |
474 | | |
475 | | /// Note: This is in seconds. |
476 | | #[serde( |
477 | | default, |
478 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
479 | | )] |
480 | | pub experimental_http2_keep_alive_timeout: Option<u32>, |
481 | | |
482 | | #[serde( |
483 | | default, |
484 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
485 | | )] |
486 | | pub experimental_http2_max_send_buf_size: Option<u32>, |
487 | | |
488 | | #[serde(default)] |
489 | | pub experimental_http2_enable_connect_protocol: Option<bool>, |
490 | | |
491 | | #[serde( |
492 | | default, |
493 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
494 | | )] |
495 | | pub experimental_http2_max_header_list_size: Option<u32>, |
496 | | } |
497 | | |
498 | | #[derive(Deserialize, Serialize, Debug)] |
499 | | #[serde(rename_all = "snake_case")] |
500 | | pub enum ListenerConfig { |
501 | | /// Listener for HTTP/HTTPS/HTTP2 sockets. |
502 | | Http(HttpListener), |
503 | | } |
504 | | |
505 | | #[derive(Deserialize, Serialize, Debug, Default)] |
506 | | #[serde(deny_unknown_fields)] |
507 | | pub struct HttpListener { |
508 | | /// Address to listen on. Example: `127.0.0.1:8080` or `:8080` to listen |
509 | | /// on all IPs. |
510 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
511 | | pub socket_address: String, |
512 | | |
513 | | /// Data transport compression configuration to use for this service. |
514 | | #[serde(default)] |
515 | | pub compression: HttpCompressionConfig, |
516 | | |
517 | | /// Advanced Http server configuration. |
518 | | #[serde(default)] |
519 | | pub advanced_http: HttpServerConfig, |
520 | | |
521 | | /// Maximum number of bytes to decode on each grpc stream chunk. |
522 | | /// Default: 4 MiB |
523 | | #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")] |
524 | | pub max_decoding_message_size: usize, |
525 | | |
526 | | /// Tls Configuration for this server. |
527 | | /// If not set, the server will not use TLS. |
528 | | /// |
529 | | /// Default: None |
530 | | #[serde(default)] |
531 | | pub tls: Option<TlsConfig>, |
532 | | } |
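// Illustrative sketch (hypothetical address): a plain-HTTP listener entry as it
// would appear inside `ListenerConfig::Http` (`snake_case` variant name):
//
//   listener: {
//     http: {
//       socket_address: "0.0.0.0:50051",
//       max_decoding_message_size: 4194304,
//     }
//   }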
533 | | |
534 | | #[derive(Deserialize, Serialize, Debug)] |
535 | | #[serde(deny_unknown_fields)] |
536 | | pub struct ServerConfig { |
537 | | /// Name of the server. This is used to help identify the service |
538 | | /// for telemetry and logs. |
539 | | /// |
540 | | /// Default: {index of server in config} |
541 | | #[serde(default, deserialize_with = "convert_string_with_shellexpand")] |
542 | | pub name: String, |
543 | | |
544 | | /// Listener configuration for this server. |
545 | | pub listener: ListenerConfig, |
546 | | |
547 | | /// Services to attach to server. |
548 | | pub services: Option<ServicesConfig>, |
549 | | |
550 | | /// The config related to identifying the client. |
551 | | /// Default: {see `IdentityHeaderSpec`} |
552 | | #[serde(default)] |
553 | | pub experimental_identity_header: IdentityHeaderSpec, |
554 | | } |
555 | | |
556 | | #[derive(Deserialize, Serialize, Debug)] |
557 | | #[serde(rename_all = "snake_case")] |
558 | | pub enum WorkerProperty { |
559 | | /// List of static values. |
560 | | /// Note: Generally there should only ever be 1 value, but if the platform |
561 | | /// property key is `PropertyType::Priority` it may have more than one value. |
562 | | #[serde(deserialize_with = "convert_vec_string_with_shellexpand")] |
563 | | Values(Vec<String>), |
564 | | |
565 | | /// A dynamic configuration. The string will be executed as a command |
566 | | /// (not shell) and will be split by "\n" (newline character). |
567 | | QueryCmd(String), |
568 | | } |
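// Illustrative sketch (hypothetical property names): static values and a
// dynamic command, as they would appear under a worker's `platform_properties`:
//
//   platform_properties: {
//     OSFamily: { values: ["linux"] },
//     cpu_count: { query_cmd: "nproc" },
//   }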
569 | | |
570 | | /// Generic config for an endpoint and associated configs. |
571 | | #[derive(Deserialize, Serialize, Debug, Default)] |
572 | | #[serde(deny_unknown_fields)] |
573 | | pub struct EndpointConfig { |
574 | | /// URI of the endpoint. |
575 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
576 | | pub uri: String, |
577 | | |
578 | | /// Timeout in seconds that a request should take. |
579 | | /// Default: 5 (seconds) |
580 | | pub timeout: Option<f32>, |
581 | | |
582 | | /// The TLS configuration to use to connect to the endpoint. |
583 | | pub tls_config: Option<ClientTlsConfig>, |
584 | | } |
585 | | |
586 | | #[derive(Copy, Clone, Deserialize, Serialize, Debug, Default)] |
587 | | #[serde(rename_all = "snake_case")] |
588 | | pub enum UploadCacheResultsStrategy { |
589 | | /// Only upload action results with an exit code of 0. |
590 | | #[default] |
591 | | SuccessOnly, |
592 | | |
593 | | /// Don't upload any action results. |
594 | | Never, |
595 | | |
596 | | /// Upload all action results that complete. |
597 | | Everything, |
598 | | |
599 | | /// Only upload action results that fail. |
600 | | FailuresOnly, |
601 | | } |
602 | | |
603 | | #[derive(Clone, Deserialize, Serialize, Debug)] |
604 | | #[serde(rename_all = "snake_case")] |
605 | | pub enum EnvironmentSource { |
606 | | /// The name of the platform property in the action to get the value from. |
607 | | Property(String), |
608 | | |
609 | | /// The raw value to set. |
610 | | Value(#[serde(deserialize_with = "convert_string_with_shellexpand")] String), |
611 | | |
612 | | /// The max amount of time in milliseconds the command is allowed to run |
613 | | /// (requested by the client). |
614 | | TimeoutMillis, |
615 | | |
616 | | /// A special file path will be provided that can be used to communicate |
617 | | /// with the parent process about out-of-band information. This file |
618 | | /// will be read after the command has finished executing. Based on the |
619 | | /// contents of the file, the behavior of the result may be modified. |
620 | | /// |
621 | | /// The format of the file contents should be json with the following |
622 | | /// schema: |
623 | | /// { |
624 | | /// // If set the command will be considered a failure. |
625 | | /// // May be one of the following static strings: |
626 | | /// // "timeout": Will Consider this task to be a timeout. |
627 | | /// "failure": "timeout", |
628 | | /// } |
629 | | /// |
630 | | /// All fields are optional; the file does not need to be created and may |
631 | | /// be empty. |
632 | | SideChannelFile, |
633 | | |
634 | | /// A "root" directory for the action. This directory can be used to |
635 | | /// store temporary files that are not needed after the action has |
636 | | /// completed. This directory will be purged after the action has |
637 | | /// completed. |
638 | | /// |
639 | | /// For example: |
640 | | /// If an action writes temporary data to a path but nativelink should |
641 | | /// clean up this path after the job has executed, you may create any |
642 | | /// directory under the path provided in this variable. A common pattern |
643 | | /// would be to use `entrypoint` to set a shell script that reads this |
644 | | /// variable, `mkdir $ENV_VAR_NAME/tmp` and `export TMPDIR=$ENV_VAR_NAME/tmp`. |
645 | | /// Another example might be to bind-mount the `/tmp` path in a container to |
646 | | /// this path in `entrypoint`. |
647 | | ActionDirectory, |
648 | | } |
649 | | |
650 | | #[derive(Deserialize, Serialize, Debug, Default)] |
651 | | #[serde(deny_unknown_fields)] |
652 | | pub struct UploadActionResultConfig { |
653 | | /// Underlying AC store that the worker will use to publish execution results |
654 | | /// into. Objects placed in this store should be reachable from the |
655 | | /// scheduler/client-cas after they have finished updating. |
656 | | /// Default: {No uploading is done} |
657 | | pub ac_store: Option<StoreRefName>, |
658 | | |
659 | | /// In which situations the results should be published to the `ac_store`. |
660 | | /// If set to `SuccessOnly`, only results with an exit code of 0 will be |
661 | | /// uploaded; if set to `Everything`, all completed results will be uploaded. |
662 | | /// |
663 | | /// Default: `UploadCacheResultsStrategy::SuccessOnly` |
664 | | #[serde(default)] |
665 | | pub upload_ac_results_strategy: UploadCacheResultsStrategy, |
666 | | |
667 | | /// Store to upload historical results to. This should be a CAS store if set. |
668 | | /// |
669 | | /// Default: {CAS store of parent} |
670 | | pub historical_results_store: Option<StoreRefName>, |
671 | | |
672 | | /// In which situations the results should be published to the historical CAS. |
673 | | /// The historical CAS is where failures are published. These messages conform |
674 | | /// to the CAS key-value lookup format and are always a `HistoricalExecuteResponse` |
675 | | /// serialized message. |
676 | | /// |
677 | | /// Default: `UploadCacheResultsStrategy::FailuresOnly` |
678 | | #[serde(default)] |
679 | | pub upload_historical_results_strategy: Option<UploadCacheResultsStrategy>, |
680 | | |
681 | | /// Template to use for the `ExecuteResponse.message` property. This message |
682 | | /// is attached to the response before it is sent to the client. The following |
683 | | /// special variables are supported: |
684 | | /// - `digest_function`: Digest function used to calculate the action digest. |
685 | | /// - `action_digest_hash`: Action digest hash. |
686 | | /// - `action_digest_size`: Action digest size. |
687 | | /// - `historical_results_hash`: `HistoricalExecuteResponse` digest hash. |
688 | | /// - `historical_results_size`: `HistoricalExecuteResponse` digest size. |
689 | | /// |
690 | | /// A common use case of this is to provide a link to the web page that |
691 | | /// contains more useful information for the user. |
692 | | /// |
693 | | /// An example that is fully compatible with `bb_browser` is: |
694 | | /// <https://example.com/my-instance-name-here/blobs/{digest_function}/action/{action_digest_hash}-{action_digest_size}/> |
695 | | /// |
696 | | /// Default: "" (no message) |
697 | | #[serde(default, deserialize_with = "convert_string_with_shellexpand")] |
698 | | pub success_message_template: String, |
699 | | |
700 | | /// Same as `success_message_template` but for failure case. |
701 | | /// |
702 | | /// An example that is fully compatible with `bb_browser` is: |
703 | | /// <https://example.com/my-instance-name-here/blobs/{digest_function}/historical_execute_response/{historical_results_hash}-{historical_results_size}/> |
704 | | /// |
705 | | /// Default: "" (no message) |
706 | | #[serde(default, deserialize_with = "convert_string_with_shellexpand")] |
707 | | pub failure_message_template: String, |
708 | | } |
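// Illustrative sketch (hypothetical store name and URL): upload successful
// results to the AC and link failure details kept in the historical CAS:
//
//   upload_action_result: {
//     ac_store: "AC_MAIN_STORE",
//     upload_ac_results_strategy: "success_only",
//     failure_message_template: "https://example.com/main/blobs/{digest_function}/historical_execute_response/{historical_results_hash}-{historical_results_size}/",
//   }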
709 | | |
710 | | #[derive(Deserialize, Serialize, Debug, Default)] |
711 | | #[serde(deny_unknown_fields)] |
712 | | pub struct LocalWorkerConfig { |
713 | | /// Name of the worker. This gives a more friendly name to a worker for logging |
714 | | /// and metric publishing. This is also the prefix of the worker id |
715 | | /// (i.e. "{name}{uuidv6}"). |
716 | | /// Default: {Index position in the workers list} |
717 | | #[serde(default, deserialize_with = "convert_string_with_shellexpand")] |
718 | | pub name: String, |
719 | | |
720 | | /// Endpoint which the worker will connect to the scheduler's `WorkerApiService`. |
721 | | pub worker_api_endpoint: EndpointConfig, |
722 | | |
723 | | /// The maximum time an action is allowed to run. If a task requests a timeout |
724 | | /// longer than this time limit, the task will be rejected. Value in seconds. |
725 | | /// |
726 | | /// Default: 1200 (seconds / 20 mins) |
727 | | #[serde(default, deserialize_with = "convert_duration_with_shellexpand")] |
728 | | pub max_action_timeout: usize, |
729 | | |
730 | | /// If timeout is handled in `entrypoint` or another wrapper script. |
731 | | /// If set to true `NativeLink` will not honor the timeout the action requested |
732 | | /// and instead will always force kill the action after `max_action_timeout` |
733 | | /// has been reached. If this is set to false, the smaller value of the action's |
734 | | /// timeout and `max_action_timeout` will be used to which `NativeLink` will kill |
735 | | /// the action. |
736 | | /// |
737 | | /// The real timeout can be received via an environment variable set in: |
738 | | /// `EnvironmentSource::TimeoutMillis`. |
739 | | /// |
740 | | /// Example on where this is useful: `entrypoint` launches the action inside |
741 | | /// a docker container, but the docker container may need to be downloaded. Thus |
742 | | /// the timer should not start until the docker container has started executing |
743 | | /// the action. In this case, the action will likely be wrapped in another program, |
744 | | /// like `timeout` and propagate timeouts via `EnvironmentSource::SideChannelFile`. |
745 | | /// |
746 | | /// Default: false (`NativeLink` fully handles timeouts) |
747 | | #[serde(default)] |
748 | | pub timeout_handled_externally: bool, |
749 | | |
750 | | /// The command to execute on every execution request. This will be parsed as |
751 | | /// a command + arguments (not shell). |
752 | | /// Example: "run.sh" and a job with command: "sleep 5" will result in a |
753 | | /// command like: "run.sh sleep 5". |
754 | | /// Default: {Use the command from the job request}. |
755 | | #[serde(default, deserialize_with = "convert_string_with_shellexpand")] |
756 | | pub entrypoint: String, |
757 | | |
758 | | /// An optional script to run before every action is processed on the worker. |
759 | | /// The value should be the full path to the script to execute and will pause |
760 | | /// all actions on the worker if it returns an exit code other than 0. |
761 | | /// If not set, then the worker will never pause and will continue to accept |
762 | | /// jobs according to the scheduler configuration. |
763 | | /// This is useful, for example, if the worker should not take any more |
764 | | /// actions until there is enough resource available on the machine to |
765 | | /// handle them. |
766 | | pub experimental_precondition_script: Option<String>, |
767 | | |
768 | | /// Underlying CAS store that the worker will use to download CAS artifacts. |
769 | | /// This store must be a `FastSlowStore`. The `fast` store must be a |
770 | | /// `FileSystemStore` because it will use hardlinks when building out the files |
771 | | /// instead of copying the files. The slow store must eventually resolve to the |
772 | | /// same store the scheduler/client uses to send job requests. |
773 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
774 | | pub cas_fast_slow_store: StoreRefName, |
775 | | |
776 | | /// Configuration for uploading action results. |
777 | | #[serde(default)] |
778 | | pub upload_action_result: UploadActionResultConfig, |
779 | | |
780 | | /// The directory work jobs will be executed from. This directory will be fully |
781 | | /// managed by the worker service and will be purged on startup. |
782 | | /// This directory and the directory referenced in `local_filesystem_store_ref`'s |
783 | | /// `stores::FilesystemStore::content_path` must be on the same filesystem. |
784 | | /// Hardlinks will be used when placing files that are accessible to the jobs |
785 | | /// that are sourced from `local_filesystem_store_ref`'s `content_path`. |
786 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
787 | | pub work_directory: String, |
788 | | |
789 | | /// Properties of this worker. This configuration will be sent to the scheduler |
790 | | /// and used to tell the scheduler to restrict what should be executed on this |
791 | | /// worker. |
792 | | pub platform_properties: HashMap<String, WorkerProperty>, |
793 | | |
794 | | /// An optional mapping of environment variable names to set for the execution, |
795 | | /// in addition to those specified in the action itself. If set, each key is |
796 | | /// exported as an environment variable before executing the job, with its value |
797 | | /// taken from the named property of the action being executed or from the |
798 | | /// configured fixed value. |
799 | | pub additional_environment: Option<HashMap<String, EnvironmentSource>>, |
800 | | } |
801 | | |
802 | | #[derive(Deserialize, Serialize, Debug)] |
803 | | #[serde(rename_all = "snake_case")] |
804 | | pub enum WorkerConfig { |
805 | | /// A worker type that executes jobs locally on this machine. |
806 | | Local(LocalWorkerConfig), |
807 | | } |
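// Illustrative sketch (hypothetical endpoint, paths, and store names): a
// minimal local worker entry as it would appear under the top-level `workers`
// list, using the `local` variant above:
//
//   workers: [{
//     local: {
//       worker_api_endpoint: { uri: "grpc://127.0.0.1:50061" },
//       cas_fast_slow_store: "WORKER_FAST_SLOW_STORE",
//       work_directory: "/tmp/nativelink/work",
//       platform_properties: { OSFamily: { values: ["linux"] } },
//       upload_action_result: { ac_store: "AC_MAIN_STORE" },
//     }
//   }]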
808 | | |
809 | | #[derive(Deserialize, Serialize, Debug, Clone, Copy)] |
810 | | #[serde(deny_unknown_fields)] |
811 | | pub struct GlobalConfig { |
812 | | /// Maximum number of open files that can be opened at one time. |
813 | | /// This value is not strictly enforced; it is a best effort. Some internal libraries |
814 | | /// open files or read metadata from files in ways that do not obey this limit, however |
815 | | /// in the vast majority of cases the limit will be honored. |
816 | | /// This value must be larger than `ulimit -n` to have any effect. |
817 | | /// Network file descriptors are not counted in this limit, but are counted |
818 | | /// in the kernel limit. It is a good idea to set a very large `ulimit -n`. |
819 | | /// Note: This value must be greater than 10. |
820 | | /// |
821 | | /// Default: 24576 (= 24 * 1024) |
822 | | #[serde(deserialize_with = "convert_numeric_with_shellexpand")] |
823 | | pub max_open_files: usize, |
824 | | |
825 | | /// Default hash function to use while uploading blobs to the CAS when not set |
826 | | /// by client. |
827 | | /// |
828 | | /// Default: `ConfigDigestHashFunction::sha256` |
829 | | pub default_digest_hash_function: Option<ConfigDigestHashFunction>, |
830 | | |
831 | | /// Default digest size to use for health checks when running |
832 | | /// diagnostics. Health checks are expected to use this |
833 | | /// size to fill a buffer that is used to create the |
834 | | /// digest. |
835 | | /// |
836 | | /// Default: 1024*1024 (1MiB) |
837 | | #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")] |
838 | | pub default_digest_size_health_check: usize, |
839 | | } |
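// Illustrative sketch: a global section pinning the documented defaults
// explicitly (values are the defaults described above):
//
//   global: {
//     max_open_files: 24576,
//     default_digest_hash_function: "sha256",
//     default_digest_size_health_check: 1048576,
//   }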
840 | | |
841 | | pub type StoreConfig = NamedConfig<StoreSpec>; |
842 | | pub type SchedulerConfig = NamedConfig<SchedulerSpec>; |
843 | | |
844 | | #[derive(Deserialize, Serialize, Debug)] |
845 | | #[serde(deny_unknown_fields)] |
846 | | pub struct CasConfig { |
847 | | /// List of stores available to use in this config. |
848 | | /// The keys can be used in other configs when needing to reference a store. |
849 | | pub stores: Vec<StoreConfig>, |
850 | | |
851 | | /// Worker configurations used to execute jobs. |
852 | | pub workers: Option<Vec<WorkerConfig>>, |
853 | | |
854 | | /// List of schedulers available to use in this config. |
855 | | /// The keys can be used in other configs when needing to reference a |
856 | | /// scheduler. |
857 | | pub schedulers: Option<Vec<SchedulerConfig>>, |
858 | | |
859 | | /// Servers to setup for this process. |
860 | | pub servers: Vec<ServerConfig>, |
861 | | |
862 | | /// Experimental - Origin events configuration. This is the service that will |
863 | | /// collect and publish nativelink events to a store for processing by an |
864 | | /// external service. |
865 | | pub experimental_origin_events: Option<OriginEventsSpec>, |
866 | | |
867 | | /// Any global configurations that apply to all modules live here. |
868 | | pub global: Option<GlobalConfig>, |
869 | | } |
870 | | |
871 | | impl CasConfig { |
872 | | /// # Errors |
873 | | /// |
874 | | /// Will return `Err` if we can't load the file. |
875 | 9 | pub fn try_from_json5_file(config_file: &str) -> Result<Self, Error> { |
876 | 9 | let json_contents = std::fs::read_to_string(config_file) |
877 | 9 | .err_tip(|| format!("Could not open config file {config_file}"))?; |
878 | 9 | Ok(serde_json5::from_str(&json_contents)?) |
879 | 9 | } |
880 | | } |
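// A minimal, self-contained sketch of how this parses end to end. The shape of
// the snippet follows the structs above; everything else (the test name, the
// loopback address) is illustrative:
#[cfg(test)]
mod cas_config_json5_sketch {
    use super::*;

    #[test]
    fn parses_minimal_server_only_config() {
        // Smallest config this file requires: a (possibly empty) `stores` list
        // and a `servers` list with a listener for each server.
        let cfg: CasConfig = serde_json5::from_str(
            r#"{
                stores: [],
                servers: [{
                    listener: { http: { socket_address: "127.0.0.1:0" } },
                }],
            }"#,
        )
        .expect("minimal config should deserialize");
        assert_eq!(cfg.servers.len(), 1);
    }
}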