/build/source/nativelink-config/src/cas_server.rs
Line | Count | Source |
1 | | // Copyright 2024 The NativeLink Authors. All rights reserved. |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // http://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | |
15 | | use std::collections::HashMap; |
16 | | |
17 | | use nativelink_error::{Error, ResultExt}; |
18 | | use serde::Deserialize; |
19 | | |
20 | | use crate::schedulers::SchedulerSpec; |
21 | | use crate::serde_utils::{ |
22 | | convert_data_size_with_shellexpand, convert_duration_with_shellexpand, |
23 | | convert_numeric_with_shellexpand, convert_optional_numeric_with_shellexpand, |
24 | | convert_optional_string_with_shellexpand, convert_string_with_shellexpand, |
25 | | convert_vec_string_with_shellexpand, |
26 | | }; |
27 | | use crate::stores::{ClientTlsConfig, ConfigDigestHashFunction, StoreRefName, StoreSpec}; |
28 | | |
29 | | /// Name of the scheduler. This type will be used when referencing a |
30 | | /// scheduler in the `CasConfig::schedulers`'s map key. |
31 | | pub type SchedulerRefName = String; |
32 | | |
33 | | /// Used when the config references `instance_name` in the protocol. |
34 | | pub type InstanceName = String; |
35 | | |
36 | | #[derive(Debug, Default, Clone, PartialEq, Eq, Deserialize)] |
37 | | pub struct WithInstanceName<T> { |
38 | | #[serde(default)] |
39 | | pub instance_name: InstanceName, |
40 | | #[serde(flatten)] |
41 | | pub config: T, |
42 | | } |
43 | | |
44 | | impl<T> core::ops::Deref for WithInstanceName<T> { |
45 | | type Target = T; |
46 | | |
47 | 21 | fn deref(&self) -> &Self::Target { |
48 | 21 | &self.config |
49 | 21 | } |
50 | | } |
51 | | |
52 | | #[derive(Debug, Clone, PartialEq, Eq, Deserialize)] |
53 | | pub struct NamedConfig<Spec> { |
54 | | pub name: String, |
55 | | #[serde(flatten)] |
56 | | pub spec: Spec, |
57 | | } |
58 | | |
59 | | #[derive(Deserialize, Debug, Default, Clone, Copy)] |
60 | | #[serde(rename_all = "snake_case")] |
61 | | pub enum HttpCompressionAlgorithm { |
62 | | /// No compression. |
63 | | #[default] |
64 | | None, |
65 | | |
66 | | /// Gzip compression. |
67 | | Gzip, |
68 | | } |
69 | | |
70 | | /// Note: Compressing data in the cloud rarely has a benefit, since most |
71 | | /// cloud providers have very high bandwidth backplanes. However, for |
72 | | /// clients not inside the data center, it might be a good idea to |
73 | | /// compress data to and from the cloud. This will however come at a high |
74 | | /// CPU and performance cost. If you are making remote execution share the |
75 | | /// same CAS/AC servers as client's remote cache, you can create multiple |
76 | | /// services with different compression settings that are served on |
77 | | /// different ports. Then configure the non-cloud clients to use one port |
78 | | /// and cloud-clients to use another. |
79 | | #[derive(Deserialize, Debug, Default)] |
80 | | #[serde(deny_unknown_fields)] |
81 | | pub struct HttpCompressionConfig { |
82 | | /// The compression algorithm that the server will use when sending |
83 | | /// responses to clients. Enabling this will likely save a lot of |
84 | | /// data transfer, but will consume a lot of CPU and add a lot of |
85 | | /// latency. |
86 | | /// see: <https://github.com/tracemachina/nativelink/issues/109> |
87 | | /// |
88 | | /// Default: `HttpCompressionAlgorithm::None` |
89 | | pub send_compression_algorithm: Option<HttpCompressionAlgorithm>, |
90 | | |
91 | | /// The compression algorithm that the server will accept from clients. |
92 | | /// The server will broadcast the supported compression algorithms to |
93 | | /// clients and the client will choose which compression algorithm to |
94 | | /// use. Enabling this will likely save a lot of data transfer, but |
95 | | /// will consume a lot of CPU and add a lot of latency. |
96 | | /// see: <https://github.com/tracemachina/nativelink/issues/109> |
97 | | /// |
98 | | /// Default: {no supported compression} |
99 | | pub accepted_compression_algorithms: Vec<HttpCompressionAlgorithm>, |
100 | | } |
101 | | |
102 | | #[derive(Deserialize, Debug)] |
103 | | #[serde(deny_unknown_fields)] |
104 | | pub struct AcStoreConfig { |
105 | | /// The store name referenced in the `stores` map in the main config. |
106 | | /// This store name referenced here may be reused multiple times. |
107 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
108 | | pub ac_store: StoreRefName, |
109 | | |
110 | | /// Whether the Action Cache store may be written to. If this is set to false, |
111 | | /// it is only possible to read from the Action Cache. |
112 | | #[serde(default)] |
113 | | pub read_only: bool, |
114 | | } |
115 | | |
116 | | #[derive(Deserialize, Debug)] |
117 | | #[serde(deny_unknown_fields)] |
118 | | pub struct CasStoreConfig { |
119 | | /// The store name referenced in the `stores` map in the main config. |
120 | | /// This store name referenced here may be reused multiple times. |
121 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
122 | | pub cas_store: StoreRefName, |
123 | | } |
124 | | |
125 | | #[derive(Deserialize, Debug, Default)] |
126 | | #[serde(deny_unknown_fields)] |
127 | | pub struct CapabilitiesRemoteExecutionConfig { |
128 | | /// Scheduler used to configure the capabilities of remote execution. |
129 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
130 | | pub scheduler: SchedulerRefName, |
131 | | } |
132 | | |
133 | | #[derive(Deserialize, Debug, Default)] |
134 | | #[serde(deny_unknown_fields)] |
135 | | pub struct CapabilitiesConfig { |
136 | | /// Configuration for remote execution capabilities. |
137 | | /// If not set the capabilities service will inform the client that remote |
138 | | /// execution is not supported. |
139 | | pub remote_execution: Option<CapabilitiesRemoteExecutionConfig>, |
140 | | } |
141 | | |
142 | | #[derive(Deserialize, Debug)] |
143 | | #[serde(deny_unknown_fields)] |
144 | | pub struct ExecutionConfig { |
145 | | /// The store name referenced in the `stores` map in the main config. |
146 | | /// This store name referenced here may be reused multiple times. |
147 | | /// This value must be a CAS store reference. |
148 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
149 | | pub cas_store: StoreRefName, |
150 | | |
151 | | /// The scheduler name referenced in the `schedulers` map in the main config. |
152 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
153 | | pub scheduler: SchedulerRefName, |
154 | | } |
155 | | |
156 | | #[derive(Deserialize, Debug, Clone)] |
157 | | #[serde(deny_unknown_fields)] |
158 | | pub struct FetchConfig { |
159 | | /// The store name referenced in the `stores` map in the main config. |
160 | | /// This store name referenced here may be reused multiple times. |
161 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
162 | | pub fetch_store: StoreRefName, |
163 | | } |
164 | | |
165 | | #[derive(Deserialize, Debug, Clone)] |
166 | | #[serde(deny_unknown_fields)] |
167 | | pub struct PushConfig { |
168 | | /// The store name referenced in the `stores` map in the main config. |
169 | | /// This store name referenced here may be reused multiple times. |
170 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
171 | | pub push_store: StoreRefName, |
172 | | |
173 | | /// Whether the push store may be written to. If this is set to false, |
174 | | /// it is only possible to read from the Action Cache. |
175 | | #[serde(default)] |
176 | | pub read_only: bool, |
177 | | } |
178 | | |
179 | | #[derive(Deserialize, Debug, Default)] |
180 | | #[serde(deny_unknown_fields)] |
181 | | pub struct ByteStreamConfig { |
182 | | /// Name of the store in the "stores" configuration. |
183 | | pub cas_stores: HashMap<InstanceName, StoreRefName>, |
184 | | |
185 | | /// Max number of bytes to send on each grpc stream chunk. |
186 | | /// According to <https://github.com/grpc/grpc.github.io/issues/371> |
187 | | /// 16KiB - 64KiB is optimal. |
188 | | /// |
189 | | /// |
190 | | /// Default: 64KiB |
191 | | #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")] |
192 | | pub max_bytes_per_stream: usize, |
193 | | |
194 | | /// Maximum number of bytes to decode on each grpc stream chunk. |
195 | | /// Default: 4 MiB |
196 | | #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")] |
197 | | pub max_decoding_message_size: usize, |
198 | | |
199 | | /// In the event a client disconnects while uploading a blob, we will hold |
200 | | /// the internal stream open for this many seconds before closing it. |
201 | | /// This allows clients that disconnect to reconnect and continue uploading |
202 | | /// the same blob. |
203 | | /// |
204 | | /// Default: 10 (seconds) |
205 | | #[serde(default, deserialize_with = "convert_duration_with_shellexpand")] |
206 | | pub persist_stream_on_disconnect_timeout: usize, |
207 | | } |
208 | | |
209 | | #[derive(Deserialize, Debug)] |
210 | | #[serde(deny_unknown_fields)] |
211 | | pub struct WorkerApiConfig { |
212 | | /// The scheduler name referenced in the `schedulers` map in the main config. |
213 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
214 | | pub scheduler: SchedulerRefName, |
215 | | } |
216 | | |
217 | | #[derive(Deserialize, Debug, Default)] |
218 | | #[serde(deny_unknown_fields)] |
219 | | pub struct AdminConfig { |
220 | | /// Path to register the admin API. If path is "/admin", and your |
221 | | /// domain is "example.com", you can reach the endpoint with: |
222 | | /// <http://example.com/admin>. |
223 | | /// |
224 | | /// Default: "/admin" |
225 | | #[serde(default)] |
226 | | pub path: String, |
227 | | } |
228 | | |
229 | | #[derive(Deserialize, Debug, Default)] |
230 | | #[serde(deny_unknown_fields)] |
231 | | pub struct HealthConfig { |
232 | | /// Path to register the health status check. If path is "/status", and your |
233 | | /// domain is "example.com", you can reach the endpoint with: |
234 | | /// <http://example.com/status>. |
235 | | /// |
236 | | /// Default: "/status" |
237 | | #[serde(default)] |
238 | | pub path: String, |
239 | | } |
240 | | |
241 | | #[derive(Deserialize, Debug)] |
242 | | pub struct BepConfig { |
243 | | /// The store to publish build events to. |
244 | | /// The store name referenced in the `stores` map in the main config. |
245 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
246 | | pub store: StoreRefName, |
247 | | } |
248 | | |
249 | | #[derive(Deserialize, Clone, Debug, Default)] |
250 | | pub struct IdentityHeaderSpec { |
251 | | /// The name of the header to look for the identity in. |
252 | | /// Default: "x-identity" |
253 | | #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")] |
254 | | pub header_name: Option<String>, |
255 | | |
256 | | /// If the header is required to be set or fail the request. |
257 | | #[serde(default)] |
258 | | pub required: bool, |
259 | | } |
260 | | |
261 | | #[derive(Deserialize, Clone, Debug)] |
262 | | pub struct OriginEventsPublisherSpec { |
263 | | /// The store to publish nativelink events to. |
264 | | /// The store name referenced in the `stores` map in the main config. |
265 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
266 | | pub store: StoreRefName, |
267 | | } |
268 | | |
269 | | #[derive(Deserialize, Clone, Debug)] |
270 | | pub struct OriginEventsSpec { |
271 | | /// The publisher configuration for origin events. |
272 | | pub publisher: OriginEventsPublisherSpec, |
273 | | |
274 | | /// The maximum number of events to queue before applying back pressure. |
275 | | /// IMPORTANT: Backpressure causes all clients to slow down significantly. |
276 | | /// Zero is default. |
277 | | /// |
278 | | /// Default: 65536 (zero defaults to this) |
279 | | #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")] |
280 | | pub max_event_queue_size: usize, |
281 | | } |
282 | | |
283 | | #[derive(Deserialize, Debug)] |
284 | | #[serde(deny_unknown_fields)] |
285 | | pub struct ServicesConfig { |
286 | | /// The Content Addressable Storage (CAS) backend config. |
287 | | /// The key is the `instance_name` used in the protocol and the |
288 | | /// value is the underlying CAS store config. |
289 | | #[serde( |
290 | | default, |
291 | | deserialize_with = "super::backcompat::opt_vec_with_instance_name" |
292 | | )] |
293 | | pub cas: Option<Vec<WithInstanceName<CasStoreConfig>>>, |
294 | | |
295 | | /// The Action Cache (AC) backend config. |
296 | | /// The key is the `instance_name` used in the protocol and the |
297 | | /// value is the underlying AC store config. |
298 | | #[serde( |
299 | | default, |
300 | | deserialize_with = "super::backcompat::opt_vec_with_instance_name" |
301 | | )] |
302 | | pub ac: Option<Vec<WithInstanceName<AcStoreConfig>>>, |
303 | | |
304 | | /// Capabilities service is required in order to use most of the |
305 | | /// bazel protocol. This service is used to provide the supported |
306 | | /// features and versions of this bazel GRPC service. |
307 | | #[serde( |
308 | | default, |
309 | | deserialize_with = "super::backcompat::opt_vec_with_instance_name" |
310 | | )] |
311 | | pub capabilities: Option<Vec<WithInstanceName<CapabilitiesConfig>>>, |
312 | | |
313 | | /// The remote execution service configuration. |
314 | | /// NOTE: This service is under development and is currently just a |
315 | | /// placeholder. |
316 | | #[serde( |
317 | | default, |
318 | | deserialize_with = "super::backcompat::opt_vec_with_instance_name" |
319 | | )] |
320 | | pub execution: Option<Vec<WithInstanceName<ExecutionConfig>>>, |
321 | | |
322 | | /// This is the service used to stream data to and from the CAS. |
323 | | /// Bazel's protocol strongly encourages users to use this streaming |
324 | | /// interface to interact with the CAS when the data is large. |
325 | | pub bytestream: Option<ByteStreamConfig>, |
326 | | |
327 | | /// These two are collectively the Remote Asset protocol, but it's |
328 | | /// defined as two separate services |
329 | | #[serde( |
330 | | default, |
331 | | deserialize_with = "super::backcompat::opt_vec_with_instance_name" |
332 | | )] |
333 | | pub fetch: Option<Vec<WithInstanceName<FetchConfig>>>, |
334 | | |
335 | | #[serde( |
336 | | default, |
337 | | deserialize_with = "super::backcompat::opt_vec_with_instance_name" |
338 | | )] |
339 | | pub push: Option<Vec<WithInstanceName<PushConfig>>>, |
340 | | |
341 | | /// This is the service used for workers to connect and communicate |
342 | | /// through. |
343 | | /// NOTE: This service should be served on a different, non-public port. |
344 | | /// In other words, `worker_api` configuration should not have any other |
345 | | /// services that are served on the same port. Doing so is a security |
346 | | /// risk, as workers have a different permission set than a client |
347 | | /// that makes the remote execution/cache requests. |
348 | | pub worker_api: Option<WorkerApiConfig>, |
349 | | |
350 | | /// Experimental - Build Event Protocol (BEP) configuration. This is |
351 | | /// the service that will consume build events from the client and |
352 | | /// publish them to a store for processing by an external service. |
353 | | pub experimental_bep: Option<BepConfig>, |
354 | | |
355 | | /// This is the service for any administrative tasks. |
356 | | /// It provides a REST API endpoint for administrative purposes. |
357 | | pub admin: Option<AdminConfig>, |
358 | | |
359 | | /// This is the service for health status check. |
360 | | pub health: Option<HealthConfig>, |
361 | | } |
362 | | |
363 | | #[derive(Deserialize, Debug)] |
364 | | #[serde(deny_unknown_fields)] |
365 | | pub struct TlsConfig { |
366 | | /// Path to the certificate file. |
367 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
368 | | pub cert_file: String, |
369 | | |
370 | | /// Path to the private key file. |
371 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
372 | | pub key_file: String, |
373 | | |
374 | | /// Path to the certificate authority for mTLS, if client authentication is |
375 | | /// required for this endpoint. |
376 | | #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")] |
377 | | pub client_ca_file: Option<String>, |
378 | | |
379 | | /// Path to the certificate revocation list for mTLS, if client |
380 | | /// authentication is required for this endpoint. |
381 | | #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")] |
382 | | pub client_crl_file: Option<String>, |
383 | | } |
384 | | |
385 | | /// Advanced Http configurations. These generally should not be set. |
386 | | /// For documentation on what each of these do, see the hyper documentation: |
387 | | /// See: <https://docs.rs/hyper/latest/hyper/server/conn/struct.Http.html> |
388 | | /// |
389 | | /// Note: All of these default to hyper's default values unless otherwise |
390 | | /// specified. |
391 | | #[derive(Deserialize, Debug, Default, Clone, Copy)] |
392 | | #[serde(deny_unknown_fields)] |
393 | | pub struct HttpServerConfig { |
394 | | /// Interval to send keep-alive pings via HTTP2. |
395 | | /// Note: This is in seconds. |
396 | | #[serde( |
397 | | default, |
398 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
399 | | )] |
400 | | pub http2_keep_alive_interval: Option<u32>, |
401 | | |
402 | | #[serde( |
403 | | default, |
404 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
405 | | )] |
406 | | pub experimental_http2_max_pending_accept_reset_streams: Option<u32>, |
407 | | |
408 | | #[serde( |
409 | | default, |
410 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
411 | | )] |
412 | | pub experimental_http2_initial_stream_window_size: Option<u32>, |
413 | | |
414 | | #[serde( |
415 | | default, |
416 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
417 | | )] |
418 | | pub experimental_http2_initial_connection_window_size: Option<u32>, |
419 | | |
420 | | #[serde(default)] |
421 | | pub experimental_http2_adaptive_window: Option<bool>, |
422 | | |
423 | | #[serde( |
424 | | default, |
425 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
426 | | )] |
427 | | pub experimental_http2_max_frame_size: Option<u32>, |
428 | | |
429 | | #[serde( |
430 | | default, |
431 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
432 | | )] |
433 | | pub experimental_http2_max_concurrent_streams: Option<u32>, |
434 | | |
435 | | /// Note: This is in seconds. |
436 | | #[serde( |
437 | | default, |
438 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
439 | | )] |
440 | | pub experimental_http2_keep_alive_timeout: Option<u32>, |
441 | | |
442 | | #[serde( |
443 | | default, |
444 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
445 | | )] |
446 | | pub experimental_http2_max_send_buf_size: Option<u32>, |
447 | | |
448 | | #[serde(default)] |
449 | | pub experimental_http2_enable_connect_protocol: Option<bool>, |
450 | | |
451 | | #[serde( |
452 | | default, |
453 | | deserialize_with = "convert_optional_numeric_with_shellexpand" |
454 | | )] |
455 | | pub experimental_http2_max_header_list_size: Option<u32>, |
456 | | } |
457 | | |
458 | | #[derive(Deserialize, Debug)] |
459 | | #[serde(rename_all = "snake_case")] |
460 | | pub enum ListenerConfig { |
461 | | /// Listener for HTTP/HTTPS/HTTP2 sockets. |
462 | | Http(HttpListener), |
463 | | } |
464 | | |
465 | | #[derive(Deserialize, Debug)] |
466 | | #[serde(deny_unknown_fields)] |
467 | | pub struct HttpListener { |
468 | | /// Address to listen on. Example: `127.0.0.1:8080` or `:8080` to listen |
469 | | /// to all IPs. |
470 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
471 | | pub socket_address: String, |
472 | | |
473 | | /// Data transport compression configuration to use for this service. |
474 | | #[serde(default)] |
475 | | pub compression: HttpCompressionConfig, |
476 | | |
477 | | /// Advanced Http server configuration. |
478 | | #[serde(default)] |
479 | | pub advanced_http: HttpServerConfig, |
480 | | |
481 | | /// Tls Configuration for this server. |
482 | | /// If not set, the server will not use TLS. |
483 | | /// |
484 | | /// Default: None |
485 | | #[serde(default)] |
486 | | pub tls: Option<TlsConfig>, |
487 | | } |
488 | | |
489 | | #[derive(Deserialize, Debug)] |
490 | | #[serde(deny_unknown_fields)] |
491 | | pub struct ServerConfig { |
492 | | /// Name of the server. This is used to help identify the service |
493 | | /// for telemetry and logs. |
494 | | /// |
495 | | /// Default: {index of server in config} |
496 | | #[serde(default, deserialize_with = "convert_string_with_shellexpand")] |
497 | | pub name: String, |
498 | | |
499 | | /// Configuration for the listener of this server. |
500 | | pub listener: ListenerConfig, |
501 | | |
502 | | /// Services to attach to server. |
503 | | pub services: Option<ServicesConfig>, |
504 | | |
505 | | /// The config related to identifying the client. |
506 | | /// Default: {see `IdentityHeaderSpec`} |
507 | | #[serde(default)] |
508 | | pub experimental_identity_header: IdentityHeaderSpec, |
509 | | } |
510 | | |
511 | | #[derive(Deserialize, Debug)] |
512 | | #[serde(rename_all = "snake_case")] |
513 | | pub enum WorkerProperty { |
514 | | /// List of static values. |
515 | | /// Note: Generally there should only ever be 1 value, but if the platform |
516 | | /// property key is `PropertyType::Priority` it may have more than one value. |
517 | | #[serde(deserialize_with = "convert_vec_string_with_shellexpand")] |
518 | | Values(Vec<String>), |
519 | | |
520 | | /// A dynamic configuration. The string will be executed as a command |
521 | | /// (not shell) and will be split by "\n" (new line character). |
522 | | QueryCmd(String), |
523 | | } |
524 | | |
525 | | /// Generic config for an endpoint and associated configs. |
526 | | #[derive(Deserialize, Debug, Default)] |
527 | | #[serde(deny_unknown_fields)] |
528 | | pub struct EndpointConfig { |
529 | | /// URI of the endpoint. |
530 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
531 | | pub uri: String, |
532 | | |
533 | | /// Timeout in seconds that a request should take. |
534 | | /// Default: 5 (seconds) |
535 | | pub timeout: Option<f32>, |
536 | | |
537 | | /// The TLS configuration to use to connect to the endpoint. |
538 | | pub tls_config: Option<ClientTlsConfig>, |
539 | | } |
540 | | |
541 | | #[derive(Copy, Clone, Deserialize, Debug, Default)] |
542 | | #[serde(rename_all = "snake_case")] |
543 | | pub enum UploadCacheResultsStrategy { |
544 | | /// Only upload action results with an exit code of 0. |
545 | | #[default] |
546 | | SuccessOnly, |
547 | | |
548 | | /// Don't upload any action results. |
549 | | Never, |
550 | | |
551 | | /// Upload all action results that complete. |
552 | | Everything, |
553 | | |
554 | | /// Only upload action results that fail. |
555 | | FailuresOnly, |
556 | | } |
557 | | |
558 | | #[derive(Clone, Deserialize, Debug)] |
559 | | #[serde(rename_all = "snake_case")] |
560 | | pub enum EnvironmentSource { |
561 | | /// The name of the platform property in the action to get the value from. |
562 | | Property(String), |
563 | | |
564 | | /// The raw value to set. |
565 | | Value(#[serde(deserialize_with = "convert_string_with_shellexpand")] String), |
566 | | |
567 | | /// The max amount of time in milliseconds the command is allowed to run |
568 | | /// (requested by the client). |
569 | | TimeoutMillis, |
570 | | |
571 | | /// A special file path will be provided that can be used to communicate |
572 | | /// with the parent process about out-of-band information. This file |
573 | | /// will be read after the command has finished executing. Based on the |
574 | | /// contents of the file, the behavior of the result may be modified. |
575 | | /// |
576 | | /// The format of the file contents should be json with the following |
577 | | /// schema: |
578 | | /// { |
579 | | /// // If set the command will be considered a failure. |
580 | | /// // May be one of the following static strings: |
581 | | /// // "timeout": Will Consider this task to be a timeout. |
582 | | /// "failure": "timeout", |
583 | | /// } |
584 | | /// |
585 | | /// All fields are optional, file does not need to be created and may be |
586 | | /// empty. |
587 | | SideChannelFile, |
588 | | |
589 | | /// A "root" directory for the action. This directory can be used to |
590 | | /// store temporary files that are not needed after the action has |
591 | | /// completed. This directory will be purged after the action has |
592 | | /// completed. |
593 | | /// |
594 | | /// For example: |
595 | | /// If an action writes temporary data to a path but nativelink should |
596 | | /// clean up this path after the job has executed, you may create any |
597 | | /// directory under the path provided in this variable. A common pattern |
598 | | /// would be to use `entrypoint` to set a shell script that reads this |
599 | | /// variable, `mkdir $ENV_VAR_NAME/tmp` and `export TMPDIR=$ENV_VAR_NAME/tmp`. |
600 | | /// Another example might be to bind-mount the `/tmp` path in a container to |
601 | | /// this path in `entrypoint`. |
602 | | ActionDirectory, |
603 | | } |
604 | | |
605 | | #[derive(Deserialize, Debug, Default)] |
606 | | #[serde(deny_unknown_fields)] |
607 | | pub struct UploadActionResultConfig { |
608 | | /// Underlying AC store that the worker will use to publish execution results |
609 | | /// into. Objects placed in this store should be reachable from the |
610 | | /// scheduler/client-cas after they have finished updating. |
611 | | /// Default: {No uploading is done} |
612 | | pub ac_store: Option<StoreRefName>, |
613 | | |
614 | | /// In which situations should the results be published to the `ac_store`, |
615 | | /// if set to `SuccessOnly` then only results with an exit code of 0 will be |
616 | | /// uploaded, if set to Everything all completed results will be uploaded. |
617 | | /// |
618 | | /// Default: `UploadCacheResultsStrategy::SuccessOnly` |
619 | | #[serde(default)] |
620 | | pub upload_ac_results_strategy: UploadCacheResultsStrategy, |
621 | | |
622 | | /// Store to upload historical results to. This should be a CAS store if set. |
623 | | /// |
624 | | /// Default: {CAS store of parent} |
625 | | pub historical_results_store: Option<StoreRefName>, |
626 | | |
627 | | /// In which situations should the results be published to the historical CAS. |
628 | | /// The historical CAS is where failures are published. These messages conform |
629 | | /// to the CAS key-value lookup format and are always a `HistoricalExecuteResponse` |
630 | | /// serialized message. |
631 | | /// |
632 | | /// Default: `UploadCacheResultsStrategy::FailuresOnly` |
633 | | #[serde(default)] |
634 | | pub upload_historical_results_strategy: Option<UploadCacheResultsStrategy>, |
635 | | |
636 | | /// Template to use for the `ExecuteResponse.message` property. This message |
637 | | /// is attached to the response before it is sent to the client. The following |
638 | | /// special variables are supported: |
639 | | /// - `digest_function`: Digest function used to calculate the action digest. |
640 | | /// - `action_digest_hash`: Action digest hash. |
641 | | /// - `action_digest_size`: Action digest size. |
642 | | /// - `historical_results_hash`: `HistoricalExecuteResponse` digest hash. |
643 | | /// - `historical_results_size`: `HistoricalExecuteResponse` digest size. |
644 | | /// |
645 | | /// A common use case of this is to provide a link to the web page that |
646 | | /// contains more useful information for the user. |
647 | | /// |
648 | | /// An example that is fully compatible with `bb_browser` is: |
649 | | /// <https://example.com/my-instance-name-here/blobs/{digest_function}/action/{action_digest_hash}-{action_digest_size}/> |
650 | | /// |
651 | | /// Default: "" (no message) |
652 | | #[serde(default, deserialize_with = "convert_string_with_shellexpand")] |
653 | | pub success_message_template: String, |
654 | | |
655 | | /// Same as `success_message_template` but for failure case. |
656 | | /// |
657 | | /// An example that is fully compatible with `bb_browser` is: |
658 | | /// <https://example.com/my-instance-name-here/blobs/{digest_function}/historical_execute_response/{historical_results_hash}-{historical_results_size}/> |
659 | | /// |
660 | | /// Default: "" (no message) |
661 | | #[serde(default, deserialize_with = "convert_string_with_shellexpand")] |
662 | | pub failure_message_template: String, |
663 | | } |
664 | | |
665 | | #[derive(Deserialize, Debug, Default)] |
666 | | #[serde(deny_unknown_fields)] |
667 | | pub struct LocalWorkerConfig { |
668 | | /// Name of the worker. This gives a more friendly name to a worker for logging |
669 | | /// and metric publishing. This is also the prefix of the worker id |
670 | | /// (ie: "{name}{uuidv6}"). |
671 | | /// Default: {Index position in the workers list} |
672 | | #[serde(default, deserialize_with = "convert_string_with_shellexpand")] |
673 | | pub name: String, |
674 | | |
675 | | /// Endpoint which the worker will connect to the scheduler's `WorkerApiService`. |
676 | | pub worker_api_endpoint: EndpointConfig, |
677 | | |
678 | | /// The maximum time an action is allowed to run. If a task requests for a timeout |
679 | | /// longer than this time limit, the task will be rejected. Value in seconds. |
680 | | /// |
681 | | /// Default: 1200 (seconds / 20 mins) |
682 | | #[serde(default, deserialize_with = "convert_duration_with_shellexpand")] |
683 | | pub max_action_timeout: usize, |
684 | | |
685 | | /// If timeout is handled in `entrypoint` or another wrapper script. |
686 | | /// If set to true `NativeLink` will not honor the timeout the action requested |
687 | | /// and instead will always force kill the action after `max_action_timeout` |
688 | | /// has been reached. If this is set to false, the smaller value of the action's |
689 | | /// timeout and `max_action_timeout` will be used to which `NativeLink` will kill |
690 | | /// the action. |
691 | | /// |
692 | | /// The real timeout can be received via an environment variable set in: |
693 | | /// `EnvironmentSource::TimeoutMillis`. |
694 | | /// |
695 | | /// Example on where this is useful: `entrypoint` launches the action inside |
696 | | /// a docker container, but the docker container may need to be downloaded. Thus |
697 | | /// the timer should not start until the docker container has started executing |
698 | | /// the action. In this case, action will likely be wrapped in another program, |
699 | | /// like `timeout` and propagate timeouts via `EnvironmentSource::SideChannelFile`. |
700 | | /// |
701 | | /// Default: false (`NativeLink` fully handles timeouts) |
702 | | #[serde(default)] |
703 | | pub timeout_handled_externally: bool, |
704 | | |
705 | | /// The command to execute on every execution request. This will be parsed as |
706 | | /// a command + arguments (not shell). |
707 | | /// Example: "run.sh" and a job with command: "sleep 5" will result in a |
708 | | /// command like: "run.sh sleep 5". |
709 | | /// Default: {Use the command from the job request}. |
710 | | #[serde(default, deserialize_with = "convert_string_with_shellexpand")] |
711 | | pub entrypoint: String, |
712 | | |
713 | | /// An optional script to run before every action is processed on the worker. |
714 | | /// The value should be the full path to the script to execute and will pause |
715 | | /// all actions on the worker if it returns an exit code other than 0. |
716 | | /// If not set, then the worker will never pause and will continue to accept |
717 | | /// jobs according to the scheduler configuration. |
718 | | /// This is useful, for example, if the worker should not take any more |
719 | | /// actions until there is enough resource available on the machine to |
720 | | /// handle them. |
721 | | pub experimental_precondition_script: Option<String>, |
722 | | |
723 | | /// Underlying CAS store that the worker will use to download CAS artifacts. |
724 | | /// This store must be a `FastSlowStore`. The `fast` store must be a |
725 | | /// `FileSystemStore` because it will use hardlinks when building out the files |
726 | | /// instead of copying the files. The slow store must eventually resolve to the |
727 | | /// same store the scheduler/client uses to send job requests. |
728 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
729 | | pub cas_fast_slow_store: StoreRefName, |
730 | | |
731 | | /// Configuration for uploading action results. |
732 | | #[serde(default)] |
733 | | pub upload_action_result: UploadActionResultConfig, |
734 | | |
735 | | /// The directory work jobs will be executed from. This directory will be fully |
736 | | /// managed by the worker service and will be purged on startup. |
737 | | /// This directory and the directory referenced in `local_filesystem_store_ref`'s |
738 | | /// `stores::FilesystemStore::content_path` must be on the same filesystem. |
739 | | /// Hardlinks will be used when placing files that are accessible to the jobs |
740 | | /// that are sourced from `local_filesystem_store_ref`'s `content_path`. |
741 | | #[serde(deserialize_with = "convert_string_with_shellexpand")] |
742 | | pub work_directory: String, |
743 | | |
744 | | /// Properties of this worker. This configuration will be sent to the scheduler |
745 | | /// and used to tell the scheduler to restrict what should be executed on this |
746 | | /// worker. |
747 | | pub platform_properties: HashMap<String, WorkerProperty>, |
748 | | |
749 | | /// An optional mapping of environment names to set for the execution |
750 | | /// as well as those specified in the action itself. If set, will set each |
751 | | /// key as an environment variable before executing the job with the value |
752 | | /// of the environment variable being the value of the property of the |
753 | | /// action being executed of that name or the fixed value. |
754 | | pub additional_environment: Option<HashMap<String, EnvironmentSource>>, |
755 | | } |
756 | | |
/// The kind of worker this process should run. Deserialized from the
/// `workers` list in [`CasConfig`]; variant names are snake_cased in the
/// config file (e.g. `local`).
#[derive(Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
pub enum WorkerConfig {
    /// A worker type that executes jobs locally on this machine.
    Local(LocalWorkerConfig),
}
763 | | |
/// Process-wide settings that apply to all modules/services in this
/// instance. Referenced from [`CasConfig::global`]. Unknown keys are
/// rejected at deserialization time (`deny_unknown_fields`).
#[derive(Deserialize, Debug, Clone, Copy)]
#[serde(deny_unknown_fields)]
pub struct GlobalConfig {
    /// Maximum number of open files that can be opened at one time.
    /// This value is not strictly enforced, it is a best effort. Some internal libraries
    /// open files or read metadata from a files which do not obey this limit, however
    /// the vast majority of cases will have this limit be honored.
    /// This value must be larger than `ulimit -n` to have any effect.
    /// Any network open file descriptors is not counted in this limit, but is counted
    /// in the kernel limit. It is a good idea to set a very large `ulimit -n`.
    /// Note: This value must be greater than 10.
    ///
    /// Default: 24576 (= 24 * 1024)
    #[serde(deserialize_with = "convert_numeric_with_shellexpand")]
    pub max_open_files: usize,

    /// Default hash function to use while uploading blobs to the CAS when not set
    /// by client.
    ///
    /// Default: `ConfigDigestHashFunction::sha256`
    pub default_digest_hash_function: Option<ConfigDigestHashFunction>,

    /// Default digest size to use for health check when running
    /// diagnostics checks. Health checks are expected to use this
    /// size for filling a buffer that is used for creation of
    /// digest.
    ///
    /// Default: 1024*1024 (1MiB)
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
    pub default_digest_size_health_check: usize,
}
795 | | |
/// A store specification paired with the name used to reference it from
/// other parts of the config.
pub type StoreConfig = NamedConfig<StoreSpec>;
/// A scheduler specification paired with the name used to reference it
/// from other parts of the config.
pub type SchedulerConfig = NamedConfig<SchedulerSpec>;
798 | | |
/// Top-level configuration for a `NativeLink` process: the stores,
/// workers, schedulers and servers it should run. This is the root type
/// deserialized from the json5 config file (see
/// [`CasConfig::try_from_json5_file`]). Unknown keys are rejected
/// (`deny_unknown_fields`).
#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct CasConfig {
    /// List of stores available to use in this config.
    /// The keys can be used in other configs when needing to reference a store.
    pub stores: Vec<StoreConfig>,

    /// Worker configurations used to execute jobs.
    pub workers: Option<Vec<WorkerConfig>>,

    /// List of schedulers available to use in this config.
    /// The keys can be used in other configs when needing to reference a
    /// scheduler.
    pub schedulers: Option<Vec<SchedulerConfig>>,

    /// Servers to setup for this process.
    pub servers: Vec<ServerConfig>,

    /// Experimental - Origin events configuration. This is the service that will
    /// collect and publish nativelink events to a store for processing by an
    /// external service.
    pub experimental_origin_events: Option<OriginEventsSpec>,

    /// Any global configurations that apply to all modules live here.
    pub global: Option<GlobalConfig>,
}
825 | | |
826 | | impl CasConfig { |
827 | | /// # Errors |
828 | | /// |
829 | | /// Will return `Err` if we can't load the file. |
830 | 6 | pub fn try_from_json5_file(config_file: &str) -> Result<Self, Error> { |
831 | 6 | let json_contents = std::fs::read_to_string(config_file) |
832 | 6 | .err_tip(|| format!("Could not open config file {config_file}"0 ))?0 ; |
833 | 6 | Ok(serde_json5::from_str(&json_contents)?0 ) |
834 | 6 | } |
835 | | } |