/build/source/nativelink-config/src/cas_server.rs
// Copyright 2024 The NativeLink Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

use serde::Deserialize;

use crate::schedulers::SchedulerSpec;
use crate::serde_utils::{
    convert_data_size_with_shellexpand, convert_duration_with_shellexpand,
    convert_numeric_with_shellexpand, convert_optional_numeric_with_shellexpand,
    convert_optional_string_with_shellexpand, convert_string_with_shellexpand,
    convert_vec_string_with_shellexpand,
};
use crate::stores::{ClientTlsConfig, ConfigDigestHashFunction, StoreRefName, StoreSpec};

/// Name of the scheduler. This type is used when referencing a
/// scheduler by name in `CasConfig::schedulers`.
pub type SchedulerRefName = String;

/// Used when the config references `instance_name` in the protocol.
pub type InstanceName = String;

#[derive(Debug, Default, Clone, PartialEq, Eq, Deserialize)]
pub struct WithInstanceName<T> {
    #[serde(default)]
    pub instance_name: InstanceName,
    #[serde(flatten)]
    pub config: T,
}

impl<T> core::ops::Deref for WithInstanceName<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.config
    }
}
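
// Illustrative sketch (not from the shipped code): because `config` is
// `#[serde(flatten)]`ed and `instance_name` defaults to an empty string, a
// service entry is written as one flat JSON object, and the inner config's
// fields are reachable through `Deref`. Assuming `serde_json` is available
// (e.g. as a dev-dependency) and using the `CasStoreConfig` type defined
// further below:
//
//     let entry: WithInstanceName<CasStoreConfig> = serde_json::from_str(
//         r#"{ "instance_name": "main", "cas_store": "CAS_MAIN_STORE" }"#,
//     ).expect("valid config");
//     assert_eq!(entry.instance_name, "main");
//     assert_eq!(entry.cas_store, "CAS_MAIN_STORE"); // field access via Deref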

#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
pub struct NamedConfig<Spec> {
    pub name: String,
    #[serde(flatten)]
    pub spec: Spec,
}

#[derive(Deserialize, Debug, Default, Clone, Copy)]
#[serde(rename_all = "snake_case")]
pub enum HttpCompressionAlgorithm {
    /// No compression.
    #[default]
    None,

    /// Gzip compression.
    Gzip,
}

/// Note: Compressing data in the cloud rarely has a benefit, since most
/// cloud providers have very high bandwidth backplanes. However, for
/// clients not inside the data center, it might be a good idea to
/// compress data to and from the cloud. This will, however, come at a
/// high CPU and performance cost. If remote execution shares the same
/// CAS/AC servers as the clients' remote cache, you can create multiple
/// services with different compression settings that are served on
/// different ports, then configure the non-cloud clients to use one port
/// and the cloud clients to use another.
#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct HttpCompressionConfig {
    /// The compression algorithm that the server will use when sending
    /// responses to clients. Enabling this will likely save a lot of
    /// data transfer, but will consume a lot of CPU and add a lot of
    /// latency.
    /// See: <https://github.com/tracemachina/nativelink/issues/109>
    ///
    /// Default: `HttpCompressionAlgorithm::None`
    pub send_compression_algorithm: Option<HttpCompressionAlgorithm>,

    /// The compression algorithms that the server will accept from clients.
    /// The server will advertise the supported compression algorithms to
    /// clients, and the client will choose which compression algorithm to
    /// use. Enabling this will likely save a lot of data transfer, but
    /// will consume a lot of CPU and add a lot of latency.
    /// See: <https://github.com/tracemachina/nativelink/issues/109>
    ///
    /// Default: {no supported compression}
    pub accepted_compression_algorithms: Vec<HttpCompressionAlgorithm>,
}
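
// Illustrative JSON shape for this struct, as it would appear under the
// `compression` field of the HTTP listener defined later in this file
// (enum values follow the snake_case renaming above):
//
//     "compression": {
//         "send_compression_algorithm": "gzip",
//         "accepted_compression_algorithms": ["gzip"]
//     }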

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct AcStoreConfig {
    /// The store name referenced in the `stores` list in the main config.
    /// The store name referenced here may be reused multiple times.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub ac_store: StoreRefName,

    /// Whether the Action Cache store may be written to. If set to false,
    /// it is only possible to read from the Action Cache.
    #[serde(default)]
    pub read_only: bool,
}
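
// Illustrative `services.ac` entry (flattened together with `WithInstanceName`);
// "AC_MAIN_STORE" is a placeholder that must name a store in the top-level
// `stores` list:
//
//     { "instance_name": "main", "ac_store": "AC_MAIN_STORE", "read_only": false }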

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct CasStoreConfig {
    /// The store name referenced in the `stores` list in the main config.
    /// The store name referenced here may be reused multiple times.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub cas_store: StoreRefName,
}

#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct CapabilitiesRemoteExecutionConfig {
    /// Scheduler used to configure the capabilities of remote execution.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub scheduler: SchedulerRefName,
}

#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct CapabilitiesConfig {
    /// Configuration for remote execution capabilities.
    /// If not set, the capabilities service will inform the client that remote
    /// execution is not supported.
    pub remote_execution: Option<CapabilitiesRemoteExecutionConfig>,
}

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct ExecutionConfig {
    /// The store name referenced in the `stores` list in the main config.
    /// The store name referenced here may be reused multiple times.
    /// This value must be a CAS store reference.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub cas_store: StoreRefName,

    /// The scheduler name referenced in the `schedulers` list in the main config.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub scheduler: SchedulerRefName,
}
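
// Illustrative `services.execution` entry; "CAS_MAIN_STORE" and
// "MAIN_SCHEDULER" are placeholders that must match entries in the top-level
// `stores` and `schedulers` lists:
//
//     {
//         "instance_name": "main",
//         "cas_store": "CAS_MAIN_STORE",
//         "scheduler": "MAIN_SCHEDULER"
//     }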

#[derive(Deserialize, Debug, Clone, Copy)]
#[serde(deny_unknown_fields)]
pub struct FetchConfig {}

#[derive(Deserialize, Debug, Clone, Copy)]
#[serde(deny_unknown_fields)]
pub struct PushConfig {}

#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct ByteStreamConfig {
    /// Mapping of instance names to store names in the `stores` configuration.
    pub cas_stores: HashMap<InstanceName, StoreRefName>,

    /// Max number of bytes to send on each gRPC stream chunk.
    /// According to <https://github.com/grpc/grpc.github.io/issues/371>,
    /// 16KiB - 64KiB is optimal.
    ///
    /// Default: 64KiB
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
    pub max_bytes_per_stream: usize,

    /// Maximum number of bytes to decode on each gRPC stream chunk.
    /// Default: 4 MiB
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
    pub max_decoding_message_size: usize,

    /// In the event a client disconnects while uploading a blob, we will hold
    /// the internal stream open for this many seconds before closing it.
    /// This allows clients that disconnect to reconnect and continue uploading
    /// the same blob.
    ///
    /// Default: 10 (seconds)
    #[serde(default, deserialize_with = "convert_duration_with_shellexpand")]
    pub persist_stream_on_disconnect_timeout: usize,
}
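
// Illustrative `services.bytestream` block; `cas_stores` maps protocol
// instance names to store names (the numeric values shown equal the
// documented defaults and may be omitted):
//
//     "bytestream": {
//         "cas_stores": { "main": "CAS_MAIN_STORE" },
//         "max_bytes_per_stream": 65536,
//         "max_decoding_message_size": 4194304
//     }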

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct WorkerApiConfig {
    /// The scheduler name referenced in the `schedulers` list in the main config.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub scheduler: SchedulerRefName,
}

#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct AdminConfig {
    /// Path to register the admin API. If path is "/admin", and your
    /// domain is "example.com", you can reach the endpoint with:
    /// <http://example.com/admin>.
    ///
    /// Default: "/admin"
    #[serde(default)]
    pub path: String,
}

#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct HealthConfig {
    /// Path to register the health status check. If path is "/status", and your
    /// domain is "example.com", you can reach the endpoint with:
    /// <http://example.com/status>.
    ///
    /// Default: "/status"
    #[serde(default)]
    pub path: String,
}

#[derive(Deserialize, Debug)]
pub struct BepConfig {
    /// The store to publish build events to.
    /// The store name referenced in the `stores` list in the main config.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub store: StoreRefName,
}

#[derive(Deserialize, Clone, Debug, Default)]
pub struct IdentityHeaderSpec {
    /// The name of the header to look for the identity in.
    /// Default: "x-identity"
    #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")]
    pub header_name: Option<String>,

    /// Whether the header is required to be set; if it is required and
    /// missing, the request will fail.
    #[serde(default)]
    pub required: bool,
}

#[derive(Deserialize, Clone, Debug)]
pub struct OriginEventsPublisherSpec {
    /// The store to publish nativelink events to.
    /// The store name referenced in the `stores` list in the main config.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub store: StoreRefName,
}

#[derive(Deserialize, Clone, Debug)]
pub struct OriginEventsSpec {
    /// The publisher configuration for origin events.
    pub publisher: OriginEventsPublisherSpec,

    /// The maximum number of events to queue before applying back pressure.
    /// IMPORTANT: Backpressure causes all clients to slow down significantly.
    ///
    /// Default: 65536 (a value of zero uses this default)
    #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")]
    pub max_event_queue_size: usize,
}

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct ServicesConfig {
    /// The Content Addressable Storage (CAS) backend config.
    /// Each entry specifies the `instance_name` used in the protocol and
    /// the underlying CAS store config.
    #[serde(
        default,
        deserialize_with = "super::backcompat::opt_vec_with_instance_name"
    )]
    pub cas: Option<Vec<WithInstanceName<CasStoreConfig>>>,

    /// The Action Cache (AC) backend config.
    /// Each entry specifies the `instance_name` used in the protocol and
    /// the underlying AC store config.
    #[serde(
        default,
        deserialize_with = "super::backcompat::opt_vec_with_instance_name"
    )]
    pub ac: Option<Vec<WithInstanceName<AcStoreConfig>>>,

    /// The capabilities service is required in order to use most of the
    /// Bazel protocol. This service provides the supported features and
    /// versions of this Bazel gRPC service.
    #[serde(
        default,
        deserialize_with = "super::backcompat::opt_vec_with_instance_name"
    )]
    pub capabilities: Option<Vec<WithInstanceName<CapabilitiesConfig>>>,

    /// The remote execution service configuration.
    /// NOTE: This service is under development and is currently just a
    /// placeholder.
    #[serde(
        default,
        deserialize_with = "super::backcompat::opt_vec_with_instance_name"
    )]
    pub execution: Option<Vec<WithInstanceName<ExecutionConfig>>>,

    /// This is the service used to stream data to and from the CAS.
    /// Bazel's protocol strongly encourages users to use this streaming
    /// interface to interact with the CAS when the data is large.
    pub bytestream: Option<ByteStreamConfig>,

    /// These two services collectively make up the Remote Asset protocol,
    /// but it is defined as two separate services.
    pub fetch: Option<FetchConfig>,
    pub push: Option<PushConfig>,

    /// This is the service that workers connect to and communicate through.
    /// NOTE: This service should be served on a different, non-public port.
    /// In other words, the `worker_api` configuration should not share a
    /// port with any other services. Doing so is a security risk, as
    /// workers have a different permission set than a client that makes
    /// the remote execution/cache requests.
    pub worker_api: Option<WorkerApiConfig>,

    /// Experimental - Build Event Protocol (BEP) configuration. This is
    /// the service that will consume build events from the client and
    /// publish them to a store for processing by an external service.
    pub experimental_bep: Option<BepConfig>,

    /// This is the service for any administrative tasks.
    /// It provides a REST API endpoint for administrative purposes.
    pub admin: Option<AdminConfig>,

    /// This is the service for health status checks.
    pub health: Option<HealthConfig>,
}
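
// Putting the pieces together, an illustrative `services` block for a single
// "main" instance might look like the following (all store and scheduler
// names are placeholders that must exist in the top-level `stores` and
// `schedulers` lists):
//
//     "services": {
//         "cas": [{ "instance_name": "main", "cas_store": "CAS_MAIN_STORE" }],
//         "ac": [{ "instance_name": "main", "ac_store": "AC_MAIN_STORE" }],
//         "capabilities": [{
//             "instance_name": "main",
//             "remote_execution": { "scheduler": "MAIN_SCHEDULER" }
//         }],
//         "execution": [{
//             "instance_name": "main",
//             "cas_store": "CAS_MAIN_STORE",
//             "scheduler": "MAIN_SCHEDULER"
//         }],
//         "bytestream": { "cas_stores": { "main": "CAS_MAIN_STORE" } },
//         "health": {}
//     }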

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct TlsConfig {
    /// Path to the certificate file.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub cert_file: String,

    /// Path to the private key file.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub key_file: String,

    /// Path to the certificate authority for mTLS, if client authentication is
    /// required for this endpoint.
    #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")]
    pub client_ca_file: Option<String>,

    /// Path to the certificate revocation list for mTLS, if client
    /// authentication is required for this endpoint.
    #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")]
    pub client_crl_file: Option<String>,
}
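
// Illustrative `tls` block for an HTTP listener; the paths are placeholders.
// Because these fields go through shell expansion, environment variables such
// as "$HOME" may also be used:
//
//     "tls": {
//         "cert_file": "/etc/nativelink/tls/server.crt",
//         "key_file": "/etc/nativelink/tls/server.key"
//     }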

/// Advanced HTTP configuration. These options generally should not be set.
/// For documentation on what each of these does, see the hyper documentation:
/// <https://docs.rs/hyper/latest/hyper/server/conn/struct.Http.html>
///
/// Note: All of these default to hyper's default values unless otherwise
/// specified.
#[derive(Deserialize, Debug, Default, Clone, Copy)]
#[serde(deny_unknown_fields)]
pub struct HttpServerConfig {
    /// Interval to send keep-alive pings via HTTP2.
    /// Note: This is in seconds.
    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub http2_keep_alive_interval: Option<u32>,

    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_max_pending_accept_reset_streams: Option<u32>,

    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_initial_stream_window_size: Option<u32>,

    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_initial_connection_window_size: Option<u32>,

    #[serde(default)]
    pub experimental_http2_adaptive_window: Option<bool>,

    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_max_frame_size: Option<u32>,

    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_max_concurrent_streams: Option<u32>,

    /// Note: This is in seconds.
    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_keep_alive_timeout: Option<u32>,

    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_max_send_buf_size: Option<u32>,

    #[serde(default)]
    pub experimental_http2_enable_connect_protocol: Option<bool>,

    #[serde(
        default,
        deserialize_with = "convert_optional_numeric_with_shellexpand"
    )]
    pub experimental_http2_max_header_list_size: Option<u32>,
}
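
// Illustrative `advanced_http` block; only set these when hyper's HTTP/2
// behavior needs tuning (the values shown are arbitrary, both in seconds):
//
//     "advanced_http": {
//         "http2_keep_alive_interval": 30,
//         "experimental_http2_keep_alive_timeout": 10
//     }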

#[derive(Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
pub enum ListenerConfig {
    /// Listener for HTTP/HTTPS/HTTP2 sockets.
    Http(HttpListener),
}

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct HttpListener {
    /// Address to listen on. Example: `127.0.0.1:8080` or `:8080` to listen
    /// on all IPs.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub socket_address: String,

    /// Data transport compression configuration to use for this service.
    #[serde(default)]
    pub compression: HttpCompressionConfig,

    /// Advanced HTTP server configuration.
    #[serde(default)]
    pub advanced_http: HttpServerConfig,

    /// TLS configuration for this server.
    /// If not set, the server will not use TLS.
    ///
    /// Default: None
    #[serde(default)]
    pub tls: Option<TlsConfig>,
}
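
// Illustrative `listener` block for a server entry; because `ListenerConfig`
// is snake_cased, the HTTP listener lives under an "http" key:
//
//     "listener": {
//         "http": {
//             "socket_address": "0.0.0.0:50051",
//             "compression": { "accepted_compression_algorithms": ["gzip"] }
//         }
//     }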

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct ServerConfig {
    /// Name of the server. This is used to help identify the service
    /// for telemetry and logs.
    ///
    /// Default: {index of server in config}
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
    pub name: String,

    /// The listener configuration for this server.
    pub listener: ListenerConfig,

    /// Services to attach to server.
    pub services: Option<ServicesConfig>,

    /// The config related to identifying the client.
    /// Default: {see `IdentityHeaderSpec`}
    #[serde(default)]
    pub experimental_identity_header: IdentityHeaderSpec,
}

#[derive(Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
pub enum WorkerProperty {
    /// List of static values.
    /// Note: Generally there should only ever be 1 value, but if the platform
    /// property key is `PropertyType::Priority` it may have more than one value.
    #[serde(deserialize_with = "convert_vec_string_with_shellexpand")]
    Values(Vec<String>),

    /// A dynamic configuration. The string will be executed as a command
    /// (not shell) and will be split by "\n" (new line character).
    QueryCmd(String),
}
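
// Illustrative `platform_properties` for a worker; static values use the
// "values" key and dynamically computed ones use "query_cmd" (the variant
// names are snake_cased). The property names here are placeholders:
//
//     "platform_properties": {
//         "OSFamily": { "values": ["linux"] },
//         "cpu_count": { "query_cmd": "nproc" }
//     }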

/// Generic config for an endpoint and associated configs.
#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct EndpointConfig {
    /// URI of the endpoint.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub uri: String,

    /// Timeout in seconds that a request is allowed to take.
    /// Default: 5 (seconds)
    pub timeout: Option<f32>,

    /// The TLS configuration to use to connect to the endpoint.
    pub tls_config: Option<ClientTlsConfig>,
}
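
// Illustrative `worker_api_endpoint` for a local worker; the URI is a
// placeholder and assumes the scheduler's `WorkerApiService` is reachable at
// that address:
//
//     "worker_api_endpoint": {
//         "uri": "grpc://127.0.0.1:50061",
//         "timeout": 5.0
//     }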

#[derive(Copy, Clone, Deserialize, Debug, Default)]
#[serde(rename_all = "snake_case")]
pub enum UploadCacheResultsStrategy {
    /// Only upload action results with an exit code of 0.
    #[default]
    SuccessOnly,

    /// Don't upload any action results.
    Never,

    /// Upload all action results that complete.
    Everything,

    /// Only upload action results that fail.
    FailuresOnly,
}

#[derive(Clone, Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
pub enum EnvironmentSource {
    /// The name of the platform property in the action to get the value from.
    Property(String),

    /// The raw value to set.
    Value(#[serde(deserialize_with = "convert_string_with_shellexpand")] String),

    /// The max amount of time in milliseconds the command is allowed to run
    /// (requested by the client).
    TimeoutMillis,

    /// A special file path will be provided that can be used to communicate
    /// with the parent process about out-of-band information. This file
    /// will be read after the command has finished executing. Based on the
    /// contents of the file, the behavior of the result may be modified.
    ///
    /// The format of the file contents should be JSON with the following
    /// schema:
    /// {
    ///   // If set, the command will be considered a failure.
    ///   // May be one of the following static strings:
    ///   // "timeout": Will consider this task to be a timeout.
    ///   "failure": "timeout",
    /// }
    ///
    /// All fields are optional; the file does not need to be created and may
    /// be empty.
    SideChannelFile,

    /// A "root" directory for the action. This directory can be used to
    /// store temporary files that are not needed after the action has
    /// completed. This directory will be purged after the action has
    /// completed.
    ///
    /// For example:
    /// If an action writes temporary data to a path but nativelink should
    /// clean up this path after the job has executed, you may create any
    /// directory under the path provided in this variable. A common pattern
    /// would be to use `entrypoint` to set a shell script that reads this
    /// variable, `mkdir $ENV_VAR_NAME/tmp` and `export TMPDIR=$ENV_VAR_NAME/tmp`.
    /// Another example might be to bind-mount the `/tmp` path in a container to
    /// this path in `entrypoint`.
    ActionDirectory,
}
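
// Illustrative `additional_environment` map for a local worker; tagged
// variants take an object, while unit variants are written as plain strings
// (all variant names snake_cased). The variable names are placeholders:
//
//     "additional_environment": {
//         "JOB_PRIORITY": { "property": "priority" },
//         "RUST_LOG": { "value": "info" },
//         "ACTION_TIMEOUT_MS": "timeout_millis",
//         "SIDE_CHANNEL_FILE": "side_channel_file",
//         "ACTION_ROOT": "action_directory"
//     }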

#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct UploadActionResultConfig {
    /// Underlying AC store that the worker will use to publish execution results
    /// into. Objects placed in this store should be reachable from the
    /// scheduler/client-cas after they have finished updating.
    /// Default: {No uploading is done}
    pub ac_store: Option<StoreRefName>,

    /// In which situations the results should be published to the `ac_store`.
    /// If set to `SuccessOnly`, only results with an exit code of 0 will be
    /// uploaded; if set to `Everything`, all completed results will be uploaded.
    ///
    /// Default: `UploadCacheResultsStrategy::SuccessOnly`
    #[serde(default)]
    pub upload_ac_results_strategy: UploadCacheResultsStrategy,

    /// Store to upload historical results to. This should be a CAS store if set.
    ///
    /// Default: {CAS store of parent}
    pub historical_results_store: Option<StoreRefName>,

    /// In which situations the results should be published to the historical CAS.
    /// The historical CAS is where failures are published. These messages conform
    /// to the CAS key-value lookup format and are always a `HistoricalExecuteResponse`
    /// serialized message.
    ///
    /// Default: `UploadCacheResultsStrategy::FailuresOnly`
    #[serde(default)]
    pub upload_historical_results_strategy: Option<UploadCacheResultsStrategy>,

    /// Template to use for the `ExecuteResponse.message` property. This message
    /// is attached to the response before it is sent to the client. The following
    /// special variables are supported:
    /// - `digest_function`: Digest function used to calculate the action digest.
    /// - `action_digest_hash`: Action digest hash.
    /// - `action_digest_size`: Action digest size.
    /// - `historical_results_hash`: `HistoricalExecuteResponse` digest hash.
    /// - `historical_results_size`: `HistoricalExecuteResponse` digest size.
    ///
    /// A common use case of this is to provide a link to the web page that
    /// contains more useful information for the user.
    ///
    /// An example that is fully compatible with `bb_browser` is:
    /// <https://example.com/my-instance-name-here/blobs/{digest_function}/action/{action_digest_hash}-{action_digest_size}/>
    ///
    /// Default: "" (no message)
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
    pub success_message_template: String,

    /// Same as `success_message_template` but for the failure case.
    ///
    /// An example that is fully compatible with `bb_browser` is:
    /// <https://example.com/my-instance-name-here/blobs/{digest_function}/historical_execute_response/{historical_results_hash}-{historical_results_size}/>
    ///
    /// Default: "" (no message)
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
    pub failure_message_template: String,
}
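
// Illustrative `upload_action_result` block; the store name is a placeholder
// and the strategy strings are the snake_cased variants of
// `UploadCacheResultsStrategy`:
//
//     "upload_action_result": {
//         "ac_store": "AC_MAIN_STORE",
//         "upload_ac_results_strategy": "success_only",
//         "upload_historical_results_strategy": "failures_only"
//     }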

#[derive(Deserialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct LocalWorkerConfig {
    /// Name of the worker. This gives a more friendly name to a worker for logging
    /// and metric publishing. This is also the prefix of the worker id
    /// (i.e. "{name}{uuidv6}").
    /// Default: {Index position in the workers list}
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
    pub name: String,

    /// Endpoint through which the worker will connect to the scheduler's
    /// `WorkerApiService`.
    pub worker_api_endpoint: EndpointConfig,

    /// The maximum time an action is allowed to run. If a task requests a timeout
    /// longer than this time limit, the task will be rejected. Value in seconds.
    ///
    /// Default: 1200 (seconds / 20 mins)
    #[serde(default, deserialize_with = "convert_duration_with_shellexpand")]
    pub max_action_timeout: usize,

    /// Whether the timeout is handled in `entrypoint` or another wrapper script.
    /// If set to true, `NativeLink` will not honor the timeout the action requested
    /// and will instead always force-kill the action after `max_action_timeout`
    /// has been reached. If set to false, the smaller of the action's timeout and
    /// `max_action_timeout` will be used, after which `NativeLink` will kill
    /// the action.
    ///
    /// The real timeout can be received via an environment variable set in:
    /// `EnvironmentSource::TimeoutMillis`.
    ///
    /// Example of where this is useful: `entrypoint` launches the action inside
    /// a docker container, but the docker container may need to be downloaded. Thus
    /// the timer should not start until the docker container has started executing
    /// the action. In this case, the action will likely be wrapped in another program,
    /// like `timeout`, and propagate timeouts via `EnvironmentSource::SideChannelFile`.
    ///
    /// Default: false (`NativeLink` fully handles timeouts)
    #[serde(default)]
    pub timeout_handled_externally: bool,

    /// The command to execute on every execution request. This will be parsed as
    /// a command + arguments (not shell).
    /// Example: "run.sh" and a job with command: "sleep 5" will result in a
    /// command like: "run.sh sleep 5".
    /// Default: {Use the command from the job request}.
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
    pub entrypoint: String,

    /// An optional script to run before every action is processed on the worker.
    /// The value should be the full path to the script to execute and will pause
    /// all actions on the worker if it returns an exit code other than 0.
    /// If not set, then the worker will never pause and will continue to accept
    /// jobs according to the scheduler configuration.
    /// This is useful, for example, if the worker should not take any more
    /// actions until there is enough resource available on the machine to
    /// handle them.
    pub experimental_precondition_script: Option<String>,

    /// Underlying CAS store that the worker will use to download CAS artifacts.
    /// This store must be a `FastSlowStore`. The `fast` store must be a
    /// `FileSystemStore` because it will use hardlinks when building out the files
    /// instead of copying the files. The slow store must eventually resolve to the
    /// same store the scheduler/client uses to send job requests.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub cas_fast_slow_store: StoreRefName,

    /// Configuration for uploading action results.
    #[serde(default)]
    pub upload_action_result: UploadActionResultConfig,

    /// The directory work jobs will be executed from. This directory will be fully
    /// managed by the worker service and will be purged on startup.
    /// This directory and the directory referenced in `local_filesystem_store_ref`'s
    /// `stores::FilesystemStore::content_path` must be on the same filesystem.
    /// Hardlinks will be used when placing files that are accessible to the jobs
    /// that are sourced from `local_filesystem_store_ref`'s `content_path`.
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
    pub work_directory: String,

    /// Properties of this worker. This configuration will be sent to the scheduler
    /// and used to tell the scheduler to restrict what should be executed on this
    /// worker.
    pub platform_properties: HashMap<String, WorkerProperty>,

    /// An optional mapping of environment variable names to sources, set for the
    /// execution in addition to those specified in the action itself. If set, each
    /// key will be exported as an environment variable before executing the job,
    /// with its value taken from the named source (for example a platform property
    /// of the action being executed, or a fixed value).
    pub additional_environment: Option<HashMap<String, EnvironmentSource>>,
}
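
// Illustrative `workers` entry tying the fields above together; every store
// name, path, and address is a placeholder that must match the rest of the
// config (in particular, `cas_fast_slow_store` must reference a `FastSlowStore`):
//
//     "workers": [{
//         "local": {
//             "worker_api_endpoint": { "uri": "grpc://127.0.0.1:50061" },
//             "cas_fast_slow_store": "WORKER_FAST_SLOW_STORE",
//             "upload_action_result": { "ac_store": "AC_MAIN_STORE" },
//             "work_directory": "/tmp/nativelink/work",
//             "platform_properties": { "OSFamily": { "values": ["linux"] } }
//         }
//     }]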

#[derive(Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
pub enum WorkerConfig {
    /// A worker type that executes jobs locally on this machine.
    Local(LocalWorkerConfig),
}

#[derive(Deserialize, Debug, Clone, Copy)]
#[serde(deny_unknown_fields)]
pub struct GlobalConfig {
    /// Maximum number of open files that can be opened at one time.
    /// This value is not strictly enforced; it is a best effort. Some internal
    /// libraries open files or read metadata from files in ways that do not obey
    /// this limit; however, in the vast majority of cases the limit will be honored.
    /// This value must be larger than `ulimit -n` to have any effect.
    /// Network file descriptors are not counted in this limit, but are counted
    /// in the kernel limit. It is a good idea to set a very large `ulimit -n`.
    /// Note: This value must be greater than 10.
    ///
    /// Default: 24576 (= 24 * 1024)
    #[serde(deserialize_with = "convert_numeric_with_shellexpand")]
    pub max_open_files: usize,

    /// Default hash function to use while uploading blobs to the CAS when not
    /// set by the client.
    ///
    /// Default: `ConfigDigestHashFunction::sha256`
    pub default_digest_hash_function: Option<ConfigDigestHashFunction>,

    /// Default digest size to use for health checks when running diagnostics.
    /// Health checks are expected to use this size to fill a buffer that is
    /// used for creating the digest.
    ///
    /// Default: 1024*1024 (1MiB)
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
    pub default_digest_size_health_check: usize,
}
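
// Illustrative `global` block using the documented defaults:
//
//     "global": {
//         "max_open_files": 24576,
//         "default_digest_hash_function": "sha256",
//         "default_digest_size_health_check": 1048576
//     }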

pub type StoreConfig = NamedConfig<StoreSpec>;
pub type SchedulerConfig = NamedConfig<SchedulerSpec>;

#[derive(Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct CasConfig {
    /// List of stores available to use in this config.
    /// The store names can be used in other configs when needing to reference
    /// a store.
    pub stores: Vec<StoreConfig>,

    /// Worker configurations used to execute jobs.
    pub workers: Option<Vec<WorkerConfig>>,

    /// List of schedulers available to use in this config.
    /// The scheduler names can be used in other configs when needing to
    /// reference a scheduler.
    pub schedulers: Option<Vec<SchedulerConfig>>,

    /// Servers to set up for this process.
    pub servers: Vec<ServerConfig>,

    /// Experimental - Origin events configuration. This is the service that will
    /// collect and publish nativelink events to a store for processing by an
    /// external service.
    pub experimental_origin_events: Option<OriginEventsSpec>,

    /// Any global configurations that apply to all modules live here.
    pub global: Option<GlobalConfig>,
}
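
// Finally, an illustrative top-level skeleton showing how the pieces of
// `CasConfig` fit together. Store and scheduler specs are elided ("...")
// because their fields live in `StoreSpec`/`SchedulerSpec`, outside this file:
//
//     {
//         "stores": [{ "name": "CAS_MAIN_STORE", ... }],
//         "schedulers": [{ "name": "MAIN_SCHEDULER", ... }],
//         "servers": [{
//             "listener": { "http": { "socket_address": "0.0.0.0:50051" } },
//             "services": {
//                 "cas": [{ "instance_name": "main", "cas_store": "CAS_MAIN_STORE" }]
//             }
//         }],
//         "workers": [{ "local": { ... } }]
//     }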