Coverage Report

Created: 2025-07-10 19:59

/build/source/nativelink-config/src/stores.rs
Line | Count | Source
1
// Copyright 2024 The NativeLink Authors. All rights reserved.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//    http://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
15
use serde::{Deserialize, Serialize};
16
17
use crate::serde_utils::{
18
    convert_data_size_with_shellexpand, convert_duration_with_shellexpand,
19
    convert_numeric_with_shellexpand, convert_optional_numeric_with_shellexpand,
20
    convert_optional_string_with_shellexpand, convert_string_with_shellexpand,
21
    convert_vec_string_with_shellexpand,
22
};
23
24
/// Name of the store. This type will be used when referencing a store
25
/// by its key in the `CasConfig::stores` map.
26
pub type StoreRefName = String;
27
28
#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
29
#[serde(rename_all = "snake_case")]
30
pub enum ConfigDigestHashFunction {
31
    /// Use the sha256 hash function.
32
    /// <https://en.wikipedia.org/wiki/SHA-2>
33
    Sha256,
34
35
    /// Use the blake3 hash function.
36
    /// <https://en.wikipedia.org/wiki/BLAKE_(hash_function)>
37
    Blake3,
38
}
39
40
#[derive(Serialize, Deserialize, Debug, Clone)]
41
#[serde(rename_all = "snake_case")]
42
pub enum StoreSpec {
43
    /// Memory store will store all data in a hashmap in memory.
44
    ///
45
    /// **Example JSON Config:**
46
    /// ```json
47
    /// "memory": {
48
    ///     "eviction_policy": {
49
    ///       // 10mb.
50
    ///       "max_bytes": 10000000,
51
    ///     }
53
    /// }
54
    /// ```
55
    ///
56
    Memory(MemorySpec),
57
58
    /// A generic blob store that will store files on the cloud
59
    /// provider. This configuration will never delete files, so you are
60
    /// responsible for purging old files in other ways.
61
    /// It supports the following backends:
62
    ///
63
    /// 1. **Amazon S3:**
64
    ///    S3 store will use Amazon's S3 service as a backend to store
65
    ///    the files. This configuration can be used to share files
66
    ///    across multiple instances. Uses system certificates for TLS
67
    ///    verification via `rustls-platform-verifier`.
68
    ///
69
    ///   **Example JSON Config:**
70
    ///   ```json
71
    ///   "experimental_cloud_object_store": {
72
    ///     "provider": "aws",
73
    ///     "region": "eu-north-1",
74
    ///     "bucket": "crossplane-bucket-af79aeca9",
75
    ///     "key_prefix": "test-prefix-index/",
76
    ///     "retry": {
77
    ///       "max_retries": 6,
78
    ///       "delay": 0.3,
79
    ///       "jitter": 0.5
80
    ///     },
81
    ///     "multipart_max_concurrent_uploads": 10
82
    ///   }
83
    ///   ```
84
    ///
85
    /// 2. **Google Cloud Storage:**
86
    ///    GCS store uses Google's GCS service as a backend to store
87
    ///    the files. This configuration can be used to share files
88
    ///    across multiple instances.
89
    ///
90
    ///   **Example JSON Config:**
91
    ///   ```json
92
    ///   "experimental_cloud_object_store": {
93
    ///     "provider": "gcs",
94
    ///     "bucket": "test-bucket",
95
    ///     "key_prefix": "test-prefix-index/",
96
    ///     "retry": {
97
    ///       "max_retries": 6,
98
    ///       "delay": 0.3,
99
    ///       "jitter": 0.5
100
    ///     },
101
    ///     "multipart_max_concurrent_uploads": 10
102
    ///   }
103
    ///   ```
104
    ///
105
    ExperimentalCloudObjectStore(ExperimentalCloudObjectSpec),
106
107
    /// Verify store is used to apply verifications to an underlying
108
    /// store implementation. It is strongly encouraged to validate
109
    /// as much data as you can before accepting data from a client;
110
    /// failing to do so may cause the data in the store to be
111
    /// populated with invalid data, causing all kinds of problems.
112
    ///
113
    /// The suggested configuration is to have the CAS validate the
114
    /// hash and size and the AC validate nothing.
115
    ///
116
    /// **Example JSON Config:**
117
    /// ```json
118
    /// "verify": {
119
    ///   "memory": {
120
    ///     "eviction_policy": {
121
    ///       "max_bytes": 500000000 // 500mb.
122
    ///     }
123
    ///   },
124
    ///   "verify_size": true,
125
    ///   "hash_verification_function": "sha256"
126
    /// }
127
    /// ```
128
    ///
129
    Verify(Box<VerifySpec>),
130
131
    /// Completeness checking store verifies if the
132
    /// output files & folders exist in the CAS before forwarding
133
    /// the request to the underlying store.
134
    /// Note: This store should only be used on AC stores.
135
    ///
136
    /// **Example JSON Config:**
137
    /// ```json
138
    /// "completeness_checking": {
139
    ///     "backend": {
140
    ///       "filesystem": {
141
    ///         "content_path": "~/.cache/nativelink/content_path-ac",
142
    ///         "temp_path": "~/.cache/nativelink/tmp_path-ac",
143
    ///         "eviction_policy": {
144
    ///           // 500mb.
145
    ///           "max_bytes": 500000000,
146
    ///         }
147
    ///       }
148
    ///     },
149
    ///     "cas_store": {
150
    ///       "ref_store": {
151
    ///         "name": "CAS_MAIN_STORE"
152
    ///       }
153
    ///     }
154
    ///   }
155
    /// ```
156
    ///
157
    CompletenessChecking(Box<CompletenessCheckingSpec>),
158
159
    /// A compression store that will compress the data inbound and
160
    /// outbound. Compressing and decompressing the data has a non-trivial
161
    /// cost, but in many cases, if the final store requires network
162
    /// transport and/or storage space is a concern, it is often faster
163
    /// and more efficient to place this store in front of
164
    /// those stores.
165
    ///
166
    /// **Example JSON Config:**
167
    /// ```json
168
    /// "compression": {
169
    ///     "compression_algorithm": {
170
    ///       "lz4": {}
171
    ///     },
172
    ///     "backend": {
173
    ///       "filesystem": {
174
    ///         "content_path": "/tmp/nativelink/data/content_path-cas",
175
    ///         "temp_path": "/tmp/nativelink/data/tmp_path-cas",
176
    ///         "eviction_policy": {
177
    ///           // 2gb.
178
    ///           "max_bytes": 2000000000,
179
    ///         }
180
    ///       }
181
    ///     }
182
    ///   }
183
    /// ```
184
    ///
185
    Compression(Box<CompressionSpec>),
186
187
    /// A dedup store will run a rolling hash algorithm on the input to
188
    /// slice it into smaller chunks, then run a sha256 algorithm on
189
    /// each chunk; if the chunk doesn't already exist, it is uploaded
190
    /// to the `content_store` using a new digest of just that chunk.
191
    /// Once all chunks exist, an Action-Cache-like digest will be built
192
    /// and uploaded to the `index_store`, which contains a reference to
193
    /// each chunk/digest of the uploaded file. A download request will
194
    /// first grab the index from the `index_store`, then stream the
195
    /// content of each chunk back to the client as if it were one
196
    /// file.
197
    ///
198
    /// This store is exceptionally good when the following conditions
199
    /// are met:
200
    /// * Content is mostly the same (inserts, updates, deletes are ok)
201
    /// * Content is not compressed or encrypted
202
    /// * Uploading or downloading from `content_store` is the bottleneck.
203
    ///
204
    /// Note: This store pairs well when used with `CompressionSpec` as
205
    /// the `content_store`, but never put `DedupSpec` as the backend of
206
    /// `CompressionSpec` as it will negate all the gains.
207
    ///
208
    /// Note: When running `.has()` on this store, it will only check
209
    /// to see if the entry exists in the `index_store` and not check
210
    /// if the individual chunks exist in the `content_store`.
211
    ///
212
    /// **Example JSON Config:**
213
    /// ```json
214
    /// "dedup": {
215
    ///     "index_store": {
216
    ///       "memory": {
217
    ///         "eviction_policy": { "max_bytes": 1000000000 } // 1GB
218
    ///       }
220
    ///     },
221
    ///     "content_store": {
222
    ///       "compression": {
223
    ///         "compression_algorithm": {
224
    ///           "lz4": {}
225
    ///         },
226
    ///         "backend": {
227
    ///           "fast_slow": {
228
    ///             "fast": {
229
    ///               "memory": {
230
    ///                 "eviction_policy": { "max_bytes": 500000000 } // 500MB
231
    ///               }
233
    ///             },
234
    ///             "slow": {
235
    ///               "filesystem": {
236
    ///                 "content_path": "/tmp/nativelink/data/content_path-content",
237
    ///                 "temp_path": "/tmp/nativelink/data/tmp_path-content",
238
    ///                 "eviction_policy": {
239
    ///                   "max_bytes": 2000000000 // 2gb.
240
    ///                 }
241
    ///               }
242
    ///             }
243
    ///           }
244
    ///         }
245
    ///       }
246
    ///     }
247
    ///   }
248
    /// ```
249
    ///
250
    Dedup(Box<DedupSpec>),
251
252
    /// Existence store will wrap around another store and cache calls
253
    /// to `has` so that subsequent `has_with_results` calls will be
254
    /// faster. This is useful for cases when you have a store that
255
    /// is slow to respond to `has` calls.
256
    /// Note: This store should only be used on CAS stores.
257
    ///
258
    /// **Example JSON Config:**
259
    /// ```json
260
    /// "existence_cache": {
261
    ///     "backend": {
262
    ///       "memory": {
263
    ///         "eviction_policy": {
264
    ///           // 500mb.
265
    ///           "max_bytes": 500000000,
266
    ///         }
267
    ///       }
268
    ///     },
269
    ///     "cas_store": {
270
    ///       "ref_store": {
271
    ///         "name": "CAS_MAIN_STORE"
272
    ///       }
273
    ///     }
274
    ///   }
275
    /// ```
276
    ///
277
    ExistenceCache(Box<ExistenceCacheSpec>),
278
279
    /// `FastSlow` store will first try to fetch the data from the `fast`
280
    /// store and then if it does not exist try the `slow` store.
281
    /// When the object does exist in the `slow` store, it will copy
282
    /// the data to the `fast` store while returning the data.
283
    /// This store should be thought of as a store that "buffers"
284
    /// the data to the `fast` store.
285
    /// On uploads it will mirror data to both `fast` and `slow` stores.
286
    ///
287
    /// WARNING: If you need data to always exist in the `slow` store
288
    /// for something like remote execution, be careful because this
289
    /// store will never check to see if the objects exist in the
290
    /// `slow` store if they exist in the `fast` store (ie: it assumes
291
    /// that if an object exists in the `fast` store it will exist in
292
    /// the `slow` store).
293
    ///
294
    /// **Example JSON Config:**
295
    /// ```json
296
    /// "fast_slow": {
297
    ///     "fast": {
298
    ///       "filesystem": {
299
    ///         "content_path": "/tmp/nativelink/data/content_path-index",
300
    ///         "temp_path": "/tmp/nativelink/data/tmp_path-index",
301
    ///         "eviction_policy": {
302
    ///           // 500mb.
303
    ///           "max_bytes": 500000000,
304
    ///         }
305
    ///       }
306
    ///     },
307
    ///     "slow": {
308
    ///       "filesystem": {
309
    ///         "content_path": "/tmp/nativelink/data/content_path-index",
310
    ///         "temp_path": "/tmp/nativelink/data/tmp_path-index",
311
    ///         "eviction_policy": {
312
    ///           // 500mb.
313
    ///           "max_bytes": 500000000,
314
    ///         }
315
    ///       }
316
    ///     }
317
    ///   }
318
    /// ```
319
    ///
320
    FastSlow(Box<FastSlowSpec>),
321
322
    /// Shards the data to multiple stores. This is useful for cases
323
    /// when you want to distribute the load across multiple stores.
324
    /// The digest hash is used to determine which store to send the
325
    /// data to.
326
    ///
327
    /// **Example JSON Config:**
328
    /// ```json
329
    /// "shard": {
330
    ///     "stores": [
331
    ///         "memory": {
332
    ///             "eviction_policy": {
333
    ///                 // 10mb.
334
    ///                 "max_bytes": 10000000
335
    ///             },
336
    ///             "weight": 1
337
    ///         }
338
    ///     ]
339
    /// }
340
    /// ```
341
    ///
342
    Shard(ShardSpec),
343
344
    /// Stores the data on the filesystem. This store is designed for
345
    /// local persistent storage. Restarts of this program should restore
346
    /// the previous state, meaning anything uploaded will be persistent
347
    /// as long as the filesystem integrity holds.
348
    ///
349
    /// **Example JSON Config:**
350
    /// ```json
351
    /// "filesystem": {
352
    ///     "content_path": "/tmp/nativelink/data-worker-test/content_path-cas",
353
    ///     "temp_path": "/tmp/nativelink/data-worker-test/tmp_path-cas",
354
    ///     "eviction_policy": {
355
    ///       // 10gb.
356
    ///       "max_bytes": 10000000000,
357
    ///     }
358
    /// }
359
    /// ```
360
    ///
361
    Filesystem(FilesystemSpec),
362
363
    /// Store used to reference a store in the root store manager.
364
    /// This is useful for cases when you want to share a store in different
365
    /// nested stores. For example, you may want to share the same memory store
366
    /// used for the action cache, but use a `FastSlowSpec` and have the fast
367
    /// store also share the memory store for efficiency.
368
    ///
369
    /// **Example JSON Config:**
370
    /// ```json
371
    /// "ref_store": {
372
    ///     "name": "FS_CONTENT_STORE"
373
    /// }
374
    /// ```
375
    ///
376
    RefStore(RefSpec),
377
378
    /// Uses the size field of the digest to decide which store to send the
379
    /// data to. This is useful for cases when you'd like to put small objects
380
    /// in one store and large objects in another store. This should only be
381
    /// used if the size field is the real size of the content; in other
382
    /// words, don't use it on AC (Action Cache) stores. Any store where you
383
    /// can safely use `VerifySpec.verify_size = true` should be safe to use
384
    /// with this store (ie: CAS stores).
385
    ///
386
    /// **Example JSON Config:**
387
    /// ```json
388
    /// "size_partitioning": {
389
    ///     "size": 134217728, // 128mib.
390
    ///     "lower_store": {
391
    ///       "memory": {
392
    ///         "eviction_policy": {
393
    ///           "max_bytes": "${NATIVELINK_CAS_MEMORY_CONTENT_LIMIT:-100000000}"
394
    ///         }
395
    ///       }
396
    ///     },
397
    ///     "upper_store": {
398
    ///       /// This store discards data larger than 128mib.
399
    ///       "noop": {}
400
    ///     }
401
    ///   }
402
    /// ```
403
    ///
404
    SizePartitioning(Box<SizePartitioningSpec>),
405
406
    /// This store will pass through calls to another GRPC store. This store
407
    /// is not designed to be used as a sub-store of another store, but it
408
    /// does satisfy the interface and will likely work.
409
    ///
410
    /// One major GOTCHA is that some stores use a special function on this
411
    /// store to get the size of the underlying object, which is only reliable
412
    /// when this store is serving a CAS store, not an AC store. If this
413
    /// store is used directly, rather than as a child of another store,
414
    /// there are no side effects and it is the most efficient way to use it.
415
    ///
416
    /// **Example JSON Config:**
417
    /// ```json
418
    /// "grpc": {
419
    ///     "instance_name": "main",
420
    ///     "endpoints": [
421
    ///       {"address": "grpc://${CAS_ENDPOINT:-127.0.0.1}:50051"}
422
    ///     ],
423
    ///     "store_type": "ac"
424
    ///   }
425
    /// ```
426
    ///
427
    Grpc(GrpcSpec),
428
429
    /// Stores data in any store compatible with the Redis API.
430
    ///
431
    /// Pairs well with `SizePartitioning` and/or `FastSlow` stores.
432
    /// Ideal for small objects, as most hosted Redis services have a
433
    /// maximum upload size of between 256MB and 512MB.
434
    ///
435
    /// **Example JSON Config:**
436
    /// ```json
437
    /// "redis_store": {
438
    ///     "addresses": [
439
    ///         "redis://127.0.0.1:6379/",
440
    ///     ]
441
    /// }
442
    /// ```
443
    ///
444
    RedisStore(RedisSpec),
445
446
    /// Noop store is a store that sends streams into the void and all data
447
    /// retrieval will return 404 (`NotFound`). This can be useful for cases
448
    /// where you may need to partition your data and part of your data needs
449
    /// to be discarded.
450
    ///
451
    /// **Example JSON Config:**
452
    /// ```json
453
    /// "noop": {}
454
    /// ```
455
    ///
456
    Noop(NoopSpec),
457
458
    /// Experimental `MongoDB` store implementation.
459
    ///
460
    /// This store uses `MongoDB` as a backend for storing data. It supports
461
    /// both CAS (Content Addressable Storage) and scheduler data with
462
    /// optional change streams for real-time updates.
463
    ///
464
    /// **Example JSON Config:**
465
    /// ```json
466
    /// "experimental_mongo": {
467
    ///     "connection_string": "mongodb://localhost:27017",
468
    ///     "database": "nativelink",
469
    ///     "cas_collection": "cas",
470
    ///     "key_prefix": "cas:",
471
    ///     "read_chunk_size": 65536,
472
    ///     "max_concurrent_uploads": 10,
473
    ///     "enable_change_streams": false
474
    /// }
475
    /// ```
476
    ///
477
    ExperimentalMongo(ExperimentalMongoSpec),
478
}
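// A hedged sketch (not from this file) of how the variants above are wired
// together: each key under the root `stores` map is a `StoreRefName`, and
// `ref_store` refers back to such a key. The store names and sizes below are
// illustrative assumptions.
// ```json
// "stores": {
//   "CAS_MAIN_STORE": {
//     "memory": {
//       "eviction_policy": { "max_bytes": 1000000000 } // 1gb, assumed.
//     }
//   },
//   "AC_STORE": {
//     "completeness_checking": {
//       "backend": { "memory": {} },
//       "cas_store": { "ref_store": { "name": "CAS_MAIN_STORE" } }
//     }
//   }
// }
// ```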
479
480
/// Configuration for an individual shard of the store.
481
#[derive(Serialize, Deserialize, Debug, Clone)]
482
#[serde(deny_unknown_fields)]
483
pub struct ShardConfig {
484
    /// Store to shard the data to.
485
    pub store: StoreSpec,
486
487
    /// The weight of the store. This is used to determine how much data
488
    /// should be sent to the store. The actual share is the individual
489
    /// store's weight divided by the sum of all the stores' weights.
490
    ///
491
    /// Default: 1
492
    pub weight: Option<u32>,
493
}
494
495
#[derive(Serialize, Deserialize, Debug, Clone)]
496
#[serde(deny_unknown_fields)]
497
pub struct ShardSpec {
498
    /// Stores to shard the data to.
499
    pub stores: Vec<ShardConfig>,
500
}
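// A sketch of the weighting described above (store choices and weights are
// assumptions): with two shards weighted 1 and 3, the first receives
// 1 / (1 + 3) = 25% of the data and the second 3 / (1 + 3) = 75%.
// ```json
// "shard": {
//   "stores": [
//     { "store": { "memory": {} }, "weight": 1 },
//     { "store": { "memory": {} }, "weight": 3 }
//   ]
// }
// ```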
501
502
#[derive(Serialize, Deserialize, Debug, Clone)]
503
#[serde(deny_unknown_fields)]
504
pub struct SizePartitioningSpec {
505
    /// Size to partition the data on.
506
    #[serde(deserialize_with = "convert_data_size_with_shellexpand")]
507
    pub size: u64,
508
509
    /// Store to send data when object is < (less than) size.
510
    pub lower_store: StoreSpec,
511
512
    /// Store to send data when object is >= (less than eq) size.
513
    pub upper_store: StoreSpec,
514
}
515
516
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
517
#[serde(deny_unknown_fields)]
518
pub struct RefSpec {
519
    /// Name of the store under the root "stores" config object.
520
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
521
    pub name: String,
522
}
523
524
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
525
#[serde(deny_unknown_fields)]
526
pub struct FilesystemSpec {
527
    /// Path on the system where to store the actual content. This is where
528
    /// the bulk of the data will be placed.
529
    /// On service bootup this folder will be scanned and all files will be
530
    /// added to the cache. In the event one of the files doesn't match the
531
    /// criteria, the file will be deleted.
532
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
533
    pub content_path: String,
534
535
    /// A temporary location where files that are being uploaded or
536
    /// deleted will be placed while the content cannot be guaranteed to be
537
    /// accurate. This location must be on the same block device as
538
    /// `content_path` so atomic moves can happen (ie: move without copy).
539
    /// All files in this folder will be deleted on every startup.
540
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
541
    pub temp_path: String,
542
543
    /// Buffer size to use when reading files. Generally this should be left
544
    /// to the default value except for testing.
545
    /// Default: 32k.
546
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
547
    pub read_buffer_size: u32,
548
549
    /// Policy used to evict items out of the store. Failure to set this
550
    /// value will cause items to never be removed from the store causing
551
    /// infinite memory usage.
552
    pub eviction_policy: Option<EvictionPolicy>,
553
554
    /// The block size of the filesystem on the running machine. This
555
    /// value is used to determine an entry's actual size consumed on disk.
556
    /// For a 4KB block size filesystem, a 1B file actually consumes 4KB.
557
    /// Default: 4096
558
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
559
    pub block_size: u64,
560
}
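// A hedged filesystem example that also sets `read_buffer_size` and
// `block_size`, which the earlier example omits; paths and values are
// assumptions.
// ```json
// "filesystem": {
//   "content_path": "/tmp/nativelink/data/content_path-cas",
//   "temp_path": "/tmp/nativelink/data/tmp_path-cas",
//   "read_buffer_size": 32768, // 32k, matching the documented default.
//   "block_size": 4096,
//   "eviction_policy": { "max_bytes": 10000000000 } // 10gb.
// }
// ```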
561
562
#[derive(Serialize, Deserialize, Debug, Clone)]
563
#[serde(deny_unknown_fields)]
564
pub struct FastSlowSpec {
565
    /// Fast store that will be contacted first, before reaching
566
    /// out to the `slow` store.
567
    pub fast: StoreSpec,
568
569
    /// If the object does not exist in the `fast` store it will try to
570
    /// get it from this store.
571
    pub slow: StoreSpec,
572
}
573
574
#[derive(Serialize, Deserialize, Debug, Default, Clone, Copy)]
575
#[serde(deny_unknown_fields)]
576
pub struct MemorySpec {
577
    /// Policy used to evict items out of the store. Failure to set this
578
    /// value will cause items to never be removed from the store causing
579
    /// infinite memory usage.
580
    pub eviction_policy: Option<EvictionPolicy>,
581
}
582
583
#[derive(Serialize, Deserialize, Debug, Clone)]
584
#[serde(deny_unknown_fields)]
585
pub struct DedupSpec {
586
    /// Store used to store the index of each dedup slice. This store
587
    /// should generally be fast and small.
588
    pub index_store: StoreSpec,
589
590
    /// The store where the individual chunks will be uploaded. This
591
    /// store should generally be the slower & larger store.
592
    pub content_store: StoreSpec,
593
594
    /// Minimum size that a chunk will be when slicing up the content.
595
    /// Note: This setting can be increased to improve performance
596
    /// because this number of bytes will not actually be checked when
597
    /// deciding where to partition the data.
598
    ///
599
    /// Default: 65536 (64k)
600
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
601
    pub min_size: u32,
602
603
    /// A best-effort attempt will be made to keep the average size
604
    /// of the chunks to this number. It is not a guarantee, but a
605
    /// slight attempt will be made.
606
    ///
607
    /// This value is also roughly the threshold used to determine
608
    /// if we should even attempt to dedup the entry or just forward
609
    /// it directly to the `content_store` without an index. The actual
610
    /// value will be about `normal_size * 1.3` due to implementation
611
    /// details.
612
    ///
613
    /// Default: 262144 (256k)
614
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
615
    pub normal_size: u32,
616
617
    /// Maximum size a chunk is allowed to be.
618
    ///
619
    /// Default: 524288 (512k)
620
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
621
    pub max_size: u32,
622
623
    /// Due to implementation details, we prefer to download
624
    /// the first chunks of the file so we can stream the content
625
    /// out and free up some of our buffers. This configuration
626
    /// will be used to restrict the number of concurrent chunk
627
    /// downloads at a time per `get()` request.
628
    ///
629
    /// This setting will also affect how much memory might be used
630
    /// per `get()` request. Estimated worst case memory per `get()`
631
    /// request is: `max_concurrent_fetch_per_get * max_size`.
632
    ///
633
    /// Default: 10
634
    #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")]
635
    pub max_concurrent_fetch_per_get: u32,
636
}
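// A worked estimate of the bound described above, using the documented
// defaults (an illustration, not a measurement):
// ```txt
// max_concurrent_fetch_per_get * max_size
//   = 10 * 524288
//   = 5242880 bytes (~5 MiB) worst case per `get()` request
// ```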
637
638
#[derive(Serialize, Deserialize, Debug, Clone)]
639
#[serde(deny_unknown_fields)]
640
pub struct ExistenceCacheSpec {
641
    /// The underlying store to wrap around. All content will first flow
642
    /// through self before forwarding to backend. In the event there
643
    /// is an error detected in self, the connection to the backend
644
    /// will be terminated, and early termination should always cause
645
    /// updates to fail on the backend.
646
    pub backend: StoreSpec,
647
648
    /// Policy used to evict items out of the store. Failure to set this
649
    /// value will cause items to never be removed from the store causing
650
    /// infinite memory usage.
651
    pub eviction_policy: Option<EvictionPolicy>,
652
}
653
654
#[derive(Serialize, Deserialize, Debug, Clone)]
655
#[serde(deny_unknown_fields)]
656
pub struct VerifySpec {
657
    /// The underlying store to wrap around. All content will first flow
658
    /// through self before forwarding to backend. In the event there
659
    /// is an error detected in self, the connection to the backend
660
    /// will be terminated, and early termination should always cause
661
    /// updates to fail on the backend.
662
    pub backend: StoreSpec,
663
664
    /// If set the store will verify the size of the data before accepting
665
    /// an upload of data.
666
    ///
667
    /// This should be set to false for AC, but true for CAS stores.
668
    #[serde(default)]
669
    pub verify_size: bool,
670
671
    /// If set, the data will be hashed to verify that the key matches the
672
    /// computed hash. The hash function is automatically determined based
673
    /// on the request and if not set will use the global default.
674
    ///
675
    /// This should be set to false for AC, but true for CAS stores.
676
    #[serde(default)]
677
    pub verify_hash: bool,
678
}
679
680
#[derive(Serialize, Deserialize, Debug, Clone)]
681
#[serde(deny_unknown_fields)]
682
pub struct CompletenessCheckingSpec {
683
    /// The underlying store that will have its results validated before sending them to the client.
684
    pub backend: StoreSpec,
685
686
    /// When a request is made, the results are decoded and all output digests/files are verified
687
    /// to exist in this CAS store before returning success.
688
    pub cas_store: StoreSpec,
689
}
690
691
#[derive(Serialize, Deserialize, Debug, Default, PartialEq, Eq, Clone, Copy)]
692
#[serde(deny_unknown_fields)]
693
pub struct Lz4Config {
694
    /// Size of the blocks to compress.
695
    /// Higher values require more ram, but might yield slightly better
696
    /// compression ratios.
697
    ///
698
    /// Default: 65536 (64k).
699
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
700
    pub block_size: u32,
701
702
    /// Maximum size allowed to attempt to deserialize data into.
703
    /// This is needed because the `block_size` is embedded into the data
704
    /// so if there was a bad actor, they could upload an extremely large
705
    /// `block_size`'ed entry and we'd allocate a large amount of memory
706
    /// when retrieving the data. To prevent this from happening, we
707
    /// allow you to specify the maximum that we'll attempt to deserialize.
708
    ///
709
    /// Default: value in `block_size`.
710
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
711
    pub max_decode_block_size: u32,
712
}
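// A hedged sketch of a compression store that fills in the `Lz4Config`
// fields explicitly instead of the empty `"lz4": {}` shown earlier; the
// sizes and backend are assumptions.
// ```json
// "compression": {
//   "compression_algorithm": {
//     "lz4": {
//       "block_size": 65536,
//       "max_decode_block_size": 131072
//     }
//   },
//   "backend": { "memory": {} }
// }
// ```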
713
714
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy)]
715
#[serde(rename_all = "snake_case")]
716
pub enum CompressionAlgorithm {
717
    /// The LZ4 compression algorithm is extremely fast for compression and
718
    /// decompression, however it does not perform very well in compression
719
    /// ratio. In most cases build artifacts are highly compressible, however
720
    /// lz4 is quite good at aborting early if the data is not deemed very
721
    /// compressible.
722
    ///
723
    /// see: <https://lz4.github.io/lz4/>
724
    Lz4(Lz4Config),
725
}
726
727
#[derive(Serialize, Deserialize, Debug, Clone)]
728
#[serde(deny_unknown_fields)]
729
pub struct CompressionSpec {
730
    /// The underlying store to wrap around. All content will first flow
731
    /// through self before forwarding to backend. In the event there
732
    /// is an error detected in self, the connection to the backend
733
    /// will be terminated, and early termination should always cause
734
    /// updates to fail on the backend.
735
    pub backend: StoreSpec,
736
737
    /// The compression algorithm to use.
738
    pub compression_algorithm: CompressionAlgorithm,
739
}
740
741
/// Eviction policy always works on LRU (Least Recently Used). Any time an entry
742
/// is touched it updates the timestamp. Inserts and updates will execute the
743
/// eviction policy removing any expired entries and/or the oldest entries
744
/// until the store size becomes smaller than `max_bytes`.
745
#[derive(Serialize, Deserialize, Debug, Default, Clone, Copy)]
746
#[serde(deny_unknown_fields)]
747
pub struct EvictionPolicy {
748
    /// Maximum number of bytes before eviction takes place.
749
    /// Default: 0. Zero means never evict based on size.
750
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
751
    pub max_bytes: usize,
752
753
    /// When eviction starts based on hitting `max_bytes`, continue until
754
    /// `max_bytes - evict_bytes` is met to create a low watermark.  This stops
755
    /// operations from thrashing when the store is close to the limit.
756
    /// Default: 0
757
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
758
    pub evict_bytes: usize,
759
760
    /// Maximum number of seconds for an entry to live since it was last
761
    /// accessed before it is evicted.
762
    /// Default: 0. Zero means never evict based on time.
763
    #[serde(default, deserialize_with = "convert_duration_with_shellexpand")]
764
    pub max_seconds: u32,
765
766
    /// Maximum number of entries in the store before an eviction takes place.
767
    /// Default: 0. Zero means never evict based on count.
768
    #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")]
769
    pub max_count: u64,
770
}
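// A hedged illustration of the low-watermark behavior described above (all
// values are assumptions): once the store exceeds `max_bytes`, eviction
// continues until roughly `max_bytes - evict_bytes` = 10gb - 2gb = 8gb.
// ```json
// "eviction_policy": {
//   "max_bytes": 10000000000,  // 10gb high watermark.
//   "evict_bytes": 2000000000, // evict down to about 8gb.
//   "max_seconds": 1800,       // also evict entries unused for 30 minutes.
//   "max_count": 0             // never evict based on entry count.
// }
// ```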
771
772
#[derive(Serialize, Deserialize, Debug, Clone)]
773
#[serde(tag = "provider", rename_all = "snake_case")]
774
pub enum ExperimentalCloudObjectSpec {
775
    Aws(ExperimentalAwsSpec),
776
    Gcs(ExperimentalGcsSpec),
777
}
778
779
impl Default for ExperimentalCloudObjectSpec {
780
0
    fn default() -> Self {
781
0
        Self::Aws(ExperimentalAwsSpec::default())
782
0
    }
783
}
784
785
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
786
#[serde(deny_unknown_fields)]
787
pub struct ExperimentalAwsSpec {
788
    /// S3 region. Usually us-east-1, us-west-2, af-south-1, etc.
789
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
790
    pub region: String,
791
792
    /// Bucket name to use as the backend.
793
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
794
    pub bucket: String,
795
796
    /// Common retry and upload configuration
797
    #[serde(flatten)]
798
    pub common: CommonObjectSpec,
799
}
800
801
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
802
#[serde(deny_unknown_fields)]
803
pub struct ExperimentalGcsSpec {
804
    /// Bucket name to use as the backend.
805
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
806
    pub bucket: String,
807
808
    /// Chunk size for resumable uploads.
809
    ///
810
    /// Default: 2MB
811
    pub resumable_chunk_size: Option<usize>,
812
813
    /// Common retry and upload configuration
814
    #[serde(flatten)]
815
    pub common: CommonObjectSpec,
816
}
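// A hedged GCS example that also sets `resumable_chunk_size`, which the
// earlier example omits; the bucket name and chunk size are assumptions.
// ```json
// "experimental_cloud_object_store": {
//   "provider": "gcs",
//   "bucket": "test-bucket",
//   "key_prefix": "test-prefix/",
//   "resumable_chunk_size": 4194304 // 4MB resumable upload chunks.
// }
// ```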
817
818
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
819
pub struct CommonObjectSpec {
820
    /// If you wish to prefix the location in the bucket. If None, no prefix will be used.
821
    #[serde(default)]
822
    pub key_prefix: Option<String>,
823
824
    /// Retry configuration to use when a network request fails.
825
    #[serde(default)]
826
    pub retry: Retry,
827
828
    /// If the number of seconds since the `last_modified` time of the object
829
    /// is greater than this value, the object will not be considered
830
    /// "existing". This allows for external tools to delete objects that
831
    /// have not been uploaded in a long time. If a client receives a `NotFound`
832
    /// the client should re-upload the object.
833
    ///
834
    /// There should be a sufficient buffer between the expiration
835
    /// configuration of the external tool and this value. Keeping items
836
    /// around for a few days is generally a good idea.
837
    ///
838
    /// Default: 0. Zero means never consider an object expired.
839
    #[serde(default, deserialize_with = "convert_duration_with_shellexpand")]
840
    pub consider_expired_after_s: u32,
841
842
    /// The maximum buffer size to retain in case of a retryable error
843
    /// during upload. Setting this to zero will disable upload buffering;
844
    /// this means that in the event of a failure during upload, the entire
845
    /// upload will be aborted and the client will likely receive an error.
846
    ///
847
    /// Default: 5MB.
848
    pub max_retry_buffer_per_request: Option<usize>,
849
850
    /// Maximum number of concurrent `UploadPart` requests per `MultipartUpload`.
851
    ///
852
    /// Default: 10.
853
    pub multipart_max_concurrent_uploads: Option<usize>,
854
855
    /// Allow unencrypted HTTP connections. Only use this for local testing.
856
    ///
857
    /// Default: false
858
    #[serde(default)]
859
    pub insecure_allow_http: bool,
860
861
    /// Disable http/2 connections and only use http/1.1. The default client
862
    /// configuration has both http/1.1 and http/2 enabled. Http/2 should be
863
    /// disabled if the environment has poor support for, or poor performance
864
    /// with, http/2. It is safe to keep the default unless the underlying
865
    /// network environment or the S3 or GCS API servers specify otherwise.
866
    ///
867
    /// Default: false
868
    #[serde(default)]
869
    pub disable_http2: bool,
870
}
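// Because `CommonObjectSpec` is `#[serde(flatten)]`-ed into the provider
// specific specs, its fields appear directly alongside `region`/`bucket`.
// A hedged AWS sketch (bucket, prefix, and values are assumptions):
// ```json
// "experimental_cloud_object_store": {
//   "provider": "aws",
//   "region": "eu-north-1",
//   "bucket": "some-bucket",
//   "key_prefix": "cas/",
//   "consider_expired_after_s": 2592000,      // 30 days.
//   "max_retry_buffer_per_request": 5242880,  // 5MB.
//   "multipart_max_concurrent_uploads": 10,
//   "insecure_allow_http": false,
//   "disable_http2": false
// }
// ```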
871
872
#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
873
#[serde(rename_all = "snake_case")]
874
pub enum StoreType {
875
    /// The store is content addressable storage.
876
    Cas,
877
    /// The store is an action cache.
878
    Ac,
879
}
880
881
#[derive(Serialize, Deserialize, Debug, Clone)]
882
pub struct ClientTlsConfig {
883
    /// Path to the certificate authority to use to validate the remote.
884
    ///
885
    /// Default: None
886
    #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")]
887
    pub ca_file: Option<String>,
888
889
    /// Path to the certificate file for client authentication.
890
    ///
891
    /// Default: None
892
    #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")]
893
    pub cert_file: Option<String>,
894
895
    /// Path to the private key file for client authentication.
896
    ///
897
    /// Default: None
898
    #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")]
899
    pub key_file: Option<String>,
900
901
    /// If set the client will use the native roots for TLS connections.
902
    ///
903
    /// Default: false
904
    #[serde(default)]
905
    pub use_native_roots: Option<bool>,
906
}
907
908
#[derive(Serialize, Deserialize, Debug, Clone)]
909
#[serde(deny_unknown_fields)]
910
pub struct GrpcEndpoint {
911
    /// The endpoint address (e.g. grpc(s)://example.com:443).
912
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
913
    pub address: String,
914
    /// The TLS configuration to use to connect to the endpoint (if grpcs).
915
    pub tls_config: Option<ClientTlsConfig>,
916
    /// The maximum concurrency to allow on this endpoint.
917
    pub concurrency_limit: Option<usize>,
918
}
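// A hedged sketch of a grpcs endpoint entry using `ClientTlsConfig`; the
// address and certificate paths are assumptions.
// ```json
// {
//   "address": "grpcs://cas.example.com:443",
//   "tls_config": {
//     "ca_file": "/etc/nativelink/ca.pem",
//     "cert_file": "/etc/nativelink/client.pem",
//     "key_file": "/etc/nativelink/client.key"
//   },
//   "concurrency_limit": 64
// }
// ```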
919
920
#[derive(Serialize, Deserialize, Debug, Clone)]
921
#[serde(deny_unknown_fields)]
922
pub struct GrpcSpec {
923
    /// Instance name for GRPC calls. Proxy calls will have the `instance_name` changed to this.
924
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
925
    pub instance_name: String,
926
927
    /// The endpoint of the grpc connection.
928
    pub endpoints: Vec<GrpcEndpoint>,
929
930
    /// The type of the upstream store, this ensures that the correct server calls are made.
931
    pub store_type: StoreType,
932
933
    /// Retry configuration to use when a network request fails.
934
    #[serde(default)]
935
    pub retry: Retry,
936
937
    /// Limit the number of simultaneous upstream requests to this many.  A
938
    /// value of zero is treated as unlimited.  If the limit is reached the
939
    /// request is queued.
940
    #[serde(default)]
941
    pub max_concurrent_requests: usize,
942
943
    /// The number of connections to make to each specified endpoint to balance
944
    /// the load over multiple TCP connections.  Default 1.
945
    #[serde(default)]
946
    pub connections_per_endpoint: usize,
947
}
948
949
/// The possible error codes that might occur on an upstream request.
950
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq)]
951
pub enum ErrorCode {
952
    Cancelled = 1,
953
    Unknown = 2,
954
    InvalidArgument = 3,
955
    DeadlineExceeded = 4,
956
    NotFound = 5,
957
    AlreadyExists = 6,
958
    PermissionDenied = 7,
959
    ResourceExhausted = 8,
960
    FailedPrecondition = 9,
961
    Aborted = 10,
962
    OutOfRange = 11,
963
    Unimplemented = 12,
964
    Internal = 13,
965
    Unavailable = 14,
966
    DataLoss = 15,
967
    Unauthenticated = 16,
968
    // Note: This list is duplicated from nativelink-error/lib.rs.
969
}
970
971
#[derive(Serialize, Deserialize, Debug, Clone)]
972
pub struct RedisSpec {
973
    /// The hostname or IP address of the Redis server.
974
    /// Ex: `["redis://username:password@redis-server-url:6380/99"]`
975
    /// 99 represents the database ID and 6380 represents the port.
976
    #[serde(deserialize_with = "convert_vec_string_with_shellexpand")]
977
    pub addresses: Vec<String>,
978
979
    /// The response timeout for the Redis connection in seconds.
980
    ///
981
    /// Default: 10
982
    #[serde(default)]
983
    pub response_timeout_s: u64,
984
985
    /// The connection timeout for the Redis connection in seconds.
986
    ///
987
    /// Default: 10
988
    #[serde(default)]
989
    pub connection_timeout_s: u64,
990
991
    /// An optional and experimental Redis channel to publish write events to.
992
    ///
993
    /// If set, every time a write operation is made to a Redis node
994
    /// then an event will be published to a Redis channel with the given name.
995
    /// If unset, the writes will still be made,
996
    /// but the write events will not be published.
997
    ///
998
    /// Default: (Empty String / No Channel)
999
    #[serde(default)]
1000
    pub experimental_pub_sub_channel: Option<String>,
1001
1002
    /// An optional prefix to prepend to all keys in this store.
1003
    ///
1004
    /// Setting this value can make it convenient to query or
1005
    /// organize your data according to the shared prefix.
1006
    ///
1007
    /// Default: (Empty String / No Prefix)
1008
    #[serde(default)]
1009
    pub key_prefix: String,
1010
1011
    /// Set the mode Redis is operating in.
1012
    ///
1013
    /// Available options are "cluster" for
1014
    /// [cluster mode](https://redis.io/docs/latest/operate/oss_and_stack/reference/cluster-spec/),
1015
    /// "sentinel" for [sentinel mode](https://redis.io/docs/latest/operate/oss_and_stack/management/sentinel/),
1016
    /// or "standard" if Redis is operating in neither cluster nor sentinel mode.
1017
    ///
1018
    /// Default: standard
1019
    #[serde(default)]
1020
    pub mode: RedisMode,
1021
1022
    /// When using pubsub interface, this is the maximum number of items to keep
1023
    /// queued up before dropping old items.
1024
    ///
1025
    /// Default: 4096
1026
    #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")]
1027
    pub broadcast_channel_capacity: usize,
1028
1029
    /// The amount of time in milliseconds until the redis store considers the
1030
    /// command to be timed out. This will trigger a retry of the command and
1031
    /// potentially a reconnection to the redis server.
1032
    ///
1033
    /// Default: 10000 (10 seconds)
1034
    #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")]
1035
    pub command_timeout_ms: u64,
1036
1037
    /// The amount of time in milliseconds until the redis store considers the
1038
    /// connection to be unresponsive. This will trigger a reconnection to the
1039
    /// redis server.
1040
    ///
1041
    /// Default: 3000 (3 seconds)
1042
    #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")]
1043
    pub connection_timeout_ms: u64,
1044
1045
    /// The amount of data to read from the redis server at a time.
1046
    /// This is used to limit the amount of memory used when reading
1047
    /// large objects from the redis server as well as limiting the
1048
    /// amount of time a single read operation can take.
1049
    ///
1050
    /// IMPORTANT: If this value is too high, the `command_timeout_ms`
1051
    /// might be triggered if the latency or throughput to the redis
1052
    /// server is too low.
1053
    ///
1054
    /// Default: 64KiB
1055
    #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")]
1056
    pub read_chunk_size: usize,
1057
1058
    /// The number of connections to keep open to the redis server(s).
1059
    ///
1060
    /// Default: 3
1061
    #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")]
1062
    pub connection_pool_size: usize,
1063
1064
    /// The maximum number of upload chunks to allow per update.
1065
    /// This is used to limit the amount of memory used when uploading
1066
    /// large objects to the redis server. A good rule of thumb is to
1067
    /// think of the data as:
1068
    /// `AVAIL_MEMORY / (read_chunk_size * max_chunk_uploads_per_update) = THEORETICAL_MAX_CONCURRENT_UPLOADS`
1069
    /// (note: it is a good idea to divide `AVAIL_MEMORY` by ~10 to account for other memory usage)
1070
    ///
1071
    /// Default: 10
1072
    #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")]
1073
    pub max_chunk_uploads_per_update: usize,
1074
1075
    /// The COUNT value passed when scanning keys in Redis.
1076
    /// This is used to hint the amount of work that should be done per response.
1077
    ///
1078
    /// Default: 10000
1079
    #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")]
1080
    pub scan_count: u32,
1081
1082
    /// Retry configuration to use when a network request fails.
1083
    /// See the `Retry` struct for more information.
1084
    ///
1085
    /// ```txt
1086
    /// Default: Retry {
1087
    ///   max_retries: 0, /* unlimited */
1088
    ///   delay: 0.1, /* 100ms */
1089
    ///   jitter: 0.5, /* 50% */
1090
    ///   retry_on_errors: None, /* not used in redis store */
1091
    /// }
1092
    /// ```
1093
    #[serde(default)]
1094
    pub retry: Retry,
1095
}
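// A hedged example of a Redis store in cluster mode with a key prefix; the
// addresses and values are assumptions.
// ```json
// "redis_store": {
//   "addresses": [
//     "redis://node1.example.com:6379/",
//     "redis://node2.example.com:6379/"
//   ],
//   "mode": "cluster",
//   "key_prefix": "cas:",
//   "read_chunk_size": 65536,
//   "connection_pool_size": 3
// }
// ```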
1096
1097
#[derive(Debug, Default, Deserialize, Serialize, Clone, Copy, PartialEq, Eq)]
1098
#[serde(rename_all = "snake_case")]
1099
pub enum RedisMode {
1100
    Cluster,
1101
    Sentinel,
1102
    #[default]
1103
    Standard,
1104
}
1105
1106
#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize)]
1107
pub struct NoopSpec {}
1108
1109
/// Retry configuration. The delay is exponential, and on each iteration
1110
/// a jitter percentage is applied to the calculated delay. For example:
1111
/// ```haskell
1112
/// Retry{
1113
///   max_retries: 7,
1114
///   delay: 0.1,
1115
///   jitter: 0.5,
1116
/// }
1117
/// ```
1118
/// will result in:
1119
/// Attempt - Delay
1120
/// 1         0ms
1121
/// 2         75ms - 125ms
1122
/// 3         150ms - 250ms
1123
/// 4         300ms - 500ms
1124
/// 5         600ms - 1s
1125
/// 6         1.2s - 2s
1126
/// 7         2.4s - 4s
1127
/// 8         4.8s - 8s
1128
/// Remember that the delays are additive, meaning that with the above results
1129
/// a single request could see a total delay of 9.525s - 15.875s.
1130
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
1131
#[serde(deny_unknown_fields)]
1132
pub struct Retry {
1133
    /// Maximum number of retries until retrying stops.
1134
    /// Setting this to zero will always attempt 1 time, but not retry.
1135
    #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")]
1136
    pub max_retries: usize,
1137
1138
    /// Delay in seconds for exponential back off.
1139
    #[serde(default)]
1140
    pub delay: f32,
1141
1142
    /// Amount of jitter to add as a percentage in decimal form. This will
1143
    /// change the formula like:
1144
    /// ```haskell
1145
    /// random(
1146
    ///    (2 ^ {attempt_number}) * {delay} * (1 - (jitter / 2)),
1147
    ///    (2 ^ {attempt_number}) * {delay} * (1 + (jitter / 2)),
1148
    /// )
1149
    /// ```
1150
    #[serde(default)]
1151
    pub jitter: f32,
1152
1153
    /// A list of error codes to retry on; if this is not set then the default
1154
    /// error codes to retry on are used.  These default codes are the most
1155
    /// likely to be non-permanent.
1156
    ///  - `Unknown`
1157
    ///  - `Cancelled`
1158
    ///  - `DeadlineExceeded`
1159
    ///  - `ResourceExhausted`
1160
    ///  - `Aborted`
1161
    ///  - `Internal`
1162
    ///  - `Unavailable`
1163
    ///  - `DataLoss`
1164
    #[serde(default)]
1165
    pub retry_on_errors: Option<Vec<ErrorCode>>,
1166
}
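// A hedged retry example that also sets `retry_on_errors`, which the earlier
// examples omit; the values are assumptions and the codes are variant names
// from `ErrorCode` above.
// ```json
// "retry": {
//   "max_retries": 6,
//   "delay": 0.3,
//   "jitter": 0.5,
//   "retry_on_errors": ["Unavailable", "DeadlineExceeded", "ResourceExhausted"]
// }
// ```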
1167
1168
/// Configuration for `ExperimentalMongoDB` store.
1169
#[derive(Serialize, Deserialize, Debug, Clone)]
1170
#[serde(deny_unknown_fields)]
1171
pub struct ExperimentalMongoSpec {
1172
    /// `ExperimentalMongoDB` connection string.
1173
    /// Example: <mongodb://localhost:27017> or <mongodb+srv://cluster.mongodb.net>
1174
    #[serde(deserialize_with = "convert_string_with_shellexpand")]
1175
    pub connection_string: String,
1176
1177
    /// The database name to use.
1178
    /// Default: "nativelink"
1179
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
1180
    pub database: String,
1181
1182
    /// The collection name for CAS data.
1183
    /// Default: "cas"
1184
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
1185
    pub cas_collection: String,
1186
1187
    /// The collection name for scheduler data.
1188
    /// Default: "scheduler"
1189
    #[serde(default, deserialize_with = "convert_string_with_shellexpand")]
1190
    pub scheduler_collection: String,
1191
1192
    /// Prefix to prepend to all keys stored in `MongoDB`.
1193
    /// Default: ""
1194
    #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")]
1195
    pub key_prefix: Option<String>,
1196
1197
    /// The maximum amount of data to read from `MongoDB` in a single chunk (in bytes).
1198
    /// Default: 65536 (64KB)
1199
    #[serde(default, deserialize_with = "convert_data_size_with_shellexpand")]
1200
    pub read_chunk_size: usize,
1201
1202
    /// Maximum number of concurrent uploads allowed.
1203
    /// Default: 10
1204
    #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")]
1205
    pub max_concurrent_uploads: usize,
1206
1207
    /// Connection timeout in milliseconds.
1208
    /// Default: 3000
1209
    #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")]
1210
    pub connection_timeout_ms: u64,
1211
1212
    /// Command timeout in milliseconds.
1213
    /// Default: 10000
1214
    #[serde(default, deserialize_with = "convert_numeric_with_shellexpand")]
1215
    pub command_timeout_ms: u64,
1216
1217
    /// Enable `MongoDB` change streams for real-time updates.
1218
    /// Required for scheduler subscriptions.
1219
    /// Default: false
1220
    #[serde(default)]
1221
    pub enable_change_streams: bool,
1222
1223
    /// Write concern 'w' parameter.
1224
    /// Can be a number (e.g., 1) or string (e.g., "majority").
1225
    /// Default: None (uses `MongoDB` default)
1226
    #[serde(default, deserialize_with = "convert_optional_string_with_shellexpand")]
1227
    pub write_concern_w: Option<String>,
1228
1229
    /// Write concern 'j' parameter (journal acknowledgment).
1230
    /// Default: None (uses `MongoDB` default)
1231
    #[serde(default)]
1232
    pub write_concern_j: Option<bool>,
1233
1234
    /// Write concern timeout in milliseconds.
1235
    /// Default: None (uses `MongoDB` default)
1236
    #[serde(
1237
        default,
1238
        deserialize_with = "convert_optional_numeric_with_shellexpand"
1239
    )]
1240
    pub write_concern_timeout_ms: Option<u32>,
1241
}
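// A hedged MongoDB example that also sets the scheduler collection and the
// write-concern fields, which the earlier example omits; values are
// assumptions.
// ```json
// "experimental_mongo": {
//   "connection_string": "mongodb://localhost:27017",
//   "database": "nativelink",
//   "cas_collection": "cas",
//   "scheduler_collection": "scheduler",
//   "write_concern_w": "majority",
//   "write_concern_j": true,
//   "write_concern_timeout_ms": 5000
// }
// ```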