/build/source/nativelink-store/src/filesystem_store.rs
Line | Count | Source |
1 | | // Copyright 2024 The NativeLink Authors. All rights reserved. |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // http://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | |
15 | | use std::borrow::Cow; |
16 | | use std::ffi::{OsStr, OsString}; |
17 | | use std::fmt::{Debug, Formatter}; |
18 | | use std::pin::Pin; |
19 | | use std::sync::atomic::{AtomicU64, Ordering}; |
20 | | use std::sync::{Arc, Weak}; |
21 | | use std::time::SystemTime; |
22 | | |
23 | | use async_lock::RwLock; |
24 | | use async_trait::async_trait; |
25 | | use bytes::BytesMut; |
26 | | use futures::stream::{StreamExt, TryStreamExt}; |
27 | | use futures::{Future, TryFutureExt}; |
28 | | use nativelink_config::stores::FilesystemSpec; |
29 | | use nativelink_error::{Code, Error, ResultExt, make_err, make_input_err}; |
30 | | use nativelink_metric::MetricsComponent; |
31 | | use nativelink_util::background_spawn; |
32 | | use nativelink_util::buf_channel::{ |
33 | | DropCloserReadHalf, DropCloserWriteHalf, make_buf_channel_pair, |
34 | | }; |
35 | | use nativelink_util::common::{DigestInfo, fs}; |
36 | | use nativelink_util::evicting_map::{EvictingMap, LenEntry}; |
37 | | use nativelink_util::health_utils::{HealthRegistryBuilder, HealthStatus, HealthStatusIndicator}; |
38 | | use nativelink_util::store_trait::{ |
39 | | StoreDriver, StoreKey, StoreKeyBorrow, StoreOptimizations, UploadSizeInfo, |
40 | | }; |
41 | | use tokio::io::{AsyncReadExt, AsyncWriteExt, Take}; |
42 | | use tokio_stream::wrappers::ReadDirStream; |
43 | | use tracing::{Level, event}; |
44 | | |
45 | | use crate::cas_utils::is_zero_digest; |
46 | | |
47 | | // Default size to allocate memory of the buffer when reading files. |
48 | | const DEFAULT_BUFF_SIZE: usize = 32 * 1024; |
49 | | // Default block size of all major filesystems is 4KB |
50 | | const DEFAULT_BLOCK_SIZE: u64 = 4 * 1024; |
51 | | |
52 | | pub const STR_FOLDER: &str = "s"; |
53 | | pub const DIGEST_FOLDER: &str = "d"; |
54 | | |
55 | | #[derive(Clone, Copy, Debug)] |
56 | | pub enum FileType { |
57 | | Digest, |
58 | | String, |
59 | | } |
60 | | |
61 | | #[derive(Debug, MetricsComponent)] |
62 | | pub struct SharedContext { |
63 | | // Used in testing to know how many active drop() spawns are running. |
64 | | // TODO(allada) It is probably a good idea to use a spin lock during |
65 | | // destruction of the store to ensure that all files are actually |
66 | | // deleted (similar to how it is done in tests). |
67 | | #[metric(help = "Number of active drop spawns")] |
68 | | pub active_drop_spawns: AtomicU64, |
69 | | #[metric(help = "Path to the configured temp path")] |
70 | | temp_path: String, |
71 | | #[metric(help = "Path to the configured content path")] |
72 | | content_path: String, |
73 | | } |
74 | | |
75 | | #[derive(Eq, PartialEq, Debug)] |
76 | | enum PathType { |
77 | | Content, |
78 | | Temp, |
79 | | Custom(OsString), |
80 | | } |
81 | | |
82 | | /// [`EncodedFilePath`] stores the path to the file |
83 | | /// including the context, path type and key to the file. |
84 | | /// The whole [`StoreKey`] is stored as opposed to solely |
85 | | /// the [`DigestInfo`] so that it is more usable for things |
86 | | /// such as BEP -see Issue #1108 |
87 | | #[derive(Debug)] |
88 | | pub struct EncodedFilePath { |
89 | | shared_context: Arc<SharedContext>, |
90 | | path_type: PathType, |
91 | | key: StoreKey<'static>, |
92 | | } |
93 | | |
94 | | impl EncodedFilePath { |
95 | | #[inline] |
96 | 330 | fn get_file_path(&self) -> Cow<'_, OsStr> { |
97 | 330 | get_file_path_raw(&self.path_type, self.shared_context.as_ref(), &self.key) |
98 | 330 | } |
99 | | } |
100 | | |
101 | | #[inline] |
102 | 447 | fn get_file_path_raw<'a>( |
103 | 447 | path_type: &'a PathType, |
104 | 447 | shared_context: &SharedContext, |
105 | 447 | key: &StoreKey<'a>, |
106 | 447 | ) -> Cow<'a, OsStr> { |
107 | 447 | let folder438 = match path_type { |
108 | 195 | PathType::Content => &shared_context.content_path, |
109 | 243 | PathType::Temp => &shared_context.temp_path, |
110 | 9 | PathType::Custom(path) => return Cow::Borrowed(path), |
111 | | }; |
112 | 438 | Cow::Owned(to_full_path_from_key(folder, key)) |
113 | 447 | } |
114 | | |
115 | | impl Drop for EncodedFilePath { |
116 | 118 | fn drop(&mut self) { |
117 | 118 | // `drop()` can be called during shutdown, so we use `path_type` flag to know if the |
118 | 118 | // file actually needs to be deleted. |
119 | 118 | if self.path_type == PathType::Content { Branch (119:12): [True: 92, False: 26]
Branch (119:12): [Folded - Ignored]
|
120 | 92 | return; |
121 | 26 | } |
122 | 26 | |
123 | 26 | let file_path = self.get_file_path().to_os_string(); |
124 | 26 | let shared_context = self.shared_context.clone(); |
125 | 26 | shared_context |
126 | 26 | .active_drop_spawns |
127 | 26 | .fetch_add(1, Ordering::Relaxed); |
128 | 26 | background_spawn!("filesystem_delete_file", async move { |
129 | 25 | event!(Level::INFO, ?file_path, "File deleted"0 ,); |
130 | 25 | let result23 = fs::remove_file(&file_path) |
131 | 25 | .await |
132 | 23 | .err_tip(|| format!("Failed to remove file {file_path:?}")0 ); |
133 | 23 | if let Err(err0 ) = result { Branch (133:20): [True: 0, False: 23]
Branch (133:20): [Folded - Ignored]
|
134 | 0 | event!(Level::ERROR, ?file_path, ?err, "Failed to delete file",); |
135 | 23 | } |
136 | 23 | shared_context |
137 | 23 | .active_drop_spawns |
138 | 23 | .fetch_sub(1, Ordering::Relaxed); |
139 | 23 | }); |
140 | 118 | } |
141 | | } |
142 | | |
143 | | /// This creates the file path from the [`StoreKey`]. If |
144 | | /// it is a string, the string, prefixed with [`STR_PREFIX`] |
145 | | /// for backwards compatibility, is stored. |
146 | | /// |
147 | | /// If it is a [`DigestInfo`], it is prefixed by [`DIGEST_PREFIX`] |
148 | | /// followed by the string representation of a digest - the hash in hex, |
149 | | /// a hyphen then the size in bytes |
150 | | /// |
151 | | /// Previously, only the string representation of the [`DigestInfo`] was |
152 | | /// used with no prefix |
153 | | #[inline] |
154 | 465 | fn to_full_path_from_key(folder: &str, key: &StoreKey<'_>) -> OsString { |
155 | 465 | match key { |
156 | 3 | StoreKey::Str(str) => format!("{folder}/{STR_FOLDER}/{str}"), |
157 | 462 | StoreKey::Digest(digest_info) => format!("{folder}/{DIGEST_FOLDER}/{digest_info}"), |
158 | | } |
159 | 465 | .into() |
160 | 465 | } |
161 | | |
162 | | pub trait FileEntry: LenEntry + Send + Sync + Debug + 'static { |
163 | | /// Responsible for creating the underlying `FileEntry`. |
164 | | fn create(data_size: u64, block_size: u64, encoded_file_path: RwLock<EncodedFilePath>) -> Self; |
165 | | |
166 | | /// Creates a (usually) temp file, opens it and returns the path to the temp file. |
167 | | fn make_and_open_file( |
168 | | block_size: u64, |
169 | | encoded_file_path: EncodedFilePath, |
170 | | ) -> impl Future<Output = Result<(Self, fs::FileSlot, OsString), Error>> + Send |
171 | | where |
172 | | Self: Sized; |
173 | | |
174 | | /// Returns the underlying reference to the size of the data in bytes |
175 | | fn data_size_mut(&mut self) -> &mut u64; |
176 | | |
177 | | /// Returns the actual size of the underlying file on the disk after accounting for filesystem block size. |
178 | | fn size_on_disk(&self) -> u64; |
179 | | |
180 | | /// Gets the underlying `EncodedfilePath`. |
181 | | fn get_encoded_file_path(&self) -> &RwLock<EncodedFilePath>; |
182 | | |
183 | | /// Returns a reader that will read part of the underlying file. |
184 | | fn read_file_part( |
185 | | &self, |
186 | | offset: u64, |
187 | | length: u64, |
188 | | ) -> impl Future<Output = Result<Take<fs::FileSlot>, Error>> + Send; |
189 | | |
190 | | /// This function is a safe way to extract the file name of the underlying file. To protect users from |
191 | | /// accidentally creating undefined behavior we encourage users to do the logic they need to do with |
192 | | /// the filename inside this function instead of extracting the filename and doing the logic outside. |
193 | | /// This is because the filename is not guaranteed to exist after this function returns, however inside |
194 | | /// the callback the file is always guaranteed to exist and immutable. |
195 | | /// DO NOT USE THIS FUNCTION TO EXTRACT THE FILENAME AND STORE IT FOR LATER USE. |
196 | | fn get_file_path_locked< |
197 | | T, |
198 | | Fut: Future<Output = Result<T, Error>> + Send, |
199 | | F: FnOnce(OsString) -> Fut + Send, |
200 | | >( |
201 | | &self, |
202 | | handler: F, |
203 | | ) -> impl Future<Output = Result<T, Error>> + Send; |
204 | | } |
205 | | |
206 | | pub struct FileEntryImpl { |
207 | | data_size: u64, |
208 | | block_size: u64, |
209 | | encoded_file_path: RwLock<EncodedFilePath>, |
210 | | } |
211 | | |
212 | | impl FileEntryImpl { |
213 | 9 | pub fn get_shared_context_for_test(&mut self) -> Arc<SharedContext> { |
214 | 9 | self.encoded_file_path.get_mut().shared_context.clone() |
215 | 9 | } |
216 | | } |
217 | | |
218 | | impl FileEntry for FileEntryImpl { |
219 | 118 | fn create(data_size: u64, block_size: u64, encoded_file_path: RwLock<EncodedFilePath>) -> Self { |
220 | 118 | Self { |
221 | 118 | data_size, |
222 | 118 | block_size, |
223 | 118 | encoded_file_path, |
224 | 118 | } |
225 | 118 | } |
226 | | |
227 | | /// This encapsulates the logic for the edge case of if the file fails to create |
228 | | /// the cleanup of the file is handled without creating a `FileEntry`, which would |
229 | | /// try to cleanup the file as well during `drop()`. |
230 | 108 | async fn make_and_open_file( |
231 | 108 | block_size: u64, |
232 | 108 | encoded_file_path: EncodedFilePath, |
233 | 108 | ) -> Result<(FileEntryImpl, fs::FileSlot, OsString), Error> { |
234 | 108 | let temp_full_path = encoded_file_path.get_file_path().to_os_string(); |
235 | 108 | let temp_file_result = fs::create_file(temp_full_path.clone()) |
236 | 108 | .or_else(|mut err| async {0 |
237 | 0 | let remove_result = fs::remove_file(&temp_full_path).await.err_tip(|| { |
238 | 0 | format!("Failed to remove file {temp_full_path:?} in filesystem store") |
239 | 0 | }); |
240 | 0 | if let Err(remove_err) = remove_result { Branch (240:24): [True: 0, False: 0]
Branch (240:24): [True: 0, False: 0]
Branch (240:24): [True: 0, False: 0]
Branch (240:24): [Folded - Ignored]
Branch (240:24): [True: 0, False: 0]
|
241 | 0 | err = err.merge(remove_err); |
242 | 0 | } |
243 | 0 | event!( |
244 | 0 | Level::WARN, |
245 | | ?err, |
246 | | ?block_size, |
247 | | ?temp_full_path, |
248 | 0 | "Failed to create file", |
249 | | ); |
250 | 0 | Err(err) |
251 | 0 | .err_tip(|| format!("Failed to create {temp_full_path:?} in filesystem store")) |
252 | 0 | }) |
253 | 108 | .await?0 ; |
254 | | |
255 | 108 | Ok(( |
256 | 108 | <FileEntryImpl as FileEntry>::create( |
257 | 108 | 0, /* Unknown yet, we will fill it in later */ |
258 | 108 | block_size, |
259 | 108 | RwLock::new(encoded_file_path), |
260 | 108 | ), |
261 | 108 | temp_file_result, |
262 | 108 | temp_full_path, |
263 | 108 | )) |
264 | 108 | } |
265 | | |
266 | 108 | fn data_size_mut(&mut self) -> &mut u64 { |
267 | 108 | &mut self.data_size |
268 | 108 | } |
269 | | |
270 | 245 | fn size_on_disk(&self) -> u64 { |
271 | 245 | self.data_size.div_ceil(self.block_size) * self.block_size |
272 | 245 | } |
273 | | |
274 | 169 | fn get_encoded_file_path(&self) -> &RwLock<EncodedFilePath> { |
275 | 169 | &self.encoded_file_path |
276 | 169 | } |
277 | | |
278 | 46 | fn read_file_part( |
279 | 46 | &self, |
280 | 46 | offset: u64, |
281 | 46 | length: u64, |
282 | 46 | ) -> impl Future<Output = Result<Take<fs::FileSlot>, Error>> + Send { |
283 | 46 | self.get_file_path_locked(move |full_content_path| async move { |
284 | 46 | let file44 = fs::open_file(&full_content_path, offset, length) |
285 | 46 | .await |
286 | 46 | .err_tip(|| { |
287 | 2 | format!("Failed to open file in filesystem store {full_content_path:?}") |
288 | 2 | })?; |
289 | 44 | Ok(file) |
290 | 92 | }) |
291 | 46 | } |
292 | | |
293 | 52 | async fn get_file_path_locked< |
294 | 52 | T, |
295 | 52 | Fut: Future<Output = Result<T, Error>> + Send, |
296 | 52 | F: FnOnce(OsString) -> Fut + Send, |
297 | 52 | >( |
298 | 52 | &self, |
299 | 52 | handler: F, |
300 | 52 | ) -> Result<T, Error> { |
301 | 52 | let encoded_file_path = self.get_encoded_file_path().read().await; |
302 | 52 | handler(encoded_file_path.get_file_path().to_os_string()).await |
303 | 52 | } |
304 | | } |
305 | | |
306 | | impl Debug for FileEntryImpl { |
307 | 0 | fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { |
308 | 0 | f.debug_struct("FileEntryImpl") |
309 | 0 | .field("data_size", &self.data_size) |
310 | 0 | .field("encoded_file_path", &"<behind mutex>") |
311 | 0 | .finish() |
312 | 0 | } |
313 | | } |
314 | | |
315 | 135 | fn make_temp_digest(mut digest: DigestInfo) -> DigestInfo { |
316 | | static DELETE_FILE_COUNTER: AtomicU64 = AtomicU64::new(0); |
317 | 135 | let mut hash = *digest.packed_hash(); |
318 | 135 | hash[24..].clone_from_slice( |
319 | 135 | &DELETE_FILE_COUNTER |
320 | 135 | .fetch_add(1, Ordering::Relaxed) |
321 | 135 | .to_le_bytes(), |
322 | 135 | ); |
323 | 135 | digest.set_packed_hash(*hash); |
324 | 135 | digest |
325 | 135 | } |
326 | | |
327 | 135 | fn make_temp_key(key: &StoreKey) -> StoreKey<'static> { |
328 | 135 | StoreKey::Digest(make_temp_digest(key.borrow().into_digest())) |
329 | 135 | } |
330 | | |
331 | | impl LenEntry for FileEntryImpl { |
332 | | #[inline] |
333 | 243 | fn len(&self) -> u64 { |
334 | 243 | self.size_on_disk() |
335 | 243 | } |
336 | | |
337 | 0 | fn is_empty(&self) -> bool { |
338 | 0 | self.data_size == 0 |
339 | 0 | } |
340 | | |
341 | | // unref() only triggers when an item is removed from the eviction_map. It is possible |
342 | | // that another place in code has a reference to `FileEntryImpl` and may later read the |
343 | | // file. To support this edge case, we first move the file to a temp file and point |
344 | | // target file location to the new temp file. `unref()` should only ever be called once. |
345 | | #[inline] |
346 | 28 | async fn unref(&self) { |
347 | | { |
348 | 28 | let mut encoded_file_path = self.encoded_file_path.write().await; |
349 | 28 | if encoded_file_path.path_type == PathType::Temp { Branch (349:16): [True: 0, False: 0]
Branch (349:16): [True: 1, False: 9]
Branch (349:16): [True: 0, False: 0]
Branch (349:16): [True: 0, False: 0]
Branch (349:16): [Folded - Ignored]
Branch (349:16): [True: 0, False: 18]
|
350 | | // We are already a temp file that is now marked for deletion on drop. |
351 | | // This is very rare, but most likely the rename into the content path failed. |
352 | 1 | return; |
353 | 27 | } |
354 | 27 | let from_path = encoded_file_path.get_file_path(); |
355 | 27 | let new_key = make_temp_key(&encoded_file_path.key); |
356 | 27 | |
357 | 27 | let to_path = |
358 | 27 | to_full_path_from_key(&encoded_file_path.shared_context.temp_path, &new_key); |
359 | | |
360 | 27 | if let Err(err2 ) = fs::rename(&from_path, &to_path).await { Branch (360:20): [True: 0, False: 0]
Branch (360:20): [True: 2, False: 7]
Branch (360:20): [True: 0, False: 0]
Branch (360:20): [True: 0, False: 0]
Branch (360:20): [Folded - Ignored]
Branch (360:20): [True: 0, False: 18]
|
361 | 2 | event!( |
362 | 2 | Level::WARN, |
363 | 2 | key = ?encoded_file_path.key, |
364 | 2 | ?from_path, |
365 | 2 | ?to_path, |
366 | 2 | ?err, |
367 | 2 | "Failed to rename file", |
368 | | ); |
369 | | } else { |
370 | 25 | event!( |
371 | 25 | Level::INFO, |
372 | 0 | key = ?encoded_file_path.key, |
373 | 0 | ?from_path, |
374 | 0 | ?to_path, |
375 | 0 | "Renamed file", |
376 | | ); |
377 | 25 | encoded_file_path.path_type = PathType::Temp; |
378 | 25 | encoded_file_path.key = new_key; |
379 | | } |
380 | | } |
381 | 28 | } |
382 | | } |
383 | | |
384 | | #[inline] |
385 | 2 | fn digest_from_filename(file_name: &str) -> Result<DigestInfo, Error> { |
386 | 2 | let (hash, size) = file_name.split_once('-').err_tip(|| ""0 )?0 ; |
387 | 2 | let size = size.parse::<i64>()?0 ; |
388 | 2 | DigestInfo::try_new(hash, size) |
389 | 2 | } |
390 | | |
391 | 2 | pub fn key_from_file(file_name: &str, file_type: FileType) -> Result<StoreKey<'_>, Error> { |
392 | 2 | match file_type { |
393 | 0 | FileType::String => Ok(StoreKey::new_str(file_name)), |
394 | 2 | FileType::Digest => digest_from_filename(file_name).map(StoreKey::Digest), |
395 | | } |
396 | 2 | } |
397 | | |
398 | | /// The number of files to read the metadata for at the same time when running |
399 | | /// `add_files_to_cache`. |
400 | | const SIMULTANEOUS_METADATA_READS: usize = 200; |
401 | | |
402 | 41 | async fn add_files_to_cache<Fe: FileEntry>( |
403 | 41 | evicting_map: &EvictingMap<StoreKeyBorrow, Arc<Fe>, SystemTime>, |
404 | 41 | anchor_time: &SystemTime, |
405 | 41 | shared_context: &Arc<SharedContext>, |
406 | 41 | block_size: u64, |
407 | 41 | rename_fn: fn(&OsStr, &OsStr) -> Result<(), std::io::Error>, |
408 | 41 | ) -> Result<(), Error> { |
409 | | #[expect(clippy::too_many_arguments)] |
410 | 1 | async fn process_entry<Fe: FileEntry>( |
411 | 1 | evicting_map: &EvictingMap<StoreKeyBorrow, Arc<Fe>, SystemTime>, |
412 | 1 | file_name: &str, |
413 | 1 | file_type: FileType, |
414 | 1 | atime: SystemTime, |
415 | 1 | data_size: u64, |
416 | 1 | block_size: u64, |
417 | 1 | anchor_time: &SystemTime, |
418 | 1 | shared_context: &Arc<SharedContext>, |
419 | 1 | ) -> Result<(), Error> { |
420 | 1 | let key = key_from_file(file_name, file_type)?0 ; |
421 | | |
422 | 1 | let file_entry = Fe::create( |
423 | 1 | data_size, |
424 | 1 | block_size, |
425 | 1 | RwLock::new(EncodedFilePath { |
426 | 1 | shared_context: shared_context.clone(), |
427 | 1 | path_type: PathType::Content, |
428 | 1 | key: key.borrow().into_owned(), |
429 | 1 | }), |
430 | | ); |
431 | 1 | let time_since_anchor = anchor_time |
432 | 1 | .duration_since(atime) |
433 | 1 | .map_err(|_| make_input_err!("File access time newer than now")0 )?0 ; |
434 | 1 | evicting_map |
435 | 1 | .insert_with_time( |
436 | 1 | key.into_owned().into(), |
437 | 1 | Arc::new(file_entry), |
438 | 1 | time_since_anchor.as_secs() as i32, |
439 | 1 | ) |
440 | 1 | .await; |
441 | 1 | Ok(()) |
442 | 1 | } |
443 | | |
444 | 123 | async fn read_files( |
445 | 123 | folder: Option<&str>, |
446 | 123 | shared_context: &SharedContext, |
447 | 123 | ) -> Result<Vec<(String, SystemTime, u64, bool)>, Error> { |
448 | | // Note: In Dec 2024 this is for backwards compatibility with the old |
449 | | // way files were stored on disk. Previously all files were in a single |
450 | | // folder regardless of the StoreKey type. This allows old versions of |
451 | | // nativelink file layout to be upgraded at startup time. |
452 | | // This logic can be removed once more time has passed. |
453 | 123 | let read_dir = if let Some(folder82 ) = folder { Branch (453:31): [True: 0, False: 0]
Branch (453:31): [True: 32, False: 16]
Branch (453:31): [True: 4, False: 2]
Branch (453:31): [Folded - Ignored]
Branch (453:31): [True: 46, False: 23]
|
454 | 82 | format!("{}/{folder}/", shared_context.content_path) |
455 | | } else { |
456 | 41 | format!("{}/", shared_context.content_path) |
457 | | }; |
458 | 123 | let (_permit, dir_handle) = fs::read_dir(read_dir) |
459 | 123 | .await |
460 | 123 | .err_tip(|| "Failed opening content directory for iterating in filesystem store"0 )?0 |
461 | 123 | .into_inner(); |
462 | 123 | |
463 | 123 | let read_dir_stream = ReadDirStream::new(dir_handle); |
464 | 123 | read_dir_stream |
465 | 123 | .map(|dir_entry| async move { |
466 | 83 | let dir_entry = dir_entry.unwrap(); |
467 | 83 | let file_name = dir_entry.file_name().into_string().unwrap(); |
468 | 83 | let metadata = dir_entry |
469 | 83 | .metadata() |
470 | 83 | .await |
471 | 83 | .err_tip(|| "Failed to get metadata in filesystem store"0 )?0 ; |
472 | | // We need to filter out folders - we do not want to try to cache the s and d folders. |
473 | 83 | let is_file = |
474 | 83 | metadata.is_file() || !(file_name == STR_FOLDER82 || file_name == DIGEST_FOLDER41 ); Branch (474:21): [True: 0, False: 0]
Branch (474:45): [True: 0, False: 0]
Branch (474:21): [True: 1, False: 32]
Branch (474:45): [True: 16, False: 16]
Branch (474:21): [True: 0, False: 4]
Branch (474:45): [True: 2, False: 2]
Branch (474:21): [Folded - Ignored]
Branch (474:45): [Folded - Ignored]
Branch (474:21): [True: 0, False: 46]
Branch (474:45): [True: 23, False: 23]
|
475 | | // Using access time is not perfect, but better than random. We do not update the |
476 | | // atime when a file is actually "touched", we rely on whatever the filesystem does |
477 | | // when we read the file (usually update on read). |
478 | 83 | let atime = metadata |
479 | 83 | .accessed() |
480 | 83 | .or_else(|_| metadata.modified()0 ) |
481 | 83 | .unwrap_or(SystemTime::UNIX_EPOCH); |
482 | 83 | Result::<(String, SystemTime, u64, bool), Error>::Ok(( |
483 | 83 | file_name, |
484 | 83 | atime, |
485 | 83 | metadata.len(), |
486 | 83 | is_file, |
487 | 83 | )) |
488 | 166 | }) |
489 | 123 | .buffer_unordered(SIMULTANEOUS_METADATA_READS) |
490 | 123 | .try_collect() |
491 | 123 | .await |
492 | 123 | } |
493 | | |
494 | | /// Note: In Dec 2024 this is for backwards compatibility with the old |
495 | | /// way files were stored on disk. Previously all files were in a single |
496 | | /// folder regardless of the [`StoreKey`] type. This moves files from the old cache |
497 | | /// location to the new cache location, under [`DIGEST_FOLDER`]. |
498 | 41 | async fn move_old_cache( |
499 | 41 | shared_context: &Arc<SharedContext>, |
500 | 41 | rename_fn: fn(&OsStr, &OsStr) -> Result<(), std::io::Error>, |
501 | 41 | ) -> Result<(), Error> { |
502 | 41 | let file_infos = read_files(None, shared_context).await?0 ; |
503 | | |
504 | 41 | let from_path = shared_context.content_path.to_string(); |
505 | 41 | |
506 | 41 | let to_path = format!("{}/{DIGEST_FOLDER}", shared_context.content_path); |
507 | | |
508 | 82 | for (file_name0 , _, _, _) in file_infos.into_iter().filter41 (|x| x.3) { |
509 | 0 | let from_file: OsString = format!("{from_path}/{file_name}").into(); |
510 | 0 | let to_file: OsString = format!("{to_path}/{file_name}").into(); |
511 | | |
512 | 0 | if let Err(err) = rename_fn(&from_file, &to_file) { Branch (512:20): [True: 0, False: 0]
Branch (512:20): [True: 0, False: 0]
Branch (512:20): [True: 0, False: 0]
Branch (512:20): [Folded - Ignored]
Branch (512:20): [True: 0, False: 0]
|
513 | 0 | event!( |
514 | 0 | Level::WARN, |
515 | | ?from_file, |
516 | | ?to_file, |
517 | | ?err, |
518 | 0 | "Failed to rename file", |
519 | | ); |
520 | | } else { |
521 | 0 | event!(Level::INFO, ?from_file, ?to_file, "Renamed file",); |
522 | | } |
523 | | } |
524 | 41 | Ok(()) |
525 | 41 | } |
526 | | |
527 | 82 | async fn add_files_to_cache<Fe: FileEntry>( |
528 | 82 | evicting_map: &EvictingMap<StoreKeyBorrow, Arc<Fe>, SystemTime>, |
529 | 82 | anchor_time: &SystemTime, |
530 | 82 | shared_context: &Arc<SharedContext>, |
531 | 82 | block_size: u64, |
532 | 82 | folder: &str, |
533 | 82 | ) -> Result<(), Error> { |
534 | 82 | let file_infos = read_files(Some(folder), shared_context).await?0 ; |
535 | 82 | let file_type = match folder { |
536 | 82 | STR_FOLDER => FileType::String41 , |
537 | 41 | DIGEST_FOLDER => FileType::Digest, |
538 | 0 | _ => panic!("Invalid folder type"), |
539 | | }; |
540 | | |
541 | 82 | let path_root = format!("{}/{folder}", shared_context.content_path); |
542 | | |
543 | 82 | for (file_name, atime, data_size1 , _) in file_infos.into_iter().filter(|x| x.31 ) { |
544 | 1 | let result = process_entry( |
545 | 1 | evicting_map, |
546 | 1 | &file_name, |
547 | 1 | file_type, |
548 | 1 | atime, |
549 | 1 | data_size, |
550 | 1 | block_size, |
551 | 1 | anchor_time, |
552 | 1 | shared_context, |
553 | 1 | ) |
554 | 1 | .await; |
555 | 1 | if let Err(err0 ) = result { Branch (555:20): [True: 0, False: 0]
Branch (555:20): [True: 0, False: 0]
Branch (555:20): [True: 0, False: 0]
Branch (555:20): [True: 0, False: 0]
Branch (555:20): [True: 0, False: 0]
Branch (555:20): [True: 0, False: 0]
Branch (555:20): [True: 0, False: 1]
Branch (555:20): [True: 0, False: 0]
Branch (555:20): [Folded - Ignored]
Branch (555:20): [True: 0, False: 0]
|
556 | 0 | event!( |
557 | 0 | Level::WARN, |
558 | | ?file_name, |
559 | | ?err, |
560 | 0 | "Failed to add file to eviction cache", |
561 | | ); |
562 | | // Ignore result. |
563 | 0 | drop(fs::remove_file(format!("{path_root}/{file_name}")).await); |
564 | 1 | } |
565 | | } |
566 | 82 | Ok(()) |
567 | 82 | } |
568 | | |
569 | 41 | move_old_cache(shared_context, rename_fn).await?0 ; |
570 | | |
571 | 41 | add_files_to_cache( |
572 | 41 | evicting_map, |
573 | 41 | anchor_time, |
574 | 41 | shared_context, |
575 | 41 | block_size, |
576 | 41 | DIGEST_FOLDER, |
577 | 41 | ) |
578 | 41 | .await?0 ; |
579 | | |
580 | 41 | add_files_to_cache( |
581 | 41 | evicting_map, |
582 | 41 | anchor_time, |
583 | 41 | shared_context, |
584 | 41 | block_size, |
585 | 41 | STR_FOLDER, |
586 | 41 | ) |
587 | 41 | .await?0 ; |
588 | 41 | Ok(()) |
589 | 41 | } |
590 | | |
591 | 41 | async fn prune_temp_path(temp_path: &str) -> Result<(), Error> { |
592 | 82 | async fn prune_temp_inner(temp_path: &str, subpath: &str) -> Result<(), Error> { |
593 | 82 | let (_permit, dir_handle) = fs::read_dir(format!("{temp_path}/{subpath}")) |
594 | 82 | .await |
595 | 82 | .err_tip( |
596 | 0 | || "Failed opening temp directory to prune partial downloads in filesystem store", |
597 | 0 | )? |
598 | 82 | .into_inner(); |
599 | 82 | |
600 | 82 | let mut read_dir_stream = ReadDirStream::new(dir_handle); |
601 | 82 | while let Some(dir_entry0 ) = read_dir_stream.next().await { Branch (601:19): [True: 0, False: 0]
Branch (601:19): [True: 0, False: 32]
Branch (601:19): [True: 0, False: 4]
Branch (601:19): [Folded - Ignored]
Branch (601:19): [True: 0, False: 46]
|
602 | 0 | let path = dir_entry?.path(); |
603 | 0 | if let Err(err) = fs::remove_file(&path).await { Branch (603:20): [True: 0, False: 0]
Branch (603:20): [True: 0, False: 0]
Branch (603:20): [True: 0, False: 0]
Branch (603:20): [Folded - Ignored]
Branch (603:20): [True: 0, False: 0]
|
604 | 0 | event!(Level::WARN, ?path, ?err, "Failed to delete file",); |
605 | 0 | } |
606 | | } |
607 | 82 | Ok(()) |
608 | 82 | } |
609 | | |
610 | 41 | prune_temp_inner(temp_path, STR_FOLDER).await?0 ; |
611 | 41 | prune_temp_inner(temp_path, DIGEST_FOLDER).await?0 ; |
612 | 41 | Ok(()) |
613 | 41 | } |
614 | | |
615 | | #[derive(Debug, MetricsComponent)] |
616 | | pub struct FilesystemStore<Fe: FileEntry = FileEntryImpl> { |
617 | | #[metric] |
618 | | shared_context: Arc<SharedContext>, |
619 | | #[metric(group = "evicting_map")] |
620 | | evicting_map: Arc<EvictingMap<StoreKeyBorrow, Arc<Fe>, SystemTime>>, |
621 | | #[metric(help = "Block size of the configured filesystem")] |
622 | | block_size: u64, |
623 | | #[metric(help = "Size of the configured read buffer size")] |
624 | | read_buffer_size: usize, |
625 | | weak_self: Weak<Self>, |
626 | | rename_fn: fn(&OsStr, &OsStr) -> Result<(), std::io::Error>, |
627 | | } |
628 | | |
629 | | impl<Fe: FileEntry> FilesystemStore<Fe> { |
630 | 34 | pub async fn new(spec: &FilesystemSpec) -> Result<Arc<Self>, Error> { |
631 | 107 | Self::new_with_timeout_and_rename_fn(spec, |from, to| std::fs::rename(from, to)).await34 |
632 | 34 | } |
633 | | |
634 | 41 | pub async fn new_with_timeout_and_rename_fn( |
635 | 41 | spec: &FilesystemSpec, |
636 | 41 | rename_fn: fn(&OsStr, &OsStr) -> Result<(), std::io::Error>, |
637 | 41 | ) -> Result<Arc<Self>, Error> { |
638 | 82 | async fn create_subdirs(path: &str) -> Result<(), Error> { |
639 | 82 | fs::create_dir_all(format!("{path}/{STR_FOLDER}")) |
640 | 82 | .await |
641 | 82 | .err_tip(|| format!("Failed to create directory {path}/{STR_FOLDER}")0 )?0 ; |
642 | 82 | fs::create_dir_all(format!("{path}/{DIGEST_FOLDER}")) |
643 | 82 | .await |
644 | 82 | .err_tip(|| format!("Failed to create directory {path}/{DIGEST_FOLDER}")0 ) |
645 | 82 | } |
646 | | |
647 | 41 | let now = SystemTime::now(); |
648 | 41 | |
649 | 41 | let empty_policy = nativelink_config::stores::EvictionPolicy::default(); |
650 | 41 | let eviction_policy = spec.eviction_policy.as_ref().unwrap_or(&empty_policy); |
651 | 41 | let evicting_map = Arc::new(EvictingMap::new(eviction_policy, now)); |
652 | 41 | |
653 | 41 | // Create temp and content directories and the s and d subdirectories. |
654 | 41 | |
655 | 41 | create_subdirs(&spec.temp_path).await?0 ; |
656 | 41 | create_subdirs(&spec.content_path).await?0 ; |
657 | | |
658 | 41 | let shared_context = Arc::new(SharedContext { |
659 | 41 | active_drop_spawns: AtomicU64::new(0), |
660 | 41 | temp_path: spec.temp_path.clone(), |
661 | 41 | content_path: spec.content_path.clone(), |
662 | 41 | }); |
663 | | |
664 | 41 | let block_size = if spec.block_size == 0 { Branch (664:29): [True: 0, False: 0]
Branch (664:29): [True: 0, False: 1]
Branch (664:29): [True: 0, False: 1]
Branch (664:29): [True: 1, False: 0]
Branch (664:29): [True: 0, False: 1]
Branch (664:29): [True: 1, False: 0]
Branch (664:29): [True: 10, False: 1]
Branch (664:29): [True: 2, False: 0]
Branch (664:29): [Folded - Ignored]
Branch (664:29): [True: 23, False: 0]
|
665 | 37 | DEFAULT_BLOCK_SIZE |
666 | | } else { |
667 | 4 | spec.block_size |
668 | | }; |
669 | 41 | add_files_to_cache( |
670 | 41 | evicting_map.as_ref(), |
671 | 41 | &now, |
672 | 41 | &shared_context, |
673 | 41 | block_size, |
674 | 41 | rename_fn, |
675 | 41 | ) |
676 | 41 | .await?0 ; |
677 | 41 | prune_temp_path(&shared_context.temp_path).await?0 ; |
678 | | |
679 | 41 | let read_buffer_size = if spec.read_buffer_size == 0 { Branch (679:35): [True: 0, False: 0]
Branch (679:35): [True: 1, False: 0]
Branch (679:35): [True: 0, False: 1]
Branch (679:35): [True: 1, False: 0]
Branch (679:35): [True: 0, False: 1]
Branch (679:35): [True: 1, False: 0]
Branch (679:35): [True: 4, False: 7]
Branch (679:35): [True: 2, False: 0]
Branch (679:35): [Folded - Ignored]
Branch (679:35): [True: 23, False: 0]
|
680 | 32 | DEFAULT_BUFF_SIZE |
681 | | } else { |
682 | 9 | spec.read_buffer_size as usize |
683 | | }; |
684 | 41 | Ok(Arc::new_cyclic(|weak_self| Self { |
685 | 41 | shared_context, |
686 | 41 | evicting_map, |
687 | 41 | block_size, |
688 | 41 | read_buffer_size, |
689 | 41 | weak_self: weak_self.clone(), |
690 | 41 | rename_fn, |
691 | 41 | })) |
692 | 41 | } |
693 | | |
694 | 24 | pub fn get_arc(&self) -> Option<Arc<Self>> { |
695 | 24 | self.weak_self.upgrade() |
696 | 24 | } |
697 | | |
698 | 9 | pub async fn get_file_entry_for_digest(&self, digest: &DigestInfo) -> Result<Arc<Fe>, Error> { |
699 | 9 | self.evicting_map |
700 | 9 | .get::<StoreKey<'static>>(&digest.into()) |
701 | 9 | .await |
702 | 9 | .ok_or_else(|| make_err!(Code::NotFound, "{digest} not found in filesystem store")0 ) |
703 | 9 | } |
704 | | |
705 | 108 | async fn update_file( |
706 | 108 | self: Pin<&Self>, |
707 | 108 | mut entry: Fe, |
708 | 108 | mut temp_file: fs::FileSlot, |
709 | 108 | final_key: StoreKey<'static>, |
710 | 108 | mut reader: DropCloserReadHalf, |
711 | 108 | ) -> Result<(), Error> { |
712 | 108 | let mut data_size = 0; |
713 | | loop { |
714 | 183 | let mut data = reader |
715 | 183 | .recv() |
716 | 183 | .await |
717 | 183 | .err_tip(|| "Failed to receive data in filesystem store"0 )?0 ; |
718 | 183 | let data_len = data.len(); |
719 | 183 | if data_len == 0 { Branch (719:16): [True: 0, False: 0]
Branch (719:16): [True: 2, False: 2]
Branch (719:16): [True: 2, False: 2]
Branch (719:16): [True: 2, False: 2]
Branch (719:16): [True: 2, False: 2]
Branch (719:16): [True: 1, False: 1]
Branch (719:16): [True: 13, False: 11]
Branch (719:16): [True: 0, False: 0]
Branch (719:16): [Folded - Ignored]
Branch (719:16): [True: 86, False: 55]
|
720 | 108 | break; // EOF. |
721 | 75 | } |
722 | 75 | temp_file |
723 | 75 | .write_all_buf(&mut data) |
724 | 75 | .await |
725 | 75 | .err_tip(|| "Failed to write data into filesystem store"0 )?0 ; |
726 | 75 | data_size += data_len as u64; |
727 | | } |
728 | | |
729 | 108 | temp_file |
730 | 108 | .as_ref() |
731 | 108 | .sync_all() |
732 | 108 | .await |
733 | 108 | .err_tip(|| "Failed to sync_data in filesystem store"0 )?0 ; |
734 | | |
735 | 108 | drop(temp_file); |
736 | 108 | |
737 | 108 | *entry.data_size_mut() = data_size; |
738 | 108 | self.emplace_file(final_key, Arc::new(entry)).await |
739 | 107 | } |
740 | | |
    /// Inserts `entry` into the eviction map under `key` and renames its
    /// backing file from the temp path to the final content path.
    ///
    /// Runs in a detached background task so the operation completes even if
    /// the caller's future is dropped mid-way.
    async fn emplace_file(&self, key: StoreKey<'static>, entry: Arc<Fe>) -> Result<(), Error> {
        // This sequence of events is quite tricky to understand due to the amount of triggers that
        // happen, async'ness of it and the locking. So here is a breakdown of what happens:
        // 1. Here will hold a write lock on any file operations of this FileEntry.
        // 2. Then insert the entry into the evicting map. This may trigger an eviction of other
        //    entries.
        // 3. Eviction triggers `unref()`, which grabs a write lock on the evicted FileEntrys
        //    during the rename.
        // 4. It should be impossible for items to be added while eviction is happening, so there
        //    should not be a deadlock possibility. However, it is possible for the new FileEntry
        //    to be evicted before the file is moved into place. Eviction of the newly inserted
        //    item is not possible within the `insert()` call because the write lock inside the
        //    eviction map. If an eviction of new item happens after `insert()` but before
        //    `rename()` then we get to finish our operation because the `unref()` of the new item
        //    will be blocked on us because we currently have the lock.
        // 5. Move the file into place. Since we hold a write lock still anyone that gets our new
        //    FileEntry (which has not yet been placed on disk) will not be able to read the file's
        //    contents until we release the lock.
        let evicting_map = self.evicting_map.clone();
        let rename_fn = self.rename_fn;

        // We need to guarantee that this will get to the end even if the parent future is dropped.
        // See: https://github.com/TraceMachina/nativelink/issues/495
        background_spawn!("filesystem_store_emplace_file", async move {
            // Step 1: hold the write lock for the entire emplacement.
            let mut encoded_file_path = entry.get_encoded_file_path().write().await;
            let final_path = get_file_path_raw(
                &PathType::Content,
                encoded_file_path.shared_context.as_ref(),
                &key,
            );

            // Step 2: publish the entry in the map (may evict other entries).
            evicting_map
                .insert(key.borrow().into_owned().into(), entry.clone())
                .await;

            let from_path = encoded_file_path.get_file_path();
            // Internally tokio spawns fs commands onto a blocking thread anyways.
            // Since we are already on a blocking thread, we just need the `fs` wrapper to manage
            // an open-file permit (ensure we don't open too many files at once).
            let result = (rename_fn)(&from_path, &final_path)
                .err_tip(|| format!("Failed to rename temp file to final path {final_path:?}"));

            // In the event our move from temp file to final file fails we need to ensure we remove
            // the entry from our map.
            // Remember: At this point it is possible for another thread to have a reference to
            // `entry`, so we can't delete the file, only drop() should ever delete files.
            if let Err(err) = result {
                event!(
                    Level::ERROR,
                    ?err,
                    ?from_path,
                    ?final_path,
                    "Failed to rename file",
                );
                // Warning: To prevent deadlock we need to release our lock or during `remove_if()`
                // it will call `unref()`, which triggers a write-lock on `encoded_file_path`.
                drop(encoded_file_path);
                // It is possible that the item in our map is no longer the item we inserted,
                // So, we need to conditionally remove it only if the pointers are the same.

                evicting_map
                    .remove_if(&key, |map_entry| Arc::<Fe>::ptr_eq(map_entry, &entry))
                    .await;
                return Err(err);
            }
            // Success: record the entry's new on-disk identity before releasing the lock.
            encoded_file_path.path_type = PathType::Content;
            encoded_file_path.key = key;
            Ok(())
        })
        .await
        // NOTE(review): message says "update_file" but this is emplace_file — confirm intent.
        .err_tip(|| "Failed to create spawn in filesystem store update_file")?
    }
813 | | } |
814 | | |
815 | | #[async_trait] |
816 | | impl<Fe: FileEntry> StoreDriver for FilesystemStore<Fe> { |
817 | | async fn has_with_results( |
818 | | self: Pin<&Self>, |
819 | | keys: &[StoreKey<'_>], |
820 | | results: &mut [Option<u64>], |
821 | 170 | ) -> Result<(), Error> { |
822 | 85 | self.evicting_map |
823 | 85 | .sizes_for_keys::<_, StoreKey<'_>, &StoreKey<'_>>( |
824 | 85 | keys.iter(), |
825 | 85 | results, |
826 | 85 | false, /* peek */ |
827 | 85 | ) |
828 | 85 | .await; |
829 | | // We need to do a special pass to ensure our zero files exist. |
830 | | // If our results failed and the result was a zero file, we need to |
831 | | // create the file by spec. |
832 | 85 | for (key, result) in keys.iter().zip(results.iter_mut()) { |
833 | 85 | if result.is_some() || !is_zero_digest(key.borrow())16 { Branch (833:16): [True: 0, False: 0]
Branch (833:36): [True: 0, False: 0]
Branch (833:16): [True: 0, False: 0]
Branch (833:36): [True: 0, False: 0]
Branch (833:16): [True: 0, False: 0]
Branch (833:36): [True: 0, False: 0]
Branch (833:16): [True: 0, False: 0]
Branch (833:36): [True: 0, False: 0]
Branch (833:16): [True: 0, False: 0]
Branch (833:36): [True: 0, False: 0]
Branch (833:16): [True: 0, False: 1]
Branch (833:36): [True: 1, False: 0]
Branch (833:16): [True: 1, False: 2]
Branch (833:36): [True: 0, False: 2]
Branch (833:16): [True: 0, False: 0]
Branch (833:36): [True: 0, False: 0]
Branch (833:16): [Folded - Ignored]
Branch (833:36): [Folded - Ignored]
Branch (833:16): [True: 68, False: 13]
Branch (833:36): [True: 13, False: 0]
|
834 | 83 | continue; |
835 | 2 | } |
836 | 2 | let (mut tx, rx) = make_buf_channel_pair(); |
837 | 2 | let send_eof_result = tx.send_eof(); |
838 | 2 | self.update(key.borrow(), rx, UploadSizeInfo::ExactSize(0)) |
839 | 2 | .await |
840 | 2 | .err_tip(|| format!("Failed to create zero file for key {}", key.as_str())0 ) |
841 | 2 | .merge( |
842 | 2 | send_eof_result |
843 | 2 | .err_tip(|| "Failed to send zero file EOF in filesystem store has"0 ), |
844 | 0 | )?; |
845 | | |
846 | 2 | *result = Some(0); |
847 | | } |
848 | 85 | Ok(()) |
849 | 170 | } |
850 | | |
851 | | async fn update( |
852 | | self: Pin<&Self>, |
853 | | key: StoreKey<'_>, |
854 | | reader: DropCloserReadHalf, |
855 | | _upload_size: UploadSizeInfo, |
856 | 216 | ) -> Result<(), Error> { |
857 | 108 | let temp_key = make_temp_key(&key); |
858 | 108 | let (entry, temp_file, temp_full_path) = Fe::make_and_open_file( |
859 | 108 | self.block_size, |
860 | 108 | EncodedFilePath { |
861 | 108 | shared_context: self.shared_context.clone(), |
862 | 108 | path_type: PathType::Temp, |
863 | 108 | key: temp_key, |
864 | 108 | }, |
865 | 108 | ) |
866 | 108 | .await?0 ; |
867 | | |
868 | 108 | self.update_file(entry, temp_file, key.into_owned(), reader) |
869 | 108 | .await |
870 | 107 | .err_tip(|| format!("While processing with temp file {temp_full_path:?}")1 ) |
871 | 215 | } |
872 | | |
873 | 84 | fn optimized_for(&self, optimization: StoreOptimizations) -> bool { |
874 | 84 | optimization == StoreOptimizations::FileUpdates |
875 | 84 | } |
876 | | |
877 | | async fn update_with_whole_file( |
878 | | self: Pin<&Self>, |
879 | | key: StoreKey<'_>, |
880 | | path: OsString, |
881 | | file: fs::FileSlot, |
882 | | upload_size: UploadSizeInfo, |
883 | 18 | ) -> Result<Option<fs::FileSlot>, Error> { |
884 | 9 | let file_size = match upload_size { |
885 | 9 | UploadSizeInfo::ExactSize(size) => size, |
886 | 0 | UploadSizeInfo::MaxSize(_) => file |
887 | 0 | .as_ref() |
888 | 0 | .metadata() |
889 | 0 | .await |
890 | 0 | .err_tip(|| format!("While reading metadata for {path:?}"))? |
891 | 0 | .len(), |
892 | | }; |
893 | 9 | let entry = Fe::create( |
894 | 9 | file_size, |
895 | 9 | self.block_size, |
896 | 9 | RwLock::new(EncodedFilePath { |
897 | 9 | shared_context: self.shared_context.clone(), |
898 | 9 | path_type: PathType::Custom(path), |
899 | 9 | key: key.borrow().into_owned(), |
900 | 9 | }), |
901 | 9 | ); |
902 | 9 | // We are done with the file, if we hold a reference to the file here, it could |
903 | 9 | // result in a deadlock if `emplace_file()` also needs file descriptors. |
904 | 9 | drop(file); |
905 | 9 | self.emplace_file(key.into_owned(), Arc::new(entry)) |
906 | 9 | .await |
907 | 9 | .err_tip(|| "Could not move file into store in upload_file_to_store, maybe dest is on different volume?"0 )?0 ; |
908 | 9 | return Ok(None); |
909 | 18 | } |
910 | | |
911 | | async fn get_part( |
912 | | self: Pin<&Self>, |
913 | | key: StoreKey<'_>, |
914 | | writer: &mut DropCloserWriteHalf, |
915 | | offset: u64, |
916 | | length: Option<u64>, |
917 | 120 | ) -> Result<(), Error> { |
918 | 60 | if is_zero_digest(key.borrow()) { Branch (918:12): [True: 0, False: 0]
Branch (918:12): [True: 0, False: 0]
Branch (918:12): [True: 0, False: 1]
Branch (918:12): [True: 0, False: 0]
Branch (918:12): [True: 0, False: 1]
Branch (918:12): [True: 0, False: 0]
Branch (918:12): [True: 1, False: 4]
Branch (918:12): [True: 0, False: 0]
Branch (918:12): [Folded - Ignored]
Branch (918:12): [True: 15, False: 38]
|
919 | 16 | self.has(key.borrow()) |
920 | 16 | .await |
921 | 16 | .err_tip(|| "Failed to check if zero digest exists in filesystem store"0 )?0 ; |
922 | 16 | writer |
923 | 16 | .send_eof() |
924 | 16 | .err_tip(|| "Failed to send zero EOF in filesystem store get_part"0 )?0 ; |
925 | 16 | return Ok(()); |
926 | 44 | } |
927 | 44 | let entry = self.evicting_map.get(&key).await.ok_or_else(|| { |
928 | 0 | make_err!( |
929 | 0 | Code::NotFound, |
930 | 0 | "{} not found in filesystem store here", |
931 | 0 | key.as_str() |
932 | 0 | ) |
933 | 0 | })?; |
934 | 44 | let read_limit = length.unwrap_or(u64::MAX); |
935 | 44 | let mut temp_file42 = entry.read_file_part(offset, read_limit).or_else(|err| async move { |
936 | 2 | // If the file is not found, we need to remove it from the eviction map. |
937 | 2 | if err.code == Code::NotFound { Branch (937:16): [True: 0, False: 0]
Branch (937:16): [True: 0, False: 0]
Branch (937:16): [True: 0, False: 0]
Branch (937:16): [True: 0, False: 0]
Branch (937:16): [True: 0, False: 0]
Branch (937:16): [True: 0, False: 0]
Branch (937:16): [True: 2, False: 0]
Branch (937:16): [True: 0, False: 0]
Branch (937:16): [Folded - Ignored]
Branch (937:16): [True: 0, False: 0]
|
938 | 2 | event!( |
939 | 2 | Level::ERROR, |
940 | | ?err, |
941 | | ?key, |
942 | 2 | "Entry was in our map, but not found on disk. Removing from map as a precaution, but process probably need restarted." |
943 | | ); |
944 | 2 | self.evicting_map.remove(&key).await; |
945 | 0 | } |
946 | 2 | Err(err) |
947 | 44 | }2 ).await?2 ; |
948 | | |
949 | | loop { |
950 | 1.12k | let mut buf = BytesMut::with_capacity(self.read_buffer_size); |
951 | 1.12k | temp_file |
952 | 1.12k | .read_buf(&mut buf) |
953 | 1.12k | .await |
954 | 1.12k | .err_tip(|| "Failed to read data in filesystem store"0 )?0 ; |
955 | 1.12k | if buf.is_empty() { Branch (955:16): [True: 0, False: 0]
Branch (955:16): [True: 0, False: 0]
Branch (955:16): [True: 1, False: 10]
Branch (955:16): [True: 0, False: 0]
Branch (955:16): [True: 1, False: 10]
Branch (955:16): [True: 0, False: 0]
Branch (955:16): [True: 1, False: 1.02k]
Branch (955:16): [True: 0, False: 0]
Branch (955:16): [Folded - Ignored]
Branch (955:16): [True: 38, False: 38]
|
956 | 41 | break; // EOF. |
957 | 1.08k | } |
958 | 1.08k | writer |
959 | 1.08k | .send(buf.freeze()) |
960 | 1.08k | .await |
961 | 1.08k | .err_tip(|| "Failed to send chunk in filesystem store get_part"0 )?0 ; |
962 | | } |
963 | 41 | writer |
964 | 41 | .send_eof() |
965 | 41 | .err_tip(|| "Filed to send EOF in filesystem store get_part"0 )?0 ; |
966 | | |
967 | 41 | Ok(()) |
968 | 119 | } |
969 | | |
970 | 101 | fn inner_store(&self, _digest: Option<StoreKey>) -> &dyn StoreDriver { |
971 | 101 | self |
972 | 101 | } |
973 | | |
974 | 24 | fn as_any<'a>(&'a self) -> &'a (dyn std::any::Any + Sync + Send + 'static) { |
975 | 24 | self |
976 | 24 | } |
977 | | |
978 | 0 | fn as_any_arc(self: Arc<Self>) -> Arc<dyn std::any::Any + Sync + Send + 'static> { |
979 | 0 | self |
980 | 0 | } |
981 | | |
982 | 0 | fn register_health(self: Arc<Self>, registry: &mut HealthRegistryBuilder) { |
983 | 0 | registry.register_indicator(self); |
984 | 0 | } |
985 | | } |
986 | | |
#[async_trait]
impl<Fe: FileEntry> HealthStatusIndicator for FilesystemStore<Fe> {
    /// Name reported for this component in health-check output.
    fn get_name(&self) -> &'static str {
        "FilesystemStore"
    }

    /// Delegates to the `StoreDriver`-provided health check for this store.
    async fn check_health(&self, namespace: Cow<'static, str>) -> HealthStatus {
        StoreDriver::check_health(Pin::new(self), namespace).await
    }
}