/build/source/nativelink-store/src/fast_slow_store.rs

// Copyright 2024 The NativeLink Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::borrow::BorrowMut;
use std::cmp::{max, min};
use std::ops::Range;
use std::pin::Pin;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Weak};

use async_trait::async_trait;
use futures::{join, FutureExt};
use nativelink_config::stores::FastSlowSpec;
use nativelink_error::{make_err, Code, Error, ResultExt};
use nativelink_metric::MetricsComponent;
use nativelink_util::buf_channel::{
    make_buf_channel_pair, DropCloserReadHalf, DropCloserWriteHalf,
};
use nativelink_util::fs;
use nativelink_util::health_utils::{default_health_status_indicator, HealthStatusIndicator};
use nativelink_util::store_trait::{
    slow_update_store_with_file, Store, StoreDriver, StoreKey, StoreLike, StoreOptimizations,
    UploadSizeInfo,
};

// TODO(blaise.bruer) This store needs to be evaluated for more efficient memory
// usage; there are many copies happening internally.

// TODO(blaise.bruer) We should consider copying the data in the background to allow
// the client to hang up while the data is buffered. An alternative would be a
// "BufferedStore" placed on the "slow" store that would hang up early if data is
// in the buffer.
#[derive(MetricsComponent)]
pub struct FastSlowStore {
    #[metric(group = "fast_store")]
    fast_store: Store,
    #[metric(group = "slow_store")]
    slow_store: Store,
    weak_self: Weak<Self>,
    #[metric]
    metrics: FastSlowStoreMetrics,
}

impl FastSlowStore {
    pub fn new(_spec: &FastSlowSpec, fast_store: Store, slow_store: Store) -> Arc<Self> {
        Arc::new_cyclic(|weak_self| Self {
            fast_store,
            slow_store,
            weak_self: weak_self.clone(),
            metrics: FastSlowStoreMetrics::default(),
        })
    }

    pub fn fast_store(&self) -> &Store {
        &self.fast_store
    }

    pub fn slow_store(&self) -> &Store {
        &self.slow_store
    }

    pub fn get_arc(&self) -> Option<Arc<Self>> {
        self.weak_self.upgrade()
    }
    /// Ensures our fast store is populated. This should be kept a low-cost
    /// operation: since the data itself is shared and not copied, discarding
    /// it is fairly cheap, though it does cost a few mutex locks while
    /// streaming.
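    ///
    /// A call sketch (assuming an existing `FastSlowStore` handle and a
    /// `StoreKey` for the object to warm up):
    ///
    /// ```ignore
    /// fast_slow_store.populate_fast_store(key.borrow()).await?;
    /// ```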
    pub async fn populate_fast_store(&self, key: StoreKey<'_>) -> Result<(), Error> {
        let maybe_size_info = self
            .fast_store
            .has(key.borrow())
            .await
            .err_tip(|| "While querying in populate_fast_store")?;
        if maybe_size_info.is_some() {
            return Ok(());
        }
        // TODO(blaise.bruer) This is extremely inefficient, since we are just trying
        // to send the stream to /dev/null. Maybe we could instead make a version of
        // the stream that can send to the drain more efficiently?
        let (tx, mut rx) = make_buf_channel_pair();
        let drain_fut = async move {
            while !rx.recv().await?.is_empty() {}
            Ok(())
        };
        let (drain_res, get_res) = join!(drain_fut, StoreDriver::get(Pin::new(self), key, tx));
        get_res.err_tip(|| "Failed to populate()").merge(drain_res)
    }

    /// Returns the range of bytes that should be sent, given slice bounds,
    /// offset so that the output range maps `received_range.start` to 0.
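    ///
    /// For example, if bytes 10..20 were received and the caller asked for
    /// 15..25, only 15..20 overlaps, and it maps to 5..10 relative to the
    /// received buffer. A sketch, assuming the module path below:
    ///
    /// ```ignore
    /// use nativelink_store::fast_slow_store::FastSlowStore;
    ///
    /// assert_eq!(
    ///     FastSlowStore::calculate_range(&(10..20), &(15..25)).unwrap(),
    ///     Some(5..10)
    /// );
    /// // Disjoint ranges yield Ok(None) rather than an error.
    /// assert_eq!(
    ///     FastSlowStore::calculate_range(&(0..10), &(20..30)).unwrap(),
    ///     None
    /// );
    /// ```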
    // TODO(allada) This should be put into utils, as this logic is used
    // elsewhere in the code.
    pub fn calculate_range(
        received_range: &Range<u64>,
        send_range: &Range<u64>,
    ) -> Result<Option<Range<usize>>, Error> {
        // Protect against subtraction overflow.
        if received_range.start >= received_range.end {
            return Ok(None);
        }

        let start = max(received_range.start, send_range.start);
        let end = min(received_range.end, send_range.end);
        if received_range.contains(&start) && received_range.contains(&(end - 1)) {
            // Offset both to the start of the received_range.
            let calculated_range_start = usize::try_from(start - received_range.start)
                .err_tip(|| "Could not convert (start - received_range.start) to usize")?;
            let calculated_range_end = usize::try_from(end - received_range.start)
                .err_tip(|| "Could not convert (end - received_range.start) to usize")?;
            Ok(Some(calculated_range_start..calculated_range_end))
        } else {
            Ok(None)
        }
    }
}

#[async_trait]
impl StoreDriver for FastSlowStore {
    async fn has_with_results(
        self: Pin<&Self>,
        key: &[StoreKey<'_>],
        results: &mut [Option<u64>],
    ) -> Result<(), Error> {
        // If our slow store is a noop store, it'll always return a 404,
        // so only check the fast store in that case.
        let slow_store = self.slow_store.inner_store::<StoreKey<'_>>(None);
        if slow_store.optimized_for(StoreOptimizations::NoopDownloads) {
            return self.fast_store.has_with_results(key, results).await;
        }
        // Only check the slow store, because if an object is not there, then
        // something downstream might be unable to get it. This should not
        // affect workers, as they only use get(), and a CAS can use an
        // ExistenceCacheStore to avoid the bottleneck.
        self.slow_store.has_with_results(key, results).await
    }

    async fn update(
        self: Pin<&Self>,
        key: StoreKey<'_>,
        mut reader: DropCloserReadHalf,
        size_info: UploadSizeInfo,
    ) -> Result<(), Error> {
        // If either one of our stores is a noop store, bypass the multiplexing
        // and just use the store that is not a noop store.
        let slow_store = self.slow_store.inner_store(Some(key.borrow()));
        if slow_store.optimized_for(StoreOptimizations::NoopUpdates) {
            return self.fast_store.update(key, reader, size_info).await;
        }
        let fast_store = self.fast_store.inner_store(Some(key.borrow()));
        if fast_store.optimized_for(StoreOptimizations::NoopUpdates) {
            return self.slow_store.update(key, reader, size_info).await;
        }

        let (mut fast_tx, fast_rx) = make_buf_channel_pair();
        let (mut slow_tx, slow_rx) = make_buf_channel_pair();

        let data_stream_fut = async move {
            loop {
                let buffer = reader
                    .recv()
                    .await
                    .err_tip(|| "Failed to read buffer in fastslow store")?;
                if buffer.is_empty() {
                    // EOF received.
                    fast_tx.send_eof().err_tip(|| {
                        "Failed to write eof to fast store in fast_slow store update"
                    })?;
                    slow_tx
                        .send_eof()
                        .err_tip(|| "Failed to write eof to writer in fast_slow store update")?;
                    return Result::<(), Error>::Ok(());
                }

                let (fast_result, slow_result) =
                    join!(fast_tx.send(buffer.clone()), slow_tx.send(buffer));
                fast_result
                    .map_err(|e| {
                        make_err!(
                            Code::Internal,
                            "Failed to send message to fast_store in fast_slow_store {:?}",
                            e
                        )
                    })
                    .merge(slow_result.map_err(|e| {
                        make_err!(
                            Code::Internal,
                            "Failed to send message to slow_store in fast_slow store {:?}",
                            e
                        )
                    }))?;
            }
        };

        let fast_store_fut = self.fast_store.update(key.borrow(), fast_rx, size_info);
        let slow_store_fut = self.slow_store.update(key.borrow(), slow_rx, size_info);

        let (data_stream_res, fast_res, slow_res) =
            join!(data_stream_fut, fast_store_fut, slow_store_fut);
        data_stream_res.merge(fast_res).merge(slow_res)?;
        Ok(())
    }
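
    // Note on the update path above: it is a lock-step "tee". Each chunk is
    // sent to both stores with `join!` before the next chunk is read, so
    // neither store can run ahead of the other by more than one buffered
    // chunk, and an error on either side fails the whole upload. From the
    // caller's side it is still a single update (sketch, names illustrative):
    //
    //     store.update(key, reader, UploadSizeInfo::ExactSize(len)).await?;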

    /// FastSlowStore has optimizations for dealing with files.
    fn optimized_for(&self, optimization: StoreOptimizations) -> bool {
        optimization == StoreOptimizations::FileUpdates
    }

    /// Optimized variation to consume the file if one of the stores is a
    /// filesystem store. This makes the operation a move instead of a copy,
    /// dramatically increasing performance for large files.
    async fn update_with_whole_file(
        self: Pin<&Self>,
        key: StoreKey<'_>,
        mut file: fs::ResumeableFileSlot,
        upload_size: UploadSizeInfo,
    ) -> Result<Option<fs::ResumeableFileSlot>, Error> {
        if self
            .fast_store
            .optimized_for(StoreOptimizations::FileUpdates)
        {
            if !self
                .slow_store
                .optimized_for(StoreOptimizations::NoopUpdates)
            {
                slow_update_store_with_file(
                    self.slow_store.as_store_driver_pin(),
                    key.borrow(),
                    &mut file,
                    upload_size,
                )
                .await
                .err_tip(|| "In FastSlowStore::update_with_whole_file slow_store")?;
            }
            return self
                .fast_store
                .update_with_whole_file(key, file, upload_size)
                .await;
        }

        if self
            .slow_store
            .optimized_for(StoreOptimizations::FileUpdates)
        {
            if !self
                .fast_store
                .optimized_for(StoreOptimizations::NoopUpdates)
            {
                slow_update_store_with_file(
                    self.fast_store.as_store_driver_pin(),
                    key.borrow(),
                    &mut file,
                    upload_size,
                )
                .await
                .err_tip(|| "In FastSlowStore::update_with_whole_file fast_store")?;
            }
            return self
                .slow_store
                .update_with_whole_file(key, file, upload_size)
                .await;
        }

        slow_update_store_with_file(self, key, &mut file, upload_size)
            .await
            .err_tip(|| "In FastSlowStore::update_with_whole_file")?;
        Ok(Some(file))
    }

    async fn get_part(
        self: Pin<&Self>,
        key: StoreKey<'_>,
        writer: &mut DropCloserWriteHalf,
        offset: u64,
        length: Option<u64>,
    ) -> Result<(), Error> {
        // TODO(blaise.bruer) Investigate if we should maybe ignore errors here
        // instead of forwarding them up.
        if self.fast_store.has(key.borrow()).await?.is_some() {
            self.metrics
                .fast_store_hit_count
                .fetch_add(1, Ordering::Acquire);
            self.fast_store
                .get_part(key, writer.borrow_mut(), offset, length)
                .await?;
            self.metrics
                .fast_store_downloaded_bytes
                .fetch_add(writer.get_bytes_written(), Ordering::Acquire);
            return Ok(());
        }

        let sz = self
            .slow_store
            .has(key.borrow())
            .await
            .err_tip(|| "Failed to run has() on slow store")?
            .ok_or_else(|| {
                make_err!(
                    Code::NotFound,
                    "Object {} not found in either fast or slow store",
                    key.as_str()
                )
            })?;
        self.metrics
            .slow_store_hit_count
            .fetch_add(1, Ordering::Acquire);

        let send_range = offset..length.map_or(u64::MAX, |length| length + offset);
        let mut bytes_received: u64 = 0;

        let (mut fast_tx, fast_rx) = make_buf_channel_pair();
        let (slow_tx, mut slow_rx) = make_buf_channel_pair();
        let data_stream_fut = async move {
            let mut writer_pin = Pin::new(writer);
            loop {
                let output_buf = slow_rx
                    .recv()
                    .await
                    .err_tip(|| "Failed to read data buffer from slow store")?;
                if output_buf.is_empty() {
                    // Write out our EOF.
                    // We are dropped as soon as we send_eof to writer_pin, so
                    // we wait until we've finished all of our joins to do that.
                    let fast_res = fast_tx.send_eof();
                    return Ok::<_, Error>((fast_res, writer_pin));
                }
                let output_buf_len = u64::try_from(output_buf.len())
                    .err_tip(|| "Could not convert output_buf.len() to u64")?;
                self.metrics
                    .slow_store_downloaded_bytes
                    .fetch_add(output_buf_len, Ordering::Acquire);

                let writer_fut = if let Some(range) = Self::calculate_range(
                    &(bytes_received..bytes_received + output_buf_len),
                    &send_range,
                )? {
                    writer_pin.send(output_buf.slice(range)).right_future()
                } else {
                    futures::future::ready(Ok(())).left_future()
                };
                bytes_received += output_buf_len;

                let (fast_tx_res, writer_res) = join!(fast_tx.send(output_buf), writer_fut);
                fast_tx_res.err_tip(|| "Failed to write to fast store in fast_slow store")?;
                writer_res.err_tip(|| "Failed to write result to writer in fast_slow store")?;
            }
        };

        let slow_store_fut = self.slow_store.get(key.borrow(), slow_tx);
        let fast_store_fut =
            self.fast_store
                .update(key.borrow(), fast_rx, UploadSizeInfo::ExactSize(sz));

        let (data_stream_res, slow_res, fast_res) =
            join!(data_stream_fut, slow_store_fut, fast_store_fut);
        match data_stream_res {
            Ok((fast_eof_res, mut writer_pin)) =>
            // Sending the EOF will drop us almost immediately in bytestream_server
            // so we perform it as the very last action in this method.
            {
                fast_eof_res
                    .merge(fast_res)
                    .merge(slow_res)
                    .merge(writer_pin.send_eof())
            }
            Err(err) => fast_res.merge(slow_res).merge(Err(err)),
        }
    }
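
    // Note on get_part above: a fast-store miss becomes a read-through. The
    // slow store's bytes are streamed simultaneously to the client (trimmed
    // by `calculate_range`) and into the fast store, so the full object is
    // backfilled even when the client asked for only a sub-range.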

    fn inner_store(&self, _key: Option<StoreKey>) -> &dyn StoreDriver {
        self
    }

    fn as_any<'a>(&'a self) -> &'a (dyn std::any::Any + Sync + Send + 'static) {
        self
    }

    fn as_any_arc(self: Arc<Self>) -> Arc<dyn std::any::Any + Sync + Send + 'static> {
        self
    }
}

#[derive(Default, MetricsComponent)]
struct FastSlowStoreMetrics {
    #[metric(help = "Hit count for the fast store")]
    fast_store_hit_count: AtomicU64,
    #[metric(help = "Downloaded bytes from the fast store")]
    fast_store_downloaded_bytes: AtomicU64,
    #[metric(help = "Hit count for the slow store")]
    slow_store_hit_count: AtomicU64,
    #[metric(help = "Downloaded bytes from the slow store")]
    slow_store_downloaded_bytes: AtomicU64,
}

default_health_status_indicator!(FastSlowStore);
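
// Construction sketch (hypothetical placeholders, not a tested
// configuration): a `FastSlowStore` is built from a `FastSlowSpec` plus two
// already-constructed stores, then used like any other store. See
// `nativelink_config::stores::FastSlowSpec` for the real schema, and
// `store_trait` for the exact `Store` wrapper constructor assumed here:
//
//     let fast_slow = FastSlowStore::new(&spec, fast_store, slow_store);
//     let store = Store::new(fast_slow);
//     store.update(key, reader, size_info).await?;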