/build/source/nativelink-store/src/s3_store.rs
Line | Count | Source |
1 | | // Copyright 2024 The NativeLink Authors. All rights reserved. |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | | // you may not use this file except in compliance with the License. |
5 | | // You may obtain a copy of the License at |
6 | | // |
7 | | // http://www.apache.org/licenses/LICENSE-2.0 |
8 | | // |
9 | | // Unless required by applicable law or agreed to in writing, software |
10 | | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | | // See the License for the specific language governing permissions and |
13 | | // limitations under the License. |
14 | | |
15 | | use core::cmp; |
16 | | use core::future::Future; |
17 | | use core::pin::Pin; |
18 | | use core::task::{Context, Poll}; |
19 | | use core::time::Duration; |
20 | | use std::borrow::Cow; |
21 | | use std::sync::Arc; |
22 | | |
23 | | use async_trait::async_trait; |
24 | | use aws_config::default_provider::credentials; |
25 | | use aws_config::provider_config::ProviderConfig; |
26 | | use aws_config::retry::ErrorKind::TransientError; |
27 | | use aws_config::{AppName, BehaviorVersion}; |
28 | | use aws_sdk_s3::Client; |
29 | | use aws_sdk_s3::config::Region; |
30 | | use aws_sdk_s3::operation::create_multipart_upload::CreateMultipartUploadOutput; |
31 | | use aws_sdk_s3::operation::get_object::GetObjectError; |
32 | | use aws_sdk_s3::operation::head_object::HeadObjectError; |
33 | | use aws_sdk_s3::primitives::ByteStream; // SdkBody |
34 | | use aws_sdk_s3::types::builders::{CompletedMultipartUploadBuilder, CompletedPartBuilder}; |
35 | | use aws_smithy_runtime_api::client::http::{ |
36 | | HttpClient as SmithyHttpClient, HttpConnector as SmithyHttpConnector, HttpConnectorFuture, |
37 | | HttpConnectorSettings, SharedHttpConnector, |
38 | | }; |
39 | | use aws_smithy_runtime_api::client::orchestrator::HttpRequest; |
40 | | use aws_smithy_runtime_api::client::result::ConnectorError; |
41 | | use aws_smithy_runtime_api::client::runtime_components::RuntimeComponents; |
42 | | use aws_smithy_runtime_api::http::Response; |
43 | | use aws_smithy_types::body::SdkBody; |
44 | | use bytes::{Bytes, BytesMut}; |
45 | | use futures::future::FusedFuture; |
46 | | use futures::stream::{FuturesUnordered, unfold}; |
47 | | use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; |
48 | | use http_body::{Frame, SizeHint}; |
49 | | use http_body_util::BodyExt; |
50 | | use hyper::{Method, Request}; |
51 | | use hyper_rustls::{HttpsConnector, HttpsConnectorBuilder}; |
52 | | use hyper_util::client::legacy::Client as LegacyClient; |
53 | | use hyper_util::client::legacy::connect::HttpConnector as LegacyHttpConnector; |
54 | | use hyper_util::rt::TokioExecutor; |
55 | | use nativelink_config::stores::ExperimentalAwsSpec; |
56 | | // Note: S3 store should be very careful about the error codes it returns |
57 | | // when in a retryable wrapper. Always prefer Code::Aborted or another |
58 | | // retryable code over Code::InvalidArgument or make_input_err!(). |
59 | | // ie: Don't import make_input_err!() to help prevent this. |
60 | | use nativelink_error::{Code, Error, ResultExt, make_err}; |
61 | | use nativelink_metric::MetricsComponent; |
62 | | use nativelink_util::buf_channel::{ |
63 | | DropCloserReadHalf, DropCloserWriteHalf, make_buf_channel_pair, |
64 | | }; |
65 | | use nativelink_util::fs; |
66 | | use nativelink_util::health_utils::{HealthRegistryBuilder, HealthStatus, HealthStatusIndicator}; |
67 | | use nativelink_util::instant_wrapper::InstantWrapper; |
68 | | use nativelink_util::retry::{Retrier, RetryResult}; |
69 | | use nativelink_util::store_trait::{StoreDriver, StoreKey, UploadSizeInfo}; |
70 | | use rand::Rng; |
71 | | use tokio::sync::mpsc; |
72 | | use tokio::time::sleep; |
73 | | use tracing::{error, info}; |
74 | | |
75 | | use crate::cas_utils::is_zero_digest; |
76 | | |
77 | | // S3 parts cannot be smaller than this number. See: |
78 | | // https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html |
79 | | const MIN_MULTIPART_SIZE: u64 = 5 * 1024 * 1024; // 5MB. |
80 | | |
81 | | // S3 parts cannot be larger than this number. See: |
82 | | // https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html |
83 | | const MAX_MULTIPART_SIZE: u64 = 5 * 1024 * 1024 * 1024; // 5GB. |
84 | | |
85 | | // An S3 multipart upload cannot have more parts than this number. See:
86 | | // https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html |
87 | | const MAX_UPLOAD_PARTS: usize = 10_000; |
88 | | |
89 | | // Default max buffer size for retrying upload requests. |
90 | | // Note: If you change this, adjust the docs in the config. |
91 | | const DEFAULT_MAX_RETRY_BUFFER_PER_REQUEST: usize = 5 * 1024 * 1024; // 5MB. |
92 | | |
93 | | // Default limit for concurrent part uploads per multipart upload. |
94 | | // Note: If you change this, adjust the docs in the config. |
95 | | const DEFAULT_MULTIPART_MAX_CONCURRENT_UPLOADS: usize = 10; |
96 | | |
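 | | /// HTTPS client used for all S3 traffic. Wraps a hyper legacy client built on
 | | /// rustls with platform root certificates together with a `Retrier`, and
 | | /// implements the smithy `HttpClient`/`HttpConnector` traits so the AWS SDK
 | | /// sends its requests through it.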
97 | | #[derive(Clone)] |
98 | | pub struct TlsClient { |
99 | | client: LegacyClient<HttpsConnector<LegacyHttpConnector>, SdkBody>, |
100 | | retrier: Retrier, |
101 | | } |
102 | | |
103 | | impl TlsClient { |
104 | | #[must_use] |
105 | 0 | pub fn new( |
106 | 0 | spec: &ExperimentalAwsSpec, |
107 | 0 | jitter_fn: Arc<dyn Fn(Duration) -> Duration + Send + Sync>, |
108 | 0 | ) -> Self { |
109 | 0 | let connector_with_roots = HttpsConnectorBuilder::new().with_platform_verifier(); |
110 | | |
111 | 0 | let connector_with_schemes = if spec.common.insecure_allow_http {
  |  Branch (111:41): [True: 0, False: 0]
  |  Branch (111:41): [Folded - Ignored]
112 | 0 | connector_with_roots.https_or_http() |
113 | | } else { |
114 | 0 | connector_with_roots.https_only() |
115 | | }; |
116 | | |
117 | 0 | let connector = if spec.common.disable_http2 {
  |  Branch (117:28): [True: 0, False: 0]
  |  Branch (117:28): [Folded - Ignored]
118 | 0 | connector_with_schemes.enable_http1().build() |
119 | | } else { |
120 | 0 | connector_with_schemes.enable_http1().enable_http2().build() |
121 | | }; |
122 | | |
123 | 0 | let client = LegacyClient::builder(TokioExecutor::new()).build(connector); |
124 | | |
125 | | Self { |
126 | 0 | client, |
127 | 0 | retrier: Retrier::new( |
128 | 0 | Arc::new(|duration| Box::pin(sleep(duration))), |
129 | 0 | jitter_fn, |
130 | 0 | spec.common.retry.clone(), |
131 | | ), |
132 | | } |
133 | 0 | } |
134 | | } |
135 | | |
136 | | impl core::fmt::Debug for TlsClient { |
137 | 0 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { |
138 | 0 | f.debug_struct("TlsClient").finish_non_exhaustive() |
139 | 0 | } |
140 | | } |
141 | | |
142 | | impl SmithyHttpClient for TlsClient { |
143 | 0 | fn http_connector( |
144 | 0 | &self, |
145 | 0 | _settings: &HttpConnectorSettings, |
146 | 0 | _components: &RuntimeComponents, |
147 | 0 | ) -> SharedHttpConnector { |
148 | 0 | SharedHttpConnector::new(self.clone()) |
149 | 0 | } |
150 | | } |
151 | | |
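 | | /// A request body held in a form that can be replayed on retry: the original
 | | /// `SdkBody` when it is cloneable, a fully buffered copy otherwise, or nothing
 | | /// for bodyless requests.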
152 | | enum BufferedBodyState { |
153 | | Cloneable(SdkBody), |
154 | | Buffered(Bytes), |
155 | | Empty, |
156 | | } |
157 | | |
158 | | mod body_processing { |
159 | | use super::{BodyExt, BufferedBodyState, BytesMut, ConnectorError, SdkBody, TransientError}; |
160 | | |
161 | | /// Buffer a request body fully into memory. |
162 | | /// |
163 | | /// TODO(aaronmondal): This could lead to OOMs in extremely constrained |
164 | | /// environments. Probably better to implement something |
165 | | /// like a rewindable stream logic. |
166 | | #[inline] |
167 | 0 | pub(crate) async fn buffer_body(body: SdkBody) -> Result<BufferedBodyState, ConnectorError> { |
168 | 0 | let mut bytes = BytesMut::new(); |
169 | 0 | let mut body_stream = body; |
170 | 0 | while let Some(frame) = body_stream.frame().await {
  |  Branch (170:19): [True: 0, False: 0]
  |  Branch (170:19): [Folded - Ignored]
171 | 0 | match frame { |
172 | 0 | Ok(frame) => { |
173 | 0 | if let Some(data) = frame.data_ref() {
  |  Branch (173:28): [True: 0, False: 0]
  |  Branch (173:28): [Folded - Ignored]
174 | 0 | bytes.extend_from_slice(data); |
175 | 0 | } |
176 | | } |
177 | 0 | Err(e) => { |
178 | 0 | return Err(ConnectorError::other( |
179 | 0 | format!("Failed to read request body: {e}").into(), |
180 | 0 | Some(TransientError), |
181 | 0 | )); |
182 | | } |
183 | | } |
184 | | } |
185 | | |
186 | 0 | Ok(BufferedBodyState::Buffered(bytes.freeze())) |
187 | 0 | } |
188 | | } |
189 | | |
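 | | /// The pieces of an incoming request (method, URI, HTTP version, headers and a
 | | /// replayable body) saved so that an identical hyper request can be rebuilt for
 | | /// every retry attempt.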
190 | | struct RequestComponents { |
191 | | method: Method, |
192 | | uri: hyper::Uri, |
193 | | version: hyper::Version, |
194 | | headers: hyper::HeaderMap, |
195 | | body_data: BufferedBodyState, |
196 | | } |
197 | | |
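 | | /// Conversions between smithy and hyper types: `RequestExt` decomposes an
 | | /// `HttpRequest` into `RequestComponents` (buffering POST/PUT bodies that
 | | /// cannot be cloned), and `ResponseExt` turns a hyper response back into a
 | | /// smithy `Response<SdkBody>`.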
198 | | mod conversions { |
199 | | use super::{ |
200 | | BufferedBodyState, ConnectorError, Future, HttpRequest, Method, RequestComponents, |
201 | | Response, SdkBody, TransientError, body_processing, |
202 | | }; |
203 | | |
204 | | pub(crate) trait RequestExt { |
205 | | fn into_components(self) |
206 | | -> impl Future<Output = Result<RequestComponents, ConnectorError>>; |
207 | | } |
208 | | |
209 | | impl RequestExt for HttpRequest { |
210 | 0 | async fn into_components(self) -> Result<RequestComponents, ConnectorError> { |
211 | | // Note: This does *not* refer to the HTTP protocol, but to the
212 | | // version of the http crate. |
213 | 0 | let hyper_req = self.try_into_http1x().map_err(|e| { |
214 | 0 | ConnectorError::other( |
215 | 0 | format!("Failed to convert to HTTP request: {e}").into(), |
216 | 0 | Some(TransientError), |
217 | | ) |
218 | 0 | })?; |
219 | | |
220 | 0 | let method = hyper_req.method().clone(); |
221 | 0 | let uri = hyper_req.uri().clone(); |
222 | 0 | let version = hyper_req.version(); |
223 | 0 | let headers = hyper_req.headers().clone(); |
224 | | |
225 | 0 | let body = hyper_req.into_body(); |
226 | | |
227 | | // Only buffer bodies for methods likely to have payloads. |
228 | 0 | let needs_buffering = matches!(method, Method::POST | Method::PUT); |
229 | | |
230 | | // Preserve the body in case we need to retry. |
231 | 0 | let body_data = if needs_buffering {
  |  Branch (231:32): [True: 0, False: 0]
  |  Branch (231:32): [Folded - Ignored]
232 | 0 | if let Some(cloneable_body) = body.try_clone() {
  |  Branch (232:24): [True: 0, False: 0]
  |  Branch (232:24): [Folded - Ignored]
233 | 0 | BufferedBodyState::Cloneable(cloneable_body) |
234 | | } else { |
235 | 0 | body_processing::buffer_body(body).await? |
236 | | } |
237 | | } else { |
238 | 0 | BufferedBodyState::Empty |
239 | | }; |
240 | | |
241 | 0 | Ok(RequestComponents { |
242 | 0 | method, |
243 | 0 | uri, |
244 | 0 | version, |
245 | 0 | headers, |
246 | 0 | body_data, |
247 | 0 | }) |
248 | 0 | } |
249 | | } |
250 | | |
251 | | pub(crate) trait ResponseExt { |
252 | | fn into_smithy_response(self) -> Response<SdkBody>; |
253 | | } |
254 | | |
255 | | impl ResponseExt for hyper::Response<hyper::body::Incoming> { |
256 | 0 | fn into_smithy_response(self) -> Response<SdkBody> { |
257 | 0 | let (parts, body) = self.into_parts(); |
258 | 0 | let sdk_body = SdkBody::from_body_1_x(body); |
259 | 0 | let mut smithy_resp = Response::new(parts.status.into(), sdk_body); |
260 | 0 | let header_pairs: Vec<(String, String)> = parts |
261 | 0 | .headers |
262 | 0 | .iter() |
263 | 0 | .filter_map(|(name, value)| { |
264 | 0 | value |
265 | 0 | .to_str() |
266 | 0 | .ok() |
267 | 0 | .map(|value_str| (name.as_str().to_owned(), value_str.to_owned())) |
268 | 0 | }) |
269 | 0 | .collect(); |
270 | | |
271 | 0 | for (name, value) in header_pairs { |
272 | 0 | smithy_resp.headers_mut().insert(name, value); |
273 | 0 | } |
274 | | |
275 | 0 | smithy_resp |
276 | 0 | } |
277 | | } |
278 | | } |
279 | | |
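 | | /// Rebuilds a concrete `Request<SdkBody>` from saved `RequestComponents`,
 | | /// cloning or re-materializing the buffered body so the same request can be
 | | /// sent more than once.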
280 | | struct RequestBuilder<'a> { |
281 | | components: &'a RequestComponents, |
282 | | } |
283 | | |
284 | | impl<'a> RequestBuilder<'a> { |
285 | | #[inline] |
286 | 0 | const fn new(components: &'a RequestComponents) -> Self { |
287 | 0 | Self { components } |
288 | 0 | } |
289 | | |
290 | | #[inline] |
291 | | #[allow(unused_qualifications, reason = "false positive on hyper::http::Error")] |
292 | 0 | fn build(&self) -> Result<Request<SdkBody>, hyper::http::Error> { |
293 | 0 | let mut req_builder = Request::builder() |
294 | 0 | .method(self.components.method.clone()) |
295 | 0 | .uri(self.components.uri.clone()) |
296 | 0 | .version(self.components.version); |
297 | | |
298 | 0 | let headers_map = req_builder.headers_mut().unwrap(); |
299 | 0 | for (name, value) in &self.components.headers { |
300 | 0 | headers_map.insert(name, value.clone()); |
301 | 0 | } |
302 | | |
303 | 0 | match &self.components.body_data { |
304 | 0 | BufferedBodyState::Cloneable(body) => { |
305 | 0 | let cloned_body = body.try_clone().expect("Body should be cloneable"); |
306 | 0 | req_builder.body(cloned_body) |
307 | | } |
308 | 0 | BufferedBodyState::Buffered(bytes) => req_builder.body(SdkBody::from(bytes.clone())), |
309 | 0 | BufferedBodyState::Empty => req_builder.body(SdkBody::empty()), |
310 | | } |
311 | 0 | } |
312 | | } |
313 | | |
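 | | /// Request execution helpers: acquire a permit via `fs::get_permit`, rebuild
 | | /// the request, send it through the hyper client, and map failures to
 | | /// retryable `RetryResult`s. `create_retry_stream` wraps this in a stream the
 | | /// `Retrier` can poll repeatedly.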
314 | | mod execution { |
315 | | use super::conversions::ResponseExt; |
316 | | use super::{ |
317 | | Code, HttpsConnector, LegacyClient, LegacyHttpConnector, RequestBuilder, RequestComponents, |
318 | | Response, RetryResult, SdkBody, fs, make_err, |
319 | | }; |
320 | | |
321 | | #[inline] |
322 | 0 | pub(crate) async fn execute_request( |
323 | 0 | client: LegacyClient<HttpsConnector<LegacyHttpConnector>, SdkBody>, |
324 | 0 | components: &RequestComponents, |
325 | 0 | ) -> RetryResult<Response<SdkBody>> { |
326 | 0 | let _permit = match fs::get_permit().await { |
327 | 0 | Ok(permit) => permit, |
328 | 0 | Err(e) => { |
329 | 0 | return RetryResult::Retry(make_err!( |
330 | 0 | Code::Unavailable, |
331 | 0 | "Failed to acquire permit: {e}" |
332 | 0 | )); |
333 | | } |
334 | | }; |
335 | | |
336 | 0 | let request = match RequestBuilder::new(components).build() { |
337 | 0 | Ok(req) => req, |
338 | 0 | Err(e) => { |
339 | 0 | return RetryResult::Err(make_err!( |
340 | 0 | Code::Internal, |
341 | 0 | "Failed to create request: {e}", |
342 | 0 | )); |
343 | | } |
344 | | }; |
345 | | |
346 | 0 | match client.request(request).await { |
347 | 0 | Ok(resp) => RetryResult::Ok(resp.into_smithy_response()), |
348 | 0 | Err(e) => RetryResult::Retry(make_err!( |
349 | 0 | Code::Unavailable, |
350 | 0 | "Failed request in S3Store: {e}" |
351 | 0 | )), |
352 | | } |
353 | 0 | } |
354 | | |
355 | | #[inline] |
356 | 0 | pub(crate) fn create_retry_stream( |
357 | 0 | client: LegacyClient<HttpsConnector<LegacyHttpConnector>, SdkBody>, |
358 | 0 | components: RequestComponents, |
359 | 0 | ) -> impl futures::Stream<Item = RetryResult<Response<SdkBody>>> { |
360 | 0 | futures::stream::unfold(components, move |components| { |
361 | 0 | let client_clone = client.clone(); |
362 | 0 | async move { |
363 | 0 | let result = execute_request(client_clone, &components).await; |
364 | | |
365 | 0 | Some((result, components)) |
366 | 0 | } |
367 | 0 | }) |
368 | 0 | } |
369 | | } |
370 | | |
371 | | impl SmithyHttpConnector for TlsClient { |
372 | 0 | fn call(&self, req: HttpRequest) -> HttpConnectorFuture { |
373 | | use conversions::RequestExt; |
374 | | |
375 | 0 | let client = self.client.clone(); |
376 | 0 | let retrier = self.retrier.clone(); |
377 | | |
378 | 0 | HttpConnectorFuture::new(Box::pin(async move { |
379 | 0 | let components = req.into_components().await?; |
380 | | |
381 | 0 | let retry_stream = execution::create_retry_stream(client, components); |
382 | | |
383 | 0 | match retrier.retry(retry_stream).await { |
384 | 0 | Ok(response) => Ok(response), |
385 | 0 | Err(e) => Err(ConnectorError::other( |
386 | 0 | format!("Connection failed after retries: {e}").into(), |
387 | 0 | Some(TransientError), |
388 | 0 | )), |
389 | | } |
390 | 0 | })) |
391 | 0 | } |
392 | | } |
393 | | |
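 | | /// Adapts a `DropCloserReadHalf` into an `http_body::Body` with an exact size
 | | /// hint so upload data can be streamed to S3 via `ByteStream::from_body_1_x`.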
394 | | #[derive(Debug)] |
395 | | pub struct BodyWrapper { |
396 | | reader: DropCloserReadHalf, |
397 | | size: u64, |
398 | | } |
399 | | |
400 | | impl http_body::Body for BodyWrapper { |
401 | | type Data = Bytes; |
402 | | type Error = std::io::Error; |
403 | | |
404 | 76 | fn poll_frame( |
405 | 76 | self: Pin<&mut Self>, |
406 | 76 | cx: &mut Context<'_>, |
407 | 76 | ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> { |
408 | 76 | let reader = Pin::new(&mut Pin::get_mut(self).reader); |
409 | 76 | reader |
410 | 76 | .poll_next(cx) |
411 | 76 | .map(|maybe_bytes_res| maybe_bytes_res.map(|res| res.map(Frame::data)))
412 | 76 | } |
413 | | |
414 | 2 | fn size_hint(&self) -> SizeHint { |
415 | 2 | SizeHint::with_exact(self.size) |
416 | 2 | } |
417 | | } |
418 | | |
419 | | #[derive(Debug, MetricsComponent)] |
420 | | pub struct S3Store<NowFn> { |
421 | | s3_client: Arc<Client>, |
422 | | now_fn: NowFn, |
423 | | #[metric(help = "The bucket name for the S3 store")] |
424 | | bucket: String, |
425 | | #[metric(help = "The key prefix for the S3 store")] |
426 | | key_prefix: String, |
427 | | retrier: Retrier, |
428 | | #[metric(help = "The number of seconds to consider an object expired")] |
429 | | consider_expired_after_s: i64, |
430 | | #[metric(help = "The number of bytes to buffer for retrying requests")] |
431 | | max_retry_buffer_per_request: usize, |
432 | | #[metric(help = "The number of concurrent uploads allowed for multipart uploads")] |
433 | | multipart_max_concurrent_uploads: usize, |
434 | | } |
435 | | |
436 | | impl<I, NowFn> S3Store<NowFn> |
437 | | where |
438 | | I: InstantWrapper, |
439 | | NowFn: Fn() -> I + Send + Sync + Unpin + 'static, |
440 | | { |
441 | 0 | pub async fn new(spec: &ExperimentalAwsSpec, now_fn: NowFn) -> Result<Arc<Self>, Error> { |
442 | 0 | let jitter_amt = spec.common.retry.jitter; |
443 | 0 | let jitter_fn = Arc::new(move |delay: Duration| { |
444 | 0 | if jitter_amt == 0. {
  |  Branch (444:16): [True: 0, False: 0]
  |  Branch (444:16): [Folded - Ignored]
445 | 0 | return delay; |
446 | 0 | } |
447 | 0 | delay.mul_f32(jitter_amt.mul_add(rand::rng().random::<f32>() - 0.5, 1.)) |
448 | 0 | }); |
449 | 0 | let s3_client = { |
450 | 0 | let http_client = TlsClient::new(&spec.clone(), jitter_fn.clone()); |
451 | | |
452 | 0 | let credential_provider = credentials::DefaultCredentialsChain::builder() |
453 | 0 | .configure( |
454 | 0 | ProviderConfig::without_region() |
455 | 0 | .with_region(Some(Region::new(Cow::Owned(spec.region.clone())))) |
456 | 0 | .with_http_client(http_client.clone()), |
457 | 0 | ) |
458 | 0 | .build() |
459 | 0 | .await; |
460 | | |
461 | 0 | let config = aws_config::defaults(BehaviorVersion::v2025_01_17()) |
462 | 0 | .credentials_provider(credential_provider) |
463 | 0 | .app_name(AppName::new("nativelink").expect("valid app name")) |
464 | 0 | .timeout_config( |
465 | 0 | aws_config::timeout::TimeoutConfig::builder() |
466 | 0 | .connect_timeout(Duration::from_secs(15)) |
467 | 0 | .build(), |
468 | 0 | ) |
469 | 0 | .region(Region::new(Cow::Owned(spec.region.clone()))) |
470 | 0 | .http_client(http_client) |
471 | 0 | .load() |
472 | 0 | .await; |
473 | | |
474 | 0 | Client::new(&config) |
475 | | }; |
476 | 0 | Self::new_with_client_and_jitter(spec, s3_client, jitter_fn, now_fn) |
477 | 0 | } |
478 | | |
479 | 12 | pub fn new_with_client_and_jitter( |
480 | 12 | spec: &ExperimentalAwsSpec, |
481 | 12 | s3_client: Client, |
482 | 12 | jitter_fn: Arc<dyn Fn(Duration) -> Duration + Send + Sync>, |
483 | 12 | now_fn: NowFn, |
484 | 12 | ) -> Result<Arc<Self>, Error> { |
485 | 12 | Ok(Arc::new(Self { |
486 | 12 | s3_client: Arc::new(s3_client), |
487 | 12 | now_fn, |
488 | 12 | bucket: spec.bucket.to_string(), |
489 | 12 | key_prefix: spec |
490 | 12 | .common |
491 | 12 | .key_prefix |
492 | 12 | .as_ref() |
493 | 12 | .unwrap_or(&String::new()) |
494 | 12 | .clone(), |
495 | 12 | retrier: Retrier::new( |
496 | 12 | Arc::new(|duration| Box::pin(sleep(duration))),
497 | 12 | jitter_fn, |
498 | 12 | spec.common.retry.clone(), |
499 | | ), |
500 | 12 | consider_expired_after_s: i64::from(spec.common.consider_expired_after_s), |
501 | 12 | max_retry_buffer_per_request: spec |
502 | 12 | .common |
503 | 12 | .max_retry_buffer_per_request |
504 | 12 | .unwrap_or(DEFAULT_MAX_RETRY_BUFFER_PER_REQUEST), |
505 | 12 | multipart_max_concurrent_uploads: spec |
506 | 12 | .common |
507 | 12 | .multipart_max_concurrent_uploads |
508 | 12 | .map_or(DEFAULT_MULTIPART_MAX_CONCURRENT_UPLOADS, |v| v), |
509 | | })) |
510 | 12 | } |
511 | | |
512 | 14 | fn make_s3_path(&self, key: &StoreKey<'_>) -> String { |
513 | 14 | format!("{}{}", self.key_prefix, key.as_str(),) |
514 | 14 | } |
515 | | |
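 | | /// HEAD the object and return its size. Returns `None` for missing objects
 | | /// or, when `consider_expired_after_s` is non-zero, for objects whose
 | | /// `last_modified` timestamp makes them count as expired.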
516 | 5 | async fn has(self: Pin<&Self>, digest: &StoreKey<'_>) -> Result<Option<u64>, Error> { |
517 | 5 | self.retrier |
518 | 8 | .retry(unfold((), move |state| async move {
519 | 8 | let result = self |
520 | 8 | .s3_client |
521 | 8 | .head_object() |
522 | 8 | .bucket(&self.bucket) |
523 | 8 | .key(self.make_s3_path(&digest.borrow())) |
524 | 8 | .send() |
525 | 8 | .await; |
526 | | |
527 | 8 | match result { |
528 | 4 | Ok(head_object_output) => { |
529 | 4 | if self.consider_expired_after_s != 0 {
  |  Branch (529:28): [True: 0, False: 0]
  |  Branch (529:28): [Folded - Ignored]
  |  Branch (529:28): [True: 2, False: 2]
530 | 2 | if let Some(last_modified) = head_object_output.last_modified {
  |  Branch (530:36): [True: 0, False: 0]
  |  Branch (530:36): [Folded - Ignored]
  |  Branch (530:36): [True: 2, False: 0]
531 | 2 | let now_s = (self.now_fn)().unix_timestamp() as i64; |
532 | 2 | if last_modified.secs() + self.consider_expired_after_s <= now_s {
  |  Branch (532:36): [True: 0, False: 0]
  |  Branch (532:36): [Folded - Ignored]
  |  Branch (532:36): [True: 1, False: 1]
533 | 1 | return Some((RetryResult::Ok(None), state)); |
534 | 1 | } |
535 | 0 | } |
536 | 2 | } |
537 | 3 | let Some(length) = head_object_output.content_length else {
  |  Branch (537:29): [True: 0, False: 0]
  |  Branch (537:29): [Folded - Ignored]
  |  Branch (537:29): [True: 3, False: 0]
538 | 0 | return Some((RetryResult::Ok(None), state)); |
539 | | }; |
540 | 3 | if length >= 0 {
  |  Branch (540:28): [True: 0, False: 0]
  |  Branch (540:28): [Folded - Ignored]
  |  Branch (540:28): [True: 3, False: 0]
541 | 3 | return Some((RetryResult::Ok(Some(length as u64)), state)); |
542 | 0 | } |
543 | 0 | Some(( |
544 | 0 | RetryResult::Err(make_err!( |
545 | 0 | Code::InvalidArgument, |
546 | 0 | "Negative content length in S3: {length:?}", |
547 | 0 | )), |
548 | 0 | state, |
549 | 0 | )) |
550 | | } |
551 | 4 | Err(sdk_error) => match sdk_error.into_service_error() { |
552 | 1 | HeadObjectError::NotFound(_) => Some((RetryResult::Ok(None), state)), |
553 | 3 | other => Some(( |
554 | 3 | RetryResult::Retry(make_err!( |
555 | 3 | Code::Unavailable, |
556 | 3 | "Unhandled HeadObjectError in S3: {other:?}" |
557 | 3 | )), |
558 | 3 | state, |
559 | 3 | )), |
560 | | }, |
561 | | } |
562 | 16 | })) |
563 | 5 | .await |
564 | 5 | } |
565 | | } |
566 | | |
567 | | #[async_trait] |
568 | | impl<I, NowFn> StoreDriver for S3Store<NowFn> |
569 | | where |
570 | | I: InstantWrapper, |
571 | | NowFn: Fn() -> I + Send + Sync + Unpin + 'static, |
572 | | { |
573 | | async fn has_with_results( |
574 | | self: Pin<&Self>, |
575 | | keys: &[StoreKey<'_>], |
576 | | results: &mut [Option<u64>], |
577 | 12 | ) -> Result<(), Error> { |
578 | 6 | keys.iter() |
579 | 6 | .zip(results.iter_mut()) |
580 | 6 | .map(|(key, result)| async move { |
581 | | // We need to do a special pass to ensure our zero key exists.
582 | 6 | if is_zero_digest(key.borrow()) {
  |  Branch (582:20): [True: 0, False: 0]
  |  Branch (582:20): [Folded - Ignored]
  |  Branch (582:20): [True: 1, False: 5]
583 | 1 | *result = Some(0); |
584 | 1 | return Ok::<_, Error>(()); |
585 | 5 | } |
586 | 5 | *result = self.has(key).await?;
587 | 5 | Ok::<_, Error>(()) |
588 | 12 | }) |
589 | 6 | .collect::<FuturesUnordered<_>>() |
590 | 6 | .try_collect() |
591 | 6 | .await |
592 | 12 | } |
593 | | |
594 | | async fn update( |
595 | | self: Pin<&Self>, |
596 | | digest: StoreKey<'_>, |
597 | | mut reader: DropCloserReadHalf, |
598 | | upload_size: UploadSizeInfo, |
599 | 4 | ) -> Result<(), Error> { |
600 | 2 | let s3_path = &self.make_s3_path(&digest.borrow()); |
601 | | |
602 | 2 | let max_size = match upload_size { |
603 | 2 | UploadSizeInfo::ExactSize(sz) | UploadSizeInfo::MaxSize(sz) => sz,
604 | | }; |
605 | | |
606 | | // Note(aaronmondal) It might be more optimal to use a different |
607 | | // heuristic here, but for simplicity we use a hard coded value. |
608 | | // Anything going down this if-statement will have the advantage of only |
609 | | // 1 network request for the upload instead of minimum of 3 required for |
610 | | // multipart upload requests. |
611 | | // |
612 | | // Note(aaronmondal) If the upload size is not known, we go down the multipart upload path. |
613 | | // This is not very efficient, but it greatly reduces the complexity of the code. |
614 | 2 | if max_size < MIN_MULTIPART_SIZE && matches!(upload_size, UploadSizeInfo::ExactSize(_)) {
  |  Branch (614:12): [True: 0, False: 0]
  |  Branch (614:12): [Folded - Ignored]
  |  Branch (614:12): [True: 1, False: 1]
615 | 1 | let UploadSizeInfo::ExactSize(sz) = upload_size else {
  |  Branch (615:17): [True: 0, False: 0]
  |  Branch (615:17): [Folded - Ignored]
  |  Branch (615:17): [True: 1, False: 0]
616 | 0 | unreachable!("upload_size must be UploadSizeInfo::ExactSize here"); |
617 | | }; |
618 | 1 | reader.set_max_recent_data_size( |
619 | 1 | u64::try_from(self.max_retry_buffer_per_request) |
620 | 1 | .err_tip(|| "Could not convert max_retry_buffer_per_request to u64")?0 , |
621 | | ); |
622 | 1 | return self |
623 | 1 | .retrier |
624 | 1 | .retry(unfold(reader, move |mut reader| async move { |
625 | | // We need to make a new pair here because the aws sdk does not give us |
626 | | // back the body after we send it in order to retry. |
627 | 1 | let (mut tx, rx) = make_buf_channel_pair(); |
628 | | |
629 | | // Upload the data to the S3 backend. |
630 | 1 | let result = { |
631 | 1 | let reader_ref = &mut reader; |
632 | 1 | let (upload_res, bind_res) = tokio::join!( |
633 | 1 | self.s3_client |
634 | 1 | .put_object() |
635 | 1 | .bucket(&self.bucket) |
636 | 1 | .key(s3_path.clone()) |
637 | 1 | .content_length(sz as i64) |
638 | 1 | .body(ByteStream::from_body_1_x(BodyWrapper { |
639 | 1 | reader: rx, |
640 | 1 | size: sz, |
641 | 1 | })) |
642 | 1 | .send() |
643 | 1 | .map_ok_or_else(|e| Err(make_err!(Code::Aborted, "{e:?}")), |_| Ok(())),
644 | | // Stream all data from the reader channel to the writer channel. |
645 | 1 | tx.bind_buffered(reader_ref) |
646 | | ); |
647 | 1 | upload_res |
648 | 1 | .merge(bind_res) |
649 | 1 | .err_tip(|| "Failed to upload file to s3 in single chunk") |
650 | | }; |
651 | | |
652 | | // If we failed to upload the file, check to see if we can retry. |
653 | 1 | let retry_result = result.map_or_else(|mut err| {
654 | | // Ensure our code is Code::Aborted, so the client can retry if possible. |
655 | 0 | err.code = Code::Aborted; |
656 | 0 | let bytes_received = reader.get_bytes_received(); |
657 | 0 | if let Err(try_reset_err) = reader.try_reset_stream() {
  |  Branch (657:32): [True: 0, False: 0]
  |  Branch (657:32): [Folded - Ignored]
  |  Branch (657:32): [True: 0, False: 0]
658 | 0 | error!( |
659 | | ?bytes_received, |
660 | | err = ?try_reset_err, |
661 | 0 | "Unable to reset stream after failed upload in S3Store::update" |
662 | | ); |
663 | 0 | return RetryResult::Err(err |
664 | 0 | .merge(try_reset_err) |
665 | 0 | .append(format!("Failed to retry upload with {bytes_received} bytes received in S3Store::update"))); |
666 | 0 | } |
667 | 0 | let err = err.append(format!("Retry on upload happened with {bytes_received} bytes received in S3Store::update")); |
668 | 0 | info!( |
669 | | ?err, |
670 | | ?bytes_received, |
671 | 0 | "Retryable S3 error" |
672 | | ); |
673 | 0 | RetryResult::Retry(err) |
674 | 1 | }, |()| RetryResult::Ok(()));
675 | 1 | Some((retry_result, reader)) |
676 | 2 | })) |
677 | 1 | .await; |
678 | 1 | } |
679 | | |
680 | 1 | let upload_id = &self |
681 | 1 | .retrier |
682 | 1 | .retry(unfold((), move |()| async move { |
683 | 1 | let retry_result = self |
684 | 1 | .s3_client |
685 | 1 | .create_multipart_upload() |
686 | 1 | .bucket(&self.bucket) |
687 | 1 | .key(s3_path) |
688 | 1 | .send() |
689 | 1 | .await |
690 | 1 | .map_or_else( |
691 | 0 | |e| { |
692 | 0 | RetryResult::Retry(make_err!( |
693 | 0 | Code::Aborted, |
694 | 0 | "Failed to create multipart upload to s3: {e:?}" |
695 | 0 | )) |
696 | 0 | }, |
697 | 1 | |CreateMultipartUploadOutput { upload_id, .. }| { |
698 | 1 | upload_id.map_or_else( |
699 | 0 | || { |
700 | 0 | RetryResult::Err(make_err!( |
701 | 0 | Code::Internal, |
702 | 0 | "Expected upload_id to be set by s3 response" |
703 | 0 | )) |
704 | 0 | }, |
705 | | RetryResult::Ok, |
706 | | ) |
707 | 1 | }, |
708 | | ); |
709 | 1 | Some((retry_result, ())) |
710 | 2 | })) |
711 | 1 | .await?;
712 | | |
713 | | // S3 requires us to upload in parts if the size is greater than 5GB. The part size must be at least |
714 | | // 5MB (except the last part), and an upload can have at most 10,000 parts.
715 | 1 | let bytes_per_upload_part = |
716 | 1 | (max_size / (MIN_MULTIPART_SIZE - 1)).clamp(MIN_MULTIPART_SIZE, MAX_MULTIPART_SIZE); |
717 | | |
718 | 1 | let upload_parts = move || async move { |
719 | | // This will ensure we only have `multipart_max_concurrent_uploads` * `bytes_per_upload_part` |
720 | | // bytes in memory at any given time waiting to be uploaded. |
721 | 1 | let (tx, mut rx) = mpsc::channel(self.multipart_max_concurrent_uploads); |
722 | | |
723 | 1 | let read_stream_fut = async move { |
724 | 1 | let retrier = &Pin::get_ref(self).retrier; |
725 | | // Note: Our break condition is when we reach EOF. |
726 | 4 | for part_number in 1..i32::MAX { |
727 | 4 | let write_buf = reader |
728 | 4 | .consume(Some(usize::try_from(bytes_per_upload_part).err_tip( |
729 | | || "Could not convert bytes_per_upload_part to usize", |
730 | 0 | )?)) |
731 | 4 | .await |
732 | 4 | .err_tip(|| "Failed to read chunk in s3_store")?0 ; |
733 | 4 | if write_buf.is_empty() {
  |  Branch (733:24): [True: 0, False: 0]
  |  Branch (733:24): [Folded - Ignored]
  |  Branch (733:24): [True: 1, False: 3]
734 | 1 | break; // Reached EOF. |
735 | 3 | } |
736 | | |
737 | 3 | tx.send(retrier.retry(unfold(write_buf, move |write_buf| { |
738 | 3 | async move { |
739 | 3 | let retry_result = self |
740 | 3 | .s3_client |
741 | 3 | .upload_part() |
742 | 3 | .bucket(&self.bucket) |
743 | 3 | .key(s3_path) |
744 | 3 | .upload_id(upload_id) |
745 | 3 | .body(ByteStream::new(SdkBody::from(write_buf.clone()))) |
746 | 3 | .part_number(part_number) |
747 | 3 | .send() |
748 | 3 | .await |
749 | 3 | .map_or_else( |
750 | 0 | |e| { |
751 | 0 | RetryResult::Retry(make_err!( |
752 | 0 | Code::Aborted, |
753 | 0 | "Failed to upload part {part_number} in S3 store: {e:?}" |
754 | 0 | )) |
755 | 0 | }, |
756 | 3 | |mut response| { |
757 | 3 | RetryResult::Ok( |
758 | 3 | CompletedPartBuilder::default() |
759 | 3 | // Only set an entity tag if it exists. This saves |
760 | 3 | // 13 bytes per part on the final request if it can |
761 | 3 | // omit the `<ETAG><ETAG/>` string. |
762 | 3 | .set_e_tag(response.e_tag.take()) |
763 | 3 | .part_number(part_number) |
764 | 3 | .build(), |
765 | 3 | ) |
766 | 3 | }, |
767 | | ); |
768 | 3 | Some((retry_result, write_buf)) |
769 | 3 | } |
770 | 3 | }))) |
771 | 3 | .await |
772 | 3 | .map_err(|_| {
773 | 0 | make_err!(Code::Internal, "Failed to send part to channel in s3_store") |
774 | 0 | })?; |
775 | | } |
776 | 1 | Result::<_, Error>::Ok(()) |
777 | 1 | } |
778 | 1 | .fuse(); |
779 | | |
780 | 1 | let mut upload_futures = FuturesUnordered::new(); |
781 | | |
782 | 1 | let mut completed_parts = Vec::with_capacity( |
783 | 1 | usize::try_from(cmp::min( |
784 | 1 | MAX_UPLOAD_PARTS as u64, |
785 | 1 | (max_size / bytes_per_upload_part) + 1, |
786 | | )) |
787 | 1 | .err_tip(|| "Could not convert u64 to usize")?0 , |
788 | | ); |
789 | 1 | tokio::pin!(read_stream_fut); |
790 | | loop { |
791 | 8 | if read_stream_fut.is_terminated() && rx.is_empty() && upload_futures.is_empty() {
  |  Branch (791:20): [True: 0, False: 0]
  |  Branch (791:55): [True: 0, False: 0]
  |  Branch (791:72): [True: 0, False: 0]
  |  Branch (791:20): [Folded - Ignored]
  |  Branch (791:55): [Folded - Ignored]
  |  Branch (791:72): [Folded - Ignored]
  |  Branch (791:20): [True: 7, False: 1]
  |  Branch (791:55): [True: 3, False: 4]
  |  Branch (791:72): [True: 1, False: 2]
792 | 1 | break; // No more data to process. |
793 | 7 | } |
794 | 7 | tokio::select! { |
795 | 7 | result1 = &mut read_stream_fut => result1 ?0 , // Return error or wait for other futures. |
796 | 7 | Some(upload_result3 ) = upload_futures.next() => completed_parts3 .push3 (upload_result3 ?0 ), |
797 | 7 | Some(fut3 ) = rx.recv() => upload_futures3 .push3 (fut3 ), |
798 | | } |
799 | | } |
800 | | |
801 | | // Even though the spec does not require parts to be sorted by number, we do it just in case |
802 | | // there's an S3 implementation that requires it. |
803 | 1 | completed_parts.sort_unstable_by_key(|part| part.part_number); |
804 | | |
805 | 1 | self.retrier |
806 | 1 | .retry(unfold(completed_parts, move |completed_parts| async move { |
807 | | Some(( |
808 | 1 | self.s3_client |
809 | 1 | .complete_multipart_upload() |
810 | 1 | .bucket(&self.bucket) |
811 | 1 | .key(s3_path) |
812 | 1 | .multipart_upload( |
813 | 1 | CompletedMultipartUploadBuilder::default() |
814 | 1 | .set_parts(Some(completed_parts.clone())) |
815 | 1 | .build(), |
816 | 1 | ) |
817 | 1 | .upload_id(upload_id) |
818 | 1 | .send() |
819 | 1 | .await |
820 | 1 | .map_or_else( |
821 | 0 | |e| { |
822 | 0 | RetryResult::Retry(make_err!( |
823 | 0 | Code::Aborted, |
824 | 0 | "Failed to complete multipart upload in S3 store: {e:?}" |
825 | 0 | )) |
826 | 0 | }, |
827 | 1 | |_| RetryResult::Ok(()), |
828 | | ), |
829 | 1 | completed_parts, |
830 | | )) |
831 | 2 | })) |
832 | 1 | .await |
833 | 2 | }; |
834 | | // Upload our parts and complete the multipart upload. |
835 | | // If we fail attempt to abort the multipart upload (cleanup). |
836 | 1 | upload_parts() |
837 | 1 | .or_else(move |e| async move {
838 | 0 | Result::<(), _>::Err(e).merge( |
839 | | // Note: We don't retry here because this is just a best attempt. |
840 | 0 | self.s3_client |
841 | 0 | .abort_multipart_upload() |
842 | 0 | .bucket(&self.bucket) |
843 | 0 | .key(s3_path) |
844 | 0 | .upload_id(upload_id) |
845 | 0 | .send() |
846 | 0 | .await |
847 | 0 | .map_or_else( |
848 | 0 | |e| { |
849 | 0 | let err = make_err!( |
850 | 0 | Code::Aborted, |
851 | | "Failed to abort multipart upload in S3 store : {e:?}" |
852 | | ); |
853 | 0 | info!(?err, "Multipart upload error"); |
854 | 0 | Err(err) |
855 | 0 | }, |
856 | 0 | |_| Ok(()), |
857 | | ), |
858 | | ) |
859 | 0 | }) |
860 | 1 | .await |
861 | 4 | } |
862 | | |
863 | | async fn get_part( |
864 | | self: Pin<&Self>, |
865 | | key: StoreKey<'_>, |
866 | | writer: &mut DropCloserWriteHalf, |
867 | | offset: u64, |
868 | | length: Option<u64>, |
869 | 10 | ) -> Result<(), Error> { |
870 | 5 | if is_zero_digest(key.borrow()) {
  |  Branch (870:12): [True: 0, False: 0]
  |  Branch (870:12): [Folded - Ignored]
  |  Branch (870:12): [True: 1, False: 4]
871 | 1 | writer |
872 | 1 | .send_eof() |
873 | 1 | .err_tip(|| "Failed to send zero EOF in filesystem store get_part")?0 ; |
874 | 1 | return Ok(()); |
875 | 4 | } |
876 | | |
877 | 4 | let s3_path = &self.make_s3_path(&key); |
878 | 4 | let end_read_byte = length |
879 | 4 | .map_or(Some(None), |length| Some(offset.checked_add(length)))
880 | 4 | .err_tip(|| "Integer overflow protection triggered")?;
881 | | |
882 | 4 | self.retrier |
883 | 7 | .retry(unfold(writer, move |writer| async move {
884 | 7 | let result = self |
885 | 7 | .s3_client |
886 | 7 | .get_object() |
887 | 7 | .bucket(&self.bucket) |
888 | 7 | .key(s3_path) |
889 | 7 | .range(format!( |
890 | 7 | "bytes={}-{}", |
891 | 7 | offset + writer.get_bytes_written(), |
892 | 7 | end_read_byte.map_or_else(String::new, |v| v.to_string())
893 | | )) |
894 | 7 | .send() |
895 | 7 | .await; |
896 | | |
897 | 7 | let mut s3_in_stream = match result {
898 | 4 | Ok(head_object_output) => head_object_output.body, |
899 | 3 | Err(sdk_error) => match sdk_error.into_service_error() { |
900 | 0 | GetObjectError::NoSuchKey(e) => { |
901 | 0 | return Some(( |
902 | 0 | RetryResult::Err(make_err!( |
903 | 0 | Code::NotFound, |
904 | 0 | "No such key in S3: {e}" |
905 | 0 | )), |
906 | 0 | writer, |
907 | 0 | )); |
908 | | } |
909 | 3 | other => { |
910 | 3 | return Some(( |
911 | 3 | RetryResult::Retry(make_err!( |
912 | 3 | Code::Unavailable, |
913 | 3 | "Unhandled GetObjectError in S3: {other:?}", |
914 | 3 | )), |
915 | 3 | writer, |
916 | 3 | )); |
917 | | } |
918 | | }, |
919 | | }; |
920 | | |
921 | | // Copy data from s3 input stream to the writer stream. |
922 | 8 | while let Some(maybe_bytes) = s3_in_stream.next().await {
  |  Branch (922:27): [True: 0, False: 0]
  |  Branch (922:27): [Folded - Ignored]
  |  Branch (922:27): [True: 4, False: 4]
923 | 4 | match maybe_bytes { |
924 | 4 | Ok(bytes) => { |
925 | 4 | if bytes.is_empty() {
  |  Branch (925:32): [True: 0, False: 0]
  |  Branch (925:32): [Folded - Ignored]
  |  Branch (925:32): [True: 1, False: 3]
926 | | // Ignore possible EOF. Different implementations of S3 may or may not |
927 | | // send EOF this way. |
928 | 1 | continue; |
929 | 3 | } |
930 | 3 | if let Err(e) = writer.send(bytes).await {
  |  Branch (930:36): [True: 0, False: 0]
  |  Branch (930:36): [Folded - Ignored]
  |  Branch (930:36): [True: 0, False: 3]
931 | 0 | return Some(( |
932 | 0 | RetryResult::Err(make_err!( |
933 | 0 | Code::Aborted, |
934 | 0 | "Error sending bytes to consumer in S3: {e}" |
935 | 0 | )), |
936 | 0 | writer, |
937 | 0 | )); |
938 | 3 | } |
939 | | } |
940 | 0 | Err(e) => { |
941 | 0 | return Some(( |
942 | 0 | RetryResult::Retry(make_err!( |
943 | 0 | Code::Aborted, |
944 | 0 | "Bad bytestream element in S3: {e}" |
945 | 0 | )), |
946 | 0 | writer, |
947 | 0 | )); |
948 | | } |
949 | | } |
950 | | } |
951 | 4 | if let Err(e) = writer.send_eof() {
  |  Branch (951:24): [True: 0, False: 0]
  |  Branch (951:24): [Folded - Ignored]
  |  Branch (951:24): [True: 0, False: 4]
952 | 0 | return Some(( |
953 | 0 | RetryResult::Err(make_err!( |
954 | 0 | Code::Aborted, |
955 | 0 | "Failed to send EOF to consumer in S3: {e}" |
956 | 0 | )), |
957 | 0 | writer, |
958 | 0 | )); |
959 | 4 | } |
960 | 4 | Some((RetryResult::Ok(()), writer)) |
961 | 14 | })) |
962 | 4 | .await |
963 | 10 | } |
964 | | |
965 | 0 | fn inner_store(&self, _digest: Option<StoreKey>) -> &'_ dyn StoreDriver { |
966 | 0 | self |
967 | 0 | } |
968 | | |
969 | 0 | fn as_any<'a>(&'a self) -> &'a (dyn core::any::Any + Sync + Send + 'static) { |
970 | 0 | self |
971 | 0 | } |
972 | | |
973 | 0 | fn as_any_arc(self: Arc<Self>) -> Arc<dyn core::any::Any + Sync + Send + 'static> { |
974 | 0 | self |
975 | 0 | } |
976 | | |
977 | 0 | fn register_health(self: Arc<Self>, registry: &mut HealthRegistryBuilder) { |
978 | 0 | registry.register_indicator(self); |
979 | 0 | } |
980 | | } |
981 | | |
982 | | #[async_trait] |
983 | | impl<I, NowFn> HealthStatusIndicator for S3Store<NowFn> |
984 | | where |
985 | | I: InstantWrapper, |
986 | | NowFn: Fn() -> I + Send + Sync + Unpin + 'static, |
987 | | { |
988 | 0 | fn get_name(&self) -> &'static str { |
989 | 0 | "S3Store" |
990 | 0 | } |
991 | | |
992 | 0 | async fn check_health(&self, namespace: Cow<'static, str>) -> HealthStatus { |
993 | 0 | StoreDriver::check_health(Pin::new(self), namespace).await |
994 | 0 | } |
995 | | } |