Skip to content

Commit

Permalink
Add empty data cache test (#1149)
Browse files Browse the repository at this point in the history
## Description of change

Adds an empty cache retrieval test

Fixes the express cache so that it passes the new empty-cache test

Relevant issues: N/A

## Does this change impact existing behavior?

Yes - the shared cache no longer emits a "request failed" error when reading from an
object that doesn't exist

## Does this change need a changelog entry in any of the crates?

No

---

By submitting this pull request, I confirm that my contribution is made
under the terms of the Apache 2.0 license and I agree to the terms of
the [Developer Certificate of Origin
(DCO)](https://developercertificate.org/).

---------

Signed-off-by: Simon Beal <[email protected]>
  • Loading branch information
muddyfish authored Nov 20, 2024
1 parent 1e331a4 commit 9d26b3c
Show file tree
Hide file tree
Showing 2 changed files with 54 additions and 18 deletions.
31 changes: 16 additions & 15 deletions mountpoint-s3/src/data_cache/express_data_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -134,21 +134,6 @@ where
Err(e) => return Err(e.into()),
};

let object_metadata = result
.get_object_metadata()
.await
.map_err(|err| DataCacheError::IoFailure(err.into()))?;

let checksum = result
.get_object_checksum()
.await
.map_err(|err| DataCacheError::IoFailure(err.into()))?;
let crc32c = crc32c_from_base64(&checksum.checksum_crc32c.ok_or(DataCacheError::BlockChecksumMissing)?)
.map_err(|err| DataCacheError::IoFailure(err.into()))?;

let block_metadata = BlockMetadata::new(block_idx, block_offset, cache_key, &self.bucket_name, crc32c);
block_metadata.validate_object_metadata(&object_metadata)?;

pin_mut!(result);
// Guarantee that the request will start even in case of `initial_read_window == 0`.
result.as_mut().increment_read_window(self.config.block_size as usize);
Expand All @@ -171,6 +156,22 @@ where
}
}
let buffer = buffer.freeze();

let object_metadata = result
.get_object_metadata()
.await
.map_err(|err| DataCacheError::IoFailure(err.into()))?;

let checksum = result
.get_object_checksum()
.await
.map_err(|err| DataCacheError::IoFailure(err.into()))?;
let crc32c = crc32c_from_base64(&checksum.checksum_crc32c.ok_or(DataCacheError::BlockChecksumMissing)?)
.map_err(|err| DataCacheError::IoFailure(err.into()))?;

let block_metadata = BlockMetadata::new(block_idx, block_offset, cache_key, &self.bucket_name, crc32c);
block_metadata.validate_object_metadata(&object_metadata)?;

Ok(Some(ChecksummedBytes::new_from_inner_data(buffer, crc32c)))
}

Expand Down
41 changes: 38 additions & 3 deletions mountpoint-s3/tests/fuse_tests/cache_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ use crate::common::fuse::s3_session::create_crt_client;
use crate::common::s3::{get_test_bucket, get_test_prefix};

use mountpoint_s3::data_cache::{DataCache, DiskDataCache, DiskDataCacheConfig};
use mountpoint_s3::object::ObjectId;
use mountpoint_s3::prefetch::caching_prefetch;
use mountpoint_s3_client::S3CrtClient;

Expand All @@ -20,8 +21,6 @@ use crate::common::s3::{get_express_bucket, get_standard_bucket};
#[cfg(feature = "s3express_tests")]
use mountpoint_s3::data_cache::{build_prefix, get_s3_key, BlockIndex, ExpressDataCache};
#[cfg(feature = "s3express_tests")]
use mountpoint_s3::object::ObjectId;
#[cfg(feature = "s3express_tests")]
use mountpoint_s3_client::ObjectClient;

const CACHE_BLOCK_SIZE: u64 = 1024 * 1024;
Expand Down Expand Up @@ -141,6 +140,29 @@ fn disk_cache_write_read(key_suffix: &str, key_size: usize, object_size: usize)
);
}

#[tokio::test]
#[cfg(feature = "s3express_tests")]
async fn express_cache_read_empty() {
    // Build a CRT client and an express-backed cache, then verify that a
    // read of never-written data is reported as a miss, not an error.
    let client = create_crt_client(CLIENT_PART_SIZE, CLIENT_PART_SIZE, Default::default());
    let source_bucket = get_standard_bucket();
    let cache_bucket = get_express_bucket();
    let cache = ExpressDataCache::new(client, Default::default(), &source_bucket, &cache_bucket);

    cache_read_empty(cache, "express_cache_read_empty").await;
}

#[tokio::test]
async fn disk_cache_read_empty() {
    // A fresh temporary directory guarantees the disk cache starts out empty.
    let cache_directory = tempfile::tempdir().unwrap();
    let config = DiskDataCacheConfig {
        block_size: CACHE_BLOCK_SIZE,
        limit: Default::default(),
    };
    let cache = DiskDataCache::new(cache_directory.path().to_path_buf(), config);

    cache_read_empty(cache, "disk_cache_read_empty").await;
}

#[tokio::test]
#[cfg(feature = "s3express_tests")]
async fn express_cache_verify_fail_non_express() {
Expand Down Expand Up @@ -252,6 +274,20 @@ fn cache_write_read_base<Cache>(
);
}

/// Verify that reading a block which was never written yields `Ok(None)`
/// (a clean cache miss) rather than an error.
async fn cache_read_empty<Cache>(cache: Cache, test_name: &str)
where
    Cache: DataCache + Send + Sync + 'static,
{
    let prefix = get_test_prefix(test_name);
    let object_id = get_object_id(&prefix, "does-not-exist", "etag");

    // Look up a block that has never had anything written to it.
    let lookup = cache
        .get_block(&object_id, 0, 0, 1000)
        .await
        .expect("should not return an error");
    assert!(lookup.is_none());
}

/// Generates random data of the specified size
fn random_binary_data(size_in_bytes: usize) -> Vec<u8> {
let seed = rand::thread_rng().gen();
Expand Down Expand Up @@ -291,7 +327,6 @@ where
(mount_point, session)
}

#[cfg(feature = "s3express_tests")]
/// Build an `ObjectId` for `key` under `prefix`, using `etag` as the entity tag.
fn get_object_id(prefix: &str, key: &str, etag: &str) -> ObjectId {
    let full_key = format!("{prefix}{key}");
    ObjectId::new(full_key, etag.into())
}
Expand Down

0 comments on commit 9d26b3c

Please sign in to comment.