author    Dov Shlachter <dovs@google.com>    2024-04-16 11:25:21 -0700
committer Dov Shlachter <dovs@google.com>    2024-04-22 16:56:06 -0700
commit    c984ad87b7588c01da4c6c23e75fead2b6cb0f7a (patch)
tree      4ea590096ec1f027bcc75850884de2cc1de8da52
parent    276f9de6b1b1d83013360ee3ff5c9f442be164cb (diff)
Use safemath library within GBL
Swap out usage of checked arithmetic helper functions to use
safemath::SafeNum. Includes conversion between usize and u64.

Tests: all unit tests pass
Change-Id: Ic5b43ed185f4b9eef8489e72c592aca3d4cd2919
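The heart of the change, sketched in isolation: instead of chaining checked_* helpers that each return a Result, arithmetic is performed on safemath::SafeNum, which records any overflow internally and surfaces it once at the final conversion back to a primitive integer. A minimal sketch, not code from this change, assuming only the SafeNum behavior the patch relies on (From impls for integer types, deferred overflow, TryFrom<SafeNum> for u64/usize with safemath::Error as the error type); offset_checked/offset_safe are hypothetical names:

use safemath::SafeNum;

// Old style: every intermediate step is individually fallible.
fn offset_checked(first: u64, block_size: u64, offset: u64) -> Option<u64> {
    first.checked_mul(block_size)?.checked_add(offset)
}

// New style: arithmetic on SafeNum never fails mid-expression; a latched
// overflow is reported by the single try_into() at the end.
fn offset_safe(first: u64, block_size: u64, offset: u64) -> Result<u64, safemath::Error> {
    (SafeNum::from(first) * block_size + offset).try_into()
}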
-rw-r--r--  gbl/libgbl/BUILD                    |   1
-rw-r--r--  gbl/libgbl/src/error.rs             |   1
-rw-r--r--  gbl/libgbl/src/fastboot/vars.rs     |   2
-rw-r--r--  gbl/libgbl/src/lib.rs               |  10
-rw-r--r--  gbl/libgbl/src/ops.rs               |  19
-rw-r--r--  gbl/libstorage/BUILD                |   2
-rw-r--r--  gbl/libstorage/src/gpt.rs           | 130
-rw-r--r--  gbl/libstorage/src/lib.rs           | 350
-rw-r--r--  gbl/libstorage/src/multi_blocks.rs  |  24
-rw-r--r--  gbl/libstorage/src/testlib.rs       |  20
10 files changed, 282 insertions(+), 277 deletions(-)
diff --git a/gbl/libgbl/BUILD b/gbl/libgbl/BUILD
index 70da4df..75b58fd 100644
--- a/gbl/libgbl/BUILD
+++ b/gbl/libgbl/BUILD
@@ -28,6 +28,7 @@ rust_library(
"@crc32fast",
"@cstr",
"@gbl//libfastboot",
+ "@gbl//libsafemath",
"@gbl//libstorage",
"@gbl//third_party/libzbi",
"@spin",
diff --git a/gbl/libgbl/src/error.rs b/gbl/libgbl/src/error.rs
index b4aa1f6..e7474ce 100644
--- a/gbl/libgbl/src/error.rs
+++ b/gbl/libgbl/src/error.rs
@@ -145,6 +145,7 @@ composite_enum! {
FromBytesUntilNulError(FromBytesUntilNulError),
FromBytesWithNulError(FromBytesWithNulError),
StorageError(StorageError),
+ SafeMathError(safemath::Error),
}
}
diff --git a/gbl/libgbl/src/fastboot/vars.rs b/gbl/libgbl/src/fastboot/vars.rs
index 39dcb12..521205a 100644
--- a/gbl/libgbl/src/fastboot/vars.rs
+++ b/gbl/libgbl/src/fastboot/vars.rs
@@ -104,7 +104,7 @@ impl Variable for Partition {
let id = snprintf!(id_str, "{:x}", id);
res = (|| {
for ptn in v.partition_iter() {
- let sz = ptn.size()?;
+ let sz: u64 = ptn.size()?;
let part = ptn.gpt_entry().name_to_str(part_name)?;
f(PARTITION_SIZE, &[part, id], snprintf!(size_str, "{:#x}", sz))?;
// Image type is not supported yet.
diff --git a/gbl/libgbl/src/lib.rs b/gbl/libgbl/src/lib.rs
index a244607..fc619e0 100644
--- a/gbl/libgbl/src/lib.rs
+++ b/gbl/libgbl/src/lib.rs
@@ -542,9 +542,13 @@ where
block_devices.sync_gpt_all(&mut |_, _, _| {});
// TODO(b/334962583): Implement zircon ABR + AVB.
// The following are placeholders for testing invocation in the integration test only.
- let ptn_size = block_devices.find_partition("zircon_a")?.size()?;
- let (kernel, remains) =
- load_buffer.split_at_mut(ptn_size.try_into().map_err(|_| Error::ArithmeticOverflow)?);
+ let ptn_size = block_devices
+ .find_partition("zircon_a")?
+ .size()
+ .map_err(|e: gbl_storage::StorageError| IntegrationError::StorageError(e))?
+ .try_into()
+ .or(Err(Error::ArithmeticOverflow))?;
+ let (kernel, remains) = load_buffer.split_at_mut(ptn_size);
block_devices.read_gpt_partition("zircon_a", 0, kernel)?;
self.ops.boot(BootImages::Fuchsia(FuchsiaBootImages {
zbi_kernel: kernel,
diff --git a/gbl/libgbl/src/ops.rs b/gbl/libgbl/src/ops.rs
index 901f63b..b89547d 100644
--- a/gbl/libgbl/src/ops.rs
+++ b/gbl/libgbl/src/ops.rs
@@ -33,6 +33,7 @@ use core::{
use gbl_storage::{
required_scratch_size, AsBlockDevice, AsMultiBlockDevices, BlockDevice, BlockIo,
};
+use safemath::SafeNum;
use super::slots;
@@ -143,16 +144,17 @@ pub trait GblOps {
/// Computes the sum of required scratch size for all block devices.
fn required_scratch_size(&mut self) -> GblResult<usize> {
- let mut total = 0usize;
+ let mut total = SafeNum::ZERO;
let mut res = Ok(());
self.visit_block_devices(&mut |io, id, max_gpt_entries| {
res = (|| {
- let scratch_size = required_scratch_size(io, max_gpt_entries)?;
- total = total.checked_add(scratch_size).ok_or(Error::ArithmeticOverflow)?;
+ total += required_scratch_size(io, max_gpt_entries).unwrap();
Ok(())
})();
})?;
- res.map(|_| total)
+
+ let total = usize::try_from(total).map_err(|e| e.into());
+ res.and(total)
}
}
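The accumulator change above, reduced to its essentials: a running SafeNum total absorbs each addition with no per-step check, and usize::try_from performs the single fallible conversion at the end. A sketch under the same SafeNum assumptions; total_scratch and its sizes slice are hypothetical:

use safemath::SafeNum;

// Sum a list of sizes; overflow, if any, is detected once at the end.
fn total_scratch(sizes: &[usize]) -> Result<usize, safemath::Error> {
    let mut total = SafeNum::ZERO;
    for s in sizes {
        total += *s; // deferred overflow check
    }
    usize::try_from(total)
}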
@@ -185,13 +187,14 @@ impl<T: GblOps> AsMultiBlockDevices for GblUtils<'_, '_, T> {
&mut self,
f: &mut dyn FnMut(&mut dyn AsBlockDevice, u64),
) -> core::result::Result<(), Option<&'static str>> {
- let mut scratch_offset = 0;
+ let mut scratch_offset = SafeNum::ZERO;
self.ops
.visit_block_devices(&mut |io, id, max_gpt_entries| {
// Not expected to fail as `Self::new()` should have checked any overflow.
- let scratch_size = required_scratch_size(io, max_gpt_entries).unwrap();
- let scratch = &mut self.scratch[scratch_offset..][..scratch_size];
- scratch_offset = scratch_offset.checked_add(scratch_size).unwrap();
+ let scratch_size: usize = required_scratch_size(io, max_gpt_entries).unwrap();
+ let scratch =
+ &mut self.scratch[scratch_offset.try_into().unwrap()..][..scratch_size];
+ scratch_offset += scratch_size;
f(&mut BlockDevice::new(io, scratch, max_gpt_entries), id);
})
.map_err(|v| v.0)
diff --git a/gbl/libstorage/BUILD b/gbl/libstorage/BUILD
index ef17d1c..9f0aed0 100644
--- a/gbl/libstorage/BUILD
+++ b/gbl/libstorage/BUILD
@@ -29,6 +29,7 @@ rust_library(
edition = "2021",
deps = [
"@crc32fast",
+ "@gbl//libsafemath",
"@zerocopy",
],
)
@@ -42,6 +43,7 @@ rust_library(
edition = "2021",
deps = [
":libstorage",
+ "@gbl//libsafemath",
],
)
diff --git a/gbl/libstorage/src/gpt.rs b/gbl/libstorage/src/gpt.rs
index 86d44e5..c5a44ed 100644
--- a/gbl/libstorage/src/gpt.rs
+++ b/gbl/libstorage/src/gpt.rs
@@ -12,14 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use crate::{
- add, aligned_subslice, div, mul, read, sub, to_usize, write_bytes, write_bytes_mut, BlockIo,
- Result, StorageError,
-};
+use crate::{aligned_subslice, read, write_bytes, write_bytes_mut, BlockIo, Result, StorageError};
use core::default::Default;
use core::mem::{align_of, size_of};
use core::num::NonZeroU64;
use crc32fast::Hasher;
+use safemath::SafeNum;
use zerocopy::{AsBytes, FromBytes, FromZeroes, Ref};
const GPT_GUID_LEN: usize = 16;
@@ -72,7 +70,7 @@ pub struct GptEntry {
impl GptEntry {
/// Return the partition entry size in blocks.
pub fn blocks(&self) -> Result<u64> {
- add(sub(self.last, self.first)?, 1)
+ u64::try_from((SafeNum::from(self.last) - self.first) + 1).map_err(|e| e.into())
}
/// Return whether this is a `NULL` entry. The first null entry marks the end of the partition
@@ -173,8 +171,7 @@ impl<'a> Gpt<'a> {
return Err(StorageError::InvalidInput);
}
let buffer = aligned_subslice(buffer, GPT_ENTRY_ALIGNMENT)?;
- *GptInfo::from_bytes(buffer) =
- GptInfo { num_valid_entries: None, max_entries: max_entries };
+ *GptInfo::from_bytes(buffer) = GptInfo { num_valid_entries: None, max_entries };
Self::from_existing(buffer)
}
@@ -186,32 +183,29 @@ impl<'a> Gpt<'a> {
pub fn from_existing(buffer: &'a mut [u8]) -> Result<Gpt<'a>> {
let buffer = aligned_subslice(buffer, GPT_ENTRY_ALIGNMENT)?;
let (info, remain) = Ref::<_, GptInfo>::new_from_prefix(buffer).unwrap();
- let entries_size = mul(info.max_entries, GPT_ENTRY_SIZE)?;
- let split_pos = add(GPT_HEADER_SIZE_PADDED, entries_size)?;
- let (primary, secondary) = remain.split_at_mut(to_usize(split_pos)?);
- let (primary_header, primary_entries) =
- primary.split_at_mut(to_usize(GPT_HEADER_SIZE_PADDED)?);
- let (secondary_header, secondary_entries) =
- secondary.split_at_mut(to_usize(GPT_HEADER_SIZE_PADDED)?);
+ let entries_size = SafeNum::from(info.max_entries) * GPT_ENTRY_SIZE;
+ let header_size: usize = SafeNum::from(GPT_HEADER_SIZE_PADDED).try_into()?;
+ let split_pos = entries_size + header_size;
+ let (primary, secondary) = remain.split_at_mut(split_pos.try_into()?);
+ let (primary_header, primary_entries) = primary.split_at_mut(header_size);
+ let (secondary_header, secondary_entries) = secondary.split_at_mut(header_size);
Ok(Self {
info: info.into_mut(),
- primary_header: primary_header,
- primary_entries: primary_entries,
- secondary_header: secondary_header,
- secondary_entries: &mut secondary_entries[..to_usize(entries_size)?],
+ primary_header,
+ primary_entries,
+ secondary_header,
+ secondary_entries: &mut secondary_entries[..entries_size.try_into()?],
})
}
/// The minimum buffer size needed for `new_from_buffer()`
pub(crate) fn required_buffer_size(max_entries: u64) -> Result<usize> {
- // Add 7 more bytes to accommodate 8-byte alignment.
- let entries_size = mul(max_entries, GPT_ENTRY_SIZE)?;
- let info_size = size_of::<GptInfo>() as u64;
- to_usize(add(
- add(info_size, mul(2, add(GPT_HEADER_SIZE_PADDED, entries_size)?)?)?,
- GPT_ENTRY_ALIGNMENT - 1,
- )?)
+ let entries_size = SafeNum::from(max_entries) * GPT_ENTRY_SIZE;
+ (((entries_size + GPT_HEADER_SIZE_PADDED) * 2) + size_of::<GptInfo>() + GPT_ENTRY_ALIGNMENT
+ - 1)
+ .try_into()
+ .map_err(|e: safemath::Error| e.into())
}
/// Return the list of GPT entries.
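Spelled out, the buffer that required_buffer_size() sizes is: one GptInfo, then two (padded header + entry array) regions for the primary and secondary copies, plus GPT_ENTRY_ALIGNMENT - 1 bytes of slack so the whole region can be shifted to an aligned start. The same shape with the constants left as parameters (a sketch; buffer_size and its arguments are hypothetical):

use safemath::SafeNum;

// info + 2 * (header + max_entries * entry_size) + (alignment - 1),
// mirroring the expression in required_buffer_size() above.
fn buffer_size(
    info: usize,
    header: u64,
    entry_size: u64,
    alignment: u64,
    max_entries: u64,
) -> Result<usize, safemath::Error> {
    ((SafeNum::from(max_entries) * entry_size + header) * 2 + info + alignment - 1).try_into()
}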
@@ -220,7 +214,7 @@ impl<'a> Gpt<'a> {
pub(crate) fn entries(&self) -> Result<&[GptEntry]> {
self.check_valid()?;
Ok(&Ref::<_, [GptEntry]>::new_slice(&self.primary_entries[..]).unwrap().into_slice()
- [..to_usize(self.info.num_valid_entries()?)?])
+ [..self.info.num_valid_entries()?.try_into()?])
}
/// Search for a partition entry.
@@ -254,41 +248,43 @@ impl<'a> Gpt<'a> {
) -> Result<bool> {
let (header_start, header_bytes, entries) = match header_type {
HeaderType::Primary => {
- (blk_dev.block_size(), &mut self.primary_header, &mut self.primary_entries)
+ (blk_dev.block_size().into(), &mut self.primary_header, &mut self.primary_entries)
}
HeaderType::Secondary => (
- mul(sub(blk_dev.num_blocks(), 1)?, blk_dev.block_size())?,
+ (SafeNum::from(blk_dev.num_blocks()) - 1) * blk_dev.block_size(),
&mut self.secondary_header,
&mut self.secondary_entries,
),
};
- read(blk_dev, header_start, header_bytes, scratch)?;
- let header = Ref::<_, GptHeader>::new_from_prefix(&header_bytes[..]).unwrap().0.into_ref();
+ read(blk_dev, header_start.try_into()?, header_bytes, scratch)?;
+ let header =
+ Ref::<_, GptHeader>::new_from_prefix(header_bytes.as_bytes()).unwrap().0.into_ref();
if header.magic != GPT_MAGIC {
return Ok(false);
}
- let entries_size = mul(header.entries_count as u64, GPT_ENTRY_SIZE)?;
- let entries_offset = mul(header.entries as u64, blk_dev.block_size())?;
- if header.entries_count as u64 > self.info.max_entries
- || add(entries_size, entries_offset)?
- > mul(sub(blk_dev.num_blocks(), 1)?, blk_dev.block_size())?
+ let entries_size = SafeNum::from(header.entries_count) * GPT_ENTRY_SIZE;
+ let entries_offset = SafeNum::from(header.entries) * blk_dev.block_size();
+ if self.info.max_entries < header.entries_count.into()
+ || u64::try_from(entries_size + entries_offset)?
+ > ((SafeNum::from(blk_dev.num_blocks()) - 1) * blk_dev.block_size()).try_into()?
{
return Ok(false);
}
+ let crc32_offset = SafeNum::from(GPT_CRC32_OFFSET).try_into()?;
let mut hasher = Hasher::new();
- hasher.update(&header.as_bytes()[..to_usize(GPT_CRC32_OFFSET)?]);
+ hasher.update(&header.as_bytes()[..crc32_offset]);
hasher.update(&[0u8; size_of::<u32>()]);
- hasher.update(&header.as_bytes()[to_usize(GPT_CRC32_OFFSET)? + size_of::<u32>()..]);
+ hasher.update(&header.as_bytes()[crc32_offset + size_of::<u32>()..]);
if hasher.finalize() != header.crc32 {
return Ok(false);
}
// Load the entries
- let out = &mut entries[..to_usize(entries_size)?];
- read(blk_dev, entries_offset, out, scratch)?;
+ let out = &mut entries[..entries_size.try_into()?];
+ read(blk_dev, entries_offset.try_into()?, out, scratch)?;
// Validate entries crc32.
Ok(header.entries_crc == crc32(out))
}
@@ -302,24 +298,24 @@ impl<'a> Gpt<'a> {
self.info.num_valid_entries = None;
let block_size = blk_dev.block_size();
- let total_blocks = blk_dev.num_blocks();
+ let total_blocks: SafeNum = blk_dev.num_blocks().into();
let primary_header_blk = 1;
let primary_header_pos = block_size;
- let secondary_header_blk = sub(total_blocks, 1)?;
- let secondary_header_pos = mul(secondary_header_blk, block_size)?;
+ let secondary_header_blk = total_blocks - 1;
+ let secondary_header_pos = secondary_header_blk * block_size;
// Entries position for restoring.
let primary_entries_blk = 2;
- let primary_entries_pos = mul(primary_entries_blk, block_size)?;
- let secondary_entries_pos = sub(secondary_header_pos, GPT_MAX_ENTRIES_SIZE)?;
- let secondary_entries_blk = div(secondary_entries_pos, block_size)?;
+ let primary_entries_pos = SafeNum::from(primary_entries_blk) * block_size;
+ let secondary_entries_pos = secondary_header_pos - GPT_MAX_ENTRIES_SIZE;
+ let secondary_entries_blk = secondary_entries_pos / block_size;
let primary_valid = self.validate_gpt(blk_dev, scratch, HeaderType::Primary)?;
let secondary_valid = self.validate_gpt(blk_dev, scratch, HeaderType::Secondary)?;
let primary_header = GptHeader::from_bytes(self.primary_header);
- let secondary_header = GptHeader::from_bytes(&mut self.secondary_header[..]);
+ let secondary_header = GptHeader::from_bytes(self.secondary_header.as_mut());
if !primary_valid {
if !secondary_valid {
return Err(StorageError::NoValidGpt);
@@ -328,26 +324,36 @@ impl<'a> Gpt<'a> {
primary_header.as_bytes_mut().clone_from_slice(secondary_header.as_bytes());
self.primary_entries.clone_from_slice(&self.secondary_entries);
primary_header.current = primary_header_blk;
- primary_header.backup = secondary_header_blk;
+ primary_header.backup = secondary_header_blk.try_into()?;
primary_header.entries = primary_entries_blk;
primary_header.update_crc();
write_bytes_mut(blk_dev, primary_header_pos, primary_header.as_bytes_mut(), scratch)?;
- write_bytes_mut(blk_dev, primary_entries_pos, self.primary_entries, scratch)?
+ write_bytes_mut(
+ blk_dev,
+ primary_entries_pos.try_into()?,
+ self.primary_entries,
+ scratch,
+ )?
} else if !secondary_valid {
// Restore to secondary
secondary_header.as_bytes_mut().clone_from_slice(primary_header.as_bytes());
self.secondary_entries.clone_from_slice(&self.primary_entries);
- secondary_header.current = secondary_header_blk;
+ secondary_header.current = secondary_header_blk.try_into()?;
secondary_header.backup = primary_header_blk;
- secondary_header.entries = secondary_entries_blk;
+ secondary_header.entries = secondary_entries_blk.try_into()?;
secondary_header.update_crc();
write_bytes_mut(
blk_dev,
- secondary_header_pos,
+ secondary_header_pos.try_into()?,
secondary_header.as_bytes_mut(),
scratch,
)?;
- write_bytes_mut(blk_dev, secondary_entries_pos, self.secondary_entries, scratch)?;
+ write_bytes_mut(
+ blk_dev,
+ secondary_entries_pos.try_into()?,
+ self.secondary_entries,
+ scratch,
+ )?;
}
// Calculate actual number of GPT entries by finding the first invalid entry.
@@ -376,10 +382,12 @@ fn check_offset(
blk_dev: &mut (impl BlockIo + ?Sized),
entry: &GptEntry,
offset: u64,
- len: u64,
+ len: usize,
) -> Result<u64> {
- match add(offset, len)? <= mul(entry.blocks()?, blk_dev.block_size())? {
- true => Ok(add(mul(entry.first, blk_dev.block_size())?, offset)?),
+ let s = SafeNum::from(offset) + len;
+ let total_size = SafeNum::from(entry.blocks()?) * blk_dev.block_size();
+ match u64::try_from(s)? <= total_size.try_into()? {
+ true => Ok((SafeNum::from(entry.first) * blk_dev.block_size() + offset).try_into()?),
false => Err(StorageError::OutOfRange),
}
}
@@ -394,7 +402,7 @@ pub(crate) fn read_gpt_partition(
scratch: &mut [u8],
) -> Result<()> {
let e = gpt.find_partition(part_name)?;
- let abs_offset = check_offset(blk_dev, e, offset, out.len() as u64)?;
+ let abs_offset = check_offset(blk_dev, e, offset, out.len())?;
read(blk_dev, abs_offset, out, scratch)
}
@@ -408,7 +416,7 @@ pub(crate) fn write_gpt_partition(
scratch: &mut [u8],
) -> Result<()> {
let e = gpt.find_partition(part_name)?;
- let abs_offset = check_offset(blk_dev, e, offset, data.len() as u64)?;
+ let abs_offset = check_offset(blk_dev, e, offset, data.len())?;
write_bytes(blk_dev, abs_offset, data, scratch)
}
@@ -423,7 +431,7 @@ pub(crate) fn write_gpt_partition_mut(
scratch: &mut [u8],
) -> Result<()> {
let e = gpt.find_partition(part_name)?;
- let abs_offset = check_offset(blk_dev, e, offset, data.as_ref().len() as u64)?;
+ let abs_offset = check_offset(blk_dev, e, offset, data.as_ref().len())?;
write_bytes_mut(blk_dev, abs_offset, data.as_mut(), scratch)
}
@@ -525,7 +533,7 @@ pub(crate) mod test {
dev.sync_gpt().unwrap();
let gpt = gpt(&mut dev);
- let primary_header = &mut gpt.primary_header[..to_usize(GPT_HEADER_SIZE).unwrap()];
+ let primary_header = &mut gpt.primary_header[..GPT_HEADER_SIZE.try_into().unwrap()];
let gpt_header = GptHeader::from_bytes(primary_header);
gpt_header.magic = 0x123456;
gpt_header.update_crc();
@@ -556,12 +564,12 @@ pub(crate) mod test {
dev.sync_gpt().unwrap();
let gpt = gpt(&mut dev);
- let primary_header = &mut gpt.primary_header[..to_usize(GPT_HEADER_SIZE).unwrap()];
+ let primary_header = &mut gpt.primary_header[..GPT_HEADER_SIZE.try_into().unwrap()];
let gpt_header = GptHeader::from_bytes(primary_header);
gpt_header.entries_count = 2;
// Update entries crc32
gpt_header.entries_crc =
- crc32(&gpt.primary_entries[..to_usize(2 * GPT_ENTRY_SIZE).unwrap()]);
+ crc32(&gpt.primary_entries[..(2 * GPT_ENTRY_SIZE).try_into().unwrap()]);
gpt_header.update_crc();
// Update to primary.
let primary_header = Vec::from(primary_header);
diff --git a/gbl/libstorage/src/lib.rs b/gbl/libstorage/src/lib.rs
index 5a10aa2..871540b 100644
--- a/gbl/libstorage/src/lib.rs
+++ b/gbl/libstorage/src/lib.rs
@@ -113,6 +113,8 @@ mod gpt;
use gpt::Gpt;
pub use gpt::{GptEntry, GPT_NAME_LEN_U16};
+use safemath::SafeNum;
+
mod multi_blocks;
pub use multi_blocks::AsMultiBlockDevices;
@@ -122,9 +124,7 @@ pub type Result<T> = core::result::Result<T, StorageError>;
/// Error code for this library.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum StorageError {
- ArithmeticOverflow,
- OutOfRange,
- ScratchTooSmall,
+ ArithmeticOverflow(safemath::Error),
BlockDeviceNotFound,
BlockIoError,
BlockIoNotProvided,
@@ -132,16 +132,27 @@ pub enum StorageError {
InvalidInput,
NoValidGpt,
NotExist,
+ OutOfRange,
PartitionNotUnique,
- U64toUSizeOverflow,
+ ScratchTooSmall,
+}
+
+impl From<safemath::Error> for StorageError {
+ fn from(err: safemath::Error) -> Self {
+ Self::ArithmeticOverflow(err)
+ }
+}
+
+impl From<core::num::TryFromIntError> for StorageError {
+ fn from(_: core::num::TryFromIntError) -> Self {
+ Self::OutOfRange
+ }
}
impl core::fmt::Display for StorageError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
- StorageError::ArithmeticOverflow => write!(f, "Arithmetic overflow"),
- StorageError::OutOfRange => write!(f, "Out of range"),
- StorageError::ScratchTooSmall => write!(f, "Not enough scratch buffer"),
+ StorageError::ArithmeticOverflow(e) => write!(f, "Arithmetic overflow {:?}", e),
StorageError::BlockDeviceNotFound => write!(f, "Block device not found"),
StorageError::BlockIoError => write!(f, "Block IO error"),
StorageError::BlockIoNotProvided => write!(f, "Block IO is not provided"),
@@ -151,10 +162,11 @@ impl core::fmt::Display for StorageError {
StorageError::InvalidInput => write!(f, "Invalid input"),
StorageError::NoValidGpt => write!(f, "GPT not found"),
StorageError::NotExist => write!(f, "The specified partition could not be found"),
+ StorageError::OutOfRange => write!(f, "Out of range"),
StorageError::PartitionNotUnique => {
write!(f, "Partition is found on multiple block devices")
}
- StorageError::U64toUSizeOverflow => write!(f, "u64 to usize fails"),
+ StorageError::ScratchTooSmall => write!(f, "Not enough scratch buffer"),
}
}
}
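With the two From impls above in place, the ? operator routes both failure modes into StorageError without explicit map_err: safemath::Error from a SafeNum conversion becomes ArithmeticOverflow, and core::num::TryFromIntError from a plain integer cast becomes OutOfRange. A sketch, assuming it sits in the same module as StorageError; partition_bytes is a hypothetical function:

use safemath::SafeNum;

fn partition_bytes(blocks: u64, block_size: u64) -> Result<usize, StorageError> {
    // safemath::Error -> StorageError::ArithmeticOverflow via From.
    let bytes: u64 = (SafeNum::from(blocks) * block_size).try_into()?;
    // core::num::TryFromIntError -> StorageError::OutOfRange via From.
    Ok(usize::try_from(bytes)?)
}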
@@ -218,7 +230,9 @@ impl Partition {
/// Returns the partition size in bytes.
pub fn size(&self) -> Result<u64> {
- Ok(mul(self.block_size, self.entry.blocks()?)?)
+ (SafeNum::from(self.entry.blocks()?) * self.block_size)
+ .try_into()
+ .map_err(|e: safemath::Error| e.into())
}
/// Returns the block size of this partition.
@@ -306,7 +320,7 @@ pub trait AsBlockDevice {
/// Returns the total size in number of bytes.
fn total_size(&mut self) -> Result<u64> {
- mul(self.block_size()?, self.num_blocks()?)
+ Ok((SafeNum::from(self.block_size()?) * self.num_blocks()?).try_into()?)
}
/// Read data from the block device.
@@ -516,12 +530,12 @@ pub fn required_scratch_size(
io: &mut (impl BlockIo + ?Sized),
max_gpt_entries: u64,
) -> Result<usize> {
- let alignment_size = alignment_scratch_size(io)?;
+ let alignment_size: SafeNum = alignment_scratch_size(io)?.into();
let gpt_buffer_size = match max_gpt_entries {
0 => 0,
v => Gpt::required_buffer_size(v)?,
};
- alignment_size.checked_add(gpt_buffer_size).ok_or(StorageError::ArithmeticOverflow)
+ (alignment_size + gpt_buffer_size).try_into().map_err(|e: safemath::Error| e.into())
}
/// A helper that wraps `AsBlockDevice::with` and additionally partitions the scratch buffer into
@@ -546,70 +560,32 @@ where
res
}
-/// Add two u64 integers and check overflow
-fn add(lhs: u64, rhs: u64) -> Result<u64> {
- lhs.checked_add(rhs).ok_or_else(|| StorageError::ArithmeticOverflow)
-}
-
-/// Substract two u64 integers and check overflow
-fn sub(lhs: u64, rhs: u64) -> Result<u64> {
- lhs.checked_sub(rhs).ok_or_else(|| StorageError::ArithmeticOverflow)
-}
-
-/// Calculate remainders and check overflow
-fn rem(lhs: u64, rhs: u64) -> Result<u64> {
- lhs.checked_rem(rhs).ok_or_else(|| StorageError::ArithmeticOverflow)
-}
-
-/// Multiply two numbers and check overflow
-fn mul(lhs: u64, rhs: u64) -> Result<u64> {
- lhs.checked_mul(rhs).ok_or_else(|| StorageError::ArithmeticOverflow)
-}
-
-/// Divide two numbers and check overflow
-fn div(lhs: u64, rhs: u64) -> Result<u64> {
- lhs.checked_div(rhs).ok_or_else(|| StorageError::ArithmeticOverflow)
-}
-
/// Check if `value` is aligned to (multiples of) `alignment`
/// It can fail if the remainder calculation fails the overflow check.
-pub fn is_aligned(value: u64, alignment: u64) -> Result<bool> {
- Ok(rem(value, alignment)? == 0)
+pub fn is_aligned(value: SafeNum, alignment: SafeNum) -> Result<bool> {
+ Ok(u64::try_from(value % alignment)? == 0)
}
/// Check if `buffer` address is aligned to `alignment`
/// It can fail if the remainder calculation fails the overflow check.
pub fn is_buffer_aligned(buffer: &[u8], alignment: u64) -> Result<bool> {
- is_aligned(buffer.as_ptr() as u64, alignment)
-}
-
-/// Round down `size` according to `alignment`.
-/// It can fail if any arithmetic operation fails overflow check
-fn round_down(size: u64, alignment: u64) -> Result<u64> {
- sub(size, rem(size, alignment)?)
-}
-
-/// Round up `size` according to `alignment`.
-/// It can fail if any arithmetic operation fails overflow check
-fn round_up(size: u64, alignment: u64) -> Result<u64> {
- // equivalent to round_down(size + alignment - 1, alignment)
- round_down(add(size, sub(alignment, 1)?)?, alignment)
-}
-
-/// Check and convert u64 into usize
-pub fn to_usize(val: u64) -> Result<usize> {
- val.try_into().map_err(|_| StorageError::U64toUSizeOverflow)
+ is_aligned((buffer.as_ptr() as usize).into(), alignment.into())
}
/// Check read/write range and calculate offset in number of blocks.
-fn check_range(blk_io: &mut (impl BlockIo + ?Sized), offset: u64, buffer: &[u8]) -> Result<u64> {
- // The following should be invariants if implementation is correct.
- debug_assert!(is_aligned(offset, blk_io.block_size())?);
- debug_assert!(is_aligned(buffer.len() as u64, blk_io.block_size())?);
- debug_assert!(is_buffer_aligned(buffer, blk_io.alignment())?);
- let blk_offset = offset / blk_io.block_size();
- let blk_count = buffer.len() as u64 / blk_io.block_size();
- match add(blk_offset, blk_count)? <= blk_io.num_blocks() {
+fn check_range(
+ blk_io: &mut (impl BlockIo + ?Sized),
+ offset: u64,
+ buffer: &[u8],
+) -> Result<SafeNum> {
+ let offset: SafeNum = offset.into();
+ let block_size: SafeNum = blk_io.block_size().into();
+ debug_assert!(is_aligned(offset, block_size)?);
+ debug_assert!(is_aligned(buffer.len().into(), block_size)?);
+ debug_assert!(is_buffer_aligned(buffer, blk_io.alignment().into())?);
+ let blk_offset = offset / block_size;
+ let blk_count = SafeNum::from(buffer.len()) / block_size;
+ match u64::try_from(blk_offset + blk_count)? <= blk_io.num_blocks() {
true => Ok(blk_offset),
false => Err(StorageError::OutOfRange),
}
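The deleted round_down/round_up helpers have direct SafeNum counterparts, so the alignment math above no longer threads Result through every intermediate step. A sketch of the replacement shape, assuming SafeNum::round_up/round_down as used throughout the patch; align_offset is hypothetical:

use safemath::SafeNum;

// Round an offset down and up to a block boundary; any overflow
// surfaces only at the final conversions.
fn align_offset(offset: u64, block_size: u64) -> Result<(u64, u64), safemath::Error> {
    let offset = SafeNum::from(offset);
    let down = offset.round_down(block_size).try_into()?;
    let up = offset.round_up(block_size).try_into()?;
    Ok((down, up))
}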
@@ -621,7 +597,7 @@ fn read_aligned_all(
offset: u64,
out: &mut [u8],
) -> Result<()> {
- let blk_offset = check_range(blk_io, offset, out)?;
+ let blk_offset = check_range(blk_io, offset, out).map(u64::try_from)??;
if blk_io.read_blocks(blk_offset, out) {
return Ok(());
}
@@ -637,20 +613,23 @@ fn read_aligned_offset_and_buffer(
out: &mut [u8],
scratch: &mut [u8],
) -> Result<()> {
- debug_assert!(is_aligned(offset, blk_io.block_size())?);
+ let block_size = SafeNum::from(blk_io.block_size());
+ debug_assert!(is_aligned(offset.into(), block_size)?);
debug_assert!(is_buffer_aligned(out, blk_io.alignment())?);
- let aligned_read = round_down(out.len() as u64, blk_io.block_size())?;
+ let aligned_read: usize = SafeNum::from(out.len()).round_down(block_size).try_into()?;
+
if aligned_read > 0 {
- read_aligned_all(blk_io, offset, &mut out[..to_usize(aligned_read)?])?;
+ read_aligned_all(blk_io, offset, &mut out[..aligned_read])?;
}
- let unaligned = &mut out[to_usize(aligned_read)?..];
- if unaligned.len() == 0 {
+ let unaligned = &mut out[aligned_read..];
+ if unaligned.is_empty() {
return Ok(());
}
// Read unaligned part.
- let block_scratch = &mut scratch[..to_usize(blk_io.block_size())?];
- read_aligned_all(blk_io, add(offset, aligned_read)?, block_scratch)?;
+ let block_scratch = &mut scratch[..block_size.try_into()?];
+ let aligned_offset = SafeNum::from(offset) + aligned_read;
+ read_aligned_all(blk_io, aligned_offset.try_into()?, block_scratch)?;
unaligned.clone_from_slice(&block_scratch[..unaligned.len()]);
Ok(())
}
@@ -670,67 +649,61 @@ fn read_aligned_buffer(
) -> Result<()> {
debug_assert!(is_buffer_aligned(out, blk_io.alignment())?);
- if is_aligned(offset, blk_io.block_size())? {
+ if is_aligned(offset.into(), blk_io.block_size().into())? {
return read_aligned_offset_and_buffer(blk_io, offset, out, scratch);
}
+ let offset = SafeNum::from(offset);
+ let aligned_start: u64 =
+ min(offset.round_up(blk_io.block_size()).try_into()?, (offset + out.len()).try_into()?);
- let aligned_start = min(round_up(offset, blk_io.block_size())?, add(offset, out.len() as u64)?);
- let aligned_relative_offset = sub(aligned_start, offset)?;
- if aligned_relative_offset < out.len() as u64 {
- if is_buffer_aligned(&out[to_usize(aligned_relative_offset)?..], blk_io.alignment())? {
+ let aligned_relative_offset: usize = (SafeNum::from(aligned_start) - offset).try_into()?;
+ if aligned_relative_offset < out.len() {
+ if is_buffer_aligned(&out[aligned_relative_offset..], blk_io.alignment())? {
// If new output address is aligned, read directly.
read_aligned_offset_and_buffer(
blk_io,
aligned_start,
- &mut out[to_usize(aligned_relative_offset)?..],
+ &mut out[aligned_relative_offset..],
scratch,
)?;
} else {
// Otherwise read into `out` (assumed aligned) and memmove to the correct
// position
- let read_len = sub(out.len() as u64, aligned_relative_offset)?;
- read_aligned_offset_and_buffer(
- blk_io,
- aligned_start,
- &mut out[..to_usize(read_len)?],
- scratch,
- )?;
- out.copy_within(..to_usize(read_len)?, to_usize(aligned_relative_offset)?);
+ let read_len: usize =
+ (SafeNum::from(out.len()) - aligned_relative_offset).try_into()?;
+ read_aligned_offset_and_buffer(blk_io, aligned_start, &mut out[..read_len], scratch)?;
+ out.copy_within(..read_len, aligned_relative_offset);
}
}
// Now read the unaligned part
- let block_scratch = &mut scratch[..to_usize(blk_io.block_size())?];
- let round_down_offset = round_down(offset, blk_io.block_size())?;
- read_aligned_all(blk_io, round_down_offset, block_scratch)?;
- let offset_relative = sub(offset, round_down_offset)?;
- let unaligned = &mut out[..to_usize(aligned_relative_offset)?];
+ let block_scratch = &mut scratch[..SafeNum::from(blk_io.block_size()).try_into()?];
+ let round_down_offset = offset.round_down(blk_io.block_size());
+ read_aligned_all(blk_io, round_down_offset.try_into()?, block_scratch)?;
+ let offset_relative = offset - round_down_offset;
+ let unaligned = &mut out[..aligned_relative_offset];
unaligned.clone_from_slice(
&block_scratch
- [to_usize(offset_relative)?..to_usize(add(offset_relative, unaligned.len() as u64)?)?],
+ [offset_relative.try_into()?..(offset_relative + unaligned.len()).try_into()?],
);
Ok(())
}
/// Calculates the necessary scratch buffer size for handling block and buffer misalignment.
pub fn alignment_scratch_size(blk_io: &mut (impl BlockIo + ?Sized)) -> Result<usize> {
- // block_size() + 2 * (alignment() - 1)
- // This guarantees that we can craft out two aligned scratch buffers:
- // [u8; blk_io.alignment() - 1] and [u8; blk_io.block_size())] respectively. They are
- // needed for handing buffer and read/write range misalignment.
let block_alignment = match blk_io.block_size() {
1 => 0,
v => v,
};
- let buffer_alignment = mul(2, sub(blk_io.alignment(), 1)?)?;
- to_usize(add(block_alignment, buffer_alignment)?)
+ ((SafeNum::from(blk_io.alignment()) - 1) * 2 + block_alignment)
+ .try_into()
+ .map_err(|e: safemath::Error| e.into())
}
/// Gets a subslice of the given slice with aligned address according to `alignment`
fn aligned_subslice(buffer: &mut [u8], alignment: u64) -> Result<&mut [u8]> {
- let addr =
- u64::try_from(buffer.as_ptr() as usize).map_err(|_| StorageError::ArithmeticOverflow)?;
- Ok(&mut buffer[to_usize(sub(round_up(addr, alignment)?, addr)?)?..])
+ let addr = SafeNum::from(buffer.as_ptr() as usize);
+ Ok(&mut buffer[(addr.round_up(alignment) - addr).try_into()?..])
}
// Partition a scratch into two aligned parts: [u8; alignment()-1] and [u8; block_size())]
@@ -740,13 +713,13 @@ fn split_scratch<'a>(
scratch: &'a mut [u8],
) -> Result<(&'a mut [u8], &'a mut [u8])> {
let (buffer_alignment, block_alignment) = aligned_subslice(scratch, blk_io.alignment())?
- .split_at_mut(to_usize(sub(blk_io.alignment(), 1)?)?);
+ .split_at_mut((SafeNum::from(blk_io.alignment()) - 1).try_into()?);
let block_alignment = aligned_subslice(block_alignment, blk_io.alignment())?;
let block_alignment_scratch_size = match blk_io.block_size() {
- 1 => 0usize,
- v => to_usize(v)?,
+ 1 => SafeNum::ZERO,
+ v => v.into(),
};
- Ok((buffer_alignment, &mut block_alignment[..block_alignment_scratch_size]))
+ Ok((buffer_alignment, &mut block_alignment[..block_alignment_scratch_size.try_into()?]))
}
/// Read with no alignment requirement.
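Both aligned_subslice and split_scratch lean on the same pointer trick: round the buffer's starting address up to the next multiple of the alignment and skip the difference. In isolation (a sketch; skip_to_aligned is hypothetical and hard-codes an 8-byte alignment):

use safemath::SafeNum;

// E.g. a buffer starting at address 0x1003 rounds up to 0x1008, so the
// first 5 bytes are skipped and the returned subslice is 8-byte aligned.
// Assumes the buffer is large enough, as the callers here do.
fn skip_to_aligned(buf: &mut [u8]) -> Result<&mut [u8], safemath::Error> {
    let addr = SafeNum::from(buf.as_ptr() as usize);
    let skip: usize = (addr.round_up(8u64) - addr).try_into()?;
    Ok(&mut buf[skip..])
}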
@@ -773,29 +746,30 @@ fn read(
// |----------------------|---------------------|
// blk_io.alignment()
- let out_addr_value = out.as_ptr() as u64;
- let unaligned_read =
- min(sub(round_up(out_addr_value, blk_io.alignment())?, out_addr_value)?, out.len() as u64);
+ let out_addr_value = SafeNum::from(out.as_ptr() as usize);
+ let unaligned_read: usize =
+ min((out_addr_value.round_up(blk_io.alignment()) - out_addr_value).try_into()?, out.len());
+
// Read unaligned part
- let unaligned_out = &mut buffer_alignment_scratch[..to_usize(unaligned_read)?];
+ let unaligned_out = &mut buffer_alignment_scratch[..unaligned_read];
read_aligned_buffer(blk_io, offset, unaligned_out, block_alignment_scratch)?;
- out[..to_usize(unaligned_read)?].clone_from_slice(unaligned_out);
+ out[..unaligned_read].clone_from_slice(unaligned_out);
- if unaligned_read == out.len() as u64 {
+ if unaligned_read == out.len() {
return Ok(());
}
// Read aligned part
read_aligned_buffer(
blk_io,
- add(offset, unaligned_read)?,
- &mut out[to_usize(unaligned_read)?..],
+ (SafeNum::from(offset) + unaligned_read).try_into()?,
+ &mut out[unaligned_read..],
block_alignment_scratch,
)
}
fn write_aligned_all(blk_io: &mut (impl BlockIo + ?Sized), offset: u64, data: &[u8]) -> Result<()> {
let blk_offset = check_range(blk_io, offset, data)?;
- if blk_io.write_blocks(blk_offset, data) {
+ if blk_io.write_blocks(blk_offset.try_into()?, data) {
return Ok(());
}
Err(StorageError::BlockIoError)
@@ -810,21 +784,22 @@ fn write_aligned_offset_and_buffer(
data: &[u8],
scratch: &mut [u8],
) -> Result<()> {
- debug_assert!(is_aligned(offset, blk_io.block_size())?);
+ debug_assert!(is_aligned(offset.into(), blk_io.block_size().into())?);
debug_assert!(is_buffer_aligned(data, blk_io.alignment())?);
- let aligned_write = round_down(data.len() as u64, blk_io.block_size())?;
+ let aligned_write: usize =
+ SafeNum::from(data.len()).round_down(blk_io.block_size()).try_into()?;
if aligned_write > 0 {
- write_aligned_all(blk_io, offset, &data[..to_usize(aligned_write)?])?;
+ write_aligned_all(blk_io, offset, &data[..aligned_write])?;
}
- let unaligned = &data[to_usize(aligned_write)?..];
+ let unaligned = &data[aligned_write..];
if unaligned.len() == 0 {
return Ok(());
}
// Perform read-modify-write for the unaligned part
- let unaligned_start = add(offset, aligned_write)?;
- let block_scratch = &mut scratch[..to_usize(blk_io.block_size())?];
+ let unaligned_start: u64 = (SafeNum::from(offset) + aligned_write).try_into()?;
+ let block_scratch = &mut scratch[..SafeNum::from(blk_io.block_size()).try_into()?];
read_aligned_all(blk_io, unaligned_start, block_scratch)?;
block_scratch[..unaligned.len()].clone_from_slice(unaligned);
write_aligned_all(blk_io, unaligned_start, block_scratch)
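The unaligned tail above is the classic read-modify-write sequence: read the full trailing block into scratch, overwrite its head with the caller's bytes, write the block back. Reduced to a standalone sketch (rmw_tail is hypothetical; read_block/write_block stand in for the read_aligned_all/write_aligned_all calls):

use safemath::SafeNum;

// Read-modify-write one partial block located at `offset + aligned`.
fn rmw_tail(
    block_scratch: &mut [u8],
    tail: &[u8],
    offset: u64,
    aligned: usize,
    read_block: impl Fn(u64, &mut [u8]),
    write_block: impl Fn(u64, &[u8]),
) -> Result<(), safemath::Error> {
    let start: u64 = (SafeNum::from(offset) + aligned).try_into()?;
    read_block(start, block_scratch);                   // read the whole block
    block_scratch[..tail.len()].clone_from_slice(tail); // modify its head
    write_block(start, block_scratch);                  // write it back
    Ok(())
}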
@@ -836,35 +811,39 @@ fn write_aligned_offset_and_buffer(
/// case.
fn write_bytes(
blk_io: &mut (impl BlockIo + ?Sized),
- mut offset: u64,
+ offset: u64,
data: &[u8],
scratch: &mut [u8],
) -> Result<()> {
let (_, block_scratch) = split_scratch(blk_io, scratch)?;
- let block_size = blk_io.block_size();
- let mut data_offset = 0;
- while data_offset < data.len() as u64 {
- if is_aligned(offset, blk_io.block_size())?
- && is_buffer_aligned(&data[to_usize(data_offset)?..], blk_io.alignment())?
+ let block_size = SafeNum::from(blk_io.block_size());
+ let mut data_offset = SafeNum::ZERO;
+ let mut offset = SafeNum::from(offset);
+ while usize::try_from(data_offset)? < data.len() {
+ if is_aligned(offset, block_size)?
+ && is_buffer_aligned(&data[data_offset.try_into()?..], blk_io.alignment())?
{
return write_aligned_offset_and_buffer(
blk_io,
- offset,
- &data[to_usize(data_offset)?..],
+ offset.try_into()?,
+ &data[data_offset.try_into()?..],
block_scratch,
);
}
- let block_offset = round_down(offset, block_size)?;
+ let block_offset = offset.round_down(block_size);
let copy_offset = offset - block_offset;
- let copy_size = min(data[to_usize(data_offset)?..].len() as u64, block_size - copy_offset);
- if copy_size < block_size {
+ let copy_size =
+ min(data[data_offset.try_into()?..].len(), (block_size - copy_offset).try_into()?);
+ if copy_size < block_size.try_into()? {
// Partial block copy. Perform read-modify-write
- read_aligned_all(blk_io, block_offset, block_scratch)?;
+ read_aligned_all(blk_io, block_offset.try_into()?, block_scratch)?;
}
- block_scratch[to_usize(copy_offset)?..to_usize(copy_offset + copy_size)?]
- .clone_from_slice(&data[to_usize(data_offset)?..to_usize(data_offset + copy_size)?]);
- write_aligned_all(blk_io, block_offset, block_scratch)?;
+ block_scratch[copy_offset.try_into()?..(copy_offset + copy_size).try_into()?]
+ .clone_from_slice(
+ &data[data_offset.try_into()?..(data_offset + copy_size).try_into()?],
+ );
+ write_aligned_all(blk_io, block_offset.try_into()?, block_scratch)?;
data_offset += copy_size;
offset += copy_size;
}
@@ -895,45 +874,44 @@ fn write_aligned_buffer(
) -> Result<()> {
debug_assert!(is_buffer_aligned(data, blk_io.alignment())?);
- if is_aligned(offset, blk_io.block_size())? {
- return write_aligned_offset_and_buffer(blk_io, offset, data, scratch);
+ let offset = SafeNum::from(offset);
+ if is_aligned(offset, blk_io.block_size().into())? {
+ return write_aligned_offset_and_buffer(blk_io, offset.try_into()?, data, scratch);
}
- let aligned_start =
- min(round_up(offset, blk_io.block_size())?, add(offset, data.len() as u64)?);
- let aligned_relative_offset = sub(aligned_start, offset)?;
- if aligned_relative_offset < data.len() as u64 {
- if is_buffer_aligned(&data[to_usize(aligned_relative_offset)?..], blk_io.alignment())? {
+ let aligned_start: u64 =
+ min(offset.round_up(blk_io.block_size()).try_into()?, (offset + data.len()).try_into()?);
+ let aligned_relative_offset: usize = (SafeNum::from(aligned_start) - offset).try_into()?;
+ if aligned_relative_offset < data.len() {
+ if is_buffer_aligned(&data[aligned_relative_offset..], blk_io.alignment())? {
// If new address is aligned, write directly.
write_aligned_offset_and_buffer(
blk_io,
aligned_start,
- &data[to_usize(aligned_relative_offset)?..],
+ &data[aligned_relative_offset..],
scratch,
)?;
} else {
- let write_len = sub(data.len() as u64, aligned_relative_offset)?;
+ let write_len: usize =
+ (SafeNum::from(data.len()) - aligned_relative_offset).try_into()?;
// Swap the offset-aligned part to the beginning of the buffer (assumed aligned)
- swap_slice(data, to_usize(aligned_relative_offset)?);
- let res = write_aligned_offset_and_buffer(
- blk_io,
- aligned_start,
- &data[..to_usize(write_len)?],
- scratch,
- );
+ swap_slice(data, aligned_relative_offset);
+ let res =
+ write_aligned_offset_and_buffer(blk_io, aligned_start, &data[..write_len], scratch);
// Swap the two parts back before checking the result.
- swap_slice(data, to_usize(write_len)?);
+ swap_slice(data, write_len);
res?;
}
}
// perform read-modify-write for the unaligned part.
- let block_scratch = &mut scratch[..to_usize(blk_io.block_size())?];
- let round_down_offset = round_down(offset, blk_io.block_size())?;
+ let block_scratch = &mut scratch[..SafeNum::from(blk_io.block_size()).try_into()?];
+ let round_down_offset: u64 = offset.round_down(blk_io.block_size()).try_into()?;
read_aligned_all(blk_io, round_down_offset, block_scratch)?;
- let offset_relative = sub(offset, round_down_offset)?;
- block_scratch[to_usize(offset_relative)?..to_usize(offset_relative + aligned_relative_offset)?]
- .clone_from_slice(&data[..to_usize(aligned_relative_offset)?]);
+ let offset_relative = offset - round_down_offset;
+ block_scratch
+ [offset_relative.try_into()?..(offset_relative + aligned_relative_offset).try_into()?]
+ .clone_from_slice(&data[..aligned_relative_offset]);
write_aligned_all(blk_io, round_down_offset, block_scratch)
}
@@ -963,34 +941,34 @@ fn write_bytes_mut(
// blk_io.alignment()
// Write unaligned part
- let data_addr_value = data.as_ptr() as u64;
- let unaligned_write = min(
- sub(round_up(data_addr_value, blk_io.alignment())?, data_addr_value)?,
- data.len() as u64,
+ let data_addr_value = SafeNum::from(data.as_ptr() as usize);
+ let unaligned_write: usize = min(
+ (data_addr_value.round_up(blk_io.alignment()) - data_addr_value).try_into()?,
+ data.len(),
);
- let mut unaligned_data = &mut buffer_alignment_scratch[..to_usize(unaligned_write)?];
- unaligned_data.clone_from_slice(&data[..to_usize(unaligned_write)?]);
+ let mut unaligned_data = &mut buffer_alignment_scratch[..unaligned_write];
+ unaligned_data.clone_from_slice(&data[..unaligned_write]);
write_aligned_buffer(blk_io, offset, &mut unaligned_data, block_alignment_scratch)?;
- if unaligned_write == data.len() as u64 {
+ if unaligned_write == data.len() {
return Ok(());
}
// Write aligned part
write_aligned_buffer(
blk_io,
- add(offset, unaligned_write)?,
- &mut data[to_usize(unaligned_write)?..],
+ (SafeNum::from(offset) + unaligned_write).try_into()?,
+ &mut data[unaligned_write..],
block_alignment_scratch,
)
}
#[cfg(test)]
mod test {
- use super::{round_up, to_usize};
use core::mem::size_of;
use gbl_storage_testlib::{
required_scratch_size, AsBlockDevice, TestBlockDevice, TestBlockDeviceBuilder,
};
+ use safemath::SafeNum;
#[derive(Debug)]
struct TestCase {
@@ -1024,15 +1002,16 @@ mod test {
impl AlignedBuffer {
pub fn new(alignment: u64, size: u64) -> Self {
- let buffer = vec![0u8; to_usize(size + alignment).unwrap()];
+ let aligned_size = (SafeNum::from(size) + alignment).try_into().unwrap();
+ let buffer = vec![0u8; aligned_size];
Self { buffer, alignment, size }
}
pub fn get(&mut self) -> &mut [u8] {
- let addr = self.buffer.as_ptr() as u64;
- let aligned_start = round_up(addr, self.alignment).unwrap() - addr;
+ let addr = SafeNum::from(self.buffer.as_ptr() as usize);
+ let aligned_start = addr.round_up(self.alignment) - addr;
&mut self.buffer
- [to_usize(aligned_start).unwrap()..to_usize(aligned_start + self.size).unwrap()]
+ [aligned_start.try_into().unwrap()..(aligned_start + self.size).try_into().unwrap()]
}
}
@@ -1058,13 +1037,15 @@ mod test {
// starts at an unaligned offset. Because of this we need to allocate
// `case.misalignment` more to accommodate it.
let mut aligned_buf = AlignedBuffer::new(case.alignment, case.rw_size + case.misalignment);
- let out = &mut aligned_buf.get()[to_usize(case.misalignment).unwrap()
- ..to_usize(case.misalignment + case.rw_size).unwrap()];
+ let misalignment = SafeNum::from(case.misalignment);
+ let out = &mut aligned_buf.get()
+ [misalignment.try_into().unwrap()..(misalignment + case.rw_size).try_into().unwrap()];
blk.read(case.rw_offset, out).unwrap();
+ let rw_offset = SafeNum::from(case.rw_offset);
assert_eq!(
out.to_vec(),
- blk.io.storage[to_usize(case.rw_offset).unwrap()
- ..to_usize(case.rw_offset + case.rw_size).unwrap()]
+ blk.io.storage
+ [rw_offset.try_into().unwrap()..(rw_offset + case.rw_size).try_into().unwrap()]
.to_vec(),
"Failed. Test case {:?}",
case,
@@ -1080,22 +1061,25 @@ mod test {
.set_size(case.storage_size as usize)
.build();
// Write a reverse version of the current data.
+ let rw_offset = SafeNum::from(case.rw_offset);
let mut expected = blk.io.storage
- [to_usize(case.rw_offset).unwrap()..to_usize(case.rw_offset + case.rw_size).unwrap()]
+ [rw_offset.try_into().unwrap()..(rw_offset + case.rw_size).try_into().unwrap()]
.to_vec();
expected.reverse();
// Make an aligned buffer. A misaligned version is created by taking a sub slice that
// starts at an unaligned offset. Because of this we need to allocate
// `case.misalignment` more to accommodate it.
+ let misalignment = SafeNum::from(case.misalignment);
let mut aligned_buf = AlignedBuffer::new(case.alignment, case.rw_size + case.misalignment);
- let data = &mut aligned_buf.get()[to_usize(case.misalignment).unwrap()
- ..to_usize(case.misalignment + case.rw_size).unwrap()];
+ let data = &mut aligned_buf.get()
+ [misalignment.try_into().unwrap()..(misalignment + case.rw_size).try_into().unwrap()];
data.clone_from_slice(&expected);
write_func(&mut blk, case.rw_offset, data);
+ let rw_offset = SafeNum::from(case.rw_offset);
assert_eq!(
expected,
- blk.io.storage[to_usize(case.rw_offset).unwrap()
- ..to_usize(case.rw_offset + case.rw_size).unwrap()]
+ blk.io.storage
+ [rw_offset.try_into().unwrap()..(rw_offset + case.rw_size).try_into().unwrap()]
.to_vec(),
"Failed. Test case {:?}",
case,
@@ -1413,7 +1397,7 @@ mod test {
.set_scratch_size(scratch_size)
.build();
let block_size = TestBlockDeviceBuilder::DEFAULT_BLOCK_SIZE;
- assert!(blk.read(0, &mut vec![0u8; to_usize(block_size).unwrap()]).is_err());
+ assert!(blk.read(0, &mut vec![0u8; block_size.try_into().unwrap()]).is_err());
}
#[test]
diff --git a/gbl/libstorage/src/multi_blocks.rs b/gbl/libstorage/src/multi_blocks.rs
index 0d63eb3..8c193a7 100644
--- a/gbl/libstorage/src/multi_blocks.rs
+++ b/gbl/libstorage/src/multi_blocks.rs
@@ -217,28 +217,28 @@ mod test {
]);
devs.sync_gpt_all(&mut |_, _, _| panic!("GPT sync failed"));
- assert_eq!(devs.find_partition("boot_a").and_then(|v| v.size()).unwrap(), 8 * 1024);
+ assert_eq!(devs.find_partition("boot_a").map(|v| v.size()).unwrap(), Ok(8 * 1024));
assert_eq!(
- devs.get(0).unwrap().find_partition("boot_a").and_then(|v| v.size()).unwrap(),
- 8 * 1024
+ devs.get(0).unwrap().find_partition("boot_a").map(|v| v.size()).unwrap(),
+ Ok(8 * 1024)
);
- assert_eq!(devs.find_partition("boot_b").and_then(|v| v.size()).unwrap(), 12 * 1024);
+ assert_eq!(devs.find_partition("boot_b").map(|v| v.size()).unwrap(), Ok(12 * 1024));
assert_eq!(
- devs.get(0).unwrap().find_partition("boot_b").and_then(|v| v.size()).unwrap(),
- 12 * 1024
+ devs.get(0).unwrap().find_partition("boot_b").map(|v| v.size()).unwrap(),
+ Ok(12 * 1024)
);
- assert_eq!(devs.find_partition("vendor_boot_a").and_then(|v| v.size()).unwrap(), 4 * 1024);
+ assert_eq!(devs.find_partition("vendor_boot_a").map(|v| v.size()).unwrap(), Ok(4 * 1024));
assert_eq!(
- devs.get(1).unwrap().find_partition("vendor_boot_a").and_then(|v| v.size()).unwrap(),
- 4 * 1024
+ devs.get(1).unwrap().find_partition("vendor_boot_a").map(|v| v.size()).unwrap(),
+ Ok(4 * 1024)
);
- assert_eq!(devs.find_partition("vendor_boot_b").and_then(|v| v.size()).unwrap(), 6 * 1024);
+ assert_eq!(devs.find_partition("vendor_boot_b").map(|v| v.size()).unwrap(), Ok(6 * 1024));
assert_eq!(
- devs.get(1).unwrap().find_partition("vendor_boot_b").and_then(|v| v.size()).unwrap(),
- 6 * 1024
+ devs.get(1).unwrap().find_partition("vendor_boot_b").map(|v| v.size()).unwrap(),
+ Ok(6 * 1024)
);
}
diff --git a/gbl/libstorage/src/testlib.rs b/gbl/libstorage/src/testlib.rs
index 85a8225..e79a62d 100644
--- a/gbl/libstorage/src/testlib.rs
+++ b/gbl/libstorage/src/testlib.rs
@@ -12,10 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
pub use gbl_storage::{
- alignment_scratch_size, is_aligned, is_buffer_aligned, required_scratch_size, to_usize,
- AsBlockDevice, AsMultiBlockDevices, BlockIo,
+ alignment_scratch_size, is_aligned, is_buffer_aligned, required_scratch_size, AsBlockDevice,
+ AsMultiBlockDevices, BlockIo,
};
+use safemath::SafeNum;
+
/// Helper `gbl_storage::BlockIo` struct for TestBlockDevice.
pub struct TestBlockIo {
/// The storage block size in bytes.
@@ -43,7 +45,7 @@ impl TestBlockIo {
fn check_alignment(&mut self, buffer: &[u8]) -> bool {
matches!(is_buffer_aligned(buffer, self.alignment()), Ok(true))
- && matches!(is_aligned(buffer.len() as u64, self.block_size()), Ok(true))
+ && matches!(is_aligned(buffer.len().into(), self.block_size().into()), Ok(true))
}
}
@@ -65,9 +67,9 @@ impl BlockIo for TestBlockIo {
return false;
}
- let start = blk_offset * self.block_size();
- let end = start + out.len() as u64;
- out.clone_from_slice(&self.storage[to_usize(start).unwrap()..to_usize(end).unwrap()]);
+ let start = SafeNum::from(blk_offset) * self.block_size();
+ let end = start + out.len();
+ out.clone_from_slice(&self.storage[start.try_into().unwrap()..end.try_into().unwrap()]);
self.num_reads += 1;
true
}
@@ -77,9 +79,9 @@ impl BlockIo for TestBlockIo {
return false;
}
- let start = blk_offset * self.block_size();
- let end = start + data.len() as u64;
- self.storage[to_usize(start).unwrap()..to_usize(end).unwrap()].clone_from_slice(&data);
+ let start = SafeNum::from(blk_offset) * self.block_size();
+ let end = start + data.len();
+ self.storage[start.try_into().unwrap()..end.try_into().unwrap()].clone_from_slice(&data);
self.num_writes += 1;
true
}