From 67027f0eaf3b054048aa8ef0cf6bfd9e9d44b943 Mon Sep 17 00:00:00 2001 From: wcampbell Date: Thu, 2 Nov 2023 22:45:49 -0400 Subject: [PATCH] Add bit_order support --- benches/deku.rs | 88 ++-- deku-derive/src/lib.rs | 18 + deku-derive/src/macros/deku_read.rs | 8 +- deku-derive/src/macros/deku_write.rs | 2 + deku-derive/src/macros/mod.rs | 46 +- ensure_no_std/Cargo.toml | 1 - examples/custom_reader_and_writer.rs | 1 - examples/ieee.rs | 73 ++++ src/attributes.rs | 87 ++++ src/ctx.rs | 9 + src/impls/nonzero.rs | 31 ++ src/impls/primitive.rs | 413 ++++++++++++++++-- src/reader.rs | 99 +++-- src/writer.rs | 176 ++++++-- tests/bit_order.rs | 341 +++++++++++++++ tests/test_attributes/test_cond.rs | 2 +- tests/test_attributes/test_ctx.rs | 2 +- .../test_limits/test_bits_read.rs | 2 +- .../test_limits/test_bytes_read.rs | 2 +- .../test_attributes/test_limits/test_count.rs | 2 +- tests/test_attributes/test_map.rs | 2 +- tests/test_attributes/test_padding/mod.rs | 4 +- .../test_padding/test_pad_bits_after.rs | 2 +- .../test_padding/test_pad_bits_before.rs | 2 +- .../test_padding/test_pad_bytes_after.rs | 2 +- .../test_padding/test_pad_bytes_before.rs | 2 +- tests/test_attributes/test_skip.rs | 4 +- tests/test_compile/cases/temp_field.stderr | 4 +- tests/test_regression.rs | 20 +- 29 files changed, 1270 insertions(+), 175 deletions(-) create mode 100644 examples/ieee.rs create mode 100644 tests/bit_order.rs diff --git a/benches/deku.rs b/benches/deku.rs index 4243ff51..bbc6678f 100644 --- a/benches/deku.rs +++ b/benches/deku.rs @@ -13,6 +13,17 @@ struct DekuBits { data_03: u8, } +#[derive(Debug, PartialEq, DekuRead, DekuWrite)] +#[deku(bit_order = "lsb")] +struct DekuBitsLsb { + #[deku(bits = "1")] + data_01: u8, + #[deku(bits = "2")] + data_02: u8, + #[deku(bits = "5")] + data_03: u8, +} + #[derive(Debug, PartialEq, DekuRead, DekuWrite)] struct DekuBytes { data_00: u8, @@ -27,46 +38,22 @@ enum DekuEnum { VariantA(u8), } -#[derive(Debug, PartialEq, DekuRead, DekuWrite)] 
+#[derive(Debug, PartialEq, DekuRead, DekuWrite, Clone)] struct DekuVec { count: u8, #[deku(count = "count")] data: Vec, } -fn deku_read_bits(mut reader: impl Read) { - let mut reader = Reader::new(&mut reader); - let _v = DekuBits::from_reader_with_ctx(&mut reader, ()).unwrap(); -} - -fn deku_write_bits(input: &DekuBits) { - let _v = input.to_bytes().unwrap(); -} - -fn deku_read_byte(mut reader: impl Read) { - let mut reader = Reader::new(&mut reader); - let _v = DekuBytes::from_reader_with_ctx(&mut reader, ()).unwrap(); -} - -fn deku_write_byte(input: &DekuBytes) { - let _v = input.to_bytes().unwrap(); -} - -fn deku_read_enum(mut reader: impl Read) { - let mut reader = Reader::new(&mut reader); - let _v = DekuEnum::from_reader_with_ctx(&mut reader, ()).unwrap(); -} - -fn deku_write_enum(input: &DekuEnum) { - let _v = input.to_bytes().unwrap(); -} - -fn deku_read_vec(mut reader: impl Read) { +fn deku_read(mut reader: impl Read) +where + T: for<'a> DekuReader<'a>, +{ let mut reader = Reader::new(&mut reader); - let _v = DekuVec::from_reader_with_ctx(&mut reader, ()).unwrap(); + let _v = ::from_reader_with_ctx(&mut reader, ()).unwrap(); } -fn deku_write_vec(input: &DekuVec) { +fn deku_write(input: impl DekuWriter + DekuContainerWrite) { let _v = input.to_bytes().unwrap(); } @@ -75,30 +62,49 @@ fn criterion_benchmark(c: &mut Criterion) { let reader = Cursor::new(&[0x01; 1 + 2 + 4]); b.iter_batched( || reader.clone(), - |mut reader| deku_read_byte(&mut reader), + |mut reader| deku_read::(&mut reader), BatchSize::SmallInput, ) }); c.bench_function("deku_write_byte", |b| { b.iter(|| { - deku_write_byte(black_box(&DekuBytes { + deku_write(black_box(DekuBytes { data_00: 0x00, data_01: 0x02, data_02: 0x03, })) }) }); + c.bench_function("deku_read_bits", |b| { let reader = Cursor::new(&[0x01; 1]); b.iter_batched( || reader.clone(), - |mut reader| deku_read_bits(&mut reader), + |mut reader| deku_read::(&mut reader), BatchSize::SmallInput, ) }); 
c.bench_function("deku_write_bits", |b| { b.iter(|| { - deku_write_bits(black_box(&DekuBits { + deku_write(black_box(DekuBits { + data_01: 0x0f, + data_02: 0x00, + data_03: 0x01, + })) + }) + }); + + c.bench_function("deku_read_bits_lsb", |b| { + let reader = Cursor::new(&[0x01; 1]); + b.iter_batched( + || reader.clone(), + |mut reader| deku_read::(&mut reader), + BatchSize::SmallInput, + ) + }); + c.bench_function("deku_write_bits_lsb", |b| { + b.iter(|| { + deku_write(black_box(DekuBitsLsb { data_01: 0x0f, data_02: 0x00, data_03: 0x01, @@ -110,12 +116,12 @@ fn criterion_benchmark(c: &mut Criterion) { let reader = Cursor::new(&[0x01; 2]); b.iter_batched( || reader.clone(), - |mut reader| deku_read_enum(&mut reader), + |mut reader| deku_read::(&mut reader), BatchSize::SmallInput, ) }); c.bench_function("deku_write_enum", |b| { - b.iter(|| deku_write_enum(black_box(&DekuEnum::VariantA(0x02)))) + b.iter(|| deku_write(black_box(DekuEnum::VariantA(0x02)))) }); let deku_write_vec_input = DekuVec { @@ -126,12 +132,16 @@ fn criterion_benchmark(c: &mut Criterion) { let reader = Cursor::new(&[0x08; 8 + 1]); b.iter_batched( || reader.clone(), - |mut reader| deku_read_vec(&mut reader), + |mut reader| deku_read::(&mut reader), BatchSize::SmallInput, ) }); c.bench_function("deku_write_vec", |b| { - b.iter(|| deku_write_vec(black_box(&deku_write_vec_input))) + b.iter_batched( + || deku_write_vec_input.clone(), + |deku_write_vec_input| deku_write(black_box(deku_write_vec_input)), + BatchSize::SmallInput, + ) }); } diff --git a/deku-derive/src/lib.rs b/deku-derive/src/lib.rs index b6a10e9d..4f7637ab 100644 --- a/deku-derive/src/lib.rs +++ b/deku-derive/src/lib.rs @@ -136,6 +136,9 @@ struct DekuData { /// enum only: byte size of the enum `id` bytes: Option, + + /// Bit Order for all fields + bit_order: Option, } impl DekuData { @@ -184,6 +187,7 @@ impl DekuData { id_type: receiver.id_type?, bits: receiver.bits, bytes: receiver.bytes, + bit_order: receiver.bit_order, }; 
DekuData::validate(&data)?; @@ -191,6 +195,7 @@ impl DekuData { Ok(data) } + // TODO: Add #[bit_order] require #[bytes] fn validate(data: &DekuData) -> Result<(), TokenStream> { // Validate `ctx_default` if data.ctx_default.is_some() && data.ctx.is_none() { @@ -315,6 +320,7 @@ impl<'a> TryFrom<&'a DekuData> for DekuDataEnum<'a> { deku_data.endian.as_ref(), deku_data.bits.as_ref(), deku_data.bytes.as_ref(), + deku_data.bit_order.as_ref(), )?; Ok(Self { @@ -434,6 +440,9 @@ struct FieldData { // assert value of field assert_eq: Option, + + /// Bit Order of field + bit_order: Option, } impl FieldData { @@ -470,6 +479,7 @@ impl FieldData { cond: receiver.cond?, assert: receiver.assert?, assert_eq: receiver.assert_eq?, + bit_order: receiver.bit_order, }; FieldData::validate(&data)?; @@ -649,6 +659,10 @@ struct DekuReceiver { /// enum only: byte size of the enum `id` #[darling(default)] bytes: Option, + + /// Bit Order of field + #[darling(default)] + bit_order: Option, } type ReplacementError = TokenStream; @@ -825,6 +839,10 @@ struct DekuFieldReceiver { // assert value of field #[darling(default = "default_res_opt", map = "map_litstr_as_tokenstream")] assert_eq: Result, ReplacementError>, + + /// Bit Order of field + #[darling(default)] + bit_order: Option, } /// Receiver for the variant-level attributes inside a enum diff --git a/deku-derive/src/macros/deku_read.rs b/deku-derive/src/macros/deku_read.rs index fdcd40ee..a71a759d 100644 --- a/deku-derive/src/macros/deku_read.rs +++ b/deku-derive/src/macros/deku_read.rs @@ -517,9 +517,11 @@ fn emit_padding(bit_size: &TokenStream) -> TokenStream { if (__deku_pad % 8) == 0 { let bytes_read = __deku_pad / 8; let mut buf = vec![0; bytes_read]; - let _ = __deku_reader.read_bytes(bytes_read, &mut buf)?; + // TODO: use skip_bytes, or Seek in the future? 
+ let _ = __deku_reader.read_bytes(bytes_read, &mut buf, ::#crate_::ctx::Order::Msb0)?; } else { - let _ = __deku_reader.read_bits(__deku_pad)?; + // TODO: use skip_bits, or Seek in the future? + let _ = __deku_reader.read_bits(__deku_pad, ::#crate_::ctx::Order::Msb0)?; } } } @@ -536,6 +538,7 @@ fn emit_field_read( let field_type = &f.ty; let field_endian = f.endian.as_ref().or(input.endian.as_ref()); + let field_bit_order = f.bit_order.as_ref().or(input.bit_order.as_ref()); let field_reader = &f.reader; @@ -617,6 +620,7 @@ fn emit_field_read( f.bits.as_ref(), f.bytes.as_ref(), f.ctx.as_ref(), + field_bit_order, )?; // The __deku_reader limiting options are special, we need to generate `(limit, (other, ..))` for them. diff --git a/deku-derive/src/macros/deku_write.rs b/deku-derive/src/macros/deku_write.rs index 5f57916d..e46892e4 100644 --- a/deku-derive/src/macros/deku_write.rs +++ b/deku-derive/src/macros/deku_write.rs @@ -418,6 +418,7 @@ fn emit_field_write( ) -> Result { let crate_ = super::get_crate_name(); let field_endian = f.endian.as_ref().or(input.endian.as_ref()); + let field_bit_order = f.bit_order.as_ref().or(input.bit_order.as_ref()); // fields to check usage of bit/byte offset let field_check_vars = [ @@ -483,6 +484,7 @@ fn emit_field_write( f.bits.as_ref(), f.bytes.as_ref(), f.ctx.as_ref(), + field_bit_order, )?; if f.temp { diff --git a/deku-derive/src/macros/mod.rs b/deku-derive/src/macros/mod.rs index 3dccf463..1610ab9f 100644 --- a/deku-derive/src/macros/mod.rs +++ b/deku-derive/src/macros/mod.rs @@ -238,17 +238,24 @@ pub(crate) fn gen_id_args( endian: Option<&syn::LitStr>, bits: Option<&Num>, bytes: Option<&Num>, + bit_order: Option<&syn::LitStr>, ) -> syn::Result { let crate_ = get_crate_name(); let endian = endian.map(gen_endian_from_str).transpose()?; let bits = bits.map(|n| quote! {::#crate_::ctx::BitSize(#n)}); let bytes = bytes.map(|n| quote! 
{::#crate_::ctx::ByteSize(#n)}); + let bit_order = bit_order.map(gen_bit_order_from_str).transpose()?; // FIXME: Should be `into_iter` here, see https://github.com/rust-lang/rust/issues/66145. - let id_args = [endian.as_ref(), bits.as_ref(), bytes.as_ref()] - .iter() - .filter_map(|i| *i) - .collect::>(); + let id_args = [ + endian.as_ref(), + bits.as_ref(), + bytes.as_ref(), + bit_order.as_ref(), + ] + .iter() + .filter_map(|i| *i) + .collect::>(); match &id_args[..] { [arg] => Ok(quote! {#arg}), @@ -265,18 +272,27 @@ fn gen_field_args( bits: Option<&Num>, bytes: Option<&Num>, ctx: Option<&Punctuated>, + bit_order: Option<&syn::LitStr>, ) -> syn::Result { let crate_ = get_crate_name(); let endian = endian.map(gen_endian_from_str).transpose()?; let bits = bits.map(|n| quote! {::#crate_::ctx::BitSize(#n)}); let bytes = bytes.map(|n| quote! {::#crate_::ctx::ByteSize(#n)}); + let bit_order = bit_order.map(gen_bit_order_from_str).transpose()?; let ctx = ctx.map(|c| quote! {#c}); // FIXME: Should be `into_iter` here, see https://github.com/rust-lang/rust/issues/66145. - let field_args = [endian.as_ref(), bits.as_ref(), bytes.as_ref(), ctx.as_ref()] - .iter() - .filter_map(|i| *i) - .collect::>(); + // TODO: the order here should be documented + let field_args = [ + endian.as_ref(), + bits.as_ref(), + bytes.as_ref(), + bit_order.as_ref(), + ctx.as_ref(), + ] + .iter() + .filter_map(|i| *i) + .collect::>(); // Because `impl DekuRead<'_, (T1, T2)>` but `impl DekuRead<'_, T1>`(not tuple) match &field_args[..] { @@ -299,6 +315,20 @@ fn gen_endian_from_str(s: &syn::LitStr) -> syn::Result { } } +/// Generate bit_order tokens from string: `lsb` -> `Order::Lsb0`. +fn gen_bit_order_from_str(s: &syn::LitStr) -> syn::Result { + let crate_ = get_crate_name(); + match s.value().as_str() { + "lsb" => Ok(quote! {::#crate_::ctx::Order::Lsb0}), + "msb" => Ok(quote! 
{::#crate_::ctx::Order::Msb0}), + _ => { + // treat as variable, possibly from `ctx` + let v: TokenStream = s.value().parse()?; + Ok(quote! {#v}) + } + } +} + /// Wraps a TokenStream with a closure providing access to `ctx` variables when /// `ctx_default` is provided fn wrap_default_ctx( diff --git a/ensure_no_std/Cargo.toml b/ensure_no_std/Cargo.toml index f3799d4a..c56eee92 100644 --- a/ensure_no_std/Cargo.toml +++ b/ensure_no_std/Cargo.toml @@ -22,4 +22,3 @@ alloc = [] cortex-m-rt = "0.7.3" deku = { path = "../", default-features = false, features = ["alloc"] } embedded-alloc = "0.5.0" - diff --git a/examples/custom_reader_and_writer.rs b/examples/custom_reader_and_writer.rs index 7199f610..10a55114 100644 --- a/examples/custom_reader_and_writer.rs +++ b/examples/custom_reader_and_writer.rs @@ -1,6 +1,5 @@ use std::convert::TryInto; -use deku::bitvec::{BitVec, Msb0}; use deku::ctx::BitSize; use deku::writer::Writer; use deku::{prelude::*, DekuWriter}; diff --git a/examples/ieee.rs b/examples/ieee.rs new file mode 100644 index 00000000..6428df09 --- /dev/null +++ b/examples/ieee.rs @@ -0,0 +1,73 @@ +use deku::ctx::Order; +use deku::prelude::*; + +use std::convert::TryFrom; + +#[derive(Debug, DekuRead, DekuWrite, PartialEq)] +#[deku(type = "u8", bits = "2")] +#[deku(bit_order = "ctx_lsb", ctx = "ctx_lsb: Order")] +pub enum FrameType { + #[deku(id = "0")] + Management, + #[deku(id = "1")] + Control, + #[deku(id = "2")] + Data, +} + +#[derive(Debug, DekuRead, DekuWrite, PartialEq)] +#[deku(bit_order = "ctx_lsb", ctx = "ctx_lsb: Order")] +pub struct Flags { + #[deku(bits = 1)] + pub to_ds: u8, + #[deku(bits = 1)] + pub from_ds: u8, + #[deku(bits = 1)] + pub more_fragments: u8, + #[deku(bits = 1)] + pub retry: u8, + #[deku(bits = 1)] + pub power_management: u8, + #[deku(bits = 1)] + pub more_data: u8, + #[deku(bits = 1)] + pub protected_frame: u8, + #[deku(bits = 1)] + pub order: u8, +} + +#[derive(Debug, DekuRead, DekuWrite, PartialEq)] +#[deku(bit_order = "lsb")] 
+pub struct FrameControl { + #[deku(bits = 4)] + pub sub_type: u8, + #[deku(bits = 2)] + pub protocol_version: u8, + pub frame_type: FrameType, + + pub flags: Flags, +} + +fn main() { + let data = vec![0x88u8, 0x41]; + let control_frame = FrameControl::try_from(data.as_ref()).unwrap(); + assert_eq!( + control_frame, + FrameControl { + protocol_version: 0, + frame_type: FrameType::Data, + sub_type: 8, + + flags: Flags { + to_ds: 1, + from_ds: 0, + more_fragments: 0, + retry: 0, + power_management: 0, + more_data: 0, + protected_frame: 1, + order: 0, + } + } + ); +} diff --git a/src/attributes.rs b/src/attributes.rs index 048dfc65..a19671b3 100644 --- a/src/attributes.rs +++ b/src/attributes.rs @@ -33,6 +33,7 @@ enum DekuEnum { | Attribute | Scope | Description |-----------|------------------|------------ | [endian](#endian) | top-level, field | Set the endianness +| [bit_order](#bit_order) | top-level, field | Set the field representing the order in which to read the bits | [magic](#magic) | top-level | A magic value that must be present at the start of this struct/enum | [assert](#assert) | field | Assert a condition | [assert_eq](#assert_eq) | field | Assert equals on the field @@ -141,6 +142,92 @@ assert_eq!( let value: Vec = value.try_into().unwrap(); assert_eq!(&*data, value); ``` +# bit_order + +Specify the field or containers bit order. By default all bits are read in `Msb0` (Most significant bit) order. 
+ +### Top-Level Example +```rust +# use deku::prelude::*; +# use std::convert::{TryInto, TryFrom}; +# #[derive(Debug, DekuRead, DekuWrite, PartialEq)] +#[deku(bit_order = "lsb")] +pub struct SquashfsV3 { + #[deku(bits = "4")] + inode_type: u32, + #[deku(bits = "12")] + mode: u32, + #[deku(bits = "8")] + uid: u32, + #[deku(bits = "8")] + guid: u32, + mtime: u32, + inode_number: u32, +} + +let data: &[u8] = &[ + 0x31, 0x12, 0x04, 0x05, 0x06, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, +]; +let header = SquashfsV3::try_from(data).unwrap(); +assert_eq!( + SquashfsV3 { + inode_type: 0x01, + mode: 0x123, + uid: 0x4, + guid: 0x5, + mtime: 0x6, + inode_number: 0x7 + }, + header, +); +``` + +With endian-ness: +```rust +# use deku::prelude::*; +# use std::convert::{TryInto, TryFrom}; +# #[derive(Debug, DekuRead, DekuWrite, PartialEq)] +#[deku(endian = "big", bit_order = "lsb")] +pub struct BigEndian { + #[deku(bits = "13")] + offset: u16, + #[deku(bits = "3")] + t: u8, +} + +let data = vec![0x40, 0x40]; +let big_endian = BigEndian::try_from(data.as_ref()).unwrap(); +assert_eq!( + big_endian, + BigEndian { + offset: 0x4000, + t: 2 + } +); + +let bytes = big_endian.to_bytes().unwrap(); +assert_eq!(bytes, data); +```` + +### Field Example +```rust +# use deku::prelude::*; +# use std::convert::{TryInto, TryFrom}; +# #[derive(Debug, DekuRead, DekuWrite, PartialEq)] +pub struct LsbField { + #[deku(bit_order = "lsb", bits = "13")] + offset: u16, + #[deku(bit_order = "lsb", bits = "3")] + t: u8, +} + +let data = vec![0x40, 0x40]; +let more_first = LsbField::try_from(data.as_ref()).unwrap(); +assert_eq!(more_first, LsbField { offset: 0x40, t: 2 }); + +let bytes = more_first.to_bytes().unwrap(); +assert_eq!(bytes, data); +``` # magic diff --git a/src/ctx.rs b/src/ctx.rs index 37283288..2935c472 100644 --- a/src/ctx.rs +++ b/src/ctx.rs @@ -4,6 +4,15 @@ use core::marker::PhantomData; use core::str::FromStr; +/// Bit numbering +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub enum Order 
{ + /// Most significant bit + Msb0, + /// least significant bit + Lsb0, +} + /// An endian #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum Endian { diff --git a/src/impls/nonzero.rs b/src/impls/nonzero.rs index 5ea5b557..0c1121e2 100644 --- a/src/impls/nonzero.rs +++ b/src/impls/nonzero.rs @@ -38,11 +38,42 @@ macro_rules! ImplDekuTraitsCtx { }; } +macro_rules! ImplDekuTraitsCtxOrder { + ($typ:ty, $readtype:ty, $ctx_arg:tt, $ctx_type:tt) => { + impl DekuReader<'_, $ctx_type> for $typ { + fn from_reader_with_ctx( + reader: &mut crate::reader::Reader, + $ctx_arg: $ctx_type, + ) -> Result { + let value = <$readtype>::from_reader_with_ctx(reader, $ctx_arg)?; + let value = <$typ>::new(value); + + match value { + None => Err(DekuError::Parse(format!("NonZero assertion"))), + Some(v) => Ok(v), + } + } + } + }; +} + macro_rules! ImplDekuTraits { ($typ:ty, $readtype:ty) => { ImplDekuTraitsCtx!($typ, $readtype, (), ()); ImplDekuTraitsCtx!($typ, $readtype, (endian, bitsize), (Endian, BitSize)); ImplDekuTraitsCtx!($typ, $readtype, (endian, bytesize), (Endian, ByteSize)); + ImplDekuTraitsCtxOrder!( + $typ, + $readtype, + (endian, bitsize, order), + (Endian, BitSize, Order) + ); + ImplDekuTraitsCtxOrder!( + $typ, + $readtype, + (endian, bytesize, order), + (Endian, ByteSize, Order) + ); ImplDekuTraitsCtx!($typ, $readtype, endian, Endian); }; } diff --git a/src/impls/primitive.rs b/src/impls/primitive.rs index 6e3f72de..caf0af53 100644 --- a/src/impls/primitive.rs +++ b/src/impls/primitive.rs @@ -2,6 +2,8 @@ use alloc::format; #[cfg(feature = "alloc")] use alloc::string::ToString; +#[cfg(feature = "alloc")] +use alloc::vec; use core::convert::TryInto; use bitvec::prelude::*; @@ -32,33 +34,21 @@ trait DekuRead<'a, Ctx = ()> { Self: Sized; } -/// "Writer" trait: write from type to bits -trait DekuWrite { - /// Write type to bits - /// * **output** - Sink to store resulting bits - /// * **ctx** - A context required by context-sensitive reading. 
A unit type `()` means no context - /// needed. - fn write( - &self, - output: &mut crate::bitvec::BitVec, - ctx: Ctx, - ) -> Result<(), DekuError>; -} +// specialize u8 for ByteSize +impl DekuRead<'_, (Endian, ByteSize, Order)> for u8 { + #[inline] + fn read( + input: &BitSlice, + (_, _, _): (Endian, ByteSize, Order), + ) -> Result<(usize, Self), DekuError> { + const MAX_TYPE_BITS: usize = BitSize::of::().0; -/// Implements DekuWrite for references of types that implement DekuWrite -impl DekuWrite for &T -where - T: DekuWrite, - Ctx: Copy, -{ - /// Write value of type to bits - fn write(&self, output: &mut BitVec, ctx: Ctx) -> Result<(), DekuError> { - ::write(self, output, ctx)?; - Ok(()) + // PANIC: We already check that input.len() < bit_size above, so no panic will happen + let value = input[..MAX_TYPE_BITS].load::(); + Ok((MAX_TYPE_BITS, value)) } } -// specialize u8 for ByteSize impl DekuRead<'_, (Endian, ByteSize)> for u8 { #[inline] fn read( @@ -73,14 +63,14 @@ impl DekuRead<'_, (Endian, ByteSize)> for u8 { } } -impl DekuReader<'_, (Endian, ByteSize)> for u8 { +impl DekuReader<'_, (Endian, ByteSize, Order)> for u8 { #[inline] fn from_reader_with_ctx( reader: &mut Reader, - (endian, size): (Endian, ByteSize), + (endian, size, order): (Endian, ByteSize, Order), ) -> Result { let mut buf = [0; core::mem::size_of::()]; - let ret = reader.read_bytes(size.0, &mut buf)?; + let ret = reader.read_bytes(size.0, &mut buf, order)?; let a = match ret { ReaderRet::Bits(bits) => { let Some(bits) = bits else { @@ -97,6 +87,124 @@ impl DekuReader<'_, (Endian, ByteSize)> for u8 { macro_rules! 
ImplDekuReadBits { ($typ:ty, $inner:ty) => { + impl DekuRead<'_, (Endian, BitSize, Order)> for $typ { + #[inline] + fn read( + input: &BitSlice, + (endian, size, order): (Endian, BitSize, Order), + ) -> Result<(usize, Self), DekuError> { + const MAX_TYPE_BITS: usize = BitSize::of::<$typ>().0; + let bit_size: usize = size.0; + + let input_is_le = endian.is_le(); + + // PANIC: We already check that input.len() < bit_size above, so no panic will happen + let bit_slice = &input; + + let pad = 8 * ((bit_slice.len() + 7) / 8) - bit_slice.len(); + + // if everything is aligned, just read the value + if pad == 0 && bit_slice.len() == MAX_TYPE_BITS { + let bytes = bit_slice.domain().region().unwrap().1; + + if bytes.len() * 8 == MAX_TYPE_BITS { + // Read value + let value = if input_is_le { + <$typ>::from_le_bytes(bytes.try_into()?) + } else { + <$typ>::from_be_bytes(bytes.try_into()?) + }; + return Ok((bit_size, value)); + } + } + + // if read from Lsb order and it's escpecially cursed since its not just within one byte... 
+ // read_bits returned: [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1] + // | second | first | + // we want to read from right to left when lsb (without using BitVec BitFields) + // + // Turning this into [0x23, 0x01] (then appending till type size) + if order == Order::Lsb0 && bit_slice.len() > 8 { + let mut bits = BitVec::::with_capacity(bit_slice.len() + pad); + + bits.extend_from_bitslice(&bit_slice); + + for _ in 0..pad { + bits.insert(0, false); + } + + let mut buf = vec![]; + let mut n = bits.len() - 8; + while let Some(slice) = bits.get(n..n + 8) { + let a: u8 = slice.load_be(); + buf.push(a); + if n < 8 { + break; + } + n -= 8; + } + + // Pad up-to size of type + for _ in 0..core::mem::size_of::<$typ>() - buf.len() { + buf.push(0x00); + } + + // Read value + let value = if input_is_le { + <$typ>::from_le_bytes(buf.try_into().unwrap()) + } else { + <$typ>::from_be_bytes(buf.try_into().unwrap()) + }; + + Ok((bit_size, value)) + } else { + // Create a new BitVec from the slice and pad un-aligned chunks + // i.e. [10010110, 1110] -> [10010110, 00001110] + let bits: BitVec = { + let mut bits = BitVec::with_capacity(bit_slice.len() + pad); + + // Copy bits to new BitVec + bits.extend_from_bitslice(&bit_slice); + + // Force align + //i.e. [1110, 10010110] -> [11101001, 0110] + bits.force_align(); + + // Some padding to next byte + let index = if input_is_le { + bits.len() - (8 - pad) + } else { + 0 + }; + for _ in 0..pad { + bits.insert(index, false); + } + + // Pad up-to size of type + for _ in 0..(MAX_TYPE_BITS - bits.len()) { + if input_is_le { + bits.push(false); + } else { + bits.insert(0, false); + } + } + + bits + }; + let bytes: &[u8] = bits.domain().region().unwrap().1; + + // Read value + let value = if input_is_le { + <$typ>::from_le_bytes(bytes.try_into()?) + } else { + <$typ>::from_be_bytes(bytes.try_into()?) + }; + + Ok((bit_size, value)) + } + } + } + impl DekuRead<'_, (Endian, BitSize)> for $typ { #[inline] fn read( @@ -173,6 +281,28 @@ macro_rules! 
ImplDekuReadBits { } } + impl DekuReader<'_, (Endian, BitSize, Order)> for $typ { + #[inline] + fn from_reader_with_ctx( + reader: &mut Reader, + (endian, size, order): (Endian, BitSize, Order), + ) -> Result<$typ, DekuError> { + const MAX_TYPE_BITS: usize = BitSize::of::<$typ>().0; + if size.0 > MAX_TYPE_BITS { + return Err(DekuError::Parse(format!( + "too much data: container of {MAX_TYPE_BITS} bits cannot hold {} bits", + size.0 + ))); + } + let bits = reader.read_bits(size.0, order)?; + let Some(bits) = bits else { + return Err(DekuError::Parse(format!("no bits read from reader",))); + }; + let a = <$typ>::read(&bits, (endian, size, order))?; + Ok(a.1) + } + } + impl DekuReader<'_, (Endian, BitSize)> for $typ { #[inline] fn from_reader_with_ctx( @@ -186,11 +316,11 @@ macro_rules! ImplDekuReadBits { size.0 ))); } - let bits = reader.read_bits(size.0)?; + let bits = reader.read_bits(size.0, Order::Msb0)?; let Some(bits) = bits else { return Err(DekuError::Parse(format!("no bits read from reader",))); }; - let a = <$typ>::read(&bits, (endian, size))?; + let a = <$typ>::read(&bits, (endian, size, Order::Msb0))?; Ok(a.1) } } @@ -199,6 +329,17 @@ macro_rules! ImplDekuReadBits { macro_rules! ImplDekuReadBytes { ($typ:ty, $inner:ty) => { + /// Ignore order + impl DekuRead<'_, (Endian, ByteSize, Order)> for $typ { + #[inline] + fn read( + input: &BitSlice, + (endian, size, _order): (Endian, ByteSize, Order), + ) -> Result<(usize, Self), DekuError> { + <$typ as DekuRead<'_, (Endian, ByteSize)>>::read(input, (endian, size)) + } + } + impl DekuRead<'_, (Endian, ByteSize)> for $typ { #[inline] fn read( @@ -222,11 +363,12 @@ macro_rules! 
ImplDekuReadBytes { } } - impl DekuReader<'_, (Endian, ByteSize)> for $typ { + // TODO: Remove + impl DekuReader<'_, (Endian, ByteSize, Order)> for $typ { #[inline] fn from_reader_with_ctx( reader: &mut Reader, - (endian, size): (Endian, ByteSize), + (endian, size, order): (Endian, ByteSize, Order), ) -> Result<$typ, DekuError> { const MAX_TYPE_BYTES: usize = core::mem::size_of::<$typ>(); if size.0 > MAX_TYPE_BYTES { @@ -236,7 +378,7 @@ macro_rules! ImplDekuReadBytes { ))); } let mut buf = [0; core::mem::size_of::<$typ>()]; - let ret = reader.read_bytes(size.0, &mut buf)?; + let ret = reader.read_bytes(size.0, &mut buf, order)?; let a = match ret { ReaderRet::Bits(Some(bits)) => { let a = <$typ>::read(&bits, (endian, size))?; @@ -266,6 +408,17 @@ macro_rules! ImplDekuReadBytes { macro_rules! ImplDekuReadSignExtend { ($typ:ty, $inner:ty) => { + // Ignore Order, send back + impl DekuRead<'_, (Endian, ByteSize, Order)> for $typ { + #[inline] + fn read( + input: &BitSlice, + (endian, size, _order): (Endian, ByteSize, Order), + ) -> Result<(usize, Self), DekuError> { + <$typ as DekuRead<'_, (Endian, ByteSize)>>::read(input, (endian, size)) + } + } + impl DekuRead<'_, (Endian, ByteSize)> for $typ { #[inline] fn read( @@ -283,14 +436,15 @@ macro_rules! ImplDekuReadSignExtend { } } - impl DekuReader<'_, (Endian, ByteSize)> for $typ { + // TODO: Remove + impl DekuReader<'_, (Endian, ByteSize, Order)> for $typ { #[inline] fn from_reader_with_ctx( reader: &mut Reader, - (endian, size): (Endian, ByteSize), + (endian, size, order): (Endian, ByteSize, Order), ) -> Result<$typ, DekuError> { let mut buf = [0; core::mem::size_of::<$typ>()]; - let ret = reader.read_bytes(size.0, &mut buf)?; + let ret = reader.read_bytes(size.0, &mut buf, order)?; let a = match ret { ReaderRet::Bits(bits) => { let Some(bits) = bits else { @@ -316,6 +470,25 @@ macro_rules! 
ImplDekuReadSignExtend { } } + impl DekuRead<'_, (Endian, BitSize, Order)> for $typ { + #[inline] + fn read( + input: &BitSlice, + (endian, size, order): (Endian, BitSize, Order), + ) -> Result<(usize, Self), DekuError> { + let (amt_read, value) = <$inner as DekuRead<'_, (Endian, BitSize, Order)>>::read( + input, + (endian, size, order), + )?; + + const MAX_TYPE_BITS: usize = BitSize::of::<$typ>().0; + let bit_size = size.0; + let shift = MAX_TYPE_BITS - bit_size; + let value = (value as $typ) << shift >> shift; + Ok((amt_read, value)) + } + } + impl DekuRead<'_, (Endian, BitSize)> for $typ { #[inline] fn read( @@ -338,6 +511,16 @@ macro_rules! ImplDekuReadSignExtend { fn from_reader_with_ctx( reader: &mut Reader, (endian, size): (Endian, BitSize), + ) -> Result<$typ, DekuError> { + <$typ>::from_reader_with_ctx(reader, (endian, size, Order::Msb0)) + } + } + + impl DekuReader<'_, (Endian, BitSize, Order)> for $typ { + #[inline] + fn from_reader_with_ctx( + reader: &mut Reader, + (endian, size, order): (Endian, BitSize, Order), ) -> Result<$typ, DekuError> { const MAX_TYPE_BITS: usize = BitSize::of::<$typ>().0; if size.0 > MAX_TYPE_BITS { @@ -346,7 +529,7 @@ macro_rules! ImplDekuReadSignExtend { size.0 ))); } - let bits = reader.read_bits(size.0)?; + let bits = reader.read_bits(size.0, order)?; let Some(bits) = bits else { return Err(DekuError::Parse(format!("no bits read from reader",))); }; @@ -361,6 +544,28 @@ macro_rules! ImplDekuReadSignExtend { // BitSize wasn't defined macro_rules! 
ForwardDekuRead { ($typ:ty) => { + impl DekuReader<'_, (Endian, Order)> for $typ { + #[inline] + fn from_reader_with_ctx( + reader: &mut Reader, + (endian, order): (Endian, Order), + ) -> Result<$typ, DekuError> { + let byte_size = core::mem::size_of::<$typ>(); + + <$typ>::from_reader_with_ctx(reader, (endian, ByteSize(byte_size), order)) + } + } + + impl DekuReader<'_, (Endian, ByteSize)> for $typ { + #[inline] + fn from_reader_with_ctx( + reader: &mut Reader, + (endian, byte_size): (Endian, ByteSize), + ) -> Result<$typ, DekuError> { + <$typ>::from_reader_with_ctx(reader, (endian, byte_size, Order::Msb0)) + } + } + // Only have `endian`, set `bit_size` to `Size::of::()` impl DekuReader<'_, Endian> for $typ { #[inline] @@ -405,6 +610,33 @@ macro_rules! ForwardDekuRead { } } + //// Only have `bit_size`, set `endian` to `Endian::default`. + impl DekuReader<'_, (BitSize, Order)> for $typ { + #[inline] + fn from_reader_with_ctx( + reader: &mut Reader, + (bit_size, order): (BitSize, Order), + ) -> Result<$typ, DekuError> { + let endian = Endian::default(); + + if (bit_size.0 % 8) == 0 { + <$typ>::from_reader_with_ctx(reader, (endian, ByteSize(bit_size.0 / 8), order)) + } else { + <$typ>::from_reader_with_ctx(reader, (endian, bit_size, order)) + } + } + } + + impl DekuReader<'_, Order> for $typ { + #[inline] + fn from_reader_with_ctx( + reader: &mut Reader, + order: Order, + ) -> Result<$typ, DekuError> { + <$typ>::from_reader_with_ctx(reader, (Endian::default(), order)) + } + } + impl DekuReader<'_> for $typ { #[inline] fn from_reader_with_ctx( @@ -419,6 +651,63 @@ macro_rules! ForwardDekuRead { macro_rules! 
ImplDekuWrite { ($typ:ty) => { + impl DekuWriter<(Endian, BitSize, Order)> for $typ { + #[inline] + fn to_writer( + &self, + writer: &mut Writer, + (endian, size, order): (Endian, BitSize, Order), + ) -> Result<(), DekuError> { + let input = match endian { + Endian::Little => self.to_le_bytes(), + Endian::Big => self.to_be_bytes(), + }; + + let bit_size: usize = size.0; + + let input_bits = input.view_bits::(); + + if bit_size > input_bits.len() { + return Err(DekuError::InvalidParam(format!( + "bit size {} is larger then input {}", + bit_size, + input_bits.len() + ))); + } + + match (endian, order) { + (Endian::Little, Order::Lsb0) + | (Endian::Little, Order::Msb0) + | (Endian::Big, Order::Lsb0) => { + let mut remaining_bits = bit_size; + for chunk in input_bits.chunks(8) { + if chunk.len() > remaining_bits { + writer.write_bits_order( + &chunk[chunk.len() - remaining_bits..], + order, + )?; + break; + } else { + writer.write_bits_order(&chunk, order)?; + } + remaining_bits -= chunk.len(); + } + } + (Endian::Big, Order::Msb0) => { + // big endian + // Example read 10 bits u32 [0xAB, 0b11_000000] + // => [00000000, 00000000, 00000010, 10101111] + writer.write_bits_order( + &input_bits[input_bits.len() - bit_size..], + Order::Msb0, + )?; + } + } + + Ok(()) + } + } + impl DekuWriter<(Endian, BitSize)> for $typ { #[inline] fn to_writer( @@ -465,6 +754,18 @@ macro_rules! ImplDekuWrite { } } + /// When using Endian and ByteSize, Order is not used + impl DekuWriter<(Endian, ByteSize, Order)> for $typ { + #[inline] + fn to_writer( + &self, + writer: &mut Writer, + (endian, size, _order): (Endian, ByteSize, Order), + ) -> Result<(), DekuError> { + <$typ>::to_writer(self, writer, (endian, size)) + } + } + impl DekuWriter<(Endian, ByteSize)> for $typ { #[inline] fn to_writer( @@ -516,6 +817,29 @@ macro_rules! ImplDekuWrite { macro_rules! 
ForwardDekuWrite { ($typ:ty) => { + impl DekuWriter<(BitSize, Order)> for $typ { + #[inline(always)] + fn to_writer( + &self, + writer: &mut Writer, + (bit_size, order): (BitSize, Order), + ) -> Result<(), DekuError> { + <$typ>::to_writer(self, writer, (Endian::default(), bit_size, order)) + } + } + + impl DekuWriter<(Endian, Order)> for $typ { + #[inline(always)] + fn to_writer( + &self, + writer: &mut Writer, + (endian, order): (Endian, Order), + ) -> Result<(), DekuError> { + let byte_size = core::mem::size_of::<$typ>(); + <$typ>::to_writer(self, writer, (endian, ByteSize(byte_size), order)) + } + } + impl DekuWriter for $typ { #[inline] fn to_writer( @@ -538,10 +862,21 @@ macro_rules! ForwardDekuWrite { } } + impl DekuWriter for $typ { + #[inline] + fn to_writer( + &self, + writer: &mut Writer, + order: Order, + ) -> Result<(), DekuError> { + <$typ>::to_writer(self, writer, (Endian::default(), order)) + } + } + impl DekuWriter for $typ { #[inline] fn to_writer(&self, writer: &mut Writer, _: ()) -> Result<(), DekuError> { - <$typ>::to_writer(self, writer, Endian::default()) + <$typ>::to_writer(self, writer, (Endian::default())) } } }; @@ -621,7 +956,8 @@ mod tests { fn $test_name() { let mut r = std::io::Cursor::new($input); let mut reader = Reader::new(&mut r); - let res_read = <$typ>::from_reader_with_ctx(&mut reader, ENDIAN).unwrap(); + let res_read = + <$typ>::from_reader_with_ctx(&mut reader, (ENDIAN, Order::Msb0)).unwrap(); assert_eq!($expected, res_read); let mut out_buf = vec![]; @@ -743,7 +1079,8 @@ mod tests { let mut reader = Reader::new(&mut input); let res_read = match bit_size { Some(bit_size) => { - u32::from_reader_with_ctx(&mut reader, (endian, BitSize(bit_size))).unwrap() + u32::from_reader_with_ctx(&mut reader, (endian, BitSize(bit_size), Order::Msb0)) + .unwrap() } None => u32::from_reader_with_ctx(&mut reader, endian).unwrap(), }; diff --git a/src/reader.rs b/src/reader.rs index 6de4b35a..e8c613b9 100644 --- a/src/reader.rs +++ 
b/src/reader.rs @@ -5,7 +5,7 @@ use core::cmp::Ordering; use bitvec::prelude::*; use no_std_io::io::{ErrorKind, Read}; -use crate::{prelude::NeedSize, DekuError}; +use crate::{ctx::Order, prelude::NeedSize, DekuError}; use alloc::vec::Vec; #[cfg(feature = "logging")] @@ -112,7 +112,7 @@ impl<'a, R: Read> Reader<'a, R> { #[cfg(feature = "logging")] log::trace!("skip_bits: {amt}"); // Save, and keep the leftover bits since the read will most likely be less than a byte - self.read_bits(amt)?; + self.read_bits(amt, Order::Msb0)?; Ok(()) } @@ -128,7 +128,11 @@ impl<'a, R: Read> Reader<'a, R> { /// # Params /// `amt` - Amount of bits that will be read. Must be <= [`MAX_BITS_AMT`]. #[inline] - pub fn read_bits(&mut self, amt: usize) -> Result>, DekuError> { + pub fn read_bits( + &mut self, + amt: usize, + order: Order, + ) -> Result>, DekuError> { #[cfg(feature = "logging")] log::trace!("read_bits: requesting {amt} bits"); if amt == 0 { @@ -147,10 +151,9 @@ impl<'a, R: Read> Reader<'a, R> { // previous read was not enough to satisfy the amt requirement, return all previously Ordering::Greater => { // read bits - ret.extend_from_bitslice(&self.leftover); // calculate the amount of bytes we need to read to read enough bits - let bits_left = amt - self.leftover.len(); + let mut bits_left = amt - self.leftover.len(); let mut bytes_len = bits_left / 8; if (bits_left % 8) != 0 { bytes_len += 1; @@ -171,20 +174,61 @@ impl<'a, R: Read> Reader<'a, R> { log::trace!("read_bits: read() {:02x?}", read_buf); // create bitslice and remove unused bits - let rest = BitSlice::try_from_slice(read_buf).unwrap(); - let (rest, not_needed) = rest.split_at(bits_left); - core::mem::swap(&mut not_needed.to_bitvec(), &mut self.leftover); + let mut rest = BitSlice::try_from_slice(read_buf).unwrap(); - // create return - ret.extend_from_bitslice(rest); + #[cfg(feature = "logging")] + log::trace!("read_bits: bits: {}", rest); + + // remove bytes until we get to the last byte, of which + // we need to 
care about bit-order + let mut front_bits = None; + + // Allow bits_left -= bits_left - (bits_left % 8), as this is correct + #[allow(clippy::misrefactored_assign_op)] + if bits_left > 8 { + let (used, more) = rest.split_at(bits_left - (bits_left % 8)); + bits_left -= bits_left - (bits_left % 8); + front_bits = Some(used); + rest = more; + } + + match order { + Order::Lsb0 => { + let (rest, used) = rest.split_at(rest.len() - bits_left); + ret.extend_from_bitslice(used); + ret.extend_from_bitslice(&self.leftover); + if let Some(front_bits) = front_bits { + ret.extend_from_bitslice(front_bits); + } + + self.leftover = rest.to_bitvec(); + } + Order::Msb0 => { + let (rest, not_needed) = rest.split_at(bits_left); + // TODO: test + if let Some(front_bits) = front_bits { + ret.extend_from_bitslice(front_bits); + } + ret.extend_from_bitslice(&self.leftover); + ret.extend_from_bitslice(rest); + + core::mem::swap(&mut not_needed.to_bitvec(), &mut self.leftover); + } + } } // The entire bits we need to return have been already read previously from bytes but // not all were read, return required leftover bits - Ordering::Less => { - let used = self.leftover.split_off(amt); - ret.extend_from_bitslice(&self.leftover); - self.leftover = used; - } + Ordering::Less => match order { + Order::Lsb0 => { + let used = self.leftover.split_off(self.leftover.len() - amt); + ret.extend_from_bitslice(&used); + } + Order::Msb0 => { + let used = self.leftover.split_off(amt); + ret.extend_from_bitslice(&self.leftover); + self.leftover = used; + } + }, } self.bits_read += ret.len(); @@ -200,7 +244,12 @@ impl<'a, R: Read> Reader<'a, R> { /// # Params /// `amt` - Amount of bytes that will be read #[inline] - pub fn read_bytes(&mut self, amt: usize, buf: &mut [u8]) -> Result { + pub fn read_bytes( + &mut self, + amt: usize, + buf: &mut [u8], + order: Order, + ) -> Result { #[cfg(feature = "logging")] log::trace!("read_bytes: requesting {amt} bytes"); if self.leftover.is_empty() { @@ -218,11 
+267,11 @@ impl<'a, R: Read> Reader<'a, R> { self.bits_read += amt * 8; #[cfg(feature = "logging")] - log::trace!("read_bytes: returning {buf:02x?}"); + log::trace!("read_bytes: returning {:02x?}", &buf[..amt]); Ok(ReaderRet::Bytes) } else { - Ok(ReaderRet::Bits(self.read_bits(amt * 8)?)) + Ok(ReaderRet::Bits(self.read_bits(amt * 8, order)?)) } } } @@ -240,16 +289,16 @@ mod tests { let mut reader = Reader::new(&mut cursor); assert!(!reader.end()); let mut buf = [0; 1]; - let _ = reader.read_bytes(1, &mut buf); + let _ = reader.read_bytes(1, &mut buf, Order::Lsb0); assert!(reader.end()); let input = hex!("aa"); let mut cursor = Cursor::new(input); let mut reader = Reader::new(&mut cursor); assert!(!reader.end()); - let _ = reader.read_bits(4); + let _ = reader.read_bits(4, Order::Lsb0); assert!(!reader.end()); - let _ = reader.read_bits(4); + let _ = reader.read_bits(4, Order::Lsb0); assert!(reader.end()); } @@ -258,9 +307,9 @@ mod tests { let input = hex!("aa"); let mut cursor = Cursor::new(input); let mut reader = Reader::new(&mut cursor); - let _ = reader.read_bits(1); - let _ = reader.read_bits(4); - let _ = reader.read_bits(3); + let _ = reader.read_bits(1, Order::Lsb0); + let _ = reader.read_bits(4, Order::Lsb0); + let _ = reader.read_bits(3, Order::Lsb0); } #[test] @@ -269,7 +318,7 @@ mod tests { let mut cursor = Cursor::new(input); let mut reader = Reader::new(&mut cursor); let mut buf = [0; 1]; - let _ = reader.read_bytes(1, &mut buf); + let _ = reader.read_bytes(1, &mut buf, Order::Lsb0); assert_eq!([0xaa], buf); } } diff --git a/src/writer.rs b/src/writer.rs index 8b58cbd4..340ddd67 100644 --- a/src/writer.rs +++ b/src/writer.rs @@ -7,8 +7,12 @@ use no_std_io::io::Write; #[cfg(feature = "logging")] use log; +use crate::ctx::Order; use crate::DekuError; +#[cfg(feature = "alloc")] +use alloc::borrow::ToOwned; + const fn bits_of() -> usize { core::mem::size_of::().saturating_mul(::BITS as usize) } @@ -16,7 +20,7 @@ const fn bits_of() -> usize { /// Container 
to use with `from_reader` pub struct Writer<'a, W: Write> { pub(crate) inner: &'a mut W, - leftover: BitVec, + leftover: (BitVec, Order), /// Total bits written pub bits_written: usize, } @@ -27,7 +31,7 @@ impl<'a, W: Write> Writer<'a, W> { pub fn new(inner: &'a mut W) -> Self { Self { inner, - leftover: BitVec::new(), + leftover: (BitVec::new(), Order::Msb0), bits_written: 0, } } @@ -35,60 +39,160 @@ impl<'a, W: Write> Writer<'a, W> { /// Return the unused bits #[inline] pub fn rest(&mut self) -> alloc::vec::Vec { - self.leftover.iter().by_vals().collect() + self.leftover.0.iter().by_vals().collect() } /// Write all bits to `Writer` buffer if bits can fit into a byte buffer #[inline] - pub fn write_bits(&mut self, bits: &BitSlice) -> Result<(), DekuError> { + pub fn write_bits_order( + &mut self, + bits: &BitSlice, + order: Order, + ) -> Result<(), DekuError> { #[cfg(feature = "logging")] - log::trace!("attempting {} bits", bits.len()); + log::trace!("attempting {} bits : {}", bits.len(), bits); + + // quick return if we don't have enough bits to write to the byte buffer + if (self.leftover.0.len() + bits.len()) < 8 { + if self.leftover.1 == Order::Msb0 { + self.leftover.0.extend_from_bitslice(bits); + self.leftover.1 = order; - // quick return if we can't write to the bytes buffer - if (self.leftover.len() + bits.len()) < 8 { - self.leftover.extend_from_bitslice(bits); + #[cfg(feature = "logging")] + log::trace!( + "no write: pre-pending {} bits : {} => {}", + bits.len(), + bits, + self.leftover.0 + ); + } else { + let tmp = self.leftover.0.clone(); + self.leftover.0 = bits.to_owned(); + self.leftover.0.extend_from_bitslice(&tmp); + self.leftover.1 = order; + + #[cfg(feature = "logging")] + log::trace!( + "no write: post-pending {} bits : {} => {}", + bits.len(), + bits, + self.leftover.0 + ); + } return Ok(()); } - // pre-pend the previous attempt to write if needed - let mut bits = if self.leftover.is_empty() { + let mut bits = if self.leftover.0.is_empty() { 
bits + } else if self.leftover.1 == Order::Msb0 { + #[cfg(feature = "logging")] + log::trace!( + "pre-pending {} bits : {}", + self.leftover.0.len(), + self.leftover.0 + ); + + self.leftover.0.extend_from_bitslice(bits); + + #[cfg(feature = "logging")] + log::trace!("now {} bits : {}", self.leftover.0.len(), self.leftover.0); + &mut self.leftover.0 } else { #[cfg(feature = "logging")] - log::trace!("pre-pending {} bits", self.leftover.len()); - self.leftover.extend_from_bitslice(bits); - &mut self.leftover + log::trace!( + "post-pending {} bits : {}", + self.leftover.0.len(), + self.leftover.0 + ); + + let tmp = self.leftover.0.clone(); + self.leftover.0 = bits.to_owned(); + self.leftover.0.extend_from_bitslice(&tmp); + + #[cfg(feature = "logging")] + log::trace!("now {} bits : {}", self.leftover.0.len(), self.leftover.0); + &mut self.leftover.0 }; - // one shot impl of BitSlice::read(no read_exact), but for no_std - let mut buf = alloc::vec![0x00; bits.len() / 8]; - let mut count = 0; - bits.chunks_exact(bits_of::()) - .zip(buf.iter_mut()) - .for_each(|(byte, slot)| { - *slot = byte.load_be(); - count += 1; - }); - bits = unsafe { bits.get_unchecked(count * bits_of::()..) }; - - // TODO: with_capacity? - self.bits_written = buf.len() * 8; - self.leftover = bits.to_bitvec(); - if self.inner.write_all(&buf).is_err() { - return Err(DekuError::WriteError); + if order == Order::Msb0 { + // This is taken from bitvec's std::io::Read function for BitSlice, but + // supports no-std + let mut buf = alloc::vec![0x00; bits.len() / 8]; + let mut count = 0; + bits.chunks_exact(bits_of::()) + .zip(buf.iter_mut()) + .for_each(|(byte, slot)| { + *slot = byte.load_be(); + count += 1; + }); + // SAFETY: there is no safety comment in bitvec, but assume this is safe b/c of bits + // always still pointing to its own instance of bits (size-wise) + bits = unsafe { bits.get_unchecked(count * bits_of::()..) }; + + // TODO: with_capacity? 
+ self.bits_written = buf.len() * 8; + self.leftover = (bits.to_bitvec(), order); + if self.inner.write_all(&buf).is_err() { + return Err(DekuError::WriteError); + } + #[cfg(feature = "logging")] + log::trace!("wrote {} bits : 0x{:02x?}", buf.len() * 8, &buf); + } else { + // This is more complicated, as we need to skip the first bytes until we are "byte aligned" + // TODO: then reverse the buf before writing in the case that bits.len() > one byte buf ? + let skip_amount = bits.len() % 8; + + // This is taken from bitvec's std::io::Read function for BitSlice, but + // supports no-std + let mut buf = alloc::vec![0x00; bits.len() / 8]; + let mut count = 0; + + // SAFETY: there is no safety comment in bitvec, but assume this is safe b/c of bits + // always still pointing to its own instance of bits (size-wise) + let inner_bits = unsafe { bits.get_unchecked(skip_amount..) }; + inner_bits + .chunks_exact(bits_of::()) + .zip(buf.iter_mut()) + .for_each(|(byte, slot)| { + *slot = byte.load_be(); + count += 1; + }); + // SAFETY: there is no safety comment in bitvec, but assume this is safe b/c of bits + // always still pointing to its own instance of bits (size-wise) + bits = unsafe { bits.get_unchecked(..skip_amount) }; + + // TODO: with_capacity? 
+ self.bits_written = buf.len() * 8; + self.leftover = (bits.to_bitvec(), order); + if self.inner.write_all(&buf).is_err() { + return Err(DekuError::WriteError); + } + #[cfg(feature = "logging")] + log::trace!("wrote {} bits : 0x{:02x?}", buf.len() * 8, &buf); } + #[cfg(feature = "logging")] - log::trace!("wrote {} bits", buf.len() * 8); + log::trace!( + "leftover {} bits : {}", + self.leftover.0.len(), + self.leftover.0 + ); Ok(()) } + /// Write all bits to `Writer` buffer if bits can fit into a byte buffer + #[inline] + pub fn write_bits(&mut self, bits: &BitSlice) -> Result<(), DekuError> { + self.write_bits_order(bits, Order::Msb0) + } + /// Write `buf` into `Writer` #[inline] pub fn write_bytes(&mut self, buf: &[u8]) -> Result<(), DekuError> { #[cfg(feature = "logging")] log::trace!("writing {} bytes", buf.len()); - if !self.leftover.is_empty() { + if !self.leftover.0.is_empty() { #[cfg(feature = "logging")] log::trace!("leftover exists"); // TODO: we could check here and only send the required bits to finish the byte? @@ -108,18 +212,20 @@ impl<'a, W: Write> Writer<'a, W> { /// into a byte buffer #[inline] pub fn finalize(&mut self) -> Result<(), DekuError> { - if !self.leftover.is_empty() { + if !self.leftover.0.is_empty() { #[cfg(feature = "logging")] - log::trace!("finalized: {} bits leftover", self.leftover.len()); + log::trace!("finalized: {} bits leftover", self.leftover.0.len()); // add bits to be byte aligned so we can write self.leftover - .extend_from_bitslice(&bitvec![u8, Msb0; 0; 8 - self.leftover.len()]); - let mut buf = alloc::vec![0x00; self.leftover.len() / 8]; + .0 + .extend_from_bitslice(&bitvec![u8, Msb0; 0; 8 - self.leftover.0.len()]); + let mut buf = alloc::vec![0x00; self.leftover.0.len() / 8]; // write as many leftover to the buffer (as we can, can't write bits just bytes) // TODO: error if bits are leftover? 
(not bytes aligned) self.leftover + .0 .chunks_exact(bits_of::()) .zip(buf.iter_mut()) .for_each(|(byte, slot)| { diff --git a/tests/bit_order.rs b/tests/bit_order.rs new file mode 100644 index 00000000..09d68b7d --- /dev/null +++ b/tests/bit_order.rs @@ -0,0 +1,341 @@ +use assert_hex::assert_eq_hex; +use deku::ctx::{BitSize, Order}; +use deku::prelude::*; + +use std::convert::TryFrom; + +#[derive(Debug, DekuRead, DekuWrite, PartialEq)] +#[deku(type = "u8", bits = "2")] +#[deku(bit_order = "ctx_lsb", ctx = "ctx_lsb: Order")] +pub enum FrameType { + #[deku(id = "0")] + Management, + #[deku(id = "1")] + Control, + #[deku(id = "2")] + Data, +} + +#[derive(Debug, DekuRead, DekuWrite, PartialEq)] +#[deku(bit_order = "ctx_lsb", ctx = "ctx_lsb: Order")] +pub struct Flags { + #[deku(bits = 1)] + pub to_ds: u8, + #[deku(bits = 1)] + pub from_ds: u8, + #[deku(bits = 1)] + pub more_fragments: u8, + #[deku(bits = 1)] + pub retry: u8, + #[deku(bits = 1)] + pub power_management: u8, + #[deku(bits = 1)] + pub more_data: u8, + #[deku(bits = 1)] + pub protected_frame: u8, + #[deku(bits = 1)] + pub order: u8, +} + +#[derive(Debug, DekuRead, DekuWrite, PartialEq)] +#[deku(bit_order = "lsb")] +pub struct FrameControl { + #[deku(bits = 4)] + pub sub_type: u8, + #[deku(bits = 2)] + pub protocol_version: u8, + pub frame_type: FrameType, + + pub flags: Flags, +} + +#[test] +fn test_bit_order_frame() { + let data = vec![0x88u8, 0x41]; + let control_frame = FrameControl::try_from(data.as_ref()).unwrap(); + assert_eq!( + control_frame, + FrameControl { + protocol_version: 0, + frame_type: FrameType::Data, + sub_type: 8, + + flags: Flags { + to_ds: 1, + from_ds: 0, + more_fragments: 0, + retry: 0, + power_management: 0, + more_data: 0, + protected_frame: 1, + order: 0, + } + } + ); + + let bytes = control_frame.to_bytes().unwrap(); + assert_eq_hex!(bytes, data); +} + +#[derive(Debug, DekuRead, DekuWrite, PartialEq)] +#[deku(bit_order = "lsb")] +pub struct ReadGreater { + #[deku(bits = "1")] + 
one: u8, + #[deku(bits = "2")] + two: u8, + #[deku(bits = "4")] + three: u8, + #[deku(bits = "3")] + four: u8, + #[deku(bits = "6")] + five: u8, +} + +#[test] +fn test_bit_order_read_greater() { + let data: &[u8] = &[0b0111_1001, 0b111_11100]; + let g = ReadGreater::try_from(data).unwrap(); + + let bytes = g.to_bytes().unwrap(); + assert_eq_hex!(bytes, data); +} + +#[derive(Debug, DekuRead, DekuWrite, PartialEq)] +#[deku(bit_order = "lsb")] +pub struct SquashfsV3 { + #[deku(bits = "4")] + inode_type: u32, + #[deku(bits = "12")] + mode: u32, + #[deku(bits = "8")] + uid: u32, + #[deku(bits = "8")] + guid: u32, + mtime: u32, + inode_number: u32, +} + +#[test] +fn test_bit_order_squashfs() { + let data: &[u8] = &[ + 0x31, 0x12, 0x04, 0x05, 0x06, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, + ]; + let header = SquashfsV3::try_from(data).unwrap(); + assert_eq!( + SquashfsV3 { + inode_type: 0x01, + mode: 0x123, + uid: 0x4, + guid: 0x5, + mtime: 0x6, + inode_number: 0x7 + }, + header, + ); +} + +#[derive(Debug, DekuRead, DekuWrite, PartialEq)] +pub struct Surrounded { + one: u8, + header: SquashfsV3, + two: u8, + #[deku(bit_order = "lsb", bits = "4")] + three: u8, + #[deku(bits = "4")] + four: u8, + #[deku(bits = "4")] + five: u8, + #[deku(bit_order = "lsb", bits = "4")] + six: u8, +} + +#[test] +fn test_bit_order_surrounded() { + let data: &[u8] = &[ + 0xff, 0x31, 0x12, 0x04, 0x05, 0x06, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0xff, 0x0f, + 0x0f, + ]; + let header = Surrounded::try_from(data).unwrap(); + assert_eq!( + Surrounded { + one: 0xff, + header: SquashfsV3 { + inode_type: 0x01, + mode: 0x123, + uid: 0x4, + guid: 0x5, + mtime: 0x6, + inode_number: 0x7 + }, + two: 0xff, + three: 0xf, + four: 0x0, + five: 0x0, + six: 0xf, + }, + header + ); + + let bytes = header.to_bytes().unwrap(); + assert_eq_hex!(bytes, data); +} + +#[derive(Debug, DekuRead, DekuWrite, PartialEq)] +#[deku(bit_order = "lsb")] +pub struct Enums { + right: Choice, + left: Choice, +} + 
+#[derive(Debug, DekuRead, DekuWrite, PartialEq)] +#[deku( + bits = "4", + type = "u8", + bit_order = "bit_order", + ctx = "bit_order: deku::ctx::Order" +)] +pub enum Choice { + Empty = 0x0, + Full = 0xf, +} + +#[test] +fn test_bit_order_enums() { + let data = vec![0xf0]; + let control_frame = Enums::try_from(data.as_ref()).unwrap(); + assert_eq!( + control_frame, + Enums { + right: Choice::Empty, + left: Choice::Full + } + ); + + let bytes = control_frame.to_bytes().unwrap(); + assert_eq_hex!(bytes, data); +} + +#[derive(Debug, DekuRead, DekuWrite, PartialEq)] +#[deku(bit_order = "lsb")] +pub struct MoreFirst { + #[deku(bits = "13")] + offset: u16, + #[deku(bits = "3")] + t: u8, +} + +#[test] +fn test_bit_order_more_first() { + let data = vec![0x40, 0x40]; + let more_first = MoreFirst::try_from(data.as_ref()).unwrap(); + assert_eq!(more_first, MoreFirst { offset: 0x40, t: 2 }); + + let bytes = more_first.to_bytes().unwrap(); + assert_eq_hex!(bytes, data); +} + +#[derive(Debug, DekuRead, DekuWrite, PartialEq)] +pub struct LsbField { + #[deku(bit_order = "lsb", bits = "13")] + offset: u16, + #[deku(bit_order = "lsb", bits = "3")] + t: u8, +} + +#[test] +fn test_bit_order_lsb_field() { + let data = vec![0x40, 0x40]; + let more_first = LsbField::try_from(data.as_ref()).unwrap(); + assert_eq!(more_first, LsbField { offset: 0x40, t: 2 }); + + let bytes = more_first.to_bytes().unwrap(); + assert_eq_hex!(bytes, data); +} + +#[test] +fn test_bit_order_custom_reader_writer() { + fn reader_lsb(reader: &mut Reader) -> Result<(u16, u8), DekuError> { + let first = u16::from_reader_with_ctx(reader, (BitSize(13), Order::Lsb0))?; + let second = u8::from_reader_with_ctx(reader, BitSize(3))?; + + Ok((first, second)) + } + + fn reader_msb(reader: &mut Reader) -> Result<(u16, u8), DekuError> { + let first = u16::from_reader_with_ctx(reader, (BitSize(13), Order::Msb0))?; + let second = u8::from_reader_with_ctx(reader, BitSize(3))?; + + Ok((first, second)) + } + + fn writer_lsb( + 
val_msb: (u16, u8), + writer: &mut Writer, + ) -> Result<(), DekuError> { + val_msb.0.to_writer(writer, (BitSize(13), Order::Lsb0))?; + val_msb.1.to_writer(writer, (BitSize(3), Order::Msb0))?; + + Ok(()) + } + + fn writer_msb( + val_msb: (u16, u8), + writer: &mut Writer, + ) -> Result<(), DekuError> { + val_msb.0.to_writer(writer, (BitSize(13), Order::Msb0))?; + val_msb.1.to_writer(writer, (BitSize(3), Order::Msb0))?; + + Ok(()) + } + + #[derive(Debug, DekuRead, DekuWrite, PartialEq)] + pub struct Custom { + #[deku(reader = "reader_lsb(deku::reader)")] + #[deku(writer = "writer_lsb(*val_lsb, deku::writer)")] + val_lsb: (u16, u8), + #[deku(reader = "reader_msb(deku::reader)")] + #[deku(writer = "writer_msb(*val_msb, deku::writer)")] + val_msb: (u16, u8), + } + + // |lsb |msb + // | f |sss|rest f| f |sss| + let data = vec![0b0000_0000, 0b0011_1111, 0b0100_0000, 0b0011_0000]; + let more_first = Custom::try_from(data.as_ref()).unwrap(); + assert_eq!( + more_first, + Custom { + val_lsb: (0b1_1111_0000_0000, 1), + val_msb: (0b0_0110_0100_0000, 0) + } + ); + + let bytes = more_first.to_bytes().unwrap(); + assert_eq_hex!(bytes, data); +} + +#[derive(Debug, DekuRead, DekuWrite, PartialEq)] +#[deku(endian = "big", bit_order = "lsb")] +pub struct MoreFirstBe { + #[deku(bits = "13")] + offset: u16, + #[deku(bits = "3")] + t: u8, +} + +#[test] +fn test_bit_order_more_first_be() { + let data = vec![0x40, 0x40]; + let more_first = MoreFirstBe::try_from(data.as_ref()).unwrap(); + assert_eq!( + more_first, + MoreFirstBe { + offset: 0x4000, + t: 2 + } + ); + + let bytes = more_first.to_bytes().unwrap(); + assert_eq_hex!(bytes, data); +} diff --git a/tests/test_attributes/test_cond.rs b/tests/test_attributes/test_cond.rs index 2873d836..ece57e07 100644 --- a/tests/test_attributes/test_cond.rs +++ b/tests/test_attributes/test_cond.rs @@ -18,7 +18,7 @@ fn test_cond_deku() { assert_eq!( TestStruct { field_a: 0x01, - field_b: Some(0x02), + field_b: Some(0x02) }, ret_read ); diff --git 
a/tests/test_attributes/test_ctx.rs b/tests/test_attributes/test_ctx.rs index e40da0fe..5b4af53e 100644 --- a/tests/test_attributes/test_ctx.rs +++ b/tests/test_attributes/test_ctx.rs @@ -230,7 +230,7 @@ fn test_ctx_default_struct() { ) .unwrap(); assert_eq!(expected, ret_read); - let mut ret_write = bitvec![u8, Msb0;]; + let _ret_write = bitvec![u8, Msb0;]; let mut out_buf = vec![]; let mut writer = Writer::new(&mut out_buf); ret_read.to_writer(&mut writer, (1, 2)).unwrap(); diff --git a/tests/test_attributes/test_limits/test_bits_read.rs b/tests/test_attributes/test_limits/test_bits_read.rs index c31e6de2..140dac0f 100644 --- a/tests/test_attributes/test_limits/test_bits_read.rs +++ b/tests/test_attributes/test_limits/test_bits_read.rs @@ -52,7 +52,7 @@ mod test_slice { assert_eq!( TestStruct { bits: 16, - data: test_data[1..].to_vec(), + data: test_data[1..].to_vec() }, ret_read ); diff --git a/tests/test_attributes/test_limits/test_bytes_read.rs b/tests/test_attributes/test_limits/test_bytes_read.rs index bd7dc296..6c7ea610 100644 --- a/tests/test_attributes/test_limits/test_bytes_read.rs +++ b/tests/test_attributes/test_limits/test_bytes_read.rs @@ -19,7 +19,7 @@ mod test_slice { let ret_read = TestStruct::try_from(test_data.as_slice()).unwrap(); assert_eq!( TestStruct { - data: test_data.to_vec(), + data: test_data.to_vec() }, ret_read ); diff --git a/tests/test_attributes/test_limits/test_count.rs b/tests/test_attributes/test_limits/test_count.rs index c2e78712..a17e5282 100644 --- a/tests/test_attributes/test_limits/test_count.rs +++ b/tests/test_attributes/test_limits/test_count.rs @@ -42,7 +42,7 @@ mod test_slice { assert_eq!( TestStruct { count: 0x02, - data: test_data[1..].to_vec(), + data: test_data[1..].to_vec() }, ret_read ); diff --git a/tests/test_attributes/test_map.rs b/tests/test_attributes/test_map.rs index b639f1b5..37678483 100644 --- a/tests/test_attributes/test_map.rs +++ b/tests/test_attributes/test_map.rs @@ -24,7 +24,7 @@ fn test_map() { 
assert_eq!( TestStruct { field_a: "1".to_string(), - field_b: "2".to_string(), + field_b: "2".to_string() }, ret_read ); diff --git a/tests/test_attributes/test_padding/mod.rs b/tests/test_attributes/test_padding/mod.rs index 5c1d5e7a..5474f0d8 100644 --- a/tests/test_attributes/test_padding/mod.rs +++ b/tests/test_attributes/test_padding/mod.rs @@ -25,7 +25,7 @@ fn test_pad_bits_before_and_pad_bytes_before() { assert_eq!( TestStruct { field_a: 0b10, - field_b: 0xbb, + field_b: 0xbb }, ret_read ); @@ -50,7 +50,7 @@ fn test_pad_bits_after_and_pad_bytes_after() { assert_eq!( TestStruct { field_a: 0b10, - field_b: 0xbb, + field_b: 0xbb }, ret_read ); diff --git a/tests/test_attributes/test_padding/test_pad_bits_after.rs b/tests/test_attributes/test_padding/test_pad_bits_after.rs index 254e8ae2..a29bef5c 100644 --- a/tests/test_attributes/test_padding/test_pad_bits_after.rs +++ b/tests/test_attributes/test_padding/test_pad_bits_after.rs @@ -19,7 +19,7 @@ fn test_pad_bits_after() { assert_eq!( TestStruct { field_a: 0b10, - field_b: 0b0110, + field_b: 0b0110 }, ret_read ); diff --git a/tests/test_attributes/test_padding/test_pad_bits_before.rs b/tests/test_attributes/test_padding/test_pad_bits_before.rs index 68bf59cf..e60aabcf 100644 --- a/tests/test_attributes/test_padding/test_pad_bits_before.rs +++ b/tests/test_attributes/test_padding/test_pad_bits_before.rs @@ -19,7 +19,7 @@ fn test_pad_bits_before() { assert_eq!( TestStruct { field_a: 0b10, - field_b: 0b1001, + field_b: 0b1001 }, ret_read ); diff --git a/tests/test_attributes/test_padding/test_pad_bytes_after.rs b/tests/test_attributes/test_padding/test_pad_bytes_after.rs index 846c61c7..0204d91b 100644 --- a/tests/test_attributes/test_padding/test_pad_bytes_after.rs +++ b/tests/test_attributes/test_padding/test_pad_bytes_after.rs @@ -18,7 +18,7 @@ fn test_pad_bytes_after() { assert_eq!( TestStruct { field_a: 0xaa, - field_b: 0xdd, + field_b: 0xdd }, ret_read ); diff --git 
a/tests/test_attributes/test_padding/test_pad_bytes_before.rs b/tests/test_attributes/test_padding/test_pad_bytes_before.rs index f9a92e39..970e71bd 100644 --- a/tests/test_attributes/test_padding/test_pad_bytes_before.rs +++ b/tests/test_attributes/test_padding/test_pad_bytes_before.rs @@ -18,7 +18,7 @@ fn test_pad_bytes_before() { assert_eq!( TestStruct { field_a: 0xaa, - field_b: 0xdd, + field_b: 0xdd }, ret_read ); diff --git a/tests/test_attributes/test_skip.rs b/tests/test_attributes/test_skip.rs index 9a156d54..377103ff 100644 --- a/tests/test_attributes/test_skip.rs +++ b/tests/test_attributes/test_skip.rs @@ -49,7 +49,7 @@ fn test_skip_default() { TestStruct { field_a: 0x01, field_b: 0x05, - field_c: 0x02, + field_c: 0x02 }, ret_read ); @@ -90,7 +90,7 @@ fn test_skip_cond() { assert_eq!( TestStruct { field_a: 0x02, - field_b: 0x03, + field_b: 0x03 }, ret_read ); diff --git a/tests/test_compile/cases/temp_field.stderr b/tests/test_compile/cases/temp_field.stderr index f19994d1..c931b550 100644 --- a/tests/test_compile/cases/temp_field.stderr +++ b/tests/test_compile/cases/temp_field.stderr @@ -1,5 +1,5 @@ error[E0063]: missing field `field_a` in initializer of `Test1` - --> $DIR/temp_field.rs:4:10 + --> tests/test_compile/cases/temp_field.rs:4:10 | 4 | #[derive(DekuRead, DekuWrite)] | ^^^^^^^^ missing `field_a` @@ -7,7 +7,7 @@ error[E0063]: missing field `field_a` in initializer of `Test1` = note: this error originates in the derive macro `DekuRead` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0027]: pattern does not mention field `field_a` - --> $DIR/temp_field.rs:4:20 + --> tests/test_compile/cases/temp_field.rs:4:20 | 4 | #[derive(DekuRead, DekuWrite)] | ^^^^^^^^^ missing field `field_a` diff --git a/tests/test_regression.rs b/tests/test_regression.rs index 28fbcb89..ebbfbe6e 100644 --- a/tests/test_regression.rs +++ b/tests/test_regression.rs @@ -148,7 +148,7 @@ fn test_regression_292() { Reader { field1: 0, field2: 0xff, - 
field3: 0, + field3: 0 } ); @@ -170,7 +170,7 @@ fn test_regression_292() { ReaderBits { field1: 0, field2: 0xff, - field3: 0, + field3: 0 } ); @@ -190,7 +190,7 @@ fn test_regression_292() { ReaderByteNoEndian { field1: 0, field2: 0xff, - field3: 0, + field3: 0 } ); @@ -208,7 +208,7 @@ fn test_regression_292() { .1, ReaderBitPadding { field2: 0xff, - field3: 0, + field3: 0 } ); @@ -229,7 +229,7 @@ fn test_regression_292() { ReaderBitPadding1 { field1: 0, field2: 0xff, - field3: 0, + field3: 0 } ); @@ -252,7 +252,7 @@ fn test_regression_292() { ReaderTwo { field1: 0b11, field2: 0, - field3: 0b111111, + field3: 0b111111 } ); @@ -275,7 +275,7 @@ fn test_regression_292() { ReaderU16Le { field1: 0b11, field2: 0, - field3: 0b111111, + field3: 0b111111 } ); @@ -298,7 +298,7 @@ fn test_regression_292() { ReaderU16Be { field1: 0b11, field2: 0, - field3: 0b111111, + field3: 0b111111 } ); @@ -321,7 +321,7 @@ fn test_regression_292() { ReaderI16Le { field1: -0b01, field2: 1, - field3: -0b011111, + field3: -0b011111 } ); @@ -344,7 +344,7 @@ fn test_regression_292() { ReaderI16Be { field1: -0b01, field2: 1, - field3: -0b011111, + field3: -0b011111 } ); }