Skip to content

Commit

Permalink
test: get the e2e tests to pass reliably
Browse files Browse the repository at this point in the history
  • Loading branch information
JamesPiechota committed Feb 9, 2025
1 parent 793f39e commit 5d4f441
Show file tree
Hide file tree
Showing 5 changed files with 59 additions and 23 deletions.
8 changes: 5 additions & 3 deletions apps/arweave/e2e/ar_e2e.erl
Original file line number Diff line number Diff line change
Expand Up @@ -182,8 +182,9 @@ start_source_node(Node, PackingType, WalletFixture) ->

ar_e2e:assert_syncs_range(Node, 0, 4*?PARTITION_SIZE),

ar_e2e:assert_partition_size(Node, 0, SourcePacking),
ar_e2e:assert_partition_size(Node, 1, SourcePacking),
%% No overlap since we aren't syncing or repacking chunks.
ar_e2e:assert_partition_size(Node, 0, SourcePacking, ?PARTITION_SIZE),
ar_e2e:assert_partition_size(Node, 1, SourcePacking, ?PARTITION_SIZE),
ar_e2e:assert_partition_size(Node, 2, SourcePacking, floor(0.5*?PARTITION_SIZE)),

ar_e2e:assert_chunks(Node, SourcePacking, Chunks),
Expand Down Expand Up @@ -345,7 +346,8 @@ assert_does_not_sync_range(Node, StartOffset, EndOffset) ->
[Node, StartOffset, EndOffset]))).

assert_partition_size(Node, PartitionNumber, Packing) ->
assert_partition_size(Node, PartitionNumber, Packing, ?PARTITION_SIZE).
Overlap = ar_storage_module:get_overlap(Packing),
assert_partition_size(Node, PartitionNumber, Packing, ?PARTITION_SIZE + Overlap).
assert_partition_size(Node, PartitionNumber, Packing, Size) ->
?LOG_INFO("~p: Asserting partition ~p,~p is size ~p",
[Node, PartitionNumber, ar_serialize:encode_packing(Packing, true), Size]),
Expand Down
15 changes: 13 additions & 2 deletions apps/arweave/e2e/ar_repack_in_place_mine_tests.erl
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@ test_repack_in_place_mine({FromPackingType, ToPackingType}) ->
{from_packing_type, FromPackingType}, {to_packing_type, ToPackingType}]),
ValidatorNode = peer1,
RepackerNode = peer2,
ar_test_node:stop(ValidatorNode),
ar_test_node:stop(RepackerNode),
{Blocks, _AddrA, Chunks} = ar_e2e:start_source_node(
RepackerNode, FromPackingType, wallet_a),

Expand All @@ -54,8 +56,17 @@ test_repack_in_place_mine({FromPackingType, ToPackingType}) ->
mining_addr = undefined
}),

ar_e2e:assert_partition_size(RepackerNode, 0, ToPacking),
ar_e2e:assert_partition_size(RepackerNode, 1, ToPacking),
%% Due to how we launch the unpacked source node, it *does* end up syncing data in
%% the overlap. The main difference is that with the unpacked source node we launch a
%% spora node and then sync data to the unpacked node. It is the syncing process that
%% writes data to the overlap.
ExpectedPartitionSize = case FromPackingType of
unpacked -> ?PARTITION_SIZE + ar_storage_module:get_overlap(unpacked);
_ -> ?PARTITION_SIZE
end,

ar_e2e:assert_partition_size(RepackerNode, 0, ToPacking, ExpectedPartitionSize),
ar_e2e:assert_partition_size(RepackerNode, 1, ToPacking, ExpectedPartitionSize),

ar_test_node:stop(RepackerNode),

Expand Down
14 changes: 11 additions & 3 deletions apps/arweave/e2e/ar_repack_mine_tests.erl
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,8 @@ test_repack_mine({FromPackingType, ToPackingType}) ->
{from_packing_type, FromPackingType}, {to_packing_type, ToPackingType}]),
ValidatorNode = peer1,
RepackerNode = peer2,
ar_test_node:stop(ValidatorNode),
ar_test_node:stop(RepackerNode),
{Blocks, _AddrA, Chunks} = ar_e2e:start_source_node(
RepackerNode, FromPackingType, wallet_a),

Expand All @@ -57,10 +59,13 @@ test_repack_mine({FromPackingType, ToPackingType}) ->
mining_addr = AddrB
}),

Overlap = ar_storage_module:get_overlap(ToPacking),

ar_e2e:assert_syncs_range(RepackerNode, 0, 4*?PARTITION_SIZE),
ar_e2e:assert_partition_size(RepackerNode, 0, ToPacking),
ar_e2e:assert_partition_size(RepackerNode, 1, ToPacking),
ar_e2e:assert_partition_size(RepackerNode, 2, ToPacking, floor(0.5*?PARTITION_SIZE)),
ar_e2e:assert_partition_size(
RepackerNode, 2, ToPacking, floor(0.5*?PARTITION_SIZE)),
%% Don't assert chunks here. Since we have two storage modules defined, we won't know
%% which packing format will be found - which complicates the assertion. We'll rely
%% on the assert_chunks later (after we restart with only a single set of storage modules).
Expand All @@ -75,7 +80,8 @@ test_repack_mine({FromPackingType, ToPackingType}) ->
ar_e2e:assert_syncs_range(RepackerNode, 0, 4*?PARTITION_SIZE),
ar_e2e:assert_partition_size(RepackerNode, 0, ToPacking),
ar_e2e:assert_partition_size(RepackerNode, 1, ToPacking),
ar_e2e:assert_partition_size(RepackerNode, 2, ToPacking, floor(0.5*?PARTITION_SIZE)),
ar_e2e:assert_partition_size(
RepackerNode, 2, ToPacking, floor(0.5*?PARTITION_SIZE)),
ar_e2e:assert_chunks(RepackerNode, ToPacking, Chunks),
ar_e2e:assert_empty_partition(RepackerNode, 3, ToPacking),

Expand All @@ -90,7 +96,7 @@ test_repack_mine({FromPackingType, ToPackingType}) ->
ar_e2e:assert_syncs_range(RepackerNode, 0, 4*?PARTITION_SIZE),
ar_e2e:assert_partition_size(RepackerNode, 0, ToPacking),
ar_e2e:assert_partition_size(RepackerNode, 1, ToPacking),
ar_e2e:assert_partition_size(RepackerNode, 2, ToPacking),
ar_e2e:assert_partition_size(RepackerNode, 2, ToPacking, ?PARTITION_SIZE),
%% All of partition 3 is still above the disk pool threshold
ar_e2e:assert_empty_partition(RepackerNode, 3, ToPacking)
end.
Expand All @@ -101,6 +107,8 @@ test_repacking_blocked({FromPackingType, ToPackingType}) ->
{from_packing_type, FromPackingType}, {to_packing_type, ToPackingType}]),
ValidatorNode = peer1,
RepackerNode = peer2,
ar_test_node:stop(ValidatorNode),
ar_test_node:stop(RepackerNode),
{Blocks, _AddrA, Chunks} = ar_e2e:start_source_node(
RepackerNode, FromPackingType, wallet_a),

Expand Down
38 changes: 28 additions & 10 deletions apps/arweave/e2e/ar_sync_pack_mine_tests.erl
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ setup_source_node(PackingType) ->
SourceNode = peer1,
SinkNode = peer2,
ar_test_node:stop(SinkNode),
ar_test_node:stop(SourceNode),
{Blocks, _SourceAddr, Chunks} = ar_e2e:start_source_node(SourceNode, PackingType, wallet_a),

{Blocks, Chunks, PackingType}.
Expand Down Expand Up @@ -67,6 +68,9 @@ unpacked_sync_pack_mine_test_() ->
]
end}.

%% Note: we should limit the number of tests run per setup_source_node to ~4. If the run
%% gets too long, the source node may hit a difficulty adjustment, which can impact the
%% results.
unpacked_edge_case_test_() ->
{setup, fun () -> setup_source_node(unpacked) end,
fun (GenesisData) ->
Expand All @@ -78,11 +82,7 @@ unpacked_edge_case_test_() ->
instantiator(GenesisData, replica_2_9,
fun test_entropy_first_sync_pack_mine/1),
instantiator(GenesisData, replica_2_9,
fun test_entropy_last_sync_pack_mine/1),
instantiator(GenesisData, replica_2_9,
fun test_small_module_aligned_sync_pack_mine/1),
instantiator(GenesisData, replica_2_9,
fun test_small_module_unaligned_sync_pack_mine/1)
fun test_entropy_last_sync_pack_mine/1)
]
end}.

Expand All @@ -97,7 +97,25 @@ spora_2_6_edge_case_test_() ->
instantiator(GenesisData, replica_2_9,
fun test_entropy_first_sync_pack_mine/1),
instantiator(GenesisData, replica_2_9,
fun test_entropy_last_sync_pack_mine/1),
fun test_entropy_last_sync_pack_mine/1)
]
end}.

unpacked_small_module_test_() ->
{setup, fun () -> setup_source_node(unpacked) end,
fun (GenesisData) ->
[
instantiator(GenesisData, replica_2_9,
fun test_small_module_aligned_sync_pack_mine/1),
instantiator(GenesisData, replica_2_9,
fun test_small_module_unaligned_sync_pack_mine/1)
]
end}.

spora_2_6_small_module_test_() ->
{setup, fun () -> setup_source_node(spora_2_6) end,
fun (GenesisData) ->
[
instantiator(GenesisData, replica_2_9,
fun test_small_module_aligned_sync_pack_mine/1),
instantiator(GenesisData, replica_2_9,
Expand Down Expand Up @@ -337,11 +355,11 @@ test_small_module_aligned_sync_pack_mine({{Blocks, Chunks, SourcePackingType}, S
RangeSize = RangeEnd - RangeStart,

%% Make sure the expected data was synced
ar_e2e:assert_syncs_range(SinkNode, RangeStart, RangeEnd),
ar_e2e:assert_partition_size(SinkNode, 1, SinkPacking, RangeSize),
ar_e2e:assert_empty_partition(SinkNode, 1, unpacked_padded),
ar_e2e:assert_empty_partition(SinkNode, 1, unpacked),
ar_e2e:assert_chunks(SinkNode, SinkPacking, lists:sublist(Chunks, 1, 4)),
ar_e2e:assert_syncs_range(SinkNode, RangeStart, RangeEnd),

%% Make sure no extra entropy was generated
ar_e2e:assert_has_entropy(SinkNode, RangeStart, RangeEnd, StoreID),
Expand Down Expand Up @@ -384,12 +402,12 @@ test_small_module_unaligned_sync_pack_mine({{Blocks, Chunks, SourcePackingType},
RangeEnd = floor(2 * ?PARTITION_SIZE) + ar_storage_module:get_overlap(SinkPacking),
RangeSize = RangeEnd - RangeStart,

%% Make sure the expected data was synced
ar_e2e:assert_syncs_range(SinkNode, RangeStart, RangeEnd),
%% Make sure the expected data was synced
ar_e2e:assert_partition_size(SinkNode, 1, SinkPacking, RangeSize),
ar_e2e:assert_empty_partition(SinkNode, 1, unpacked_padded),
ar_e2e:assert_empty_partition(SinkNode, 1, unpacked),
ar_e2e:assert_chunks(SinkNode, SinkPacking, lists:sublist(Chunks, 5, 8)),
ar_e2e:assert_syncs_range(SinkNode, RangeStart, RangeEnd),

%% Make sure no extra entropy was generated
ar_e2e:assert_has_entropy(SinkNode, RangeStart, RangeEnd, StoreID),
Expand Down Expand Up @@ -438,7 +456,7 @@ test_disk_pool_threshold({SourcePackingType, SinkPackingType}) ->
%% Now that we mined a block, the rest of partition 2 is below the disk pool
%% threshold
ar_e2e:assert_syncs_range(SinkNode, ?PARTITION_SIZE, 4*?PARTITION_SIZE),
ar_e2e:assert_partition_size(SinkNode, 2, SinkPacking),
ar_e2e:assert_partition_size(SinkNode, 2, SinkPacking, ?PARTITION_SIZE),
%% All of partition 3 is still above the disk pool threshold
ar_e2e:assert_empty_partition(SinkNode, 3, SinkPacking),
ar_e2e:assert_does_not_sync_range(SinkNode, 0, ?PARTITION_SIZE),
Expand Down
7 changes: 2 additions & 5 deletions apps/arweave/src/ar_entropy_storage.erl
Original file line number Diff line number Diff line change
Expand Up @@ -282,7 +282,7 @@ record_entropy(ChunkEntropy, BucketEndOffset, StoreID, RewardAddr) ->
%% Sanity checks
true = byte_size(ChunkEntropy) == ?DATA_CHUNK_SIZE,
%% End sanity checks
%%

Byte = get_chunk_byte_from_bucket_end(BucketEndOffset),
CheckUnpackedChunkRecorded = ar_sync_record:get_interval(
Byte + 1, ar_chunk_storage:sync_record_id(unpacked_padded), StoreID),
Expand Down Expand Up @@ -370,10 +370,7 @@ record_entropy(ChunkEntropy, BucketEndOffset, StoreID, RewardAddr) ->

%% @doc If we are not at the beginning of the entropy, shift the offset to
%% the left. store_entropy will traverse the entire 2.9 partition shifting
%% the offset by sector size. It may happen some sub-chunks will be written
%% to the neighbouring storage module(s) on the left or on the right
%% since the storage module may be configured to be smaller than the
%% partition.
%% the offset by sector size.
reset_entropy_offset(BucketEndOffset) ->
%% Sanity checks
BucketEndOffset = ar_chunk_storage:get_chunk_bucket_end(BucketEndOffset),
Expand Down

0 comments on commit 5d4f441

Please sign in to comment.