
Head tracking batch #417

Draft: wants to merge 82 commits into base: main
Changes from all commits (82 commits)
b0bbeca
sketch of the page table
Scooletz Oct 23, 2024
a69330a
renaming and simplifying HeadBatch
Scooletz Nov 12, 2024
0817c05
renaming
Scooletz Nov 12, 2024
922b5a6
reverse mapping
Scooletz Nov 12, 2024
d1c7675
more extraction
Scooletz Nov 12, 2024
b3c1469
Merge branch 'main' into page-table
Scooletz Nov 13, 2024
2bc911f
moving more members, making base context less dependent
Scooletz Nov 13, 2024
6409faf
stats and build
Scooletz Nov 13, 2024
c218587
head tracking batch
Scooletz Nov 13, 2024
96e7c33
separation of apis
Scooletz Nov 13, 2024
30edee7
towards implementation of the tracking
Scooletz Nov 13, 2024
4dea197
refactored root finding
Scooletz Nov 14, 2024
b199b61
towards testable MultiHeadChain
Scooletz Nov 14, 2024
8e37291
reading from multihead and proper disposal
Scooletz Nov 14, 2024
3fb6d8a
green test with reads
Scooletz Nov 14, 2024
164a8ad
GetAtWriting to ensure that MultiHead can track pages properly with a…
Scooletz Nov 14, 2024
2797d33
clearing
Scooletz Nov 14, 2024
d1d17f6
multi block
Scooletz Nov 14, 2024
6862627
moving metadata to Commit of the head
Scooletz Nov 15, 2024
4bc3872
ref counting proposed batches
Scooletz Nov 15, 2024
fee1ed1
fix doubled root creation
Scooletz Nov 15, 2024
86f6675
new API to support non-mmapped writes
Scooletz Nov 15, 2024
989e726
flusher in MultiHead
Scooletz Nov 15, 2024
83f5882
into the channel
Scooletz Nov 15, 2024
33e088a
async disposable and finalization mechanism
Scooletz Nov 18, 2024
fa2f90a
finality and block lock
Scooletz Nov 18, 2024
cc6cbc8
reporting
Scooletz Nov 18, 2024
ec253ba
tests fixups
Scooletz Nov 19, 2024
62c6035
proper finalization handling for long chains of blocks
Scooletz Nov 19, 2024
e92e611
ref counting interface
Scooletz Nov 19, 2024
2dc59f8
HeadReader
Scooletz Nov 19, 2024
738ede5
reader offloading state
Scooletz Nov 19, 2024
e5f4879
readonly batch
Scooletz Nov 19, 2024
66f1ed4
no trace in pool
Scooletz Nov 19, 2024
a67ff8e
reading added
Scooletz Nov 20, 2024
d31da73
reader management
Scooletz Nov 20, 2024
b6abdb9
almost migrated
Scooletz Nov 20, 2024
b9103bf
more
Scooletz Nov 20, 2024
c3dd619
towards working tests
Scooletz Nov 21, 2024
e941655
moving blockchain to use multihead
Scooletz Nov 21, 2024
84d580b
stats updated
Scooletz Nov 21, 2024
f7293a0
towards working WorldState
Scooletz Nov 21, 2024
988edf6
abandoned registration and more
Scooletz Nov 22, 2024
7711369
two more tests
Scooletz Nov 22, 2024
9bccfb3
concurrent dictionary for mappings
Scooletz Nov 22, 2024
1afa56b
proper min batch id calculation as well as the reader creation
Scooletz Nov 25, 2024
41869b7
reversed map removal
Scooletz Nov 26, 2024
671f167
proper overwrites criteria
Scooletz Nov 26, 2024
f074f97
no hash normalization by setting the first root and proper disposal
Scooletz Nov 27, 2024
316e331
test fixes
Scooletz Nov 27, 2024
8ac678b
no copy needed
Scooletz Nov 27, 2024
c1033ec
one more with zero Keccak
Scooletz Nov 27, 2024
11a5836
test fixups
Scooletz Nov 27, 2024
d89e30a
Merge branch 'main' into page-table
Scooletz Nov 27, 2024
aeb0f13
Reader disposal made awaitable and properly handled in the FlusherTask
Scooletz Nov 28, 2024
b05b8a6
restored BitMapFilter
Scooletz Nov 28, 2024
f73b43e
Added last finalized api
Scooletz Nov 29, 2024
f099f27
test added for the LeaseLatest
Scooletz Nov 29, 2024
717ccd6
flushing events
Scooletz Nov 29, 2024
365fb3f
throw on not found
Scooletz Nov 29, 2024
494e783
MultiHead allows to create non-committable world state
Scooletz Nov 29, 2024
00397e5
pageTable cache in front of a dictionary
Scooletz Dec 3, 2024
c9916cb
atomic clean up
Scooletz Dec 3, 2024
9e48831
update cache only on mapping changes
Scooletz Dec 4, 2024
cb1990d
override removal made a bit faster
Scooletz Dec 4, 2024
a790db3
close prefetcher after commit
Scooletz Dec 4, 2024
bef584a
Merge branch 'main' into page-table
Scooletz Dec 6, 2024
a686b6c
prefetching disposal got right plus test fixes
Scooletz Dec 9, 2024
06f783e
Update src/Paprika/Store/PagedDb.cs
Scooletz Dec 9, 2024
97da87c
Merge branch 'main' into page-table
Scooletz Feb 5, 2025
70e6cf7
automatic finality fix
Scooletz Feb 6, 2025
5085efa
format
Scooletz Feb 6, 2025
ddcabe0
clear id cache
Scooletz Feb 6, 2025
9529b56
formatting
Scooletz Feb 6, 2025
dd6a5af
reopening prefetcher
Scooletz Feb 7, 2025
e896560
create page table properly sized
Scooletz Feb 7, 2025
5a914dd
comments
Scooletz Feb 7, 2025
74a7cc2
more comments
Scooletz Feb 7, 2025
714d139
more comments
Scooletz Feb 7, 2025
3c8a022
reader caches dictionary statically to do not create a lot of allocat…
Scooletz Feb 7, 2025
2bf3a1a
last reader reporter
Scooletz Feb 7, 2025
2a7edd1
use capacity
Scooletz Feb 7, 2025
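
Taken together, the commits above replace the WaitTillFlush-then-Finalize pattern with an awaitable Finalize and route reads through the new MultiHeadChain readers. A minimal sketch of the updated flow, as exercised by the test changes below (test-fixture names such as Mb and Key0 are assumed from the fixtures, not part of the PR's public surface):

// Sketch only: build one block on top of the empty tree, commit it and await its flush.
using var db = PagedDb.NativeMemoryDb(1 * Mb, 2);
await using var blockchain = new Blockchain(db, new ComputeMerkleBehavior());

using var block = blockchain.StartNew(Keccak.EmptyTreeHash);
block.SetAccount(Key0, new Account(1, 1));
var hash = block.Commit(1);

// Finalize now returns an awaitable that completes once the flusher has persisted the root,
// replacing the previous WaitTillFlush(hash) + Finalize(hash) pair.
await blockchain.Finalize(hash);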
22 changes: 10 additions & 12 deletions src/Paprika.Cli/VerifyWholeTreeSettings.cs
@@ -11,18 +11,16 @@ public class Command : Command<VerifyWholeTreeSettings>
{
public override int Execute(CommandContext context, VerifyWholeTreeSettings settings)
{
using var db = settings.BuildDb();

using var read = db.BeginReadOnlyBatch();
using var latest = Blockchain.StartReadOnlyLatestFromDb(db);


AnsiConsole.WriteLine($"The latest state root hash persisted: {latest.Hash}.");
AnsiConsole.WriteLine("Verification of the whole state tree in progress...");

var keccak = new ComputeMerkleBehavior().CalculateStateRootHash(latest);

AnsiConsole.WriteLine($"The computed state root hash {keccak.ToString()}");
// using var db = settings.BuildDb();
//
// using var read = db.BeginReadOnlyBatch();
// using var latest = Blockchain.StartReadOnlyLatestFromDb(db);
//
// AnsiConsole.WriteLine("Checking whole tree...");
//
// var keccak = new ComputeMerkleBehavior().CalculateStateRootHash(read);
//
// AnsiConsole.WriteLine($"Keccak {keccak.ToString()}");

return 0;
}
172 changes: 59 additions & 113 deletions src/Paprika.Tests/Chain/BlockchainTests.cs
@@ -1,5 +1,4 @@
using System.Buffers.Binary;
using System.Collections;
using System.Diagnostics.Metrics;
using System.Runtime.CompilerServices;
using FluentAssertions;
@@ -183,13 +182,12 @@
block.SetAccount(Key0, new Account(no, no));

// Finalize but only previous so that the dependency is there and should be managed properly
finality = blockchain.WaitTillFlush(hash);
blockchain.Finalize(hash);
finality = blockchain.Finalize(hash);

hash = block.Commit(no);
block.Dispose();
}

Check warning (Code scanning / CodeQL, test): Dispose may not be called if an exception is thrown during execution. Dispose is missed if an exception is thrown by the call to StartNew, SetAccount, Finalize, or Commit.
// DO NOT FINALIZE the last block! it will clean the dependencies and destroy the purpose of the test
// blockchain.Finalize(block.Hash);

@@ -220,51 +218,48 @@

block.Dispose();

var finalized = Task.CompletedTask;
// Setup semaphore so that first N blocks are given, the last should be automatically flushed
var flushed = new SemaphoreSlim(automaticFinalityAfter, int.MaxValue);

Check warning (Code scanning / CodeQL, test): Missing Dispose call on local IDisposable. Disposable 'SemaphoreSlim' is created but not disposed.

blockchain.Flushed += (_, _) => flushed.Release();

for (uint no = 2; no < count; no++)
{
// create new, set, commit and dispose
block = blockchain.StartNew(hash);
block.SetAccount(Key0, new Account(no, no));

if (no > automaticFinalityAfter)
{
finalized = blockchain.WaitTillFlush(hashes.Dequeue());
finalized.IsCompleted.Should().BeFalse("The automatic finality should be reached only on the commit");
}

hash = block.Commit(no);
hashes.Enqueue(hash);

// Should be finalized after the block breaching the finality is committed
await finalized;
await flushed.WaitAsync();

block.Dispose();
}
}
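
The CodeQL warning above about the undisposed SemaphoreSlim could likely be addressed by scoping the semaphore with a using declaration; a minimal sketch, assuming nothing outside the test keeps a reference to it:

// Dispose the semaphore even if an assertion throws mid-test.
using var flushed = new SemaphoreSlim(automaticFinalityAfter, int.MaxValue);
blockchain.Flushed += (_, _) => flushed.Release();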

[Test]
public async Task Account_destruction_same_block()

Check warning (Code scanning / CodeQL, test): Dispose may not be called if an exception is thrown during execution. Dispose is missed if an exception is thrown by the call to StartNew, SetAccount, Dequeue, BeFalse, Should, Commit, or Enqueue.
{
using var db = PagedDb.NativeMemoryDb(1 * Mb, 2);
await using var blockchain = new Blockchain(db, new ComputeMerkleBehavior());

using var block = blockchain.StartNew(Keccak.EmptyTreeHash);
var before = Keccak.EmptyTreeHash;

var before = block.Hash;
using var worldState = blockchain.StartNew(before);

block.SetAccount(Key0, new Account(1, 1));
block.SetStorage(Key0, Key1, stackalloc byte[1] { 1 });
worldState.SetAccount(Key0, new Account(1, 1));
worldState.SetStorage(Key0, Key1, stackalloc byte[1] { 1 });

// force hash calculation
var mid = block.Hash;
var mid = worldState.Hash;

block.DestroyAccount(Key0);
block.GetAccount(Key0).Should().Be(new Account(0, 0));
block.AssertNoStorageAt(Key0, Key1);
worldState.DestroyAccount(Key0);
worldState.GetAccount(Key0).Should().Be(new Account(0, 0));
worldState.AssertNoStorageAt(Key0, Key1);

var after = block.Hash;
var after = worldState.Hash;

before.Should().Be(after);
before.Should().NotBe(mid);
@@ -328,13 +323,13 @@

if (finality.Count > 64)
{
blockchain.Finalize(finality.Dequeue());
await blockchain.Finalize(finality.Dequeue());
}
}

while (finality.TryDequeue(out var finalized))
{
blockchain.Finalize(finalized);
await blockchain.Finalize(finalized);
}
}

@@ -344,9 +339,8 @@
using var db = PagedDb.NativeMemoryDb(1 * Mb, 2);
await using var blockchain = new Blockchain(db, new ComputeMerkleBehavior());

using var block1 = blockchain.StartNew(Keccak.EmptyTreeHash);

var before = block1.Hash;
var before = Keccak.EmptyTreeHash;
using var block1 = blockchain.StartNew(before);

block1.SetAccount(Key0, new Account(1, 1));
block1.SetStorage(Key0, Key1, stackalloc byte[1] { 1 });
@@ -451,34 +445,28 @@
using var db = PagedDb.NativeMemoryDb(1 * Mb, 2);
await using var blockchain = new Blockchain(db, new ComputeMerkleBehavior());

using var block1 = blockchain.StartNew(Keccak.EmptyTreeHash);

var before = block1.Hash;

block1.SetAccount(Key0, new Account(1, 1));
block1.SetStorage(Key0, Key1, stackalloc byte[1] { 1 });

var hash = block1.Commit(blockNo++);
using var worldState = blockchain.StartNew(Keccak.EmptyTreeHash);

blockchain.Finalize(hash);
worldState.SetAccount(Key1, new Account(2, 2));

// Poor man's await on finalization flushed
await blockchain.WaitTillFlush(hash);
var before = worldState.Commit(blockNo++);
await blockchain.Finalize(before);

using var block2 = blockchain.StartNew(hash);
worldState.SetAccount(Key0, new Account(1, 1));
worldState.SetStorage(Key0, Key1, [1]);

block2.DestroyAccount(Key0);
var hash2 = block2.Commit(blockNo);
var hash = worldState.Commit(blockNo++);

var wait = blockchain.WaitTillFlush(blockNo);
await blockchain.Finalize(hash);

blockchain.Finalize(hash2);
worldState.DestroyAccount(Key0);
var hash2 = worldState.Commit(blockNo);

await wait;
await blockchain.Finalize(hash2);

using var read = db.BeginReadOnlyBatch();

read.Metadata.BlockNumber.Should().Be(2);
read.Metadata.BlockNumber.Should().Be(3);

read.AssertNoAccount(Key0);
read.AssertNoStorageAt(Key0, Key1);
@@ -499,7 +487,7 @@

await using (var blockchain = new Blockchain(db, behavior))
{
var hash = Keccak.Zero;
var hash = Keccak.EmptyTreeHash;

for (uint i = 1; i < blockCount + 1; i++)
{
@@ -567,11 +555,7 @@
const int block2 = 2;

var keccak2A = block2A.Commit(block2);
var task = blockchain.WaitTillFlush(block2);

blockchain.Finalize(keccak2A);

await task;
await blockchain.Finalize(keccak2A);

// start in the past
using (var block2B = blockchain.StartNew(keccak1A))
@@ -640,40 +624,7 @@
}

[Test]
public async Task Reports_ancestor_blocks()
{
using var db = PagedDb.NativeMemoryDb(1 * Mb);

await using var blockchain = new Blockchain(db, new PreCommit(), null, CacheBudget.Options.None,
CacheBudget.Options.None, 1);

// Arrange
const uint block1 = 1;
var (hash1, _) = BuildBlock(block1, Keccak.EmptyTreeHash);

const uint block2 = 2;
var (hash2, _) = BuildBlock(block2, hash1);

const uint block3 = 3;
var (hash3, last) = BuildBlock(block3, hash2);

// Assert stats in order
last.Stats.Ancestors
.Should()
.BeEquivalentTo(new[] { (block2, hash2), (block1, hash1) });

return;

(Keccak hash, IWorldState state) BuildBlock(uint number, in Keccak parent)
{
using var block = blockchain.StartNew(parent);
block.SetAccount(Keccak.OfAnEmptyString, new Account(number, number));
return (block.Commit(number), block);
}
}

[Test]
public async Task Read_accessor_updated_can_access_data()
public async Task Read_accessor()
{
const byte historyDepth = 16;
using var db = PagedDb.NativeMemoryDb(16 * Mb, historyDepth);
@@ -698,9 +649,7 @@

var h = hashes[historyDepth];

var task = blockchain.WaitTillFlush(h);
blockchain.Finalize(h);
await task;
await blockchain.Finalize(h);

// omit 0th
for (uint i = 10; i < count; i++)
@@ -723,45 +672,43 @@
static Account Value(uint i) => new(i, i);
}

[TestCase(true)]
[TestCase(false)]
public async Task Read_accessor_can_preload_readers_for_history(bool preloadHistory)
[Test]
public async Task Read_accessor_can_preload_readers_for_history()
{
const byte historyDepth = 16;
using var db = PagedDb.NativeMemoryDb(16 * Mb, historyDepth);

await using var blockchain = new Blockchain(db, new ComputeMerkleBehavior());

const int count = 128;

var parent = Keccak.EmptyTreeHash;

var hashes = new Keccak[count + 1];
hashes[0] = parent;

for (uint i = 0; i < count; i++)
await using (var blockchain = new Blockchain(db, new ComputeMerkleBehavior()))
{
using var block = blockchain.StartNew(parent);
block.SetAccount(Key(i), Value(i));
parent = hashes[i + 1] = block.Commit(i + 1);
}
var parent = Keccak.EmptyTreeHash;
hashes[0] = parent;

for (uint i = 0; i < count; i++)
{
using var block = blockchain.StartNew(parent);
block.SetAccount(Key(i), Value(i));
parent = hashes[i + 1] = block.Commit(i + 1);
}

// Flush the last
var task = blockchain.WaitTillFlush(parent);
blockchain.Finalize(parent);
await task;
// Flush the last
await blockchain.Finalize(parent);
}

// Reload blockchain so that accessor is built from zero
await using var reloaded = new Blockchain(db, new ComputeMerkleBehavior());
var accessor = blockchain.BuildReadOnlyAccessor(preloadHistory);
var accessor = reloaded.BuildReadOnlyAccessor();

// Assert only last historyDepth hashes
for (uint i = count - historyDepth + 1; i < count; i++)
{
var root = hashes[i + 1];

accessor.HasState(root).Should().Be(preloadHistory, $"Failed to properly assert at {i} out of {count}.");
accessor.GetAccount(root, Key(i)).Should().Be(preloadHistory ? Value(i) : default);
accessor.HasState(root).Should()
.BeTrue($"Failed to properly assert at {i} out of {count}.");
accessor.GetAccount(root, Key(i)).Should().Be(Value(i));
}

return;
@@ -776,26 +723,25 @@
static Account Value(uint i) => new(i, i);
}
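
The reworked test drops the preloadHistory parameter: after the blockchain is reopened, BuildReadOnlyAccessor is expected to preload readers for the retained history unconditionally. A short usage sketch under that assumption, reusing hashes, Key and Value from the test above:

// Sketch only: rebuild the accessor from a reopened database; the last historyDepth roots should resolve.
await using var reloaded = new Blockchain(db, new ComputeMerkleBehavior());
var accessor = reloaded.BuildReadOnlyAccessor();

var recentRoot = hashes[count];                    // the most recently committed root
accessor.HasState(recentRoot);                     // expected: true
accessor.GetAccount(recentRoot, Key(count - 1));   // expected: Value(count - 1)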


[Test]
public async Task StartNew_when_throws_should_not_lock_db_readonly_batch()
{
using var db = PagedDb.NativeMemoryDb(1 * Mb, 2);

await using var blockchain = new Blockchain(db, new PreCommit());

const int none = 0;

db.CountReadOnlyBatches().Should().Be(none);
var expected = db.CountReadOnlyBatches();

var nonExistentParent = new Random(13).NextKeccak();
try
{
var exception = Assert.Throws<Exception>(() => blockchain.StartNew(nonExistentParent));
exception.Message.Should().Contain("dependencies");
exception.Message.Should().Contain("There is no root page with the given stateHash");
}
finally
{
db.CountReadOnlyBatches().Should().Be(none);
db.CountReadOnlyBatches().Should().Be(expected);
}
}

@@ -826,4 +772,4 @@
{
state.GetStorage(address, storage, stackalloc byte[32]).IsEmpty.Should().BeTrue();
}
}
}
7 changes: 6 additions & 1 deletion src/Paprika.Tests/Chain/BufferPoolTests.cs
@@ -67,7 +67,7 @@ public void Rented_is_clear()
}

[Test]
public void Big_pool()
public unsafe void Big_pool()
{
const int pageCount = 1024;
using var pool = new BufferPool(pageCount, BufferPool.PageTracking.None);
@@ -80,5 +80,10 @@ public void Big_pool()
}

set.Count.Should().Be(pageCount);

foreach (var page in set)
{
pool.Return(new Page((byte*)page.ToPointer()));
}
}
}