aboutsummaryrefslogtreecommitdiffstats
path: root/core/lattice-data_test.go
diff options
context:
space:
mode:
authorMission Liao <mission.liao@dexon.org>2018-10-05 09:12:26 +0800
committerGitHub <noreply@github.com>2018-10-05 09:12:26 +0800
commitefcb301ec31acf7b87312cbec962682148555999 (patch)
tree76ba2fbe5a7c7017005f771ab95102b997973f1f /core/lattice-data_test.go
parent6773c56fe29511aca0f4345e9fd3758ca05e174f (diff)
downloadtangerine-consensus-efcb301ec31acf7b87312cbec962682148555999.tar
tangerine-consensus-efcb301ec31acf7b87312cbec962682148555999.tar.gz
tangerine-consensus-efcb301ec31acf7b87312cbec962682148555999.tar.bz2
tangerine-consensus-efcb301ec31acf7b87312cbec962682148555999.tar.lz
tangerine-consensus-efcb301ec31acf7b87312cbec962682148555999.tar.xz
tangerine-consensus-efcb301ec31acf7b87312cbec962682148555999.tar.zst
tangerine-consensus-efcb301ec31acf7b87312cbec962682148555999.zip
core: find block in db (#174)
* Make sure block pool is large enough. It's safe to use a larger blockPool when the number of chains is smaller. * Construct latticeData via config. * Seek acked blocks in blockdb when unable to find them in memory cache. In previous implementation, we assume our cache in memory is enough to perform DAG's sanity check. However, it's no longer true when the number of chains might be changed between rounds. * Simplify purge. Remove the relation to purge block by chainStatus.
Diffstat (limited to 'core/lattice-data_test.go')
-rw-r--r--core/lattice-data_test.go39
1 file changed, 29 insertions, 10 deletions
diff --git a/core/lattice-data_test.go b/core/lattice-data_test.go
index 92e66a4..9e3ce52 100644
--- a/core/lattice-data_test.go
+++ b/core/lattice-data_test.go
@@ -54,8 +54,10 @@ func (s *LatticeDataTestSuite) genTestCase1() (data *latticeData) {
req = s.Require()
err error
)
-
- data = newLatticeData(round, chainNum, 2*time.Nanosecond, 1000*time.Second)
+ db, err := blockdb.NewMemBackedBlockDB()
+ req.NoError(err)
+ data = newLatticeData(
+ db, round, s.newConfig(chainNum, 2*time.Nanosecond, 1000*time.Second))
// Add genesis blocks.
for i := uint32(0); i < chainNum; i++ {
b = s.prepareGenesisBlock(i)
@@ -157,6 +159,16 @@ func (s *LatticeDataTestSuite) genTestCase1() (data *latticeData) {
return
}
+func (s *LatticeDataTestSuite) newConfig(numChains uint32,
+ minBlockInterval, maxBlockInterval time.Duration) *latticeDataConfig {
+
+ return &latticeDataConfig{
+ numChains: numChains,
+ minBlockTimeInterval: minBlockInterval,
+ maxBlockTimeInterval: maxBlockInterval,
+ }
+}
+
// hashBlock is a helper to hash a block and check if any error.
func (s *LatticeDataTestSuite) hashBlock(b *types.Block) {
var err error
@@ -352,13 +364,14 @@ func (s *LatticeDataTestSuite) TestSanityCheckInDataLayer() {
b = &types.Block{
ParentHash: h,
Hash: common.NewRandomHash(),
- Timestamp: time.Now().UTC().Add(data.maxBlockTimeInterval),
Position: types.Position{
ChainID: 2,
Height: 1,
},
Acks: common.NewSortedHashes(common.Hashes{h}),
}
+ b.Timestamp = data.chains[2].getBlockByHeight(0).Timestamp.Add(
+ data.getConfig(0).maxBlockTimeInterval + time.Nanosecond)
s.hashBlock(b)
err = data.sanityCheck(b)
req.NotNil(err)
@@ -390,14 +403,16 @@ func (s *LatticeDataTestSuite) TestRandomIntensiveAcking() {
var (
round uint64
chainNum uint32 = 19
- data = newLatticeData(round, chainNum, 0, 1000*time.Second)
req = s.Require()
delivered []*types.Block
extracted []*types.Block
b *types.Block
err error
)
-
+ db, err := blockdb.NewMemBackedBlockDB()
+ req.NoError(err)
+ data := newLatticeData(
+ db, round, s.newConfig(chainNum, 0, 1000*time.Second))
// Generate genesis blocks.
for i := uint32(0); i < chainNum; i++ {
b = s.prepareGenesisBlock(i)
@@ -417,6 +432,10 @@ func (s *LatticeDataTestSuite) TestRandomIntensiveAcking() {
s.hashBlock(b)
delivered, err = data.addBlock(b)
req.Nil(err)
+ for _, b := range delivered {
+ req.NoError(db.Put(*b))
+ }
+ req.NoError(data.purgeBlocks(delivered))
extracted = append(extracted, delivered...)
}
@@ -459,7 +478,8 @@ func (s *LatticeDataTestSuite) TestRandomlyGeneratedBlocks() {
revealedHashesAsString := map[string]struct{}{}
deliveredHashesAsString := map[string]struct{}{}
for i := 0; i < repeat; i++ {
- data := newLatticeData(round, chainNum, 0, 1000*time.Second)
+ data := newLatticeData(
+ nil, round, s.newConfig(chainNum, 0, 1000*time.Second))
deliveredHashes := common.Hashes{}
revealedHashes := common.Hashes{}
revealer.Reset()
@@ -542,7 +562,7 @@ func (s *LatticeDataTestSuite) TestPrepareBlock() {
delivered []*types.Block
err error
data = newLatticeData(
- round, chainNum, 0, 3000*time.Second)
+ nil, round, s.newConfig(chainNum, 0, 3000*time.Second))
)
// Setup genesis blocks.
b00 := s.prepareGenesisBlock(0)
@@ -656,8 +676,7 @@ func (s *LatticeDataTestSuite) TestPurge() {
nextAck: []uint64{1, 1, 1, 1},
nextOutput: 1,
}
- hashes := chain.purge()
- s.Equal(hashes, common.Hashes{b00.Hash})
+ chain.purge()
s.Equal(chain.minHeight, uint64(1))
s.Require().Len(chain.blocks, 2)
s.Equal(chain.blocks[0].Hash, b01.Hash)
@@ -670,7 +689,7 @@ func (s *LatticeDataTestSuite) TestNextPosition() {
s.Equal(data.nextPosition(0), types.Position{ChainID: 0, Height: 4})
// Test 'NextPosition' method when lattice is empty.
- data = newLatticeData(0, 4, 0, 1000*time.Second)
+ data = newLatticeData(nil, 0, s.newConfig(4, 0, 1000*time.Second))
s.Equal(data.nextPosition(0), types.Position{ChainID: 0, Height: 0})
}