
Commit

Merge branch 'develop' of https://github.com/iotaledger/goshimmer into feat/clock-module
hmoog committed Mar 6, 2023
2 parents b7e9557 + d487676 commit 877b317
Showing 51 changed files with 694 additions and 516 deletions.
20 changes: 10 additions & 10 deletions packages/app/blockissuer/blockfactory/factory.go
@@ -25,7 +25,7 @@ import (
 type Factory struct {
     Events *Events
 
-    slotTimeProvider *slot.TimeProvider
+    slotTimeProviderFunc func() *slot.TimeProvider
 
     // referenceProvider *ReferenceProvider
     identity *identity.LocalIdentity
@@ -39,15 +39,15 @@ type Factory struct {
 }
 
 // NewBlockFactory creates a new block factory.
-func NewBlockFactory(localIdentity *identity.LocalIdentity, slotTimeProvider *slot.TimeProvider, blockRetriever func(blockID models.BlockID) (block *blockdag.Block, exists bool), tipSelector TipSelectorFunc, referencesFunc ReferencesFunc, commitmentFunc CommitmentFunc, opts ...options.Option[Factory]) *Factory {
+func NewBlockFactory(localIdentity *identity.LocalIdentity, slotTimeProviderFunc func() *slot.TimeProvider, blockRetriever func(blockID models.BlockID) (block *blockdag.Block, exists bool), tipSelector TipSelectorFunc, referencesFunc ReferencesFunc, commitmentFunc CommitmentFunc, opts ...options.Option[Factory]) *Factory {
     return options.Apply(&Factory{
-        Events:           newEvents(),
-        identity:         localIdentity,
-        slotTimeProvider: slotTimeProvider,
-        blockRetriever:   blockRetriever,
-        tipSelector:      tipSelector,
-        referencesFunc:   referencesFunc,
-        commitmentFunc:   commitmentFunc,
+        Events:               newEvents(),
+        identity:             localIdentity,
+        slotTimeProviderFunc: slotTimeProviderFunc,
+        blockRetriever:       blockRetriever,
+        tipSelector:          tipSelector,
+        referencesFunc:       referencesFunc,
+        commitmentFunc:       commitmentFunc,
 
         optsTipSelectionTimeout:       10 * time.Second,
         optsTipSelectionRetryInterval: 200 * time.Millisecond,
@@ -117,7 +117,7 @@ func (f *Factory) createBlockWithPayload(p payload.Payload, references models.Pa
     }
     block.SetSignature(signature)
 
-    if err = block.DetermineID(f.slotTimeProvider); err != nil {
+    if err = block.DetermineID(f.slotTimeProviderFunc()); err != nil {
         return nil, errors.Wrap(err, "there is a problem with the block syntax")
     }
 
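
The change above is the pattern this merge applies everywhere: components stop holding a *slot.TimeProvider and instead hold a func() *slot.TimeProvider, resolving the provider at call time. Since genesis time and slot duration are now persisted in storage settings (see the snapshotcreator changes below), the provider may not exist yet when a component is constructed. A minimal, self-contained sketch of the idea — TimeProvider here is a stand-in, not the hive.go type, and its index formula is illustrative:

package main

import (
    "fmt"
    "time"
)

// TimeProvider is a stand-in for slot.TimeProvider: it maps wall-clock time
// onto slot indices from a genesis time and a slot duration in seconds.
type TimeProvider struct {
    genesisUnixTime int64
    slotDuration    int64
}

// IndexFromTime uses an illustrative 0-based scheme; the exact base used
// by hive.go is not visible in this diff.
func (t *TimeProvider) IndexFromTime(ts time.Time) int64 {
    return (ts.Unix() - t.genesisUnixTime) / t.slotDuration
}

// Factory captures a provider function instead of a provider value, so the
// provider can be created or swapped after the factory is built.
type Factory struct {
    slotTimeProviderFunc func() *TimeProvider
}

func main() {
    var current *TimeProvider // unknown at construction time

    f := &Factory{slotTimeProviderFunc: func() *TimeProvider { return current }}

    // The provider becomes available later, e.g. once settings are loaded.
    current = &TimeProvider{genesisUnixTime: time.Now().Unix() - 35, slotDuration: 10}

    fmt.Println("current slot index:", f.slotTimeProviderFunc().IndexFromTime(time.Now()))
    // prints: current slot index: 3
}
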
10 changes: 9 additions & 1 deletion packages/app/blockissuer/blockfactory/factory_test.go
@@ -55,7 +55,15 @@ func TestFactory_IssuePayload(t *testing.T) {
 
     pay := payload.NewGenericDataPayload([]byte("test"))
 
-    factory := NewBlockFactory(localIdentity, slotTimeProvider, blockRetriever, tipSelectorFunc, referencesFunc, commitmentFunc)
+    factory := NewBlockFactory(localIdentity,
+        func() *slot.TimeProvider {
+            return slotTimeProvider
+        },
+        blockRetriever,
+        tipSelectorFunc,
+        referencesFunc,
+        commitmentFunc,
+    )
     createdBlock, err := factory.CreateBlock(pay, 2)
     require.NoError(t, err)
 
2 changes: 1 addition & 1 deletion packages/app/blockissuer/ratesetter/testframework.go
@@ -68,7 +68,7 @@ func (tf *TestFramework) CreateBlock(issuer int) *models.Block {
     parents := models.NewParentBlockIDs()
     parents.AddStrong(models.EmptyBlockID)
     blk := models.NewBlock(models.WithIssuer(tf.localIdentity[issuer].PublicKey()), models.WithParents(parents))
-    assert.NoError(tf.test, blk.DetermineID(tf.Protocol.Instance.SlotTimeProvider))
+    assert.NoError(tf.test, blk.DetermineID(tf.Protocol.Instance.SlotTimeProvider()))
     return blk
 }
 
2 changes: 1 addition & 1 deletion packages/app/retainer/retainer.go
@@ -85,7 +85,7 @@ func (r *Retainer) BlockMetadata(blockID models.BlockID) (metadata *BlockMetadat
 
     if metadata.M.Accepted && !metadata.M.Confirmed && blockID.Index() <= r.protocol.Engine().LastConfirmedSlot() {
         metadata.M.ConfirmedBySlot = true
-        metadata.M.ConfirmedBySlotTime = r.protocol.SlotTimeProvider.EndTime(blockID.Index())
+        metadata.M.ConfirmedBySlotTime = r.protocol.SlotTimeProvider().EndTime(blockID.Index())
     }
 }
 
4 changes: 2 additions & 2 deletions packages/app/retainer/retainer_test.go
@@ -141,7 +141,7 @@ func TestRetainer_BlockMetadata_Evicted(t *testing.T) {
         retainer.Shutdown()
     })
 
-    b := tf.Engine.BlockDAG.CreateBlock("A", models.WithIssuingTime(tf.Instance.SlotTimeProvider.GenesisTime().Add(70*time.Second)))
+    b := tf.Engine.BlockDAG.CreateBlock("A", models.WithIssuingTime(tf.Instance.SlotTimeProvider().GenesisTime().Add(70*time.Second)))
     tf.Engine.BlockDAG.IssueBlocks("A")
 
     workers.WaitChildren()
@@ -150,7 +150,7 @@ func TestRetainer_BlockMetadata_Evicted(t *testing.T) {
     require.True(t, exists)
 
     // Trigger eviction through commitment creation
-    tf.Engine.Instance.NotarizationManager.SetAcceptanceTime(tf.Instance.SlotTimeProvider.EndTime(tf.Instance.SlotTimeProvider.IndexFromTime(tf.Instance.SlotTimeProvider.GenesisTime().Add(70*time.Second)) + 8))
+    tf.Engine.Instance.NotarizationManager.SetAcceptanceTime(tf.Instance.SlotTimeProvider().EndTime(tf.Instance.SlotTimeProvider().IndexFromTime(tf.Instance.SlotTimeProvider().GenesisTime().Add(70*time.Second)) + 8))
     workers.WaitChildren()
 
     meta, exists := retainer.BlockMetadata(block.ID())
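
The one-liner above packs three provider calls into a single eviction trigger: take the block's issuing time (genesis + 70s), convert it to a slot index, move eight slots ahead, and set acceptance time to that slot's end. With the 10-second default slot duration the arithmetic is easy to check by hand; this sketch uses an illustrative 0-based index and a made-up genesis time, since neither is pinned down by the diff:

package main

import "fmt"

func main() {
    genesis := int64(1678060800) // made-up genesis unix time
    slotDuration := int64(10)    // default from the snapshotcreator options below

    issuing := genesis + 70                      // block "A" issued 70s after genesis
    index := (issuing - genesis) / slotDuration  // slot 7 under a 0-based scheme
    target := index + 8                          // eight slots later, as in the test
    endTime := genesis + (target+1)*slotDuration // end of the target slot

    fmt.Println(index, target, endTime-genesis) // 7 15 160
}
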
28 changes: 20 additions & 8 deletions packages/core/snapshotcreator/options.go
@@ -5,7 +5,7 @@ import (
 
     "github.com/iotaledger/goshimmer/packages/core/database"
     "github.com/iotaledger/goshimmer/packages/protocol/ledger/vm"
-    "github.com/iotaledger/hive.go/core/slot"
+    "github.com/iotaledger/goshimmer/packages/protocol/ledger/vm/devnetvm"
     "github.com/iotaledger/hive.go/crypto/ed25519"
     "github.com/iotaledger/hive.go/runtime/options"
 
@@ -33,17 +33,22 @@ type Options struct {
     InitialAttestationsPublicKey []ed25519.PublicKey
     // AttestAll indicates that all nodes will be included in the attestation.
     AttestAll bool
-    // SlotTimeProvider provides the genesis time of the snapshot.
-    SlotTimeProvider *slot.TimeProvider
+    // GenesisUnixTime provides the genesis time of the snapshot.
+    GenesisUnixTime int64
+    // SlotDuration defines the duration in seconds of each slot.
+    SlotDuration int64
 
     dataBaseVersion database.Version
     vm              vm.VM
 }
 
 func NewOptions(opts ...options.Option[Options]) *Options {
     return options.Apply(&Options{
-        FilePath:         "snapshot.bin",
-        SlotTimeProvider: slot.NewTimeProvider(time.Now().Unix(), 10),
+        FilePath:        "snapshot.bin",
+        dataBaseVersion: 1,
+        vm:              new(devnetvm.VM),
+        GenesisUnixTime: time.Now().Unix(),
+        SlotDuration:    10,
     }, opts)
 }

@@ -174,10 +179,17 @@ func WithVM(vm vm.VM) options.Option[Options] {
     }
 }
 
-// WithSlotTimeProvider sets the slot time provider to use for the snapshot.
-func WithSlotTimeProvider(provider *slot.TimeProvider) options.Option[Options] {
+// WithGenesisUnixTime provides the genesis time of the snapshot.
+func WithGenesisUnixTime(unixTime int64) options.Option[Options] {
     return func(m *Options) {
-        m.SlotTimeProvider = provider
+        m.GenesisUnixTime = unixTime
     }
 }
 
+// WithSlotDuration defines the duration in seconds of each slot.
+func WithSlotDuration(duration int64) options.Option[Options] {
+    return func(m *Options) {
+        m.SlotDuration = duration
+    }
+}
+

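
Callers that previously built a provider up front via WithSlotTimeProvider(slot.NewTimeProvider(genesisTime, 10)) now pass the two raw values and let downstream components build providers from persisted settings. A small test-style sketch of how the new options compose — it only touches names visible in this diff:

package snapshotcreator

import (
    "testing"
    "time"
)

// TestNewOptions sketches the replacement for WithSlotTimeProvider: the
// genesis time and slot duration now travel as plain values on Options.
func TestNewOptions(t *testing.T) {
    genesis := time.Now().Unix()

    opt := NewOptions(
        WithGenesisUnixTime(genesis), // was: WithSlotTimeProvider(slot.NewTimeProvider(genesis, 10))
        WithSlotDuration(10),         // seconds per slot
    )

    if opt.GenesisUnixTime != genesis || opt.SlotDuration != 10 {
        t.Fatalf("options not applied: %d, %d", opt.GenesisUnixTime, opt.SlotDuration)
    }
}
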
19 changes: 12 additions & 7 deletions packages/core/snapshotcreator/snapshotcreator.go
@@ -37,26 +37,31 @@ import (
 // | node1       | node1       |
 // | node2       | node2       |.
 
-func CreateSnapshot(opts ...options.Option[Options]) (err error) {
+func CreateSnapshot(opts ...options.Option[Options]) error {
     opt := NewOptions(opts...)
 
     workers := workerpool.NewGroup("CreateSnapshot")
     defer workers.Shutdown()
     s := storage.New(lo.PanicOnErr(os.MkdirTemp(os.TempDir(), "*")), opt.dataBaseVersion)
     defer s.Shutdown()
 
-    if err = s.Commitments.Store(commitment.NewEmptyCommitment()); err != nil {
+    if err := s.Commitments.Store(commitment.NewEmptyCommitment()); err != nil {
         return errors.Wrap(err, "failed to store empty commitment")
     }
-    if err = s.Settings.SetChainID(lo.PanicOnErr(s.Commitments.Load(0)).ID()); err != nil {
+    if err := s.Settings.SetGenesisUnixTime(opt.GenesisUnixTime); err != nil {
+        return errors.Wrap(err, "failed to set the genesis time")
+    }
+    if err := s.Settings.SetSlotDuration(opt.SlotDuration); err != nil {
+        return errors.Wrap(err, "failed to set the slot duration")
+    }
+    if err := s.Settings.SetChainID(lo.PanicOnErr(s.Commitments.Load(0)).ID()); err != nil {
        return errors.Wrap(err, "failed to set chainID")
     }
 
-    engineInstance := engine.New(workers.CreateGroup("Engine"), s, blocktime.NewProvider(), dpos.NewProvider(), mana1.NewProvider(), opt.SlotTimeProvider, engine.WithLedgerOptions(ledger.WithVM(opt.vm)))
+    engineInstance := engine.New(workers.CreateGroup("Engine"), s, blocktime.NewProvider(), dpos.NewProvider(), mana1.NewProvider(), engine.WithLedgerOptions(ledger.WithVM(opt.vm)))
     defer engineInstance.Shutdown()
 
-    err = opt.createGenesisOutput(engineInstance)
-    if err != nil {
+    if err := opt.createGenesisOutput(engineInstance); err != nil {
         return err
     }
     engineInstance.NotarizationManager.Attestations.SetLastCommittedSlot(-1)
@@ -102,7 +107,7 @@ func CreateSnapshot(opts ...options.Option[Options]) (err error) {
 func (m *Options) attest(engineInstance *engine.Engine, nodePublicKey ed25519.PublicKey) error {
     if _, err := engineInstance.NotarizationManager.Attestations.Add(&notarization.Attestation{
         IssuerPublicKey: nodePublicKey,
-        IssuingTime:     time.Unix(m.SlotTimeProvider.GenesisUnixTime()-1, 0),
+        IssuingTime:     time.Unix(engineInstance.SlotTimeProvider().GenesisUnixTime()-1, 0),
     }); err != nil {
         return err
     }
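
Note the ordering CreateSnapshot now depends on: SetGenesisUnixTime and SetSlotDuration are written to the settings before engine.New runs, and afterwards the time provider is only ever read back through engineInstance.SlotTimeProvider(). The engine presumably derives its provider from those settings at construction; a stand-in sketch of that contract (none of these types are the goshimmer originals):

package main

import "fmt"

// Settings stands in for the storage settings written by CreateSnapshot.
type Settings struct {
    GenesisUnixTime int64
    SlotDuration    int64
}

// Engine stands in for engine.Engine: it snapshots the settings into its
// time provider when constructed, which is why the settings writes must
// precede engine.New in CreateSnapshot.
type Engine struct {
    genesisUnixTime int64
    slotDuration    int64
}

func NewEngine(s *Settings) *Engine {
    return &Engine{genesisUnixTime: s.GenesisUnixTime, slotDuration: s.SlotDuration}
}

// SlotTimeProviderGenesis mirrors engineInstance.SlotTimeProvider().GenesisUnixTime().
func (e *Engine) SlotTimeProviderGenesis() int64 { return e.genesisUnixTime }

func main() {
    s := &Settings{GenesisUnixTime: 1678060800, SlotDuration: 10} // write settings first...
    e := NewEngine(s)                                             // ...then build the engine from them
    fmt.Println(e.SlotTimeProviderGenesis() - 1)                  // attest() issues at genesis-1
}
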
10 changes: 3 additions & 7 deletions packages/protocol/congestioncontrol/congestioncontrol.go
@@ -6,7 +6,6 @@ import (
     "github.com/iotaledger/goshimmer/packages/protocol/congestioncontrol/icca/scheduler"
     "github.com/iotaledger/goshimmer/packages/protocol/engine"
     "github.com/iotaledger/goshimmer/packages/protocol/models"
-    "github.com/iotaledger/hive.go/core/slot"
     "github.com/iotaledger/hive.go/lo"
     "github.com/iotaledger/hive.go/runtime/event"
     "github.com/iotaledger/hive.go/runtime/options"
@@ -15,18 +14,15 @@
 type CongestionControl struct {
     Events *Events
 
-    slotTimeProvider *slot.TimeProvider
-
     scheduler      *scheduler.Scheduler
     schedulerMutex sync.RWMutex
 
     optsSchedulerOptions []options.Option[scheduler.Scheduler]
 }
 
-func New(slotTimeProvider *slot.TimeProvider, opts ...options.Option[CongestionControl]) *CongestionControl {
+func New(opts ...options.Option[CongestionControl]) *CongestionControl {
     return options.Apply(&CongestionControl{
-        Events:           NewEvents(),
-        slotTimeProvider: slotTimeProvider,
+        Events: NewEvents(),
     }, opts)
 }
 

Expand All @@ -47,7 +43,7 @@ func (c *CongestionControl) LinkTo(engine *engine.Engine) {

c.scheduler = scheduler.New(
engine.EvictionState,
c.slotTimeProvider,
engine.SlotTimeProvider(),
engine.Consensus.BlockGadget.IsBlockAccepted,
engine.ThroughputQuota.BalanceByIDs,
engine.ThroughputQuota.TotalBalance,
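
CongestionControl no longer receives a provider in New; engine-scoped dependencies are picked up in LinkTo, presumably so the same instance can be re-wired when the protocol switches engines. A minimal stand-in sketch of that wiring (illustrative types, not the goshimmer API):

package main

import "fmt"

// TimeProvider and Engine stand in for slot.TimeProvider and engine.Engine.
type TimeProvider struct{ genesisUnixTime int64 }

type Engine struct{ timeProvider *TimeProvider }

func (e *Engine) SlotTimeProvider() *TimeProvider { return e.timeProvider }

type Scheduler struct{ timeProvider *TimeProvider }

// CongestionControl keeps no provider field of its own; everything
// engine-scoped is resolved at link time instead of construction time.
type CongestionControl struct{ scheduler *Scheduler }

func (c *CongestionControl) LinkTo(e *Engine) {
    c.scheduler = &Scheduler{timeProvider: e.SlotTimeProvider()}
}

func main() {
    cc := &CongestionControl{}
    cc.LinkTo(&Engine{timeProvider: &TimeProvider{genesisUnixTime: 1678060800}})
    fmt.Println(cc.scheduler.timeProvider.genesisUnixTime)
}
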
@@ -27,7 +27,7 @@ func TestScheduler_StartStop(t *testing.T) {
         t.Skip("skipping test in short mode.")
     }
     workers := workerpool.NewGroup(t.Name())
-    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), slotTimeProvider)
+    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"))
     tf.Scheduler.Start()
     // Multiple calls to start should not create problems
     tf.Scheduler.Start()
@@ -42,11 +42,11 @@
 
 func TestScheduler_AddBlock(t *testing.T) {
     workers := workerpool.NewGroup(t.Name())
-    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), slotTimeProvider)
+    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"))
     tf.Scheduler.Start()
 
     blk := virtualvoting.NewBlock(blockdag.NewBlock(models.NewBlock(models.WithStrongParents(tf.Tangle.BlockDAG.BlockIDs("Genesis"))), blockdag.WithSolid(true), blockdag.WithOrphaned(true)), virtualvoting.WithBooked(true), virtualvoting.WithStructureDetails(markers.NewStructureDetails()))
-    require.NoError(t, blk.DetermineID(slotTimeProvider))
+    require.NoError(t, blk.DetermineID(tf.SlotTimeProvider()))
 
     tf.Scheduler.AddBlock(blk)
 
@@ -60,7 +60,7 @@ func TestScheduler_AddBlock(t *testing.T) {
 
 func TestScheduler_Submit(t *testing.T) {
     workers := workerpool.NewGroup(t.Name())
-    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), slotTimeProvider)
+    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"))
     tf.Scheduler.Start()
 
     blk := tf.CreateSchedulerBlock(models.WithIssuer(selfNode.PublicKey()))
@@ -72,7 +72,7 @@ func TestScheduler_Submit(t *testing.T) {
 
 func TestScheduler_updateActiveNodeList(t *testing.T) {
     workers := workerpool.NewGroup(t.Name())
-    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), slotTimeProvider)
+    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"))
 
     tf.Scheduler.updateActiveIssuersList(map[identity.ID]int64{})
     require.Equal(t, 0, tf.Scheduler.buffer.NumActiveIssuers())
@@ -125,7 +125,7 @@ func TestScheduler_updateActiveNodeList(t *testing.T) {
 
 func TestScheduler_Dropped(t *testing.T) {
     workers := workerpool.NewGroup(t.Name())
-    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), slotTimeProvider, WithMaxBufferSize(numBlocks/2))
+    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), WithMaxBufferSize(numBlocks/2))
 
     tf.CreateIssuer("nomana", 0)
     tf.CreateIssuer("other", 10) // Add a second issuer so that totalMana is not zero!
@@ -168,7 +168,7 @@ func TestScheduler_Dropped(t *testing.T) {
 
 func TestScheduler_Schedule(t *testing.T) {
     workers := workerpool.NewGroup(t.Name())
-    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), slotTimeProvider)
+    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"))
 
     tf.CreateIssuer("peer", 10)
 
@@ -197,7 +197,7 @@ func TestScheduler_Schedule(t *testing.T) {
 
 func TestScheduler_HandleOrphanedBlock_Ready(t *testing.T) {
     workers := workerpool.NewGroup(t.Name())
-    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), slotTimeProvider)
+    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"))
 
     tf.CreateIssuer("peer", 10)
 
@@ -229,7 +229,7 @@ func TestScheduler_HandleOrphanedBlock_Ready(t *testing.T) {
 
 func TestScheduler_HandleOrphanedBlock_Scheduled(t *testing.T) {
     workers := workerpool.NewGroup(t.Name())
-    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), slotTimeProvider)
+    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"))
 
     tf.CreateIssuer("peer", 10)
 
@@ -262,7 +262,7 @@ func TestScheduler_HandleOrphanedBlock_Scheduled(t *testing.T) {
 
 func TestScheduler_HandleOrphanedBlock_Unready(t *testing.T) {
     workers := workerpool.NewGroup(t.Name())
-    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), slotTimeProvider)
+    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"))
 
     tf.CreateIssuer("peer", 10)
 
@@ -292,7 +292,7 @@ func TestScheduler_HandleOrphanedBlock_Unready(t *testing.T) {
 
 func TestScheduler_SkipConfirmed(t *testing.T) {
     workers := workerpool.NewGroup(t.Name())
-    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), slotTimeProvider, WithAcceptedBlockScheduleThreshold(time.Minute))
+    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), WithAcceptedBlockScheduleThreshold(time.Minute))
 
     tf.CreateIssuer("peer", 10)
 
@@ -395,7 +395,7 @@ func TestScheduler_SkipConfirmed(t *testing.T) {
 
 func TestScheduler_Time(t *testing.T) {
     workers := workerpool.NewGroup(t.Name())
-    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), slotTimeProvider)
+    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"))
 
     tf.CreateIssuer("peer", 10)
 
@@ -440,7 +440,7 @@ func TestScheduler_Issue(t *testing.T) {
     defer debug.SetEnabled(false)
 
     workers := workerpool.NewGroup(t.Name())
-    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), slotTimeProvider)
+    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"))
 
     tf.CreateIssuer("peer", 10)
 
@@ -491,7 +491,7 @@ func TestScheduler_Issue(t *testing.T) {
 
 func TestSchedulerFlow(t *testing.T) {
     workers := workerpool.NewGroup(t.Name())
-    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), slotTimeProvider)
+    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"))
 
     tf.CreateIssuer("peer", 10)
     tf.CreateIssuer("self", 10)
@@ -542,7 +542,7 @@ func TestSchedulerParallelSubmit(t *testing.T) {
     const totalBlkCount = 200
 
     workers := workerpool.NewGroup(t.Name())
-    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"), slotTimeProvider)
+    tf := NewTestFramework(t, workers.CreateGroup("SchedulerTestFramework"))
 
     tf.Scheduler.Events.Error.Hook(func(err error) {
         require.Failf(t, "unexpected error", "error event triggered: %v", err)