@@ -1,4 +1,8 @@
-name: Bump Minor Version
+name: Minor release step 1 - Create new release branch
+description: |
+  Create a new `main-vX.Y` branch based on the latest release branch.
+  Create a pull request to update the version.go file from `vX.Y` to `vX.Y-rc`.
+  Next, developers should merge this PR.

 on:
   workflow_dispatch:
@@ -1,4 +1,8 @@
-name: Tag Minor Release Candidate
+name: Minor release step 2 - Tag release candidate
+description: |
+  Tag `vX.Y.0-rcN`, where the version used is the one from the latest `main-vX.Y` release branch.
+  Next, developers should test the rc.
+  If a new rc is needed, PRs with the changes should be merged to the `main-vX.Y` branch, and developers should re-run this pipeline to create a new rc tag.

 on:
   workflow_dispatch:
@@ -1,7 +1,6 @@
-name: Prepare Minor Full Release
+name: Minor release step 3 - Release branch to stable version
 description: |
-  This workflow creates pull requests to update version files for a minor full release.
-  After the PRs are merged, a tag for the stable release should be created manually.
+  Create a pull request to update the version.go file in the latest `main-vX.Y` from `vX.Y-rc` to `vX.Y`.

 on:
   workflow_dispatch:
@@ -1,4 +1,6 @@
-name: Tag Minor Full Release
+name: Minor release step 4 - Tag minor full release
+description: |
+  Tag `vX.Y.0`, where the version used is the one from the latest `main-vX.Y` release branch.

 on:
   workflow_dispatch:
@@ -1,4 +1,7 @@
-name: Bump Patch Version
+name: Patch release step 1 - Create new release branch
+description: |
+  Create a PR to update the version.go file from `vX.Y` to `vX.Y-rc` in the latest `main-vX.Y` release branch.
+  Next, developers should merge this PR and manually open PRs to `main-vX.Y` with the desired cherry-picked commits from `main`.

 on:
   workflow_dispatch:
@@ -1,4 +1,8 @@
-name: Tag Patch Release Candidate
+name: Patch release step 2 - Tag release candidate
+description: |
+  Tag `vX.Y.Z-rcN`, where the version used is the one from the latest `main-vX.Y` release branch and Z is the latest stable patch tag + 1.
+  Next, developers should test the rc.
+  If a new rc is needed, PRs with the changes should be merged to the `main-vX.Y` branch, and developers should re-run this pipeline to create a new rc tag.

 on:
   workflow_dispatch:
@@ -1,4 +1,6 @@
-name: Prepare Patch Full Release
+name: Patch release step 3 - Release branch to stable version
+description: |
+  Create a pull request to update the version.go file in the latest `main-vX.Y` from `vX.Y-rc` to `vX.Y`.

 on:
   workflow_dispatch:
@@ -1,4 +1,6 @@
-name: Tag Patch Full Release
+name: Patch release step 4 - Tag patch full release
+description: |
+  Tag `vX.Y.Z`, where the version used is the one from the latest `main-vX.Y` release branch and Z is the latest stable patch tag + 1.

 on:
   workflow_dispatch:
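Taken together, these workflows walk a release through a fixed version progression. An illustrative minor-release pass (version numbers here are hypothetical, not from this PR):

step 1: create branch main-v1.3, bump version.go to v1.3-rc
step 2: tag v1.3.0-rc1 (and v1.3.0-rc2, ... after fixes), test each rc
step 3: merge the PR moving version.go on main-v1.3 from v1.3-rc to v1.3
step 4: tag v1.3.0 as the stable release

The patch steps repeat the same cycle on an existing main-vX.Y branch, using cherry-picked commits from main and Z = latest stable patch + 1.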
13 changes: 12 additions & 1 deletion cluster/helpers.go
@@ -26,6 +26,11 @@ import (
 	"github.com/obolnetwork/charon/tbls"
 )

+const (
+	// maxDefinitionSize is the maximum allowed size for a cluster definition file (16MB).
+	maxDefinitionSize = 16 * 1024 * 1024
+)
+
 // FetchDefinition fetches cluster definition file from a remote URI.
 func FetchDefinition(ctx context.Context, url string) (Definition, error) {
 	ctx, cancel := context.WithTimeout(ctx, time.Second*10)
@@ -47,11 +52,17 @@ func FetchDefinition(ctx context.Context, url string) (Definition, error) {

 	defer resp.Body.Close()

-	buf, err := io.ReadAll(resp.Body)
+	limitedReader := io.LimitReader(resp.Body, maxDefinitionSize+1)
+
+	buf, err := io.ReadAll(limitedReader)
 	if err != nil {
 		return Definition{}, errors.Wrap(err, "read response body")
 	}

+	if len(buf) > maxDefinitionSize {
+		return Definition{}, errors.New("definition file too large", z.Int("max_bytes", maxDefinitionSize))
+	}
+
 	var res Definition
 	if err := json.Unmarshal(buf, &res); err != nil {
 		return Definition{}, errors.Wrap(err, "unmarshal definition")
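Why `maxDefinitionSize+1` rather than `maxDefinitionSize`: limiting the reader to one byte past the cap is what lets the `len(buf) > maxDefinitionSize` check distinguish an exactly-16MB body (allowed) from an oversized one (rejected), without ever buffering more than 16MB plus one byte. A minimal standalone sketch of the same pattern, using only the standard library and illustrative names (readCapped and maxBody are not from the charon codebase):

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

const maxBody = 16 * 1024 * 1024 // same 16MB cap as maxDefinitionSize

// readCapped reads at most maxBody bytes and errors if the source holds more.
// Limiting the reader to maxBody+1 is the key trick: a body of exactly
// maxBody bytes passes, while anything longer yields len(buf) > maxBody.
func readCapped(r io.Reader) ([]byte, error) {
	buf, err := io.ReadAll(io.LimitReader(r, maxBody+1))
	if err != nil {
		return nil, err
	}

	if len(buf) > maxBody {
		return nil, errors.New("body too large")
	}

	return buf, nil
}

func main() {
	buf, err := readCapped(strings.NewReader("small body"))
	fmt.Println(len(buf), err) // 10 <nil>
}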
19 changes: 19 additions & 0 deletions cluster/helpers_internal_test.go
@@ -94,6 +94,11 @@ func TestFetchDefinition(t *testing.T) {
 			_, _ = w.Write(b)
 		case "/nonok":
 			w.WriteHeader(http.StatusInternalServerError)
+		case "/tooLarge":
+			// Simulate a response that exceeds maxDefinitionSize (16MB):
+			// write 17MB of data to trigger the size limit.
+			largeData := make([]byte, 17*1024*1024)
+			_, _ = w.Write(largeData)
 		}
 	}))
 	defer server.Close()
@@ -103,6 +108,7 @@
 		url     string
 		want    Definition
 		wantErr bool
+		errMsg  string
 	}{
 		{
 			name: "Fetch valid definition",
@@ -122,12 +128,25 @@
 			want:    invalidDef,
 			wantErr: true,
 		},
+		{
+			name:    "Definition file too large (memory exhaustion protection)",
+			url:     fmt.Sprintf("%s/%s", server.URL, "tooLarge"),
+			want:    invalidDef,
+			wantErr: true,
+			errMsg:  "definition file too large",
+		},
 	}

 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			got, err := FetchDefinition(context.Background(), tt.url)
 			if tt.wantErr {
 				require.Error(t, err)
+
+				if tt.errMsg != "" {
+					require.ErrorContains(t, err, tt.errMsg)
+				}
+
 				return
 			}
17 changes: 12 additions & 5 deletions dkg/dkg_test.go
@@ -437,16 +437,17 @@ func verifyDKGResults(t *testing.T, def cluster.Definition, dir string) {
 		}
 	}

-	// Ensure keystores can generate valid tbls aggregate signature.
+	// Ensure keystores can generate a valid tbls threshold aggregate signature.
 	for i := range def.NumValidators {
-		var sigs []tbls.Signature
+		sigsByIdx := make(map[int]tbls.Signature)
+		msg := []byte("data")

 		for j := range len(def.Operators) {
-			msg := []byte("data")
 			sig, err := tbls.Sign(secretShares[i][j], msg)
 			require.NoError(t, err)

-			sigs = append(sigs, sig)
+			// Use 1-based share indices, as production does.
+			sigsByIdx[j+1] = sig

 			// Ensure all public shares can verify the partial signature
 			for _, lock := range locks {
@@ -461,7 +462,13 @@ func verifyDKGResults(t *testing.T, def cluster.Definition, dir string) {
 			}
 		}

-		_, err := tbls.Aggregate(sigs)
+		// Use ThresholdAggregate (Lagrange interpolation) instead of simple Aggregate
+		// to ensure share indices are correct - this is what production uses.
+		aSig, err := tbls.ThresholdAggregate(sigsByIdx)
 		require.NoError(t, err)
+
+		// Verify against the validator's full public key.
+		err = tbls.Verify(tbls.PublicKey(locks[0].Validators[i].PubKey), msg, aSig)
 		require.NoError(t, err)
 	}
 }
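For context on the switch: `tbls.Aggregate` simply combines partial signatures, while `tbls.ThresholdAggregate` recovers the group signature by Lagrange interpolation over the share indices, evaluated at x = 0, where the Shamir-shared secret lives. A sketch of the standard recovery formula (general threshold-BLS math, not charon-specific):

$$\sigma = \sum_{j \in S} \lambda_j\, \sigma_j, \qquad \lambda_j = \prod_{\substack{m \in S \\ m \neq j}} \frac{x_m}{x_m - x_j}$$

Because interpolation targets x = 0, every share must sit at a nonzero evaluation point: a share at x_j = 0 would coincide with the secret's own evaluation point and make the coefficients degenerate. That is why the test keys `sigsByIdx` with the 1-based `j+1`.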
34 changes: 27 additions & 7 deletions dkg/frostp2p.go
@@ -140,8 +140,13 @@ func newBcastCallback(peers map[peer.ID]cluster.NodeIdx, round1CastsRecv chan *p
 			return errors.New("invalid round 1 casts message")
 		}

+		peerNode, ok := peers[pID]
+		if !ok {
+			return errors.New("unknown peer in round 1 cast", z.Any("peer", p2p.PeerName(pID)))
+		}
+
 		for _, cast := range msg.GetCasts() {
-			if int(cast.GetKey().GetSourceId()) != peers[pID].ShareIdx {
+			if int(cast.GetKey().GetSourceId()) != peerNode.ShareIdx {
 				return errors.New("invalid round 1 cast source ID")
 			} else if cast.GetKey().GetTargetId() != 0 {
 				return errors.New("invalid round 1 cast target ID")
@@ -174,8 +179,13 @@ func newBcastCallback(peers map[peer.ID]cluster.NodeIdx, round1CastsRecv chan *p
 			return errors.New("invalid round 2 casts message")
 		}

+		peerNode, ok := peers[pID]
+		if !ok {
+			return errors.New("unknown peer in round 2 cast", z.Any("peer", p2p.PeerName(pID)))
+		}
+
 		for _, cast := range msg.GetCasts() {
-			if int(cast.GetKey().GetSourceId()) != peers[pID].ShareIdx {
+			if int(cast.GetKey().GetSourceId()) != peerNode.ShareIdx {
 				return errors.New("invalid round 2 cast source ID")
 			} else if cast.GetKey().GetTargetId() != 0 {
 				return errors.New("invalid round 2 cast target ID")
@@ -209,18 +219,28 @@ func newP2PCallback(p2pNode host.Host, peers map[peer.ID]cluster.NodeIdx, round1
 			return nil, false, errors.New("invalid round 1 p2p message")
 		}

+		sourcePeer, ok := peers[pID]
+		if !ok {
+			return nil, false, errors.New("unknown source peer in round 1 p2p", z.Any("peer", p2p.PeerName(pID)))
+		}
+
+		targetPeer, ok := peers[p2pNode.ID()]
+		if !ok {
+			return nil, false, errors.New("unknown target peer in round 1 p2p", z.Any("peer", p2p.PeerName(p2pNode.ID())))
+		}
+
 		for _, share := range msg.GetShares() {
-			if int(share.GetKey().GetSourceId()) != peers[pID].ShareIdx {
+			if int(share.GetKey().GetSourceId()) != sourcePeer.ShareIdx {
 				return nil, false, errors.New("invalid round 1 p2p source ID")
-			} else if int(share.GetKey().GetTargetId()) != peers[p2pNode.ID()].ShareIdx {
+			} else if int(share.GetKey().GetTargetId()) != targetPeer.ShareIdx {
 				return nil, false, errors.New("invalid round 1 p2p target ID")
 			} else if int(share.GetKey().GetValIdx()) < 0 || int(share.GetKey().GetValIdx()) >= numVals {
 				return nil, false, errors.New("invalid round 1 p2p validator index")
 			}
 		}

 		if dedupRound1P2P[pID] {
-			log.Debug(ctx, "Ignoring duplicate round 2 message", z.Any("peer", p2p.PeerName(pID)))
+			log.Debug(ctx, "Ignoring duplicate round 1 message", z.Any("peer", p2p.PeerName(pID)))
 			return nil, false, nil
 		}

@@ -452,7 +472,7 @@ func round1CastFromProto(cast *pb.FrostRound1Cast) (msgKey, frost.Round1Bcast, e
 
 	ci, err := curve.Scalar.SetBytes(cast.GetCi())
 	if err != nil {
-		return msgKey{}, frost.Round1Bcast{}, errors.Wrap(err, "decode c1 scalar")
+		return msgKey{}, frost.Round1Bcast{}, errors.Wrap(err, "decode ci scalar")
 	}

 	var comms []curves.Point
@@ -498,7 +518,7 @@ func round2CastFromProto(cast *pb.FrostRound2Cast) (msgKey, frost.Round2Bcast, e
 
 	vkShare, err := curve.Point.FromAffineCompressed(cast.GetVkShare())
 	if err != nil {
-		return msgKey{}, frost.Round2Bcast{}, errors.Wrap(err, "decode c1 scalar")
+		return msgKey{}, frost.Round2Bcast{}, errors.Wrap(err, "decode verification key share")
 	}

 	key, err := keyFromProto(cast.GetKey())
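The new `ok` checks close a subtle gap: a Go map lookup with a missing key returns the zero value, so before this change a message from a peer absent from `peers` was compared against `ShareIdx == 0` rather than rejected outright. A tiny standalone illustration of the pitfall (the types here are simplified stand-ins, not charon's):

package main

import "fmt"

type nodeIdx struct{ ShareIdx int }

func main() {
	peers := map[string]nodeIdx{"alice": {ShareIdx: 1}}

	// Single-value lookup: a missing key silently yields the zero value,
	// so a forged message claiming source ID 0 would have matched it.
	unknown := peers["mallory"]
	fmt.Println(unknown.ShareIdx) // 0

	// Two-value lookup, as the fixed code uses: unknown peers are rejected.
	if _, ok := peers["mallory"]; !ok {
		fmt.Println("unknown peer rejected")
	}
}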
7 changes: 6 additions & 1 deletion dkg/nodesigs.go
@@ -118,7 +118,7 @@ func (n *nodeSigBcast) setSig(sig []byte, slot int) {
 }

 // broadcastCallback is the default bcast.Callback for nodeSigBcast.
-func (n *nodeSigBcast) broadcastCallback(ctx context.Context, _ peer.ID, _ string, msg proto.Message) error {
+func (n *nodeSigBcast) broadcastCallback(ctx context.Context, senderID peer.ID, _ string, msg proto.Message) error {
 	nodeSig, ok := msg.(*dkgpb.MsgNodeSig)
 	if !ok {
 		return errors.New("invalid node sig type")
@@ -138,6 +138,11 @@ func (n *nodeSigBcast) broadcastCallback(ctx context.Context, _ peer.ID, _ strin
 		return errors.New("invalid peer index")
 	}

+	// Verify that the actual sender's peer ID matches the claimed peer index.
+	if n.peers[msgPeerIdx].ID != senderID {
+		return errors.New("sender peer ID does not match claimed peer index")
+	}
+
 	lockHash, err := n.lockHash(ctx)
 	if err != nil {
 		return errors.Wrap(err, "lock hash wait")
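This check binds the transport-level libp2p sender to the index claimed inside the message, so one operator can no longer submit a node signature on behalf of another. A simplified standalone sketch of the same binding (peer IDs reduced to strings; all names here are illustrative, not charon's):

package main

import (
	"errors"
	"fmt"
)

// verifyClaimedIndex mirrors the new check: the message's claimed peer index
// must resolve to the peer that actually sent it over the wire.
func verifyClaimedIndex(peerIDs []string, claimedIdx int, senderID string) error {
	if claimedIdx < 0 || claimedIdx >= len(peerIDs) {
		return errors.New("invalid peer index")
	}

	if peerIDs[claimedIdx] != senderID {
		return errors.New("sender peer ID does not match claimed peer index")
	}

	return nil
}

func main() {
	peers := []string{"peerA", "peerB", "peerC"}

	// peerB claims index 2, which belongs to peerC: rejected.
	fmt.Println(verifyClaimedIndex(peers, 2, "peerB"))

	// peerC claims its own index: accepted.
	fmt.Println(verifyClaimedIndex(peers, 2, "peerC"))
}

This is also why the existing test cases in the diff below switch the sender from peers[0] to peers[2]: their messages claim PeerIndex 2, so only peers[2] now passes the identity check.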
22 changes: 20 additions & 2 deletions dkg/nodesigs_internal_test.go
@@ -227,6 +227,24 @@ func TestSigsCallbacks(t *testing.T) {
 		require.ErrorContains(t, err, "invalid node sig type")
 	})

+	t.Run("sender peer ID mismatch", func(t *testing.T) {
+		ns.lockHashData = bytes.Repeat([]byte{42}, 32)
+
+		msg := &dkgpb.MsgNodeSig{
+			Signature: bytes.Repeat([]byte{42}, 65),
+			PeerIndex: uint32(2), // Claims to be from peer 2.
+		}
+
+		// But it is actually sent by peer 1.
+		err := ns.broadcastCallback(context.Background(),
+			peers[1],
+			"",
+			msg,
+		)
+
+		require.ErrorContains(t, err, "sender peer ID does not match claimed peer index")
+	})
+
 	t.Run("signature verification failed", func(t *testing.T) {
 		ns.lockHashData = bytes.Repeat([]byte{42}, 32)

@@ -236,7 +254,7 @@
 		}

 		err := ns.broadcastCallback(context.Background(),
-			peers[0],
+			peers[2],
 			"",
 			msg,
 		)
@@ -251,7 +269,7 @@
 		}

 		err := ns.broadcastCallback(context.Background(),
-			peers[0],
+			peers[2],
 			"",
 			msg,
 		)