feat(mpc-system): integrate reliability mechanisms and enable party-driven architecture
- Enable SubscribeSessionEvents for automatic session participation
- Integrate heartbeat mechanism with pending message count
- Add ACK sending after message receipt for reliable delivery
- Add party activity tracking in session coordinator
- Add CountPendingByParty for heartbeat response
- Add retry package with exponential backoff for gRPC clients
- Add memory-based message broker and event publisher adapters
- Add account service integration for keygen completion
- Add party timeout checking background job
- Add notification service stub for future implementation

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
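The retry package itself is not included in the hunks below. As a rough illustration of the "exponential backoff for gRPC clients" item above, here is a minimal sketch; the package name, function signature, base delay, and jitter strategy are assumptions, not the actual implementation:

package retry

import (
	"context"
	"math/rand"
	"time"
)

// Do calls fn up to maxAttempts times, sleeping between attempts with
// exponential backoff plus jitter. It stops early when fn succeeds or the
// context is cancelled. Illustrative sketch only.
func Do(ctx context.Context, maxAttempts int, baseDelay time.Duration, fn func(ctx context.Context) error) error {
	var err error
	for attempt := 0; attempt < maxAttempts; attempt++ {
		if err = fn(ctx); err == nil {
			return nil
		}
		// Exponential backoff: baseDelay * 2^attempt, plus up to 50% jitter.
		delay := baseDelay << uint(attempt)
		delay += time.Duration(rand.Int63n(int64(delay)/2 + 1))
		select {
		case <-time.After(delay):
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return err
}

A caller could then wrap any unary RPC, for example retry.Do(ctx, 5, 200*time.Millisecond, func(ctx context.Context) error { _, err := client.JoinSession(ctx, req); return err }).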
This commit is contained in:
parent 34f0f7b897
commit 135e821386
@@ -16,7 +16,9 @@
       "Bash(powershell -Command:*)",
       "Bash(go build:*)",
       "Bash(git add:*)",
-      "Bash(git commit:*)"
+      "Bash(git commit:*)",
+      "Bash(git push:*)",
+      "Bash(git pull:*)"
     ],
     "deny": [],
     "ask": []
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.36.10
-// protoc v3.12.4
+// protoc v6.33.1
 // source: api/proto/session_coordinator.proto

 package coordinator
@ -24,12 +24,13 @@ const (
|
|||
// CreateSessionRequest creates a new MPC session
|
||||
type CreateSessionRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
SessionType string `protobuf:"bytes,1,opt,name=session_type,json=sessionType,proto3" json:"session_type,omitempty"` // "keygen" or "sign"
|
||||
ThresholdN int32 `protobuf:"varint,2,opt,name=threshold_n,json=thresholdN,proto3" json:"threshold_n,omitempty"` // Total number of parties
|
||||
ThresholdT int32 `protobuf:"varint,3,opt,name=threshold_t,json=thresholdT,proto3" json:"threshold_t,omitempty"` // Minimum required parties
|
||||
Participants []*ParticipantInfo `protobuf:"bytes,4,rep,name=participants,proto3" json:"participants,omitempty"`
|
||||
SessionType string `protobuf:"bytes,1,opt,name=session_type,json=sessionType,proto3" json:"session_type,omitempty"` // "keygen" or "sign"
|
||||
ThresholdN int32 `protobuf:"varint,2,opt,name=threshold_n,json=thresholdN,proto3" json:"threshold_n,omitempty"` // Total number of parties
|
||||
ThresholdT int32 `protobuf:"varint,3,opt,name=threshold_t,json=thresholdT,proto3" json:"threshold_t,omitempty"` // Minimum required parties
|
||||
Participants []*ParticipantInfo `protobuf:"bytes,4,rep,name=participants,proto3" json:"participants,omitempty"` // Optional: if empty, coordinator selects automatically
|
||||
MessageHash []byte `protobuf:"bytes,5,opt,name=message_hash,json=messageHash,proto3" json:"message_hash,omitempty"` // Required for sign sessions
|
||||
ExpiresInSeconds int64 `protobuf:"varint,6,opt,name=expires_in_seconds,json=expiresInSeconds,proto3" json:"expires_in_seconds,omitempty"` // Session expiration time
|
||||
PartyComposition *PartyComposition `protobuf:"bytes,7,opt,name=party_composition,json=partyComposition,proto3" json:"party_composition,omitempty"` // Optional: party composition requirements for auto-selection
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
|
@ -106,6 +107,74 @@ func (x *CreateSessionRequest) GetExpiresInSeconds() int64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (x *CreateSessionRequest) GetPartyComposition() *PartyComposition {
|
||||
if x != nil {
|
||||
return x.PartyComposition
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PartyComposition specifies requirements for automatic party selection
|
||||
type PartyComposition struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
PersistentCount int32 `protobuf:"varint,1,opt,name=persistent_count,json=persistentCount,proto3" json:"persistent_count,omitempty"` // Number of persistent parties (store shares in DB)
|
||||
DelegateCount int32 `protobuf:"varint,2,opt,name=delegate_count,json=delegateCount,proto3" json:"delegate_count,omitempty"` // Number of delegate parties (return shares to user)
|
||||
TemporaryCount int32 `protobuf:"varint,3,opt,name=temporary_count,json=temporaryCount,proto3" json:"temporary_count,omitempty"` // Number of temporary parties
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *PartyComposition) Reset() {
|
||||
*x = PartyComposition{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *PartyComposition) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PartyComposition) ProtoMessage() {}
|
||||
|
||||
func (x *PartyComposition) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[1]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PartyComposition.ProtoReflect.Descriptor instead.
|
||||
func (*PartyComposition) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *PartyComposition) GetPersistentCount() int32 {
|
||||
if x != nil {
|
||||
return x.PersistentCount
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *PartyComposition) GetDelegateCount() int32 {
|
||||
if x != nil {
|
||||
return x.DelegateCount
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *PartyComposition) GetTemporaryCount() int32 {
|
||||
if x != nil {
|
||||
return x.TemporaryCount
|
||||
}
|
||||
return 0
|
||||
}
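To show how the new PartyComposition message and the new CreateSessionResponse fields fit together, here is a hedged usage sketch from the caller's side. The import path, server address, and concrete counts are assumptions; the field and method names come from the generated code in this diff.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed import path for the generated "coordinator" package.
	coordinator "example.com/mpc/api/proto/coordinator"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("connect: %v", err)
	}
	defer conn.Close()

	client := coordinator.NewSessionCoordinatorClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Leave Participants empty and describe the desired composition instead,
	// so the coordinator selects parties automatically.
	resp, err := client.CreateSession(ctx, &coordinator.CreateSessionRequest{
		SessionType:      "keygen",
		ThresholdN:       3,
		ThresholdT:       2,
		ExpiresInSeconds: 300,
		PartyComposition: &coordinator.PartyComposition{
			PersistentCount: 2,
			DelegateCount:   1,
			TemporaryCount:  0,
		},
	})
	if err != nil {
		log.Fatalf("CreateSession: %v", err)
	}
	log.Printf("session %s, selected parties %v, delegate %s",
		resp.GetSessionId(), resp.GetSelectedParties(), resp.GetDelegatePartyId())
}

Leaving Participants empty and supplying PartyComposition delegates party selection to the coordinator, which reports its choice back through SelectedParties and DelegatePartyId.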
|
||||
|
||||
// ParticipantInfo contains information about a participant
|
||||
type ParticipantInfo struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
|
|
@ -117,7 +186,7 @@ type ParticipantInfo struct {
|
|||
|
||||
func (x *ParticipantInfo) Reset() {
|
||||
*x = ParticipantInfo{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[1]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -129,7 +198,7 @@ func (x *ParticipantInfo) String() string {
|
|||
func (*ParticipantInfo) ProtoMessage() {}
|
||||
|
||||
func (x *ParticipantInfo) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[1]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[2]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -142,7 +211,7 @@ func (x *ParticipantInfo) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use ParticipantInfo.ProtoReflect.Descriptor instead.
|
||||
func (*ParticipantInfo) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{1}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *ParticipantInfo) GetPartyId() string {
|
||||
|
|
@ -172,7 +241,7 @@ type DeviceInfo struct {
|
|||
|
||||
func (x *DeviceInfo) Reset() {
|
||||
*x = DeviceInfo{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[2]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -184,7 +253,7 @@ func (x *DeviceInfo) String() string {
|
|||
func (*DeviceInfo) ProtoMessage() {}
|
||||
|
||||
func (x *DeviceInfo) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[2]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[3]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -197,7 +266,7 @@ func (x *DeviceInfo) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use DeviceInfo.ProtoReflect.Descriptor instead.
|
||||
func (*DeviceInfo) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{2}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *DeviceInfo) GetDeviceType() string {
|
||||
|
|
@ -230,17 +299,19 @@ func (x *DeviceInfo) GetAppVersion() string {
|
|||
|
||||
// CreateSessionResponse contains the created session info
|
||||
type CreateSessionResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
SessionId string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"`
|
||||
JoinTokens map[string]string `protobuf:"bytes,2,rep,name=join_tokens,json=joinTokens,proto3" json:"join_tokens,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // party_id -> join_token
|
||||
ExpiresAt int64 `protobuf:"varint,3,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` // Unix timestamp milliseconds
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
SessionId string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"`
|
||||
JoinTokens map[string]string `protobuf:"bytes,2,rep,name=join_tokens,json=joinTokens,proto3" json:"join_tokens,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // party_id -> join_token
|
||||
ExpiresAt int64 `protobuf:"varint,3,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` // Unix timestamp milliseconds
|
||||
SelectedParties []string `protobuf:"bytes,4,rep,name=selected_parties,json=selectedParties,proto3" json:"selected_parties,omitempty"` // List of selected party IDs
|
||||
DelegatePartyId string `protobuf:"bytes,5,opt,name=delegate_party_id,json=delegatePartyId,proto3" json:"delegate_party_id,omitempty"` // The delegate party ID (if any)
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *CreateSessionResponse) Reset() {
|
||||
*x = CreateSessionResponse{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[3]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -252,7 +323,7 @@ func (x *CreateSessionResponse) String() string {
|
|||
func (*CreateSessionResponse) ProtoMessage() {}
|
||||
|
||||
func (x *CreateSessionResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[3]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[4]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -265,7 +336,7 @@ func (x *CreateSessionResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use CreateSessionResponse.ProtoReflect.Descriptor instead.
|
||||
func (*CreateSessionResponse) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{3}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *CreateSessionResponse) GetSessionId() string {
|
||||
|
|
@ -289,6 +360,20 @@ func (x *CreateSessionResponse) GetExpiresAt() int64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (x *CreateSessionResponse) GetSelectedParties() []string {
|
||||
if x != nil {
|
||||
return x.SelectedParties
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *CreateSessionResponse) GetDelegatePartyId() string {
|
||||
if x != nil {
|
||||
return x.DelegatePartyId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// JoinSessionRequest allows a participant to join a session
|
||||
type JoinSessionRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
|
|
@ -302,7 +387,7 @@ type JoinSessionRequest struct {
|
|||
|
||||
func (x *JoinSessionRequest) Reset() {
|
||||
*x = JoinSessionRequest{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[4]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -314,7 +399,7 @@ func (x *JoinSessionRequest) String() string {
|
|||
func (*JoinSessionRequest) ProtoMessage() {}
|
||||
|
||||
func (x *JoinSessionRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[4]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[5]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -327,7 +412,7 @@ func (x *JoinSessionRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use JoinSessionRequest.ProtoReflect.Descriptor instead.
|
||||
func (*JoinSessionRequest) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{4}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *JoinSessionRequest) GetSessionId() string {
|
||||
|
|
@ -370,7 +455,7 @@ type JoinSessionResponse struct {
|
|||
|
||||
func (x *JoinSessionResponse) Reset() {
|
||||
*x = JoinSessionResponse{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[5]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -382,7 +467,7 @@ func (x *JoinSessionResponse) String() string {
|
|||
func (*JoinSessionResponse) ProtoMessage() {}
|
||||
|
||||
func (x *JoinSessionResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[5]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[6]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -395,7 +480,7 @@ func (x *JoinSessionResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use JoinSessionResponse.ProtoReflect.Descriptor instead.
|
||||
func (*JoinSessionResponse) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{5}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *JoinSessionResponse) GetSuccess() bool {
|
||||
|
|
@ -434,7 +519,7 @@ type SessionInfo struct {
|
|||
|
||||
func (x *SessionInfo) Reset() {
|
||||
*x = SessionInfo{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[6]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -446,7 +531,7 @@ func (x *SessionInfo) String() string {
|
|||
func (*SessionInfo) ProtoMessage() {}
|
||||
|
||||
func (x *SessionInfo) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[6]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[7]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -459,7 +544,7 @@ func (x *SessionInfo) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use SessionInfo.ProtoReflect.Descriptor instead.
|
||||
func (*SessionInfo) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{6}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *SessionInfo) GetSessionId() string {
|
||||
|
|
@ -516,7 +601,7 @@ type PartyInfo struct {
|
|||
|
||||
func (x *PartyInfo) Reset() {
|
||||
*x = PartyInfo{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[7]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -528,7 +613,7 @@ func (x *PartyInfo) String() string {
|
|||
func (*PartyInfo) ProtoMessage() {}
|
||||
|
||||
func (x *PartyInfo) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[7]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[8]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -541,7 +626,7 @@ func (x *PartyInfo) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use PartyInfo.ProtoReflect.Descriptor instead.
|
||||
func (*PartyInfo) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{7}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
func (x *PartyInfo) GetPartyId() string {
|
||||
|
|
@ -575,7 +660,7 @@ type GetSessionStatusRequest struct {
|
|||
|
||||
func (x *GetSessionStatusRequest) Reset() {
|
||||
*x = GetSessionStatusRequest{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[8]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[9]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -587,7 +672,7 @@ func (x *GetSessionStatusRequest) String() string {
|
|||
func (*GetSessionStatusRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetSessionStatusRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[8]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[9]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -600,7 +685,7 @@ func (x *GetSessionStatusRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use GetSessionStatusRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetSessionStatusRequest) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{8}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{9}
|
||||
}
|
||||
|
||||
func (x *GetSessionStatusRequest) GetSessionId() string {
|
||||
|
|
@ -624,7 +709,7 @@ type GetSessionStatusResponse struct {
|
|||
|
||||
func (x *GetSessionStatusResponse) Reset() {
|
||||
*x = GetSessionStatusResponse{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[9]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[10]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -636,7 +721,7 @@ func (x *GetSessionStatusResponse) String() string {
|
|||
func (*GetSessionStatusResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetSessionStatusResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[9]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[10]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -649,7 +734,7 @@ func (x *GetSessionStatusResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use GetSessionStatusResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetSessionStatusResponse) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{9}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{10}
|
||||
}
|
||||
|
||||
func (x *GetSessionStatusResponse) GetStatus() string {
|
||||
|
|
@ -700,7 +785,7 @@ type ReportCompletionRequest struct {
|
|||
|
||||
func (x *ReportCompletionRequest) Reset() {
|
||||
*x = ReportCompletionRequest{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[10]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[11]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -712,7 +797,7 @@ func (x *ReportCompletionRequest) String() string {
|
|||
func (*ReportCompletionRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ReportCompletionRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[10]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[11]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -725,7 +810,7 @@ func (x *ReportCompletionRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use ReportCompletionRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ReportCompletionRequest) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{10}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{11}
|
||||
}
|
||||
|
||||
func (x *ReportCompletionRequest) GetSessionId() string {
|
||||
|
|
@ -767,7 +852,7 @@ type ReportCompletionResponse struct {
|
|||
|
||||
func (x *ReportCompletionResponse) Reset() {
|
||||
*x = ReportCompletionResponse{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[11]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[12]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -779,7 +864,7 @@ func (x *ReportCompletionResponse) String() string {
|
|||
func (*ReportCompletionResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ReportCompletionResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[11]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[12]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -792,7 +877,7 @@ func (x *ReportCompletionResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use ReportCompletionResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ReportCompletionResponse) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{11}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{12}
|
||||
}
|
||||
|
||||
func (x *ReportCompletionResponse) GetSuccess() bool {
|
||||
|
|
@ -819,7 +904,7 @@ type CloseSessionRequest struct {
|
|||
|
||||
func (x *CloseSessionRequest) Reset() {
|
||||
*x = CloseSessionRequest{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[12]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[13]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -831,7 +916,7 @@ func (x *CloseSessionRequest) String() string {
|
|||
func (*CloseSessionRequest) ProtoMessage() {}
|
||||
|
||||
func (x *CloseSessionRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[12]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[13]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -844,7 +929,7 @@ func (x *CloseSessionRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use CloseSessionRequest.ProtoReflect.Descriptor instead.
|
||||
func (*CloseSessionRequest) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{12}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{13}
|
||||
}
|
||||
|
||||
func (x *CloseSessionRequest) GetSessionId() string {
|
||||
|
|
@ -864,7 +949,7 @@ type CloseSessionResponse struct {
|
|||
|
||||
func (x *CloseSessionResponse) Reset() {
|
||||
*x = CloseSessionResponse{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[13]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[14]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -876,7 +961,7 @@ func (x *CloseSessionResponse) String() string {
|
|||
func (*CloseSessionResponse) ProtoMessage() {}
|
||||
|
||||
func (x *CloseSessionResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[13]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[14]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -889,7 +974,7 @@ func (x *CloseSessionResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use CloseSessionResponse.ProtoReflect.Descriptor instead.
|
||||
func (*CloseSessionResponse) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{13}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{14}
|
||||
}
|
||||
|
||||
func (x *CloseSessionResponse) GetSuccess() bool {
|
||||
|
|
@ -910,7 +995,7 @@ type MarkPartyReadyRequest struct {
|
|||
|
||||
func (x *MarkPartyReadyRequest) Reset() {
|
||||
*x = MarkPartyReadyRequest{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[14]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[15]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -922,7 +1007,7 @@ func (x *MarkPartyReadyRequest) String() string {
|
|||
func (*MarkPartyReadyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *MarkPartyReadyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[14]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[15]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -935,7 +1020,7 @@ func (x *MarkPartyReadyRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use MarkPartyReadyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*MarkPartyReadyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{14}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{15}
|
||||
}
|
||||
|
||||
func (x *MarkPartyReadyRequest) GetSessionId() string {
|
||||
|
|
@ -965,7 +1050,7 @@ type MarkPartyReadyResponse struct {
|
|||
|
||||
func (x *MarkPartyReadyResponse) Reset() {
|
||||
*x = MarkPartyReadyResponse{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[15]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[16]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -977,7 +1062,7 @@ func (x *MarkPartyReadyResponse) String() string {
|
|||
func (*MarkPartyReadyResponse) ProtoMessage() {}
|
||||
|
||||
func (x *MarkPartyReadyResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[15]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[16]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -990,7 +1075,7 @@ func (x *MarkPartyReadyResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use MarkPartyReadyResponse.ProtoReflect.Descriptor instead.
|
||||
func (*MarkPartyReadyResponse) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{15}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{16}
|
||||
}
|
||||
|
||||
func (x *MarkPartyReadyResponse) GetSuccess() bool {
|
||||
|
|
@ -1031,7 +1116,7 @@ type StartSessionRequest struct {
|
|||
|
||||
func (x *StartSessionRequest) Reset() {
|
||||
*x = StartSessionRequest{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[16]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[17]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -1043,7 +1128,7 @@ func (x *StartSessionRequest) String() string {
|
|||
func (*StartSessionRequest) ProtoMessage() {}
|
||||
|
||||
func (x *StartSessionRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[16]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[17]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -1056,7 +1141,7 @@ func (x *StartSessionRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use StartSessionRequest.ProtoReflect.Descriptor instead.
|
||||
func (*StartSessionRequest) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{16}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{17}
|
||||
}
|
||||
|
||||
func (x *StartSessionRequest) GetSessionId() string {
|
||||
|
|
@ -1077,7 +1162,7 @@ type StartSessionResponse struct {
|
|||
|
||||
func (x *StartSessionResponse) Reset() {
|
||||
*x = StartSessionResponse{}
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[17]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[18]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -1089,7 +1174,7 @@ func (x *StartSessionResponse) String() string {
|
|||
func (*StartSessionResponse) ProtoMessage() {}
|
||||
|
||||
func (x *StartSessionResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[17]
|
||||
mi := &file_api_proto_session_coordinator_proto_msgTypes[18]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -1102,7 +1187,7 @@ func (x *StartSessionResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use StartSessionResponse.ProtoReflect.Descriptor instead.
|
||||
func (*StartSessionResponse) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{17}
|
||||
return file_api_proto_session_coordinator_proto_rawDescGZIP(), []int{18}
|
||||
}
|
||||
|
||||
func (x *StartSessionResponse) GetSuccess() bool {
|
||||
|
|
@ -1123,7 +1208,7 @@ var File_api_proto_session_coordinator_proto protoreflect.FileDescriptor
|
|||
|
||||
const file_api_proto_session_coordinator_proto_rawDesc = "" +
|
||||
"\n" +
|
||||
"#api/proto/session_coordinator.proto\x12\x12mpc.coordinator.v1\"\x95\x02\n" +
|
||||
"#api/proto/session_coordinator.proto\x12\x12mpc.coordinator.v1\"\xe8\x02\n" +
|
||||
"\x14CreateSessionRequest\x12!\n" +
|
||||
"\fsession_type\x18\x01 \x01(\tR\vsessionType\x12\x1f\n" +
|
||||
"\vthreshold_n\x18\x02 \x01(\x05R\n" +
|
||||
|
|
@ -1132,7 +1217,12 @@ const file_api_proto_session_coordinator_proto_rawDesc = "" +
|
|||
"thresholdT\x12G\n" +
|
||||
"\fparticipants\x18\x04 \x03(\v2#.mpc.coordinator.v1.ParticipantInfoR\fparticipants\x12!\n" +
|
||||
"\fmessage_hash\x18\x05 \x01(\fR\vmessageHash\x12,\n" +
|
||||
"\x12expires_in_seconds\x18\x06 \x01(\x03R\x10expiresInSeconds\"m\n" +
|
||||
"\x12expires_in_seconds\x18\x06 \x01(\x03R\x10expiresInSeconds\x12Q\n" +
|
||||
"\x11party_composition\x18\a \x01(\v2$.mpc.coordinator.v1.PartyCompositionR\x10partyComposition\"\x8d\x01\n" +
|
||||
"\x10PartyComposition\x12)\n" +
|
||||
"\x10persistent_count\x18\x01 \x01(\x05R\x0fpersistentCount\x12%\n" +
|
||||
"\x0edelegate_count\x18\x02 \x01(\x05R\rdelegateCount\x12'\n" +
|
||||
"\x0ftemporary_count\x18\x03 \x01(\x05R\x0etemporaryCount\"m\n" +
|
||||
"\x0fParticipantInfo\x12\x19\n" +
|
||||
"\bparty_id\x18\x01 \x01(\tR\apartyId\x12?\n" +
|
||||
"\vdevice_info\x18\x02 \x01(\v2\x1e.mpc.coordinator.v1.DeviceInfoR\n" +
|
||||
|
|
@ -1144,14 +1234,16 @@ const file_api_proto_session_coordinator_proto_rawDesc = "" +
|
|||
"\tdevice_id\x18\x02 \x01(\tR\bdeviceId\x12\x1a\n" +
|
||||
"\bplatform\x18\x03 \x01(\tR\bplatform\x12\x1f\n" +
|
||||
"\vapp_version\x18\x04 \x01(\tR\n" +
|
||||
"appVersion\"\xf0\x01\n" +
|
||||
"appVersion\"\xc7\x02\n" +
|
||||
"\x15CreateSessionResponse\x12\x1d\n" +
|
||||
"\n" +
|
||||
"session_id\x18\x01 \x01(\tR\tsessionId\x12Z\n" +
|
||||
"\vjoin_tokens\x18\x02 \x03(\v29.mpc.coordinator.v1.CreateSessionResponse.JoinTokensEntryR\n" +
|
||||
"joinTokens\x12\x1d\n" +
|
||||
"\n" +
|
||||
"expires_at\x18\x03 \x01(\x03R\texpiresAt\x1a=\n" +
|
||||
"expires_at\x18\x03 \x01(\x03R\texpiresAt\x12)\n" +
|
||||
"\x10selected_parties\x18\x04 \x03(\tR\x0fselectedParties\x12*\n" +
|
||||
"\x11delegate_party_id\x18\x05 \x01(\tR\x0fdelegatePartyId\x1a=\n" +
|
||||
"\x0fJoinTokensEntry\x12\x10\n" +
|
||||
"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
|
||||
"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xae\x01\n" +
|
||||
|
|
@ -1245,55 +1337,57 @@ func file_api_proto_session_coordinator_proto_rawDescGZIP() []byte {
|
|||
return file_api_proto_session_coordinator_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_api_proto_session_coordinator_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
|
||||
var file_api_proto_session_coordinator_proto_msgTypes = make([]protoimpl.MessageInfo, 20)
|
||||
var file_api_proto_session_coordinator_proto_goTypes = []any{
|
||||
(*CreateSessionRequest)(nil), // 0: mpc.coordinator.v1.CreateSessionRequest
|
||||
(*ParticipantInfo)(nil), // 1: mpc.coordinator.v1.ParticipantInfo
|
||||
(*DeviceInfo)(nil), // 2: mpc.coordinator.v1.DeviceInfo
|
||||
(*CreateSessionResponse)(nil), // 3: mpc.coordinator.v1.CreateSessionResponse
|
||||
(*JoinSessionRequest)(nil), // 4: mpc.coordinator.v1.JoinSessionRequest
|
||||
(*JoinSessionResponse)(nil), // 5: mpc.coordinator.v1.JoinSessionResponse
|
||||
(*SessionInfo)(nil), // 6: mpc.coordinator.v1.SessionInfo
|
||||
(*PartyInfo)(nil), // 7: mpc.coordinator.v1.PartyInfo
|
||||
(*GetSessionStatusRequest)(nil), // 8: mpc.coordinator.v1.GetSessionStatusRequest
|
||||
(*GetSessionStatusResponse)(nil), // 9: mpc.coordinator.v1.GetSessionStatusResponse
|
||||
(*ReportCompletionRequest)(nil), // 10: mpc.coordinator.v1.ReportCompletionRequest
|
||||
(*ReportCompletionResponse)(nil), // 11: mpc.coordinator.v1.ReportCompletionResponse
|
||||
(*CloseSessionRequest)(nil), // 12: mpc.coordinator.v1.CloseSessionRequest
|
||||
(*CloseSessionResponse)(nil), // 13: mpc.coordinator.v1.CloseSessionResponse
|
||||
(*MarkPartyReadyRequest)(nil), // 14: mpc.coordinator.v1.MarkPartyReadyRequest
|
||||
(*MarkPartyReadyResponse)(nil), // 15: mpc.coordinator.v1.MarkPartyReadyResponse
|
||||
(*StartSessionRequest)(nil), // 16: mpc.coordinator.v1.StartSessionRequest
|
||||
(*StartSessionResponse)(nil), // 17: mpc.coordinator.v1.StartSessionResponse
|
||||
nil, // 18: mpc.coordinator.v1.CreateSessionResponse.JoinTokensEntry
|
||||
(*PartyComposition)(nil), // 1: mpc.coordinator.v1.PartyComposition
|
||||
(*ParticipantInfo)(nil), // 2: mpc.coordinator.v1.ParticipantInfo
|
||||
(*DeviceInfo)(nil), // 3: mpc.coordinator.v1.DeviceInfo
|
||||
(*CreateSessionResponse)(nil), // 4: mpc.coordinator.v1.CreateSessionResponse
|
||||
(*JoinSessionRequest)(nil), // 5: mpc.coordinator.v1.JoinSessionRequest
|
||||
(*JoinSessionResponse)(nil), // 6: mpc.coordinator.v1.JoinSessionResponse
|
||||
(*SessionInfo)(nil), // 7: mpc.coordinator.v1.SessionInfo
|
||||
(*PartyInfo)(nil), // 8: mpc.coordinator.v1.PartyInfo
|
||||
(*GetSessionStatusRequest)(nil), // 9: mpc.coordinator.v1.GetSessionStatusRequest
|
||||
(*GetSessionStatusResponse)(nil), // 10: mpc.coordinator.v1.GetSessionStatusResponse
|
||||
(*ReportCompletionRequest)(nil), // 11: mpc.coordinator.v1.ReportCompletionRequest
|
||||
(*ReportCompletionResponse)(nil), // 12: mpc.coordinator.v1.ReportCompletionResponse
|
||||
(*CloseSessionRequest)(nil), // 13: mpc.coordinator.v1.CloseSessionRequest
|
||||
(*CloseSessionResponse)(nil), // 14: mpc.coordinator.v1.CloseSessionResponse
|
||||
(*MarkPartyReadyRequest)(nil), // 15: mpc.coordinator.v1.MarkPartyReadyRequest
|
||||
(*MarkPartyReadyResponse)(nil), // 16: mpc.coordinator.v1.MarkPartyReadyResponse
|
||||
(*StartSessionRequest)(nil), // 17: mpc.coordinator.v1.StartSessionRequest
|
||||
(*StartSessionResponse)(nil), // 18: mpc.coordinator.v1.StartSessionResponse
|
||||
nil, // 19: mpc.coordinator.v1.CreateSessionResponse.JoinTokensEntry
|
||||
}
|
||||
var file_api_proto_session_coordinator_proto_depIdxs = []int32{
|
||||
1, // 0: mpc.coordinator.v1.CreateSessionRequest.participants:type_name -> mpc.coordinator.v1.ParticipantInfo
|
||||
2, // 1: mpc.coordinator.v1.ParticipantInfo.device_info:type_name -> mpc.coordinator.v1.DeviceInfo
|
||||
18, // 2: mpc.coordinator.v1.CreateSessionResponse.join_tokens:type_name -> mpc.coordinator.v1.CreateSessionResponse.JoinTokensEntry
|
||||
2, // 3: mpc.coordinator.v1.JoinSessionRequest.device_info:type_name -> mpc.coordinator.v1.DeviceInfo
|
||||
6, // 4: mpc.coordinator.v1.JoinSessionResponse.session_info:type_name -> mpc.coordinator.v1.SessionInfo
|
||||
7, // 5: mpc.coordinator.v1.JoinSessionResponse.other_parties:type_name -> mpc.coordinator.v1.PartyInfo
|
||||
2, // 6: mpc.coordinator.v1.PartyInfo.device_info:type_name -> mpc.coordinator.v1.DeviceInfo
|
||||
0, // 7: mpc.coordinator.v1.SessionCoordinator.CreateSession:input_type -> mpc.coordinator.v1.CreateSessionRequest
|
||||
4, // 8: mpc.coordinator.v1.SessionCoordinator.JoinSession:input_type -> mpc.coordinator.v1.JoinSessionRequest
|
||||
8, // 9: mpc.coordinator.v1.SessionCoordinator.GetSessionStatus:input_type -> mpc.coordinator.v1.GetSessionStatusRequest
|
||||
14, // 10: mpc.coordinator.v1.SessionCoordinator.MarkPartyReady:input_type -> mpc.coordinator.v1.MarkPartyReadyRequest
|
||||
16, // 11: mpc.coordinator.v1.SessionCoordinator.StartSession:input_type -> mpc.coordinator.v1.StartSessionRequest
|
||||
10, // 12: mpc.coordinator.v1.SessionCoordinator.ReportCompletion:input_type -> mpc.coordinator.v1.ReportCompletionRequest
|
||||
12, // 13: mpc.coordinator.v1.SessionCoordinator.CloseSession:input_type -> mpc.coordinator.v1.CloseSessionRequest
|
||||
3, // 14: mpc.coordinator.v1.SessionCoordinator.CreateSession:output_type -> mpc.coordinator.v1.CreateSessionResponse
|
||||
5, // 15: mpc.coordinator.v1.SessionCoordinator.JoinSession:output_type -> mpc.coordinator.v1.JoinSessionResponse
|
||||
9, // 16: mpc.coordinator.v1.SessionCoordinator.GetSessionStatus:output_type -> mpc.coordinator.v1.GetSessionStatusResponse
|
||||
15, // 17: mpc.coordinator.v1.SessionCoordinator.MarkPartyReady:output_type -> mpc.coordinator.v1.MarkPartyReadyResponse
|
||||
17, // 18: mpc.coordinator.v1.SessionCoordinator.StartSession:output_type -> mpc.coordinator.v1.StartSessionResponse
|
||||
11, // 19: mpc.coordinator.v1.SessionCoordinator.ReportCompletion:output_type -> mpc.coordinator.v1.ReportCompletionResponse
|
||||
13, // 20: mpc.coordinator.v1.SessionCoordinator.CloseSession:output_type -> mpc.coordinator.v1.CloseSessionResponse
|
||||
14, // [14:21] is the sub-list for method output_type
|
||||
7, // [7:14] is the sub-list for method input_type
|
||||
7, // [7:7] is the sub-list for extension type_name
|
||||
7, // [7:7] is the sub-list for extension extendee
|
||||
0, // [0:7] is the sub-list for field type_name
|
||||
2, // 0: mpc.coordinator.v1.CreateSessionRequest.participants:type_name -> mpc.coordinator.v1.ParticipantInfo
|
||||
1, // 1: mpc.coordinator.v1.CreateSessionRequest.party_composition:type_name -> mpc.coordinator.v1.PartyComposition
|
||||
3, // 2: mpc.coordinator.v1.ParticipantInfo.device_info:type_name -> mpc.coordinator.v1.DeviceInfo
|
||||
19, // 3: mpc.coordinator.v1.CreateSessionResponse.join_tokens:type_name -> mpc.coordinator.v1.CreateSessionResponse.JoinTokensEntry
|
||||
3, // 4: mpc.coordinator.v1.JoinSessionRequest.device_info:type_name -> mpc.coordinator.v1.DeviceInfo
|
||||
7, // 5: mpc.coordinator.v1.JoinSessionResponse.session_info:type_name -> mpc.coordinator.v1.SessionInfo
|
||||
8, // 6: mpc.coordinator.v1.JoinSessionResponse.other_parties:type_name -> mpc.coordinator.v1.PartyInfo
|
||||
3, // 7: mpc.coordinator.v1.PartyInfo.device_info:type_name -> mpc.coordinator.v1.DeviceInfo
|
||||
0, // 8: mpc.coordinator.v1.SessionCoordinator.CreateSession:input_type -> mpc.coordinator.v1.CreateSessionRequest
|
||||
5, // 9: mpc.coordinator.v1.SessionCoordinator.JoinSession:input_type -> mpc.coordinator.v1.JoinSessionRequest
|
||||
9, // 10: mpc.coordinator.v1.SessionCoordinator.GetSessionStatus:input_type -> mpc.coordinator.v1.GetSessionStatusRequest
|
||||
15, // 11: mpc.coordinator.v1.SessionCoordinator.MarkPartyReady:input_type -> mpc.coordinator.v1.MarkPartyReadyRequest
|
||||
17, // 12: mpc.coordinator.v1.SessionCoordinator.StartSession:input_type -> mpc.coordinator.v1.StartSessionRequest
|
||||
11, // 13: mpc.coordinator.v1.SessionCoordinator.ReportCompletion:input_type -> mpc.coordinator.v1.ReportCompletionRequest
|
||||
13, // 14: mpc.coordinator.v1.SessionCoordinator.CloseSession:input_type -> mpc.coordinator.v1.CloseSessionRequest
|
||||
4, // 15: mpc.coordinator.v1.SessionCoordinator.CreateSession:output_type -> mpc.coordinator.v1.CreateSessionResponse
|
||||
6, // 16: mpc.coordinator.v1.SessionCoordinator.JoinSession:output_type -> mpc.coordinator.v1.JoinSessionResponse
|
||||
10, // 17: mpc.coordinator.v1.SessionCoordinator.GetSessionStatus:output_type -> mpc.coordinator.v1.GetSessionStatusResponse
|
||||
16, // 18: mpc.coordinator.v1.SessionCoordinator.MarkPartyReady:output_type -> mpc.coordinator.v1.MarkPartyReadyResponse
|
||||
18, // 19: mpc.coordinator.v1.SessionCoordinator.StartSession:output_type -> mpc.coordinator.v1.StartSessionResponse
|
||||
12, // 20: mpc.coordinator.v1.SessionCoordinator.ReportCompletion:output_type -> mpc.coordinator.v1.ReportCompletionResponse
|
||||
14, // 21: mpc.coordinator.v1.SessionCoordinator.CloseSession:output_type -> mpc.coordinator.v1.CloseSessionResponse
|
||||
15, // [15:22] is the sub-list for method output_type
|
||||
8, // [8:15] is the sub-list for method input_type
|
||||
8, // [8:8] is the sub-list for extension type_name
|
||||
8, // [8:8] is the sub-list for extension extendee
|
||||
0, // [0:8] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_api_proto_session_coordinator_proto_init() }
|
||||
|
|
@ -1307,7 +1401,7 @@ func file_api_proto_session_coordinator_proto_init() {
|
|||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_api_proto_session_coordinator_proto_rawDesc), len(file_api_proto_session_coordinator_proto_rawDesc)),
|
||||
NumEnums: 0,
|
||||
NumMessages: 19,
|
||||
NumMessages: 20,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.3.0
|
||||
// - protoc v3.12.4
|
||||
// - protoc-gen-go-grpc v1.6.0
|
||||
// - protoc v6.33.1
|
||||
// source: api/proto/session_coordinator.proto
|
||||
|
||||
package coordinator
|
||||
|
|
@ -15,8 +15,8 @@ import (
|
|||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
// Requires gRPC-Go v1.64.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion9
|
||||
|
||||
const (
|
||||
SessionCoordinator_CreateSession_FullMethodName = "/mpc.coordinator.v1.SessionCoordinator/CreateSession"
|
||||
|
|
@ -31,6 +31,8 @@ const (
|
|||
// SessionCoordinatorClient is the client API for SessionCoordinator service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
//
|
||||
// SessionCoordinator service manages MPC sessions
|
||||
type SessionCoordinatorClient interface {
|
||||
// Session management
|
||||
CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*CreateSessionResponse, error)
|
||||
|
|
@ -51,8 +53,9 @@ func NewSessionCoordinatorClient(cc grpc.ClientConnInterface) SessionCoordinator
|
|||
}
|
||||
|
||||
func (c *sessionCoordinatorClient) CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*CreateSessionResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(CreateSessionResponse)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_CreateSession_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_CreateSession_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -60,8 +63,9 @@ func (c *sessionCoordinatorClient) CreateSession(ctx context.Context, in *Create
|
|||
}
|
||||
|
||||
func (c *sessionCoordinatorClient) JoinSession(ctx context.Context, in *JoinSessionRequest, opts ...grpc.CallOption) (*JoinSessionResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(JoinSessionResponse)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_JoinSession_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_JoinSession_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -69,8 +73,9 @@ func (c *sessionCoordinatorClient) JoinSession(ctx context.Context, in *JoinSess
|
|||
}
|
||||
|
||||
func (c *sessionCoordinatorClient) GetSessionStatus(ctx context.Context, in *GetSessionStatusRequest, opts ...grpc.CallOption) (*GetSessionStatusResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(GetSessionStatusResponse)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_GetSessionStatus_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_GetSessionStatus_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -78,8 +83,9 @@ func (c *sessionCoordinatorClient) GetSessionStatus(ctx context.Context, in *Get
|
|||
}
|
||||
|
||||
func (c *sessionCoordinatorClient) MarkPartyReady(ctx context.Context, in *MarkPartyReadyRequest, opts ...grpc.CallOption) (*MarkPartyReadyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(MarkPartyReadyResponse)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_MarkPartyReady_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_MarkPartyReady_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -87,8 +93,9 @@ func (c *sessionCoordinatorClient) MarkPartyReady(ctx context.Context, in *MarkP
|
|||
}
|
||||
|
||||
func (c *sessionCoordinatorClient) StartSession(ctx context.Context, in *StartSessionRequest, opts ...grpc.CallOption) (*StartSessionResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(StartSessionResponse)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_StartSession_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_StartSession_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -96,8 +103,9 @@ func (c *sessionCoordinatorClient) StartSession(ctx context.Context, in *StartSe
|
|||
}
|
||||
|
||||
func (c *sessionCoordinatorClient) ReportCompletion(ctx context.Context, in *ReportCompletionRequest, opts ...grpc.CallOption) (*ReportCompletionResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ReportCompletionResponse)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_ReportCompletion_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_ReportCompletion_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -105,8 +113,9 @@ func (c *sessionCoordinatorClient) ReportCompletion(ctx context.Context, in *Rep
|
|||
}
|
||||
|
||||
func (c *sessionCoordinatorClient) CloseSession(ctx context.Context, in *CloseSessionRequest, opts ...grpc.CallOption) (*CloseSessionResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(CloseSessionResponse)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_CloseSession_FullMethodName, in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_CloseSession_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -115,7 +124,9 @@ func (c *sessionCoordinatorClient) CloseSession(ctx context.Context, in *CloseSe
|
|||
|
||||
// SessionCoordinatorServer is the server API for SessionCoordinator service.
|
||||
// All implementations must embed UnimplementedSessionCoordinatorServer
|
||||
// for forward compatibility
|
||||
// for forward compatibility.
|
||||
//
|
||||
// SessionCoordinator service manages MPC sessions
|
||||
type SessionCoordinatorServer interface {
|
||||
// Session management
|
||||
CreateSession(context.Context, *CreateSessionRequest) (*CreateSessionResponse, error)
|
||||
|
|
@ -128,32 +139,36 @@ type SessionCoordinatorServer interface {
|
|||
mustEmbedUnimplementedSessionCoordinatorServer()
|
||||
}
|
||||
|
||||
// UnimplementedSessionCoordinatorServer must be embedded to have forward compatible implementations.
|
||||
type UnimplementedSessionCoordinatorServer struct {
|
||||
}
|
||||
// UnimplementedSessionCoordinatorServer must be embedded to have
|
||||
// forward compatible implementations.
|
||||
//
|
||||
// NOTE: this should be embedded by value instead of pointer to avoid a nil
|
||||
// pointer dereference when methods are called.
|
||||
type UnimplementedSessionCoordinatorServer struct{}
|
||||
|
||||
func (UnimplementedSessionCoordinatorServer) CreateSession(context.Context, *CreateSessionRequest) (*CreateSessionResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateSession not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method CreateSession not implemented")
|
||||
}
|
||||
func (UnimplementedSessionCoordinatorServer) JoinSession(context.Context, *JoinSessionRequest) (*JoinSessionResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method JoinSession not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method JoinSession not implemented")
|
||||
}
|
||||
func (UnimplementedSessionCoordinatorServer) GetSessionStatus(context.Context, *GetSessionStatusRequest) (*GetSessionStatusResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetSessionStatus not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method GetSessionStatus not implemented")
|
||||
}
|
||||
func (UnimplementedSessionCoordinatorServer) MarkPartyReady(context.Context, *MarkPartyReadyRequest) (*MarkPartyReadyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method MarkPartyReady not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method MarkPartyReady not implemented")
|
||||
}
|
||||
func (UnimplementedSessionCoordinatorServer) StartSession(context.Context, *StartSessionRequest) (*StartSessionResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method StartSession not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method StartSession not implemented")
|
||||
}
|
||||
func (UnimplementedSessionCoordinatorServer) ReportCompletion(context.Context, *ReportCompletionRequest) (*ReportCompletionResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ReportCompletion not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method ReportCompletion not implemented")
|
||||
}
|
||||
func (UnimplementedSessionCoordinatorServer) CloseSession(context.Context, *CloseSessionRequest) (*CloseSessionResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CloseSession not implemented")
|
||||
return nil, status.Error(codes.Unimplemented, "method CloseSession not implemented")
|
||||
}
|
||||
func (UnimplementedSessionCoordinatorServer) mustEmbedUnimplementedSessionCoordinatorServer() {}
|
||||
func (UnimplementedSessionCoordinatorServer) testEmbeddedByValue() {}
|
||||
|
||||
// UnsafeSessionCoordinatorServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to SessionCoordinatorServer will
|
||||
|
|
@ -163,6 +178,13 @@ type UnsafeSessionCoordinatorServer interface {
|
|||
}
|
||||
|
||||
func RegisterSessionCoordinatorServer(s grpc.ServiceRegistrar, srv SessionCoordinatorServer) {
|
||||
// If the following call panics, it indicates UnimplementedSessionCoordinatorServer was
|
||||
// embedded by pointer and is nil. This will cause panics if an
|
||||
// unimplemented method is ever invoked, so we test this at initialization
|
||||
// time to prevent it from happening at runtime later due to I/O.
|
||||
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
|
||||
t.testEmbeddedByValue()
|
||||
}
|
||||
s.RegisterService(&SessionCoordinator_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -419,19 +419,83 @@ func (x *GetPendingMessagesResponse) GetMessages() []*MPCMessage {
|
|||
return nil
|
||||
}
|
||||
|
||||
// NotificationChannel represents a notification channel for offline parties
|
||||
// If a party has notification channels, it operates in offline mode (24h async)
|
||||
// If no notification channels, it operates in real-time mode (Message Router push)
|
||||
type NotificationChannel struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` // Optional: email address for notifications
|
||||
Phone string `protobuf:"bytes,2,opt,name=phone,proto3" json:"phone,omitempty"` // Optional: phone number for SMS notifications
|
||||
PushToken string `protobuf:"bytes,3,opt,name=push_token,json=pushToken,proto3" json:"push_token,omitempty"` // Optional: push notification token (FCM/APNs)
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) Reset() {
|
||||
*x = NotificationChannel{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*NotificationChannel) ProtoMessage() {}
|
||||
|
||||
func (x *NotificationChannel) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[6]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use NotificationChannel.ProtoReflect.Descriptor instead.
|
||||
func (*NotificationChannel) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) GetEmail() string {
|
||||
if x != nil {
|
||||
return x.Email
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) GetPhone() string {
|
||||
if x != nil {
|
||||
return x.Phone
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *NotificationChannel) GetPushToken() string {
|
||||
if x != nil {
|
||||
return x.PushToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// RegisterPartyRequest registers a party with the router
type RegisterPartyRequest struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	PartyId       string                 `protobuf:"bytes,1,opt,name=party_id,json=partyId,proto3" json:"party_id,omitempty"`       // Unique party identifier
	PartyRole     string                 `protobuf:"bytes,2,opt,name=party_role,json=partyRole,proto3" json:"party_role,omitempty"` // persistent, delegate, or temporary
	Version       string                 `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`                      // Party software version
	Notification  *NotificationChannel   `protobuf:"bytes,4,opt,name=notification,proto3" json:"notification,omitempty"`            // Optional: notification channel for offline mode
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
|
||||
|
||||
func (x *RegisterPartyRequest) Reset() {
|
||||
*x = RegisterPartyRequest{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[6]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -443,7 +507,7 @@ func (x *RegisterPartyRequest) String() string {
|
|||
func (*RegisterPartyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *RegisterPartyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[6]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[7]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -456,7 +520,7 @@ func (x *RegisterPartyRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use RegisterPartyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*RegisterPartyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{6}
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *RegisterPartyRequest) GetPartyId() string {
|
||||
|
|
@ -480,6 +544,13 @@ func (x *RegisterPartyRequest) GetVersion() string {
|
|||
return ""
|
||||
}
|
||||
|
||||
func (x *RegisterPartyRequest) GetNotification() *NotificationChannel {
|
||||
if x != nil {
|
||||
return x.Notification
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterPartyResponse confirms party registration
|
||||
type RegisterPartyResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
|
|
@ -492,7 +563,7 @@ type RegisterPartyResponse struct {
|
|||
|
||||
func (x *RegisterPartyResponse) Reset() {
|
||||
*x = RegisterPartyResponse{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[7]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -504,7 +575,7 @@ func (x *RegisterPartyResponse) String() string {
|
|||
func (*RegisterPartyResponse) ProtoMessage() {}
|
||||
|
||||
func (x *RegisterPartyResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[7]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[8]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -517,7 +588,7 @@ func (x *RegisterPartyResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use RegisterPartyResponse.ProtoReflect.Descriptor instead.
|
||||
func (*RegisterPartyResponse) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{7}
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
func (x *RegisterPartyResponse) GetSuccess() bool {
|
||||
|
|
@ -552,7 +623,7 @@ type SubscribeSessionEventsRequest struct {
|
|||
|
||||
func (x *SubscribeSessionEventsRequest) Reset() {
|
||||
*x = SubscribeSessionEventsRequest{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[8]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[9]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -564,7 +635,7 @@ func (x *SubscribeSessionEventsRequest) String() string {
|
|||
func (*SubscribeSessionEventsRequest) ProtoMessage() {}
|
||||
|
||||
func (x *SubscribeSessionEventsRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[8]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[9]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -577,7 +648,7 @@ func (x *SubscribeSessionEventsRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use SubscribeSessionEventsRequest.ProtoReflect.Descriptor instead.
|
||||
func (*SubscribeSessionEventsRequest) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{8}
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{9}
|
||||
}
|
||||
|
||||
func (x *SubscribeSessionEventsRequest) GetPartyId() string {
|
||||
|
|
@ -613,7 +684,7 @@ type SessionEvent struct {
|
|||
|
||||
func (x *SessionEvent) Reset() {
|
||||
*x = SessionEvent{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[9]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[10]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -625,7 +696,7 @@ func (x *SessionEvent) String() string {
|
|||
func (*SessionEvent) ProtoMessage() {}
|
||||
|
||||
func (x *SessionEvent) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[9]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[10]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -638,7 +709,7 @@ func (x *SessionEvent) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use SessionEvent.ProtoReflect.Descriptor instead.
|
||||
func (*SessionEvent) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{9}
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{10}
|
||||
}
|
||||
|
||||
func (x *SessionEvent) GetEventId() string {
|
||||
|
|
@ -721,7 +792,7 @@ type PublishSessionEventRequest struct {
|
|||
|
||||
func (x *PublishSessionEventRequest) Reset() {
|
||||
*x = PublishSessionEventRequest{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[10]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[11]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -733,7 +804,7 @@ func (x *PublishSessionEventRequest) String() string {
|
|||
func (*PublishSessionEventRequest) ProtoMessage() {}
|
||||
|
||||
func (x *PublishSessionEventRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[10]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[11]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -746,7 +817,7 @@ func (x *PublishSessionEventRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use PublishSessionEventRequest.ProtoReflect.Descriptor instead.
|
||||
func (*PublishSessionEventRequest) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{10}
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{11}
|
||||
}
|
||||
|
||||
func (x *PublishSessionEventRequest) GetEvent() *SessionEvent {
|
||||
|
|
@ -767,7 +838,7 @@ type PublishSessionEventResponse struct {
|
|||
|
||||
func (x *PublishSessionEventResponse) Reset() {
|
||||
*x = PublishSessionEventResponse{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[11]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[12]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -779,7 +850,7 @@ func (x *PublishSessionEventResponse) String() string {
|
|||
func (*PublishSessionEventResponse) ProtoMessage() {}
|
||||
|
||||
func (x *PublishSessionEventResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[11]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[12]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -792,7 +863,7 @@ func (x *PublishSessionEventResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use PublishSessionEventResponse.ProtoReflect.Descriptor instead.
|
||||
func (*PublishSessionEventResponse) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{11}
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{12}
|
||||
}
|
||||
|
||||
func (x *PublishSessionEventResponse) GetSuccess() bool {
|
||||
|
|
@ -820,7 +891,7 @@ type GetRegisteredPartiesRequest struct {
|
|||
|
||||
func (x *GetRegisteredPartiesRequest) Reset() {
|
||||
*x = GetRegisteredPartiesRequest{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[12]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[13]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -832,7 +903,7 @@ func (x *GetRegisteredPartiesRequest) String() string {
|
|||
func (*GetRegisteredPartiesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetRegisteredPartiesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[12]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[13]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -845,7 +916,7 @@ func (x *GetRegisteredPartiesRequest) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use GetRegisteredPartiesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetRegisteredPartiesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{12}
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{13}
|
||||
}
|
||||
|
||||
func (x *GetRegisteredPartiesRequest) GetRoleFilter() string {
|
||||
|
|
@ -870,13 +941,14 @@ type RegisteredParty struct {
|
|||
Online bool `protobuf:"varint,3,opt,name=online,proto3" json:"online,omitempty"` // Whether party is currently connected
|
||||
RegisteredAt int64 `protobuf:"varint,4,opt,name=registered_at,json=registeredAt,proto3" json:"registered_at,omitempty"` // Unix timestamp milliseconds
|
||||
LastSeenAt int64 `protobuf:"varint,5,opt,name=last_seen_at,json=lastSeenAt,proto3" json:"last_seen_at,omitempty"` // Unix timestamp milliseconds
|
||||
Notification *NotificationChannel `protobuf:"bytes,6,opt,name=notification,proto3" json:"notification,omitempty"` // Optional: notification channel (if set, party is offline mode)
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *RegisteredParty) Reset() {
|
||||
*x = RegisteredParty{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[13]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[14]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -888,7 +960,7 @@ func (x *RegisteredParty) String() string {
|
|||
func (*RegisteredParty) ProtoMessage() {}
|
||||
|
||||
func (x *RegisteredParty) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[13]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[14]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -901,7 +973,7 @@ func (x *RegisteredParty) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use RegisteredParty.ProtoReflect.Descriptor instead.
|
||||
func (*RegisteredParty) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{13}
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{14}
|
||||
}
|
||||
|
||||
func (x *RegisteredParty) GetPartyId() string {
|
||||
|
|
@ -939,6 +1011,13 @@ func (x *RegisteredParty) GetLastSeenAt() int64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (x *RegisteredParty) GetNotification() *NotificationChannel {
|
||||
if x != nil {
|
||||
return x.Notification
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetRegisteredPartiesResponse returns registered parties
|
||||
type GetRegisteredPartiesResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
|
|
@ -950,7 +1029,7 @@ type GetRegisteredPartiesResponse struct {
|
|||
|
||||
func (x *GetRegisteredPartiesResponse) Reset() {
|
||||
*x = GetRegisteredPartiesResponse{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[14]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[15]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
|
@ -962,7 +1041,7 @@ func (x *GetRegisteredPartiesResponse) String() string {
|
|||
func (*GetRegisteredPartiesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetRegisteredPartiesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[14]
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[15]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
|
@ -975,7 +1054,7 @@ func (x *GetRegisteredPartiesResponse) ProtoReflect() protoreflect.Message {
|
|||
|
||||
// Deprecated: Use GetRegisteredPartiesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetRegisteredPartiesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{14}
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{15}
|
||||
}
|
||||
|
||||
func (x *GetRegisteredPartiesResponse) GetParties() []*RegisteredParty {
|
||||
|
|
@ -992,6 +1071,449 @@ func (x *GetRegisteredPartiesResponse) GetTotalCount() int32 {
|
|||
return 0
|
||||
}
|
||||
|
||||
// AcknowledgeMessageRequest acknowledges message receipt
type AcknowledgeMessageRequest struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	MessageId     string                 `protobuf:"bytes,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"`          // ID of the message being acknowledged
	PartyId       string                 `protobuf:"bytes,2,opt,name=party_id,json=partyId,proto3" json:"party_id,omitempty"`                // ID of the party acknowledging
	SessionId     string                 `protobuf:"bytes,3,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"`          // Session the message belongs to
	Success       bool                   `protobuf:"varint,4,opt,name=success,proto3" json:"success,omitempty"`                              // True if message was processed successfully
	ErrorMessage  string                 `protobuf:"bytes,5,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` // Error message if processing failed
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
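As a hedged usage sketch (not part of the generated code), a party could build this request once it has finished handling a routed message. The party ID, the MPCMessage accessors, and the helper name are assumptions chosen for illustration.

// assumes: "context", router "github.com/rwadurian/mpc-system/api/grpc/router/v1"

// ackMessage reports the processing outcome back to the router so delivery
// can be marked acknowledged (or failed) instead of being retried indefinitely.
func ackMessage(ctx context.Context, client router.MessageRouterClient, msg *router.MPCMessage, handleErr error) error {
	req := &router.AcknowledgeMessageRequest{
		MessageId: msg.GetMessageId(), // assumed MPCMessage accessor
		PartyId:   "party-1",          // illustrative party ID
		SessionId: msg.GetSessionId(), // assumed MPCMessage accessor
		Success:   handleErr == nil,
	}
	if handleErr != nil {
		req.ErrorMessage = handleErr.Error()
	}
	_, err := client.AcknowledgeMessage(ctx, req)
	return err
}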
|
||||
|
||||
func (x *AcknowledgeMessageRequest) Reset() {
|
||||
*x = AcknowledgeMessageRequest{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[16]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *AcknowledgeMessageRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*AcknowledgeMessageRequest) ProtoMessage() {}
|
||||
|
||||
func (x *AcknowledgeMessageRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[16]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use AcknowledgeMessageRequest.ProtoReflect.Descriptor instead.
|
||||
func (*AcknowledgeMessageRequest) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{16}
|
||||
}
|
||||
|
||||
func (x *AcknowledgeMessageRequest) GetMessageId() string {
|
||||
if x != nil {
|
||||
return x.MessageId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *AcknowledgeMessageRequest) GetPartyId() string {
|
||||
if x != nil {
|
||||
return x.PartyId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *AcknowledgeMessageRequest) GetSessionId() string {
|
||||
if x != nil {
|
||||
return x.SessionId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *AcknowledgeMessageRequest) GetSuccess() bool {
|
||||
if x != nil {
|
||||
return x.Success
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *AcknowledgeMessageRequest) GetErrorMessage() string {
|
||||
if x != nil {
|
||||
return x.ErrorMessage
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// AcknowledgeMessageResponse confirms acknowledgment
|
||||
type AcknowledgeMessageResponse struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *AcknowledgeMessageResponse) Reset() {
|
||||
*x = AcknowledgeMessageResponse{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[17]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *AcknowledgeMessageResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*AcknowledgeMessageResponse) ProtoMessage() {}
|
||||
|
||||
func (x *AcknowledgeMessageResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[17]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use AcknowledgeMessageResponse.ProtoReflect.Descriptor instead.
|
||||
func (*AcknowledgeMessageResponse) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{17}
|
||||
}
|
||||
|
||||
func (x *AcknowledgeMessageResponse) GetSuccess() bool {
|
||||
if x != nil {
|
||||
return x.Success
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *AcknowledgeMessageResponse) GetMessage() string {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetMessageStatusRequest requests message delivery status
|
||||
type GetMessageStatusRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
MessageId string `protobuf:"bytes,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"`
|
||||
SessionId string `protobuf:"bytes,2,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"`
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *GetMessageStatusRequest) Reset() {
|
||||
*x = GetMessageStatusRequest{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[18]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *GetMessageStatusRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetMessageStatusRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetMessageStatusRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[18]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetMessageStatusRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetMessageStatusRequest) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{18}
|
||||
}
|
||||
|
||||
func (x *GetMessageStatusRequest) GetMessageId() string {
|
||||
if x != nil {
|
||||
return x.MessageId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *GetMessageStatusRequest) GetSessionId() string {
|
||||
if x != nil {
|
||||
return x.SessionId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// MessageDeliveryStatus represents delivery status to a single party
type MessageDeliveryStatus struct {
	state          protoimpl.MessageState `protogen:"open.v1"`
	PartyId        string                 `protobuf:"bytes,1,opt,name=party_id,json=partyId,proto3" json:"party_id,omitempty"`
	Status         string                 `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`                                        // pending, delivered, acknowledged, failed
	DeliveredAt    int64                  `protobuf:"varint,3,opt,name=delivered_at,json=deliveredAt,proto3" json:"delivered_at,omitempty"`          // Unix timestamp milliseconds
	AcknowledgedAt int64                  `protobuf:"varint,4,opt,name=acknowledged_at,json=acknowledgedAt,proto3" json:"acknowledged_at,omitempty"` // Unix timestamp milliseconds
	RetryCount     int32                  `protobuf:"varint,5,opt,name=retry_count,json=retryCount,proto3" json:"retry_count,omitempty"`             // Number of delivery retries
	unknownFields  protoimpl.UnknownFields
	sizeCache      protoimpl.SizeCache
}
|
||||
|
||||
func (x *MessageDeliveryStatus) Reset() {
|
||||
*x = MessageDeliveryStatus{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[19]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *MessageDeliveryStatus) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*MessageDeliveryStatus) ProtoMessage() {}
|
||||
|
||||
func (x *MessageDeliveryStatus) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[19]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use MessageDeliveryStatus.ProtoReflect.Descriptor instead.
|
||||
func (*MessageDeliveryStatus) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{19}
|
||||
}
|
||||
|
||||
func (x *MessageDeliveryStatus) GetPartyId() string {
|
||||
if x != nil {
|
||||
return x.PartyId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *MessageDeliveryStatus) GetStatus() string {
|
||||
if x != nil {
|
||||
return x.Status
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *MessageDeliveryStatus) GetDeliveredAt() int64 {
|
||||
if x != nil {
|
||||
return x.DeliveredAt
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *MessageDeliveryStatus) GetAcknowledgedAt() int64 {
|
||||
if x != nil {
|
||||
return x.AcknowledgedAt
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *MessageDeliveryStatus) GetRetryCount() int32 {
|
||||
if x != nil {
|
||||
return x.RetryCount
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// GetMessageStatusResponse returns message delivery status
type GetMessageStatusResponse struct {
	state           protoimpl.MessageState   `protogen:"open.v1"`
	MessageId       string                   `protobuf:"bytes,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"`
	SessionId       string                   `protobuf:"bytes,2,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"`
	Deliveries      []*MessageDeliveryStatus `protobuf:"bytes,3,rep,name=deliveries,proto3" json:"deliveries,omitempty"`
	AllAcknowledged bool                     `protobuf:"varint,4,opt,name=all_acknowledged,json=allAcknowledged,proto3" json:"all_acknowledged,omitempty"` // True if all recipients acknowledged
	unknownFields   protoimpl.UnknownFields
	sizeCache       protoimpl.SizeCache
}
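A sender-side sketch, under the assumption that polling is acceptable here: it calls GetMessageStatus until every recipient has acknowledged or the context ends. The poll interval and helper name are illustrative.

// assumes: "context", "time", router "github.com/rwadurian/mpc-system/api/grpc/router/v1"

// waitForAcks polls delivery status until all recipients have acknowledged.
func waitForAcks(ctx context.Context, client router.MessageRouterClient, messageID, sessionID string) error {
	ticker := time.NewTicker(2 * time.Second) // assumed poll interval
	defer ticker.Stop()
	for {
		resp, err := client.GetMessageStatus(ctx, &router.GetMessageStatusRequest{
			MessageId: messageID,
			SessionId: sessionID,
		})
		if err != nil {
			return err
		}
		if resp.GetAllAcknowledged() {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}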
|
||||
|
||||
func (x *GetMessageStatusResponse) Reset() {
|
||||
*x = GetMessageStatusResponse{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[20]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *GetMessageStatusResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetMessageStatusResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetMessageStatusResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[20]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetMessageStatusResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetMessageStatusResponse) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{20}
|
||||
}
|
||||
|
||||
func (x *GetMessageStatusResponse) GetMessageId() string {
|
||||
if x != nil {
|
||||
return x.MessageId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *GetMessageStatusResponse) GetSessionId() string {
|
||||
if x != nil {
|
||||
return x.SessionId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *GetMessageStatusResponse) GetDeliveries() []*MessageDeliveryStatus {
|
||||
if x != nil {
|
||||
return x.Deliveries
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *GetMessageStatusResponse) GetAllAcknowledged() bool {
|
||||
if x != nil {
|
||||
return x.AllAcknowledged
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// HeartbeatRequest sends a heartbeat to keep the party alive
|
||||
type HeartbeatRequest struct {
|
||||
state protoimpl.MessageState `protogen:"open.v1"`
|
||||
PartyId string `protobuf:"bytes,1,opt,name=party_id,json=partyId,proto3" json:"party_id,omitempty"`
|
||||
Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Unix timestamp milliseconds
|
||||
unknownFields protoimpl.UnknownFields
|
||||
sizeCache protoimpl.SizeCache
|
||||
}
|
||||
|
||||
func (x *HeartbeatRequest) Reset() {
|
||||
*x = HeartbeatRequest{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[21]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *HeartbeatRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HeartbeatRequest) ProtoMessage() {}
|
||||
|
||||
func (x *HeartbeatRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[21]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HeartbeatRequest.ProtoReflect.Descriptor instead.
|
||||
func (*HeartbeatRequest) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{21}
|
||||
}
|
||||
|
||||
func (x *HeartbeatRequest) GetPartyId() string {
|
||||
if x != nil {
|
||||
return x.PartyId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HeartbeatRequest) GetTimestamp() int64 {
|
||||
if x != nil {
|
||||
return x.Timestamp
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// HeartbeatResponse confirms heartbeat receipt
type HeartbeatResponse struct {
	state           protoimpl.MessageState `protogen:"open.v1"`
	Success         bool                   `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
	ServerTimestamp int64                  `protobuf:"varint,2,opt,name=server_timestamp,json=serverTimestamp,proto3" json:"server_timestamp,omitempty"` // Server timestamp for clock sync
	PendingMessages int32                  `protobuf:"varint,3,opt,name=pending_messages,json=pendingMessages,proto3" json:"pending_messages,omitempty"` // Number of pending messages for this party
	unknownFields   protoimpl.UnknownFields
	sizeCache       protoimpl.SizeCache
}
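A hedged sketch of a party-side heartbeat loop that uses the pending_messages hint to trigger a catch-up fetch; the 15-second interval and the fetchPending callback are assumptions, not part of the generated API.

// assumes: "context", "time", router "github.com/rwadurian/mpc-system/api/grpc/router/v1"

// runHeartbeat keeps the party marked online and drains backlog when the
// router reports pending messages.
func runHeartbeat(ctx context.Context, client router.MessageRouterClient, partyID string,
	fetchPending func(context.Context) error) error {
	ticker := time.NewTicker(15 * time.Second) // assumed heartbeat interval
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			resp, err := client.Heartbeat(ctx, &router.HeartbeatRequest{
				PartyId:   partyID,
				Timestamp: time.Now().UnixMilli(),
			})
			if err != nil {
				continue // transient failures are retried on the next tick
			}
			if resp.GetPendingMessages() > 0 {
				_ = fetchPending(ctx) // e.g. a GetPendingMessages call
			}
		}
	}
}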
|
||||
|
||||
func (x *HeartbeatResponse) Reset() {
|
||||
*x = HeartbeatResponse{}
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[22]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
||||
func (x *HeartbeatResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HeartbeatResponse) ProtoMessage() {}
|
||||
|
||||
func (x *HeartbeatResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_api_proto_message_router_proto_msgTypes[22]
|
||||
if x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HeartbeatResponse.ProtoReflect.Descriptor instead.
|
||||
func (*HeartbeatResponse) Descriptor() ([]byte, []int) {
|
||||
return file_api_proto_message_router_proto_rawDescGZIP(), []int{22}
|
||||
}
|
||||
|
||||
func (x *HeartbeatResponse) GetSuccess() bool {
|
||||
if x != nil {
|
||||
return x.Success
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *HeartbeatResponse) GetServerTimestamp() int64 {
|
||||
if x != nil {
|
||||
return x.ServerTimestamp
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *HeartbeatResponse) GetPendingMessages() int32 {
|
||||
if x != nil {
|
||||
return x.PendingMessages
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var File_api_proto_message_router_proto protoreflect.FileDescriptor
|
||||
|
||||
const file_api_proto_message_router_proto_rawDesc = "" +
|
||||
|
|
@ -1035,12 +1557,18 @@ const file_api_proto_message_router_proto_rawDesc = "" +
|
|||
"\bparty_id\x18\x02 \x01(\tR\apartyId\x12'\n" +
|
||||
"\x0fafter_timestamp\x18\x03 \x01(\x03R\x0eafterTimestamp\"S\n" +
|
||||
"\x1aGetPendingMessagesResponse\x125\n" +
|
||||
"\bmessages\x18\x01 \x03(\v2\x19.mpc.router.v1.MPCMessageR\bmessages\"j\n" +
|
||||
"\bmessages\x18\x01 \x03(\v2\x19.mpc.router.v1.MPCMessageR\bmessages\"`\n" +
|
||||
"\x13NotificationChannel\x12\x14\n" +
|
||||
"\x05email\x18\x01 \x01(\tR\x05email\x12\x14\n" +
|
||||
"\x05phone\x18\x02 \x01(\tR\x05phone\x12\x1d\n" +
|
||||
"\n" +
|
||||
"push_token\x18\x03 \x01(\tR\tpushToken\"\xb2\x01\n" +
|
||||
"\x14RegisterPartyRequest\x12\x19\n" +
|
||||
"\bparty_id\x18\x01 \x01(\tR\apartyId\x12\x1d\n" +
|
||||
"\n" +
|
||||
"party_role\x18\x02 \x01(\tR\tpartyRole\x12\x18\n" +
|
||||
"\aversion\x18\x03 \x01(\tR\aversion\"p\n" +
|
||||
"\aversion\x18\x03 \x01(\tR\aversion\x12F\n" +
|
||||
"\fnotification\x18\x04 \x01(\v2\".mpc.router.v1.NotificationChannelR\fnotification\"p\n" +
|
||||
"\x15RegisterPartyResponse\x12\x18\n" +
|
||||
"\asuccess\x18\x01 \x01(\bR\asuccess\x12\x18\n" +
|
||||
"\amessage\x18\x02 \x01(\tR\amessage\x12#\n" +
|
||||
|
|
@ -1080,23 +1608,66 @@ const file_api_proto_message_router_proto_rawDesc = "" +
|
|||
"\vrole_filter\x18\x01 \x01(\tR\n" +
|
||||
"roleFilter\x12\x1f\n" +
|
||||
"\vonly_online\x18\x02 \x01(\bR\n" +
|
||||
"onlyOnline\"\x9f\x01\n" +
|
||||
"onlyOnline\"\xe7\x01\n" +
|
||||
"\x0fRegisteredParty\x12\x19\n" +
|
||||
"\bparty_id\x18\x01 \x01(\tR\apartyId\x12\x12\n" +
|
||||
"\x04role\x18\x02 \x01(\tR\x04role\x12\x16\n" +
|
||||
"\x06online\x18\x03 \x01(\bR\x06online\x12#\n" +
|
||||
"\rregistered_at\x18\x04 \x01(\x03R\fregisteredAt\x12 \n" +
|
||||
"\flast_seen_at\x18\x05 \x01(\x03R\n" +
|
||||
"lastSeenAt\"y\n" +
|
||||
"lastSeenAt\x12F\n" +
|
||||
"\fnotification\x18\x06 \x01(\v2\".mpc.router.v1.NotificationChannelR\fnotification\"y\n" +
|
||||
"\x1cGetRegisteredPartiesResponse\x128\n" +
|
||||
"\aparties\x18\x01 \x03(\v2\x1e.mpc.router.v1.RegisteredPartyR\aparties\x12\x1f\n" +
|
||||
"\vtotal_count\x18\x02 \x01(\x05R\n" +
|
||||
"totalCount2\xd0\x05\n" +
|
||||
"totalCount\"\xb3\x01\n" +
|
||||
"\x19AcknowledgeMessageRequest\x12\x1d\n" +
|
||||
"\n" +
|
||||
"message_id\x18\x01 \x01(\tR\tmessageId\x12\x19\n" +
|
||||
"\bparty_id\x18\x02 \x01(\tR\apartyId\x12\x1d\n" +
|
||||
"\n" +
|
||||
"session_id\x18\x03 \x01(\tR\tsessionId\x12\x18\n" +
|
||||
"\asuccess\x18\x04 \x01(\bR\asuccess\x12#\n" +
|
||||
"\rerror_message\x18\x05 \x01(\tR\ferrorMessage\"P\n" +
|
||||
"\x1aAcknowledgeMessageResponse\x12\x18\n" +
|
||||
"\asuccess\x18\x01 \x01(\bR\asuccess\x12\x18\n" +
|
||||
"\amessage\x18\x02 \x01(\tR\amessage\"W\n" +
|
||||
"\x17GetMessageStatusRequest\x12\x1d\n" +
|
||||
"\n" +
|
||||
"message_id\x18\x01 \x01(\tR\tmessageId\x12\x1d\n" +
|
||||
"\n" +
|
||||
"session_id\x18\x02 \x01(\tR\tsessionId\"\xb7\x01\n" +
|
||||
"\x15MessageDeliveryStatus\x12\x19\n" +
|
||||
"\bparty_id\x18\x01 \x01(\tR\apartyId\x12\x16\n" +
|
||||
"\x06status\x18\x02 \x01(\tR\x06status\x12!\n" +
|
||||
"\fdelivered_at\x18\x03 \x01(\x03R\vdeliveredAt\x12'\n" +
|
||||
"\x0facknowledged_at\x18\x04 \x01(\x03R\x0eacknowledgedAt\x12\x1f\n" +
|
||||
"\vretry_count\x18\x05 \x01(\x05R\n" +
|
||||
"retryCount\"\xc9\x01\n" +
|
||||
"\x18GetMessageStatusResponse\x12\x1d\n" +
|
||||
"\n" +
|
||||
"message_id\x18\x01 \x01(\tR\tmessageId\x12\x1d\n" +
|
||||
"\n" +
|
||||
"session_id\x18\x02 \x01(\tR\tsessionId\x12D\n" +
|
||||
"\n" +
|
||||
"deliveries\x18\x03 \x03(\v2$.mpc.router.v1.MessageDeliveryStatusR\n" +
|
||||
"deliveries\x12)\n" +
|
||||
"\x10all_acknowledged\x18\x04 \x01(\bR\x0fallAcknowledged\"K\n" +
|
||||
"\x10HeartbeatRequest\x12\x19\n" +
|
||||
"\bparty_id\x18\x01 \x01(\tR\apartyId\x12\x1c\n" +
|
||||
"\ttimestamp\x18\x02 \x01(\x03R\ttimestamp\"\x83\x01\n" +
|
||||
"\x11HeartbeatResponse\x12\x18\n" +
|
||||
"\asuccess\x18\x01 \x01(\bR\asuccess\x12)\n" +
|
||||
"\x10server_timestamp\x18\x02 \x01(\x03R\x0fserverTimestamp\x12)\n" +
|
||||
"\x10pending_messages\x18\x03 \x01(\x05R\x0fpendingMessages2\xf0\a\n" +
|
||||
"\rMessageRouter\x12W\n" +
|
||||
"\fRouteMessage\x12\".mpc.router.v1.RouteMessageRequest\x1a#.mpc.router.v1.RouteMessageResponse\x12Y\n" +
|
||||
"\x11SubscribeMessages\x12'.mpc.router.v1.SubscribeMessagesRequest\x1a\x19.mpc.router.v1.MPCMessage0\x01\x12i\n" +
|
||||
"\x12GetPendingMessages\x12(.mpc.router.v1.GetPendingMessagesRequest\x1a).mpc.router.v1.GetPendingMessagesResponse\x12Z\n" +
|
||||
"\rRegisterParty\x12#.mpc.router.v1.RegisterPartyRequest\x1a$.mpc.router.v1.RegisterPartyResponse\x12e\n" +
|
||||
"\x12GetPendingMessages\x12(.mpc.router.v1.GetPendingMessagesRequest\x1a).mpc.router.v1.GetPendingMessagesResponse\x12i\n" +
|
||||
"\x12AcknowledgeMessage\x12(.mpc.router.v1.AcknowledgeMessageRequest\x1a).mpc.router.v1.AcknowledgeMessageResponse\x12c\n" +
|
||||
"\x10GetMessageStatus\x12&.mpc.router.v1.GetMessageStatusRequest\x1a'.mpc.router.v1.GetMessageStatusResponse\x12Z\n" +
|
||||
"\rRegisterParty\x12#.mpc.router.v1.RegisterPartyRequest\x1a$.mpc.router.v1.RegisterPartyResponse\x12N\n" +
|
||||
"\tHeartbeat\x12\x1f.mpc.router.v1.HeartbeatRequest\x1a .mpc.router.v1.HeartbeatResponse\x12e\n" +
|
||||
"\x16SubscribeSessionEvents\x12,.mpc.router.v1.SubscribeSessionEventsRequest\x1a\x1b.mpc.router.v1.SessionEvent0\x01\x12l\n" +
|
||||
"\x13PublishSessionEvent\x12).mpc.router.v1.PublishSessionEventRequest\x1a*.mpc.router.v1.PublishSessionEventResponse\x12o\n" +
|
||||
"\x14GetRegisteredParties\x12*.mpc.router.v1.GetRegisteredPartiesRequest\x1a+.mpc.router.v1.GetRegisteredPartiesResponseB;Z9github.com/rwadurian/mpc-system/api/grpc/router/v1;routerb\x06proto3"
|
||||
|
|
@ -1113,7 +1684,7 @@ func file_api_proto_message_router_proto_rawDescGZIP() []byte {
|
|||
return file_api_proto_message_router_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_api_proto_message_router_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
|
||||
var file_api_proto_message_router_proto_msgTypes = make([]protoimpl.MessageInfo, 24)
|
||||
var file_api_proto_message_router_proto_goTypes = []any{
|
||||
(*RouteMessageRequest)(nil), // 0: mpc.router.v1.RouteMessageRequest
|
||||
(*RouteMessageResponse)(nil), // 1: mpc.router.v1.RouteMessageResponse
|
||||
|
|
@ -1121,41 +1692,58 @@ var file_api_proto_message_router_proto_goTypes = []any{
|
|||
(*MPCMessage)(nil), // 3: mpc.router.v1.MPCMessage
|
||||
(*GetPendingMessagesRequest)(nil), // 4: mpc.router.v1.GetPendingMessagesRequest
|
||||
(*GetPendingMessagesResponse)(nil), // 5: mpc.router.v1.GetPendingMessagesResponse
|
||||
(*RegisterPartyRequest)(nil), // 6: mpc.router.v1.RegisterPartyRequest
|
||||
(*RegisterPartyResponse)(nil), // 7: mpc.router.v1.RegisterPartyResponse
|
||||
(*SubscribeSessionEventsRequest)(nil), // 8: mpc.router.v1.SubscribeSessionEventsRequest
|
||||
(*SessionEvent)(nil), // 9: mpc.router.v1.SessionEvent
|
||||
(*PublishSessionEventRequest)(nil), // 10: mpc.router.v1.PublishSessionEventRequest
|
||||
(*PublishSessionEventResponse)(nil), // 11: mpc.router.v1.PublishSessionEventResponse
|
||||
(*GetRegisteredPartiesRequest)(nil), // 12: mpc.router.v1.GetRegisteredPartiesRequest
|
||||
(*RegisteredParty)(nil), // 13: mpc.router.v1.RegisteredParty
|
||||
(*GetRegisteredPartiesResponse)(nil), // 14: mpc.router.v1.GetRegisteredPartiesResponse
|
||||
nil, // 15: mpc.router.v1.SessionEvent.JoinTokensEntry
|
||||
(*NotificationChannel)(nil), // 6: mpc.router.v1.NotificationChannel
|
||||
(*RegisterPartyRequest)(nil), // 7: mpc.router.v1.RegisterPartyRequest
|
||||
(*RegisterPartyResponse)(nil), // 8: mpc.router.v1.RegisterPartyResponse
|
||||
(*SubscribeSessionEventsRequest)(nil), // 9: mpc.router.v1.SubscribeSessionEventsRequest
|
||||
(*SessionEvent)(nil), // 10: mpc.router.v1.SessionEvent
|
||||
(*PublishSessionEventRequest)(nil), // 11: mpc.router.v1.PublishSessionEventRequest
|
||||
(*PublishSessionEventResponse)(nil), // 12: mpc.router.v1.PublishSessionEventResponse
|
||||
(*GetRegisteredPartiesRequest)(nil), // 13: mpc.router.v1.GetRegisteredPartiesRequest
|
||||
(*RegisteredParty)(nil), // 14: mpc.router.v1.RegisteredParty
|
||||
(*GetRegisteredPartiesResponse)(nil), // 15: mpc.router.v1.GetRegisteredPartiesResponse
|
||||
(*AcknowledgeMessageRequest)(nil), // 16: mpc.router.v1.AcknowledgeMessageRequest
|
||||
(*AcknowledgeMessageResponse)(nil), // 17: mpc.router.v1.AcknowledgeMessageResponse
|
||||
(*GetMessageStatusRequest)(nil), // 18: mpc.router.v1.GetMessageStatusRequest
|
||||
(*MessageDeliveryStatus)(nil), // 19: mpc.router.v1.MessageDeliveryStatus
|
||||
(*GetMessageStatusResponse)(nil), // 20: mpc.router.v1.GetMessageStatusResponse
|
||||
(*HeartbeatRequest)(nil), // 21: mpc.router.v1.HeartbeatRequest
|
||||
(*HeartbeatResponse)(nil), // 22: mpc.router.v1.HeartbeatResponse
|
||||
nil, // 23: mpc.router.v1.SessionEvent.JoinTokensEntry
|
||||
}
|
||||
var file_api_proto_message_router_proto_depIdxs = []int32{
|
||||
3, // 0: mpc.router.v1.GetPendingMessagesResponse.messages:type_name -> mpc.router.v1.MPCMessage
|
||||
15, // 1: mpc.router.v1.SessionEvent.join_tokens:type_name -> mpc.router.v1.SessionEvent.JoinTokensEntry
|
||||
9, // 2: mpc.router.v1.PublishSessionEventRequest.event:type_name -> mpc.router.v1.SessionEvent
|
||||
13, // 3: mpc.router.v1.GetRegisteredPartiesResponse.parties:type_name -> mpc.router.v1.RegisteredParty
|
||||
0, // 4: mpc.router.v1.MessageRouter.RouteMessage:input_type -> mpc.router.v1.RouteMessageRequest
|
||||
2, // 5: mpc.router.v1.MessageRouter.SubscribeMessages:input_type -> mpc.router.v1.SubscribeMessagesRequest
|
||||
4, // 6: mpc.router.v1.MessageRouter.GetPendingMessages:input_type -> mpc.router.v1.GetPendingMessagesRequest
|
||||
6, // 7: mpc.router.v1.MessageRouter.RegisterParty:input_type -> mpc.router.v1.RegisterPartyRequest
|
||||
8, // 8: mpc.router.v1.MessageRouter.SubscribeSessionEvents:input_type -> mpc.router.v1.SubscribeSessionEventsRequest
|
||||
10, // 9: mpc.router.v1.MessageRouter.PublishSessionEvent:input_type -> mpc.router.v1.PublishSessionEventRequest
|
||||
12, // 10: mpc.router.v1.MessageRouter.GetRegisteredParties:input_type -> mpc.router.v1.GetRegisteredPartiesRequest
|
||||
1, // 11: mpc.router.v1.MessageRouter.RouteMessage:output_type -> mpc.router.v1.RouteMessageResponse
|
||||
3, // 12: mpc.router.v1.MessageRouter.SubscribeMessages:output_type -> mpc.router.v1.MPCMessage
|
||||
5, // 13: mpc.router.v1.MessageRouter.GetPendingMessages:output_type -> mpc.router.v1.GetPendingMessagesResponse
|
||||
7, // 14: mpc.router.v1.MessageRouter.RegisterParty:output_type -> mpc.router.v1.RegisterPartyResponse
|
||||
9, // 15: mpc.router.v1.MessageRouter.SubscribeSessionEvents:output_type -> mpc.router.v1.SessionEvent
|
||||
11, // 16: mpc.router.v1.MessageRouter.PublishSessionEvent:output_type -> mpc.router.v1.PublishSessionEventResponse
|
||||
14, // 17: mpc.router.v1.MessageRouter.GetRegisteredParties:output_type -> mpc.router.v1.GetRegisteredPartiesResponse
|
||||
11, // [11:18] is the sub-list for method output_type
|
||||
4, // [4:11] is the sub-list for method input_type
|
||||
4, // [4:4] is the sub-list for extension type_name
|
||||
4, // [4:4] is the sub-list for extension extendee
|
||||
0, // [0:4] is the sub-list for field type_name
|
||||
6, // 1: mpc.router.v1.RegisterPartyRequest.notification:type_name -> mpc.router.v1.NotificationChannel
|
||||
23, // 2: mpc.router.v1.SessionEvent.join_tokens:type_name -> mpc.router.v1.SessionEvent.JoinTokensEntry
|
||||
10, // 3: mpc.router.v1.PublishSessionEventRequest.event:type_name -> mpc.router.v1.SessionEvent
|
||||
6, // 4: mpc.router.v1.RegisteredParty.notification:type_name -> mpc.router.v1.NotificationChannel
|
||||
14, // 5: mpc.router.v1.GetRegisteredPartiesResponse.parties:type_name -> mpc.router.v1.RegisteredParty
|
||||
19, // 6: mpc.router.v1.GetMessageStatusResponse.deliveries:type_name -> mpc.router.v1.MessageDeliveryStatus
|
||||
0, // 7: mpc.router.v1.MessageRouter.RouteMessage:input_type -> mpc.router.v1.RouteMessageRequest
|
||||
2, // 8: mpc.router.v1.MessageRouter.SubscribeMessages:input_type -> mpc.router.v1.SubscribeMessagesRequest
|
||||
4, // 9: mpc.router.v1.MessageRouter.GetPendingMessages:input_type -> mpc.router.v1.GetPendingMessagesRequest
|
||||
16, // 10: mpc.router.v1.MessageRouter.AcknowledgeMessage:input_type -> mpc.router.v1.AcknowledgeMessageRequest
|
||||
18, // 11: mpc.router.v1.MessageRouter.GetMessageStatus:input_type -> mpc.router.v1.GetMessageStatusRequest
|
||||
7, // 12: mpc.router.v1.MessageRouter.RegisterParty:input_type -> mpc.router.v1.RegisterPartyRequest
|
||||
21, // 13: mpc.router.v1.MessageRouter.Heartbeat:input_type -> mpc.router.v1.HeartbeatRequest
|
||||
9, // 14: mpc.router.v1.MessageRouter.SubscribeSessionEvents:input_type -> mpc.router.v1.SubscribeSessionEventsRequest
|
||||
11, // 15: mpc.router.v1.MessageRouter.PublishSessionEvent:input_type -> mpc.router.v1.PublishSessionEventRequest
|
||||
13, // 16: mpc.router.v1.MessageRouter.GetRegisteredParties:input_type -> mpc.router.v1.GetRegisteredPartiesRequest
|
||||
1, // 17: mpc.router.v1.MessageRouter.RouteMessage:output_type -> mpc.router.v1.RouteMessageResponse
|
||||
3, // 18: mpc.router.v1.MessageRouter.SubscribeMessages:output_type -> mpc.router.v1.MPCMessage
|
||||
5, // 19: mpc.router.v1.MessageRouter.GetPendingMessages:output_type -> mpc.router.v1.GetPendingMessagesResponse
|
||||
17, // 20: mpc.router.v1.MessageRouter.AcknowledgeMessage:output_type -> mpc.router.v1.AcknowledgeMessageResponse
|
||||
20, // 21: mpc.router.v1.MessageRouter.GetMessageStatus:output_type -> mpc.router.v1.GetMessageStatusResponse
|
||||
8, // 22: mpc.router.v1.MessageRouter.RegisterParty:output_type -> mpc.router.v1.RegisterPartyResponse
|
||||
22, // 23: mpc.router.v1.MessageRouter.Heartbeat:output_type -> mpc.router.v1.HeartbeatResponse
|
||||
10, // 24: mpc.router.v1.MessageRouter.SubscribeSessionEvents:output_type -> mpc.router.v1.SessionEvent
|
||||
12, // 25: mpc.router.v1.MessageRouter.PublishSessionEvent:output_type -> mpc.router.v1.PublishSessionEventResponse
|
||||
15, // 26: mpc.router.v1.MessageRouter.GetRegisteredParties:output_type -> mpc.router.v1.GetRegisteredPartiesResponse
|
||||
17, // [17:27] is the sub-list for method output_type
|
||||
7, // [7:17] is the sub-list for method input_type
|
||||
7, // [7:7] is the sub-list for extension type_name
|
||||
7, // [7:7] is the sub-list for extension extendee
|
||||
0, // [0:7] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_api_proto_message_router_proto_init() }
|
||||
|
|
@ -1169,7 +1757,7 @@ func file_api_proto_message_router_proto_init() {
|
|||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: unsafe.Slice(unsafe.StringData(file_api_proto_message_router_proto_rawDesc), len(file_api_proto_message_router_proto_rawDesc)),
|
||||
NumEnums: 0,
|
||||
NumMessages: 16,
|
||||
NumMessages: 24,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
|
|
|
|||
|
|
@ -22,7 +22,10 @@ const (
	MessageRouter_RouteMessage_FullMethodName           = "/mpc.router.v1.MessageRouter/RouteMessage"
	MessageRouter_SubscribeMessages_FullMethodName      = "/mpc.router.v1.MessageRouter/SubscribeMessages"
	MessageRouter_GetPendingMessages_FullMethodName     = "/mpc.router.v1.MessageRouter/GetPendingMessages"
	MessageRouter_AcknowledgeMessage_FullMethodName     = "/mpc.router.v1.MessageRouter/AcknowledgeMessage"
	MessageRouter_GetMessageStatus_FullMethodName       = "/mpc.router.v1.MessageRouter/GetMessageStatus"
	MessageRouter_RegisterParty_FullMethodName          = "/mpc.router.v1.MessageRouter/RegisterParty"
	MessageRouter_Heartbeat_FullMethodName              = "/mpc.router.v1.MessageRouter/Heartbeat"
	MessageRouter_SubscribeSessionEvents_FullMethodName = "/mpc.router.v1.MessageRouter/SubscribeSessionEvents"
	MessageRouter_PublishSessionEvent_FullMethodName    = "/mpc.router.v1.MessageRouter/PublishSessionEvent"
	MessageRouter_GetRegisteredParties_FullMethodName   = "/mpc.router.v1.MessageRouter/GetRegisteredParties"

@ -40,8 +43,15 @@ type MessageRouterClient interface {
	SubscribeMessages(ctx context.Context, in *SubscribeMessagesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[MPCMessage], error)
	// GetPendingMessages retrieves pending messages (polling alternative)
	GetPendingMessages(ctx context.Context, in *GetPendingMessagesRequest, opts ...grpc.CallOption) (*GetPendingMessagesResponse, error)
	// AcknowledgeMessage acknowledges receipt of a message
	// Must be called after processing a message to confirm delivery
	AcknowledgeMessage(ctx context.Context, in *AcknowledgeMessageRequest, opts ...grpc.CallOption) (*AcknowledgeMessageResponse, error)
	// GetMessageStatus gets the delivery status of a message
	GetMessageStatus(ctx context.Context, in *GetMessageStatusRequest, opts ...grpc.CallOption) (*GetMessageStatusResponse, error)
	// RegisterParty registers a party with the message router (party actively connects)
	RegisterParty(ctx context.Context, in *RegisterPartyRequest, opts ...grpc.CallOption) (*RegisterPartyResponse, error)
	// Heartbeat sends a heartbeat to keep the party alive
	Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error)
	// SubscribeSessionEvents subscribes to session lifecycle events (session start, etc.)
	SubscribeSessionEvents(ctx context.Context, in *SubscribeSessionEventsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SessionEvent], error)
	// PublishSessionEvent publishes a session event (called by Session Coordinator)
@ -97,6 +107,26 @@ func (c *messageRouterClient) GetPendingMessages(ctx context.Context, in *GetPen
|
|||
return out, nil
|
||||
}
|
||||
|
||||
func (c *messageRouterClient) AcknowledgeMessage(ctx context.Context, in *AcknowledgeMessageRequest, opts ...grpc.CallOption) (*AcknowledgeMessageResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(AcknowledgeMessageResponse)
|
||||
err := c.cc.Invoke(ctx, MessageRouter_AcknowledgeMessage_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *messageRouterClient) GetMessageStatus(ctx context.Context, in *GetMessageStatusRequest, opts ...grpc.CallOption) (*GetMessageStatusResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(GetMessageStatusResponse)
|
||||
err := c.cc.Invoke(ctx, MessageRouter_GetMessageStatus_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *messageRouterClient) RegisterParty(ctx context.Context, in *RegisterPartyRequest, opts ...grpc.CallOption) (*RegisterPartyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(RegisterPartyResponse)
|
||||
|
|
@ -107,6 +137,16 @@ func (c *messageRouterClient) RegisterParty(ctx context.Context, in *RegisterPar
|
|||
return out, nil
|
||||
}
|
||||
|
||||
func (c *messageRouterClient) Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(HeartbeatResponse)
|
||||
err := c.cc.Invoke(ctx, MessageRouter_Heartbeat_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *messageRouterClient) SubscribeSessionEvents(ctx context.Context, in *SubscribeSessionEventsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SessionEvent], error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
stream, err := c.cc.NewStream(ctx, &MessageRouter_ServiceDesc.Streams[1], MessageRouter_SubscribeSessionEvents_FullMethodName, cOpts...)
|
||||
|
|
@ -158,8 +198,15 @@ type MessageRouterServer interface {
	SubscribeMessages(*SubscribeMessagesRequest, grpc.ServerStreamingServer[MPCMessage]) error
	// GetPendingMessages retrieves pending messages (polling alternative)
	GetPendingMessages(context.Context, *GetPendingMessagesRequest) (*GetPendingMessagesResponse, error)
	// AcknowledgeMessage acknowledges receipt of a message
	// Must be called after processing a message to confirm delivery
	AcknowledgeMessage(context.Context, *AcknowledgeMessageRequest) (*AcknowledgeMessageResponse, error)
	// GetMessageStatus gets the delivery status of a message
	GetMessageStatus(context.Context, *GetMessageStatusRequest) (*GetMessageStatusResponse, error)
	// RegisterParty registers a party with the message router (party actively connects)
	RegisterParty(context.Context, *RegisterPartyRequest) (*RegisterPartyResponse, error)
	// Heartbeat sends a heartbeat to keep the party alive
	Heartbeat(context.Context, *HeartbeatRequest) (*HeartbeatResponse, error)
	// SubscribeSessionEvents subscribes to session lifecycle events (session start, etc.)
	SubscribeSessionEvents(*SubscribeSessionEventsRequest, grpc.ServerStreamingServer[SessionEvent]) error
	// PublishSessionEvent publishes a session event (called by Session Coordinator)
@ -185,9 +232,18 @@ func (UnimplementedMessageRouterServer) SubscribeMessages(*SubscribeMessagesRequ
|
|||
func (UnimplementedMessageRouterServer) GetPendingMessages(context.Context, *GetPendingMessagesRequest) (*GetPendingMessagesResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method GetPendingMessages not implemented")
|
||||
}
|
||||
func (UnimplementedMessageRouterServer) AcknowledgeMessage(context.Context, *AcknowledgeMessageRequest) (*AcknowledgeMessageResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method AcknowledgeMessage not implemented")
|
||||
}
|
||||
func (UnimplementedMessageRouterServer) GetMessageStatus(context.Context, *GetMessageStatusRequest) (*GetMessageStatusResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method GetMessageStatus not implemented")
|
||||
}
|
||||
func (UnimplementedMessageRouterServer) RegisterParty(context.Context, *RegisterPartyRequest) (*RegisterPartyResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method RegisterParty not implemented")
|
||||
}
|
||||
func (UnimplementedMessageRouterServer) Heartbeat(context.Context, *HeartbeatRequest) (*HeartbeatResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method Heartbeat not implemented")
|
||||
}
|
||||
func (UnimplementedMessageRouterServer) SubscribeSessionEvents(*SubscribeSessionEventsRequest, grpc.ServerStreamingServer[SessionEvent]) error {
|
||||
return status.Error(codes.Unimplemented, "method SubscribeSessionEvents not implemented")
|
||||
}
|
||||
|
|
@ -265,6 +321,42 @@ func _MessageRouter_GetPendingMessages_Handler(srv interface{}, ctx context.Cont
|
|||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _MessageRouter_AcknowledgeMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(AcknowledgeMessageRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(MessageRouterServer).AcknowledgeMessage(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: MessageRouter_AcknowledgeMessage_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(MessageRouterServer).AcknowledgeMessage(ctx, req.(*AcknowledgeMessageRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _MessageRouter_GetMessageStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetMessageStatusRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(MessageRouterServer).GetMessageStatus(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: MessageRouter_GetMessageStatus_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(MessageRouterServer).GetMessageStatus(ctx, req.(*GetMessageStatusRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _MessageRouter_RegisterParty_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RegisterPartyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
|
|
@ -283,6 +375,24 @@ func _MessageRouter_RegisterParty_Handler(srv interface{}, ctx context.Context,
|
|||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _MessageRouter_Heartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(HeartbeatRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(MessageRouterServer).Heartbeat(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: MessageRouter_Heartbeat_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(MessageRouterServer).Heartbeat(ctx, req.(*HeartbeatRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _MessageRouter_SubscribeSessionEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(SubscribeSessionEventsRequest)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
|
|
@ -345,10 +455,22 @@ var MessageRouter_ServiceDesc = grpc.ServiceDesc{
|
|||
MethodName: "GetPendingMessages",
|
||||
Handler: _MessageRouter_GetPendingMessages_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "AcknowledgeMessage",
|
||||
Handler: _MessageRouter_AcknowledgeMessage_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetMessageStatus",
|
||||
Handler: _MessageRouter_GetMessageStatus_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "RegisterParty",
|
||||
Handler: _MessageRouter_RegisterParty_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Heartbeat",
|
||||
Handler: _MessageRouter_Heartbeat_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "PublishSessionEvent",
|
||||
Handler: _MessageRouter_PublishSessionEvent_Handler,
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -15,9 +15,19 @@ service MessageRouter {
|
|||
// GetPendingMessages retrieves pending messages (polling alternative)
|
||||
rpc GetPendingMessages(GetPendingMessagesRequest) returns (GetPendingMessagesResponse);
|
||||
|
||||
// AcknowledgeMessage acknowledges receipt of a message
|
||||
// Must be called after processing a message to confirm delivery
|
||||
rpc AcknowledgeMessage(AcknowledgeMessageRequest) returns (AcknowledgeMessageResponse);
|
||||
|
||||
// GetMessageStatus gets the delivery status of a message
|
||||
rpc GetMessageStatus(GetMessageStatusRequest) returns (GetMessageStatusResponse);
|
||||
|
||||
// RegisterParty registers a party with the message router (party actively connects)
|
||||
rpc RegisterParty(RegisterPartyRequest) returns (RegisterPartyResponse);
|
||||
|
||||
// Heartbeat sends a heartbeat to keep the party alive
|
||||
rpc Heartbeat(HeartbeatRequest) returns (HeartbeatResponse);
|
||||
|
||||
// SubscribeSessionEvents subscribes to session lifecycle events (session start, etc.)
|
||||
rpc SubscribeSessionEvents(SubscribeSessionEventsRequest) returns (stream SessionEvent);
|
||||
|
||||
|
|
@ -74,11 +84,21 @@ message GetPendingMessagesResponse {
|
|||
repeated MPCMessage messages = 1;
|
||||
}
|
||||
|
||||
// NotificationChannel represents a notification channel for offline parties
|
||||
// If a party has notification channels, it operates in offline mode (24h async)
|
||||
// If no notification channels, it operates in real-time mode (Message Router push)
|
||||
message NotificationChannel {
|
||||
string email = 1; // Optional: email address for notifications
|
||||
string phone = 2; // Optional: phone number for SMS notifications
|
||||
string push_token = 3; // Optional: push notification token (FCM/APNs)
|
||||
}
|
||||
|
||||
// RegisterPartyRequest registers a party with the router
|
||||
message RegisterPartyRequest {
|
||||
string party_id = 1; // Unique party identifier
|
||||
string party_role = 2; // persistent, delegate, or temporary
|
||||
string version = 3; // Party software version
|
||||
NotificationChannel notification = 4; // Optional: notification channel for offline mode
|
||||
}
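A minimal sketch of the two registration modes implied by the messages above, assuming an already-connected MessageRouterClient; the generated-package import path is a placeholder, not the repository's actual layout:

package example

import (
	"context"

	router "github.com/rwadurian/mpc-system/api/gen/router" // assumed import path
)

// registerParties registers one real-time party (no NotificationChannel, so it
// is served by Message Router push) and one offline party (NotificationChannel
// set, so it is handled asynchronously).
func registerParties(ctx context.Context, c router.MessageRouterClient) error {
	// Real-time party: reachable via the SubscribeMessages push stream.
	if _, err := c.RegisterParty(ctx, &router.RegisterPartyRequest{
		PartyId:   "party-realtime-1",
		PartyRole: "persistent",
		Version:   "v0.1.0",
	}); err != nil {
		return err
	}

	// Offline party: notification channel present, delivery is async.
	_, err := c.RegisterParty(ctx, &router.RegisterPartyRequest{
		PartyId:   "party-offline-1",
		PartyRole: "delegate",
		Version:   "v0.1.0",
		Notification: &router.NotificationChannel{
			Email: "ops@example.com",
		},
	})
	return err
}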
|
||||
|
||||
// RegisterPartyResponse confirms party registration
|
||||
|
|
@ -132,6 +152,7 @@ message RegisteredParty {
|
|||
bool online = 3; // Whether party is currently connected
|
||||
int64 registered_at = 4; // Unix timestamp milliseconds
|
||||
int64 last_seen_at = 5; // Unix timestamp milliseconds
|
||||
NotificationChannel notification = 6; // Optional: notification channel (if set, party is offline mode)
|
||||
}
|
||||
|
||||
// GetRegisteredPartiesResponse returns registered parties
|
||||
|
|
@ -139,3 +160,54 @@ message GetRegisteredPartiesResponse {
|
|||
repeated RegisteredParty parties = 1;
|
||||
int32 total_count = 2;
|
||||
}
|
||||
|
||||
// AcknowledgeMessageRequest acknowledges message receipt
|
||||
message AcknowledgeMessageRequest {
|
||||
string message_id = 1; // ID of the message being acknowledged
|
||||
string party_id = 2; // ID of the party acknowledging
|
||||
string session_id = 3; // Session the message belongs to
|
||||
bool success = 4; // True if message was processed successfully
|
||||
string error_message = 5; // Error message if processing failed
|
||||
}
|
||||
|
||||
// AcknowledgeMessageResponse confirms acknowledgment
|
||||
message AcknowledgeMessageResponse {
|
||||
bool success = 1;
|
||||
string message = 2;
|
||||
}
|
||||
|
||||
// GetMessageStatusRequest requests message delivery status
|
||||
message GetMessageStatusRequest {
|
||||
string message_id = 1;
|
||||
string session_id = 2;
|
||||
}
|
||||
|
||||
// MessageDeliveryStatus represents delivery status to a single party
|
||||
message MessageDeliveryStatus {
|
||||
string party_id = 1;
|
||||
string status = 2; // pending, delivered, acknowledged, failed
|
||||
int64 delivered_at = 3; // Unix timestamp milliseconds
|
||||
int64 acknowledged_at = 4; // Unix timestamp milliseconds
|
||||
int32 retry_count = 5; // Number of delivery retries
|
||||
}
|
||||
|
||||
// GetMessageStatusResponse returns message delivery status
|
||||
message GetMessageStatusResponse {
|
||||
string message_id = 1;
|
||||
string session_id = 2;
|
||||
repeated MessageDeliveryStatus deliveries = 3;
|
||||
bool all_acknowledged = 4; // True if all recipients acknowledged
|
||||
}
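For the sending side, a short sketch of polling GetMessageStatus until all_acknowledged flips, again assuming a connected client and a placeholder import path:

package example

import (
	"context"
	"time"

	router "github.com/rwadurian/mpc-system/api/gen/router" // assumed import path
)

// waitForAcks polls the delivery status of a routed message until every
// recipient has acknowledged it, or the context expires.
func waitForAcks(ctx context.Context, c router.MessageRouterClient, messageID, sessionID string) error {
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()
	for {
		resp, err := c.GetMessageStatus(ctx, &router.GetMessageStatusRequest{
			MessageId: messageID,
			SessionId: sessionID,
		})
		if err != nil {
			return err
		}
		if resp.GetAllAcknowledged() {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}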
|
||||
|
||||
// HeartbeatRequest sends a heartbeat to keep the party alive
|
||||
message HeartbeatRequest {
|
||||
string party_id = 1;
|
||||
int64 timestamp = 2; // Unix timestamp milliseconds
|
||||
}
|
||||
|
||||
// HeartbeatResponse confirms heartbeat receipt
|
||||
message HeartbeatResponse {
|
||||
bool success = 1;
|
||||
int64 server_timestamp = 2; // Server timestamp for clock sync
|
||||
int32 pending_messages = 3; // Number of pending messages for this party
|
||||
}
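On the receiving side, the acknowledgement and heartbeat messages above combine into a simple reliability loop. A sketch under the same assumptions (connected client, placeholder import path); the 30s interval is illustrative only:

package example

import (
	"context"
	"log"
	"time"

	router "github.com/rwadurian/mpc-system/api/gen/router" // assumed import path
)

// ackMessage confirms (or rejects) a message after the party has processed it.
func ackMessage(ctx context.Context, c router.MessageRouterClient, partyID, sessionID, messageID string, processErr error) error {
	req := &router.AcknowledgeMessageRequest{
		MessageId: messageID,
		PartyId:   partyID,
		SessionId: sessionID,
		Success:   processErr == nil,
	}
	if processErr != nil {
		req.ErrorMessage = processErr.Error()
	}
	_, err := c.AcknowledgeMessage(ctx, req)
	return err
}

// heartbeatLoop keeps the party marked alive; a non-zero pending_messages count
// in the response is the cue to drain the backlog via GetPendingMessages.
func heartbeatLoop(ctx context.Context, c router.MessageRouterClient, partyID string) {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			resp, err := c.Heartbeat(ctx, &router.HeartbeatRequest{
				PartyId:   partyID,
				Timestamp: time.Now().UnixMilli(),
			})
			if err != nil {
				log.Printf("heartbeat failed: %v", err)
				continue
			}
			if n := resp.GetPendingMessages(); n > 0 {
				log.Printf("%d pending messages, fetch them via GetPendingMessages", n)
			}
		}
	}
}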
|
||||
|
|
|
|||
|
|
@ -0,0 +1,496 @@
|
|||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.6.0
|
||||
// - protoc v6.33.1
|
||||
// source: api/proto/message_router.proto
|
||||
|
||||
package router
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.64.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion9
|
||||
|
||||
const (
|
||||
MessageRouter_RouteMessage_FullMethodName = "/mpc.router.v1.MessageRouter/RouteMessage"
|
||||
MessageRouter_SubscribeMessages_FullMethodName = "/mpc.router.v1.MessageRouter/SubscribeMessages"
|
||||
MessageRouter_GetPendingMessages_FullMethodName = "/mpc.router.v1.MessageRouter/GetPendingMessages"
|
||||
MessageRouter_AcknowledgeMessage_FullMethodName = "/mpc.router.v1.MessageRouter/AcknowledgeMessage"
|
||||
MessageRouter_GetMessageStatus_FullMethodName = "/mpc.router.v1.MessageRouter/GetMessageStatus"
|
||||
MessageRouter_RegisterParty_FullMethodName = "/mpc.router.v1.MessageRouter/RegisterParty"
|
||||
MessageRouter_Heartbeat_FullMethodName = "/mpc.router.v1.MessageRouter/Heartbeat"
|
||||
MessageRouter_SubscribeSessionEvents_FullMethodName = "/mpc.router.v1.MessageRouter/SubscribeSessionEvents"
|
||||
MessageRouter_PublishSessionEvent_FullMethodName = "/mpc.router.v1.MessageRouter/PublishSessionEvent"
|
||||
MessageRouter_GetRegisteredParties_FullMethodName = "/mpc.router.v1.MessageRouter/GetRegisteredParties"
|
||||
)
|
||||
|
||||
// MessageRouterClient is the client API for MessageRouter service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
//
|
||||
// MessageRouter service handles MPC message routing
|
||||
type MessageRouterClient interface {
|
||||
// RouteMessage routes a message from one party to others
|
||||
RouteMessage(ctx context.Context, in *RouteMessageRequest, opts ...grpc.CallOption) (*RouteMessageResponse, error)
|
||||
// SubscribeMessages subscribes to messages for a party (streaming)
|
||||
SubscribeMessages(ctx context.Context, in *SubscribeMessagesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[MPCMessage], error)
|
||||
// GetPendingMessages retrieves pending messages (polling alternative)
|
||||
GetPendingMessages(ctx context.Context, in *GetPendingMessagesRequest, opts ...grpc.CallOption) (*GetPendingMessagesResponse, error)
|
||||
// AcknowledgeMessage acknowledges receipt of a message
|
||||
// Must be called after processing a message to confirm delivery
|
||||
AcknowledgeMessage(ctx context.Context, in *AcknowledgeMessageRequest, opts ...grpc.CallOption) (*AcknowledgeMessageResponse, error)
|
||||
// GetMessageStatus gets the delivery status of a message
|
||||
GetMessageStatus(ctx context.Context, in *GetMessageStatusRequest, opts ...grpc.CallOption) (*GetMessageStatusResponse, error)
|
||||
// RegisterParty registers a party with the message router (party actively connects)
|
||||
RegisterParty(ctx context.Context, in *RegisterPartyRequest, opts ...grpc.CallOption) (*RegisterPartyResponse, error)
|
||||
// Heartbeat sends a heartbeat to keep the party alive
|
||||
Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error)
|
||||
// SubscribeSessionEvents subscribes to session lifecycle events (session start, etc.)
|
||||
SubscribeSessionEvents(ctx context.Context, in *SubscribeSessionEventsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SessionEvent], error)
|
||||
// PublishSessionEvent publishes a session event (called by Session Coordinator)
|
||||
PublishSessionEvent(ctx context.Context, in *PublishSessionEventRequest, opts ...grpc.CallOption) (*PublishSessionEventResponse, error)
|
||||
// GetRegisteredParties returns all registered parties (for Session Coordinator party discovery)
|
||||
GetRegisteredParties(ctx context.Context, in *GetRegisteredPartiesRequest, opts ...grpc.CallOption) (*GetRegisteredPartiesResponse, error)
|
||||
}
|
||||
|
||||
type messageRouterClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewMessageRouterClient(cc grpc.ClientConnInterface) MessageRouterClient {
|
||||
return &messageRouterClient{cc}
|
||||
}
|
||||
|
||||
func (c *messageRouterClient) RouteMessage(ctx context.Context, in *RouteMessageRequest, opts ...grpc.CallOption) (*RouteMessageResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(RouteMessageResponse)
|
||||
err := c.cc.Invoke(ctx, MessageRouter_RouteMessage_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *messageRouterClient) SubscribeMessages(ctx context.Context, in *SubscribeMessagesRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[MPCMessage], error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
stream, err := c.cc.NewStream(ctx, &MessageRouter_ServiceDesc.Streams[0], MessageRouter_SubscribeMessages_FullMethodName, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &grpc.GenericClientStream[SubscribeMessagesRequest, MPCMessage]{ClientStream: stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
|
||||
type MessageRouter_SubscribeMessagesClient = grpc.ServerStreamingClient[MPCMessage]
|
||||
|
||||
func (c *messageRouterClient) GetPendingMessages(ctx context.Context, in *GetPendingMessagesRequest, opts ...grpc.CallOption) (*GetPendingMessagesResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(GetPendingMessagesResponse)
|
||||
err := c.cc.Invoke(ctx, MessageRouter_GetPendingMessages_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *messageRouterClient) AcknowledgeMessage(ctx context.Context, in *AcknowledgeMessageRequest, opts ...grpc.CallOption) (*AcknowledgeMessageResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(AcknowledgeMessageResponse)
|
||||
err := c.cc.Invoke(ctx, MessageRouter_AcknowledgeMessage_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *messageRouterClient) GetMessageStatus(ctx context.Context, in *GetMessageStatusRequest, opts ...grpc.CallOption) (*GetMessageStatusResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(GetMessageStatusResponse)
|
||||
err := c.cc.Invoke(ctx, MessageRouter_GetMessageStatus_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *messageRouterClient) RegisterParty(ctx context.Context, in *RegisterPartyRequest, opts ...grpc.CallOption) (*RegisterPartyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(RegisterPartyResponse)
|
||||
err := c.cc.Invoke(ctx, MessageRouter_RegisterParty_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *messageRouterClient) Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(HeartbeatResponse)
|
||||
err := c.cc.Invoke(ctx, MessageRouter_Heartbeat_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *messageRouterClient) SubscribeSessionEvents(ctx context.Context, in *SubscribeSessionEventsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SessionEvent], error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
stream, err := c.cc.NewStream(ctx, &MessageRouter_ServiceDesc.Streams[1], MessageRouter_SubscribeSessionEvents_FullMethodName, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &grpc.GenericClientStream[SubscribeSessionEventsRequest, SessionEvent]{ClientStream: stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
|
||||
type MessageRouter_SubscribeSessionEventsClient = grpc.ServerStreamingClient[SessionEvent]
|
||||
|
||||
func (c *messageRouterClient) PublishSessionEvent(ctx context.Context, in *PublishSessionEventRequest, opts ...grpc.CallOption) (*PublishSessionEventResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(PublishSessionEventResponse)
|
||||
err := c.cc.Invoke(ctx, MessageRouter_PublishSessionEvent_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *messageRouterClient) GetRegisteredParties(ctx context.Context, in *GetRegisteredPartiesRequest, opts ...grpc.CallOption) (*GetRegisteredPartiesResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(GetRegisteredPartiesResponse)
|
||||
err := c.cc.Invoke(ctx, MessageRouter_GetRegisteredParties_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// MessageRouterServer is the server API for MessageRouter service.
|
||||
// All implementations must embed UnimplementedMessageRouterServer
|
||||
// for forward compatibility.
|
||||
//
|
||||
// MessageRouter service handles MPC message routing
|
||||
type MessageRouterServer interface {
|
||||
// RouteMessage routes a message from one party to others
|
||||
RouteMessage(context.Context, *RouteMessageRequest) (*RouteMessageResponse, error)
|
||||
// SubscribeMessages subscribes to messages for a party (streaming)
|
||||
SubscribeMessages(*SubscribeMessagesRequest, grpc.ServerStreamingServer[MPCMessage]) error
|
||||
// GetPendingMessages retrieves pending messages (polling alternative)
|
||||
GetPendingMessages(context.Context, *GetPendingMessagesRequest) (*GetPendingMessagesResponse, error)
|
||||
// AcknowledgeMessage acknowledges receipt of a message
|
||||
// Must be called after processing a message to confirm delivery
|
||||
AcknowledgeMessage(context.Context, *AcknowledgeMessageRequest) (*AcknowledgeMessageResponse, error)
|
||||
// GetMessageStatus gets the delivery status of a message
|
||||
GetMessageStatus(context.Context, *GetMessageStatusRequest) (*GetMessageStatusResponse, error)
|
||||
// RegisterParty registers a party with the message router (party actively connects)
|
||||
RegisterParty(context.Context, *RegisterPartyRequest) (*RegisterPartyResponse, error)
|
||||
// Heartbeat sends a heartbeat to keep the party alive
|
||||
Heartbeat(context.Context, *HeartbeatRequest) (*HeartbeatResponse, error)
|
||||
// SubscribeSessionEvents subscribes to session lifecycle events (session start, etc.)
|
||||
SubscribeSessionEvents(*SubscribeSessionEventsRequest, grpc.ServerStreamingServer[SessionEvent]) error
|
||||
// PublishSessionEvent publishes a session event (called by Session Coordinator)
|
||||
PublishSessionEvent(context.Context, *PublishSessionEventRequest) (*PublishSessionEventResponse, error)
|
||||
// GetRegisteredParties returns all registered parties (for Session Coordinator party discovery)
|
||||
GetRegisteredParties(context.Context, *GetRegisteredPartiesRequest) (*GetRegisteredPartiesResponse, error)
|
||||
mustEmbedUnimplementedMessageRouterServer()
|
||||
}
|
||||
|
||||
// UnimplementedMessageRouterServer must be embedded to have
|
||||
// forward compatible implementations.
|
||||
//
|
||||
// NOTE: this should be embedded by value instead of pointer to avoid a nil
|
||||
// pointer dereference when methods are called.
|
||||
type UnimplementedMessageRouterServer struct{}
|
||||
|
||||
func (UnimplementedMessageRouterServer) RouteMessage(context.Context, *RouteMessageRequest) (*RouteMessageResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method RouteMessage not implemented")
|
||||
}
|
||||
func (UnimplementedMessageRouterServer) SubscribeMessages(*SubscribeMessagesRequest, grpc.ServerStreamingServer[MPCMessage]) error {
|
||||
return status.Error(codes.Unimplemented, "method SubscribeMessages not implemented")
|
||||
}
|
||||
func (UnimplementedMessageRouterServer) GetPendingMessages(context.Context, *GetPendingMessagesRequest) (*GetPendingMessagesResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method GetPendingMessages not implemented")
|
||||
}
|
||||
func (UnimplementedMessageRouterServer) AcknowledgeMessage(context.Context, *AcknowledgeMessageRequest) (*AcknowledgeMessageResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method AcknowledgeMessage not implemented")
|
||||
}
|
||||
func (UnimplementedMessageRouterServer) GetMessageStatus(context.Context, *GetMessageStatusRequest) (*GetMessageStatusResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method GetMessageStatus not implemented")
|
||||
}
|
||||
func (UnimplementedMessageRouterServer) RegisterParty(context.Context, *RegisterPartyRequest) (*RegisterPartyResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method RegisterParty not implemented")
|
||||
}
|
||||
func (UnimplementedMessageRouterServer) Heartbeat(context.Context, *HeartbeatRequest) (*HeartbeatResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method Heartbeat not implemented")
|
||||
}
|
||||
func (UnimplementedMessageRouterServer) SubscribeSessionEvents(*SubscribeSessionEventsRequest, grpc.ServerStreamingServer[SessionEvent]) error {
|
||||
return status.Error(codes.Unimplemented, "method SubscribeSessionEvents not implemented")
|
||||
}
|
||||
func (UnimplementedMessageRouterServer) PublishSessionEvent(context.Context, *PublishSessionEventRequest) (*PublishSessionEventResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method PublishSessionEvent not implemented")
|
||||
}
|
||||
func (UnimplementedMessageRouterServer) GetRegisteredParties(context.Context, *GetRegisteredPartiesRequest) (*GetRegisteredPartiesResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method GetRegisteredParties not implemented")
|
||||
}
|
||||
func (UnimplementedMessageRouterServer) mustEmbedUnimplementedMessageRouterServer() {}
|
||||
func (UnimplementedMessageRouterServer) testEmbeddedByValue() {}
|
||||
|
||||
// UnsafeMessageRouterServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to MessageRouterServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeMessageRouterServer interface {
|
||||
mustEmbedUnimplementedMessageRouterServer()
|
||||
}
|
||||
|
||||
func RegisterMessageRouterServer(s grpc.ServiceRegistrar, srv MessageRouterServer) {
|
||||
// If the following call panics, it indicates UnimplementedMessageRouterServer was
|
||||
// embedded by pointer and is nil. This will cause panics if an
|
||||
// unimplemented method is ever invoked, so we test this at initialization
|
||||
// time to prevent it from happening at runtime later due to I/O.
|
||||
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
|
||||
t.testEmbeddedByValue()
|
||||
}
|
||||
s.RegisterService(&MessageRouter_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _MessageRouter_RouteMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RouteMessageRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(MessageRouterServer).RouteMessage(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: MessageRouter_RouteMessage_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(MessageRouterServer).RouteMessage(ctx, req.(*RouteMessageRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _MessageRouter_SubscribeMessages_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(SubscribeMessagesRequest)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(MessageRouterServer).SubscribeMessages(m, &grpc.GenericServerStream[SubscribeMessagesRequest, MPCMessage]{ServerStream: stream})
|
||||
}
|
||||
|
||||
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
|
||||
type MessageRouter_SubscribeMessagesServer = grpc.ServerStreamingServer[MPCMessage]
|
||||
|
||||
func _MessageRouter_GetPendingMessages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetPendingMessagesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(MessageRouterServer).GetPendingMessages(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: MessageRouter_GetPendingMessages_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(MessageRouterServer).GetPendingMessages(ctx, req.(*GetPendingMessagesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _MessageRouter_AcknowledgeMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(AcknowledgeMessageRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(MessageRouterServer).AcknowledgeMessage(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: MessageRouter_AcknowledgeMessage_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(MessageRouterServer).AcknowledgeMessage(ctx, req.(*AcknowledgeMessageRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _MessageRouter_GetMessageStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetMessageStatusRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(MessageRouterServer).GetMessageStatus(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: MessageRouter_GetMessageStatus_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(MessageRouterServer).GetMessageStatus(ctx, req.(*GetMessageStatusRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _MessageRouter_RegisterParty_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RegisterPartyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(MessageRouterServer).RegisterParty(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: MessageRouter_RegisterParty_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(MessageRouterServer).RegisterParty(ctx, req.(*RegisterPartyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _MessageRouter_Heartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(HeartbeatRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(MessageRouterServer).Heartbeat(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: MessageRouter_Heartbeat_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(MessageRouterServer).Heartbeat(ctx, req.(*HeartbeatRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _MessageRouter_SubscribeSessionEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(SubscribeSessionEventsRequest)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(MessageRouterServer).SubscribeSessionEvents(m, &grpc.GenericServerStream[SubscribeSessionEventsRequest, SessionEvent]{ServerStream: stream})
|
||||
}
|
||||
|
||||
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
|
||||
type MessageRouter_SubscribeSessionEventsServer = grpc.ServerStreamingServer[SessionEvent]
|
||||
|
||||
func _MessageRouter_PublishSessionEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(PublishSessionEventRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(MessageRouterServer).PublishSessionEvent(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: MessageRouter_PublishSessionEvent_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(MessageRouterServer).PublishSessionEvent(ctx, req.(*PublishSessionEventRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _MessageRouter_GetRegisteredParties_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetRegisteredPartiesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(MessageRouterServer).GetRegisteredParties(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: MessageRouter_GetRegisteredParties_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(MessageRouterServer).GetRegisteredParties(ctx, req.(*GetRegisteredPartiesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// MessageRouter_ServiceDesc is the grpc.ServiceDesc for MessageRouter service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var MessageRouter_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "mpc.router.v1.MessageRouter",
|
||||
HandlerType: (*MessageRouterServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "RouteMessage",
|
||||
Handler: _MessageRouter_RouteMessage_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetPendingMessages",
|
||||
Handler: _MessageRouter_GetPendingMessages_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "AcknowledgeMessage",
|
||||
Handler: _MessageRouter_AcknowledgeMessage_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetMessageStatus",
|
||||
Handler: _MessageRouter_GetMessageStatus_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "RegisterParty",
|
||||
Handler: _MessageRouter_RegisterParty_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Heartbeat",
|
||||
Handler: _MessageRouter_Heartbeat_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "PublishSessionEvent",
|
||||
Handler: _MessageRouter_PublishSessionEvent_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetRegisteredParties",
|
||||
Handler: _MessageRouter_GetRegisteredParties_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "SubscribeMessages",
|
||||
Handler: _MessageRouter_SubscribeMessages_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "SubscribeSessionEvents",
|
||||
Handler: _MessageRouter_SubscribeSessionEvents_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "api/proto/message_router.proto",
|
||||
}
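A sketch of consuming the SubscribeSessionEvents stream declared above. The request literal is left empty because its fields are not part of this diff; a real party would set its identity as the proto requires, and the import path is assumed:

package example

import (
	"context"
	"log"

	router "github.com/rwadurian/mpc-system/api/gen/router" // assumed import path
)

// watchSessionEvents blocks, receiving session lifecycle events until the
// stream or the context ends.
func watchSessionEvents(ctx context.Context, c router.MessageRouterClient) error {
	stream, err := c.SubscribeSessionEvents(ctx, &router.SubscribeSessionEventsRequest{})
	if err != nil {
		return err
	}
	for {
		ev, err := stream.Recv()
		if err != nil {
			return err // io.EOF when the server closes the stream
		}
		log.Printf("session event: %v", ev)
	}
}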
|
||||
File diff suppressed because it is too large
|
|
@ -21,9 +21,17 @@ message CreateSessionRequest {
|
|||
string session_type = 1; // "keygen" or "sign"
|
||||
int32 threshold_n = 2; // Total number of parties
|
||||
int32 threshold_t = 3; // Minimum required parties
|
||||
repeated ParticipantInfo participants = 4;
|
||||
repeated ParticipantInfo participants = 4; // Optional: if empty, coordinator selects automatically
|
||||
bytes message_hash = 5; // Required for sign sessions
|
||||
int64 expires_in_seconds = 6; // Session expiration time
|
||||
PartyComposition party_composition = 7; // Optional: party composition requirements for auto-selection
|
||||
}
|
||||
|
||||
// PartyComposition specifies requirements for automatic party selection
|
||||
message PartyComposition {
|
||||
int32 persistent_count = 1; // Number of persistent parties (store shares in DB)
|
||||
int32 delegate_count = 2; // Number of delegate parties (return shares to user)
|
||||
int32 temporary_count = 3; // Number of temporary parties
|
||||
}
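A sketch of requesting automatic party selection with PartyComposition: Participants is left empty and the coordinator is asked for two persistent shares plus one delegate share. This assumes a connected SessionCoordinatorClient and a placeholder import path:

package example

import (
	"context"

	coordinator "github.com/rwadurian/mpc-system/api/gen/coordinator" // assumed import path
)

// createKeygenSession creates a 2-of-3 keygen session and lets the coordinator
// pick the parties itself.
func createKeygenSession(ctx context.Context, c coordinator.SessionCoordinatorClient) (string, error) {
	resp, err := c.CreateSession(ctx, &coordinator.CreateSessionRequest{
		SessionType:      "keygen",
		ThresholdN:       3,
		ThresholdT:       2,
		ExpiresInSeconds: 600,
		PartyComposition: &coordinator.PartyComposition{
			PersistentCount: 2,
			DelegateCount:   1,
		},
	})
	if err != nil {
		return "", err
	}
	return resp.GetSessionId(), nil
}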
|
||||
|
||||
// ParticipantInfo contains information about a participant
|
||||
|
|
@ -45,6 +53,8 @@ message CreateSessionResponse {
|
|||
string session_id = 1;
|
||||
map<string, string> join_tokens = 2; // party_id -> join_token
|
||||
int64 expires_at = 3; // Unix timestamp milliseconds
|
||||
repeated string selected_parties = 4; // List of selected party IDs
|
||||
string delegate_party_id = 5; // The delegate party ID (if any)
|
||||
}
|
||||
|
||||
// JoinSessionRequest allows a participant to join a session
|
||||
|
|
|
|||
|
|
@ -0,0 +1,355 @@
|
|||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.6.0
|
||||
// - protoc v6.33.1
|
||||
// source: api/proto/session_coordinator.proto
|
||||
|
||||
package coordinator
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.64.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion9
|
||||
|
||||
const (
|
||||
SessionCoordinator_CreateSession_FullMethodName = "/mpc.coordinator.v1.SessionCoordinator/CreateSession"
|
||||
SessionCoordinator_JoinSession_FullMethodName = "/mpc.coordinator.v1.SessionCoordinator/JoinSession"
|
||||
SessionCoordinator_GetSessionStatus_FullMethodName = "/mpc.coordinator.v1.SessionCoordinator/GetSessionStatus"
|
||||
SessionCoordinator_MarkPartyReady_FullMethodName = "/mpc.coordinator.v1.SessionCoordinator/MarkPartyReady"
|
||||
SessionCoordinator_StartSession_FullMethodName = "/mpc.coordinator.v1.SessionCoordinator/StartSession"
|
||||
SessionCoordinator_ReportCompletion_FullMethodName = "/mpc.coordinator.v1.SessionCoordinator/ReportCompletion"
|
||||
SessionCoordinator_CloseSession_FullMethodName = "/mpc.coordinator.v1.SessionCoordinator/CloseSession"
|
||||
)
|
||||
|
||||
// SessionCoordinatorClient is the client API for SessionCoordinator service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
//
|
||||
// SessionCoordinator service manages MPC sessions
|
||||
type SessionCoordinatorClient interface {
|
||||
// Session management
|
||||
CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*CreateSessionResponse, error)
|
||||
JoinSession(ctx context.Context, in *JoinSessionRequest, opts ...grpc.CallOption) (*JoinSessionResponse, error)
|
||||
GetSessionStatus(ctx context.Context, in *GetSessionStatusRequest, opts ...grpc.CallOption) (*GetSessionStatusResponse, error)
|
||||
MarkPartyReady(ctx context.Context, in *MarkPartyReadyRequest, opts ...grpc.CallOption) (*MarkPartyReadyResponse, error)
|
||||
StartSession(ctx context.Context, in *StartSessionRequest, opts ...grpc.CallOption) (*StartSessionResponse, error)
|
||||
ReportCompletion(ctx context.Context, in *ReportCompletionRequest, opts ...grpc.CallOption) (*ReportCompletionResponse, error)
|
||||
CloseSession(ctx context.Context, in *CloseSessionRequest, opts ...grpc.CallOption) (*CloseSessionResponse, error)
|
||||
}
|
||||
|
||||
type sessionCoordinatorClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewSessionCoordinatorClient(cc grpc.ClientConnInterface) SessionCoordinatorClient {
|
||||
return &sessionCoordinatorClient{cc}
|
||||
}
|
||||
|
||||
func (c *sessionCoordinatorClient) CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*CreateSessionResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(CreateSessionResponse)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_CreateSession_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *sessionCoordinatorClient) JoinSession(ctx context.Context, in *JoinSessionRequest, opts ...grpc.CallOption) (*JoinSessionResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(JoinSessionResponse)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_JoinSession_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *sessionCoordinatorClient) GetSessionStatus(ctx context.Context, in *GetSessionStatusRequest, opts ...grpc.CallOption) (*GetSessionStatusResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(GetSessionStatusResponse)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_GetSessionStatus_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *sessionCoordinatorClient) MarkPartyReady(ctx context.Context, in *MarkPartyReadyRequest, opts ...grpc.CallOption) (*MarkPartyReadyResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(MarkPartyReadyResponse)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_MarkPartyReady_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *sessionCoordinatorClient) StartSession(ctx context.Context, in *StartSessionRequest, opts ...grpc.CallOption) (*StartSessionResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(StartSessionResponse)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_StartSession_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *sessionCoordinatorClient) ReportCompletion(ctx context.Context, in *ReportCompletionRequest, opts ...grpc.CallOption) (*ReportCompletionResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ReportCompletionResponse)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_ReportCompletion_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *sessionCoordinatorClient) CloseSession(ctx context.Context, in *CloseSessionRequest, opts ...grpc.CallOption) (*CloseSessionResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(CloseSessionResponse)
|
||||
err := c.cc.Invoke(ctx, SessionCoordinator_CloseSession_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// SessionCoordinatorServer is the server API for SessionCoordinator service.
|
||||
// All implementations must embed UnimplementedSessionCoordinatorServer
|
||||
// for forward compatibility.
|
||||
//
|
||||
// SessionCoordinator service manages MPC sessions
|
||||
type SessionCoordinatorServer interface {
|
||||
// Session management
|
||||
CreateSession(context.Context, *CreateSessionRequest) (*CreateSessionResponse, error)
|
||||
JoinSession(context.Context, *JoinSessionRequest) (*JoinSessionResponse, error)
|
||||
GetSessionStatus(context.Context, *GetSessionStatusRequest) (*GetSessionStatusResponse, error)
|
||||
MarkPartyReady(context.Context, *MarkPartyReadyRequest) (*MarkPartyReadyResponse, error)
|
||||
StartSession(context.Context, *StartSessionRequest) (*StartSessionResponse, error)
|
||||
ReportCompletion(context.Context, *ReportCompletionRequest) (*ReportCompletionResponse, error)
|
||||
CloseSession(context.Context, *CloseSessionRequest) (*CloseSessionResponse, error)
|
||||
mustEmbedUnimplementedSessionCoordinatorServer()
|
||||
}
|
||||
|
||||
// UnimplementedSessionCoordinatorServer must be embedded to have
|
||||
// forward compatible implementations.
|
||||
//
|
||||
// NOTE: this should be embedded by value instead of pointer to avoid a nil
|
||||
// pointer dereference when methods are called.
|
||||
type UnimplementedSessionCoordinatorServer struct{}
|
||||
|
||||
func (UnimplementedSessionCoordinatorServer) CreateSession(context.Context, *CreateSessionRequest) (*CreateSessionResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method CreateSession not implemented")
|
||||
}
|
||||
func (UnimplementedSessionCoordinatorServer) JoinSession(context.Context, *JoinSessionRequest) (*JoinSessionResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method JoinSession not implemented")
|
||||
}
|
||||
func (UnimplementedSessionCoordinatorServer) GetSessionStatus(context.Context, *GetSessionStatusRequest) (*GetSessionStatusResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method GetSessionStatus not implemented")
|
||||
}
|
||||
func (UnimplementedSessionCoordinatorServer) MarkPartyReady(context.Context, *MarkPartyReadyRequest) (*MarkPartyReadyResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method MarkPartyReady not implemented")
|
||||
}
|
||||
func (UnimplementedSessionCoordinatorServer) StartSession(context.Context, *StartSessionRequest) (*StartSessionResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method StartSession not implemented")
|
||||
}
|
||||
func (UnimplementedSessionCoordinatorServer) ReportCompletion(context.Context, *ReportCompletionRequest) (*ReportCompletionResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method ReportCompletion not implemented")
|
||||
}
|
||||
func (UnimplementedSessionCoordinatorServer) CloseSession(context.Context, *CloseSessionRequest) (*CloseSessionResponse, error) {
|
||||
return nil, status.Error(codes.Unimplemented, "method CloseSession not implemented")
|
||||
}
|
||||
func (UnimplementedSessionCoordinatorServer) mustEmbedUnimplementedSessionCoordinatorServer() {}
|
||||
func (UnimplementedSessionCoordinatorServer) testEmbeddedByValue() {}
|
||||
|
||||
// UnsafeSessionCoordinatorServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to SessionCoordinatorServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeSessionCoordinatorServer interface {
|
||||
mustEmbedUnimplementedSessionCoordinatorServer()
|
||||
}
|
||||
|
||||
func RegisterSessionCoordinatorServer(s grpc.ServiceRegistrar, srv SessionCoordinatorServer) {
|
||||
// If the following call panics, it indicates UnimplementedSessionCoordinatorServer was
|
||||
// embedded by pointer and is nil. This will cause panics if an
|
||||
// unimplemented method is ever invoked, so we test this at initialization
|
||||
// time to prevent it from happening at runtime later due to I/O.
|
||||
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
|
||||
t.testEmbeddedByValue()
|
||||
}
|
||||
s.RegisterService(&SessionCoordinator_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _SessionCoordinator_CreateSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreateSessionRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SessionCoordinatorServer).CreateSession(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: SessionCoordinator_CreateSession_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SessionCoordinatorServer).CreateSession(ctx, req.(*CreateSessionRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _SessionCoordinator_JoinSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(JoinSessionRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SessionCoordinatorServer).JoinSession(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: SessionCoordinator_JoinSession_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SessionCoordinatorServer).JoinSession(ctx, req.(*JoinSessionRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _SessionCoordinator_GetSessionStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetSessionStatusRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SessionCoordinatorServer).GetSessionStatus(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: SessionCoordinator_GetSessionStatus_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SessionCoordinatorServer).GetSessionStatus(ctx, req.(*GetSessionStatusRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _SessionCoordinator_MarkPartyReady_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(MarkPartyReadyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SessionCoordinatorServer).MarkPartyReady(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: SessionCoordinator_MarkPartyReady_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SessionCoordinatorServer).MarkPartyReady(ctx, req.(*MarkPartyReadyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _SessionCoordinator_StartSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(StartSessionRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SessionCoordinatorServer).StartSession(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: SessionCoordinator_StartSession_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SessionCoordinatorServer).StartSession(ctx, req.(*StartSessionRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _SessionCoordinator_ReportCompletion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ReportCompletionRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SessionCoordinatorServer).ReportCompletion(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: SessionCoordinator_ReportCompletion_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SessionCoordinatorServer).ReportCompletion(ctx, req.(*ReportCompletionRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _SessionCoordinator_CloseSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CloseSessionRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(SessionCoordinatorServer).CloseSession(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: SessionCoordinator_CloseSession_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(SessionCoordinatorServer).CloseSession(ctx, req.(*CloseSessionRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// SessionCoordinator_ServiceDesc is the grpc.ServiceDesc for SessionCoordinator service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var SessionCoordinator_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "mpc.coordinator.v1.SessionCoordinator",
|
||||
HandlerType: (*SessionCoordinatorServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "CreateSession",
|
||||
Handler: _SessionCoordinator_CreateSession_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "JoinSession",
|
||||
Handler: _SessionCoordinator_JoinSession_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetSessionStatus",
|
||||
Handler: _SessionCoordinator_GetSessionStatus_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "MarkPartyReady",
|
||||
Handler: _SessionCoordinator_MarkPartyReady_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "StartSession",
|
||||
Handler: _SessionCoordinator_StartSession_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ReportCompletion",
|
||||
Handler: _SessionCoordinator_ReportCompletion_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "CloseSession",
|
||||
Handler: _SessionCoordinator_CloseSession_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "api/proto/session_coordinator.proto",
|
||||
}
|
||||
|
|
@ -39,48 +39,6 @@ services:
|
|||
networks:
|
||||
- mpc-network
|
||||
restart: unless-stopped
|
||||
# In production, do not expose the port on the host; it is reachable only on the internal network
|
||||
# ports:
|
||||
# - "5432:5432"
|
||||
|
||||
# Redis Cache
|
||||
redis:
|
||||
image: redis:7-alpine
|
||||
container_name: mpc-redis
|
||||
command: redis-server --appendonly yes --maxmemory 512mb --maxmemory-policy allkeys-lru ${REDIS_PASSWORD:+--requirepass $REDIS_PASSWORD}
|
||||
volumes:
|
||||
- redis-data:/data
|
||||
healthcheck:
|
||||
test: ["CMD", "redis-cli", "ping"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
- mpc-network
|
||||
restart: unless-stopped
|
||||
|
||||
# RabbitMQ Message Broker
|
||||
rabbitmq:
|
||||
image: rabbitmq:3-management-alpine
|
||||
container_name: mpc-rabbitmq
|
||||
environment:
|
||||
RABBITMQ_DEFAULT_USER: ${RABBITMQ_USER:-mpc_user}
|
||||
RABBITMQ_DEFAULT_PASS: ${RABBITMQ_PASSWORD:?RABBITMQ_PASSWORD must be set in .env}
|
||||
RABBITMQ_DEFAULT_VHOST: /
|
||||
volumes:
|
||||
- rabbitmq-data:/var/lib/rabbitmq
|
||||
healthcheck:
|
||||
test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
start_period: 30s
|
||||
networks:
|
||||
- mpc-network
|
||||
restart: unless-stopped
|
||||
# Management UI is for development use only; do not expose it in production
|
||||
# ports:
|
||||
# - "15672:15672"
|
||||
|
||||
# ============================================
|
||||
# MPC Core Services
|
||||
|
|
@ -104,23 +62,12 @@ services:
|
|||
MPC_DATABASE_PASSWORD: ${POSTGRES_PASSWORD:?POSTGRES_PASSWORD must be set}
|
||||
MPC_DATABASE_DBNAME: mpc_system
|
||||
MPC_DATABASE_SSLMODE: disable
|
||||
MPC_REDIS_HOST: redis
|
||||
MPC_REDIS_PORT: 6379
|
||||
MPC_REDIS_PASSWORD: ${REDIS_PASSWORD:-}
|
||||
MPC_RABBITMQ_HOST: rabbitmq
|
||||
MPC_RABBITMQ_PORT: 5672
|
||||
MPC_RABBITMQ_USER: ${RABBITMQ_USER:-mpc_user}
|
||||
MPC_RABBITMQ_PASSWORD: ${RABBITMQ_PASSWORD:?RABBITMQ_PASSWORD must be set}
|
||||
MPC_JWT_SECRET_KEY: ${JWT_SECRET_KEY}
|
||||
MPC_JWT_ISSUER: mpc-system
|
||||
MESSAGE_ROUTER_ADDR: message-router:50051
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_healthy
|
||||
rabbitmq:
|
||||
condition: service_healthy
|
||||
message-router:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
|
|
@ -151,15 +98,9 @@ services:
|
|||
MPC_DATABASE_PASSWORD: ${POSTGRES_PASSWORD:?POSTGRES_PASSWORD must be set}
|
||||
MPC_DATABASE_DBNAME: mpc_system
|
||||
MPC_DATABASE_SSLMODE: disable
|
||||
MPC_RABBITMQ_HOST: rabbitmq
|
||||
MPC_RABBITMQ_PORT: 5672
|
||||
MPC_RABBITMQ_USER: ${RABBITMQ_USER:-mpc_user}
|
||||
MPC_RABBITMQ_PASSWORD: ${RABBITMQ_PASSWORD:?RABBITMQ_PASSWORD must be set}
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
rabbitmq:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-sf", "http://localhost:8080/health"]
|
||||
interval: 30s
|
||||
|
|
@ -342,13 +283,6 @@ services:
|
|||
MPC_DATABASE_PASSWORD: ${POSTGRES_PASSWORD:?POSTGRES_PASSWORD must be set}
|
||||
MPC_DATABASE_DBNAME: mpc_system
|
||||
MPC_DATABASE_SSLMODE: disable
|
||||
MPC_REDIS_HOST: redis
|
||||
MPC_REDIS_PORT: 6379
|
||||
MPC_REDIS_PASSWORD: ${REDIS_PASSWORD:-}
|
||||
MPC_RABBITMQ_HOST: rabbitmq
|
||||
MPC_RABBITMQ_PORT: 5672
|
||||
MPC_RABBITMQ_USER: ${RABBITMQ_USER:-mpc_user}
|
||||
MPC_RABBITMQ_PASSWORD: ${RABBITMQ_PASSWORD:?RABBITMQ_PASSWORD must be set}
|
||||
MPC_COORDINATOR_URL: session-coordinator:50051
|
||||
MPC_JWT_SECRET_KEY: ${JWT_SECRET_KEY}
|
||||
# API authentication key (must match the MPC_API_KEY configured for mpc-service)
|
||||
|
|
@ -359,10 +293,6 @@ services:
|
|||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_healthy
|
||||
rabbitmq:
|
||||
condition: service_healthy
|
||||
session-coordinator:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
|
|
@ -388,7 +318,3 @@ networks:
|
|||
volumes:
|
||||
postgres-data:
|
||||
driver: local
|
||||
redis-data:
|
||||
driver: local
|
||||
rabbitmq-data:
|
||||
driver: local
|
||||
|
|
|
|||
|
|
@ -0,0 +1,249 @@
|
|||
package grpcutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rwadurian/mpc-system/pkg/logger"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
)
|
||||
|
||||
// ClientConfig holds configuration for gRPC client connections
|
||||
type ClientConfig struct {
|
||||
// Connection settings
|
||||
Address string
|
||||
ConnectTimeout time.Duration
|
||||
BlockingConnect bool
|
||||
|
||||
// Keepalive settings
|
||||
KeepaliveTime time.Duration // How often to send pings
|
||||
KeepaliveTimeout time.Duration // How long to wait for ping ack
|
||||
PermitWithoutStream bool // Allow pings even without active streams
|
||||
|
||||
// Reconnection settings
|
||||
EnableReconnect bool
|
||||
ReconnectBackoff time.Duration
|
||||
MaxReconnectBackoff time.Duration
|
||||
}
|
||||
|
||||
// DefaultClientConfig returns a sensible default configuration
|
||||
func DefaultClientConfig(address string) ClientConfig {
|
||||
return ClientConfig{
|
||||
Address: address,
|
||||
ConnectTimeout: 10 * time.Second,
|
||||
BlockingConnect: true,
|
||||
KeepaliveTime: 30 * time.Second,
|
||||
KeepaliveTimeout: 10 * time.Second,
|
||||
PermitWithoutStream: true,
|
||||
EnableReconnect: true,
|
||||
ReconnectBackoff: 1 * time.Second,
|
||||
MaxReconnectBackoff: 30 * time.Second,
|
||||
}
|
||||
}
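// Usage sketch (not part of the public API surface): dial the Message Router
// through ResilientConn so the party survives router restarts without
// re-creating its client wiring. The GetConn accessor and the generated
// router package import are assumed from context, not confirmed here.
//
//	cfg := grpcutil.DefaultClientConfig("message-router:50051")
//	cfg.KeepaliveTime = 15 * time.Second // tighter liveness between MPC rounds
//
//	rc, err := grpcutil.NewResilientConn(cfg)
//	if err != nil {
//		log.Fatal(err)
//	}
//	// GetConn is assumed to return the current *grpc.ClientConn.
//	client := router.NewMessageRouterClient(rc.GetConn())
//	_ = client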
|
||||
|
||||
// ResilientConn wraps a gRPC connection with automatic reconnection
|
||||
type ResilientConn struct {
|
||||
config ClientConfig
|
||||
conn *grpc.ClientConn
|
||||
mu sync.RWMutex
|
||||
closed bool
|
||||
closeChan chan struct{}
|
||||
}
|
||||
|
||||
// NewResilientConn creates a new resilient gRPC connection
|
||||
func NewResilientConn(config ClientConfig) (*ResilientConn, error) {
|
||||
rc := &ResilientConn{
|
||||
config: config,
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
// Initial connection
|
||||
conn, err := rc.dial()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rc.conn = conn
|
||||
|
||||
// Start connection monitor if reconnection is enabled
|
||||
if config.EnableReconnect {
|
||||
go rc.monitorConnection()
|
||||
}
|
||||
|
||||
return rc, nil
|
||||
}
|
||||
|
||||
// dial creates a new gRPC connection with keepalive settings
|
||||
func (rc *ResilientConn) dial() (*grpc.ClientConn, error) {
|
||||
// Build dial options
|
||||
opts := []grpc.DialOption{
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithKeepaliveParams(keepalive.ClientParameters{
|
||||
Time: rc.config.KeepaliveTime,
|
||||
Timeout: rc.config.KeepaliveTimeout,
|
||||
PermitWithoutStream: rc.config.PermitWithoutStream,
|
||||
}),
|
||||
}
|
||||
|
||||
if rc.config.BlockingConnect {
|
||||
opts = append(opts, grpc.WithBlock())
|
||||
}
|
||||
|
||||
// Create context with timeout for connection
|
||||
ctx, cancel := context.WithTimeout(context.Background(), rc.config.ConnectTimeout)
|
||||
defer cancel()
|
||||
|
||||
conn, err := grpc.DialContext(ctx, rc.config.Address, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logger.Info("gRPC connection established",
|
||||
zap.String("address", rc.config.Address),
|
||||
zap.Duration("keepalive_time", rc.config.KeepaliveTime))
|
||||
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// monitorConnection monitors the connection state and reconnects if needed
|
||||
func (rc *ResilientConn) monitorConnection() {
|
||||
backoff := rc.config.ReconnectBackoff
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-rc.closeChan:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
rc.mu.RLock()
|
||||
conn := rc.conn
|
||||
closed := rc.closed
|
||||
rc.mu.RUnlock()
|
||||
|
||||
if closed {
|
||||
return
|
||||
}
|
||||
|
||||
// Wait for state change
|
||||
state := conn.GetState()
|
||||
if state == connectivity.TransientFailure || state == connectivity.Shutdown {
|
||||
logger.Warn("gRPC connection lost, attempting reconnection",
|
||||
zap.String("address", rc.config.Address),
|
||||
zap.String("state", state.String()))
|
||||
|
||||
// Attempt reconnection with backoff
|
||||
for {
|
||||
select {
|
||||
case <-rc.closeChan:
|
||||
return
|
||||
case <-time.After(backoff):
|
||||
}
|
||||
|
||||
newConn, err := rc.dial()
|
||||
if err != nil {
|
||||
logger.Error("Reconnection failed, retrying",
|
||||
zap.String("address", rc.config.Address),
|
||||
zap.Duration("backoff", backoff),
|
||||
zap.Error(err))
|
||||
|
||||
// Increase backoff
|
||||
backoff = backoff * 2
|
||||
if backoff > rc.config.MaxReconnectBackoff {
|
||||
backoff = rc.config.MaxReconnectBackoff
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Successfully reconnected
|
||||
rc.mu.Lock()
|
||||
oldConn := rc.conn
|
||||
rc.conn = newConn
|
||||
rc.mu.Unlock()
|
||||
|
||||
// Close old connection
|
||||
if oldConn != nil {
|
||||
oldConn.Close()
|
||||
}
|
||||
|
||||
logger.Info("gRPC connection restored",
|
||||
zap.String("address", rc.config.Address))
|
||||
|
||||
// Reset backoff
|
||||
backoff = rc.config.ReconnectBackoff
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for next state change or check periodically
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
conn.WaitForStateChange(ctx, state)
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
|
||||
// GetConn returns the current connection
|
||||
func (rc *ResilientConn) GetConn() *grpc.ClientConn {
|
||||
rc.mu.RLock()
|
||||
defer rc.mu.RUnlock()
|
||||
return rc.conn
|
||||
}
|
||||
|
||||
// IsConnected returns true if the connection is ready
|
||||
func (rc *ResilientConn) IsConnected() bool {
|
||||
rc.mu.RLock()
|
||||
defer rc.mu.RUnlock()
|
||||
if rc.conn == nil {
|
||||
return false
|
||||
}
|
||||
state := rc.conn.GetState()
|
||||
return state == connectivity.Ready || state == connectivity.Idle
|
||||
}
|
||||
|
||||
// WaitForReady waits for the connection to be ready
|
||||
func (rc *ResilientConn) WaitForReady(ctx context.Context) bool {
|
||||
for {
|
||||
rc.mu.RLock()
|
||||
conn := rc.conn
|
||||
rc.mu.RUnlock()
|
||||
|
||||
if conn == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
state := conn.GetState()
|
||||
if state == connectivity.Ready {
|
||||
return true
|
||||
}
|
||||
if state == connectivity.Shutdown {
|
||||
return false
|
||||
}
|
||||
|
||||
// Wait for state change
|
||||
if !conn.WaitForStateChange(ctx, state) {
|
||||
return false // Context cancelled
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the connection and stops the monitor
|
||||
func (rc *ResilientConn) Close() error {
|
||||
rc.mu.Lock()
|
||||
defer rc.mu.Unlock()
|
||||
|
||||
if rc.closed {
|
||||
return nil
|
||||
}
|
||||
|
||||
rc.closed = true
|
||||
close(rc.closeChan)
|
||||
|
||||
if rc.conn != nil {
|
||||
return rc.conn.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
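A minimal usage sketch for the resilient connection above. The import path (pkg/grpcutil) and the coordinator address are assumptions for illustration; in the services the address comes from configuration.

package main

import (
	"log"

	"github.com/rwadurian/mpc-system/pkg/grpcutil"
)

func main() {
	// Hypothetical coordinator address; the real services read it from config/env.
	cfg := grpcutil.DefaultClientConfig("session-coordinator:50051")

	rc, err := grpcutil.NewResilientConn(cfg)
	if err != nil {
		log.Fatalf("failed to connect: %v", err)
	}
	defer rc.Close()

	// rc.GetConn() can be handed to any generated gRPC client constructor,
	// e.g. coordinatorpb.NewSessionCoordinatorClient(rc.GetConn()).
	if rc.IsConnected() {
		log.Println("gRPC connection ready")
	}
}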
||||
|
|
@ -0,0 +1,186 @@
|
|||
package middleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/rwadurian/mpc-system/pkg/jwt"
|
||||
"github.com/rwadurian/mpc-system/pkg/logger"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// AuthConfig holds configuration for authentication middleware
|
||||
type AuthConfig struct {
|
||||
JWTService *jwt.JWTService
|
||||
SkipPaths []string // Paths to skip authentication (e.g., /health, /auth/*)
|
||||
AllowAnonymous bool // If true, allow requests without token (user info will be nil)
|
||||
}
|
||||
|
||||
// ContextKey is a custom type for context keys to avoid collisions
|
||||
type ContextKey string
|
||||
|
||||
const (
|
||||
// UserContextKey is the key for storing user info in gin context
|
||||
UserContextKey ContextKey = "user"
|
||||
// ClaimsContextKey is the key for storing JWT claims in gin context
|
||||
ClaimsContextKey ContextKey = "claims"
|
||||
)
|
||||
|
||||
// UserInfo represents authenticated user information
|
||||
type UserInfo struct {
|
||||
UserID string
|
||||
Username string
|
||||
}
|
||||
|
||||
// BearerAuth creates a middleware that validates Bearer tokens
|
||||
// Extracts token from Authorization header: "Bearer <token>"
|
||||
func BearerAuth(config AuthConfig) gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
// Check if path should be skipped
|
||||
path := c.Request.URL.Path
|
||||
for _, skipPath := range config.SkipPaths {
|
||||
if matchPath(skipPath, path) {
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Extract token from Authorization header
|
||||
authHeader := c.GetHeader("Authorization")
|
||||
if authHeader == "" {
|
||||
if config.AllowAnonymous {
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
|
||||
"error": "unauthorized",
|
||||
"message": "missing authorization header",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Check Bearer prefix
|
||||
parts := strings.SplitN(authHeader, " ", 2)
|
||||
if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" {
|
||||
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
|
||||
"error": "unauthorized",
|
||||
"message": "invalid authorization header format, expected: Bearer <token>",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
token := parts[1]
|
||||
|
||||
// Validate access token
|
||||
claims, err := config.JWTService.ValidateAccessToken(token)
|
||||
if err != nil {
|
||||
logger.Debug("Token validation failed",
|
||||
zap.Error(err),
|
||||
zap.String("path", path))
|
||||
|
||||
statusCode := http.StatusUnauthorized
|
||||
message := "invalid token"
|
||||
|
||||
if err == jwt.ErrExpiredToken {
|
||||
message = "token expired"
|
||||
}
|
||||
|
||||
c.AbortWithStatusJSON(statusCode, gin.H{
|
||||
"error": "unauthorized",
|
||||
"message": message,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Store user info in context
|
||||
userInfo := &UserInfo{
|
||||
UserID: claims.Subject,
|
||||
Username: claims.Username,
|
||||
}
|
||||
c.Set(string(UserContextKey), userInfo)
|
||||
c.Set(string(ClaimsContextKey), claims)
|
||||
|
||||
logger.Debug("Request authenticated",
|
||||
zap.String("user_id", userInfo.UserID),
|
||||
zap.String("username", userInfo.Username),
|
||||
zap.String("path", path))
|
||||
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// GetUser extracts UserInfo from gin context
|
||||
func GetUser(c *gin.Context) *UserInfo {
|
||||
if user, exists := c.Get(string(UserContextKey)); exists {
|
||||
if userInfo, ok := user.(*UserInfo); ok {
|
||||
return userInfo
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RequireUser is a middleware that ensures user is authenticated
|
||||
// Use this after BearerAuth with AllowAnonymous=true
|
||||
func RequireUser() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
user := GetUser(c)
|
||||
if user == nil {
|
||||
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
|
||||
"error": "unauthorized",
|
||||
"message": "authentication required",
|
||||
})
|
||||
return
|
||||
}
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// RequireOwnership is a middleware that ensures the authenticated user
|
||||
// matches the resource owner (identified by a path parameter)
|
||||
func RequireOwnership(paramName string) gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
user := GetUser(c)
|
||||
if user == nil {
|
||||
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
|
||||
"error": "unauthorized",
|
||||
"message": "authentication required",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
resourceOwner := c.Param(paramName)
|
||||
if resourceOwner != "" && resourceOwner != user.UserID && resourceOwner != user.Username {
|
||||
c.AbortWithStatusJSON(http.StatusForbidden, gin.H{
|
||||
"error": "forbidden",
|
||||
"message": "access denied to this resource",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// matchPath checks if a pattern matches a path
|
||||
// Supports wildcard suffix: "/auth/*" matches "/auth/login", "/auth/refresh"
|
||||
func matchPath(pattern, path string) bool {
|
||||
// Exact match
|
||||
if pattern == path {
|
||||
return true
|
||||
}
|
||||
|
||||
// Wildcard match
|
||||
if strings.HasSuffix(pattern, "/*") {
|
||||
prefix := strings.TrimSuffix(pattern, "/*")
|
||||
return strings.HasPrefix(path, prefix+"/") || path == prefix
|
||||
}
|
||||
|
||||
// Prefix match (for backward compatibility)
|
||||
if strings.HasSuffix(pattern, "*") {
|
||||
prefix := strings.TrimSuffix(pattern, "*")
|
||||
return strings.HasPrefix(path, prefix)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
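A short wiring sketch for the middleware above, combining BearerAuth with RequireUser and RequireOwnership. The route names and the AllowAnonymous choice are illustrative assumptions; the account service's actual route setup lives in its main.go.

package example

import (
	"net/http"

	"github.com/gin-gonic/gin"

	"github.com/rwadurian/mpc-system/pkg/jwt"
	"github.com/rwadurian/mpc-system/pkg/middleware"
)

// newRouter shows per-route enforcement on top of BearerAuth.
func newRouter(jwtService *jwt.JWTService) *gin.Engine {
	r := gin.New()
	r.Use(gin.Recovery(), middleware.SecureHeaders())

	authCfg := middleware.AuthConfig{
		JWTService:     jwtService,
		SkipPaths:      []string{"/health", "/api/v1/auth/*"},
		AllowAnonymous: true, // attach user info when a token is present, enforce per route below
	}

	api := r.Group("/api/v1")
	api.Use(middleware.BearerAuth(authCfg))

	// Only authenticated callers, and only the owner of :id, may read this resource.
	api.GET("/accounts/:id", middleware.RequireUser(), middleware.RequireOwnership("id"), func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"user_id": middleware.GetUser(c).UserID})
	})

	return r
}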
||||
|
|
@ -0,0 +1,134 @@
|
|||
package middleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// CORSConfig holds configuration for CORS middleware
|
||||
type CORSConfig struct {
|
||||
// AllowOrigins is a list of origins that are allowed to access the resource
|
||||
// Use "*" to allow all origins (not recommended for production)
|
||||
AllowOrigins []string
|
||||
// AllowMethods is a list of HTTP methods allowed for CORS requests
|
||||
AllowMethods []string
|
||||
// AllowHeaders is a list of headers that are allowed in CORS requests
|
||||
AllowHeaders []string
|
||||
// ExposeHeaders is a list of headers that the browser is allowed to access
|
||||
ExposeHeaders []string
|
||||
// AllowCredentials indicates whether credentials (cookies, auth headers) are allowed
|
||||
AllowCredentials bool
|
||||
// MaxAge is the maximum time (in seconds) that preflight results can be cached
|
||||
MaxAge int
|
||||
}
|
||||
|
||||
// DefaultCORSConfig returns a default CORS configuration
|
||||
func DefaultCORSConfig() CORSConfig {
|
||||
return CORSConfig{
|
||||
AllowOrigins: []string{},
|
||||
AllowMethods: []string{"GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"},
|
||||
AllowHeaders: []string{
|
||||
"Origin",
|
||||
"Content-Type",
|
||||
"Accept",
|
||||
"Authorization",
|
||||
"X-Requested-With",
|
||||
"X-Request-ID",
|
||||
},
|
||||
ExposeHeaders: []string{"Content-Length", "X-Request-ID"},
|
||||
AllowCredentials: true,
|
||||
MaxAge: 86400, // 24 hours
|
||||
}
|
||||
}
|
||||
|
||||
// CORS creates a middleware that handles Cross-Origin Resource Sharing
|
||||
func CORS(config CORSConfig) gin.HandlerFunc {
|
||||
// Precompute allowed origins map for fast lookup
|
||||
allowedOrigins := make(map[string]bool)
|
||||
allowAllOrigins := false
|
||||
for _, origin := range config.AllowOrigins {
|
||||
if origin == "*" {
|
||||
allowAllOrigins = true
|
||||
break
|
||||
}
|
||||
allowedOrigins[origin] = true
|
||||
}
|
||||
|
||||
// Precompute header values
|
||||
allowMethodsHeader := strings.Join(config.AllowMethods, ", ")
|
||||
allowHeadersHeader := strings.Join(config.AllowHeaders, ", ")
|
||||
exposeHeadersHeader := strings.Join(config.ExposeHeaders, ", ")
|
||||
maxAgeHeader := strconv.Itoa(config.MaxAge)
|
||||
|
||||
return func(c *gin.Context) {
|
||||
origin := c.GetHeader("Origin")
|
||||
|
||||
// If no origin header, this is not a CORS request
|
||||
if origin == "" {
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
|
||||
// Check if origin is allowed
|
||||
var allowOrigin string
|
||||
if allowAllOrigins {
|
||||
allowOrigin = "*"
|
||||
} else if allowedOrigins[origin] {
|
||||
allowOrigin = origin
|
||||
} else {
|
||||
// Origin not allowed, but still process the request
|
||||
// The browser will block the response based on missing headers
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
|
||||
// Set CORS headers
|
||||
c.Header("Access-Control-Allow-Origin", allowOrigin)
|
||||
|
||||
if config.AllowCredentials && !allowAllOrigins {
|
||||
c.Header("Access-Control-Allow-Credentials", "true")
|
||||
}
|
||||
|
||||
if exposeHeadersHeader != "" {
|
||||
c.Header("Access-Control-Expose-Headers", exposeHeadersHeader)
|
||||
}
|
||||
|
||||
// Handle preflight request
|
||||
if c.Request.Method == http.MethodOptions {
|
||||
c.Header("Access-Control-Allow-Methods", allowMethodsHeader)
|
||||
c.Header("Access-Control-Allow-Headers", allowHeadersHeader)
|
||||
c.Header("Access-Control-Max-Age", maxAgeHeader)
|
||||
c.AbortWithStatus(http.StatusNoContent)
|
||||
return
|
||||
}
|
||||
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// AllowAllCORS is a permissive CORS middleware (for development only)
|
||||
// WARNING: Do not use in production
|
||||
func AllowAllCORS() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
origin := c.GetHeader("Origin")
|
||||
if origin == "" {
|
||||
origin = "*"
|
||||
}
|
||||
|
||||
c.Header("Access-Control-Allow-Origin", origin)
|
||||
c.Header("Access-Control-Allow-Methods", "GET, POST, PUT, PATCH, DELETE, OPTIONS")
|
||||
c.Header("Access-Control-Allow-Headers", "Origin, Content-Type, Accept, Authorization, X-Requested-With")
|
||||
c.Header("Access-Control-Allow-Credentials", "true")
|
||||
c.Header("Access-Control-Max-Age", "86400")
|
||||
|
||||
if c.Request.Method == http.MethodOptions {
|
||||
c.AbortWithStatus(http.StatusNoContent)
|
||||
return
|
||||
}
|
||||
|
||||
c.Next()
|
||||
}
|
||||
}
|
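A brief sketch of restricting the CORS middleware above to a known set of front-end origins; the origin URLs are placeholders.

package example

import (
	"github.com/gin-gonic/gin"

	"github.com/rwadurian/mpc-system/pkg/middleware"
)

func setupCORS(r *gin.Engine) {
	// Start from the defaults and restrict to explicit origins for production.
	cfg := middleware.DefaultCORSConfig()
	cfg.AllowOrigins = []string{"https://app.example.com", "https://admin.example.com"}
	r.Use(middleware.CORS(cfg))
}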
||||
|
|
@ -0,0 +1,90 @@
|
|||
package middleware
|
||||
|
||||
import (
"strconv"

"github.com/gin-gonic/gin"
)
|
||||
|
||||
// SecurityConfig holds configuration for security headers middleware
|
||||
type SecurityConfig struct {
|
||||
// EnableHSTS enables HTTP Strict Transport Security header
|
||||
EnableHSTS bool
|
||||
// HSTSMaxAge is the max-age value for HSTS in seconds (default: 31536000 = 1 year)
|
||||
HSTSMaxAge int
|
||||
// EnableNoSniff enables X-Content-Type-Options: nosniff
|
||||
EnableNoSniff bool
|
||||
// EnableXSSFilter enables X-XSS-Protection header
|
||||
EnableXSSFilter bool
|
||||
// EnableFrameDeny enables X-Frame-Options: DENY
|
||||
EnableFrameDeny bool
|
||||
// ContentSecurityPolicy sets the Content-Security-Policy header
|
||||
ContentSecurityPolicy string
|
||||
// ReferrerPolicy sets the Referrer-Policy header
|
||||
ReferrerPolicy string
|
||||
}
|
||||
|
||||
// DefaultSecurityConfig returns a secure default configuration
|
||||
func DefaultSecurityConfig() SecurityConfig {
|
||||
return SecurityConfig{
|
||||
EnableHSTS: true,
|
||||
HSTSMaxAge: 31536000, // 1 year
|
||||
EnableNoSniff: true,
|
||||
EnableXSSFilter: true,
|
||||
EnableFrameDeny: true,
|
||||
ContentSecurityPolicy: "default-src 'self'",
|
||||
ReferrerPolicy: "strict-origin-when-cross-origin",
|
||||
}
|
||||
}
|
||||
|
||||
// SecurityHeaders creates a middleware that adds security headers to responses
|
||||
func SecurityHeaders(config SecurityConfig) gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
// Prevent MIME type sniffing
|
||||
if config.EnableNoSniff {
|
||||
c.Header("X-Content-Type-Options", "nosniff")
|
||||
}
|
||||
|
||||
// XSS protection (legacy, but still useful for older browsers)
|
||||
if config.EnableXSSFilter {
|
||||
c.Header("X-XSS-Protection", "1; mode=block")
|
||||
}
|
||||
|
||||
// Prevent clickjacking
|
||||
if config.EnableFrameDeny {
|
||||
c.Header("X-Frame-Options", "DENY")
|
||||
}
|
||||
|
||||
// HTTP Strict Transport Security
|
||||
if config.EnableHSTS {
hstsValue := "max-age=31536000; includeSubDomains"
if config.HSTSMaxAge > 0 {
hstsValue = "max-age=" + strconv.Itoa(config.HSTSMaxAge) + "; includeSubDomains"
}
c.Header("Strict-Transport-Security", hstsValue)
}
|
||||
|
||||
// Content Security Policy
|
||||
if config.ContentSecurityPolicy != "" {
|
||||
c.Header("Content-Security-Policy", config.ContentSecurityPolicy)
|
||||
}
|
||||
|
||||
// Referrer Policy
|
||||
if config.ReferrerPolicy != "" {
|
||||
c.Header("Referrer-Policy", config.ReferrerPolicy)
|
||||
}
|
||||
|
||||
// Permissions Policy (formerly Feature-Policy)
|
||||
c.Header("Permissions-Policy", "geolocation=(), microphone=(), camera=()")
|
||||
|
||||
// Cache control for API responses
|
||||
c.Header("Cache-Control", "no-store, no-cache, must-revalidate, proxy-revalidate")
|
||||
c.Header("Pragma", "no-cache")
|
||||
c.Header("Expires", "0")
|
||||
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// SecureHeaders is a convenience function that applies default security headers
|
||||
func SecureHeaders() gin.HandlerFunc {
|
||||
return SecurityHeaders(DefaultSecurityConfig())
|
||||
}
|
||||
|
|
@ -0,0 +1,136 @@
|
|||
package retry
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/rwadurian/mpc-system/pkg/logger"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// Config defines retry configuration
|
||||
type Config struct {
|
||||
MaxAttempts int // Maximum number of retry attempts (default: 3)
|
||||
InitialBackoff time.Duration // Initial backoff duration (default: 100ms)
|
||||
MaxBackoff time.Duration // Maximum backoff duration (default: 5s)
|
||||
BackoffMultiple float64 // Backoff multiplier (default: 2.0)
|
||||
}
|
||||
|
||||
// DefaultConfig returns default retry configuration
|
||||
func DefaultConfig() Config {
|
||||
return Config{
|
||||
MaxAttempts: 3,
|
||||
InitialBackoff: 100 * time.Millisecond,
|
||||
MaxBackoff: 5 * time.Second,
|
||||
BackoffMultiple: 2.0,
|
||||
}
|
||||
}
|
||||
|
||||
// Do executes a function with retry logic
|
||||
// Returns the result of the function or the last error after all retries are exhausted
|
||||
func Do[T any](ctx context.Context, cfg Config, operation string, fn func() (T, error)) (T, error) {
|
||||
var result T
|
||||
var lastErr error
|
||||
|
||||
backoff := cfg.InitialBackoff
|
||||
|
||||
for attempt := 1; attempt <= cfg.MaxAttempts; attempt++ {
|
||||
result, lastErr = fn()
|
||||
if lastErr == nil {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Check if error is retryable
|
||||
if !IsRetryable(lastErr) {
|
||||
logger.Warn("Non-retryable error, not retrying",
|
||||
zap.String("operation", operation),
|
||||
zap.Int("attempt", attempt),
|
||||
zap.Error(lastErr))
|
||||
return result, lastErr
|
||||
}
|
||||
|
||||
// Check if context is cancelled
|
||||
if ctx.Err() != nil {
|
||||
logger.Warn("Context cancelled, stopping retry",
|
||||
zap.String("operation", operation),
|
||||
zap.Int("attempt", attempt),
|
||||
zap.Error(ctx.Err()))
|
||||
return result, ctx.Err()
|
||||
}
|
||||
|
||||
// Don't wait after the last attempt
|
||||
if attempt < cfg.MaxAttempts {
|
||||
logger.Warn("Operation failed, retrying",
|
||||
zap.String("operation", operation),
|
||||
zap.Int("attempt", attempt),
|
||||
zap.Int("max_attempts", cfg.MaxAttempts),
|
||||
zap.Duration("backoff", backoff),
|
||||
zap.Error(lastErr))
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return result, ctx.Err()
|
||||
case <-time.After(backoff):
|
||||
}
|
||||
|
||||
// Calculate next backoff
|
||||
backoff = time.Duration(float64(backoff) * cfg.BackoffMultiple)
|
||||
if backoff > cfg.MaxBackoff {
|
||||
backoff = cfg.MaxBackoff
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logger.Error("Operation failed after all retries",
|
||||
zap.String("operation", operation),
|
||||
zap.Int("attempts", cfg.MaxAttempts),
|
||||
zap.Error(lastErr))
|
||||
|
||||
return result, lastErr
|
||||
}
|
||||
|
||||
// DoVoid executes a function that returns only error with retry logic
|
||||
func DoVoid(ctx context.Context, cfg Config, operation string, fn func() error) error {
|
||||
_, err := Do(ctx, cfg, operation, func() (struct{}, error) {
|
||||
return struct{}{}, fn()
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// IsRetryable determines if an error is retryable
|
||||
func IsRetryable(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check gRPC status codes
|
||||
st, ok := status.FromError(err)
|
||||
if !ok {
|
||||
// Not a gRPC error, assume retryable for network errors
|
||||
return true
|
||||
}
|
||||
|
||||
switch st.Code() {
|
||||
case codes.Unavailable,
|
||||
codes.ResourceExhausted,
|
||||
codes.Aborted,
|
||||
codes.Internal,
|
||||
codes.Unknown,
|
||||
codes.DeadlineExceeded:
|
||||
return true
|
||||
case codes.InvalidArgument,
|
||||
codes.NotFound,
|
||||
codes.AlreadyExists,
|
||||
codes.PermissionDenied,
|
||||
codes.FailedPrecondition,
|
||||
codes.OutOfRange,
|
||||
codes.Unimplemented,
|
||||
codes.Canceled,
|
||||
codes.Unauthenticated:
|
||||
return false
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
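A hedged sketch of wrapping a coordinator gRPC call with the retry helper above. The retry import path (pkg/retry), the generated client interface name, and its protobuf import path are assumptions for illustration.

package example

import (
	"context"

	coordinatorpb "github.com/rwadurian/mpc-system/api/grpc/coordinator/v1"
	"github.com/rwadurian/mpc-system/pkg/retry"
)

// createSessionWithRetry retries a coordinator call with the default policy.
func createSessionWithRetry(
	ctx context.Context,
	client coordinatorpb.SessionCoordinatorClient,
	req *coordinatorpb.CreateSessionRequest,
) (*coordinatorpb.CreateSessionResponse, error) {
	return retry.Do(ctx, retry.DefaultConfig(), "CreateSession",
		func() (*coordinatorpb.CreateSessionResponse, error) {
			return client.CreateSession(ctx, req)
		})
}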
||||
|
|
@ -74,6 +74,7 @@ func (h *AccountHTTPHandler) RegisterRoutes(router *gin.RouterGroup) {
|
|||
accounts := router.Group("/accounts")
|
||||
{
|
||||
accounts.POST("", h.CreateAccount)
|
||||
accounts.POST("/from-keygen", h.CreateAccountFromKeygen)
|
||||
accounts.GET("", h.ListAccounts)
|
||||
accounts.GET("/:id", h.GetAccount)
|
||||
accounts.PUT("/:id", h.UpdateAccount)
|
||||
|
|
@ -535,20 +536,15 @@ func (h *AccountHTTPHandler) CancelRecovery(c *gin.Context) {
|
|||
// ============================================
|
||||
|
||||
// CreateKeygenSessionRequest represents the request for creating a keygen session
|
||||
// Coordinator will automatically select parties from registered pool
|
||||
type CreateKeygenSessionRequest struct {
|
||||
ThresholdN int `json:"threshold_n" binding:"required,min=2"`
|
||||
ThresholdT int `json:"threshold_t" binding:"required,min=1"`
|
||||
Participants []ParticipantRequest `json:"participants" binding:"required,min=2"`
|
||||
}
|
||||
|
||||
// ParticipantRequest represents a participant in the request
|
||||
type ParticipantRequest struct {
|
||||
PartyID string `json:"party_id" binding:"required"`
|
||||
DeviceType string `json:"device_type"`
|
||||
DeviceID string `json:"device_id"`
|
||||
ThresholdN int `json:"threshold_n" binding:"required,min=2"` // Total number of parties (e.g., 3)
|
||||
ThresholdT int `json:"threshold_t" binding:"required,min=1"` // Threshold for signing (e.g., 2)
|
||||
RequireDelegate bool `json:"require_delegate"` // If true, one party will be delegate (returns share to user)
|
||||
}
|
||||
|
||||
// CreateKeygenSession handles creating a new keygen session
|
||||
// Parties are automatically selected by Coordinator from registered pool
|
||||
func (h *AccountHTTPHandler) CreateKeygenSession(c *gin.Context) {
|
||||
var req CreateKeygenSessionRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
|
|
@ -562,35 +558,33 @@ func (h *AccountHTTPHandler) CreateKeygenSession(c *gin.Context) {
|
|||
return
|
||||
}
|
||||
|
||||
if len(req.Participants) != req.ThresholdN {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "number of participants must equal threshold_n"})
|
||||
return
|
||||
}
|
||||
|
||||
// Convert participants to gRPC format
|
||||
participants := make([]grpc.ParticipantInfo, len(req.Participants))
|
||||
for i, p := range req.Participants {
|
||||
participants[i] = grpc.ParticipantInfo{
|
||||
PartyID: p.PartyID,
|
||||
DeviceType: p.DeviceType,
|
||||
DeviceID: p.DeviceID,
|
||||
}
|
||||
}
|
||||
|
||||
// Call session coordinator via gRPC
|
||||
// Call session coordinator via gRPC (no participants - coordinator selects automatically)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
logger.Info("Calling CreateKeygenSession via gRPC",
|
||||
logger.Info("Calling CreateKeygenSession via gRPC (auto party selection)",
|
||||
zap.Int("threshold_n", req.ThresholdN),
|
||||
zap.Int("threshold_t", req.ThresholdT),
|
||||
zap.Int("num_participants", len(participants)))
|
||||
zap.Bool("require_delegate", req.RequireDelegate))
|
||||
|
||||
resp, err := h.sessionCoordinatorClient.CreateKeygenSession(
|
||||
// Calculate party composition based on require_delegate
|
||||
var persistentCount, delegateCount int
|
||||
if req.RequireDelegate {
|
||||
// One delegate party, rest are persistent
|
||||
delegateCount = 1
|
||||
persistentCount = req.ThresholdN - 1
|
||||
} else {
|
||||
// All persistent parties
|
||||
persistentCount = req.ThresholdN
|
||||
delegateCount = 0
|
||||
}
|
||||
|
||||
resp, err := h.sessionCoordinatorClient.CreateKeygenSessionAuto(
|
||||
ctx,
|
||||
int32(req.ThresholdN),
|
||||
int32(req.ThresholdT),
|
||||
participants,
|
||||
int32(persistentCount),
|
||||
int32(delegateCount),
|
||||
600, // 10 minutes expiry
|
||||
)
|
||||
|
||||
|
|
@ -602,26 +596,30 @@ func (h *AccountHTTPHandler) CreateKeygenSession(c *gin.Context) {
|
|||
|
||||
logger.Info("gRPC CreateKeygenSession succeeded",
|
||||
zap.String("session_id", resp.SessionID),
|
||||
zap.Int("num_join_tokens", len(resp.JoinTokens)))
|
||||
zap.Int("num_parties", len(resp.SelectedParties)))
|
||||
|
||||
// Return response with selected parties info
|
||||
c.JSON(http.StatusCreated, gin.H{
|
||||
"session_id": resp.SessionID,
|
||||
"session_type": "keygen",
|
||||
"threshold_n": req.ThresholdN,
|
||||
"threshold_t": req.ThresholdT,
|
||||
"join_tokens": resp.JoinTokens,
|
||||
"status": "created",
|
||||
"session_id": resp.SessionID,
|
||||
"session_type": "keygen",
|
||||
"threshold_n": req.ThresholdN,
|
||||
"threshold_t": req.ThresholdT,
|
||||
"selected_parties": resp.SelectedParties,
|
||||
"delegate_party": resp.DelegateParty, // The party that will return share to user
|
||||
"status": "created",
|
||||
})
|
||||
}
|
||||
|
||||
// CreateSigningSessionRequest represents the request for creating a signing session
|
||||
// Coordinator will automatically select parties based on account's registered shares
|
||||
type CreateSigningSessionRequest struct {
|
||||
AccountID string `json:"account_id" binding:"required"`
|
||||
MessageHash string `json:"message_hash" binding:"required"`
|
||||
Participants []ParticipantRequest `json:"participants" binding:"required,min=2"`
|
||||
AccountID string `json:"account_id" binding:"required"` // Account to sign for
|
||||
MessageHash string `json:"message_hash" binding:"required"` // SHA-256 hash to sign (hex encoded)
|
||||
UserShare string `json:"user_share"` // Optional: user's encrypted share (hex) if delegate party is used
|
||||
}
|
||||
|
||||
// CreateSigningSession handles creating a new signing session
|
||||
// Parties are automatically selected based on the account's registered shares
|
||||
func (h *AccountHTTPHandler) CreateSigningSession(c *gin.Context) {
|
||||
var req CreateSigningSessionRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
|
|
@ -648,8 +646,8 @@ func (h *AccountHTTPHandler) CreateSigningSession(c *gin.Context) {
|
|||
return
|
||||
}
|
||||
|
||||
// Get account to verify it exists and get threshold info
|
||||
output, err := h.getAccountUC.Execute(c.Request.Context(), ports.GetAccountInput{
|
||||
// Get account to verify it exists and get share info
|
||||
accountOutput, err := h.getAccountUC.Execute(c.Request.Context(), ports.GetAccountInput{
|
||||
AccountID: &accountID,
|
||||
})
|
||||
if err != nil {
|
||||
|
|
@ -657,51 +655,59 @@ func (h *AccountHTTPHandler) CreateSigningSession(c *gin.Context) {
|
|||
return
|
||||
}
|
||||
|
||||
// Validate participant count against threshold
|
||||
if len(req.Participants) < output.Account.ThresholdT {
|
||||
c.JSON(http.StatusBadRequest, gin.H{
|
||||
"error": "insufficient participants",
|
||||
"required": output.Account.ThresholdT,
|
||||
"provided": len(req.Participants),
|
||||
})
|
||||
return
|
||||
// Get the party IDs from account shares
|
||||
var partyIDs []string
|
||||
for _, share := range accountOutput.Shares {
|
||||
if share.IsActive {
|
||||
partyIDs = append(partyIDs, share.PartyID)
|
||||
}
|
||||
}
|
||||
|
||||
// Convert participants to gRPC format
|
||||
participants := make([]grpc.ParticipantInfo, len(req.Participants))
|
||||
for i, p := range req.Participants {
|
||||
participants[i] = grpc.ParticipantInfo{
|
||||
PartyID: p.PartyID,
|
||||
DeviceType: p.DeviceType,
|
||||
DeviceID: p.DeviceID,
|
||||
}
|
||||
// Validate we have enough active shares
|
||||
if len(partyIDs) < accountOutput.Account.ThresholdT {
|
||||
c.JSON(http.StatusBadRequest, gin.H{
|
||||
"error": "insufficient active shares for signing",
|
||||
"required": accountOutput.Account.ThresholdT,
|
||||
"active": len(partyIDs),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Call session coordinator via gRPC
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
resp, err := h.sessionCoordinatorClient.CreateSigningSession(
|
||||
logger.Info("Calling CreateSigningSession via gRPC (auto party selection)",
|
||||
zap.String("account_id", req.AccountID),
|
||||
zap.Int("threshold_t", accountOutput.Account.ThresholdT),
|
||||
zap.Int("available_parties", len(partyIDs)))
|
||||
|
||||
resp, err := h.sessionCoordinatorClient.CreateSigningSessionAuto(
|
||||
ctx,
|
||||
int32(output.Account.ThresholdT),
|
||||
participants,
|
||||
int32(accountOutput.Account.ThresholdT),
|
||||
partyIDs,
|
||||
messageHash,
|
||||
600, // 10 minutes expiry
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
logger.Error("gRPC CreateSigningSession failed", zap.Error(err))
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info("gRPC CreateSigningSession succeeded",
|
||||
zap.String("session_id", resp.SessionID),
|
||||
zap.Int("num_parties", len(resp.SelectedParties)))
|
||||
|
||||
c.JSON(http.StatusCreated, gin.H{
|
||||
"session_id": resp.SessionID,
|
||||
"session_type": "sign",
|
||||
"account_id": req.AccountID,
|
||||
"message_hash": req.MessageHash,
|
||||
"threshold_t": output.Account.ThresholdT,
|
||||
"join_tokens": resp.JoinTokens,
|
||||
"status": "created",
|
||||
"session_id": resp.SessionID,
|
||||
"session_type": "sign",
|
||||
"account_id": req.AccountID,
|
||||
"message_hash": req.MessageHash,
|
||||
"threshold_t": accountOutput.Account.ThresholdT,
|
||||
"selected_parties": resp.SelectedParties,
|
||||
"status": "created",
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -742,3 +748,112 @@ func (h *AccountHTTPHandler) GetSessionStatus(c *gin.Context) {
|
|||
|
||||
c.JSON(http.StatusOK, response)
|
||||
}
|
||||
|
||||
// ============================================
|
||||
// Account Creation from Keygen (Internal API)
|
||||
// ============================================
|
||||
|
||||
// CreateAccountFromKeygenRequest represents the request from Session Coordinator
|
||||
// after keygen completion
|
||||
type CreateAccountFromKeygenRequest struct {
|
||||
PublicKey string `json:"public_key" binding:"required"`
|
||||
KeygenSessionID string `json:"keygen_session_id" binding:"required"`
|
||||
ThresholdN int `json:"threshold_n" binding:"required,min=2"`
|
||||
ThresholdT int `json:"threshold_t" binding:"required,min=1"`
|
||||
Shares []ShareInfoFromKeygenInput `json:"shares" binding:"required,min=1"`
|
||||
}
|
||||
|
||||
// ShareInfoFromKeygenInput represents share info from keygen
|
||||
type ShareInfoFromKeygenInput struct {
|
||||
PartyID string `json:"party_id" binding:"required"`
|
||||
PartyIndex int `json:"party_index"`
|
||||
ShareType string `json:"share_type" binding:"required"` // "persistent" or "delegate"
|
||||
}
|
||||
|
||||
// CreateAccountFromKeygen handles account creation after keygen completion
|
||||
// This is called by Session Coordinator when all parties complete keygen
|
||||
func (h *AccountHTTPHandler) CreateAccountFromKeygen(c *gin.Context) {
|
||||
var req CreateAccountFromKeygenRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
// Validate threshold
|
||||
if req.ThresholdT > req.ThresholdN {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "threshold_t cannot be greater than threshold_n"})
|
||||
return
|
||||
}
|
||||
|
||||
// Decode public key
|
||||
publicKey, err := hex.DecodeString(req.PublicKey)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid public key format"})
|
||||
return
|
||||
}
|
||||
|
||||
// Parse keygen session ID
|
||||
keygenSessionID, err := uuid.Parse(req.KeygenSessionID)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid keygen_session_id format"})
|
||||
return
|
||||
}
|
||||
|
||||
// Generate a unique username based on keygen session ID
|
||||
// In production, you might want a different naming scheme
|
||||
username := "wallet-" + req.KeygenSessionID[:8]
|
||||
|
||||
// Convert shares - map share type string to value_objects.ShareType
|
||||
shares := make([]ports.ShareInput, len(req.Shares))
|
||||
for i, s := range req.Shares {
|
||||
var shareType value_objects.ShareType
|
||||
switch s.ShareType {
|
||||
case "persistent", "server":
|
||||
shareType = value_objects.ShareTypeServer
|
||||
case "delegate", "user_device":
|
||||
shareType = value_objects.ShareTypeUserDevice
|
||||
default:
|
||||
shareType = value_objects.ShareTypeServer
|
||||
}
|
||||
shares[i] = ports.ShareInput{
|
||||
ShareType: shareType,
|
||||
PartyID: s.PartyID,
|
||||
PartyIndex: s.PartyIndex,
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info("Creating account from keygen",
|
||||
zap.String("keygen_session_id", req.KeygenSessionID),
|
||||
zap.String("username", username),
|
||||
zap.Int("threshold_n", req.ThresholdN),
|
||||
zap.Int("threshold_t", req.ThresholdT),
|
||||
zap.Int("num_shares", len(shares)))
|
||||
|
||||
// Create account
|
||||
output, err := h.createAccountUC.Execute(c.Request.Context(), ports.CreateAccountInput{
|
||||
Username: username,
|
||||
PublicKey: publicKey,
|
||||
KeygenSessionID: keygenSessionID,
|
||||
ThresholdN: req.ThresholdN,
|
||||
ThresholdT: req.ThresholdT,
|
||||
Shares: shares,
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error("Failed to create account from keygen",
|
||||
zap.String("keygen_session_id", req.KeygenSessionID),
|
||||
zap.Error(err))
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info("Account created from keygen successfully",
|
||||
zap.String("account_id", output.Account.ID.String()),
|
||||
zap.String("username", output.Account.Username),
|
||||
zap.String("keygen_session_id", req.KeygenSessionID))
|
||||
|
||||
c.JSON(http.StatusCreated, gin.H{
|
||||
"account_id": output.Account.ID.String(),
|
||||
"username": output.Account.Username,
|
||||
"public_key": hex.EncodeToString(output.Account.PublicKey),
|
||||
})
|
||||
}
|
||||
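For reference, a sketch of how a caller such as the Session Coordinator might invoke this internal endpoint. The base URL, party IDs, and threshold values are illustrative placeholders, not the coordinator's actual implementation; only the route and field names come from the handler above.

package example

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// notifyKeygenComplete posts a keygen result to the account service.
func notifyKeygenComplete(accountServiceURL, publicKeyHex, sessionID string) error {
	payload := map[string]interface{}{
		"public_key":        publicKeyHex,
		"keygen_session_id": sessionID,
		"threshold_n":       3,
		"threshold_t":       2,
		"shares": []map[string]interface{}{
			{"party_id": "party-1", "party_index": 1, "share_type": "persistent"},
			{"party_id": "party-2", "party_index": 2, "share_type": "persistent"},
			{"party_id": "party-3", "party_index": 3, "share_type": "delegate"},
		},
	}
	body, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	resp, err := http.Post(accountServiceURL+"/api/v1/accounts/from-keygen", "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}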
|
|
|
|||
|
|
@ -62,39 +62,34 @@ func NewSessionCoordinatorClient(address string) (*SessionCoordinatorClient, err
|
|||
}, nil
|
||||
}
|
||||
|
||||
// CreateKeygenSession creates a new keygen session
|
||||
func (c *SessionCoordinatorClient) CreateKeygenSession(
|
||||
// CreateKeygenSessionAuto creates a new keygen session with automatic party selection
|
||||
// Coordinator will select parties from registered pool based on composition requirements
|
||||
func (c *SessionCoordinatorClient) CreateKeygenSessionAuto(
|
||||
ctx context.Context,
|
||||
thresholdN int32,
|
||||
thresholdT int32,
|
||||
participants []ParticipantInfo,
|
||||
persistentCount int32,
|
||||
delegateCount int32,
|
||||
expiresInSeconds int64,
|
||||
) (*CreateSessionResponse, error) {
|
||||
pbParticipants := make([]*coordinatorpb.ParticipantInfo, len(participants))
|
||||
for i, p := range participants {
|
||||
pbParticipants[i] = &coordinatorpb.ParticipantInfo{
|
||||
PartyId: p.PartyID,
|
||||
DeviceInfo: &coordinatorpb.DeviceInfo{
|
||||
DeviceType: p.DeviceType,
|
||||
DeviceId: p.DeviceID,
|
||||
Platform: p.Platform,
|
||||
AppVersion: p.AppVersion,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
) (*CreateSessionAutoResponse, error) {
|
||||
req := &coordinatorpb.CreateSessionRequest{
|
||||
SessionType: "keygen",
|
||||
ThresholdN: thresholdN,
|
||||
ThresholdT: thresholdT,
|
||||
Participants: pbParticipants,
|
||||
Participants: nil, // No participants - coordinator selects automatically
|
||||
ExpiresInSeconds: expiresInSeconds,
|
||||
PartyComposition: &coordinatorpb.PartyComposition{
|
||||
PersistentCount: persistentCount,
|
||||
DelegateCount: delegateCount,
|
||||
},
|
||||
}
|
||||
|
||||
logger.Info("Sending CreateSession gRPC request",
|
||||
logger.Info("Sending CreateSession gRPC request (auto party selection)",
|
||||
zap.String("session_type", "keygen"),
|
||||
zap.Int32("threshold_n", thresholdN),
|
||||
zap.Int32("threshold_t", thresholdT))
|
||||
zap.Int32("threshold_t", thresholdT),
|
||||
zap.Int32("persistent_count", persistentCount),
|
||||
zap.Int32("delegate_count", delegateCount))
|
||||
|
||||
resp, err := c.client.CreateSession(ctx, req)
|
||||
if err != nil {
|
||||
|
|
@ -102,56 +97,81 @@ func (c *SessionCoordinatorClient) CreateKeygenSession(
|
|||
return nil, fmt.Errorf("failed to create keygen session: %w", err)
|
||||
}
|
||||
|
||||
// Extract selected parties and delegate party from response
|
||||
var selectedParties []string
|
||||
var delegateParty string
|
||||
for partyID := range resp.JoinTokens {
|
||||
selectedParties = append(selectedParties, partyID)
|
||||
}
|
||||
if resp.DelegatePartyId != "" {
|
||||
delegateParty = resp.DelegatePartyId
|
||||
}
|
||||
|
||||
logger.Info("CreateSession gRPC call succeeded",
|
||||
zap.String("session_id", resp.SessionId),
|
||||
zap.Int("num_join_tokens", len(resp.JoinTokens)))
|
||||
zap.Int("num_parties", len(selectedParties)),
|
||||
zap.String("delegate_party", delegateParty))
|
||||
|
||||
return &CreateSessionResponse{
|
||||
SessionID: resp.SessionId,
|
||||
JoinTokens: resp.JoinTokens,
|
||||
ExpiresAt: resp.ExpiresAt,
|
||||
return &CreateSessionAutoResponse{
|
||||
SessionID: resp.SessionId,
|
||||
SelectedParties: selectedParties,
|
||||
DelegateParty: delegateParty,
|
||||
JoinTokens: resp.JoinTokens,
|
||||
ExpiresAt: resp.ExpiresAt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CreateSigningSession creates a new signing session
|
||||
func (c *SessionCoordinatorClient) CreateSigningSession(
|
||||
// CreateSigningSessionAuto creates a new signing session with automatic party selection
|
||||
// Coordinator will select parties from the provided party IDs (from account shares)
|
||||
func (c *SessionCoordinatorClient) CreateSigningSessionAuto(
|
||||
ctx context.Context,
|
||||
thresholdT int32,
|
||||
participants []ParticipantInfo,
|
||||
partyIDs []string,
|
||||
messageHash []byte,
|
||||
expiresInSeconds int64,
|
||||
) (*CreateSessionResponse, error) {
|
||||
pbParticipants := make([]*coordinatorpb.ParticipantInfo, len(participants))
|
||||
for i, p := range participants {
|
||||
) (*CreateSessionAutoResponse, error) {
|
||||
// Convert party IDs to participant info (minimal info, coordinator will fill in details)
|
||||
pbParticipants := make([]*coordinatorpb.ParticipantInfo, len(partyIDs))
|
||||
for i, partyID := range partyIDs {
|
||||
pbParticipants[i] = &coordinatorpb.ParticipantInfo{
|
||||
PartyId: p.PartyID,
|
||||
DeviceInfo: &coordinatorpb.DeviceInfo{
|
||||
DeviceType: p.DeviceType,
|
||||
DeviceId: p.DeviceID,
|
||||
Platform: p.Platform,
|
||||
AppVersion: p.AppVersion,
|
||||
},
|
||||
PartyId: partyID,
|
||||
}
|
||||
}
|
||||
|
||||
req := &coordinatorpb.CreateSessionRequest{
|
||||
SessionType: "sign",
|
||||
ThresholdN: int32(len(participants)),
|
||||
ThresholdN: int32(len(partyIDs)),
|
||||
ThresholdT: thresholdT,
|
||||
Participants: pbParticipants,
|
||||
MessageHash: messageHash,
|
||||
ExpiresInSeconds: expiresInSeconds,
|
||||
}
|
||||
|
||||
logger.Info("Sending CreateSigningSession gRPC request",
|
||||
zap.Int32("threshold_t", thresholdT),
|
||||
zap.Int("num_parties", len(partyIDs)))
|
||||
|
||||
resp, err := c.client.CreateSession(ctx, req)
|
||||
if err != nil {
|
||||
logger.Error("CreateSigningSession gRPC call failed", zap.Error(err))
|
||||
return nil, fmt.Errorf("failed to create signing session: %w", err)
|
||||
}
|
||||
|
||||
return &CreateSessionResponse{
|
||||
SessionID: resp.SessionId,
|
||||
JoinTokens: resp.JoinTokens,
|
||||
ExpiresAt: resp.ExpiresAt,
|
||||
// Extract selected parties from response
|
||||
var selectedParties []string
|
||||
for partyID := range resp.JoinTokens {
|
||||
selectedParties = append(selectedParties, partyID)
|
||||
}
|
||||
|
||||
logger.Info("CreateSigningSession gRPC call succeeded",
|
||||
zap.String("session_id", resp.SessionId),
|
||||
zap.Int("num_parties", len(selectedParties)))
|
||||
|
||||
return &CreateSessionAutoResponse{
|
||||
SessionID: resp.SessionId,
|
||||
SelectedParties: selectedParties,
|
||||
JoinTokens: resp.JoinTokens,
|
||||
ExpiresAt: resp.ExpiresAt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -202,6 +222,15 @@ type CreateSessionResponse struct {
|
|||
ExpiresAt int64
|
||||
}
|
||||
|
||||
// CreateSessionAutoResponse contains the created session info with auto-selected parties
|
||||
type CreateSessionAutoResponse struct {
|
||||
SessionID string
|
||||
SelectedParties []string
|
||||
DelegateParty string
|
||||
JoinTokens map[string]string
|
||||
ExpiresAt int64
|
||||
}
|
||||
|
||||
// SessionStatusResponse contains session status information
|
||||
type SessionStatusResponse struct {
|
||||
Status string
|
||||
|
|
|
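A hedged usage sketch for the auto-selection client call above. Client construction is omitted, and the threshold, composition, and expiry values are illustrative.

package example

import (
	"context"
	"fmt"

	grpcadapter "github.com/rwadurian/mpc-system/services/account/adapters/output/grpc"
)

// startKeygen asks the coordinator to pick 2 persistent parties and 1 delegate.
func startKeygen(ctx context.Context, client *grpcadapter.SessionCoordinatorClient) error {
	resp, err := client.CreateKeygenSessionAuto(ctx, 3, 2, 2, 1, 600)
	if err != nil {
		return err
	}
	fmt.Println("session:", resp.SessionID, "delegate:", resp.DelegateParty)
	for _, partyID := range resp.SelectedParties {
		// Each selected party receives its join token out of band.
		fmt.Println("party", partyID, "token", resp.JoinTokens[partyID])
	}
	return nil
}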
|||
|
|
@ -0,0 +1,135 @@
|
|||
package memory
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rwadurian/mpc-system/services/account/application/ports"
|
||||
)
|
||||
|
||||
// cacheEntry holds a cached value with its expiration time
|
||||
type cacheEntry struct {
|
||||
value []byte
|
||||
expiresAt time.Time
|
||||
}
|
||||
|
||||
// CacheAdapter implements CacheService using in-memory storage
|
||||
type CacheAdapter struct {
|
||||
mu sync.RWMutex
|
||||
store map[string]cacheEntry
|
||||
}
|
||||
|
||||
// NewCacheAdapter creates a new in-memory CacheAdapter
|
||||
func NewCacheAdapter() ports.CacheService {
|
||||
adapter := &CacheAdapter{
|
||||
store: make(map[string]cacheEntry),
|
||||
}
|
||||
// Start cleanup goroutine
|
||||
go adapter.cleanupLoop()
|
||||
return adapter
|
||||
}
|
||||
|
||||
// cleanupLoop periodically removes expired entries
|
||||
func (c *CacheAdapter) cleanupLoop() {
|
||||
ticker := time.NewTicker(1 * time.Minute)
|
||||
defer ticker.Stop()
|
||||
|
||||
for range ticker.C {
|
||||
c.cleanup()
|
||||
}
|
||||
}
|
||||
|
||||
// cleanup removes expired entries
|
||||
func (c *CacheAdapter) cleanup() {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
now := time.Now()
|
||||
for key, entry := range c.store {
|
||||
if !entry.expiresAt.IsZero() && now.After(entry.expiresAt) {
|
||||
delete(c.store, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Set sets a value in the cache
|
||||
func (c *CacheAdapter) Set(ctx context.Context, key string, value interface{}, ttlSeconds int) error {
|
||||
data, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
entry := cacheEntry{
|
||||
value: data,
|
||||
}
|
||||
if ttlSeconds > 0 {
|
||||
entry.expiresAt = time.Now().Add(time.Duration(ttlSeconds) * time.Second)
|
||||
}
|
||||
|
||||
c.store[key] = entry
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get gets a value from the cache
|
||||
func (c *CacheAdapter) Get(ctx context.Context, key string) (interface{}, error) {
|
||||
c.mu.RLock()
|
||||
entry, exists := c.store[key]
|
||||
c.mu.RUnlock()
|
||||
|
||||
if !exists {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Check expiration
|
||||
if !entry.expiresAt.IsZero() && time.Now().After(entry.expiresAt) {
|
||||
c.mu.Lock()
|
||||
delete(c.store, key)
|
||||
c.mu.Unlock()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var value interface{}
|
||||
if err := json.Unmarshal(entry.value, &value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// Delete deletes a value from the cache
|
||||
func (c *CacheAdapter) Delete(ctx context.Context, key string) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
delete(c.store, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Exists checks if a key exists in the cache
|
||||
func (c *CacheAdapter) Exists(ctx context.Context, key string) (bool, error) {
|
||||
c.mu.RLock()
|
||||
entry, exists := c.store[key]
|
||||
c.mu.RUnlock()
|
||||
|
||||
if !exists {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Check expiration
|
||||
if !entry.expiresAt.IsZero() && time.Now().After(entry.expiresAt) {
|
||||
c.mu.Lock()
|
||||
delete(c.store, key)
|
||||
c.mu.Unlock()
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Ensure interface compliance
|
||||
var _ ports.CacheService = (*CacheAdapter)(nil)
|
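One usage note for the adapter above: values pass through encoding/json, so numbers come back as float64 and structs come back as map[string]interface{}. A minimal sketch:

package example

import (
	"context"
	"fmt"

	"github.com/rwadurian/mpc-system/services/account/adapters/output/memory"
)

func cacheExample(ctx context.Context) error {
	cache := memory.NewCacheAdapter()

	// Store with a 60 second TTL.
	if err := cache.Set(ctx, "session:count", 3, 60); err != nil {
		return err
	}

	v, err := cache.Get(ctx, "session:count")
	if err != nil {
		return err
	}
	// JSON round-trip: the stored int is returned as float64.
	if n, ok := v.(float64); ok {
		fmt.Println("count =", int(n))
	}
	return nil
}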
||||
|
|
@ -0,0 +1,34 @@
|
|||
package memory
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/rwadurian/mpc-system/pkg/logger"
|
||||
"github.com/rwadurian/mpc-system/services/account/application/ports"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// EventPublisherAdapter implements EventPublisher using in-memory logging
|
||||
// Events are logged but not distributed (suitable for single-instance deployment)
|
||||
type EventPublisherAdapter struct{}
|
||||
|
||||
// NewEventPublisherAdapter creates a new in-memory event publisher
|
||||
func NewEventPublisherAdapter() ports.EventPublisher {
|
||||
return &EventPublisherAdapter{}
|
||||
}
|
||||
|
||||
// Publish logs the event (no actual distribution in single-instance mode)
|
||||
func (p *EventPublisherAdapter) Publish(ctx context.Context, event ports.AccountEvent) error {
|
||||
logger.Info("Account event published",
|
||||
zap.String("type", string(event.Type)),
|
||||
zap.String("account_id", event.AccountID))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close is a no-op for in-memory publisher
|
||||
func (p *EventPublisherAdapter) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensure interface compliance
|
||||
var _ ports.EventPublisher = (*EventPublisherAdapter)(nil)
|
||||
|
|
@ -8,23 +8,22 @@ import (
|
|||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
_ "github.com/lib/pq"
|
||||
amqp "github.com/rabbitmq/amqp091-go"
|
||||
"github.com/redis/go-redis/v9"
|
||||
|
||||
"github.com/rwadurian/mpc-system/pkg/config"
|
||||
"github.com/rwadurian/mpc-system/pkg/jwt"
|
||||
"github.com/rwadurian/mpc-system/pkg/logger"
|
||||
"github.com/rwadurian/mpc-system/pkg/middleware"
|
||||
httphandler "github.com/rwadurian/mpc-system/services/account/adapters/input/http"
|
||||
grpcadapter "github.com/rwadurian/mpc-system/services/account/adapters/output/grpc"
|
||||
jwtadapter "github.com/rwadurian/mpc-system/services/account/adapters/output/jwt"
|
||||
"github.com/rwadurian/mpc-system/services/account/adapters/output/memory"
|
||||
"github.com/rwadurian/mpc-system/services/account/adapters/output/postgres"
|
||||
"github.com/rwadurian/mpc-system/services/account/adapters/output/rabbitmq"
|
||||
redisadapter "github.com/rwadurian/mpc-system/services/account/adapters/output/redis"
|
||||
"github.com/rwadurian/mpc-system/services/account/application/use_cases"
|
||||
"github.com/rwadurian/mpc-system/services/account/domain/services"
|
||||
"go.uber.org/zap"
|
||||
|
|
@ -63,19 +62,11 @@ func main() {
|
|||
}
|
||||
defer db.Close()
|
||||
|
||||
// Initialize Redis connection
|
||||
redisClient := initRedis(cfg.Redis)
|
||||
defer redisClient.Close()
|
||||
|
||||
// Initialize RabbitMQ connection
|
||||
rabbitConn, err := initRabbitMQ(cfg.RabbitMQ)
|
||||
if err != nil {
|
||||
logger.Fatal("Failed to connect to RabbitMQ", zap.Error(err))
|
||||
}
|
||||
defer rabbitConn.Close()
|
||||
|
||||
// Initialize gRPC client for session coordinator
|
||||
sessionCoordinatorAddr := "mpc-session-coordinator:50051"
|
||||
sessionCoordinatorAddr := os.Getenv("MPC_COORDINATOR_URL")
|
||||
if sessionCoordinatorAddr == "" {
|
||||
sessionCoordinatorAddr = "mpc-session-coordinator:50051"
|
||||
}
|
||||
sessionCoordinatorClient, err := grpcadapter.NewSessionCoordinatorClient(sessionCoordinatorAddr)
|
||||
if err != nil {
|
||||
logger.Fatal("Failed to connect to session coordinator", zap.Error(err))
|
||||
|
|
@ -87,14 +78,9 @@ func main() {
|
|||
shareRepo := postgres.NewAccountSharePostgresRepo(db)
|
||||
recoveryRepo := postgres.NewRecoverySessionPostgresRepo(db)
|
||||
|
||||
// Initialize adapters
|
||||
eventPublisher, err := rabbitmq.NewEventPublisherAdapter(rabbitConn)
|
||||
if err != nil {
|
||||
logger.Fatal("Failed to create event publisher", zap.Error(err))
|
||||
}
|
||||
defer eventPublisher.Close()
|
||||
|
||||
cacheAdapter := redisadapter.NewCacheAdapter(redisClient)
|
||||
// Initialize adapters (using in-memory implementations)
|
||||
eventPublisher := memory.NewEventPublisherAdapter()
|
||||
cacheAdapter := memory.NewCacheAdapter()
|
||||
|
||||
// Initialize JWT service
|
||||
jwtService := jwt.NewJWTService(
|
||||
|
|
@ -132,6 +118,7 @@ func main() {
|
|||
go func() {
|
||||
if err := startHTTPServer(
|
||||
cfg,
|
||||
jwtService,
|
||||
createAccountUC,
|
||||
getAccountUC,
|
||||
updateAccountUC,
|
||||
|
|
@ -229,140 +216,9 @@ func initDatabase(cfg config.DatabaseConfig) (*sql.DB, error) {
|
|||
return nil, fmt.Errorf("failed to connect to database after %d retries: %w", maxRetries, err)
|
||||
}
|
||||
|
||||
func initRedis(cfg config.RedisConfig) *redis.Client {
|
||||
const maxRetries = 10
|
||||
const retryDelay = 2 * time.Second
|
||||
|
||||
client := redis.NewClient(&redis.Options{
|
||||
Addr: cfg.Addr(),
|
||||
Password: cfg.Password,
|
||||
DB: cfg.DB,
|
||||
})
|
||||
|
||||
// Test connection with retry
|
||||
ctx := context.Background()
|
||||
for i := 0; i < maxRetries; i++ {
|
||||
if err := client.Ping(ctx).Err(); err != nil {
|
||||
logger.Warn("Redis connection failed, retrying...",
|
||||
zap.Int("attempt", i+1),
|
||||
zap.Int("max_retries", maxRetries),
|
||||
zap.Error(err))
|
||||
time.Sleep(retryDelay * time.Duration(i+1))
|
||||
continue
|
||||
}
|
||||
logger.Info("Connected to Redis")
|
||||
return client
|
||||
}
|
||||
|
||||
logger.Warn("Redis connection failed after retries, continuing without cache")
|
||||
return client
|
||||
}
|
||||
|
||||
func initRabbitMQ(cfg config.RabbitMQConfig) (*amqp.Connection, error) {
|
||||
const maxRetries = 10
|
||||
const retryDelay = 2 * time.Second
|
||||
|
||||
var conn *amqp.Connection
|
||||
var err error
|
||||
|
||||
for i := 0; i < maxRetries; i++ {
|
||||
// Attempt to dial RabbitMQ
|
||||
conn, err = amqp.Dial(cfg.URL())
|
||||
if err != nil {
|
||||
logger.Warn("Failed to dial RabbitMQ, retrying...",
|
||||
zap.Int("attempt", i+1),
|
||||
zap.Int("max_retries", maxRetries),
|
||||
zap.String("url", maskPassword(cfg.URL())),
|
||||
zap.Error(err))
|
||||
time.Sleep(retryDelay * time.Duration(i+1))
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify connection is actually usable by opening a channel
|
||||
ch, err := conn.Channel()
|
||||
if err != nil {
|
||||
logger.Warn("RabbitMQ connection established but channel creation failed, retrying...",
|
||||
zap.Int("attempt", i+1),
|
||||
zap.Int("max_retries", maxRetries),
|
||||
zap.Error(err))
|
||||
conn.Close()
|
||||
time.Sleep(retryDelay * time.Duration(i+1))
|
||||
continue
|
||||
}
|
||||
|
||||
// Test the channel with a simple operation (declare a test exchange)
|
||||
err = ch.ExchangeDeclare(
|
||||
"mpc.health.check", // name
|
||||
"fanout", // type
|
||||
false, // durable
|
||||
true, // auto-deleted
|
||||
false, // internal
|
||||
false, // no-wait
|
||||
nil, // arguments
|
||||
)
|
||||
if err != nil {
|
||||
logger.Warn("RabbitMQ channel created but exchange declaration failed, retrying...",
|
||||
zap.Int("attempt", i+1),
|
||||
zap.Int("max_retries", maxRetries),
|
||||
zap.Error(err))
|
||||
ch.Close()
|
||||
conn.Close()
|
||||
time.Sleep(retryDelay * time.Duration(i+1))
|
||||
continue
|
||||
}
|
||||
|
||||
// Clean up test exchange
|
||||
ch.ExchangeDelete("mpc.health.check", false, false)
|
||||
ch.Close()
|
||||
|
||||
// Setup connection close notification
|
||||
closeChan := make(chan *amqp.Error, 1)
|
||||
conn.NotifyClose(closeChan)
|
||||
go func() {
|
||||
err := <-closeChan
|
||||
if err != nil {
|
||||
logger.Error("RabbitMQ connection closed unexpectedly", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
logger.Info("Connected to RabbitMQ and verified connectivity",
|
||||
zap.Int("attempt", i+1))
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to connect to RabbitMQ after %d retries: %w", maxRetries, err)
|
||||
}
|
||||
|
||||
// maskPassword masks the password in the RabbitMQ URL for logging
|
||||
func maskPassword(url string) string {
|
||||
// Simple masking: amqp://user:password@host:port -> amqp://user:****@host:port
|
||||
start := 0
|
||||
for i := 0; i < len(url); i++ {
|
||||
if url[i] == ':' && i > 0 && url[i-1] != '/' {
|
||||
start = i + 1
|
||||
break
|
||||
}
|
||||
}
|
||||
if start == 0 {
|
||||
return url
|
||||
}
|
||||
|
||||
end := start
|
||||
for i := start; i < len(url); i++ {
|
||||
if url[i] == '@' {
|
||||
end = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if end == start {
|
||||
return url
|
||||
}
|
||||
|
||||
return url[:start] + "****" + url[end:]
|
||||
}
|
||||
|
||||
func startHTTPServer(
|
||||
cfg *config.Config,
|
||||
jwtService *jwt.JWTService,
|
||||
createAccountUC *use_cases.CreateAccountUseCase,
|
||||
getAccountUC *use_cases.GetAccountUseCase,
|
||||
updateAccountUC *use_cases.UpdateAccountUseCase,
|
||||
|
|
@ -387,6 +243,29 @@ func startHTTPServer(
|
|||
router.Use(gin.Recovery())
|
||||
router.Use(gin.Logger())
|
||||
|
||||
// Apply security headers middleware
|
||||
router.Use(middleware.SecureHeaders())
|
||||
|
||||
// Apply CORS middleware
|
||||
// Parse allowed origins from environment or use defaults
|
||||
allowedOrigins := []string{}
|
||||
if origins := os.Getenv("CORS_ALLOWED_ORIGINS"); origins != "" {
|
||||
allowedOrigins = strings.Split(origins, ",")
|
||||
}
|
||||
if cfg.Server.Environment != "production" {
|
||||
// Allow all origins in development
|
||||
router.Use(middleware.AllowAllCORS())
|
||||
} else if len(allowedOrigins) > 0 {
|
||||
router.Use(middleware.CORS(middleware.CORSConfig{
|
||||
AllowOrigins: allowedOrigins,
|
||||
AllowMethods: []string{"GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"},
|
||||
AllowHeaders: []string{"Origin", "Content-Type", "Accept", "Authorization", "X-Requested-With"},
|
||||
ExposeHeaders: []string{"Content-Length", "X-Request-ID"},
|
||||
AllowCredentials: true,
|
||||
MaxAge: 86400,
|
||||
}))
|
||||
}
|
||||
|
||||
// Create HTTP handler with session coordinator client
|
||||
httpHandler := httphandler.NewAccountHTTPHandler(
|
||||
createAccountUC,
|
||||
|
|
@ -405,7 +284,7 @@ func startHTTPServer(
|
|||
sessionCoordinatorClient,
|
||||
)
|
||||
|
||||
// Health check
|
||||
// Health check (public)
|
||||
router.GET("/health", func(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"status": "healthy",
|
||||
|
|
@ -413,10 +292,26 @@ func startHTTPServer(
|
|||
})
|
||||
})
|
||||
|
||||
// Register API routes
|
||||
// Configure authentication middleware
|
||||
// Skip paths that don't require authentication
|
||||
authConfig := middleware.AuthConfig{
|
||||
JWTService: jwtService,
|
||||
SkipPaths: []string{
|
||||
"/health",
|
||||
"/api/v1/auth/*", // Auth endpoints (login, refresh, challenge)
|
||||
"/api/v1/accounts/from-keygen", // Internal API from coordinator
|
||||
},
|
||||
AllowAnonymous: false,
|
||||
}
|
||||
|
||||
// API routes with authentication
|
||||
api := router.Group("/api/v1")
|
||||
api.Use(middleware.BearerAuth(authConfig))
|
||||
httpHandler.RegisterRoutes(api)
|
||||
|
||||
logger.Info("Starting HTTP server", zap.Int("port", cfg.Server.HTTPPort))
|
||||
logger.Info("Starting HTTP server",
|
||||
zap.Int("port", cfg.Server.HTTPPort),
|
||||
zap.String("environment", cfg.Server.Environment),
|
||||
zap.Bool("cors_enabled", len(allowedOrigins) > 0 || cfg.Server.Environment != "production"))
|
||||
return router.Run(fmt.Sprintf(":%d", cfg.Server.HTTPPort))
|
||||
}
|
||||
|
|
|
|||
|
|
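Note on maskPassword: as written, the scan stops at the first colon whose preceding character is not a slash, which in an amqp URL is the scheme separator, so everything between the scheme and "@" is masked, not only the password as the comment suggests. A minimal test-style sketch of the actual behaviour (assuming it sits in the same package as maskPassword above):

package main

import "testing"

func TestMaskPassword(t *testing.T) {
	// With the implementation above, the user name is masked together with the password.
	got := maskPassword("amqp://guest:secret@rabbitmq:5672/")
	want := "amqp:****@rabbitmq:5672/"
	if got != want {
		t.Fatalf("maskPassword() = %q, want %q", got, want)
	}
}
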
@ -2,35 +2,44 @@ package grpc

import (
	"context"
	"time"

	pb "github.com/rwadurian/mpc-system/api/grpc/router/v1"
	"github.com/rwadurian/mpc-system/pkg/logger"
	"github.com/rwadurian/mpc-system/services/message-router/adapters/output/rabbitmq"
	"github.com/rwadurian/mpc-system/services/message-router/application/use_cases"
	"github.com/rwadurian/mpc-system/services/message-router/domain"
	"github.com/rwadurian/mpc-system/services/message-router/domain/entities"
	"github.com/rwadurian/mpc-system/services/message-router/domain/repositories"
	"go.uber.org/zap"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// MessageBroker defines the interface for message subscription
type MessageBroker interface {
	SubscribeToPartyMessages(ctx context.Context, partyID string) (<-chan *entities.MessageDTO, error)
	SubscribeToSessionMessages(ctx context.Context, sessionID string, partyID string) (<-chan *entities.MessageDTO, error)
}

// MessageRouterServer implements the gRPC MessageRouter service
type MessageRouterServer struct {
	pb.UnimplementedMessageRouterServer
	routeMessageUC       *use_cases.RouteMessageUseCase
	getPendingMessagesUC *use_cases.GetPendingMessagesUseCase
	messageBroker        *rabbitmq.MessageBrokerAdapter
	messageBroker        MessageBroker
	partyRegistry        *domain.PartyRegistry
	eventBroadcaster     *domain.SessionEventBroadcaster
	messageRepo          repositories.MessageRepository
}

// NewMessageRouterServer creates a new gRPC server
func NewMessageRouterServer(
	routeMessageUC *use_cases.RouteMessageUseCase,
	getPendingMessagesUC *use_cases.GetPendingMessagesUseCase,
	messageBroker *rabbitmq.MessageBrokerAdapter,
	messageBroker MessageBroker,
	partyRegistry *domain.PartyRegistry,
	eventBroadcaster *domain.SessionEventBroadcaster,
	messageRepo repositories.MessageRepository,
) *MessageRouterServer {
	return &MessageRouterServer{
		routeMessageUC:       routeMessageUC,

@ -38,6 +47,7 @@ func NewMessageRouterServer(
		messageBroker:        messageBroker,
		partyRegistry:        partyRegistry,
		eventBroadcaster:     eventBroadcaster,
		messageRepo:          messageRepo,
	}
}

@ -152,13 +162,30 @@ func (s *MessageRouterServer) RegisterParty(
		return nil, status.Error(codes.InvalidArgument, "party_id is required")
	}

	// Register party
	party := s.partyRegistry.Register(req.PartyId, req.PartyRole, req.Version)
	// Convert notification channel if provided
	var notification *domain.NotificationChannel
	if req.Notification != nil {
		notification = &domain.NotificationChannel{
			Email:     req.Notification.Email,
			Phone:     req.Notification.Phone,
			PushToken: req.Notification.PushToken,
		}
	}

	// Register party with notification channel
	party := s.partyRegistry.RegisterWithNotification(req.PartyId, req.PartyRole, req.Version, notification)

	mode := "real-time"
	if party.IsOfflineMode() {
		mode = "offline"
	}

	logger.Info("Party registered",
		zap.String("party_id", req.PartyId),
		zap.String("role", req.PartyRole),
		zap.String("version", req.Version))
		zap.String("version", req.Version),
		zap.String("mode", mode),
		zap.Bool("has_notification", notification != nil && notification.HasAnyChannel()))

	return &pb.RegisterPartyResponse{
		Success: true,

@ -167,6 +194,92 @@ func (s *MessageRouterServer) RegisterParty(
	}, nil
}

// Heartbeat handles party heartbeat to keep the connection alive
func (s *MessageRouterServer) Heartbeat(
	ctx context.Context,
	req *pb.HeartbeatRequest,
) (*pb.HeartbeatResponse, error) {
	if req.PartyId == "" {
		return nil, status.Error(codes.InvalidArgument, "party_id is required")
	}

	// Update heartbeat
	if !s.partyRegistry.Heartbeat(req.PartyId) {
		return nil, status.Error(codes.NotFound, "party not registered")
	}

	// Count pending messages for this party
	pendingMessages := int32(0)
	if s.messageRepo != nil {
		count, err := s.messageRepo.CountPendingByParty(ctx, req.PartyId)
		if err != nil {
			logger.Warn("Failed to count pending messages",
				zap.String("party_id", req.PartyId),
				zap.Error(err))
		} else {
			pendingMessages = int32(count)
		}
	}

	return &pb.HeartbeatResponse{
		Success:         true,
		ServerTimestamp: time.Now().UnixMilli(),
		PendingMessages: pendingMessages,
	}, nil
}

// AcknowledgeMessage acknowledges receipt of a message
func (s *MessageRouterServer) AcknowledgeMessage(
	ctx context.Context,
	req *pb.AcknowledgeMessageRequest,
) (*pb.AcknowledgeMessageResponse, error) {
	if req.MessageId == "" {
		return nil, status.Error(codes.InvalidArgument, "message_id is required")
	}
	if req.PartyId == "" {
		return nil, status.Error(codes.InvalidArgument, "party_id is required")
	}

	// TODO: Store acknowledgment in database for message tracking
	// For now, just log and return success
	logger.Debug("Message acknowledged",
		zap.String("message_id", req.MessageId),
		zap.String("party_id", req.PartyId),
		zap.String("session_id", req.SessionId),
		zap.Bool("success", req.Success))

	if !req.Success {
		logger.Warn("Message processing failed",
			zap.String("message_id", req.MessageId),
			zap.String("party_id", req.PartyId),
			zap.String("error", req.ErrorMessage))
	}

	return &pb.AcknowledgeMessageResponse{
		Success: true,
		Message: "Acknowledgment received",
	}, nil
}

// GetMessageStatus returns the delivery status of a message
func (s *MessageRouterServer) GetMessageStatus(
	ctx context.Context,
	req *pb.GetMessageStatusRequest,
) (*pb.GetMessageStatusResponse, error) {
	if req.MessageId == "" {
		return nil, status.Error(codes.InvalidArgument, "message_id is required")
	}

	// TODO: Implement actual message status tracking in database
	// For now, return a placeholder response
	return &pb.GetMessageStatusResponse{
		MessageId:       req.MessageId,
		SessionId:       req.SessionId,
		Deliveries:      []*pb.MessageDeliveryStatus{},
		AllAcknowledged: false,
	}, nil
}

// SubscribeSessionEvents subscribes to session lifecycle events (streaming)
func (s *MessageRouterServer) SubscribeSessionEvents(
	req *pb.SubscribeSessionEventsRequest,

@ -277,19 +390,34 @@ func (s *MessageRouterServer) GetRegisteredParties(
	// Convert to protobuf format
	protoParties := make([]*pb.RegisteredParty, 0, len(parties))
	for _, party := range parties {
		// For now, consider all registered parties as online
		// TODO: Track actual online status via heartbeats
		protoParties = append(protoParties, &pb.RegisteredParty{
		// Filter by online status if requested
		if req.OnlyOnline && !party.Online {
			continue
		}

		protoParty := &pb.RegisteredParty{
			PartyId:      party.PartyID,
			Role:         party.Role,
			Online:       true, // Assume online if registered
			Online:       party.Online,
			RegisteredAt: party.RegisteredAt.UnixMilli(),
			LastSeenAt:   party.LastSeen.UnixMilli(),
		})
		}

		// Include notification channel if present
		if party.Notification != nil && party.Notification.HasAnyChannel() {
			protoParty.Notification = &pb.NotificationChannel{
				Email:     party.Notification.Email,
				Phone:     party.Notification.Phone,
				PushToken: party.Notification.PushToken,
			}
		}

		protoParties = append(protoParties, protoParty)
	}

	logger.Debug("GetRegisteredParties called",
		zap.String("role_filter", req.RoleFilter),
		zap.Bool("only_online", req.OnlyOnline),
		zap.Int("party_count", len(protoParties)))

	return &pb.GetRegisteredPartiesResponse{

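For context, the Heartbeat RPC above is driven from the party side. A minimal client-side sketch (assuming the generated pb.NewMessageRouterClient for this MessageRouter service; the interval and error handling are illustrative only, not the repository's actual client):

package main

import (
	"context"
	"time"

	pb "github.com/rwadurian/mpc-system/api/grpc/router/v1"
	"google.golang.org/grpc"
)

// runHeartbeat keeps a registered party marked online and surfaces the
// pending-message count returned by the router.
func runHeartbeat(ctx context.Context, conn *grpc.ClientConn, partyID string) {
	client := pb.NewMessageRouterClient(conn)
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			resp, err := client.Heartbeat(ctx, &pb.HeartbeatRequest{PartyId: partyID})
			if err != nil {
				continue // transient failure; the next tick retries
			}
			if resp.PendingMessages > 0 {
				// The party would poll or re-subscribe here to drain pending messages.
				_ = resp.ServerTimestamp
			}
		}
	}
}
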
@ -0,0 +1,209 @@
package memory

import (
	"context"
	"sync"

	"github.com/rwadurian/mpc-system/pkg/logger"
	"github.com/rwadurian/mpc-system/services/message-router/application/use_cases"
	"github.com/rwadurian/mpc-system/services/message-router/domain/entities"
	"go.uber.org/zap"
)

// MessageBrokerAdapter implements MessageBroker using in-memory channels
type MessageBrokerAdapter struct {
	mu sync.RWMutex
	// partyChannels maps partyID to their message channel
	partyChannels map[string]chan *entities.MessageDTO
	// sessionChannels maps sessionID:partyID to their broadcast channel
	sessionChannels map[string]chan *entities.MessageDTO
}

// NewMessageBrokerAdapter creates a new in-memory message broker
func NewMessageBrokerAdapter() *MessageBrokerAdapter {
	return &MessageBrokerAdapter{
		partyChannels:   make(map[string]chan *entities.MessageDTO),
		sessionChannels: make(map[string]chan *entities.MessageDTO),
	}
}

// PublishToParty publishes a message to a specific party
func (a *MessageBrokerAdapter) PublishToParty(ctx context.Context, partyID string, message *entities.MessageDTO) error {
	a.mu.RLock()
	ch, exists := a.partyChannels[partyID]
	a.mu.RUnlock()

	if !exists {
		logger.Debug("party channel not found, message will be available via polling",
			zap.String("party_id", partyID))
		return nil
	}

	select {
	case ch <- message:
		logger.Debug("published message to party",
			zap.String("party_id", partyID),
			zap.String("message_id", message.ID))
	case <-ctx.Done():
		return ctx.Err()
	default:
		// Channel full, message will be available via polling
		logger.Warn("party channel full, message available via polling",
			zap.String("party_id", partyID))
	}

	return nil
}

// PublishToSession publishes a message to all parties in a session (except sender)
func (a *MessageBrokerAdapter) PublishToSession(
	ctx context.Context,
	sessionID string,
	excludeParty string,
	message *entities.MessageDTO,
) error {
	a.mu.RLock()
	defer a.mu.RUnlock()

	prefix := sessionID + ":"
	for key, ch := range a.sessionChannels {
		if len(key) > len(prefix) && key[:len(prefix)] == prefix {
			partyID := key[len(prefix):]
			if partyID == excludeParty {
				continue
			}

			select {
			case ch <- message:
				logger.Debug("broadcast message to party",
					zap.String("session_id", sessionID),
					zap.String("party_id", partyID))
			case <-ctx.Done():
				return ctx.Err()
			default:
				// Channel full
				logger.Warn("session channel full for party",
					zap.String("party_id", partyID))
			}
		}
	}

	return nil
}

// SubscribeToPartyMessages subscribes to messages for a specific party
func (a *MessageBrokerAdapter) SubscribeToPartyMessages(
	ctx context.Context,
	partyID string,
) (<-chan *entities.MessageDTO, error) {
	a.mu.Lock()
	defer a.mu.Unlock()

	// Create channel if not exists
	if _, exists := a.partyChannels[partyID]; !exists {
		a.partyChannels[partyID] = make(chan *entities.MessageDTO, 100)
	}

	ch := a.partyChannels[partyID]

	// Return a read-only channel
	out := make(chan *entities.MessageDTO, 100)
	go func() {
		defer close(out)
		for {
			select {
			case <-ctx.Done():
				return
			case msg, ok := <-ch:
				if !ok {
					return
				}
				select {
				case out <- msg:
				case <-ctx.Done():
					return
				}
			}
		}
	}()

	return out, nil
}

// SubscribeToSessionMessages subscribes to all messages in a session
func (a *MessageBrokerAdapter) SubscribeToSessionMessages(
	ctx context.Context,
	sessionID string,
	partyID string,
) (<-chan *entities.MessageDTO, error) {
	a.mu.Lock()
	defer a.mu.Unlock()

	key := sessionID + ":" + partyID

	// Create channel if not exists
	if _, exists := a.sessionChannels[key]; !exists {
		a.sessionChannels[key] = make(chan *entities.MessageDTO, 100)
	}

	ch := a.sessionChannels[key]

	// Return a read-only channel
	out := make(chan *entities.MessageDTO, 100)
	go func() {
		defer close(out)
		for {
			select {
			case <-ctx.Done():
				// Cleanup on context done
				a.mu.Lock()
				delete(a.sessionChannels, key)
				a.mu.Unlock()
				return
			case msg, ok := <-ch:
				if !ok {
					return
				}
				select {
				case out <- msg:
				case <-ctx.Done():
					return
				}
			}
		}
	}()

	return out, nil
}

// UnsubscribeParty removes a party's channel
func (a *MessageBrokerAdapter) UnsubscribeParty(partyID string) {
	a.mu.Lock()
	defer a.mu.Unlock()

	if ch, exists := a.partyChannels[partyID]; exists {
		close(ch)
		delete(a.partyChannels, partyID)
	}
}

// Close closes all channels
func (a *MessageBrokerAdapter) Close() error {
	a.mu.Lock()
	defer a.mu.Unlock()

	for _, ch := range a.partyChannels {
		close(ch)
	}
	for _, ch := range a.sessionChannels {
		close(ch)
	}

	a.partyChannels = make(map[string]chan *entities.MessageDTO)
	a.sessionChannels = make(map[string]chan *entities.MessageDTO)

	return nil
}

// Ensure interface compliance
var _ use_cases.MessageBroker = (*MessageBrokerAdapter)(nil)

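A minimal usage sketch of this in-memory adapter (illustrative only; fields on entities.MessageDTO other than ID are omitted for brevity). Note that messages published before a party subscribes are intentionally dropped here and left to the polling path:

package main

import (
	"context"

	"github.com/rwadurian/mpc-system/services/message-router/adapters/output/memory"
	"github.com/rwadurian/mpc-system/services/message-router/domain/entities"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	broker := memory.NewMessageBrokerAdapter()
	defer broker.Close()

	// Subscribe first so the party channel exists, then publish.
	msgs, err := broker.SubscribeToPartyMessages(ctx, "party-1")
	if err != nil {
		panic(err)
	}
	_ = broker.PublishToParty(ctx, "party-1", &entities.MessageDTO{ID: "msg-1"})

	msg := <-msgs // receives msg-1
	_ = msg
}
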
@ -94,6 +94,22 @@ func (r *MessagePostgresRepo) GetPendingMessages(
	return r.scanMessages(rows)
}

// CountPendingByParty counts all pending messages for a party across all sessions
func (r *MessagePostgresRepo) CountPendingByParty(ctx context.Context, partyID string) (int64, error) {
	var count int64
	err := r.db.QueryRowContext(ctx, `
		SELECT COUNT(*)
		FROM mpc_messages
		WHERE delivered_at IS NULL
		  AND from_party != $1
		  AND (to_parties IS NULL OR cardinality(to_parties) = 0 OR $1 = ANY(to_parties))
	`, partyID).Scan(&count)
	if err != nil {
		return 0, err
	}
	return count, nil
}

// GetMessagesByRound retrieves messages for a specific round
func (r *MessagePostgresRepo) GetMessagesByRound(
	ctx context.Context,

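In words, a message counts as pending for a party when it has not been delivered, was not sent by that party, and is either a broadcast (NULL or empty to_parties array) or explicitly addressed to it. An illustrative caller sketch (the helper name and error handling are hypothetical, mirroring how the Heartbeat handler above treats errors):

package main

import (
	"context"

	"github.com/rwadurian/mpc-system/services/message-router/domain/repositories"
)

// pendingCount shows how a caller might use CountPendingByParty.
func pendingCount(ctx context.Context, repo repositories.MessageRepository, partyID string) int64 {
	count, err := repo.CountPendingByParty(ctx, partyID)
	if err != nil {
		return 0 // treat errors as "nothing pending", as the Heartbeat handler does
	}
	return count
}
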
@ -14,7 +14,6 @@ import (

	"github.com/gin-gonic/gin"
	_ "github.com/lib/pq"
	amqp "github.com/rabbitmq/amqp091-go"
	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"

@ -22,8 +21,8 @@ import (
	"github.com/rwadurian/mpc-system/pkg/config"
	"github.com/rwadurian/mpc-system/pkg/logger"
	grpcadapter "github.com/rwadurian/mpc-system/services/message-router/adapters/input/grpc"
	"github.com/rwadurian/mpc-system/services/message-router/adapters/output/memory"
	"github.com/rwadurian/mpc-system/services/message-router/adapters/output/postgres"
	"github.com/rwadurian/mpc-system/services/message-router/adapters/output/rabbitmq"
	"github.com/rwadurian/mpc-system/services/message-router/application/use_cases"
	"github.com/rwadurian/mpc-system/services/message-router/domain"
	"go.uber.org/zap"

@ -63,19 +62,11 @@ func main() {
	}
	defer db.Close()

	// Initialize RabbitMQ connection
	rabbitConn, err := initRabbitMQ(cfg.RabbitMQ)
	if err != nil {
		logger.Fatal("Failed to connect to RabbitMQ", zap.Error(err))
	}
	defer rabbitConn.Close()

	// Initialize repositories and adapters
	messageRepo := postgres.NewMessagePostgresRepo(db)
	messageBroker, err := rabbitmq.NewMessageBrokerAdapter(rabbitConn)
	if err != nil {
		logger.Fatal("Failed to create message broker", zap.Error(err))
	}

	// Initialize in-memory message broker (replaces RabbitMQ)
	messageBroker := memory.NewMessageBrokerAdapter()
	defer messageBroker.Close()

	// Initialize party registry and event broadcaster for party-driven architecture

@ -89,6 +80,9 @@ func main() {
	// Start message cleanup background job
	go runMessageCleanup(messageRepo)

	// Start stale party detection background job
	go runStalePartyDetection(partyRegistry)

	// Create shutdown context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

@ -98,7 +92,7 @@ func main() {

	// Start gRPC server
	go func() {
		if err := startGRPCServer(cfg, routeMessageUC, getPendingMessagesUC, messageBroker, partyRegistry, eventBroadcaster); err != nil {
		if err := startGRPCServer(cfg, routeMessageUC, getPendingMessagesUC, messageBroker, partyRegistry, eventBroadcaster, messageRepo); err != nil {
			errChan <- fmt.Errorf("gRPC server error: %w", err)
		}
	}()

@ -187,116 +181,14 @@ func initDatabase(cfg config.DatabaseConfig) (*sql.DB, error) {
	return nil, fmt.Errorf("failed to connect to database after %d retries: %w", maxRetries, err)
}

func initRabbitMQ(cfg config.RabbitMQConfig) (*amqp.Connection, error) {
	const maxRetries = 10
	const retryDelay = 2 * time.Second

	var conn *amqp.Connection
	var err error

	for i := 0; i < maxRetries; i++ {
		// Attempt to dial RabbitMQ
		conn, err = amqp.Dial(cfg.URL())
		if err != nil {
			logger.Warn("Failed to dial RabbitMQ, retrying...",
				zap.Int("attempt", i+1),
				zap.Int("max_retries", maxRetries),
				zap.String("url", maskPassword(cfg.URL())),
				zap.Error(err))
			time.Sleep(retryDelay * time.Duration(i+1))
			continue
		}

		// Verify connection is actually usable by opening a channel
		ch, err := conn.Channel()
		if err != nil {
			logger.Warn("RabbitMQ connection established but channel creation failed, retrying...",
				zap.Int("attempt", i+1),
				zap.Int("max_retries", maxRetries),
				zap.Error(err))
			conn.Close()
			time.Sleep(retryDelay * time.Duration(i+1))
			continue
		}

		// Test the channel with a simple operation (declare a test exchange)
		err = ch.ExchangeDeclare(
			"mpc.health.check", // name
			"fanout",           // type
			false,              // durable
			true,               // auto-deleted
			false,              // internal
			false,              // no-wait
			nil,                // arguments
		)
		if err != nil {
			logger.Warn("RabbitMQ channel created but exchange declaration failed, retrying...",
				zap.Int("attempt", i+1),
				zap.Int("max_retries", maxRetries),
				zap.Error(err))
			ch.Close()
			conn.Close()
			time.Sleep(retryDelay * time.Duration(i+1))
			continue
		}

		// Clean up test exchange
		ch.ExchangeDelete("mpc.health.check", false, false)
		ch.Close()

		// Setup connection close notification
		closeChan := make(chan *amqp.Error, 1)
		conn.NotifyClose(closeChan)
		go func() {
			err := <-closeChan
			if err != nil {
				logger.Error("RabbitMQ connection closed unexpectedly", zap.Error(err))
			}
		}()

		logger.Info("Connected to RabbitMQ and verified connectivity",
			zap.Int("attempt", i+1))
		return conn, nil
	}

	return nil, fmt.Errorf("failed to connect to RabbitMQ after %d retries: %w", maxRetries, err)
}

// maskPassword masks the password in the RabbitMQ URL for logging
func maskPassword(url string) string {
	// Simple masking: amqp://user:password@host:port -> amqp://user:****@host:port
	start := 0
	for i := 0; i < len(url); i++ {
		if url[i] == ':' && i > 0 && url[i-1] != '/' {
			start = i + 1
			break
		}
	}
	if start == 0 {
		return url
	}

	end := start
	for i := start; i < len(url); i++ {
		if url[i] == '@' {
			end = i
			break
		}
	}
	if end == start {
		return url
	}

	return url[:start] + "****" + url[end:]
}

func startGRPCServer(
	cfg *config.Config,
	routeMessageUC *use_cases.RouteMessageUseCase,
	getPendingMessagesUC *use_cases.GetPendingMessagesUseCase,
	messageBroker *rabbitmq.MessageBrokerAdapter,
	messageBroker *memory.MessageBrokerAdapter,
	partyRegistry *domain.PartyRegistry,
	eventBroadcaster *domain.SessionEventBroadcaster,
	messageRepo *postgres.MessagePostgresRepo,
) error {
	listener, err := net.Listen("tcp", fmt.Sprintf(":%d", cfg.Server.GRPCPort))
	if err != nil {

@ -312,6 +204,7 @@ func startGRPCServer(
		messageBroker,
		partyRegistry,
		eventBroadcaster,
		messageRepo,
	)
	pb.RegisterMessageRouterServer(grpcServer, messageRouterServer)

@ -422,3 +315,32 @@ func runMessageCleanup(messageRepo *postgres.MessagePostgresRepo) {
		}
	}
}

// runStalePartyDetection periodically checks for stale parties and marks them as offline
// Parties that haven't sent a heartbeat within the timeout are considered offline
func runStalePartyDetection(partyRegistry *domain.PartyRegistry) {
	// Check every 30 seconds for stale parties
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	// Parties are considered stale if no heartbeat for 2 minutes
	staleTimeout := 2 * time.Minute

	logger.Info("Started stale party detection",
		zap.Duration("check_interval", 30*time.Second),
		zap.Duration("stale_timeout", staleTimeout))

	for range ticker.C {
		staleParties := partyRegistry.MarkStalePartiesOffline(staleTimeout)

		if len(staleParties) > 0 {
			for _, party := range staleParties {
				logger.Warn("Party marked as offline (no heartbeat)",
					zap.String("party_id", party.PartyID),
					zap.String("role", party.Role),
					zap.Time("last_seen", party.LastSeen),
					zap.Bool("has_notification", party.IsOfflineMode()))
			}
		}
	}
}

@ -5,6 +5,18 @@ import (
	"time"
)

// NotificationChannel represents notification channels for offline parties
type NotificationChannel struct {
	Email     string
	Phone     string
	PushToken string
}

// HasAnyChannel returns true if any notification channel is configured
func (nc *NotificationChannel) HasAnyChannel() bool {
	return nc != nil && (nc.Email != "" || nc.Phone != "" || nc.PushToken != "")
}

// RegisteredParty represents a party registered with the router
type RegisteredParty struct {
	PartyID string

@ -12,6 +24,13 @@ type RegisteredParty struct {
	Version      string
	RegisteredAt time.Time
	LastSeen     time.Time
	Online       bool                 // Whether the party is currently connected
	Notification *NotificationChannel // Optional notification channels for offline mode
}

// IsOfflineMode returns true if the party operates in offline mode (has notification channels)
func (p *RegisteredParty) IsOfflineMode() bool {
	return p.Notification != nil && p.Notification.HasAnyChannel()
}

// PartyRegistry manages registered parties

@ -29,6 +48,11 @@ func NewPartyRegistry() *PartyRegistry {

// Register registers a party
func (r *PartyRegistry) Register(partyID, role, version string) *RegisteredParty {
	return r.RegisterWithNotification(partyID, role, version, nil)
}

// RegisterWithNotification registers a party with optional notification channels
func (r *PartyRegistry) RegisterWithNotification(partyID, role, version string, notification *NotificationChannel) *RegisteredParty {
	r.mu.Lock()
	defer r.mu.Unlock()

@ -39,6 +63,8 @@ func (r *PartyRegistry) Register(partyID, role, version string) *RegisteredParty
		Version:      version,
		RegisteredAt: now,
		LastSeen:     now,
		Online:       true,
		Notification: notification,
	}

	r.parties[partyID] = party

@ -105,3 +131,87 @@ func (r *PartyRegistry) Count() int {

	return len(r.parties)
}

// SetOnline sets the online status of a party
func (r *PartyRegistry) SetOnline(partyID string, online bool) {
	r.mu.Lock()
	defer r.mu.Unlock()

	if party, exists := r.parties[partyID]; exists {
		party.Online = online
		if online {
			party.LastSeen = time.Now()
		}
	}
}

// IsOnline checks if a party is currently online
func (r *PartyRegistry) IsOnline(partyID string) bool {
	r.mu.RLock()
	defer r.mu.RUnlock()

	if party, exists := r.parties[partyID]; exists {
		return party.Online
	}
	return false
}

// GetOnlineParties returns all online parties
func (r *PartyRegistry) GetOnlineParties() []*RegisteredParty {
	r.mu.RLock()
	defer r.mu.RUnlock()

	parties := make([]*RegisteredParty, 0)
	for _, party := range r.parties {
		if party.Online {
			parties = append(parties, party)
		}
	}
	return parties
}

// GetOfflineParties returns all parties that are offline (have notification channels but not connected)
func (r *PartyRegistry) GetOfflineParties() []*RegisteredParty {
	r.mu.RLock()
	defer r.mu.RUnlock()

	parties := make([]*RegisteredParty, 0)
	for _, party := range r.parties {
		if !party.Online && party.IsOfflineMode() {
			parties = append(parties, party)
		}
	}
	return parties
}

// MarkStalePartiesOffline marks parties as offline if they haven't sent a heartbeat within the timeout
// Returns the list of parties that were marked offline
func (r *PartyRegistry) MarkStalePartiesOffline(timeout time.Duration) []*RegisteredParty {
	r.mu.Lock()
	defer r.mu.Unlock()

	now := time.Now()
	staleParties := make([]*RegisteredParty, 0)

	for _, party := range r.parties {
		if party.Online && now.Sub(party.LastSeen) > timeout {
			party.Online = false
			staleParties = append(staleParties, party)
		}
	}

	return staleParties
}

// Heartbeat updates the last seen timestamp and marks the party as online
func (r *PartyRegistry) Heartbeat(partyID string) bool {
	r.mu.Lock()
	defer r.mu.Unlock()

	if party, exists := r.parties[partyID]; exists {
		party.LastSeen = time.Now()
		party.Online = true
		return true
	}
	return false
}

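A small test-style sketch of the lifecycle above (a minimal sketch, assuming it sits alongside the registry in the same domain package; the timings mirror the 2-minute stale timeout used by the background job):

package domain

import (
	"testing"
	"time"
)

func TestStalePartyLifecycle(t *testing.T) {
	r := NewPartyRegistry()
	p := r.Register("party-1", "delegate", "v1")
	if !p.Online {
		t.Fatal("expected a freshly registered party to be online")
	}

	// Simulate a missed heartbeat window, then run the stale check.
	p.LastSeen = time.Now().Add(-3 * time.Minute)
	if stale := r.MarkStalePartiesOffline(2 * time.Minute); len(stale) != 1 {
		t.Fatalf("expected 1 stale party, got %d", len(stale))
	}
	if r.IsOnline("party-1") {
		t.Fatal("expected party-1 to be offline after the stale check")
	}

	// A heartbeat brings the party back online.
	if !r.Heartbeat("party-1") || !r.IsOnline("party-1") {
		t.Fatal("expected heartbeat to restore online status")
	}
}
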
@ -19,6 +19,9 @@ type MessageRepository interface {
	// GetPendingMessages retrieves pending messages for a party
	GetPendingMessages(ctx context.Context, sessionID uuid.UUID, partyID string, afterTime time.Time) ([]*entities.MPCMessage, error)

	// CountPendingByParty counts all pending messages for a party across all sessions
	CountPendingByParty(ctx context.Context, partyID string) (int64, error)

	// GetMessagesByRound retrieves messages for a specific round
	GetMessagesByRound(ctx context.Context, sessionID uuid.UUID, roundNumber int) ([]*entities.MPCMessage, error)

@ -17,12 +17,15 @@ import (
	"github.com/rwadurian/mpc-system/pkg/config"
	"github.com/rwadurian/mpc-system/pkg/crypto"
	"github.com/rwadurian/mpc-system/pkg/logger"
	"github.com/rwadurian/mpc-system/pkg/tss"
	grpcclient "github.com/rwadurian/mpc-system/services/server-party/adapters/output/grpc"
	"github.com/rwadurian/mpc-system/services/server-party/application/use_cases"
	"github.com/rwadurian/mpc-system/services/server-party/infrastructure/cache"
	"go.uber.org/zap"
)

// Global share cache for delegate parties
var globalShareCache *cache.ShareCache

func main() {
	// Parse flags
	configPath := flag.String("config", "", "Path to config file")

@ -45,10 +48,14 @@ func main() {
	}
	defer logger.Sync()

	logger.Info("Starting Server Party API Service",
	logger.Info("Starting Server Party API Service (Delegate Mode)",
		zap.String("environment", cfg.Server.Environment),
		zap.Int("http_port", cfg.Server.HTTPPort))

	// Initialize share cache for delegate parties (15 minute TTL)
	globalShareCache = cache.NewShareCache(15 * time.Minute)
	logger.Info("Share cache initialized", zap.Duration("ttl", 15*time.Minute))

	// Initialize crypto service with master key from environment
	masterKeyHex := os.Getenv("MPC_CRYPTO_MASTER_KEY")
	if masterKeyHex == "" {

@ -96,14 +103,16 @@ func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Get party ID from environment (or generate one)
	// Get party ID from environment (or use default)
	partyID := os.Getenv("PARTY_ID")
	if partyID == "" {
		partyID = "server-party-api"
	}

	// Force PARTY_ROLE to delegate for this service
	os.Setenv("PARTY_ROLE", "delegate")

	// Register this party as a delegate party with Message Router
	// This allows Session Coordinator to discover this party for session creation
	logger.Info("Registering party with Message Router",
		zap.String("party_id", partyID),
		zap.String("role", "delegate"))

@ -116,10 +125,25 @@ func main() {
		zap.String("party_id", partyID),
		zap.String("role", "delegate"))

	// Initialize use cases with nil keyShareRepo (delegate doesn't use DB)
	// The use cases check PARTY_ROLE env var to determine behavior
	participateKeygenUC := use_cases.NewParticipateKeygenUseCase(
		nil, // No database storage for delegate
		sessionClient,
		messageRouter,
		cryptoService,
	)
	participateSigningUC := use_cases.NewParticipateSigningUseCase(
		nil, // No database storage for delegate
		sessionClient,
		messageRouter,
		cryptoService,
	)

	// Start HTTP server
	errChan := make(chan error, 1)
	go func() {
		if err := startHTTPServer(cfg, sessionClient, messageRouter, cryptoService, apiKey); err != nil {
		if err := startHTTPServer(cfg, participateKeygenUC, participateSigningUC, cryptoService, apiKey); err != nil {
			errChan <- fmt.Errorf("HTTP server error: %w", err)
		}
	}()

@ -147,8 +171,8 @@ func main() {

func startHTTPServer(
	cfg *config.Config,
	sessionClient use_cases.SessionCoordinatorClient,
	messageRouter use_cases.MessageRouterClient,
	participateKeygenUC *use_cases.ParticipateKeygenUseCase,
	participateSigningUC *use_cases.ParticipateSigningUseCase,
	cryptoService *crypto.CryptoService,
	apiKey string,
) error {

@ -165,6 +189,7 @@ func startHTTPServer(
		c.JSON(http.StatusOK, gin.H{
			"status":  "healthy",
			"service": "server-party-api",
			"role":    "delegate",
		})
	})

@ -175,15 +200,12 @@ func startHTTPServer(
	}

	{
		// Generate user share - synchronous endpoint that returns the share
		// This is the main endpoint for mpc-service to call
		api.POST("/keygen/generate-user-share", func(c *gin.Context) {
		// Keygen participation - same as server-party but returns share
		api.POST("/keygen/participate", func(c *gin.Context) {
			var req struct {
				SessionID string `json:"session_id" binding:"required"`
				PartyID   string `json:"party_id" binding:"required"`
				JoinToken string `json:"join_token" binding:"required"`
				// Optional: encryption key for the share (provided by user)
				UserPublicKey string `json:"user_public_key"`
			}

			if err := c.ShouldBindJSON(&req); err != nil {

@ -197,26 +219,23 @@ func startHTTPServer(
				return
			}

			logger.Info("Generating user share",
			logger.Info("Starting keygen participation (delegate)",
				zap.String("session_id", req.SessionID),
				zap.String("party_id", req.PartyID))

			// Execute keygen synchronously and return the share
			// Execute keygen synchronously for delegate party
			ctx, cancel := context.WithTimeout(c.Request.Context(), 10*time.Minute)
			defer cancel()

			result, err := generateUserShare(
				ctx,
				sessionClient,
				messageRouter,
				cryptoService,
				sessionID,
				req.PartyID,
				req.JoinToken,
				req.UserPublicKey,
			)
			input := use_cases.ParticipateKeygenInput{
				SessionID: sessionID,
				PartyID:   req.PartyID,
				JoinToken: req.JoinToken,
			}

			output, err := participateKeygenUC.Execute(ctx, input)
			if err != nil {
				logger.Error("Failed to generate user share",
				logger.Error("Keygen participation failed",
					zap.String("session_id", req.SessionID),
					zap.String("party_id", req.PartyID),
					zap.Error(err))

@ -229,28 +248,41 @@ func startHTTPServer(
				return
			}

			logger.Info("User share generated successfully",
			logger.Info("Keygen participation completed (delegate)",
				zap.String("session_id", req.SessionID),
				zap.String("party_id", req.PartyID))
				zap.String("party_id", req.PartyID),
				zap.Bool("success", output.Success))

			// For delegate party, ShareForUser contains the encrypted share
			if len(output.ShareForUser) == 0 {
				c.JSON(http.StatusInternalServerError, gin.H{
					"error": "share not generated for delegate party",
				})
				return
			}

			// Store in cache for retrieval (optional, for async pattern)
			globalShareCache.Store(sessionID, req.PartyID, output.ShareForUser, output.PublicKey)

			// Return share directly
			c.JSON(http.StatusOK, gin.H{
				"success":     true,
				"session_id":  req.SessionID,
				"party_id":    req.PartyID,
				"party_index": result.PartyIndex,
				"share_data":  result.ShareData,
				"public_key":  result.PublicKey,
				"party_index": output.KeyShare.PartyIndex,
				"share_data":  hex.EncodeToString(output.ShareForUser),
				"public_key":  hex.EncodeToString(output.PublicKey),
			})
		})

		// Sign with user share - synchronous endpoint
		api.POST("/sign/with-user-share", func(c *gin.Context) {
		// Signing with user-provided share
		api.POST("/sign/participate", func(c *gin.Context) {
			var req struct {
				SessionID   string `json:"session_id" binding:"required"`
				PartyID     string `json:"party_id" binding:"required"`
				JoinToken   string `json:"join_token" binding:"required"`
				ShareData   string `json:"share_data" binding:"required"`
				MessageHash string `json:"message_hash" binding:"required"`
				ShareData   string `json:"share_data" binding:"required"` // User's encrypted share
				MessageHash string `json:"message_hash"`
			}

			if err := c.ShouldBindJSON(&req); err != nil {

@ -270,32 +302,34 @@ func startHTTPServer(
				return
			}

			messageHash, err := hex.DecodeString(req.MessageHash)
			if err != nil {
				c.JSON(http.StatusBadRequest, gin.H{"error": "invalid message_hash format (expected hex)"})
				return
			var messageHash []byte
			if req.MessageHash != "" {
				messageHash, err = hex.DecodeString(req.MessageHash)
				if err != nil {
					c.JSON(http.StatusBadRequest, gin.H{"error": "invalid message_hash format (expected hex)"})
					return
				}
			}

			logger.Info("Signing with user share",
			logger.Info("Starting signing participation (delegate)",
				zap.String("session_id", req.SessionID),
				zap.String("party_id", req.PartyID))

			// Execute signing synchronously
			ctx, cancel := context.WithTimeout(c.Request.Context(), 5*time.Minute)
			defer cancel()

			result, err := signWithUserShare(
				ctx,
				sessionClient,
				messageRouter,
				cryptoService,
				sessionID,
				req.PartyID,
				req.JoinToken,
				shareData,
				messageHash,
			)
			input := use_cases.ParticipateSigningInput{
				SessionID:     sessionID,
				PartyID:       req.PartyID,
				JoinToken:     req.JoinToken,
				MessageHash:   messageHash,
				UserShareData: shareData, // Pass user's share
			}

			output, err := participateSigningUC.Execute(ctx, input)
			if err != nil {
				logger.Error("Failed to sign with user share",
				logger.Error("Signing participation failed",
					zap.String("session_id", req.SessionID),
					zap.String("party_id", req.PartyID),
					zap.Error(err))

@ -308,18 +342,62 @@ func startHTTPServer(
				return
			}

			logger.Info("Signing completed successfully",
			logger.Info("Signing participation completed (delegate)",
				zap.String("session_id", req.SessionID),
				zap.String("party_id", req.PartyID))
				zap.String("party_id", req.PartyID),
				zap.Bool("success", output.Success))

			// Return signature
			var rHex, sHex string
			if output.R != nil {
				rHex = hex.EncodeToString(output.R.Bytes())
			}
			if output.S != nil {
				sHex = hex.EncodeToString(output.S.Bytes())
			}

			c.JSON(http.StatusOK, gin.H{
				"success":    true,
				"session_id": req.SessionID,
				"party_id":   req.PartyID,
				"signature":  result.Signature,
				"r":          result.R,
				"s":          result.S,
				"v":          result.V,
				"signature":  hex.EncodeToString(output.Signature),
				"r":          rHex,
				"s":          sHex,
			})
		})

		// Get user share from cache (for async keygen pattern)
		api.GET("/sessions/:session_id/user-share", func(c *gin.Context) {
			sessionIDStr := c.Param("session_id")

			sessionID, err := uuid.Parse(sessionIDStr)
			if err != nil {
				c.JSON(http.StatusBadRequest, gin.H{
					"error": "invalid session_id format",
				})
				return
			}

			// Retrieve and delete share from cache (one-time retrieval)
			entry, exists := globalShareCache.GetAndDelete(sessionID)
			if !exists {
				c.JSON(http.StatusNotFound, gin.H{
					"error": "Share not found or already retrieved",
					"note":  "Shares can only be retrieved once and expire after 15 minutes",
				})
				return
			}

			logger.Info("User share retrieved successfully",
				zap.String("session_id", sessionIDStr),
				zap.String("party_id", entry.PartyID))

			c.JSON(http.StatusOK, gin.H{
				"session_id": sessionIDStr,
				"party_id":   entry.PartyID,
				"share":      hex.EncodeToString(entry.Share),
				"public_key": hex.EncodeToString(entry.PublicKey),
				"note":       "This share has been deleted from memory and cannot be retrieved again",
			})
		})
	}

@ -342,373 +420,3 @@ func apiKeyAuth(expectedKey string) gin.HandlerFunc {
		c.Next()
	}
}

// UserShareResult contains the result of user share generation
type UserShareResult struct {
	PartyIndex int
	ShareData  string // hex encoded
	PublicKey  string // hex encoded
}

// generateUserShare generates a share for the user without storing it
func generateUserShare(
	ctx context.Context,
	sessionClient use_cases.SessionCoordinatorClient,
	messageRouter use_cases.MessageRouterClient,
	cryptoService *crypto.CryptoService,
	sessionID uuid.UUID,
	partyID string,
	joinToken string,
	userPublicKey string,
) (*UserShareResult, error) {
	// 1. Join session via coordinator
	sessionInfo, err := sessionClient.JoinSession(ctx, sessionID, partyID, joinToken)
	if err != nil {
		return nil, fmt.Errorf("failed to join session: %w", err)
	}

	if sessionInfo.SessionType != "keygen" {
		return nil, fmt.Errorf("invalid session type: expected keygen, got %s", sessionInfo.SessionType)
	}

	// 2. Find self in participants and build party index map
	var selfIndex int
	partyIndexMap := make(map[string]int)
	for _, p := range sessionInfo.Participants {
		partyIndexMap[p.PartyID] = p.PartyIndex
		if p.PartyID == partyID {
			selfIndex = p.PartyIndex
		}
	}

	// 3. Subscribe to messages
	msgChan, err := messageRouter.SubscribeMessages(ctx, sessionID, partyID)
	if err != nil {
		return nil, fmt.Errorf("failed to subscribe to messages: %w", err)
	}

	// 4. Run TSS Keygen protocol
	saveData, publicKey, err := runKeygenProtocol(
		ctx,
		sessionID,
		partyID,
		selfIndex,
		sessionInfo.Participants,
		sessionInfo.ThresholdN,
		sessionInfo.ThresholdT,
		msgChan,
		partyIndexMap,
		messageRouter,
	)
	if err != nil {
		return nil, fmt.Errorf("keygen protocol failed: %w", err)
	}

	// 5. Encrypt share (optionally with user's public key if provided)
	var encryptedShare []byte
	if userPublicKey != "" {
		// TODO: Encrypt with user's public key for end-to-end encryption
		encryptedShare, err = cryptoService.EncryptShare(saveData, partyID)
	} else {
		encryptedShare, err = cryptoService.EncryptShare(saveData, partyID)
	}
	if err != nil {
		return nil, fmt.Errorf("failed to encrypt share: %w", err)
	}

	// 6. Report completion to coordinator
	if err := sessionClient.ReportCompletion(ctx, sessionID, partyID, publicKey); err != nil {
		logger.Error("failed to report completion", zap.Error(err))
		// Don't fail - share is generated
	}

	return &UserShareResult{
		PartyIndex: selfIndex,
		ShareData:  hex.EncodeToString(encryptedShare),
		PublicKey:  hex.EncodeToString(publicKey),
	}, nil
}

// SigningResult contains the result of signing
type SigningResult struct {
	Signature string
	R         string
	S         string
	V         int
}

// signWithUserShare signs using the user's share
func signWithUserShare(
	ctx context.Context,
	sessionClient use_cases.SessionCoordinatorClient,
	messageRouter use_cases.MessageRouterClient,
	cryptoService *crypto.CryptoService,
	sessionID uuid.UUID,
	partyID string,
	joinToken string,
	shareData []byte,
	messageHash []byte,
) (*SigningResult, error) {
	// 1. Join session via coordinator
	sessionInfo, err := sessionClient.JoinSession(ctx, sessionID, partyID, joinToken)
	if err != nil {
		return nil, fmt.Errorf("failed to join session: %w", err)
	}

	if sessionInfo.SessionType != "sign" {
		return nil, fmt.Errorf("invalid session type: expected sign, got %s", sessionInfo.SessionType)
	}

	// 2. Decrypt share
	decryptedShare, err := cryptoService.DecryptShare(shareData, partyID)
	if err != nil {
		return nil, fmt.Errorf("failed to decrypt share: %w", err)
	}

	// 3. Find self in participants
	var selfIndex int
	partyIndexMap := make(map[string]int)
	for _, p := range sessionInfo.Participants {
		partyIndexMap[p.PartyID] = p.PartyIndex
		if p.PartyID == partyID {
			selfIndex = p.PartyIndex
		}
	}

	// 4. Subscribe to messages
	msgChan, err := messageRouter.SubscribeMessages(ctx, sessionID, partyID)
	if err != nil {
		return nil, fmt.Errorf("failed to subscribe to messages: %w", err)
	}

	// 5. Run TSS Signing protocol
	signature, r, s, v, err := runSigningProtocol(
		ctx,
		sessionID,
		partyID,
		selfIndex,
		sessionInfo.Participants,
		sessionInfo.ThresholdN,
		sessionInfo.ThresholdT,
		msgChan,
		partyIndexMap,
		messageRouter,
		decryptedShare,
		messageHash,
	)
	if err != nil {
		return nil, fmt.Errorf("signing protocol failed: %w", err)
	}

	// 6. Report completion to coordinator
	if err := sessionClient.ReportCompletion(ctx, sessionID, partyID, signature); err != nil {
		logger.Error("failed to report completion", zap.Error(err))
	}

	return &SigningResult{
		Signature: hex.EncodeToString(signature),
		R:         hex.EncodeToString(r),
		S:         hex.EncodeToString(s),
		V:         v,
	}, nil
}

// runKeygenProtocol runs the TSS keygen protocol
func runKeygenProtocol(
	ctx context.Context,
	sessionID uuid.UUID,
	partyID string,
	selfIndex int,
	participants []use_cases.ParticipantInfo,
	n, t int,
	msgChan <-chan *use_cases.MPCMessage,
	partyIndexMap map[string]int,
	messageRouter use_cases.MessageRouterClient,
) ([]byte, []byte, error) {
	logger.Info("Running keygen protocol",
		zap.String("session_id", sessionID.String()),
		zap.String("party_id", partyID),
		zap.Int("self_index", selfIndex),
		zap.Int("n", n),
		zap.Int("t", t))

	// Create message handler adapter
	msgHandler := &messageHandler{
		sessionID:     sessionID,
		partyID:       partyID,
		messageRouter: messageRouter,
		msgChan:       make(chan *tss.ReceivedMessage, 100),
		partyIndexMap: partyIndexMap,
	}

	// Start message conversion goroutine
	go msgHandler.convertMessages(ctx, msgChan)

	// Create keygen config
	config := tss.KeygenConfig{
		Threshold:    t,
		TotalParties: n,
		Timeout:      10 * time.Minute,
	}

	// Create party list
	allParties := make([]tss.KeygenParty, len(participants))
	for i, p := range participants {
		allParties[i] = tss.KeygenParty{
			PartyID:    p.PartyID,
			PartyIndex: p.PartyIndex,
		}
	}

	selfParty := tss.KeygenParty{
		PartyID:    partyID,
		PartyIndex: selfIndex,
	}

	// Create keygen session
	session, err := tss.NewKeygenSession(config, selfParty, allParties, msgHandler)
	if err != nil {
		return nil, nil, err
	}

	// Run keygen
	result, err := session.Start(ctx)
	if err != nil {
		return nil, nil, err
	}

	logger.Info("Keygen completed successfully",
		zap.String("session_id", sessionID.String()),
		zap.String("party_id", partyID))

	return result.LocalPartySaveData, result.PublicKeyBytes, nil
}

// runSigningProtocol runs the TSS signing protocol
func runSigningProtocol(
	ctx context.Context,
	sessionID uuid.UUID,
	partyID string,
	selfIndex int,
	participants []use_cases.ParticipantInfo,
	n, t int,
	msgChan <-chan *use_cases.MPCMessage,
	partyIndexMap map[string]int,
	messageRouter use_cases.MessageRouterClient,
	shareData []byte,
	messageHash []byte,
) ([]byte, []byte, []byte, int, error) {
	logger.Info("Running signing protocol",
		zap.String("session_id", sessionID.String()),
		zap.String("party_id", partyID),
		zap.Int("self_index", selfIndex))

	// Create message handler adapter
	msgHandler := &messageHandler{
		sessionID:     sessionID,
		partyID:       partyID,
		messageRouter: messageRouter,
		msgChan:       make(chan *tss.ReceivedMessage, 100),
		partyIndexMap: partyIndexMap,
	}

	// Start message conversion goroutine
	go msgHandler.convertMessages(ctx, msgChan)

	// Create signing config
	config := tss.SigningConfig{
		Threshold:    t,
		TotalSigners: n,
		Timeout:      5 * time.Minute,
	}

	// Create party list
	allParties := make([]tss.SigningParty, len(participants))
	for i, p := range participants {
		allParties[i] = tss.SigningParty{
			PartyID:    p.PartyID,
			PartyIndex: p.PartyIndex,
		}
	}

	selfParty := tss.SigningParty{
		PartyID:    partyID,
		PartyIndex: selfIndex,
	}

	// Create signing session
	session, err := tss.NewSigningSession(config, selfParty, allParties, shareData, messageHash, msgHandler)
	if err != nil {
		return nil, nil, nil, 0, err
	}

	// Run signing
	result, err := session.Start(ctx)
	if err != nil {
		return nil, nil, nil, 0, err
	}

	logger.Info("Signing completed successfully",
		zap.String("session_id", sessionID.String()),
		zap.String("party_id", partyID))

	// Convert big.Int to []byte
	var rBytes, sBytes []byte
	if result.R != nil {
		rBytes = result.R.Bytes()
	}
	if result.S != nil {
		sBytes = result.S.Bytes()
	}

	return result.Signature, rBytes, sBytes, result.RecoveryID, nil
}

// messageHandler adapts MPCMessage channel to tss.MessageHandler
type messageHandler struct {
	sessionID     uuid.UUID
	partyID       string
	messageRouter use_cases.MessageRouterClient
	msgChan       chan *tss.ReceivedMessage
	partyIndexMap map[string]int
}

func (h *messageHandler) SendMessage(ctx context.Context, isBroadcast bool, toParties []string, msgBytes []byte) error {
	return h.messageRouter.RouteMessage(ctx, h.sessionID, h.partyID, toParties, 0, msgBytes)
}

func (h *messageHandler) ReceiveMessages() <-chan *tss.ReceivedMessage {
	return h.msgChan
}

func (h *messageHandler) convertMessages(ctx context.Context, inChan <-chan *use_cases.MPCMessage) {
	for {
		select {
		case <-ctx.Done():
			close(h.msgChan)
			return
		case msg, ok := <-inChan:
			if !ok {
				close(h.msgChan)
				return
			}

			fromIndex, exists := h.partyIndexMap[msg.FromParty]
			if !exists {
				continue
			}

			tssMsg := &tss.ReceivedMessage{
				FromPartyIndex: fromIndex,
				IsBroadcast:    msg.IsBroadcast,
				MsgBytes:       msg.Payload,
			}

			select {
			case h.msgChan <- tssMsg:
			case <-ctx.Done():
				return
			}
		}
	}
}

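An illustrative caller for the delegate keygen endpoint above. The base URL, the /api/v1 prefix, and the X-API-Key header name are assumptions, not taken from this diff; adjust them to the actual router group and apiKeyAuth middleware:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// requestUserShare posts a keygen participation request and decodes the share fields.
func requestUserShare(baseURL, apiKey, sessionID, partyID, joinToken string) error {
	body, _ := json.Marshal(map[string]string{
		"session_id": sessionID,
		"party_id":   partyID,
		"join_token": joinToken,
	})

	req, err := http.NewRequest(http.MethodPost, baseURL+"/api/v1/keygen/participate", bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-API-Key", apiKey) // assumed header name

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	var out struct {
		Success   bool   `json:"success"`
		ShareData string `json:"share_data"` // hex-encoded encrypted share
		PublicKey string `json:"public_key"` // hex-encoded public key
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return err
	}
	fmt.Println("keygen success:", out.Success)
	return nil
}
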
@ -8,49 +8,70 @@ import (
|
|||
|
||||
"github.com/google/uuid"
|
||||
router "github.com/rwadurian/mpc-system/api/grpc/router/v1"
|
||||
"github.com/rwadurian/mpc-system/pkg/grpcutil"
|
||||
"github.com/rwadurian/mpc-system/pkg/logger"
|
||||
"github.com/rwadurian/mpc-system/pkg/retry"
|
||||
"github.com/rwadurian/mpc-system/services/server-party/application/use_cases"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
// MessageRouterClient implements use_cases.MessageRouterClient
|
||||
type MessageRouterClient struct {
|
||||
conn *grpc.ClientConn
|
||||
address string
|
||||
mu sync.Mutex
|
||||
resilientConn *grpcutil.ResilientConn
|
||||
address string
|
||||
mu sync.Mutex
|
||||
retryCfg retry.Config
|
||||
}
|
||||
|
||||
// NewMessageRouterClient creates a new message router gRPC client
|
||||
// NewMessageRouterClient creates a new message router gRPC client with auto-reconnection
|
||||
func NewMessageRouterClient(address string) (*MessageRouterClient, error) {
|
||||
conn, err := grpc.Dial(
|
||||
address,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithBlock(),
|
||||
grpc.WithTimeout(10*time.Second),
|
||||
)
|
||||
config := grpcutil.DefaultClientConfig(address)
|
||||
resilientConn, err := grpcutil.NewResilientConn(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logger.Info("Connected to Message Router", zap.String("address", address))
|
||||
logger.Info("Connected to Message Router with keepalive",
|
||||
zap.String("address", address),
|
||||
zap.Duration("keepalive_time", config.KeepaliveTime),
|
||||
zap.Bool("auto_reconnect", config.EnableReconnect))
|
||||
|
||||
return &MessageRouterClient{
|
||||
conn: conn,
|
||||
address: address,
|
||||
resilientConn: resilientConn,
|
||||
address: address,
|
||||
retryCfg: retry.DefaultConfig(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getConn returns the current gRPC connection
|
||||
func (c *MessageRouterClient) getConn() *grpc.ClientConn {
|
||||
return c.resilientConn.GetConn()
|
||||
}
|
||||
|
||||
// Close closes the gRPC connection
|
||||
func (c *MessageRouterClient) Close() error {
|
||||
if c.conn != nil {
|
||||
return c.conn.Close()
|
||||
if c.resilientConn != nil {
|
||||
return c.resilientConn.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsConnected returns true if the connection is ready
|
||||
func (c *MessageRouterClient) IsConnected() bool {
|
||||
return c.resilientConn != nil && c.resilientConn.IsConnected()
|
||||
}
|
||||
|
||||
// WaitForReady waits for the connection to be ready
|
||||
func (c *MessageRouterClient) WaitForReady(ctx context.Context) bool {
|
||||
if c.resilientConn == nil {
|
||||
return false
|
||||
}
|
||||
return c.resilientConn.WaitForReady(ctx)
|
||||
}
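pkg/grpcutil is referenced above but not included in this diff. A minimal sketch of the surface these clients appear to rely on is given below; every name and default that is not literally used above (the interface name, the concrete values) is an assumption, not the actual implementation.

package grpcutil

import (
    "context"
    "time"

    "google.golang.org/grpc"
)

// ClientConfig mirrors the fields the clients above log (KeepaliveTime, EnableReconnect).
type ClientConfig struct {
    Address         string
    KeepaliveTime   time.Duration
    EnableReconnect bool
}

// DefaultClientConfig is assumed to fill in keepalive/reconnect defaults;
// the concrete values shown here are placeholders.
func DefaultClientConfig(address string) ClientConfig {
    return ClientConfig{Address: address, KeepaliveTime: 30 * time.Second, EnableReconnect: true}
}

// resilientConnSurface lists only the methods called by the clients above; the real
// ResilientConn is presumably a struct wrapping *grpc.ClientConn that redials with
// keepalive when the connection drops.
type resilientConnSurface interface {
    GetConn() *grpc.ClientConn
    IsConnected() bool
    WaitForReady(ctx context.Context) bool
    Close() error
}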
|
||||
|
||||
// RouteMessage sends an MPC protocol message to other parties
|
||||
// Includes automatic retry with exponential backoff for transient failures
|
||||
func (c *MessageRouterClient) RouteMessage(
|
||||
ctx context.Context,
|
||||
sessionID uuid.UUID,
|
||||
|
|
@@ -68,29 +89,27 @@ func (c *MessageRouterClient) RouteMessage(
|
|||
Payload: payload,
|
||||
}
|
||||
|
||||
resp := &router.RouteMessageResponse{}
|
||||
err := c.conn.Invoke(ctx, "/mpc.router.v1.MessageRouter/RouteMessage", req, resp)
|
||||
if err != nil {
|
||||
logger.Error("Failed to route message",
|
||||
zap.Error(err),
|
||||
return retry.DoVoid(ctx, c.retryCfg, "RouteMessage", func() error {
|
||||
resp := &router.RouteMessageResponse{}
|
||||
err := c.getConn().Invoke(ctx, "/mpc.router.v1.MessageRouter/RouteMessage", req, resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !resp.Success {
|
||||
logger.Error("Message routing failed",
|
||||
zap.String("session_id", sessionID.String()))
|
||||
return use_cases.ErrKeygenFailed
|
||||
}
|
||||
|
||||
logger.Debug("Message routed successfully",
|
||||
zap.String("session_id", sessionID.String()),
|
||||
zap.String("from", fromParty))
|
||||
return err
|
||||
}
|
||||
zap.String("from", fromParty),
|
||||
zap.Int("to_count", len(toParties)),
|
||||
zap.Int("round", roundNumber))
|
||||
|
||||
if !resp.Success {
|
||||
logger.Error("Message routing failed",
|
||||
zap.String("session_id", sessionID.String()))
|
||||
return use_cases.ErrKeygenFailed
|
||||
}
|
||||
|
||||
logger.Debug("Message routed successfully",
|
||||
zap.String("session_id", sessionID.String()),
|
||||
zap.String("from", fromParty),
|
||||
zap.Int("to_count", len(toParties)),
|
||||
zap.Int("round", roundNumber))
|
||||
|
||||
return nil
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// SubscribeMessages subscribes to MPC messages for a party
|
||||
|
|
@@ -145,6 +164,8 @@ func (c *MessageRouterClient) SubscribeMessages(
|
|||
|
||||
// Convert to use_cases.MPCMessage
|
||||
mpcMsg := &use_cases.MPCMessage{
|
||||
MessageID: msg.MessageId,
|
||||
SessionID: msg.SessionId,
|
||||
FromParty: msg.FromParty,
|
||||
IsBroadcast: msg.IsBroadcast,
|
||||
RoundNumber: int(msg.RoundNumber),
|
||||
|
|
@@ -154,8 +175,24 @@ func (c *MessageRouterClient) SubscribeMessages(
|
|||
select {
|
||||
case msgChan <- mpcMsg:
|
||||
logger.Debug("Received MPC message",
|
||||
zap.String("message_id", msg.MessageId),
|
||||
zap.String("from", msg.FromParty),
|
||||
zap.Int("round", int(msg.RoundNumber)))
|
||||
|
||||
// Send acknowledgment for the received message
|
||||
go func(messageID, sessionIDStr, pID string) {
|
||||
ackCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
sid, _ := uuid.Parse(sessionIDStr)
|
||||
if err := c.AcknowledgeMessage(ackCtx, messageID, sid, pID, true, ""); err != nil {
|
||||
logger.Warn("Failed to acknowledge message",
|
||||
zap.String("message_id", messageID),
|
||||
zap.Error(err))
|
||||
} else {
|
||||
logger.Debug("Message acknowledged",
|
||||
zap.String("message_id", messageID))
|
||||
}
|
||||
}(msg.MessageId, msg.SessionId, partyID)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
|
|
@@ -180,7 +217,7 @@ func (c *MessageRouterClient) createSubscribeStream(
|
|||
ServerStreams: true,
|
||||
}
|
||||
|
||||
stream, err := c.conn.NewStream(ctx, streamDesc, "/mpc.router.v1.MessageRouter/SubscribeMessages")
|
||||
stream, err := c.getConn().NewStream(ctx, streamDesc, "/mpc.router.v1.MessageRouter/SubscribeMessages")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@@ -197,6 +234,7 @@ func (c *MessageRouterClient) createSubscribeStream(
|
|||
}
|
||||
|
||||
// GetPendingMessages gets pending messages (polling alternative)
|
||||
// Includes automatic retry with exponential backoff for transient failures
|
||||
func (c *MessageRouterClient) GetPendingMessages(
|
||||
ctx context.Context,
|
||||
sessionID uuid.UUID,
|
||||
|
|
@@ -209,31 +247,81 @@ func (c *MessageRouterClient) GetPendingMessages(
|
|||
AfterTimestamp: afterTimestamp,
|
||||
}
|
||||
|
||||
resp := &router.GetPendingMessagesResponse{}
|
||||
err := c.conn.Invoke(ctx, "/mpc.router.v1.MessageRouter/GetPendingMessages", req, resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
messages := make([]*use_cases.MPCMessage, len(resp.Messages))
|
||||
for i, msg := range resp.Messages {
|
||||
messages[i] = &use_cases.MPCMessage{
|
||||
FromParty: msg.FromParty,
|
||||
IsBroadcast: msg.IsBroadcast,
|
||||
RoundNumber: int(msg.RoundNumber),
|
||||
Payload: msg.Payload,
|
||||
return retry.Do(ctx, c.retryCfg, "GetPendingMessages", func() ([]*use_cases.MPCMessage, error) {
|
||||
resp := &router.GetPendingMessagesResponse{}
|
||||
err := c.getConn().Invoke(ctx, "/mpc.router.v1.MessageRouter/GetPendingMessages", req, resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return messages, nil
|
||||
messages := make([]*use_cases.MPCMessage, len(resp.Messages))
|
||||
for i, msg := range resp.Messages {
|
||||
messages[i] = &use_cases.MPCMessage{
|
||||
MessageID: msg.MessageId,
|
||||
SessionID: msg.SessionId,
|
||||
FromParty: msg.FromParty,
|
||||
IsBroadcast: msg.IsBroadcast,
|
||||
RoundNumber: int(msg.RoundNumber),
|
||||
Payload: msg.Payload,
|
||||
}
|
||||
|
||||
// Send acknowledgment for each received message
|
||||
go func(messageID, sessionIDStr, pID string) {
|
||||
ackCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
sid, _ := uuid.Parse(sessionIDStr)
|
||||
if err := c.AcknowledgeMessage(ackCtx, messageID, sid, pID, true, ""); err != nil {
|
||||
logger.Warn("Failed to acknowledge pending message",
|
||||
zap.String("message_id", messageID),
|
||||
zap.Error(err))
|
||||
}
|
||||
}(msg.MessageId, msg.SessionId, partyID)
|
||||
}
|
||||
|
||||
return messages, nil
|
||||
})
|
||||
}
|
||||
|
||||
// NotificationConfig represents notification channel configuration for a party
// If any notification channel is set, party operates in OFFLINE mode (24h async)
// If no notification channels are set, party operates in REAL-TIME mode (Message Router push)
type NotificationConfig struct {
    Email     string // Optional: email address for session invitations
    Phone     string // Optional: phone number for SMS notifications
    PushToken string // Optional: push notification token (FCM/APNs)
}

// HasAnyChannel returns true if any notification channel is configured
func (nc *NotificationConfig) HasAnyChannel() bool {
    return nc != nil && (nc.Email != "" || nc.Phone != "" || nc.PushToken != "")
}
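A small illustration of how the two modes described above are selected in practice, using the registration helpers defined just below; the party ID, role, version and email address are made-up values.

func chooseRegistrationMode(ctx context.Context, client *MessageRouterClient) error {
    // Hypothetical channels: any non-empty channel switches the party to OFFLINE mode.
    cfg := &NotificationConfig{Email: "ops@example.com"}
    if cfg.HasAnyChannel() {
        // Offline mode: invitations are delivered via the configured channels (24h async window).
        return client.RegisterPartyWithNotification(ctx, "party-a", "persistent", "1.0.0", cfg)
    }
    // Real-time mode: the party relies on Message Router push delivery.
    return client.RegisterParty(ctx, "party-a", "persistent", "1.0.0")
}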
// RegisterParty registers this party with the message router
|
||||
// This should be called on startup and whenever party configuration changes
|
||||
func (c *MessageRouterClient) RegisterParty(
|
||||
ctx context.Context,
|
||||
partyID string,
|
||||
partyRole string,
|
||||
version string,
|
||||
) error {
|
||||
return c.RegisterPartyWithNotification(ctx, partyID, partyRole, version, nil)
|
||||
}
|
||||
|
||||
// RegisterPartyWithNotification registers party with optional notification channels
|
||||
// If notification channels are provided, party operates in offline mode (24h async)
|
||||
// If no notification channels, party operates in real-time mode (Message Router push)
|
||||
// Includes automatic retry with exponential backoff for transient failures
|
||||
//
|
||||
// This method should be called:
|
||||
// 1. On startup to register the party
|
||||
// 2. When notification channels are updated (e.g., user binds email/phone)
|
||||
// 3. When party configuration changes
|
||||
func (c *MessageRouterClient) RegisterPartyWithNotification(
|
||||
ctx context.Context,
|
||||
partyID string,
|
||||
partyRole string,
|
||||
version string,
|
||||
notification *NotificationConfig,
|
||||
) error {
|
||||
req := &router.RegisterPartyRequest{
|
||||
PartyId: partyID,
|
||||
|
|
@@ -241,21 +329,58 @@ func (c *MessageRouterClient) RegisterParty(
|
|||
Version: version,
|
||||
}
|
||||
|
||||
resp := &router.RegisterPartyResponse{}
|
||||
err := c.conn.Invoke(ctx, "/mpc.router.v1.MessageRouter/RegisterParty", req, resp)
|
||||
if err != nil {
|
||||
logger.Error("Failed to register party",
|
||||
zap.Error(err),
|
||||
zap.String("party_id", partyID))
|
||||
return err
|
||||
// Set notification channel if configured (enables offline mode)
|
||||
if notification != nil && notification.HasAnyChannel() {
|
||||
req.Notification = &router.NotificationChannel{
|
||||
Email: notification.Email,
|
||||
Phone: notification.Phone,
|
||||
PushToken: notification.PushToken,
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info("Party registered successfully",
|
||||
zap.String("party_id", partyID),
|
||||
zap.String("role", partyRole),
|
||||
zap.Bool("success", resp.Success))
|
||||
// Log registration mode
|
||||
mode := "real-time"
|
||||
if notification != nil && notification.HasAnyChannel() {
|
||||
mode = "offline"
|
||||
}
|
||||
|
||||
return nil
|
||||
return retry.DoVoid(ctx, c.retryCfg, "RegisterParty", func() error {
|
||||
resp := &router.RegisterPartyResponse{}
|
||||
err := c.getConn().Invoke(ctx, "/mpc.router.v1.MessageRouter/RegisterParty", req, resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("Party registered successfully",
|
||||
zap.String("party_id", partyID),
|
||||
zap.String("role", partyRole),
|
||||
zap.String("mode", mode),
|
||||
zap.Bool("has_email", notification != nil && notification.Email != ""),
|
||||
zap.Bool("has_phone", notification != nil && notification.Phone != ""),
|
||||
zap.Bool("has_push", notification != nil && notification.PushToken != ""),
|
||||
zap.Bool("success", resp.Success))
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateNotificationChannels re-registers party with new notification channels
|
||||
// Call this when party's notification configuration changes
|
||||
// TODO: Implement when notification management is needed
|
||||
func (c *MessageRouterClient) UpdateNotificationChannels(
|
||||
ctx context.Context,
|
||||
partyID string,
|
||||
partyRole string,
|
||||
version string,
|
||||
notification *NotificationConfig,
|
||||
) error {
|
||||
logger.Info("Updating party notification channels",
|
||||
zap.String("party_id", partyID),
|
||||
zap.Bool("has_email", notification != nil && notification.Email != ""),
|
||||
zap.Bool("has_phone", notification != nil && notification.Phone != ""),
|
||||
zap.Bool("has_push", notification != nil && notification.PushToken != ""))
|
||||
|
||||
return c.RegisterPartyWithNotification(ctx, partyID, partyRole, version, notification)
|
||||
}
|
||||
|
||||
// SubscribeSessionEvents subscribes to session lifecycle events
|
||||
|
|
@@ -330,7 +455,7 @@ func (c *MessageRouterClient) createSessionEventStream(
|
|||
ServerStreams: true,
|
||||
}
|
||||
|
||||
stream, err := c.conn.NewStream(ctx, streamDesc, "/mpc.router.v1.MessageRouter/SubscribeSessionEvents")
|
||||
stream, err := c.getConn().NewStream(ctx, streamDesc, "/mpc.router.v1.MessageRouter/SubscribeSessionEvents")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@@ -345,3 +470,132 @@ func (c *MessageRouterClient) createSessionEventStream(
|
|||
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
// AcknowledgeMessage acknowledges receipt and processing of a message
|
||||
// This should be called after successfully processing an MPC message
|
||||
// Includes automatic retry with exponential backoff
|
||||
func (c *MessageRouterClient) AcknowledgeMessage(
|
||||
ctx context.Context,
|
||||
messageID string,
|
||||
sessionID uuid.UUID,
|
||||
partyID string,
|
||||
success bool,
|
||||
errorMessage string,
|
||||
) error {
|
||||
req := &router.AcknowledgeMessageRequest{
|
||||
MessageId: messageID,
|
||||
PartyId: partyID,
|
||||
SessionId: sessionID.String(),
|
||||
Success: success,
|
||||
ErrorMessage: errorMessage,
|
||||
}
|
||||
|
||||
return retry.DoVoid(ctx, c.retryCfg, "AcknowledgeMessage", func() error {
|
||||
resp := &router.AcknowledgeMessageResponse{}
|
||||
err := c.getConn().Invoke(ctx, "/mpc.router.v1.MessageRouter/AcknowledgeMessage", req, resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !resp.Success {
|
||||
logger.Warn("Message acknowledgment failed",
|
||||
zap.String("message_id", messageID),
|
||||
zap.String("response", resp.Message))
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// GetMessageStatus gets the delivery status of a message
|
||||
// Includes automatic retry with exponential backoff
|
||||
func (c *MessageRouterClient) GetMessageStatus(
|
||||
ctx context.Context,
|
||||
messageID string,
|
||||
sessionID uuid.UUID,
|
||||
) (*router.GetMessageStatusResponse, error) {
|
||||
req := &router.GetMessageStatusRequest{
|
||||
MessageId: messageID,
|
||||
SessionId: sessionID.String(),
|
||||
}
|
||||
|
||||
return retry.Do(ctx, c.retryCfg, "GetMessageStatus", func() (*router.GetMessageStatusResponse, error) {
|
||||
resp := &router.GetMessageStatusResponse{}
|
||||
err := c.getConn().Invoke(ctx, "/mpc.router.v1.MessageRouter/GetMessageStatus", req, resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
})
|
||||
}
|
||||
|
||||
// Heartbeat sends a heartbeat to keep the party connection alive
|
||||
// Returns the number of pending messages for this party
|
||||
// Includes automatic retry with exponential backoff
|
||||
func (c *MessageRouterClient) Heartbeat(
|
||||
ctx context.Context,
|
||||
partyID string,
|
||||
) (int32, error) {
|
||||
req := &router.HeartbeatRequest{
|
||||
PartyId: partyID,
|
||||
Timestamp: time.Now().UnixMilli(),
|
||||
}
|
||||
|
||||
return retry.Do(ctx, c.retryCfg, "Heartbeat", func() (int32, error) {
|
||||
resp := &router.HeartbeatResponse{}
|
||||
err := c.getConn().Invoke(ctx, "/mpc.router.v1.MessageRouter/Heartbeat", req, resp)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if resp.PendingMessages > 0 {
|
||||
logger.Debug("Heartbeat response",
|
||||
zap.String("party_id", partyID),
|
||||
zap.Int32("pending_messages", resp.PendingMessages))
|
||||
}
|
||||
|
||||
return resp.PendingMessages, nil
|
||||
})
|
||||
}
|
||||
|
||||
// StartHeartbeat starts a background goroutine that sends heartbeats periodically
// Returns a cancel function to stop the heartbeat
func (c *MessageRouterClient) StartHeartbeat(
    ctx context.Context,
    partyID string,
    interval time.Duration,
    onPendingMessages func(count int32),
) context.CancelFunc {
    heartbeatCtx, cancel := context.WithCancel(ctx)

    go func() {
        ticker := time.NewTicker(interval)
        defer ticker.Stop()

        for {
            select {
            case <-heartbeatCtx.Done():
                logger.Info("Heartbeat stopped", zap.String("party_id", partyID))
                return
            case <-ticker.C:
                pendingCount, err := c.Heartbeat(heartbeatCtx, partyID)
                if err != nil {
                    logger.Warn("Heartbeat failed",
                        zap.String("party_id", partyID),
                        zap.Error(err))
                    continue
                }

                if onPendingMessages != nil && pendingCount > 0 {
                    onPendingMessages(pendingCount)
                }
            }
        }
    }()

    logger.Info("Heartbeat started",
        zap.String("party_id", partyID),
        zap.Duration("interval", interval))

    return cancel
}
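The pkg/retry helpers used by every RPC above (retry.DefaultConfig, retry.Do, retry.DoVoid) are not part of this hunk. The sketch below shows roughly what such an exponential-backoff wrapper could look like, consistent with those call sites; the field names, default values and retry policy are assumptions, not the actual package.

package retry

import (
    "context"
    "time"
)

// Config describes an exponential backoff schedule. Only DefaultConfig, Do and
// DoVoid are used by the clients above; everything else here is assumed.
type Config struct {
    MaxAttempts  int
    InitialDelay time.Duration
    MaxDelay     time.Duration
    Multiplier   float64
}

func DefaultConfig() Config {
    return Config{MaxAttempts: 5, InitialDelay: 200 * time.Millisecond, MaxDelay: 5 * time.Second, Multiplier: 2.0}
}

// Do retries fn with exponential backoff until it succeeds, attempts are
// exhausted, or ctx is cancelled. op is only a label for logging/metrics.
func Do[T any](ctx context.Context, cfg Config, op string, fn func() (T, error)) (T, error) {
    var zero T
    delay := cfg.InitialDelay
    var err error
    for attempt := 1; attempt <= cfg.MaxAttempts; attempt++ {
        var v T
        if v, err = fn(); err == nil {
            return v, nil
        }
        if attempt == cfg.MaxAttempts {
            break
        }
        select {
        case <-ctx.Done():
            return zero, ctx.Err()
        case <-time.After(delay):
        }
        delay = time.Duration(float64(delay) * cfg.Multiplier)
        if delay > cfg.MaxDelay {
            delay = cfg.MaxDelay
        }
    }
    return zero, err
}

// DoVoid is Do for operations that only return an error.
func DoVoid(ctx context.Context, cfg Config, op string, fn func() error) error {
    _, err := Do(ctx, cfg, op, func() (struct{}, error) { return struct{}{}, fn() })
    return err
}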
@@ -2,52 +2,72 @@ package grpc
|
|||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
coordinator "github.com/rwadurian/mpc-system/api/grpc/coordinator/v1"
|
||||
"github.com/rwadurian/mpc-system/pkg/grpcutil"
|
||||
"github.com/rwadurian/mpc-system/pkg/logger"
|
||||
"github.com/rwadurian/mpc-system/pkg/retry"
|
||||
"github.com/rwadurian/mpc-system/services/server-party/application/use_cases"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
// SessionCoordinatorClient implements use_cases.SessionCoordinatorClient
|
||||
type SessionCoordinatorClient struct {
|
||||
conn *grpc.ClientConn
|
||||
address string
|
||||
resilientConn *grpcutil.ResilientConn
|
||||
address string
|
||||
retryCfg retry.Config
|
||||
}
|
||||
|
||||
// NewSessionCoordinatorClient creates a new session coordinator gRPC client
|
||||
// NewSessionCoordinatorClient creates a new session coordinator gRPC client with auto-reconnection
|
||||
func NewSessionCoordinatorClient(address string) (*SessionCoordinatorClient, error) {
|
||||
conn, err := grpc.Dial(
|
||||
address,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithBlock(),
|
||||
grpc.WithTimeout(10*time.Second),
|
||||
)
|
||||
config := grpcutil.DefaultClientConfig(address)
|
||||
resilientConn, err := grpcutil.NewResilientConn(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logger.Info("Connected to Session Coordinator", zap.String("address", address))
|
||||
logger.Info("Connected to Session Coordinator with keepalive",
|
||||
zap.String("address", address),
|
||||
zap.Duration("keepalive_time", config.KeepaliveTime),
|
||||
zap.Bool("auto_reconnect", config.EnableReconnect))
|
||||
|
||||
return &SessionCoordinatorClient{
|
||||
conn: conn,
|
||||
address: address,
|
||||
resilientConn: resilientConn,
|
||||
address: address,
|
||||
retryCfg: retry.DefaultConfig(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getConn returns the current gRPC connection
|
||||
func (c *SessionCoordinatorClient) getConn() *grpc.ClientConn {
|
||||
return c.resilientConn.GetConn()
|
||||
}
|
||||
|
||||
// Close closes the gRPC connection
|
||||
func (c *SessionCoordinatorClient) Close() error {
|
||||
if c.conn != nil {
|
||||
return c.conn.Close()
|
||||
if c.resilientConn != nil {
|
||||
return c.resilientConn.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsConnected returns true if the connection is ready
|
||||
func (c *SessionCoordinatorClient) IsConnected() bool {
|
||||
return c.resilientConn != nil && c.resilientConn.IsConnected()
|
||||
}
|
||||
|
||||
// WaitForReady waits for the connection to be ready
|
||||
func (c *SessionCoordinatorClient) WaitForReady(ctx context.Context) bool {
|
||||
if c.resilientConn == nil {
|
||||
return false
|
||||
}
|
||||
return c.resilientConn.WaitForReady(ctx)
|
||||
}
|
||||
|
||||
// JoinSession joins an MPC session
|
||||
// Includes automatic retry with exponential backoff for transient failures
|
||||
func (c *SessionCoordinatorClient) JoinSession(
|
||||
ctx context.Context,
|
||||
sessionID uuid.UUID,
|
||||
|
|
@@ -66,53 +86,55 @@ func (c *SessionCoordinatorClient) JoinSession(
|
|||
},
|
||||
}
|
||||
|
||||
// Make the gRPC call using the raw connection
|
||||
resp := &coordinator.JoinSessionResponse{}
|
||||
err := c.conn.Invoke(ctx, "/mpc.coordinator.v1.SessionCoordinator/JoinSession", req, resp)
|
||||
if err != nil {
|
||||
logger.Error("Failed to join session", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !resp.Success {
|
||||
logger.Error("Join session failed", zap.String("session_id", sessionID.String()))
|
||||
return nil, use_cases.ErrInvalidSession
|
||||
}
|
||||
|
||||
// Convert response to SessionInfo
|
||||
// Note: OtherParties should include ALL participants (including self) from coordinator
|
||||
participants := make([]use_cases.ParticipantInfo, len(resp.OtherParties))
|
||||
for i, p := range resp.OtherParties {
|
||||
// Debug: Log what we received from gRPC
|
||||
logger.Info("gRPC client - received party_index from protobuf response",
|
||||
zap.String("party_id", p.PartyId),
|
||||
zap.Int32("proto_party_index", p.PartyIndex),
|
||||
zap.Int("converted_party_index", int(p.PartyIndex)))
|
||||
|
||||
participants[i] = use_cases.ParticipantInfo{
|
||||
PartyID: p.PartyId,
|
||||
PartyIndex: int(p.PartyIndex),
|
||||
return retry.Do(ctx, c.retryCfg, "JoinSession", func() (*use_cases.SessionInfo, error) {
|
||||
// Make the gRPC call using the raw connection
|
||||
resp := &coordinator.JoinSessionResponse{}
|
||||
err := c.getConn().Invoke(ctx, "/mpc.coordinator.v1.SessionCoordinator/JoinSession", req, resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
sessionInfo := &use_cases.SessionInfo{
|
||||
SessionID: sessionID,
|
||||
SessionType: resp.SessionInfo.SessionType,
|
||||
ThresholdN: int(resp.SessionInfo.ThresholdN),
|
||||
ThresholdT: int(resp.SessionInfo.ThresholdT),
|
||||
MessageHash: resp.SessionInfo.MessageHash,
|
||||
Participants: participants,
|
||||
}
|
||||
if !resp.Success {
|
||||
logger.Error("Join session failed", zap.String("session_id", sessionID.String()))
|
||||
return nil, use_cases.ErrInvalidSession
|
||||
}
|
||||
|
||||
logger.Info("Joined session successfully",
|
||||
zap.String("session_id", sessionID.String()),
|
||||
zap.String("party_id", partyID),
|
||||
zap.String("session_type", sessionInfo.SessionType))
|
||||
// Convert response to SessionInfo
|
||||
// Note: OtherParties should include ALL participants (including self) from coordinator
|
||||
participants := make([]use_cases.ParticipantInfo, len(resp.OtherParties))
|
||||
for i, p := range resp.OtherParties {
|
||||
// Debug: Log what we received from gRPC
|
||||
logger.Debug("gRPC client - received party_index from protobuf response",
|
||||
zap.String("party_id", p.PartyId),
|
||||
zap.Int32("proto_party_index", p.PartyIndex),
|
||||
zap.Int("converted_party_index", int(p.PartyIndex)))
|
||||
|
||||
return sessionInfo, nil
|
||||
participants[i] = use_cases.ParticipantInfo{
|
||||
PartyID: p.PartyId,
|
||||
PartyIndex: int(p.PartyIndex),
|
||||
}
|
||||
}
|
||||
|
||||
sessionInfo := &use_cases.SessionInfo{
|
||||
SessionID: sessionID,
|
||||
SessionType: resp.SessionInfo.SessionType,
|
||||
ThresholdN: int(resp.SessionInfo.ThresholdN),
|
||||
ThresholdT: int(resp.SessionInfo.ThresholdT),
|
||||
MessageHash: resp.SessionInfo.MessageHash,
|
||||
Participants: participants,
|
||||
}
|
||||
|
||||
logger.Info("Joined session successfully",
|
||||
zap.String("session_id", sessionID.String()),
|
||||
zap.String("party_id", partyID),
|
||||
zap.String("session_type", sessionInfo.SessionType))
|
||||
|
||||
return sessionInfo, nil
|
||||
})
|
||||
}
|
||||
|
||||
// ReportCompletion reports that a party has completed the MPC protocol
|
||||
// Includes automatic retry with exponential backoff for transient failures
|
||||
func (c *SessionCoordinatorClient) ReportCompletion(
|
||||
ctx context.Context,
|
||||
sessionID uuid.UUID,
|
||||
|
|
@@ -125,22 +147,24 @@ func (c *SessionCoordinatorClient) ReportCompletion(
|
|||
PublicKey: resultData, // For keygen, this is public key; for signing, this is signature
|
||||
}
|
||||
|
||||
resp := &coordinator.ReportCompletionResponse{}
|
||||
err := c.conn.Invoke(ctx, "/mpc.coordinator.v1.SessionCoordinator/ReportCompletion", req, resp)
|
||||
if err != nil {
|
||||
logger.Error("Failed to report completion", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
return retry.DoVoid(ctx, c.retryCfg, "ReportCompletion", func() error {
|
||||
resp := &coordinator.ReportCompletionResponse{}
|
||||
err := c.getConn().Invoke(ctx, "/mpc.coordinator.v1.SessionCoordinator/ReportCompletion", req, resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("Reported completion",
|
||||
zap.String("session_id", sessionID.String()),
|
||||
zap.String("party_id", partyID),
|
||||
zap.Bool("all_completed", resp.AllCompleted))
|
||||
logger.Info("Reported completion",
|
||||
zap.String("session_id", sessionID.String()),
|
||||
zap.String("party_id", partyID),
|
||||
zap.Bool("all_completed", resp.AllCompleted))
|
||||
|
||||
return nil
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// MarkPartyReady marks the party as ready to start the protocol
|
||||
// Includes automatic retry with exponential backoff for transient failures
|
||||
func (c *SessionCoordinatorClient) MarkPartyReady(
|
||||
ctx context.Context,
|
||||
sessionID uuid.UUID,
|
||||
|
|
@@ -151,22 +175,24 @@ func (c *SessionCoordinatorClient) MarkPartyReady(
|
|||
PartyId: partyID,
|
||||
}
|
||||
|
||||
resp := &coordinator.MarkPartyReadyResponse{}
|
||||
err := c.conn.Invoke(ctx, "/mpc.coordinator.v1.SessionCoordinator/MarkPartyReady", req, resp)
|
||||
if err != nil {
|
||||
logger.Error("Failed to mark party ready", zap.Error(err))
|
||||
return false, err
|
||||
}
|
||||
return retry.Do(ctx, c.retryCfg, "MarkPartyReady", func() (bool, error) {
|
||||
resp := &coordinator.MarkPartyReadyResponse{}
|
||||
err := c.getConn().Invoke(ctx, "/mpc.coordinator.v1.SessionCoordinator/MarkPartyReady", req, resp)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
logger.Info("Marked party ready",
|
||||
zap.String("session_id", sessionID.String()),
|
||||
zap.String("party_id", partyID),
|
||||
zap.Bool("all_ready", resp.AllReady))
|
||||
logger.Info("Marked party ready",
|
||||
zap.String("session_id", sessionID.String()),
|
||||
zap.String("party_id", partyID),
|
||||
zap.Bool("all_ready", resp.AllReady))
|
||||
|
||||
return resp.AllReady, nil
|
||||
return resp.AllReady, nil
|
||||
})
|
||||
}
|
||||
|
||||
// GetSessionStatus gets the current session status
|
||||
// Includes automatic retry with exponential backoff for transient failures
|
||||
func (c *SessionCoordinatorClient) GetSessionStatus(
|
||||
ctx context.Context,
|
||||
sessionID uuid.UUID,
|
||||
|
|
@@ -175,13 +201,15 @@ func (c *SessionCoordinatorClient) GetSessionStatus(
|
|||
SessionId: sessionID.String(),
|
||||
}
|
||||
|
||||
resp := &coordinator.GetSessionStatusResponse{}
|
||||
err := c.conn.Invoke(ctx, "/mpc.coordinator.v1.SessionCoordinator/GetSessionStatus", req, resp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return retry.Do(ctx, c.retryCfg, "GetSessionStatus", func() (string, error) {
|
||||
resp := &coordinator.GetSessionStatusResponse{}
|
||||
err := c.getConn().Invoke(ctx, "/mpc.coordinator.v1.SessionCoordinator/GetSessionStatus", req, resp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return resp.Status, nil
|
||||
return resp.Status, nil
|
||||
})
|
||||
}
|
||||
|
||||
// findPartyIndex finds the party index from the list of parties
|
||||
@@ -67,6 +67,8 @@ type ParticipantInfo struct {

// MPCMessage represents an MPC message from the router
type MPCMessage struct {
    MessageID   string // Unique message ID for acknowledgment
    SessionID   string // Session ID for acknowledgment
    FromParty   string
    IsBroadcast bool
    RoundNumber int
@@ -10,6 +10,7 @@ import (
    "github.com/rwadurian/mpc-system/pkg/crypto"
    "github.com/rwadurian/mpc-system/pkg/logger"
    "github.com/rwadurian/mpc-system/pkg/tss"
    "github.com/rwadurian/mpc-system/services/server-party/domain/entities"
    "github.com/rwadurian/mpc-system/services/server-party/domain/repositories"
    "go.uber.org/zap"
)

@@ -27,6 +28,8 @@ type ParticipateSigningInput struct {
    PartyID     string
    JoinToken   string
    MessageHash []byte
    // For delegate parties: encrypted share provided by user (not loaded from DB)
    UserShareData []byte
}

// ParticipateSigningOutput contains output from signing participation

@@ -75,19 +78,37 @@ func (uc *ParticipateSigningUseCase) Execute(
        return nil, ErrInvalidSignSession
    }

    // 2. Load key share for this party
    keyShares, err := uc.keyShareRepo.ListByParty(ctx, input.PartyID)
    if err != nil || len(keyShares) == 0 {
        return nil, ErrKeyShareNotFound
    }
    // 2. Get share data - either from user input (delegate) or from database (persistent)
    var shareData []byte
    var keyShareForUpdate *entities.PartyKeyShare

    // Use the most recent key share (in production, would match by public key or session reference)
    keyShare := keyShares[len(keyShares)-1]
    if len(input.UserShareData) > 0 {
        // Delegate party: use share provided by user
        shareData, err = uc.cryptoService.DecryptShare(input.UserShareData, input.PartyID)
        if err != nil {
            return nil, err
        }
        logger.Info("Using user-provided share (delegate party)",
            zap.String("party_id", input.PartyID),
            zap.String("session_id", input.SessionID.String()))
    } else {
        // Persistent party: load from database
        keyShares, err := uc.keyShareRepo.ListByParty(ctx, input.PartyID)
        if err != nil || len(keyShares) == 0 {
            return nil, ErrKeyShareNotFound
        }

        // 3. Decrypt share data
        shareData, err := uc.cryptoService.DecryptShare(keyShare.ShareData, input.PartyID)
        if err != nil {
            return nil, err
        // Use the most recent key share (in production, would match by public key or session reference)
        keyShareForUpdate = keyShares[len(keyShares)-1]

        // Decrypt share data
        shareData, err = uc.cryptoService.DecryptShare(keyShareForUpdate.ShareData, input.PartyID)
        if err != nil {
            return nil, err
        }
        logger.Info("Using database share (persistent party)",
            zap.String("party_id", input.PartyID),
            zap.String("session_id", input.SessionID.String()))
    }

    // 4. Find self in participants and build party index map

@@ -129,10 +150,12 @@ func (uc *ParticipateSigningUseCase) Execute(
        return nil, err
    }

    // 7. Update key share last used
    keyShare.MarkUsed()
    if err := uc.keyShareRepo.Update(ctx, keyShare); err != nil {
        logger.Warn("failed to update key share last used", zap.Error(err))
    // 7. Update key share last used (only for persistent parties)
    if keyShareForUpdate != nil {
        keyShareForUpdate.MarkUsed()
        if err := uc.keyShareRepo.Update(ctx, keyShareForUpdate); err != nil {
            logger.Warn("failed to update key share last used", zap.Error(err))
        }
    }

    // 8. Report completion to coordinator
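For illustration only, the two ways the use case above can be invoked; every concrete value below is hypothetical and the helper exists only for this sketch.

func buildSigningInputs(
    sessionID uuid.UUID,
    joinToken string,
    messageHash, encryptedUserShare []byte,
) (persistent, delegate use_cases.ParticipateSigningInput) {
    // Persistent party: the encrypted share is loaded from the key share repository.
    persistent = use_cases.ParticipateSigningInput{
        SessionID:   sessionID,
        PartyID:     "server-party-1",
        JoinToken:   joinToken,
        MessageHash: messageHash,
    }
    // Delegate party: the user supplies the encrypted share, so nothing is read from
    // the DB and the "last used" bookkeeping is skipped (keyShareForUpdate stays nil).
    delegate = use_cases.ParticipateSigningInput{
        SessionID:     sessionID,
        PartyID:       "delegate-party-1",
        JoinToken:     joinToken,
        MessageHash:   messageHash,
        UserShareData: encryptedUserShare,
    }
    return persistent, delegate
}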
@@ -16,6 +16,7 @@ import (
|
|||
"github.com/google/uuid"
|
||||
_ "github.com/lib/pq"
|
||||
|
||||
router "github.com/rwadurian/mpc-system/api/grpc/router/v1"
|
||||
"github.com/rwadurian/mpc-system/pkg/config"
|
||||
"github.com/rwadurian/mpc-system/pkg/crypto"
|
||||
"github.com/rwadurian/mpc-system/pkg/logger"
|
||||
|
|
@@ -139,33 +140,64 @@ func main() {
|
|||
partyRole = "persistent"
|
||||
}
|
||||
|
||||
// Get optional notification channels from environment
|
||||
// If notification channels are set, party operates in OFFLINE mode (24h async)
|
||||
// If no notification channels, party operates in REAL-TIME mode (Message Router push)
|
||||
var notificationConfig *grpcclient.NotificationConfig
|
||||
partyEmail := os.Getenv("PARTY_NOTIFICATION_EMAIL")
|
||||
partyPhone := os.Getenv("PARTY_NOTIFICATION_PHONE")
|
||||
partyPushToken := os.Getenv("PARTY_NOTIFICATION_PUSH_TOKEN")
|
||||
|
||||
if partyEmail != "" || partyPhone != "" || partyPushToken != "" {
|
||||
notificationConfig = &grpcclient.NotificationConfig{
|
||||
Email: partyEmail,
|
||||
Phone: partyPhone,
|
||||
PushToken: partyPushToken,
|
||||
}
|
||||
logger.Info("Party configured for OFFLINE mode (notification channels set)",
|
||||
zap.Bool("has_email", partyEmail != ""),
|
||||
zap.Bool("has_phone", partyPhone != ""),
|
||||
zap.Bool("has_push", partyPushToken != ""))
|
||||
} else {
|
||||
logger.Info("Party configured for REAL-TIME mode (no notification channels)")
|
||||
}
|
||||
|
||||
// Register this party with Message Router
|
||||
// This should be called again whenever party configuration changes
|
||||
logger.Info("Registering party with Message Router",
|
||||
zap.String("party_id", partyID),
|
||||
zap.String("role", partyRole))
|
||||
|
||||
if err := messageRouter.RegisterParty(ctx, partyID, partyRole, "1.0.0"); err != nil {
|
||||
if err := messageRouter.RegisterPartyWithNotification(ctx, partyID, partyRole, "1.0.0", notificationConfig); err != nil {
|
||||
logger.Fatal("Failed to register party", zap.Error(err))
|
||||
}
|
||||
|
||||
// Start heartbeat to keep party registered and detect pending messages
|
||||
// Heartbeat interval: 30 seconds, callback for pending messages notification
|
||||
heartbeatCancel := messageRouter.StartHeartbeat(ctx, partyID, 30*time.Second, func(pendingCount int32) {
|
||||
if pendingCount > 0 {
|
||||
logger.Info("Pending messages detected via heartbeat",
|
||||
zap.String("party_id", partyID),
|
||||
zap.Int32("pending_count", pendingCount))
|
||||
}
|
||||
})
|
||||
defer heartbeatCancel()
|
||||
logger.Info("Heartbeat started", zap.String("party_id", partyID), zap.Duration("interval", 30*time.Second))
|
||||
|
||||
// Subscribe to session events and handle them automatically
|
||||
// Note: This will work after protobuf regeneration
|
||||
logger.Info("Subscribing to session events", zap.String("party_id", partyID))
|
||||
|
||||
// TODO: Uncomment after protobuf regeneration
|
||||
/*
|
||||
eventHandler := createSessionEventHandler(
|
||||
ctx,
|
||||
partyID,
|
||||
participateKeygenUC,
|
||||
participateSigningUC,
|
||||
sessionClient,
|
||||
)
|
||||
eventHandler := createSessionEventHandler(
|
||||
ctx,
|
||||
partyID,
|
||||
participateKeygenUC,
|
||||
participateSigningUC,
|
||||
sessionClient,
|
||||
)
|
||||
|
||||
if err := messageRouter.SubscribeSessionEvents(ctx, partyID, eventHandler); err != nil {
|
||||
logger.Fatal("Failed to subscribe to session events", zap.Error(err))
|
||||
}
|
||||
*/
|
||||
if err := messageRouter.SubscribeSessionEvents(ctx, partyID, eventHandler); err != nil {
|
||||
logger.Fatal("Failed to subscribe to session events", zap.Error(err))
|
||||
}
|
||||
|
||||
logger.Info("Party-driven architecture initialized successfully",
|
||||
zap.String("party_id", partyID),
|
||||
|
|
@@ -496,120 +528,107 @@ func startHTTPServer(
|
|||
return router.Run(fmt.Sprintf(":%d", cfg.Server.HTTPPort))
|
||||
}
|
||||
|
||||
// createSessionEventHandler creates a handler for session events
|
||||
// This implements the party-driven architecture where parties automatically
|
||||
// respond to session creation events
|
||||
//
|
||||
// TODO: After protobuf regeneration, uncomment this function and update the import
|
||||
// to include: router "github.com/rwadurian/mpc-system/api/grpc/router/v1"
|
||||
// createSessionEventHandler creates a handler for session events (party-driven architecture)
|
||||
// Parties automatically respond to session creation events by joining keygen or signing sessions
|
||||
func createSessionEventHandler(
|
||||
ctx context.Context,
|
||||
partyID string,
|
||||
participateKeygenUC *use_cases.ParticipateKeygenUseCase,
|
||||
participateSigningUC *use_cases.ParticipateSigningUseCase,
|
||||
sessionClient *grpcclient.SessionCoordinatorClient,
|
||||
) func(event interface{}) {
|
||||
return func(eventInterface interface{}) {
|
||||
// After protobuf regeneration, uncomment and use this implementation:
|
||||
/*
|
||||
event, ok := eventInterface.(*router.SessionEvent)
|
||||
if !ok {
|
||||
logger.Error("Invalid event type")
|
||||
return
|
||||
) func(*router.SessionEvent) {
|
||||
return func(event *router.SessionEvent) {
|
||||
// Check if this party is selected for the session
|
||||
isSelected := false
|
||||
for _, selectedParty := range event.SelectedParties {
|
||||
if selectedParty == partyID {
|
||||
isSelected = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Check if this party is selected for the session
|
||||
isSelected := false
|
||||
for _, selectedParty := range event.SelectedParties {
|
||||
if selectedParty == partyID {
|
||||
isSelected = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !isSelected {
|
||||
logger.Debug("Party not selected for this session",
|
||||
zap.String("session_id", event.SessionId),
|
||||
zap.String("party_id", partyID))
|
||||
return
|
||||
}
|
||||
|
||||
// Get join token for this party
|
||||
joinToken, exists := event.JoinTokens[partyID]
|
||||
if !exists {
|
||||
logger.Error("No join token found for party",
|
||||
zap.String("session_id", event.SessionId),
|
||||
zap.String("party_id", partyID))
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info("Party selected for session, auto-participating",
|
||||
if !isSelected {
|
||||
logger.Debug("Party not selected for this session",
|
||||
zap.String("session_id", event.SessionId),
|
||||
zap.String("party_id", partyID),
|
||||
zap.String("event_type", event.EventType))
|
||||
zap.String("party_id", partyID))
|
||||
return
|
||||
}
|
||||
|
||||
// Parse session ID
|
||||
sessionID, err := uuid.Parse(event.SessionId)
|
||||
if err != nil {
|
||||
logger.Error("Invalid session ID", zap.Error(err))
|
||||
return
|
||||
}
|
||||
// Get join token for this party
|
||||
joinToken, exists := event.JoinTokens[partyID]
|
||||
if !exists {
|
||||
logger.Error("No join token found for party",
|
||||
zap.String("session_id", event.SessionId),
|
||||
zap.String("party_id", partyID))
|
||||
return
|
||||
}
|
||||
|
||||
// Automatically participate based on session type
|
||||
go func() {
|
||||
ctx := context.Background()
|
||||
logger.Info("Party selected for session, auto-participating",
|
||||
zap.String("session_id", event.SessionId),
|
||||
zap.String("party_id", partyID),
|
||||
zap.String("event_type", event.EventType))
|
||||
|
||||
// Determine session type from event
|
||||
if event.EventType == "session_created" {
|
||||
// Check if it's keygen or sign based on message_hash
|
||||
if len(event.MessageHash) == 0 {
|
||||
// Keygen session
|
||||
logger.Info("Auto-participating in keygen session",
|
||||
zap.String("session_id", event.SessionId),
|
||||
zap.String("party_id", partyID))
|
||||
// Parse session ID
|
||||
sessionID, err := uuid.Parse(event.SessionId)
|
||||
if err != nil {
|
||||
logger.Error("Invalid session ID", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
input := use_cases.ParticipateKeygenInput{
|
||||
SessionID: sessionID,
|
||||
PartyID: partyID,
|
||||
JoinToken: joinToken,
|
||||
}
|
||||
// Automatically participate based on session type
|
||||
go func() {
|
||||
ctx := context.Background()
|
||||
|
||||
result, err := participateKeygenUC.Execute(ctx, input)
|
||||
if err != nil {
|
||||
logger.Error("Keygen participation failed",
|
||||
zap.Error(err),
|
||||
zap.String("session_id", event.SessionId))
|
||||
} else {
|
||||
logger.Info("Keygen participation completed",
|
||||
zap.String("session_id", event.SessionId),
|
||||
zap.String("public_key", result.PublicKeyHex))
|
||||
}
|
||||
// Determine session type from event
|
||||
if event.EventType == "session_created" {
|
||||
// Check if it's keygen or sign based on message_hash
|
||||
if len(event.MessageHash) == 0 {
|
||||
// Keygen session
|
||||
logger.Info("Auto-participating in keygen session",
|
||||
zap.String("session_id", event.SessionId),
|
||||
zap.String("party_id", partyID))
|
||||
|
||||
input := use_cases.ParticipateKeygenInput{
|
||||
SessionID: sessionID,
|
||||
PartyID: partyID,
|
||||
JoinToken: joinToken,
|
||||
}
|
||||
|
||||
result, err := participateKeygenUC.Execute(ctx, input)
|
||||
if err != nil {
|
||||
logger.Error("Keygen participation failed",
|
||||
zap.Error(err),
|
||||
zap.String("session_id", event.SessionId))
|
||||
} else {
|
||||
// Sign session
|
||||
logger.Info("Auto-participating in sign session",
|
||||
logger.Info("Keygen participation completed",
|
||||
zap.String("session_id", event.SessionId),
|
||||
zap.String("party_id", partyID))
|
||||
zap.String("public_key", hex.EncodeToString(result.PublicKey)))
|
||||
}
|
||||
} else {
|
||||
// Sign session
|
||||
logger.Info("Auto-participating in sign session",
|
||||
zap.String("session_id", event.SessionId),
|
||||
zap.String("party_id", partyID))
|
||||
|
||||
input := use_cases.ParticipateSigningInput{
|
||||
SessionID: sessionID,
|
||||
PartyID: partyID,
|
||||
JoinToken: joinToken,
|
||||
MessageHash: event.MessageHash,
|
||||
}
|
||||
input := use_cases.ParticipateSigningInput{
|
||||
SessionID: sessionID,
|
||||
PartyID: partyID,
|
||||
JoinToken: joinToken,
|
||||
MessageHash: event.MessageHash,
|
||||
}
|
||||
|
||||
result, err := participateSigningUC.Execute(ctx, input)
|
||||
if err != nil {
|
||||
logger.Error("Signing participation failed",
|
||||
zap.Error(err),
|
||||
zap.String("session_id", event.SessionId))
|
||||
} else {
|
||||
logger.Info("Signing participation completed",
|
||||
zap.String("session_id", event.SessionId),
|
||||
zap.String("signature", result.SignatureHex))
|
||||
}
|
||||
result, err := participateSigningUC.Execute(ctx, input)
|
||||
if err != nil {
|
||||
logger.Error("Signing participation failed",
|
||||
zap.Error(err),
|
||||
zap.String("session_id", event.SessionId))
|
||||
} else {
|
||||
logger.Info("Signing participation completed",
|
||||
zap.String("session_id", event.SessionId),
|
||||
zap.String("signature", hex.EncodeToString(result.Signature)))
|
||||
}
|
||||
}
|
||||
}()
|
||||
*/
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
@@ -2,51 +2,60 @@ package grpc
|
|||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
router "github.com/rwadurian/mpc-system/api/grpc/router/v1"
|
||||
"github.com/rwadurian/mpc-system/pkg/grpcutil"
|
||||
"github.com/rwadurian/mpc-system/pkg/logger"
|
||||
"github.com/rwadurian/mpc-system/pkg/retry"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
// MessageRouterClient wraps gRPC client for Message Router service
|
||||
type MessageRouterClient struct {
|
||||
conn *grpc.ClientConn
|
||||
client router.MessageRouterClient
|
||||
resilientConn *grpcutil.ResilientConn
|
||||
retryCfg retry.Config
|
||||
}
|
||||
|
||||
// NewMessageRouterClient creates a new Message Router gRPC client
|
||||
// NewMessageRouterClient creates a new Message Router gRPC client with auto-reconnection
|
||||
func NewMessageRouterClient(address string) (*MessageRouterClient, error) {
|
||||
conn, err := grpc.Dial(
|
||||
address,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithBlock(),
|
||||
grpc.WithTimeout(10*time.Second),
|
||||
)
|
||||
config := grpcutil.DefaultClientConfig(address)
|
||||
resilientConn, err := grpcutil.NewResilientConn(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logger.Info("Connected to Message Router", zap.String("address", address))
|
||||
logger.Info("Connected to Message Router with keepalive",
|
||||
zap.String("address", address),
|
||||
zap.Duration("keepalive_time", config.KeepaliveTime),
|
||||
zap.Bool("auto_reconnect", config.EnableReconnect))
|
||||
|
||||
return &MessageRouterClient{
|
||||
conn: conn,
|
||||
client: router.NewMessageRouterClient(conn),
|
||||
resilientConn: resilientConn,
|
||||
retryCfg: retry.DefaultConfig(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getClient returns a new MessageRouterClient using the current connection
|
||||
func (c *MessageRouterClient) getClient() router.MessageRouterClient {
|
||||
return router.NewMessageRouterClient(c.resilientConn.GetConn())
|
||||
}
|
||||
|
||||
// Close closes the gRPC connection
|
||||
func (c *MessageRouterClient) Close() error {
|
||||
if c.conn != nil {
|
||||
return c.conn.Close()
|
||||
if c.resilientConn != nil {
|
||||
return c.resilientConn.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsConnected returns true if the connection is ready
|
||||
func (c *MessageRouterClient) IsConnected() bool {
|
||||
return c.resilientConn != nil && c.resilientConn.IsConnected()
|
||||
}
|
||||
|
||||
// PublishSessionEvent publishes a session event to all subscribed parties
|
||||
// Includes automatic retry with exponential backoff for transient failures
|
||||
func (c *MessageRouterClient) PublishSessionEvent(
|
||||
ctx context.Context,
|
||||
event *router.SessionEvent,
|
||||
|
|
@@ -60,24 +69,23 @@ func (c *MessageRouterClient) PublishSessionEvent(
|
|||
Event: event,
|
||||
}
|
||||
|
||||
resp, err := c.client.PublishSessionEvent(ctx, req)
|
||||
if err != nil {
|
||||
logger.Error("Failed to publish session event",
|
||||
zap.Error(err),
|
||||
return retry.DoVoid(ctx, c.retryCfg, "PublishSessionEvent", func() error {
|
||||
resp, err := c.getClient().PublishSessionEvent(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("Session event published successfully",
|
||||
zap.String("event_type", event.EventType),
|
||||
zap.String("session_id", event.SessionId))
|
||||
return err
|
||||
}
|
||||
zap.String("session_id", event.SessionId),
|
||||
zap.Int32("subscriber_count", resp.SubscriberCount))
|
||||
|
||||
logger.Info("Session event published successfully",
|
||||
zap.String("event_type", event.EventType),
|
||||
zap.String("session_id", event.SessionId),
|
||||
zap.Int32("subscriber_count", resp.SubscriberCount))
|
||||
|
||||
return nil
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// GetRegisteredParties retrieves registered parties from Message Router
|
||||
// Includes automatic retry with exponential backoff for transient failures
|
||||
func (c *MessageRouterClient) GetRegisteredParties(
|
||||
ctx context.Context,
|
||||
roleFilter string,
|
||||
|
|
@@ -87,19 +95,18 @@ func (c *MessageRouterClient) GetRegisteredParties(
|
|||
OnlyOnline: true,
|
||||
}
|
||||
|
||||
resp, err := c.client.GetRegisteredParties(ctx, req)
|
||||
if err != nil {
|
||||
logger.Error("Failed to get registered parties",
|
||||
zap.Error(err),
|
||||
zap.String("role_filter", roleFilter))
|
||||
return nil, err
|
||||
}
|
||||
return retry.Do(ctx, c.retryCfg, "GetRegisteredParties", func() ([]*router.RegisteredParty, error) {
|
||||
resp, err := c.getClient().GetRegisteredParties(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logger.Debug("Retrieved registered parties from Message Router",
|
||||
zap.String("role_filter", roleFilter),
|
||||
zap.Int32("count", resp.TotalCount))
|
||||
logger.Debug("Retrieved registered parties from Message Router",
|
||||
zap.String("role_filter", roleFilter),
|
||||
zap.Int32("count", resp.TotalCount))
|
||||
|
||||
return resp.Parties, nil
|
||||
return resp.Parties, nil
|
||||
})
|
||||
}
|
||||
|
||||
// PublishSessionCreated publishes a session_created event
|
||||
@@ -0,0 +1,127 @@
package http

import (
    "bytes"
    "context"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "time"

    "github.com/rwadurian/mpc-system/pkg/logger"
    "github.com/rwadurian/mpc-system/services/session-coordinator/application/ports/output"
    "go.uber.org/zap"
)

// AccountServiceClient implements AccountServicePort using HTTP
type AccountServiceClient struct {
    baseURL    string
    httpClient *http.Client
}

// NewAccountServiceClient creates a new Account Service HTTP client
func NewAccountServiceClient(baseURL string) *AccountServiceClient {
    return &AccountServiceClient{
        baseURL: baseURL,
        httpClient: &http.Client{
            Timeout: 30 * time.Second,
        },
    }
}

// createAccountRequest represents the request body for account creation
type createAccountRequest struct {
    PublicKey       string             `json:"public_key"`
    KeygenSessionID string             `json:"keygen_session_id"`
    ThresholdN      int                `json:"threshold_n"`
    ThresholdT      int                `json:"threshold_t"`
    Shares          []shareInfoRequest `json:"shares"`
}

type shareInfoRequest struct {
    PartyID    string `json:"party_id"`
    PartyIndex int    `json:"party_index"`
    ShareType  string `json:"share_type"`
}

// createAccountResponse represents the response from account creation
type createAccountResponse struct {
    AccountID string `json:"account_id"`
    Username  string `json:"username"`
    PublicKey string `json:"public_key"`
}

// CreateAccountFromKeygen creates an account record after successful keygen
func (c *AccountServiceClient) CreateAccountFromKeygen(
    ctx context.Context,
    input output.CreateAccountInput,
) (*output.CreateAccountOutput, error) {
    // Build request
    shares := make([]shareInfoRequest, len(input.Shares))
    for i, s := range input.Shares {
        shares[i] = shareInfoRequest{
            PartyID:    s.PartyID,
            PartyIndex: s.PartyIndex,
            ShareType:  s.ShareType,
        }
    }

    reqBody := createAccountRequest{
        PublicKey:       hex.EncodeToString(input.PublicKey),
        KeygenSessionID: input.KeygenSessionID,
        ThresholdN:      input.ThresholdN,
        ThresholdT:      input.ThresholdT,
        Shares:          shares,
    }

    jsonBody, err := json.Marshal(reqBody)
    if err != nil {
        return nil, fmt.Errorf("failed to marshal request: %w", err)
    }

    // Make HTTP request
    url := fmt.Sprintf("%s/api/v1/accounts/from-keygen", c.baseURL)
    req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(jsonBody))
    if err != nil {
        return nil, fmt.Errorf("failed to create request: %w", err)
    }
    req.Header.Set("Content-Type", "application/json")

    logger.Info("Calling Account Service to create account from keygen",
        zap.String("url", url),
        zap.String("keygen_session_id", input.KeygenSessionID))

    resp, err := c.httpClient.Do(req)
    if err != nil {
        return nil, fmt.Errorf("failed to call account service: %w", err)
    }
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, fmt.Errorf("failed to read response: %w", err)
    }

    if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusOK {
        logger.Error("Account Service returned error",
            zap.Int("status_code", resp.StatusCode),
            zap.String("body", string(body)))
        return nil, fmt.Errorf("account service error: status=%d, body=%s", resp.StatusCode, string(body))
    }

    var result createAccountResponse
    if err := json.Unmarshal(body, &result); err != nil {
        return nil, fmt.Errorf("failed to unmarshal response: %w", err)
    }

    logger.Info("Account created successfully from keygen",
        zap.String("account_id", result.AccountID),
        zap.String("keygen_session_id", input.KeygenSessionID))

    return &output.CreateAccountOutput{
        AccountID: result.AccountID,
        Success:   true,
    }, nil
}
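A usage sketch for the client above, wired against the port types added later in this commit; the base URL, session ID and party layout are placeholder values.

func createAccountExample(ctx context.Context, publicKey []byte) (string, error) {
    // Hypothetical base URL; in the real wiring this comes from service configuration.
    client := NewAccountServiceClient("http://account-service:8080")

    out, err := client.CreateAccountFromKeygen(ctx, output.CreateAccountInput{
        KeygenSessionID: "00000000-0000-0000-0000-000000000000", // placeholder keygen session ID
        PublicKey:       publicKey,
        ThresholdN:      3,
        ThresholdT:      2,
        Shares: []output.ShareInfo{
            {PartyID: "server-party-1", PartyIndex: 0, ShareType: "persistent"},
            {PartyID: "server-party-2", PartyIndex: 1, ShareType: "persistent"},
            {PartyID: "delegate-party-1", PartyIndex: 2, ShareType: "delegate"},
        },
    })
    if err != nil {
        return "", err
    }
    return out.AccountID, nil
}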
@@ -0,0 +1,49 @@
package memory

import (
    "context"

    "github.com/rwadurian/mpc-system/pkg/logger"
    "github.com/rwadurian/mpc-system/services/session-coordinator/application/ports/output"
    "go.uber.org/zap"
)

// EventPublisherAdapter implements MessageBrokerPort using in-memory logging
// Events are logged but not distributed (suitable for single-instance deployment)
type EventPublisherAdapter struct{}

// NewEventPublisherAdapter creates a new in-memory event publisher
func NewEventPublisherAdapter() output.MessageBrokerPort {
    return &EventPublisherAdapter{}
}

// PublishEvent logs the event
func (p *EventPublisherAdapter) PublishEvent(ctx context.Context, topic string, event interface{}) error {
    logger.Info("Session event published",
        zap.String("topic", topic),
        zap.Any("event", event))
    return nil
}

// PublishMessage logs the message
func (p *EventPublisherAdapter) PublishMessage(ctx context.Context, partyID string, message interface{}) error {
    logger.Debug("Message published to party",
        zap.String("party_id", partyID))
    return nil
}

// Subscribe returns a closed channel (no actual subscription in single-instance mode)
func (p *EventPublisherAdapter) Subscribe(ctx context.Context, topic string) (<-chan []byte, error) {
    // Return a closed channel since we don't have actual pub/sub
    ch := make(chan []byte)
    close(ch)
    return ch, nil
}

// Close is a no-op for in-memory publisher
func (p *EventPublisherAdapter) Close() error {
    return nil
}

// Ensure interface compliance
var _ output.MessageBrokerPort = (*EventPublisherAdapter)(nil)
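Design note: this adapter keeps MessageBrokerPort satisfied without an external broker, which is sufficient while the coordinator runs as a single instance; a clustered deployment would swap in another implementation of the same port without touching the use cases. A minimal usage sketch, with an illustrative topic name:

func publishExample(ctx context.Context) error {
    // Single-instance wiring: events are only logged, not distributed to other nodes.
    broker := memory.NewEventPublisherAdapter()
    defer broker.Close()
    return broker.PublishEvent(ctx, "session.events", map[string]string{"event_type": "session_created"})
}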
@ -0,0 +1,115 @@
|
|||
package notification
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/rwadurian/mpc-system/pkg/logger"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/application/ports/output"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// StubNotificationService is a placeholder implementation of NotificationServicePort
|
||||
// It logs notification requests but does not actually send them
|
||||
//
|
||||
// TODO: Implement real notification adapters:
|
||||
// - EmailNotificationAdapter: Uses SMTP or email service (SendGrid, SES, etc.)
|
||||
// - SMSNotificationAdapter: Uses SMS gateway (Twilio, etc.)
|
||||
// - PushNotificationAdapter: Uses FCM/APNs for mobile push notifications
|
||||
type StubNotificationService struct{}
|
||||
|
||||
// NewStubNotificationService creates a new stub notification service
|
||||
func NewStubNotificationService() *StubNotificationService {
|
||||
return &StubNotificationService{}
|
||||
}
|
||||
|
||||
// SendSessionInvitation logs the invitation but does not send it
|
||||
// TODO: Implement actual notification sending
|
||||
func (s *StubNotificationService) SendSessionInvitation(
|
||||
ctx context.Context,
|
||||
input output.SessionInvitationInput,
|
||||
) error {
|
||||
logger.Info("[TODO] Would send session invitation notification",
|
||||
zap.String("party_id", input.PartyID),
|
||||
zap.String("session_id", input.SessionID),
|
||||
zap.String("session_type", input.SessionType),
|
||||
zap.String("email", maskEmail(input.Email)),
|
||||
zap.String("phone", maskPhone(input.Phone)),
|
||||
zap.Bool("has_push_token", input.PushToken != ""),
|
||||
zap.Int64("expires_at", input.ExpiresAt))
|
||||
|
||||
// TODO: Implement actual notification sending:
|
||||
// 1. If email is set, send email with session details and join link
|
||||
// 2. If phone is set, send SMS with short message and link
|
||||
// 3. If push_token is set, send push notification
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SendSessionReminder logs the reminder but does not send it
|
||||
// TODO: Implement actual notification sending
|
||||
func (s *StubNotificationService) SendSessionReminder(
|
||||
ctx context.Context,
|
||||
input output.SessionReminderInput,
|
||||
) error {
|
||||
logger.Info("[TODO] Would send session reminder notification",
|
||||
zap.String("party_id", input.PartyID),
|
||||
zap.String("session_id", input.SessionID),
|
||||
zap.String("time_remaining", input.TimeRemaining),
|
||||
zap.Int("reminder_count", input.ReminderCount))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SendSessionExpiredNotification logs the expiration but does not send it
|
||||
// TODO: Implement actual notification sending
|
||||
func (s *StubNotificationService) SendSessionExpiredNotification(
|
||||
ctx context.Context,
|
||||
input output.SessionExpiredInput,
|
||||
) error {
|
||||
logger.Info("[TODO] Would send session expired notification",
|
||||
zap.String("party_id", input.PartyID),
|
||||
zap.String("session_id", input.SessionID),
|
||||
zap.Int64("expired_at", input.ExpiredAt))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// maskEmail masks an email address for logging (e.g., "j***@example.com")
|
||||
func maskEmail(email string) string {
|
||||
if email == "" {
|
||||
return ""
|
||||
}
|
||||
if len(email) < 3 {
|
||||
return "***"
|
||||
}
|
||||
// Find @ position
|
||||
atPos := -1
|
||||
for i, c := range email {
|
||||
if c == '@' {
|
||||
atPos = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if atPos <= 0 {
|
||||
return "***"
|
||||
}
|
||||
return email[:1] + "***" + email[atPos:]
|
||||
}
|
||||
|
||||
// maskPhone masks a phone number for logging (e.g., "+1***1234")
|
||||
func maskPhone(phone string) string {
|
||||
if phone == "" {
|
||||
return ""
|
||||
}
|
||||
if len(phone) < 4 {
|
||||
return "***"
|
||||
}
|
||||
// Keep first 2 and last 4 characters
|
||||
if len(phone) <= 6 {
|
||||
return phone[:1] + "***" + phone[len(phone)-2:]
|
||||
}
|
||||
return phone[:2] + "***" + phone[len(phone)-4:]
|
||||
}
|
||||
|
||||
// Ensure StubNotificationService implements NotificationServicePort
|
||||
var _ output.NotificationServicePort = (*StubNotificationService)(nil)
|
||||
|
|
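For reference only (not part of this commit): a minimal sketch of what one of the TODO adapters above could look like, an SMTP-backed invitation sender. The SMTP address, credentials, and message format are assumptions, and only the invitation method is shown; the reminder and expiry methods would follow the same pattern.

package notification

import (
	"context"
	"fmt"
	"net/smtp"
	"strings"

	"github.com/rwadurian/mpc-system/services/session-coordinator/application/ports/output"
)

// EmailNotificationAdapter is an illustrative SMTP-backed sender for session
// invitations. Host, credentials, and message body are assumptions.
type EmailNotificationAdapter struct {
	smtpAddr string // host:port, e.g. "smtp.example.com:587" (assumed)
	auth     smtp.Auth
	from     string
}

// NewEmailNotificationAdapter builds the adapter from plain SMTP credentials.
func NewEmailNotificationAdapter(smtpAddr, username, password, from string) *EmailNotificationAdapter {
	host := smtpAddr
	if i := strings.IndexByte(smtpAddr, ':'); i > 0 {
		host = smtpAddr[:i]
	}
	return &EmailNotificationAdapter{
		smtpAddr: smtpAddr,
		auth:     smtp.PlainAuth("", username, password, host),
		from:     from,
	}
}

// SendSessionInvitation emails the session details and join token to the party.
// net/smtp has no context support, so ctx is accepted only to match the port.
func (a *EmailNotificationAdapter) SendSessionInvitation(ctx context.Context, input output.SessionInvitationInput) error {
	if input.Email == "" {
		return nil // party has no email channel; nothing to send
	}
	msg := fmt.Sprintf(
		"From: %s\r\nTo: %s\r\nSubject: MPC %s session invitation\r\n\r\n"+
			"You were selected for session %s.\r\nJoin token: %s\r\nExpires at (unix ms): %d\r\n",
		a.from, input.Email, input.SessionType, input.SessionID, input.JoinToken, input.ExpiresAt)
	return smtp.SendMail(a.smtpAddr, a.auth, a.from, []string{input.Email}, []byte(msg))
}

A real adapter would also want a delivery timeout and retry policy similar to the retry package added elsewhere in this commit.
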
@ -149,6 +149,7 @@ func (r *SessionPostgresRepo) FindByUUID(ctx context.Context, id uuid.UUID) (*en
|
|||
session.Status,
|
||||
session.MessageHash,
|
||||
session.PublicKey,
|
||||
"", // delegatePartyID - not stored in DB yet, will be empty for old sessions
|
||||
session.CreatedBy,
|
||||
session.CreatedAt,
|
||||
session.UpdatedAt,
|
||||
|
|
@ -189,6 +190,23 @@ func (r *SessionPostgresRepo) FindExpired(ctx context.Context) ([]*entities.MPCS
|
|||
return r.scanSessions(ctx, rows)
|
||||
}
|
||||
|
||||
// FindActive retrieves all active sessions (created or in_progress)
|
||||
func (r *SessionPostgresRepo) FindActive(ctx context.Context) ([]*entities.MPCSession, error) {
|
||||
rows, err := r.db.QueryContext(ctx, `
|
||||
SELECT id, session_type, threshold_n, threshold_t, status,
|
||||
message_hash, public_key, created_by, created_at, updated_at, expires_at, completed_at
|
||||
FROM mpc_sessions
|
||||
WHERE status IN ('created', 'in_progress')
|
||||
ORDER BY created_at ASC
|
||||
`)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
return r.scanSessions(ctx, rows)
|
||||
}
|
||||
|
||||
// FindByCreator retrieves sessions created by a user
|
||||
func (r *SessionPostgresRepo) FindByCreator(ctx context.Context, creatorID string) ([]*entities.MPCSession, error) {
|
||||
rows, err := r.db.QueryContext(ctx, `
|
||||
|
|
@ -400,6 +418,7 @@ func (r *SessionPostgresRepo) scanSessions(ctx context.Context, rows *sql.Rows)
|
|||
s.Status,
|
||||
s.MessageHash,
|
||||
s.PublicKey,
|
||||
"", // delegatePartyID - not stored in DB yet
|
||||
s.CreatedBy,
|
||||
s.CreatedAt,
|
||||
s.UpdatedAt,
|
||||
|
|
|
|||
|
|
@ -265,6 +265,7 @@ func cacheEntryToSession(entry sessionCacheEntry) (*entities.MPCSession, error)
|
|||
entry.Status,
|
||||
entry.MessageHash,
|
||||
entry.PublicKey,
|
||||
"", // delegatePartyID - not cached
|
||||
entry.CreatedBy,
|
||||
time.UnixMilli(entry.CreatedAt),
|
||||
time.UnixMilli(entry.UpdatedAt),
|
||||
|
|
|
|||
|
|
@ -0,0 +1,32 @@
package output

import "context"

// ShareInfo contains information about a key share for account creation
type ShareInfo struct {
	PartyID    string
	PartyIndex int
	ShareType  string // "persistent" or "delegate"
}

// CreateAccountInput contains input for creating an account after keygen
type CreateAccountInput struct {
	KeygenSessionID string
	PublicKey       []byte
	ThresholdN      int
	ThresholdT      int
	Shares          []ShareInfo
	DelegatePartyID string // The delegate party ID if any (for user share retrieval)
}

// CreateAccountOutput contains output from account creation
type CreateAccountOutput struct {
	AccountID string
	Success   bool
}

// AccountServicePort defines the interface for account service integration
type AccountServicePort interface {
	// CreateAccountFromKeygen creates an account record after successful keygen
	CreateAccountFromKeygen(ctx context.Context, input CreateAccountInput) (*CreateAccountOutput, error)
}

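For illustration, a minimal HTTP adapter satisfying AccountServicePort might look like the sketch below. It is not the NewAccountServiceClient wired up in main.go by this commit; the endpoint path, constructor name, and JSON shape are assumptions.

package httpclient

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/rwadurian/mpc-system/services/session-coordinator/application/ports/output"
)

// accountServiceClient is an illustrative HTTP adapter for AccountServicePort.
type accountServiceClient struct {
	baseURL string
	http    *http.Client
}

// NewAccountServiceClientSketch is a hypothetical constructor (the real client
// in this commit may differ in name and behavior).
func NewAccountServiceClientSketch(baseURL string) output.AccountServicePort {
	return &accountServiceClient{baseURL: baseURL, http: http.DefaultClient}
}

func (c *accountServiceClient) CreateAccountFromKeygen(ctx context.Context, in output.CreateAccountInput) (*output.CreateAccountOutput, error) {
	payload, err := json.Marshal(in)
	if err != nil {
		return nil, err
	}
	// Hypothetical route; the real Account Service API may differ.
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.baseURL+"/api/v1/accounts/from-keygen", bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := c.http.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
		return nil, fmt.Errorf("account service returned status %d", resp.StatusCode)
	}

	// CreateAccountOutput carries no json tags, so this assumes the response
	// uses the exported field names.
	var out output.CreateAccountOutput
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return nil, err
	}
	return &out, nil
}
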
@ -21,16 +21,17 @@ type MessageBrokerPort interface {

// Event types
const (
	TopicSessionCreated    = "mpc.session.created"
	TopicSessionStarted    = "mpc.session.started"
	TopicSessionCompleted  = "mpc.session.completed"
	TopicSessionFailed     = "mpc.session.failed"
	TopicSessionExpired    = "mpc.session.expired"
	TopicParticipantJoined = "mpc.participant.joined"
	TopicParticipantReady  = "mpc.participant.ready"
	TopicSessionCreated       = "mpc.session.created"
	TopicSessionStarted       = "mpc.session.started"
	TopicSessionCompleted     = "mpc.session.completed"
	TopicSessionFailed        = "mpc.session.failed"
	TopicSessionExpired       = "mpc.session.expired"
	TopicParticipantJoined    = "mpc.participant.joined"
	TopicParticipantReady     = "mpc.participant.ready"
	TopicParticipantCompleted = "mpc.participant.completed"
	TopicParticipantFailed = "mpc.participant.failed"
	TopicMPCMessage        = "mpc.message"
	TopicParticipantFailed   = "mpc.participant.failed"
	TopicParticipantTimedOut = "mpc.participant.timed_out"
	TopicMPCMessage          = "mpc.message"
)

// SessionCreatedEvent is published when a session is created

@ -100,6 +101,14 @@ type ParticipantFailedEvent struct {
	FailedAt int64 `json:"failed_at"`
}

// ParticipantTimedOutEvent is published when a participant times out due to inactivity
type ParticipantTimedOutEvent struct {
	SessionID         string `json:"session_id"`
	PartyID           string `json:"party_id"`
	InactivitySeconds int64  `json:"inactivity_seconds"`
	TimedOutAt        int64  `json:"timed_out_at"`
}

// MPCMessageEvent is published when an MPC message is routed
type MPCMessageEvent struct {
	MessageID string `json:"message_id"`

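The in-memory event publisher added by this commit is not shown in this diff; the sketch below only illustrates, in process, how the topics above can be fanned out to subscribers, assuming the PublishEvent(ctx, topic, event) call shape used by the use cases in this commit. The full MessageBrokerPort interface is larger than what the hunk shows, so this is not a drop-in replacement for the real adapter.

package memory

import (
	"context"
	"sync"
)

// inMemoryPublisher is an illustrative in-process fan-out publisher.
type inMemoryPublisher struct {
	mu          sync.RWMutex
	subscribers map[string][]func(event interface{})
}

func newInMemoryPublisher() *inMemoryPublisher {
	return &inMemoryPublisher{subscribers: make(map[string][]func(event interface{}))}
}

// Subscribe registers a handler for a topic such as TopicParticipantTimedOut.
func (p *inMemoryPublisher) Subscribe(topic string, handler func(event interface{})) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.subscribers[topic] = append(p.subscribers[topic], handler)
}

// PublishEvent delivers the event to every handler registered for the topic.
func (p *inMemoryPublisher) PublishEvent(ctx context.Context, topic string, event interface{}) error {
	p.mu.RLock()
	handlers := append([]func(event interface{}){}, p.subscribers[topic]...)
	p.mu.RUnlock()
	for _, h := range handlers {
		h(event)
	}
	return nil
}
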
@ -0,0 +1,86 @@
package output

import "context"

// NotificationServicePort defines the interface for sending notifications to parties
// This is used for parties operating in OFFLINE mode (with notification channels configured)
//
// Parties without notification channels operate in REAL-TIME mode:
// - Session events are pushed via Message Router
// - Parties must be connected to receive events immediately
//
// Parties with notification channels operate in OFFLINE mode:
// - Session invitations are sent via email/SMS/push notifications
// - Parties have 24 hours to complete their participation
// - Parties poll for pending sessions when they come online
//
// TODO: Implement notification service adapters (email, SMS, push)
type NotificationServicePort interface {
	// SendSessionInvitation sends a session invitation notification to a party
	// This is called when a party with notification channels is selected for a session
	SendSessionInvitation(ctx context.Context, input SessionInvitationInput) error

	// SendSessionReminder sends a reminder for a pending session
	// Called periodically for sessions approaching expiration
	SendSessionReminder(ctx context.Context, input SessionReminderInput) error

	// SendSessionExpiredNotification notifies a party that a session has expired
	SendSessionExpiredNotification(ctx context.Context, input SessionExpiredInput) error
}

// SessionInvitationInput contains data for sending a session invitation
type SessionInvitationInput struct {
	// Party identification
	PartyID string

	// Notification channels (at least one should be set)
	Email     string
	Phone     string
	PushToken string

	// Session details
	SessionID   string
	SessionType string // "keygen" or "sign"
	JoinToken   string
	ExpiresAt   int64 // Unix timestamp milliseconds

	// Optional context
	InitiatorID   string // Who initiated the session
	MessageToSign string // For sign sessions, human-readable description
}

// SessionReminderInput contains data for sending a session reminder
type SessionReminderInput struct {
	PartyID   string
	Email     string
	Phone     string
	PushToken string

	SessionID   string
	SessionType string
	JoinToken   string
	ExpiresAt   int64

	// Reminder specific
	TimeRemaining string // e.g., "2 hours", "30 minutes"
	ReminderCount int    // How many reminders have been sent
}

// SessionExpiredInput contains data for sending session expiration notification
type SessionExpiredInput struct {
	PartyID   string
	Email     string
	Phone     string
	PushToken string

	SessionID   string
	SessionType string
	ExpiredAt   int64
}

// NotificationResult represents the result of a notification attempt
type NotificationResult struct {
	Success bool
	Channel string // Which channel was used: "email", "sms", "push"
	Error   string // Error message if failed
}

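An adapter consuming these inputs typically picks one delivery channel per invitation. The helper below is purely illustrative and not part of this commit; the push > email > SMS priority is an assumption.

// preferredChannel is a hypothetical helper showing one way an adapter might
// choose which channel to use for a SessionInvitationInput.
func preferredChannel(in SessionInvitationInput) (string, bool) {
	switch {
	case in.PushToken != "":
		return "push", true
	case in.Email != "":
		return "email", true
	case in.Phone != "":
		return "sms", true
	default:
		return "", false // no notification channel configured
	}
}
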
@ -14,13 +14,43 @@ const (
	PartyRoleTemporary PartyRole = "temporary"
)

// NotificationChannel represents notification channels for a party
// If a party has any notification channel set, it operates in OFFLINE mode (24h async completion)
// If no notification channels are set, it operates in REAL-TIME mode (Message Router push)
type NotificationChannel struct {
	Email     string // Optional: email address for session invitations
	Phone     string // Optional: phone number for SMS notifications
	PushToken string // Optional: push notification token (FCM/APNs)
}

// HasAnyChannel returns true if any notification channel is configured
func (nc *NotificationChannel) HasAnyChannel() bool {
	return nc != nil && (nc.Email != "" || nc.Phone != "" || nc.PushToken != "")
}

// IsRealTimeMode returns true if party operates in real-time mode (no notification channels)
func (nc *NotificationChannel) IsRealTimeMode() bool {
	return !nc.HasAnyChannel()
}

// PartyEndpoint represents a party from the pool
// Note: Address is removed - parties connect to Message Router themselves
// Session Coordinator only needs PartyID for message routing
type PartyEndpoint struct {
	PartyID string
	Ready   bool
	Role    PartyRole // Role of the party (persistent, delegate, temporary)
	PartyID      string
	Ready        bool
	Role         PartyRole            // Role of the party (persistent, delegate, temporary)
	Notification *NotificationChannel // Optional: notification channels for offline mode
}

// IsRealTimeMode returns true if party operates in real-time mode
func (p *PartyEndpoint) IsRealTimeMode() bool {
	return p.Notification == nil || p.Notification.IsRealTimeMode()
}

// IsOfflineMode returns true if party operates in offline mode (has notification channels)
func (p *PartyEndpoint) IsOfflineMode() bool {
	return !p.IsRealTimeMode()
}

// PartySelectionFilter defines filtering criteria for party selection

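A small illustrative example (it would live in a _test.go file and is not part of this commit) showing how a configured notification channel flips a party from real-time to offline mode:

package output

import "fmt"

// ExamplePartyEndpointModes demonstrates the two operating modes using only
// the fields defined above; the party IDs and email are made up.
func ExamplePartyEndpointModes() {
	realTime := PartyEndpoint{PartyID: "party-a", Ready: true}
	offline := PartyEndpoint{
		PartyID:      "party-b",
		Notification: &NotificationChannel{Email: "b@example.com"},
	}
	fmt.Println(realTime.IsRealTimeMode(), offline.IsOfflineMode())
	// Output: true true
}
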
@ -0,0 +1,126 @@
|
|||
package use_cases
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/rwadurian/mpc-system/pkg/logger"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/application/ports/output"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/domain/repositories"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// CheckPartyTimeoutsUseCase handles checking for timed-out parties in active sessions
|
||||
type CheckPartyTimeoutsUseCase struct {
|
||||
sessionRepo repositories.SessionRepository
|
||||
eventPublisher output.MessageBrokerPort
|
||||
inactivityTimeout time.Duration
|
||||
}
|
||||
|
||||
// CheckPartyTimeoutsResult contains the result of the timeout check
|
||||
type CheckPartyTimeoutsResult struct {
|
||||
SessionsChecked int
|
||||
PartiesTimedOut int
|
||||
SessionsFailedDueToTimeout int
|
||||
}
|
||||
|
||||
// NewCheckPartyTimeoutsUseCase creates a new check party timeouts use case
|
||||
func NewCheckPartyTimeoutsUseCase(
|
||||
sessionRepo repositories.SessionRepository,
|
||||
eventPublisher output.MessageBrokerPort,
|
||||
inactivityTimeout time.Duration,
|
||||
) *CheckPartyTimeoutsUseCase {
|
||||
// Default to 2 minutes if not specified
|
||||
if inactivityTimeout <= 0 {
|
||||
inactivityTimeout = 2 * time.Minute
|
||||
}
|
||||
return &CheckPartyTimeoutsUseCase{
|
||||
sessionRepo: sessionRepo,
|
||||
eventPublisher: eventPublisher,
|
||||
inactivityTimeout: inactivityTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
// Execute checks all active sessions for timed-out parties
|
||||
func (uc *CheckPartyTimeoutsUseCase) Execute(ctx context.Context) (*CheckPartyTimeoutsResult, error) {
|
||||
result := &CheckPartyTimeoutsResult{}
|
||||
|
||||
// 1. Find all active sessions
|
||||
sessions, err := uc.sessionRepo.FindActive(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result.SessionsChecked = len(sessions)
|
||||
|
||||
for _, session := range sessions {
|
||||
// 2. Check for timed-out parties
|
||||
timedOutParties := session.MarkTimedOutPartiesAsFailed(uc.inactivityTimeout)
|
||||
|
||||
if len(timedOutParties) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
result.PartiesTimedOut += len(timedOutParties)
|
||||
|
||||
// 3. Publish timeout events for each timed-out party
|
||||
for _, party := range timedOutParties {
|
||||
event := output.ParticipantTimedOutEvent{
|
||||
SessionID: session.ID.String(),
|
||||
PartyID: party.PartyID.String(),
|
||||
InactivitySeconds: int64(party.TimeSinceLastActivity().Seconds()),
|
||||
TimedOutAt: time.Now().UnixMilli(),
|
||||
}
|
||||
if err := uc.eventPublisher.PublishEvent(ctx, output.TopicParticipantTimedOut, event); err != nil {
|
||||
logger.Error("failed to publish participant timeout event",
|
||||
zap.String("session_id", session.ID.String()),
|
||||
zap.String("party_id", party.PartyID.String()),
|
||||
zap.Error(err))
|
||||
}
|
||||
|
||||
logger.Warn("Party timed out due to inactivity",
|
||||
zap.String("session_id", session.ID.String()),
|
||||
zap.String("party_id", party.PartyID.String()),
|
||||
zap.Duration("inactivity", party.TimeSinceLastActivity()),
|
||||
zap.Duration("timeout_threshold", uc.inactivityTimeout))
|
||||
}
|
||||
|
||||
// 4. If any party timed out in an in_progress session, fail the entire session
|
||||
// MPC requires all parties to participate; if one fails, the session fails
|
||||
if session.Status.IsActive() && len(timedOutParties) > 0 {
|
||||
if err := session.Fail(); err != nil {
|
||||
logger.Error("failed to mark session as failed after party timeout",
|
||||
zap.String("session_id", session.ID.String()),
|
||||
zap.Error(err))
|
||||
} else {
|
||||
result.SessionsFailedDueToTimeout++
|
||||
|
||||
// Publish session failed event
|
||||
failedEvent := output.SessionFailedEvent{
|
||||
SessionID: session.ID.String(),
|
||||
Reason: "party timed out due to inactivity",
|
||||
FailedAt: time.Now().UnixMilli(),
|
||||
}
|
||||
if err := uc.eventPublisher.PublishEvent(ctx, output.TopicSessionFailed, failedEvent); err != nil {
|
||||
logger.Error("failed to publish session failed event",
|
||||
zap.String("session_id", session.ID.String()),
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 5. Save updated session
|
||||
if err := uc.sessionRepo.Update(ctx, session); err != nil {
|
||||
logger.Error("failed to update session after party timeout",
|
||||
zap.String("session_id", session.ID.String()),
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetInactivityTimeout returns the configured inactivity timeout
|
||||
func (uc *CheckPartyTimeoutsUseCase) GetInactivityTimeout() time.Duration {
|
||||
return uc.inactivityTimeout
|
||||
}
|
||||
|
|
@ -38,6 +38,7 @@ type CreateSessionUseCase struct {
|
|||
eventPublisher output.MessageBrokerPort
|
||||
partyPool output.PartyPoolPort
|
||||
messageRouterClient MessageRouterClient
|
||||
notificationService output.NotificationServicePort // TODO: Wire up notification service
|
||||
coordinatorSvc *services.SessionCoordinatorService
|
||||
}
|
||||
|
||||
|
|
@ -55,6 +56,28 @@ func NewCreateSessionUseCase(
|
|||
eventPublisher: eventPublisher,
|
||||
partyPool: partyPool,
|
||||
messageRouterClient: messageRouterClient,
|
||||
notificationService: nil, // TODO: Inject notification service
|
||||
coordinatorSvc: services.NewSessionCoordinatorService(),
|
||||
}
|
||||
}
|
||||
|
||||
// NewCreateSessionUseCaseWithNotification creates a new create session use case with notification support
|
||||
// TODO: Use this constructor when notification service is implemented
|
||||
func NewCreateSessionUseCaseWithNotification(
|
||||
sessionRepo repositories.SessionRepository,
|
||||
tokenGen jwt.TokenGenerator,
|
||||
eventPublisher output.MessageBrokerPort,
|
||||
partyPool output.PartyPoolPort,
|
||||
messageRouterClient MessageRouterClient,
|
||||
notificationService output.NotificationServicePort,
|
||||
) *CreateSessionUseCase {
|
||||
return &CreateSessionUseCase{
|
||||
sessionRepo: sessionRepo,
|
||||
tokenGen: tokenGen,
|
||||
eventPublisher: eventPublisher,
|
||||
partyPool: partyPool,
|
||||
messageRouterClient: messageRouterClient,
|
||||
notificationService: notificationService,
|
||||
coordinatorSvc: services.NewSessionCoordinatorService(),
|
||||
}
|
||||
}
|
||||
|
|
@ -145,6 +168,10 @@ func (uc *CreateSessionUseCase) Execute(
|
|||
}
|
||||
tokens["*"] = universalToken
|
||||
} else {
|
||||
// Track parties by mode for notification/push routing
|
||||
var realTimeParties []output.PartyEndpoint
|
||||
var offlineParties []output.PartyEndpoint
|
||||
|
||||
// Add selected parties as participants
|
||||
for i, party := range selectedParties {
|
||||
partyID, err := value_objects.NewPartyID(party.PartyID)
|
||||
|
|
@ -162,17 +189,40 @@ func (uc *CreateSessionUseCase) Execute(
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// Track delegate party for user share retrieval
|
||||
if party.Role == output.PartyRoleDelegate {
|
||||
session.DelegatePartyID = party.PartyID
|
||||
}
|
||||
|
||||
// Generate join token for this party
|
||||
token, err := uc.tokenGen.GenerateJoinToken(session.ID.UUID(), party.PartyID, expiresIn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tokens[party.PartyID] = token
|
||||
|
||||
// Categorize party by mode based on notification channel presence
|
||||
// Real-time mode: No notification channel - use Message Router push
|
||||
// Offline mode: Has notification channel - send notification, 24h to complete
|
||||
if party.IsRealTimeMode() {
|
||||
realTimeParties = append(realTimeParties, party)
|
||||
} else {
|
||||
offlineParties = append(offlineParties, party)
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info("selected parties from K8s pool",
|
||||
logger.Info("selected parties from pool",
|
||||
zap.String("session_id", session.ID.String()),
|
||||
zap.Int("party_count", len(selectedParties)))
|
||||
zap.Int("party_count", len(selectedParties)),
|
||||
zap.Int("realtime_parties", len(realTimeParties)),
|
||||
zap.Int("offline_parties", len(offlineParties)),
|
||||
zap.String("delegate_party", session.DelegatePartyID))
|
||||
|
||||
// TODO: Send notifications to offline parties
|
||||
// Offline parties have notification channels and can complete within 24 hours
|
||||
if len(offlineParties) > 0 && uc.notificationService != nil {
|
||||
uc.sendNotificationsToOfflineParties(ctx, session, offlineParties, tokens)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// No party pool configured - fallback to dynamic join
|
||||
|
|
@ -337,3 +387,57 @@ func extractPartyIDs(participants []input.ParticipantInfo) []string {
|
|||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// sendNotificationsToOfflineParties sends session invitation notifications to offline parties
|
||||
// Offline parties have notification channels configured and can complete within 24 hours
|
||||
// TODO: Implement actual notification sending when NotificationService is ready
|
||||
func (uc *CreateSessionUseCase) sendNotificationsToOfflineParties(
|
||||
ctx context.Context,
|
||||
session *entities.MPCSession,
|
||||
offlineParties []output.PartyEndpoint,
|
||||
tokens map[string]string,
|
||||
) {
|
||||
for _, party := range offlineParties {
|
||||
joinToken, exists := tokens[party.PartyID]
|
||||
if !exists {
|
||||
logger.Warn("no join token found for offline party",
|
||||
zap.String("session_id", session.ID.String()),
|
||||
zap.String("party_id", party.PartyID))
|
||||
continue
|
||||
}
|
||||
|
||||
// Build notification input
|
||||
notificationInput := output.SessionInvitationInput{
|
||||
PartyID: party.PartyID,
|
||||
SessionID: session.ID.String(),
|
||||
SessionType: string(session.SessionType),
|
||||
JoinToken: joinToken,
|
||||
ExpiresAt: session.ExpiresAt.UnixMilli(),
|
||||
InitiatorID: session.CreatedBy,
|
||||
}
|
||||
|
||||
// Add notification channels from party
|
||||
if party.Notification != nil {
|
||||
notificationInput.Email = party.Notification.Email
|
||||
notificationInput.Phone = party.Notification.Phone
|
||||
notificationInput.PushToken = party.Notification.PushToken
|
||||
}
|
||||
|
||||
// Send notification (async to not block session creation)
|
||||
go func(input output.SessionInvitationInput) {
|
||||
if err := uc.notificationService.SendSessionInvitation(ctx, input); err != nil {
|
||||
logger.Error("failed to send session invitation notification",
|
||||
zap.String("session_id", input.SessionID),
|
||||
zap.String("party_id", input.PartyID),
|
||||
zap.Error(err))
|
||||
} else {
|
||||
logger.Info("session invitation notification sent",
|
||||
zap.String("session_id", input.SessionID),
|
||||
zap.String("party_id", input.PartyID),
|
||||
zap.Bool("has_email", input.Email != ""),
|
||||
zap.Bool("has_phone", input.Phone != ""),
|
||||
zap.Bool("has_push", input.PushToken != ""))
|
||||
}
|
||||
}(notificationInput)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -117,6 +117,14 @@ func (uc *JoinSessionUseCase) Execute(
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// 6.1 Update party activity timestamp for timeout tracking
|
||||
if err := session.UpdatePartyActivity(partyID); err != nil {
|
||||
logger.Warn("failed to update party activity",
|
||||
zap.String("session_id", session.ID.String()),
|
||||
zap.String("party_id", inputData.PartyID),
|
||||
zap.Error(err))
|
||||
}
|
||||
|
||||
// 7. Check if session should start (all participants joined)
|
||||
if uc.coordinatorSvc.ShouldStartSession(session) {
|
||||
if err := session.Start(); err != nil {
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ import (
|
|||
"github.com/rwadurian/mpc-system/pkg/logger"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/application/ports/input"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/application/ports/output"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/domain/entities"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/domain/repositories"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/domain/services"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/domain/value_objects"
|
||||
|
|
@ -17,6 +18,7 @@ import (
|
|||
type ReportCompletionUseCase struct {
|
||||
sessionRepo repositories.SessionRepository
|
||||
eventPublisher output.MessageBrokerPort
|
||||
accountService output.AccountServicePort
|
||||
coordinatorSvc *services.SessionCoordinatorService
|
||||
}
|
||||
|
||||
|
|
@ -24,10 +26,12 @@ type ReportCompletionUseCase struct {
|
|||
func NewReportCompletionUseCase(
|
||||
sessionRepo repositories.SessionRepository,
|
||||
eventPublisher output.MessageBrokerPort,
|
||||
accountService output.AccountServicePort,
|
||||
) *ReportCompletionUseCase {
|
||||
return &ReportCompletionUseCase{
|
||||
sessionRepo: sessionRepo,
|
||||
eventPublisher: eventPublisher,
|
||||
accountService: accountService,
|
||||
coordinatorSvc: services.NewSessionCoordinatorService(),
|
||||
}
|
||||
}
|
||||
|
|
@ -54,6 +58,14 @@ func (uc *ReportCompletionUseCase) Execute(
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// 3.1 Update party activity timestamp (party is still active)
|
||||
if err := session.UpdatePartyActivity(partyID); err != nil {
|
||||
logger.Warn("failed to update party activity",
|
||||
zap.String("session_id", session.ID.String()),
|
||||
zap.String("party_id", inputData.PartyID),
|
||||
zap.Error(err))
|
||||
}
|
||||
|
||||
// 4. Update participant's public key if provided
|
||||
participant, err := session.GetParticipant(partyID)
|
||||
if err != nil {
|
||||
|
|
@ -82,6 +94,11 @@ func (uc *ReportCompletionUseCase) Execute(
|
|||
zap.String("session_id", session.ID.String()),
|
||||
zap.Error(err))
|
||||
}
|
||||
|
||||
// For keygen sessions, automatically create account record
|
||||
if session.SessionType == entities.SessionTypeKeygen && uc.accountService != nil {
|
||||
uc.createAccountFromKeygen(ctx, session)
|
||||
}
|
||||
}
|
||||
|
||||
// 6. Save updated session
|
||||
|
|
@ -107,3 +124,42 @@ func (uc *ReportCompletionUseCase) Execute(
|
|||
AllCompleted: allCompleted,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// createAccountFromKeygen creates an account record after successful keygen
|
||||
func (uc *ReportCompletionUseCase) createAccountFromKeygen(ctx context.Context, session *entities.MPCSession) {
|
||||
// Build share info from participants
|
||||
shares := make([]output.ShareInfo, 0, len(session.Participants))
|
||||
for _, p := range session.Participants {
|
||||
shareType := "persistent"
|
||||
if p.PartyID.String() == session.DelegatePartyID {
|
||||
shareType = "delegate"
|
||||
}
|
||||
shares = append(shares, output.ShareInfo{
|
||||
PartyID: p.PartyID.String(),
|
||||
PartyIndex: p.PartyIndex,
|
||||
ShareType: shareType,
|
||||
})
|
||||
}
|
||||
|
||||
// Call account service to create account
|
||||
accountInput := output.CreateAccountInput{
|
||||
KeygenSessionID: session.ID.String(),
|
||||
PublicKey: session.PublicKey,
|
||||
ThresholdN: session.Threshold.N(),
|
||||
ThresholdT: session.Threshold.T(),
|
||||
Shares: shares,
|
||||
DelegatePartyID: session.DelegatePartyID,
|
||||
}
|
||||
|
||||
result, err := uc.accountService.CreateAccountFromKeygen(ctx, accountInput)
|
||||
if err != nil {
|
||||
logger.Error("failed to create account from keygen",
|
||||
zap.String("session_id", session.ID.String()),
|
||||
zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info("account created from keygen",
|
||||
zap.String("session_id", session.ID.String()),
|
||||
zap.String("account_id", result.AccountID))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -78,6 +78,21 @@ func (uc *RouteMessageUseCase) Execute(
|
|||
return err
|
||||
}
|
||||
|
||||
// 4.1 Update sender's activity timestamp (they're actively participating)
|
||||
if err := session.UpdatePartyActivity(fromPartyID); err != nil {
|
||||
logger.Warn("failed to update sender activity",
|
||||
zap.String("session_id", session.ID.String()),
|
||||
zap.String("party_id", input.FromParty),
|
||||
zap.Error(err))
|
||||
}
|
||||
|
||||
// 4.2 Save updated activity timestamp
|
||||
if err := uc.sessionRepo.Update(ctx, session); err != nil {
|
||||
logger.Warn("failed to persist activity update",
|
||||
zap.String("session_id", session.ID.String()),
|
||||
zap.Error(err))
|
||||
}
|
||||
|
||||
// 5. Create message entity
|
||||
msg := entities.NewSessionMessage(
|
||||
session.ID,
|
||||
|
|
|
|||
|
|
@ -9,13 +9,12 @@ import (
|
|||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
_ "github.com/lib/pq"
|
||||
amqp "github.com/rabbitmq/amqp091-go"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/reflection"
|
||||
|
||||
|
|
@ -23,12 +22,14 @@ import (
|
|||
"github.com/rwadurian/mpc-system/pkg/config"
|
||||
"github.com/rwadurian/mpc-system/pkg/jwt"
|
||||
"github.com/rwadurian/mpc-system/pkg/logger"
|
||||
"github.com/rwadurian/mpc-system/pkg/middleware"
|
||||
grpcadapter "github.com/rwadurian/mpc-system/services/session-coordinator/adapters/input/grpc"
|
||||
httphandler "github.com/rwadurian/mpc-system/services/session-coordinator/adapters/input/http"
|
||||
grpcclient "github.com/rwadurian/mpc-system/services/session-coordinator/adapters/output/grpc"
|
||||
httpclient "github.com/rwadurian/mpc-system/services/session-coordinator/adapters/output/http"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/adapters/output/memory"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/adapters/output/notification"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/adapters/output/postgres"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/adapters/output/rabbitmq"
|
||||
redisadapter "github.com/rwadurian/mpc-system/services/session-coordinator/adapters/output/redis"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/application/use_cases"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/domain/repositories"
|
||||
"github.com/rwadurian/mpc-system/services/session-coordinator/infrastructure/discovery"
|
||||
|
|
@ -69,25 +70,12 @@ func main() {
|
|||
}
|
||||
defer db.Close()
|
||||
|
||||
// Initialize Redis connection
|
||||
redisClient := initRedis(cfg.Redis)
|
||||
defer redisClient.Close()
|
||||
|
||||
// Initialize RabbitMQ connection
|
||||
rabbitConn, err := initRabbitMQ(cfg.RabbitMQ)
|
||||
if err != nil {
|
||||
logger.Fatal("Failed to connect to RabbitMQ", zap.Error(err))
|
||||
}
|
||||
defer rabbitConn.Close()
|
||||
|
||||
// Initialize repositories and adapters
|
||||
sessionRepo := postgres.NewSessionPostgresRepo(db)
|
||||
messageRepo := postgres.NewMessagePostgresRepo(db)
|
||||
sessionCache := redisadapter.NewSessionCacheAdapter(redisClient)
|
||||
eventPublisher, err := rabbitmq.NewEventPublisherAdapter(rabbitConn)
|
||||
if err != nil {
|
||||
logger.Fatal("Failed to create event publisher", zap.Error(err))
|
||||
}
|
||||
|
||||
// Initialize in-memory event publisher (replaces RabbitMQ)
|
||||
eventPublisher := memory.NewEventPublisherAdapter()
|
||||
defer eventPublisher.Close()
|
||||
|
||||
// Initialize JWT service
|
||||
|
|
@ -114,17 +102,34 @@ func main() {
|
|||
partyPool := discovery.NewMessageRouterPartyDiscovery(messageRouterClient, logger.Log)
|
||||
logger.Info("Party discovery initialized using Message Router")
|
||||
|
||||
// Initialize Account Service HTTP client
|
||||
accountServiceAddr := os.Getenv("ACCOUNT_SERVICE_ADDR")
|
||||
if accountServiceAddr == "" {
|
||||
accountServiceAddr = "http://localhost:8081" // Default for local development
|
||||
}
|
||||
accountServiceClient := httpclient.NewAccountServiceClient(accountServiceAddr)
|
||||
logger.Info("Account Service client initialized", zap.String("address", accountServiceAddr))
|
||||
|
||||
// Initialize Notification Service (stub implementation - logs but doesn't send)
|
||||
// Replace with real implementation (email/SMS/push) when notification providers are configured
|
||||
notificationService := notification.NewStubNotificationService()
|
||||
logger.Info("Notification Service initialized (stub mode - logging only)")
|
||||
|
||||
// Initialize use cases
|
||||
createSessionUC := use_cases.NewCreateSessionUseCase(sessionRepo, jwtService, eventPublisher, partyPool, messageRouterClient)
|
||||
createSessionUC := use_cases.NewCreateSessionUseCaseWithNotification(sessionRepo, jwtService, eventPublisher, partyPool, messageRouterClient, notificationService)
|
||||
joinSessionUC := use_cases.NewJoinSessionUseCase(sessionRepo, jwtService, eventPublisher)
|
||||
getSessionStatusUC := use_cases.NewGetSessionStatusUseCase(sessionRepo)
|
||||
reportCompletionUC := use_cases.NewReportCompletionUseCase(sessionRepo, eventPublisher)
|
||||
reportCompletionUC := use_cases.NewReportCompletionUseCase(sessionRepo, eventPublisher, accountServiceClient)
|
||||
closeSessionUC := use_cases.NewCloseSessionUseCase(sessionRepo, messageRepo, eventPublisher)
|
||||
expireSessionsUC := use_cases.NewExpireSessionsUseCase(sessionRepo, eventPublisher)
|
||||
checkPartyTimeoutsUC := use_cases.NewCheckPartyTimeoutsUseCase(sessionRepo, eventPublisher, 2*time.Minute)
|
||||
|
||||
// Start session expiration background job
|
||||
go runSessionExpiration(expireSessionsUC)
|
||||
|
||||
// Start party timeout checking background job
|
||||
go runPartyTimeoutCheck(checkPartyTimeoutsUC)
|
||||
|
||||
// Create shutdown context
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
|
@ -151,6 +156,7 @@ func main() {
|
|||
go func() {
|
||||
if err := startHTTPServer(
|
||||
cfg,
|
||||
jwtService,
|
||||
createSessionUC,
|
||||
joinSessionUC,
|
||||
getSessionStatusUC,
|
||||
|
|
@ -182,7 +188,6 @@ func main() {
|
|||
logger.Info("Shutdown complete")
|
||||
|
||||
_ = ctx
|
||||
_ = sessionCache
|
||||
}
|
||||
|
||||
func initDatabase(cfg config.DatabaseConfig) (*sql.DB, error) {
|
||||
|
|
@ -225,138 +230,6 @@ func initDatabase(cfg config.DatabaseConfig) (*sql.DB, error) {
|
|||
return nil, fmt.Errorf("failed to connect to database after %d retries: %w", maxRetries, err)
|
||||
}
|
||||
|
||||
func initRedis(cfg config.RedisConfig) *redis.Client {
|
||||
const maxRetries = 10
|
||||
const retryDelay = 2 * time.Second
|
||||
|
||||
client := redis.NewClient(&redis.Options{
|
||||
Addr: cfg.Addr(),
|
||||
Password: cfg.Password,
|
||||
DB: cfg.DB,
|
||||
})
|
||||
|
||||
// Test connection with retry
|
||||
ctx := context.Background()
|
||||
for i := 0; i < maxRetries; i++ {
|
||||
if err := client.Ping(ctx).Err(); err != nil {
|
||||
logger.Warn("Redis connection failed, retrying...",
|
||||
zap.Int("attempt", i+1),
|
||||
zap.Int("max_retries", maxRetries),
|
||||
zap.Error(err))
|
||||
time.Sleep(retryDelay * time.Duration(i+1))
|
||||
continue
|
||||
}
|
||||
logger.Info("Connected to Redis")
|
||||
return client
|
||||
}
|
||||
|
||||
logger.Warn("Redis connection failed after retries, continuing without cache")
|
||||
return client
|
||||
}
|
||||
|
||||
func initRabbitMQ(cfg config.RabbitMQConfig) (*amqp.Connection, error) {
|
||||
const maxRetries = 10
|
||||
const retryDelay = 2 * time.Second
|
||||
|
||||
var conn *amqp.Connection
|
||||
var err error
|
||||
|
||||
for i := 0; i < maxRetries; i++ {
|
||||
// Attempt to dial RabbitMQ
|
||||
conn, err = amqp.Dial(cfg.URL())
|
||||
if err != nil {
|
||||
logger.Warn("Failed to dial RabbitMQ, retrying...",
|
||||
zap.Int("attempt", i+1),
|
||||
zap.Int("max_retries", maxRetries),
|
||||
zap.String("url", maskPassword(cfg.URL())),
|
||||
zap.Error(err))
|
||||
time.Sleep(retryDelay * time.Duration(i+1))
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify connection is actually usable by opening a channel
|
||||
ch, err := conn.Channel()
|
||||
if err != nil {
|
||||
logger.Warn("RabbitMQ connection established but channel creation failed, retrying...",
|
||||
zap.Int("attempt", i+1),
|
||||
zap.Int("max_retries", maxRetries),
|
||||
zap.Error(err))
|
||||
conn.Close()
|
||||
time.Sleep(retryDelay * time.Duration(i+1))
|
||||
continue
|
||||
}
|
||||
|
||||
// Test the channel with a simple operation (declare a test exchange)
|
||||
err = ch.ExchangeDeclare(
|
||||
"mpc.health.check", // name
|
||||
"fanout", // type
|
||||
false, // durable
|
||||
true, // auto-deleted
|
||||
false, // internal
|
||||
false, // no-wait
|
||||
nil, // arguments
|
||||
)
|
||||
if err != nil {
|
||||
logger.Warn("RabbitMQ channel created but exchange declaration failed, retrying...",
|
||||
zap.Int("attempt", i+1),
|
||||
zap.Int("max_retries", maxRetries),
|
||||
zap.Error(err))
|
||||
ch.Close()
|
||||
conn.Close()
|
||||
time.Sleep(retryDelay * time.Duration(i+1))
|
||||
continue
|
||||
}
|
||||
|
||||
// Clean up test exchange
|
||||
ch.ExchangeDelete("mpc.health.check", false, false)
|
||||
ch.Close()
|
||||
|
||||
// Setup connection close notification
|
||||
closeChan := make(chan *amqp.Error, 1)
|
||||
conn.NotifyClose(closeChan)
|
||||
go func() {
|
||||
err := <-closeChan
|
||||
if err != nil {
|
||||
logger.Error("RabbitMQ connection closed unexpectedly", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
logger.Info("Connected to RabbitMQ and verified connectivity",
|
||||
zap.Int("attempt", i+1))
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to connect to RabbitMQ after %d retries: %w", maxRetries, err)
|
||||
}
|
||||
|
||||
// maskPassword masks the password in the RabbitMQ URL for logging
|
||||
func maskPassword(url string) string {
|
||||
// Simple masking: amqp://user:password@host:port -> amqp://user:****@host:port
|
||||
start := 0
|
||||
for i := 0; i < len(url); i++ {
|
||||
if url[i] == ':' && i > 0 && url[i-1] != '/' {
|
||||
start = i + 1
|
||||
break
|
||||
}
|
||||
}
|
||||
if start == 0 {
|
||||
return url
|
||||
}
|
||||
|
||||
end := start
|
||||
for i := start; i < len(url); i++ {
|
||||
if url[i] == '@' {
|
||||
end = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if end == start {
|
||||
return url
|
||||
}
|
||||
|
||||
return url[:start] + "****" + url[end:]
|
||||
}
|
||||
|
||||
func startGRPCServer(
|
||||
cfg *config.Config,
|
||||
createSessionUC *use_cases.CreateSessionUseCase,
|
||||
|
|
@ -393,6 +266,7 @@ func startGRPCServer(
|
|||
|
||||
func startHTTPServer(
|
||||
cfg *config.Config,
|
||||
jwtService *jwt.JWTService,
|
||||
createSessionUC *use_cases.CreateSessionUseCase,
|
||||
joinSessionUC *use_cases.JoinSessionUseCase,
|
||||
getSessionStatusUC *use_cases.GetSessionStatusUseCase,
|
||||
|
|
@ -409,6 +283,27 @@ func startHTTPServer(
|
|||
router.Use(gin.Recovery())
|
||||
router.Use(gin.Logger())
|
||||
|
||||
// Apply security headers middleware
|
||||
router.Use(middleware.SecureHeaders())
|
||||
|
||||
// Apply CORS middleware
|
||||
allowedOrigins := []string{}
|
||||
if origins := os.Getenv("CORS_ALLOWED_ORIGINS"); origins != "" {
|
||||
allowedOrigins = strings.Split(origins, ",")
|
||||
}
|
||||
if cfg.Server.Environment != "production" {
|
||||
router.Use(middleware.AllowAllCORS())
|
||||
} else if len(allowedOrigins) > 0 {
|
||||
router.Use(middleware.CORS(middleware.CORSConfig{
|
||||
AllowOrigins: allowedOrigins,
|
||||
AllowMethods: []string{"GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"},
|
||||
AllowHeaders: []string{"Origin", "Content-Type", "Accept", "Authorization", "X-Requested-With"},
|
||||
ExposeHeaders: []string{"Content-Length", "X-Request-ID"},
|
||||
AllowCredentials: true,
|
||||
MaxAge: 86400,
|
||||
}))
|
||||
}
|
||||
|
||||
// Create HTTP handler
|
||||
httpHandler := httphandler.NewSessionHTTPHandler(
|
||||
createSessionUC,
|
||||
|
|
@ -419,7 +314,7 @@ func startHTTPServer(
|
|||
sessionRepo,
|
||||
)
|
||||
|
||||
// Health check
|
||||
// Health check (public)
|
||||
router.GET("/health", func(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"status": "healthy",
|
||||
|
|
@ -427,11 +322,25 @@ func startHTTPServer(
|
|||
})
|
||||
})
|
||||
|
||||
// Register API routes
|
||||
// Configure authentication middleware
|
||||
// Session Coordinator HTTP API requires authentication
|
||||
// Note: gRPC endpoints have separate authentication (service-to-service)
|
||||
authConfig := middleware.AuthConfig{
|
||||
JWTService: jwtService,
|
||||
SkipPaths: []string{
|
||||
"/health",
|
||||
},
|
||||
AllowAnonymous: false,
|
||||
}
|
||||
|
||||
// API routes with authentication
|
||||
api := router.Group("/api/v1")
|
||||
api.Use(middleware.BearerAuth(authConfig))
|
||||
httpHandler.RegisterRoutes(api)
|
||||
|
||||
logger.Info("Starting HTTP server", zap.Int("port", cfg.Server.HTTPPort))
|
||||
logger.Info("Starting HTTP server",
|
||||
zap.Int("port", cfg.Server.HTTPPort),
|
||||
zap.String("environment", cfg.Server.Environment))
|
||||
return router.Run(fmt.Sprintf(":%d", cfg.Server.HTTPPort))
|
||||
}
|
||||
|
||||
|
|
@ -451,3 +360,28 @@ func runSessionExpiration(expireSessionsUC *use_cases.ExpireSessionsUseCase) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func runPartyTimeoutCheck(checkPartyTimeoutsUC *use_cases.CheckPartyTimeoutsUseCase) {
|
||||
// Check for party timeouts every 30 seconds
|
||||
ticker := time.NewTicker(30 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
logger.Info("Party timeout checker started",
|
||||
zap.Duration("check_interval", 30*time.Second),
|
||||
zap.Duration("inactivity_timeout", checkPartyTimeoutsUC.GetInactivityTimeout()))
|
||||
|
||||
for range ticker.C {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
result, err := checkPartyTimeoutsUC.Execute(ctx)
|
||||
cancel()
|
||||
|
||||
if err != nil {
|
||||
logger.Error("Failed to check party timeouts", zap.Error(err))
|
||||
} else if result.PartiesTimedOut > 0 {
|
||||
logger.Warn("Detected timed-out parties",
|
||||
zap.Int("sessions_checked", result.SessionsChecked),
|
||||
zap.Int("parties_timed_out", result.PartiesTimedOut),
|
||||
zap.Int("sessions_failed", result.SessionsFailedDueToTimeout))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,13 +9,14 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
ErrSessionNotFound = errors.New("session not found")
|
||||
ErrSessionFull = errors.New("session is full")
|
||||
ErrSessionExpired = errors.New("session expired")
|
||||
ErrSessionNotInProgress = errors.New("session not in progress")
|
||||
ErrParticipantNotFound = errors.New("participant not found")
|
||||
ErrInvalidSessionType = errors.New("invalid session type")
|
||||
ErrSessionNotFound = errors.New("session not found")
|
||||
ErrSessionFull = errors.New("session is full")
|
||||
ErrSessionExpired = errors.New("session expired")
|
||||
ErrSessionNotInProgress = errors.New("session not in progress")
|
||||
ErrParticipantNotFound = errors.New("participant not found")
|
||||
ErrInvalidSessionType = errors.New("invalid session type")
|
||||
ErrInvalidStatusTransition = errors.New("invalid status transition")
|
||||
ErrParticipantTimedOut = errors.New("participant timed out")
|
||||
)
|
||||
|
||||
// SessionType represents the type of MPC session
|
||||
|
|
@ -34,18 +35,19 @@ func (t SessionType) IsValid() bool {
|
|||
// MPCSession represents an MPC session
|
||||
// Coordinator only manages session metadata, does not participate in MPC computation
|
||||
type MPCSession struct {
|
||||
ID value_objects.SessionID
|
||||
SessionType SessionType
|
||||
Threshold value_objects.Threshold
|
||||
Participants []*Participant
|
||||
Status value_objects.SessionStatus
|
||||
MessageHash []byte // Used for Sign sessions
|
||||
PublicKey []byte // Group public key after Keygen completion
|
||||
CreatedBy string
|
||||
CreatedAt time.Time
|
||||
UpdatedAt time.Time
|
||||
ExpiresAt time.Time
|
||||
CompletedAt *time.Time
|
||||
ID value_objects.SessionID
|
||||
SessionType SessionType
|
||||
Threshold value_objects.Threshold
|
||||
Participants []*Participant
|
||||
Status value_objects.SessionStatus
|
||||
MessageHash []byte // Used for Sign sessions
|
||||
PublicKey []byte // Group public key after Keygen completion
|
||||
DelegatePartyID string // The delegate party ID (returns share to user instead of storing)
|
||||
CreatedBy string
|
||||
CreatedAt time.Time
|
||||
UpdatedAt time.Time
|
||||
ExpiresAt time.Time
|
||||
CompletedAt *time.Time
|
||||
}
|
||||
|
||||
// NewMPCSession creates a new MPC session
|
||||
|
|
@ -282,6 +284,50 @@ func (s *MPCSession) GetOtherParties(excludePartyID value_objects.PartyID) []*Pa
|
|||
return others
|
||||
}
|
||||
|
||||
// UpdatePartyActivity updates the activity timestamp for a party
|
||||
func (s *MPCSession) UpdatePartyActivity(partyID value_objects.PartyID) error {
|
||||
for _, p := range s.Participants {
|
||||
if p.PartyID.Equals(partyID) {
|
||||
p.UpdateActivity()
|
||||
s.UpdatedAt = time.Now().UTC()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return ErrParticipantNotFound
|
||||
}
|
||||
|
||||
// GetTimedOutParties returns participants that have exceeded the inactivity timeout
|
||||
// Only checks active participants (joined or ready, not yet completed/failed)
|
||||
func (s *MPCSession) GetTimedOutParties(timeout time.Duration) []*Participant {
|
||||
timedOut := make([]*Participant, 0)
|
||||
for _, p := range s.Participants {
|
||||
// Only check active participants
|
||||
if p.IsJoined() && !p.IsCompleted() && !p.IsFailed() {
|
||||
if p.IsTimedOut(timeout) {
|
||||
timedOut = append(timedOut, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
return timedOut
|
||||
}
|
||||
|
||||
// HasTimedOutParties checks if any active participant has timed out
|
||||
func (s *MPCSession) HasTimedOutParties(timeout time.Duration) bool {
|
||||
return len(s.GetTimedOutParties(timeout)) > 0
|
||||
}
|
||||
|
||||
// MarkTimedOutPartiesAsFailed marks all timed-out parties as failed
|
||||
func (s *MPCSession) MarkTimedOutPartiesAsFailed(timeout time.Duration) []*Participant {
|
||||
timedOut := s.GetTimedOutParties(timeout)
|
||||
for _, p := range timedOut {
|
||||
p.MarkFailed()
|
||||
}
|
||||
if len(timedOut) > 0 {
|
||||
s.UpdatedAt = time.Now().UTC()
|
||||
}
|
||||
return timedOut
|
||||
}
|
||||
|
||||
// ToDTO converts to a DTO for API responses
|
||||
func (s *MPCSession) ToDTO() SessionDTO {
|
||||
participants := make([]ParticipantDTO, len(s.Participants))
|
||||
|
|
@ -333,6 +379,7 @@ func ReconstructSession(
|
|||
thresholdT, thresholdN int,
|
||||
status string,
|
||||
messageHash, publicKey []byte,
|
||||
delegatePartyID string,
|
||||
createdBy string,
|
||||
createdAt, updatedAt, expiresAt time.Time,
|
||||
completedAt *time.Time,
|
||||
|
|
@ -349,17 +396,18 @@ func ReconstructSession(
|
|||
}
|
||||
|
||||
return &MPCSession{
|
||||
ID: value_objects.SessionIDFromUUID(id),
|
||||
SessionType: SessionType(sessionType),
|
||||
Threshold: threshold,
|
||||
Participants: participants,
|
||||
Status: sessionStatus,
|
||||
MessageHash: messageHash,
|
||||
PublicKey: publicKey,
|
||||
CreatedBy: createdBy,
|
||||
CreatedAt: createdAt,
|
||||
UpdatedAt: updatedAt,
|
||||
ExpiresAt: expiresAt,
|
||||
CompletedAt: completedAt,
|
||||
ID: value_objects.SessionIDFromUUID(id),
|
||||
SessionType: SessionType(sessionType),
|
||||
Threshold: threshold,
|
||||
Participants: participants,
|
||||
Status: sessionStatus,
|
||||
MessageHash: messageHash,
|
||||
PublicKey: publicKey,
|
||||
DelegatePartyID: delegatePartyID,
|
||||
CreatedBy: createdBy,
|
||||
CreatedAt: createdAt,
|
||||
UpdatedAt: updatedAt,
|
||||
ExpiresAt: expiresAt,
|
||||
CompletedAt: completedAt,
|
||||
}, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,13 +15,14 @@ var (
|
|||
|
||||
// Participant represents a party in an MPC session
|
||||
type Participant struct {
|
||||
PartyID value_objects.PartyID
|
||||
PartyIndex int
|
||||
Status value_objects.ParticipantStatus
|
||||
DeviceInfo DeviceInfo
|
||||
PublicKey []byte // Party's identity public key (for authentication)
|
||||
JoinedAt time.Time
|
||||
CompletedAt *time.Time
|
||||
PartyID value_objects.PartyID
|
||||
PartyIndex int
|
||||
Status value_objects.ParticipantStatus
|
||||
DeviceInfo DeviceInfo
|
||||
PublicKey []byte // Party's identity public key (for authentication)
|
||||
JoinedAt time.Time
|
||||
CompletedAt *time.Time
|
||||
LastActivityAt time.Time // Last activity timestamp for timeout detection
|
||||
}
|
||||
|
||||
// NewParticipant creates a new participant
|
||||
|
|
@ -107,3 +108,27 @@ func (p *Participant) IsFailed() bool {
|
|||
func (p *Participant) SetPublicKey(publicKey []byte) {
|
||||
p.PublicKey = publicKey
|
||||
}
|
||||
|
||||
// UpdateActivity updates the last activity timestamp
|
||||
func (p *Participant) UpdateActivity() {
|
||||
p.LastActivityAt = time.Now().UTC()
|
||||
}
|
||||
|
||||
// IsTimedOut checks if the participant has exceeded the inactivity timeout
|
||||
func (p *Participant) IsTimedOut(timeout time.Duration) bool {
|
||||
// If LastActivityAt is zero, use JoinedAt as reference
|
||||
refTime := p.LastActivityAt
|
||||
if refTime.IsZero() {
|
||||
refTime = p.JoinedAt
|
||||
}
|
||||
return time.Since(refTime) > timeout
|
||||
}
|
||||
|
||||
// TimeSinceLastActivity returns the duration since the last activity
|
||||
func (p *Participant) TimeSinceLastActivity() time.Duration {
|
||||
refTime := p.LastActivityAt
|
||||
if refTime.IsZero() {
|
||||
refTime = p.JoinedAt
|
||||
}
|
||||
return time.Since(refTime)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -26,6 +26,9 @@ type SessionRepository interface {
|
|||
// FindExpired retrieves all expired sessions
|
||||
FindExpired(ctx context.Context) ([]*entities.MPCSession, error)
|
||||
|
||||
// FindActive retrieves all active sessions (created or in_progress)
|
||||
FindActive(ctx context.Context) ([]*entities.MPCSession, error)
|
||||
|
||||
// FindByCreator retrieves sessions created by a user
|
||||
FindByCreator(ctx context.Context, creatorID string) ([]*entities.MPCSession, error)
|
||||
|
||||
|
|
|
|||
|
|
@ -39,11 +39,24 @@ func (d *MessageRouterPartyDiscovery) GetAvailableParties() []output.PartyEndpoi
|
|||
|
||||
endpoints := make([]output.PartyEndpoint, 0, len(parties))
|
||||
for _, party := range parties {
|
||||
endpoints = append(endpoints, output.PartyEndpoint{
|
||||
endpoint := output.PartyEndpoint{
|
||||
PartyID: party.PartyId,
|
||||
Ready: party.Online,
|
||||
Role: output.PartyRole(party.Role),
|
||||
})
|
||||
}
|
||||
|
||||
// Parse notification channel from party registration
|
||||
// If notification channel is present, party operates in offline mode
|
||||
// If not present, party operates in real-time mode (Message Router push)
|
||||
if party.Notification != nil {
|
||||
endpoint.Notification = &output.NotificationChannel{
|
||||
Email: party.Notification.Email,
|
||||
Phone: party.Notification.Phone,
|
||||
PushToken: party.Notification.PushToken,
|
||||
}
|
||||
}
|
||||
|
||||
endpoints = append(endpoints, endpoint)
|
||||
}
|
||||
|
||||
return endpoints
|
||||
|
|
@ -65,11 +78,24 @@ func (d *MessageRouterPartyDiscovery) GetAvailablePartiesByRole(role output.Part
|
|||
|
||||
endpoints := make([]output.PartyEndpoint, 0, len(parties))
|
||||
for _, party := range parties {
|
||||
endpoints = append(endpoints, output.PartyEndpoint{
|
||||
endpoint := output.PartyEndpoint{
|
||||
PartyID: party.PartyId,
|
||||
Ready: party.Online,
|
||||
Role: output.PartyRole(party.Role),
|
||||
})
|
||||
}
|
||||
|
||||
// Parse notification channel from party registration
|
||||
// If notification channel is present, party operates in offline mode
|
||||
// If not present, party operates in real-time mode (Message Router push)
|
||||
if party.Notification != nil {
|
||||
endpoint.Notification = &output.NotificationChannel{
|
||||
Email: party.Notification.Email,
|
||||
Phone: party.Notification.Phone,
|
||||
PushToken: party.Notification.PushToken,
|
||||
}
|
||||
}
|
||||
|
||||
endpoints = append(endpoints, endpoint)
|
||||
}
|
||||
|
||||
return endpoints
|
||||
|
|
|
|||